forked from zaclys/searxng

[mod] len() removed from conditions

parent 239299d45e
commit c1d7d30b8e
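The pattern applied throughout this commit: in Python, empty strings, lists, tuples and dicts are all falsy, so PEP 8 recommends testing the object directly rather than its length. A minimal standalone sketch of the before/after idiom (illustrative values, not code from the diff):

    results = []

    # before: explicit length test
    if not len(results):
        print('no results')

    # after: empty containers are falsy, so the len() call is redundant
    if not results:
        print('no results')

One caveat worth keeping in mind below: the two spellings differ when the value can be None. len(None) raises TypeError, while `not None` is simply True, so the new spelling treats None like an empty container instead of crashing.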
@@ -163,7 +163,7 @@ def score_results(results):
                 duplicated = new_res
                 break
         if duplicated:
-            if len(res.get('content', '')) > len(duplicated.get('content', '')):  # noqa
+            if res.get('content') > duplicated.get('content'):
                 duplicated['content'] = res['content']
             duplicated['score'] += score
             duplicated['engines'].append(res['engine'])
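Unlike the remaining hunks, this first change is not behaviour-preserving: the old condition compared content lengths, while the new one compares the strings themselves, and Python orders strings lexicographically. A small sketch of the divergence (hypothetical values):

    a, b = 'zz', 'aaaa'

    print(len(a) > len(b))  # False: 'zz' is the shorter string
    print(a > b)            # True:  'zz' sorts after 'aaaa' lexicographically

So after this hunk the duplicate's content is replaced by the lexicographically greater string rather than the longer one. The `.get()` defaults also disappeared, so a missing key now yields None in the comparison, which is harmless under Python 2's ordering rules (this cStringIO-era code presumably targets Python 2) but would raise TypeError on Python 3.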
@@ -39,7 +39,7 @@ def parse(query):

 def do_query(data, q):
     ret = []
-    if not len(q):
+    if not q:
         return ret

     qkey = q[0]
@@ -35,7 +35,7 @@ def response(resp):
         title = link.text_content()

         content = ''
-        if len(result.xpath('./p[@class="desc"]')):
+        if result.xpath('./p[@class="desc"]'):
             content = result.xpath('./p[@class="desc"]')[0].text_content()

         results.append({'url': url, 'title': title, 'content': content})
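This test stays equivalent to the len() version because Element.xpath() returns a plain Python list, which is falsy exactly when it is empty. (The same would not be safe on a bare lxml element, whose truth value historically depends on it having children.) A minimal sketch, assuming lxml as the xpath call suggests:

    from lxml import html

    result = html.fromstring('<div><p class="desc">hello</p></div>')

    # xpath() returns a list; truthiness here simply means 'non-empty'
    matches = result.xpath('./p[@class="desc"]')
    if matches:
        print(matches[0].text_content())  # -> hello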
@@ -23,7 +23,7 @@ if xpath_results is a string element, then it's already done
 def extract_text(xpath_results):
     if type(xpath_results) == list:
         # it's list of result : concat everything using recursive call
-        if not len(xpath_results):
+        if not xpath_results:
             raise Exception('Empty url resultset')
         result = ''
         for e in xpath_results:
@@ -13,7 +13,7 @@ def request(query, params):
 def response(resp):
     raw_search_results = loads(resp.text)

-    if not len(raw_search_results):
+    if not raw_search_results:
         return []

     search_results = raw_search_results.get('channels', {})[0].get('items', [])
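Here the falsy test is slightly broader than the old one: len(None) raises TypeError, while `not None` is True, so a response body of JSON null now takes the early return instead of crashing. A short sketch, assuming loads comes from the standard json module as the `loads(resp.text)` call suggests:

    from json import loads

    for body in ('[]', '{}', 'null'):
        data = loads(body)
        # [] and {} are empty containers, None is falsy outright;
        # len(data) would raise TypeError in the null case
        if not data:
            print(repr(data), 'treated as an empty result set')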
@@ -26,10 +26,10 @@ def response(resp):
         tmp_result['url'] = result['link']
         tmp_result['content'] = ''

-        if len(result['description']):
+        if result['description']:
             tmp_result['content'] += result['description'] + "<br/>"

-        if len(result['pubDate']):
+        if result['pubDate']:
             tmp_result['content'] += result['pubDate'] + "<br/>"

         if result['size'] != '-1':
@@ -22,9 +22,10 @@ def response(resp):
     if not 'feed' in search_results:
         return results
     feed = search_results['feed']

     for result in feed['entry']:
         url = [x['href'] for x in result['link'] if x['type'] == 'text/html']
-        if not len(url):
+        if not url:
             return
         # remove tracking
         url = url[0].replace('feature=youtube_gdata', '')
@@ -32,12 +33,13 @@ def response(resp):
             url = url[:-1]
         title = result['title']['$t']
         content = ''

         thumbnail = ''
-        if len(result['media$group']['media$thumbnail']):
+
+        if result['media$group']['media$thumbnail']:
             thumbnail = result['media$group']['media$thumbnail'][0]['url']
             content += '<a href="{0}" title="{0}" ><img src="{1}" /></a>'.format(url, thumbnail)  # noqa
-        if len(content):
+
+        if content:
             content += '<br />' + result['content']['$t']
         else:
             content = result['content']['$t']
@@ -49,7 +49,7 @@ class Search(object):

         self.categories = []

-        if len(self.engines):
+        if self.engines:
             self.categories = list(set(engine['category']
                                        for engine in self.engines))
         else:
@@ -59,13 +59,13 @@ class Search(object):
                 if not category in categories:
                     continue
                 self.categories.append(category)
-        if not len(self.categories):
+        if not self.categories:
             cookie_categories = request.cookies.get('categories', '')
             cookie_categories = cookie_categories.split(',')
             for ccateg in cookie_categories:
                 if ccateg in categories:
                     self.categories.append(ccateg)
-        if not len(self.categories):
+        if not self.categories:
             self.categories = ['general']

         for categ in self.categories:
@@ -91,7 +91,7 @@ def render(template_name, **kwargs):
         for ccateg in cookie_categories:
             if ccateg in categories:
                 kwargs['selected_categories'].append(ccateg)
-        if not len(kwargs['selected_categories']):
+        if not kwargs['selected_categories']:
             kwargs['selected_categories'] = ['general']
     return render_template(template_name, **kwargs)

@@ -150,12 +150,12 @@ def index():
     elif search.request_data.get('format') == 'csv':
         csv = UnicodeWriter(cStringIO.StringIO())
         keys = ('title', 'url', 'content', 'host', 'engine', 'score')
-        if len(search.results):
+        if search.results:
             csv.writerow(keys)
             for row in search.results:
                 row['host'] = row['parsed_url'].netloc
                 csv.writerow([row.get(key, '') for key in keys])
         csv.stream.seek(0)
         response = Response(csv.stream.read(), mimetype='application/csv')
         cont_disp = 'attachment;Filename=searx_-_{0}.csv'.format(search.query)
         response.headers.add('Content-Disposition', cont_disp)