# searxngRebrandZaclys/searx/engines/json_engine.py
# SPDX-License-Identifier: AGPL-3.0-or-later
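"""Generic JSON engine: load a JSON API response and extract url, title and
content from it with simple slash separated key paths (see ``query()`` below),
as configured per engine in settings.yml."""
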
from collections.abc import Iterable
from json import loads
from urllib.parse import urlencode
from searx.utils import to_string, html_to_text

# engine dependent config (filled in from the engine's entry in settings.yml)
search_url = None
url_query = None
content_query = None
title_query = None
content_html_to_text = False
title_html_to_text = False
paging = False
suggestion_query = ''
results_query = ''

cookies = {}
headers = {}
'''Some engines might offer different results based on cookies or headers.
Possible use-case: set a safesearch cookie or header to moderate.'''

# parameters for engines with paging support
#
# number of results on each page
# (only needed if the site requires an offset rather than a page number)
page_size = 1
# number of the first page (usually 0 or 1)
first_page_num = 1
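
# A minimal sketch of the settings.yml entry that fills in the attributes
# above; every value here is an illustrative assumption, not a real engine:
#
#   - name: example json engine
#     engine: json_engine
#     search_url: https://example.org/api?q={query}&page={pageno}
#     url_query: results/url
#     title_query: results/title
#     content_query: results/content
#     paging: true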


def iterate(iterable):
    # yield (key, value) pairs from a dict, or (stringified index, value)
    # pairs from a list, so both container types can be walked uniformly
    if isinstance(iterable, dict):
        it = iterable.items()
    else:
        it = enumerate(iterable)
    for index, value in it:
        yield str(index), value
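
# e.g. (a sketch): list(iterate({'a': 1})) == [('a', 1)]
#                  list(iterate(['x', 'y'])) == [('0', 'x'), ('1', 'y')]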


def is_iterable(obj):
    # strings are technically iterable, but must be treated as leaf values
    if isinstance(obj, str):
        return False
    return isinstance(obj, Iterable)
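
# e.g. (a sketch): is_iterable([1, 2]) is True, is_iterable('ab') is False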


def parse(query_string):
    # split a slash separated key path into its non-empty segments
    q = []
    for part in query_string.split('/'):
        if part:
            q.append(part)
    return q
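
# e.g. (a sketch): parse('results/0/url') == ['results', '0', 'url']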


def do_query(data, q):
    # recursively collect every value reachable via the key path q; containers
    # whose key does not match the current segment are still searched, so the
    # path may begin anywhere in the nested structure
    ret = []
    if not q:
        return ret

    qkey = q[0]

    for key, value in iterate(data):
        if len(q) == 1:
            if key == qkey:
                ret.append(value)
            elif is_iterable(value):
                ret.extend(do_query(value, q))
        else:
            if not is_iterable(value):
                continue
            if key == qkey:
                ret.extend(do_query(value, q[1:]))
            else:
                ret.extend(do_query(value, q))
    return ret
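
# e.g. (a sketch): do_query({'a': {'b': 1}, 'c': [{'b': 2}]}, ['b']) == [1, 2]
#                  do_query({'a': {'b': 1}}, ['a', 'b']) == [1]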


def query(data, query_string):
    q = parse(query_string)
    return do_query(data, q)
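
# e.g. (a sketch): query(loads('{"results": [{"url": "u"}]}'), 'results/url')
#                  == ['u']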


def request(query, params):
    # urlencode({'q': ...}) returns 'q=<encoded>'; strip the leading 'q='
    query = urlencode({'q': query})[2:]

    fp = {'query': query}
    if paging and search_url.find('{pageno}') >= 0:
        fp['pageno'] = (params['pageno'] - 1) * page_size + first_page_num

    params['cookies'].update(cookies)
    params['headers'].update(headers)
    params['url'] = search_url.format(**fp)
    params['query'] = query

    return params
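
# e.g. (a sketch, assuming search_url = 'https://example.org/api?q={query}&page={pageno}',
# page_size = 1 and first_page_num = 1): a request for 'free software' with
# params['pageno'] == 2 sets
# params['url'] == 'https://example.org/api?q=free+software&page=2'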


def identity(arg):
    return arg


def response(resp):
    results = []
    json = loads(resp.text)

    title_filter = html_to_text if title_html_to_text else identity
    content_filter = html_to_text if content_html_to_text else identity

    if results_query:
        # a result container is configured: locate the container first, then
        # apply the url/title/content queries to each of its items
        rs = query(json, results_query)
        if not rs:
            return results
        for result in rs[0]:
            try:
                url = query(result, url_query)[0]
                title = query(result, title_query)[0]
            except Exception:
                # skip results that lack a url or title
                continue
            try:
                content = query(result, content_query)[0]
            except Exception:
                content = ""
            results.append(
                {
                    'url': to_string(url),
                    'title': title_filter(to_string(title)),
                    'content': content_filter(to_string(content)),
                }
            )
    else:
        # no container: the three queries each return a flat list of values
        # which are matched up positionally
        for url, title, content in zip(query(json, url_query), query(json, title_query), query(json, content_query)):
            results.append(
                {
                    'url': to_string(url),
                    'title': title_filter(to_string(title)),
                    'content': content_filter(to_string(content)),
                }
            )

    if not suggestion_query:
        return results
    for suggestion in query(json, suggestion_query):
        results.append({'suggestion': suggestion})
    return results
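
# e.g. (a sketch, assuming results_query = '', url_query = 'url',
# title_query = 'title', content_query = 'content'): a response body of
# '[{"url": "u", "title": "t", "content": "c"}]' yields one result,
# {'url': 'u', 'title': 't', 'content': 'c'}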