from lxml import html
from urllib.parse import urlencode

from searx.utils import extract_text, extract_url, eval_xpath

# engine specific variables, set per engine in settings.yml
search_url = None
url_xpath = None
content_xpath = None
title_xpath = None
thumbnail_xpath = False
categories = []
paging = False
suggestion_xpath = ''
results_xpath = ''
cached_xpath = ''
cached_url = ''

# parameters for engines with paging support
#
# number of results on each page
# (only needed if the site expects an offset rather than a page number)
page_size = 1
# number of the first page (usually 0 or 1)
first_page_num = 1
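
# Example engine definition in settings.yml using this module -- the name,
# URL, and XPath expressions are hypothetical, chosen only to illustrate
# the variables above:
#
#   - name: example
#     engine: xpath
#     search_url: https://example.org/search?q={query}&p={pageno}
#     results_xpath: //div[@class="result"]
#     url_xpath: .//a/@href
#     title_xpath: .//a
#     content_xpath: .//p[@class="snippet"]
#     paging: true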


def request(query, params):
    # urlencode({'q': query}) returns 'q=<encoded query>'; [2:] drops the
    # leading 'q=' so only the encoded query is substituted into the URL
    query = urlencode({'q': query})[2:]

    fp = {'query': query}

    if paging and search_url.find('{pageno}') >= 0:
        fp['pageno'] = (params['pageno'] - 1) * page_size + first_page_num

    params['url'] = search_url.format(**fp)
    params['query'] = query

    return params
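
# A quick check of the pageno arithmetic above, assuming a site that counts
# results by offset with ten results per page (page_size = 10,
# first_page_num = 0): params['pageno'] 1 -> 0, 2 -> 10, 3 -> 20. With the
# module defaults (page_size = 1, first_page_num = 1) the mapping is the
# identity: page n -> n.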


def response(resp):
    results = []
    dom = html.fromstring(resp.text)
    is_onion = 'onions' in categories

    if results_xpath:
        # a results_xpath is given: iterate over the result blocks and
        # extract each field relative to its block
        for result in eval_xpath(dom, results_xpath):
            url = extract_url(eval_xpath(result, url_xpath), search_url)
            title = extract_text(eval_xpath(result, title_xpath))
            content = extract_text(eval_xpath(result, content_xpath))
            tmp_result = {'url': url, 'title': title, 'content': content}

            # add thumbnail if available
            if thumbnail_xpath:
                thumbnail_xpath_result = eval_xpath(result, thumbnail_xpath)
                if len(thumbnail_xpath_result) > 0:
                    tmp_result['img_src'] = extract_url(thumbnail_xpath_result, search_url)

            # add alternative cached url if available
            if cached_xpath:
                tmp_result['cached_url'] = cached_url + extract_text(eval_xpath(result, cached_xpath))

            if is_onion:
                tmp_result['is_onion'] = True

            results.append(tmp_result)
    else:
        # no results_xpath: select each field over the whole page and zip
        # the columns together into result dicts
        if cached_xpath:
            for url, title, content, cached in zip(
                (extract_url(x, search_url) for x in dom.xpath(url_xpath)),
                map(extract_text, dom.xpath(title_xpath)),
                map(extract_text, dom.xpath(content_xpath)),
                map(extract_text, dom.xpath(cached_xpath))
            ):
                results.append({'url': url, 'title': title, 'content': content,
                                'cached_url': cached_url + cached, 'is_onion': is_onion})
        else:
            for url, title, content in zip(
                (extract_url(x, search_url) for x in dom.xpath(url_xpath)),
                map(extract_text, dom.xpath(title_xpath)),
                map(extract_text, dom.xpath(content_xpath))
            ):
                results.append({'url': url, 'title': title, 'content': content, 'is_onion': is_onion})
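
    # Note on the fallback above (illustrative numbers, not from a real
    # engine): the zipped columns must line up one-to-one. If url_xpath
    # matches three links but content_xpath matches only two snippets,
    # zip() stops at the shortest column and only two results are returned.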

    if not suggestion_xpath:
        return results

    for suggestion in eval_xpath(dom, suggestion_xpath):
        results.append({'suggestion': extract_text(suggestion)})

    return results
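

# Sketch of what response() hands back -- values are illustrative:
#
#   [{'url': 'https://example.org/page', 'title': 'Page title',
#     'content': 'snippet ...'},
#    {'suggestion': 'a related query'}]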