[mod] various engines: use eval_xpath* functions and searx.exceptions.*

Engine list: ahmia, duckduckgo_images, elasticsearch, google, google_images, google_videos, youtube_api
Alexandre Flament 2020-11-26 17:22:54 +01:00
parent ad72803ed9
commit 64cccae99e
7 changed files with 44 additions and 52 deletions
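For context, the eval_xpath* helpers these engines switch to come from searx.utils (re-exported for legacy engines by searx.engines.xpath). A minimal sketch of what they do compared to calling element.xpath() directly, on an assumed toy document:

    # Minimal sketch, assuming the searx.utils signatures of this period:
    # eval_xpath(element, xpath), eval_xpath_list(element, xpath, min_len=None),
    # eval_xpath_getindex(element, xpath, index, default=...).
    # Unlike raw element.xpath(), they validate the result and raise
    # searx-specific exceptions instead of IndexError/TypeError.
    from lxml import html
    from searx.utils import eval_xpath, eval_xpath_list, eval_xpath_getindex

    dom = html.fromstring('<div class="g"><h3>A title</h3><a href="https://example.org/">x</a></div>')

    nodes = eval_xpath_list(dom, '//div[@class="g"]')                     # guaranteed to be a list
    title = eval_xpath(nodes[0], './/h3')                                 # plain evaluation with error reporting
    href = eval_xpath_getindex(nodes[0], './/a/@href', 0, default=None)   # first match or the default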

searx/engines/ahmia.py

@@ -12,7 +12,7 @@
 from urllib.parse import urlencode, urlparse, parse_qs
 from lxml.html import fromstring
-from searx.engines.xpath import extract_url, extract_text
+from searx.engines.xpath import extract_url, extract_text, eval_xpath_list, eval_xpath

 # engine config
 categories = ['onions']
@@ -50,17 +50,17 @@ def response(resp):

     # trim results so there's not way too many at once
     first_result_index = page_size * (resp.search_params.get('pageno', 1) - 1)
-    all_results = dom.xpath(results_xpath)
+    all_results = eval_xpath_list(dom, results_xpath)
     trimmed_results = all_results[first_result_index:first_result_index + page_size]

     # get results
     for result in trimmed_results:
         # remove ahmia url and extract the actual url for the result
-        raw_url = extract_url(result.xpath(url_xpath), search_url)
+        raw_url = extract_url(eval_xpath_list(result, url_xpath, min_len=1), search_url)
         cleaned_url = parse_qs(urlparse(raw_url).query).get('redirect_url', [''])[0]

-        title = extract_text(result.xpath(title_xpath))
-        content = extract_text(result.xpath(content_xpath))
+        title = extract_text(eval_xpath(result, title_xpath))
+        content = extract_text(eval_xpath(result, content_xpath))

         results.append({'url': cleaned_url,
                         'title': title,
@@ -68,11 +68,11 @@ def response(resp):
                         'is_onion': True})

     # get spelling corrections
-    for correction in dom.xpath(correction_xpath):
+    for correction in eval_xpath_list(dom, correction_xpath):
         results.append({'correction': extract_text(correction)})

     # get number of results
-    number_of_results = dom.xpath(number_of_results_xpath)
+    number_of_results = eval_xpath(dom, number_of_results_xpath)
     if number_of_results:
         try:
             results.append({'number_of_results': int(extract_text(number_of_results))})
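The min_len=1 argument on the url_xpath lookup is what turns a missing link into an explicit engine error before extract_url() ever sees an empty list. A hypothetical illustration (exception name assumed from searx.utils / searx.exceptions):

    from lxml import html
    from searx.utils import eval_xpath_list
    from searx.exceptions import SearxEngineXPathException

    result = html.fromstring('<li class="result"><!-- no link in this result --></li>')
    try:
        eval_xpath_list(result, './/a/@href', min_len=1)
    except SearxEngineXPathException as e:
        # the exception carries the failing XPath expression, so the log
        # points at the broken selector instead of a generic error
        print('ahmia result without a URL:', e)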

searx/engines/duckduckgo_images.py

@@ -15,6 +15,7 @@
 from json import loads
 from urllib.parse import urlencode

+from searx.exceptions import SearxEngineAPIException
 from searx.engines.duckduckgo import get_region_code
 from searx.engines.duckduckgo import _fetch_supported_languages, supported_languages_url  # NOQA # pylint: disable=unused-import
 from searx.poolrequests import get
@@ -37,7 +38,7 @@ def get_vqd(query, headers):
     res = get(query_url, headers=headers)
     content = res.text
     if content.find('vqd=\'') == -1:
-        raise Exception('Request failed')
+        raise SearxEngineAPIException('Request failed')
     vqd = content[content.find('vqd=\'') + 5:]
     vqd = vqd[:vqd.find('\'')]
     return vqd
@@ -71,10 +72,7 @@ def response(resp):
     results = []

     content = resp.text
-    try:
-        res_json = loads(content)
-    except:
-        raise Exception('Cannot parse results')
+    res_json = loads(content)

     # parse results
     for result in res_json['results']:
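Two separate failure modes are untangled here: a response without the vqd token is now reported as an engine API error, while a malformed JSON body simply propagates the parser's own exception instead of being masked by the bare except. A rough sketch of what each path raises (exception classes assumed from searx.exceptions and the standard json module):

    from json import loads, JSONDecodeError
    from searx.exceptions import SearxEngineAPIException

    def vqd_from(content):
        # mirrors the check in get_vqd(): a missing token is an API-level error
        if content.find("vqd='") == -1:
            raise SearxEngineAPIException('Request failed')
        vqd = content[content.find("vqd='") + 5:]
        return vqd[:vqd.find("'")]

    try:
        vqd_from('<html>blocked</html>')
    except SearxEngineAPIException as e:
        print('reported to the searx core as an engine error:', e)

    try:
        loads('this is not JSON')        # no longer wrapped in a bare except
    except JSONDecodeError as e:
        print('parse failure surfaces unchanged:', e)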

searx/engines/elasticsearch.py

@@ -1,5 +1,6 @@
 from json import loads, dumps
 from requests.auth import HTTPBasicAuth
+from searx.exceptions import SearxEngineAPIException

 base_url = 'http://localhost:9200'
@@ -107,7 +108,7 @@ def response(resp):
     resp_json = loads(resp.text)
     if 'error' in resp_json:
-        raise Exception(resp_json['error'])
+        raise SearxEngineAPIException(resp_json['error'])

     for result in resp_json['hits']['hits']:
         r = {key: str(value) if not key.startswith('_') else value for key, value in result['_source'].items()}

searx/engines/google.py

@@ -20,9 +20,10 @@ Definitions`_.
 from urllib.parse import urlencode, urlparse
 from lxml import html
-from flask_babel import gettext
 from searx import logger
-from searx.utils import match_language, extract_text, eval_xpath
+from searx.utils import match_language, extract_text, eval_xpath, eval_xpath_list, eval_xpath_getindex
+from searx.exceptions import SearxEngineCaptchaException

 logger = logger.getChild('google engine')
@@ -131,14 +132,6 @@ suggestion_xpath = '//div[contains(@class, "card-section")]//a'
 spelling_suggestion_xpath = '//div[@class="med"]/p/a'


-def extract_text_from_dom(result, xpath):
-    """returns extract_text on the first result selected by the xpath or None"""
-    r = eval_xpath(result, xpath)
-    if len(r) > 0:
-        return extract_text(r[0])
-    return None
-
-
 def get_lang_country(params, lang_list, custom_aliases):
     """Returns a tuple with *langauage* on its first and *country* on its second
     position."""
@@ -210,10 +203,10 @@ def response(resp):
     # detect google sorry
     resp_url = urlparse(resp.url)
     if resp_url.netloc == 'sorry.google.com' or resp_url.path == '/sorry/IndexRedirect':
-        raise RuntimeWarning('sorry.google.com')
+        raise SearxEngineCaptchaException()

     if resp_url.path.startswith('/sorry'):
-        raise RuntimeWarning(gettext('CAPTCHA required'))
+        raise SearxEngineCaptchaException()

     # which subdomain ?
     # subdomain = resp.search_params.get('google_subdomain')
@@ -230,17 +223,16 @@ def response(resp):
     # results --> number_of_results
     try:
-        _txt = eval_xpath(dom, '//div[@id="result-stats"]//text()')[0]
+        _txt = eval_xpath_getindex(dom, '//div[@id="result-stats"]//text()', 0)
         _digit = ''.join([n for n in _txt if n.isdigit()])
         number_of_results = int(_digit)
         results.append({'number_of_results': number_of_results})
     except Exception as e:  # pylint: disable=broad-except
         logger.debug("did not 'number_of_results'")
         logger.error(e, exc_info=True)

     # parse results
-    for result in eval_xpath(dom, results_xpath):
+    for result in eval_xpath_list(dom, results_xpath):

         # google *sections*
         if extract_text(eval_xpath(result, g_section_with_header)):
@@ -248,14 +240,14 @@ def response(resp):
             continue

         try:
-            title_tag = eval_xpath(result, title_xpath)
-            if not title_tag:
+            title_tag = eval_xpath_getindex(result, title_xpath, 0, default=None)
+            if title_tag is None:
                 # this not one of the common google results *section*
                 logger.debug('ingoring <div class="g" ../> section: missing title')
                 continue
-            title = extract_text(title_tag[0])
-            url = eval_xpath(result, href_xpath)[0]
-            content = extract_text_from_dom(result, content_xpath)
+            title = extract_text(title_tag)
+            url = eval_xpath_getindex(result, href_xpath, 0)
+            content = extract_text(eval_xpath_getindex(result, content_xpath, 0, default=None), allow_none=True)
             results.append({
                 'url': url,
                 'title': title,
@@ -270,11 +262,11 @@ def response(resp):
             continue

     # parse suggestion
-    for suggestion in eval_xpath(dom, suggestion_xpath):
+    for suggestion in eval_xpath_list(dom, suggestion_xpath):
         # append suggestion
         results.append({'suggestion': extract_text(suggestion)})

-    for correction in eval_xpath(dom, spelling_suggestion_xpath):
+    for correction in eval_xpath_list(dom, spelling_suggestion_xpath):
         results.append({'correction': extract_text(correction)})

     # return results
@@ -286,7 +278,7 @@ def _fetch_supported_languages(resp):
     ret_val = {}

     dom = html.fromstring(resp.text)
-    radio_buttons = eval_xpath(dom, '//*[@id="langSec"]//input[@name="lr"]')
+    radio_buttons = eval_xpath_list(dom, '//*[@id="langSec"]//input[@name="lr"]')

     for x in radio_buttons:
         name = x.get("data-name")
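The removed extract_text_from_dom() helper is folded into one expression: eval_xpath_getindex(..., 0, default=None) selects the first match (or None), and extract_text(..., allow_none=True) turns that into text or None. A small equivalence sketch on an assumed fragment (content_xpath here is illustrative, not the engine's real selector):

    from lxml import html
    from searx.utils import extract_text, eval_xpath, eval_xpath_getindex

    result = html.fromstring('<div class="g"><div class="snippet">some text</div></div>')
    content_xpath = './/div[@class="snippet"]'          # illustrative selector

    def extract_text_from_dom(result, xpath):           # the helper removed above
        r = eval_xpath(result, xpath)
        if len(r) > 0:
            return extract_text(r[0])
        return None

    old = extract_text_from_dom(result, content_xpath)
    new = extract_text(eval_xpath_getindex(result, content_xpath, 0, default=None), allow_none=True)
    assert old == new == 'some text'                     # and both are None when nothing matches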

searx/engines/google_images.py

@@ -26,8 +26,8 @@ Definitions`_.
 from urllib.parse import urlencode, urlparse, unquote
 from lxml import html
-from flask_babel import gettext
 from searx import logger
+from searx.exceptions import SearxEngineCaptchaException
 from searx.utils import extract_text, eval_xpath
 from searx.engines.google import _fetch_supported_languages, supported_languages_url  # NOQA # pylint: disable=unused-import
@@ -128,10 +128,10 @@ def response(resp):
     # detect google sorry
     resp_url = urlparse(resp.url)
     if resp_url.netloc == 'sorry.google.com' or resp_url.path == '/sorry/IndexRedirect':
-        raise RuntimeWarning('sorry.google.com')
+        raise SearxEngineCaptchaException()

     if resp_url.path.startswith('/sorry'):
-        raise RuntimeWarning(gettext('CAPTCHA required'))
+        raise SearxEngineCaptchaException()

     # which subdomain ?
     # subdomain = resp.search_params.get('google_subdomain')

searx/engines/google_videos.py

@@ -13,7 +13,7 @@
 from datetime import date, timedelta
 from urllib.parse import urlencode
 from lxml import html
-from searx.utils import extract_text
+from searx.utils import extract_text, eval_xpath, eval_xpath_list, eval_xpath_getindex
 import re

 # engine dependent config
@@ -66,11 +66,11 @@ def response(resp):
     dom = html.fromstring(resp.text)

     # parse results
-    for result in dom.xpath('//div[@class="g"]'):
+    for result in eval_xpath_list(dom, '//div[@class="g"]'):

-        title = extract_text(result.xpath('.//h3'))
-        url = result.xpath('.//div[@class="r"]/a/@href')[0]
-        content = extract_text(result.xpath('.//span[@class="st"]'))
+        title = extract_text(eval_xpath(result, './/h3'))
+        url = eval_xpath_getindex(result, './/div[@class="r"]/a/@href', 0)
+        content = extract_text(eval_xpath(result, './/span[@class="st"]'))

         # get thumbnails
         script = str(dom.xpath('//script[contains(., "_setImagesSrc")]')[0].text)

searx/engines/youtube_api.py

@@ -11,6 +11,7 @@
 from json import loads
 from dateutil import parser
 from urllib.parse import urlencode
+from searx.exceptions import SearxEngineAPIException

 # engine dependent config
 categories = ['videos', 'music']
@@ -48,7 +49,7 @@ def response(resp):
     search_results = loads(resp.text)

     if 'error' in search_results and 'message' in search_results['error']:
-        raise Exception(search_results['error']['message'])
+        raise SearxEngineAPIException(search_results['error']['message'])

     # return empty array if there are no results
     if 'items' not in search_results:
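Same pattern as duckduckgo_images and elasticsearch above: the upstream error payload is surfaced through the engine-level exception rather than a plain Exception, so searx can treat it as a known API failure instead of an unexpected crash. A rough sketch (the payload below is illustrative, shaped like the error object checked in the diff):

    from searx.exceptions import SearxEngineAPIException

    def check_api_error(search_results):
        # mirrors the check added in response() above
        if 'error' in search_results and 'message' in search_results['error']:
            raise SearxEngineAPIException(search_results['error']['message'])

    try:
        check_api_error({'error': {'message': 'quota exceeded'}})   # illustrative payload
    except SearxEngineAPIException as e:
        print('YouTube API error reported to searx:', e)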