[mod] various engines: use eval_xpath* functions and searx.exceptions.*
Engine list: ahmia, duckduckgo_images, elasticsearch, google, google_images, google_videos, youtube_api
parent ad72803ed9
commit 64cccae99e
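Two mechanical patterns run through every hunk below: raw lxml `element.xpath(...)` calls become `eval_xpath` / `eval_xpath_list` / `eval_xpath_getindex` (imported from `searx.utils`, or from `searx.engines.xpath` in ahmia's case), and bare `raise Exception(...)` becomes a typed class from `searx.exceptions`. The following stand-ins are a minimal sketch of the contract the engines rely on after this commit; the real helpers raise exception types from `searx.exceptions`, so everything here apart from the function names and the `min_len` / `default` keywords (both visible in the hunks) is simplified:

# Minimal, self-contained stand-ins for the eval_xpath* helpers; the real
# implementations live in searx and raise searx's own exception types.
from lxml import html

_NOTSET = object()


class XPathException(ValueError):
    """Stand-in for searx's engine-level XPath exception."""


def eval_xpath(element, xpath_str):
    # Evaluate an XPath expression, wrapping lxml errors in one
    # engine-level exception that names the failing selector.
    try:
        return element.xpath(xpath_str)
    except Exception as e:
        raise XPathException('invalid xpath: {}'.format(xpath_str)) from e


def eval_xpath_list(element, xpath_str, min_len=None):
    # Like eval_xpath, but the result must be a list with at least
    # min_len items; short results fail loudly instead of downstream.
    result = eval_xpath(element, xpath_str)
    if not isinstance(result, list):
        raise XPathException('xpath does not select a list: {}'.format(xpath_str))
    if min_len is not None and len(result) < min_len:
        raise XPathException('not enough results for: {}'.format(xpath_str))
    return result


def eval_xpath_getindex(element, xpath_str, index, default=_NOTSET):
    # Return one item of the selected list; a missing index raises unless
    # a default is supplied (the google.py hunks use default=None).
    result = eval_xpath_list(element, xpath_str)
    if -len(result) <= index < len(result):
        return result[index]
    if default is not _NOTSET:
        return default
    raise XPathException('index {} not found: {}'.format(index, xpath_str))


if __name__ == '__main__':
    dom = html.fromstring('<div class="g"><h3>title</h3></div>')
    print(eval_xpath_list(dom, '//h3', min_len=1))                    # one element
    print(eval_xpath_getindex(dom, '//h4/text()', 0, default=None))   # None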
searx/engines/ahmia.py

@@ -12,7 +12,7 @@
 from urllib.parse import urlencode, urlparse, parse_qs
 from lxml.html import fromstring
-from searx.engines.xpath import extract_url, extract_text
+from searx.engines.xpath import extract_url, extract_text, eval_xpath_list, eval_xpath

 # engine config
 categories = ['onions']

@@ -50,17 +50,17 @@ def response(resp):

     # trim results so there's not way too many at once
     first_result_index = page_size * (resp.search_params.get('pageno', 1) - 1)
-    all_results = dom.xpath(results_xpath)
+    all_results = eval_xpath_list(dom, results_xpath)
     trimmed_results = all_results[first_result_index:first_result_index + page_size]

     # get results
     for result in trimmed_results:
         # remove ahmia url and extract the actual url for the result
-        raw_url = extract_url(result.xpath(url_xpath), search_url)
+        raw_url = extract_url(eval_xpath_list(result, url_xpath, min_len=1), search_url)
         cleaned_url = parse_qs(urlparse(raw_url).query).get('redirect_url', [''])[0]

-        title = extract_text(result.xpath(title_xpath))
-        content = extract_text(result.xpath(content_xpath))
+        title = extract_text(eval_xpath(result, title_xpath))
+        content = extract_text(eval_xpath(result, content_xpath))

         results.append({'url': cleaned_url,
                         'title': title,

@@ -68,11 +68,11 @@ def response(resp):
                         'is_onion': True})

     # get spelling corrections
-    for correction in dom.xpath(correction_xpath):
+    for correction in eval_xpath_list(dom, correction_xpath):
         results.append({'correction': extract_text(correction)})

     # get number of results
-    number_of_results = dom.xpath(number_of_results_xpath)
+    number_of_results = eval_xpath(dom, number_of_results_xpath)
     if number_of_results:
         try:
             results.append({'number_of_results': int(extract_text(number_of_results))})
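The one behavioural nuance in the ahmia hunks is `min_len=1` on the URL lookup: a result row whose link selector matches nothing now fails immediately with an error that names the selector, rather than leaving `extract_url` to fail on an empty list further down. A quick check of that contract, reusing the hypothetical stand-ins sketched above:

from lxml import html

# A result row without the expected link; the selector here is illustrative.
row = html.fromstring('<li class="result"><p>no link in this row</p></li>')
try:
    eval_xpath_list(row, './/a/@href', min_len=1)   # matches nothing
except XPathException as e:
    print('engine error, selector identified:', e)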
searx/engines/duckduckgo_images.py

@@ -15,6 +15,7 @@
 from json import loads
 from urllib.parse import urlencode
+from searx.exceptions import SearxEngineAPIException
 from searx.engines.duckduckgo import get_region_code
 from searx.engines.duckduckgo import _fetch_supported_languages, supported_languages_url  # NOQA  # pylint: disable=unused-import
 from searx.poolrequests import get

@@ -37,7 +38,7 @@ def get_vqd(query, headers):
     res = get(query_url, headers=headers)
     content = res.text
     if content.find('vqd=\'') == -1:
-        raise Exception('Request failed')
+        raise SearxEngineAPIException('Request failed')
     vqd = content[content.find('vqd=\'') + 5:]
     vqd = vqd[:vqd.find('\'')]
     return vqd

@@ -71,10 +72,7 @@ def response(resp):
     results = []

     content = resp.text
-    try:
-        res_json = loads(content)
-    except:
-        raise Exception('Cannot parse results')
+    res_json = loads(content)

     # parse results
     for result in res_json['results']:
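Both changes in this file serve one goal: failures that originate upstream should surface as a recognisable exception type instead of a bare `Exception`. The deleted `try/except` around `loads(content)` follows the same logic in reverse; re-raising `Exception('Cannot parse results')` only hid the JSON traceback, so the decode error is now simply allowed to propagate. A minimal sketch of why the type matters, assuming a hierarchy shaped like `searx.exceptions` (the two subclass names appear in this commit; the base class and the caller below are illustrative, not searx's actual code):

# Illustrative hierarchy; only the two subclass names are taken from the diff.
class SearxEngineException(Exception):
    """Base class for errors an engine raises on bad upstream behaviour."""

class SearxEngineAPIException(SearxEngineException):
    """The upstream API answered, but with an error payload."""

class SearxEngineCaptchaException(SearxEngineException):
    """The upstream service answered with a CAPTCHA challenge."""


def run_engine(engine_response):
    # Hypothetical caller: typed engine failures are logged and the engine
    # is skipped for this query; anything else is a bug and propagates.
    try:
        return engine_response()
    except SearxEngineException as e:
        print('engine error, skipping:', e)
        return []


def broken_engine():
    raise SearxEngineAPIException('Request failed')

print(run_engine(broken_engine))   # -> engine error, skipping: Request failed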
searx/engines/elasticsearch.py

@@ -1,5 +1,6 @@
 from json import loads, dumps
 from requests.auth import HTTPBasicAuth
+from searx.exceptions import SearxEngineAPIException


 base_url = 'http://localhost:9200'

@@ -107,7 +108,7 @@ def response(resp):

     resp_json = loads(resp.text)
     if 'error' in resp_json:
-        raise Exception(resp_json['error'])
+        raise SearxEngineAPIException(resp_json['error'])

     for result in resp_json['hits']['hits']:
         r = {key: str(value) if not key.startswith('_') else value for key, value in result['_source'].items()}
searx/engines/google.py

@@ -20,9 +20,10 @@ Definitions`_.

 from urllib.parse import urlencode, urlparse
 from lxml import html
 from flask_babel import gettext
 from searx import logger
-from searx.utils import match_language, extract_text, eval_xpath
+from searx.utils import match_language, extract_text, eval_xpath, eval_xpath_list, eval_xpath_getindex
+from searx.exceptions import SearxEngineCaptchaException


 logger = logger.getChild('google engine')

@@ -131,14 +132,6 @@ suggestion_xpath = '//div[contains(@class, "card-section")]//a'
 spelling_suggestion_xpath = '//div[@class="med"]/p/a'


-def extract_text_from_dom(result, xpath):
-    """returns extract_text on the first result selected by the xpath or None"""
-    r = eval_xpath(result, xpath)
-    if len(r) > 0:
-        return extract_text(r[0])
-    return None
-
-
 def get_lang_country(params, lang_list, custom_aliases):
     """Returns a tuple with *langauage* on its first and *country* on its second
     position."""

@@ -210,10 +203,10 @@ def response(resp):
     # detect google sorry
     resp_url = urlparse(resp.url)
     if resp_url.netloc == 'sorry.google.com' or resp_url.path == '/sorry/IndexRedirect':
-        raise RuntimeWarning('sorry.google.com')
+        raise SearxEngineCaptchaException()

     if resp_url.path.startswith('/sorry'):
-        raise RuntimeWarning(gettext('CAPTCHA required'))
+        raise SearxEngineCaptchaException()

     # which subdomain ?
     # subdomain = resp.search_params.get('google_subdomain')

@@ -229,18 +222,17 @@ def response(resp):
         logger.debug("did not found 'answer'")

     # results --> number_of_results
-    try:
-        _txt = eval_xpath(dom, '//div[@id="result-stats"]//text()')[0]
-        _digit = ''.join([n for n in _txt if n.isdigit()])
-        number_of_results = int(_digit)
-        results.append({'number_of_results': number_of_results})
-
-    except Exception as e:  # pylint: disable=broad-except
-        logger.debug("did not 'number_of_results'")
-        logger.error(e, exc_info=True)
+    try:
+        _txt = eval_xpath_getindex(dom, '//div[@id="result-stats"]//text()', 0)
+        _digit = ''.join([n for n in _txt if n.isdigit()])
+        number_of_results = int(_digit)
+        results.append({'number_of_results': number_of_results})
+    except Exception as e:  # pylint: disable=broad-except
+        logger.debug("did not 'number_of_results'")
+        logger.error(e, exc_info=True)

     # parse results
-    for result in eval_xpath(dom, results_xpath):
+    for result in eval_xpath_list(dom, results_xpath):

         # google *sections*
         if extract_text(eval_xpath(result, g_section_with_header)):

@@ -248,14 +240,14 @@ def response(resp):
             continue

         try:
-            title_tag = eval_xpath(result, title_xpath)
-            if not title_tag:
+            title_tag = eval_xpath_getindex(result, title_xpath, 0, default=None)
+            if title_tag is None:
                 # this not one of the common google results *section*
                 logger.debug('ingoring <div class="g" ../> section: missing title')
                 continue
-            title = extract_text(title_tag[0])
-            url = eval_xpath(result, href_xpath)[0]
-            content = extract_text_from_dom(result, content_xpath)
+            title = extract_text(title_tag)
+            url = eval_xpath_getindex(result, href_xpath, 0)
+            content = extract_text(eval_xpath_getindex(result, content_xpath, 0, default=None), allow_none=True)
             results.append({
                 'url': url,
                 'title': title,

@@ -270,11 +262,11 @@ def response(resp):
             continue

     # parse suggestion
-    for suggestion in eval_xpath(dom, suggestion_xpath):
+    for suggestion in eval_xpath_list(dom, suggestion_xpath):
         # append suggestion
         results.append({'suggestion': extract_text(suggestion)})

-    for correction in eval_xpath(dom, spelling_suggestion_xpath):
+    for correction in eval_xpath_list(dom, spelling_suggestion_xpath):
         results.append({'correction': extract_text(correction)})

     # return results

@@ -286,7 +278,7 @@ def _fetch_supported_languages(resp):
     ret_val = {}
     dom = html.fromstring(resp.text)

-    radio_buttons = eval_xpath(dom, '//*[@id="langSec"]//input[@name="lr"]')
+    radio_buttons = eval_xpath_list(dom, '//*[@id="langSec"]//input[@name="lr"]')

     for x in radio_buttons:
         name = x.get("data-name")
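Nothing is lost with the deleted `extract_text_from_dom` helper; its behaviour is now expressed inline, since `eval_xpath_getindex(..., 0, default=None)` yields the first match or `None` and `extract_text(..., allow_none=True)` passes a `None` node through. A side-by-side sketch, reusing the `eval_xpath*` stand-ins above (`extract_text` below is likewise a simplified stand-in that assumes element nodes):

def extract_text(node, allow_none=False):
    # Simplified stand-in: searx's extract_text handles more node kinds.
    if node is None:
        if allow_none:
            return None
        raise ValueError('extract_text: node is None')
    return node.text_content().strip()


def content_old(result, content_xpath):
    # The removed helper, logic copied from the hunk above.
    r = eval_xpath(result, content_xpath)
    if len(r) > 0:
        return extract_text(r[0])
    return None


def content_new(result, content_xpath):
    # The inlined replacement; both return None when nothing matches.
    node = eval_xpath_getindex(result, content_xpath, 0, default=None)
    return extract_text(node, allow_none=True)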
searx/engines/google_images.py

@@ -26,8 +26,8 @@ Definitions`_.

 from urllib.parse import urlencode, urlparse, unquote
 from lxml import html
-from flask_babel import gettext
 from searx import logger
+from searx.exceptions import SearxEngineCaptchaException
 from searx.utils import extract_text, eval_xpath
 from searx.engines.google import _fetch_supported_languages, supported_languages_url  # NOQA  # pylint: disable=unused-import

@@ -128,10 +128,10 @@ def response(resp):
     # detect google sorry
     resp_url = urlparse(resp.url)
     if resp_url.netloc == 'sorry.google.com' or resp_url.path == '/sorry/IndexRedirect':
-        raise RuntimeWarning('sorry.google.com')
+        raise SearxEngineCaptchaException()

     if resp_url.path.startswith('/sorry'):
-        raise RuntimeWarning(gettext('CAPTCHA required'))
+        raise SearxEngineCaptchaException()

     # which subdomain ?
     # subdomain = resp.search_params.get('google_subdomain')
searx/engines/google_videos.py

@@ -13,7 +13,7 @@
 from datetime import date, timedelta
 from urllib.parse import urlencode
 from lxml import html
-from searx.utils import extract_text
+from searx.utils import extract_text, eval_xpath, eval_xpath_list, eval_xpath_getindex
 import re

 # engine dependent config

@@ -66,11 +66,11 @@ def response(resp):
     dom = html.fromstring(resp.text)

     # parse results
-    for result in dom.xpath('//div[@class="g"]'):
+    for result in eval_xpath_list(dom, '//div[@class="g"]'):

-        title = extract_text(result.xpath('.//h3'))
-        url = result.xpath('.//div[@class="r"]/a/@href')[0]
-        content = extract_text(result.xpath('.//span[@class="st"]'))
+        title = extract_text(eval_xpath(result, './/h3'))
+        url = eval_xpath_getindex(result, './/div[@class="r"]/a/@href', 0)
+        content = extract_text(eval_xpath(result, './/span[@class="st"]'))

         # get thumbnails
         script = str(dom.xpath('//script[contains(., "_setImagesSrc")]')[0].text)
searx/engines/youtube_api.py

@@ -11,6 +11,7 @@
 from json import loads
 from dateutil import parser
 from urllib.parse import urlencode
+from searx.exceptions import SearxEngineAPIException

 # engine dependent config
 categories = ['videos', 'music']

@@ -48,7 +49,7 @@ def response(resp):
     search_results = loads(resp.text)

     if 'error' in search_results and 'message' in search_results['error']:
-        raise Exception(search_results['error']['message'])
+        raise SearxEngineAPIException(search_results['error']['message'])

     # return empty array if there are no results
     if 'items' not in search_results:
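In sum, all seven engines receive the same two substitutions: `eval_xpath*` wrappers instead of raw `Element.xpath()` calls, and `searx.exceptions` classes instead of bare `Exception` / `RuntimeWarning`. The migration is per call site rather than per file: google_videos, for instance, still reads its thumbnail script through a raw `dom.xpath(...)[0]` (visible as unchanged context at the end of its second hunk), so a missing script element there still surfaces as a plain `IndexError` rather than a typed engine error.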