# SPDX-License-Identifier: AGPL-3.0-or-later
"""
Yahoo (Web)
"""
from urllib.parse import unquote, urlencode
from lxml import html
from searx.utils import extract_text, extract_url, match_language, eval_xpath
# about
about = {
"website": 'https://search.yahoo.com/',
"wikidata_id": None,
"official_api_documentation": 'https://developer.yahoo.com/api/',
"use_official_api": False,
"require_api_key": False,
"results": 'HTML',
}
# engine dependent config
categories = ['general']
paging = True
time_range_support = True
# search-url
base_url = 'https://search.yahoo.com/'
search_url = 'search?{query}&b={offset}&fl=1&vl=lang_{lang}'
search_url_with_time = 'search?{query}&b={offset}&fl=1&vl=lang_{lang}&age={age}&btf={btf}&fr2=time'
supported_languages_url = 'https://search.yahoo.com/web/advanced'
# specific xpath variables
results_xpath = "//div[contains(concat(' ', normalize-space(@class), ' '), ' Sr ')]"
url_xpath = './/h3/a/@href'
title_xpath = './/h3/a'
content_xpath = './/div[contains(@class, "compText")]'
suggestion_xpath = "//div[contains(concat(' ', normalize-space(@class), ' '), ' AlsoTry ')]//a"
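
# Illustrative sketch (not taken from the source) of the result markup the
# xpaths above assume: a result row is a <div> whose class list contains
# "Sr", holding an <h3><a href="...">title</a></h3> and a snippet <div>
# whose class contains "compText". Yahoo's real markup may differ.
#
#   <div class="dd algo Sr">
#     <h3><a href="https://r.search.yahoo.com/...">Result title</a></h3>
#     <div class="compText">Result snippet ...</div>
#   </div>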
time_range_dict = {'day': ['1d', 'd'],
                   'week': ['1w', 'w'],
                   'month': ['1m', 'm']}
language_aliases = {'zh-CN': 'zh-CHS', 'zh-TW': 'zh-CHT', 'zh-HK': 'zh-CHT'}


# remove yahoo-specific tracking-url
def parse_url(url_string):
    endings = ['/RS', '/RK']
    endpositions = []
    start = url_string.find('http', url_string.find('/RU=') + 1)

    for ending in endings:
        endpos = url_string.rfind(ending)
        if endpos > -1:
            endpositions.append(endpos)

    if start == 0 or len(endpositions) == 0:
        return url_string
    else:
        end = min(endpositions)
        return unquote(url_string[start:end])
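
# Illustrative example (hypothetical redirect URL, not captured from Yahoo):
#
#   parse_url('https://r.search.yahoo.com/_ylt=X/RU=https%3a%2f%2fexample.org%2f/RK=2/RS=abc')
#   # -> 'https://example.org/'
#
# The target URL sits percent-encoded between the '/RU=' marker and the
# trailing '/RK'/'/RS' segments; parse_url slices it out and unquotes it.
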
def _get_url(query, offset, language, time_range):
    if time_range in time_range_dict:
        return base_url + search_url_with_time.format(offset=offset,
                                                      query=urlencode({'p': query}),
                                                      lang=language,
                                                      age=time_range_dict[time_range][0],
                                                      btf=time_range_dict[time_range][1])

    return base_url + search_url.format(offset=offset,
                                        query=urlencode({'p': query}),
                                        lang=language)
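
# A quick sanity check of the URL builder (values are illustrative):
#
#   _get_url('test', 1, 'en', None)
#   # -> 'https://search.yahoo.com/search?p=test&b=1&fl=1&vl=lang_en'
#
#   _get_url('test', 1, 'en', 'day')
#   # -> same URL via search_url_with_time, with '&age=1d&btf=d&fr2=time'
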
def _get_language(params):
    if params['language'] == 'all':
        return 'en'

    language = match_language(params['language'], supported_languages, language_aliases)
    if language not in language_aliases.values():
        language = language.split('-')[0]
    language = language.replace('-', '_').lower()

    return language
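
# Expected mappings, assuming match_language() resolves locales through
# language_aliases as its name suggests (illustrative, not tested here):
#
#   params['language'] == 'all'    ->  'en'
#   params['language'] == 'fr-FR'  ->  'fr'      (region dropped)
#   params['language'] == 'zh-TW'  ->  'zh_cht'  (alias kept, lowercased)
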
# do search-request
def request(query, params):
    if params['time_range'] and params['time_range'] not in time_range_dict:
        return params

    offset = (params['pageno'] - 1) * 10 + 1
    language = _get_language(params)

    params['url'] = _get_url(query, offset, language, params['time_range'])

    # TODO required?
    params['cookies']['sB'] = 'fl=1&vl=lang_{lang}&sh=1&rw=new&v=1'\
        .format(lang=language)

    return params
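
# Rough shape of a prepared request for page 2 of a query (illustrative;
# in searx the params dict is pre-filled by the core before request()):
#
#   params = {'pageno': 2, 'language': 'en-US', 'time_range': '', 'cookies': {}}
#   request('test', params)
#   # params['url']           -> '.../search?p=test&b=11&fl=1&vl=lang_en'
#   # params['cookies']['sB'] -> 'fl=1&vl=lang_en&sh=1&rw=new&v=1'
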
# get response from search-request
def response(resp):
    results = []

    dom = html.fromstring(resp.text)

    try:
        results_num = int(eval_xpath(dom, '//div[@class="compPagination"]/span[last()]/text()')[0]
                          .split()[0].replace(',', ''))
        results.append({'number_of_results': results_num})
    except Exception:
        # the result count is optional; ignore it if the page layout changed
        pass

    # parse results
    for result in eval_xpath(dom, results_xpath):
        try:
            url = parse_url(extract_url(eval_xpath(result, url_xpath), search_url))
            title = extract_text(eval_xpath(result, title_xpath)[0])
        except Exception:
            # skip results without a parsable link or title
            continue

        # pass the node list directly: extract_text() returns '' for an empty
        # list, so results without a snippet div no longer raise IndexError
        content = extract_text(eval_xpath(result, content_xpath))

        # append result
        results.append({'url': url,
                        'title': title,
                        'content': content})

    # if no suggestion found, return results
    suggestions = eval_xpath(dom, suggestion_xpath)
    if not suggestions:
        return results

    # parse suggestion
    for suggestion in suggestions:
        # append suggestion
        results.append({'suggestion': extract_text(suggestion)})

    # return results
    return results
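
# Shape of the returned list, per the code above (values illustrative):
#
#   [{'number_of_results': 1230000},                    # optional, at most one
#    {'url': '...', 'title': '...', 'content': '...'},  # one dict per result
#    {'suggestion': '...'},                             # zero or more
#    ...]
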
# get supported languages from their site
def _fetch_supported_languages(resp):
    supported_languages = []
    dom = html.fromstring(resp.text)
    options = eval_xpath(dom, '//div[@id="yschlang"]/span/label/input')
    for option in options:
        # the value attribute looks like 'lang_<code>'; strip the 5-char prefix
        code_parts = eval_xpath(option, './@value')[0][5:].split('_')
        if len(code_parts) == 2:
            code = code_parts[0] + '-' + code_parts[1].upper()
        else:
            code = code_parts[0]
        supported_languages.append(code)

    return supported_languages
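
# Sketch of the option-value parsing above (input values are illustrative):
#
#   'lang_fr'     -> code_parts == ['fr']         -> 'fr'
#   'lang_zh_cht' -> code_parts == ['zh', 'cht']  -> 'zh-CHT'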