"""
 Dictzone

 @website     https://dictzone.com/
 @provide-api no

 @using-api   no
 @results     HTML (using search portal)
 @stable      no (HTML can change)
 @parse       url, title, content
"""

import re

from urllib.parse import urljoin

from lxml import html

from searx.utils import is_valid_lang, eval_xpath

|
|
|
categories = ['general']
|
2020-08-06 15:42:46 +00:00
|
|
|
url = 'https://dictzone.com/{from_lang}-{to_lang}-dictionary/{query}'
|
2016-09-06 09:47:27 +00:00
|
|
|
weight = 100
|
|
|
|
|
2016-11-30 17:43:03 +00:00
|
|
|
parser_re = re.compile(b'.*?([a-z]+)-([a-z]+) ([^ ]+)$', re.I)
|
2016-09-06 09:47:27 +00:00
|
|
|
results_xpath = './/table[@id="r"]/tr'
|
|
|
|
|
|
|
|
|
|
|
|
def request(query, params):
|
2016-11-30 17:43:03 +00:00
|
|
|
m = parser_re.match(query)
|
2016-09-06 09:47:27 +00:00
|
|
|
if not m:
|
|
|
|
return params
|
|
|
|
|
|
|
|
from_lang, to_lang, query = m.groups()
|
|
|
|
|
2016-09-06 12:12:46 +00:00
|
|
|
from_lang = is_valid_lang(from_lang)
|
|
|
|
to_lang = is_valid_lang(to_lang)
|
2016-09-06 09:47:27 +00:00
|
|
|
|
2016-09-06 12:12:46 +00:00
|
|
|
if not from_lang or not to_lang:
|
2016-09-06 10:46:18 +00:00
|
|
|
return params
|
2016-09-06 09:47:27 +00:00
|
|
|
|
2016-09-06 14:43:48 +00:00
|
|
|
params['url'] = url.format(from_lang=from_lang[2],
|
|
|
|
to_lang=to_lang[2],
|
2020-08-06 15:42:46 +00:00
|
|
|
query=query.decode())
|
2016-09-06 09:47:27 +00:00
|
|
|
|
|
|
|
return params
|
|
|
|
|
2016-09-06 12:24:08 +00:00
|
|
|
|
2016-09-06 09:47:27 +00:00
|
|
|
def response(resp):
|
|
|
|
results = []
|
|
|
|
|
|
|
|
dom = html.fromstring(resp.text)
|
|
|
|
|
2019-11-15 08:31:37 +00:00
|
|
|
for k, result in enumerate(eval_xpath(dom, results_xpath)[1:]):
|
2016-09-06 09:47:27 +00:00
|
|
|
try:
|
2019-11-15 08:31:37 +00:00
|
|
|
from_result, to_results_raw = eval_xpath(result, './td')
|
2016-09-06 09:47:27 +00:00
|
|
|
except:
|
|
|
|
continue
|
|
|
|
|
|
|
|
to_results = []
|
2019-11-15 08:31:37 +00:00
|
|
|
for to_result in eval_xpath(to_results_raw, './p/a'):
|
2016-09-06 09:47:27 +00:00
|
|
|
t = to_result.text_content()
|
|
|
|
if t.strip():
|
|
|
|
to_results.append(to_result.text_content())
|
|
|
|
|
|
|
|
results.append({
|
2016-09-06 10:37:26 +00:00
|
|
|
'url': urljoin(resp.url, '?%d' % k),
|
2016-12-09 10:44:24 +00:00
|
|
|
'title': from_result.text_content(),
|
|
|
|
'content': '; '.join(to_results)
|
2016-09-06 09:47:27 +00:00
|
|
|
})
|
|
|
|
|
|
|
|
return results
|