From 906320d271d4c8e8aa72b240457ee343edcd4312 Mon Sep 17 00:00:00 2001
From: Robby O'Connor
Date: Sun, 18 Aug 2019 18:16:12 -0400
Subject: [PATCH 01/10] add --no-cache to package installs

Add --no-cache to apk upgrade and apk add calls.
---
 Dockerfile | 9 ++++-----
 1 file changed, 4 insertions(+), 5 deletions(-)

diff --git a/Dockerfile b/Dockerfile
index fb4f2cb67..33535837b 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -25,8 +25,8 @@ RUN addgroup -g ${SEARX_GID} searx && \
 
 COPY requirements.txt ./requirements.txt
 
-RUN apk -U upgrade \
-    && apk add -t build-dependencies \
+RUN apk upgrade --no-cache \
+    && apk add --no-cache -t build-dependencies \
     build-base \
     py3-setuptools \
     python3-dev \
@@ -36,7 +36,7 @@ RUN apk -U upgrade \
     openssl-dev \
     tar \
     git \
-    && apk add \
+    && apk add --no-cache \
     ca-certificates \
     su-exec \
     python3 \
@@ -48,8 +48,7 @@ RUN apk -U upgrade \
     uwsgi-python3 \
     && pip3 install --upgrade pip \
     && pip3 install --no-cache -r requirements.txt \
-    && apk del build-dependencies \
-    && rm -f /var/cache/apk/*
+    && apk del build-dependencies
 
 COPY --chown=searx:searx . .

From dae5bcf253235c7551d6120b881bc07ae3b7abcc Mon Sep 17 00:00:00 2001
From: Robert O'Connor
Date: Sun, 18 Aug 2019 18:26:40 -0400
Subject: [PATCH 02/10] Move instructions that rarely change to the top of the
 Dockerfile to delay build-cache invalidation
---
 Dockerfile | 20 ++++++++++----------
 1 file changed, 10 insertions(+), 10 deletions(-)

diff --git a/Dockerfile b/Dockerfile
index 33535837b..c0e6cbc88 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,10 +1,16 @@
 FROM alpine:3.10
+ENTRYPOINT ["/sbin/tini","--","/usr/local/searx/dockerfiles/docker-entrypoint.sh"]
+EXPOSE 8080
+VOLUME /etc/searx
+VOLUME /var/log/uwsgi
+RUN addgroup -g ${SEARX_GID} searx && \
+    adduser -u ${SEARX_UID} -D -h /usr/local/searx -s /bin/sh -G searx searx
 
-ARG VERSION_GITCOMMIT=unknow
-ARG SEARX_GIT_VERSION=unknow
+ARG VERSION_GITCOMMIT=unknown
+ARG SEARX_GIT_VERSION=unknown
 
-ARG SEARX_GID=1000
-ARG SEARX_UID=1000
+ARG SEARX_GID=977
+ARG SEARX_UID=977
 
 ARG TIMESTAMP_SETTINGS=0
 ARG TIMESTAMP_UWSGI=0
@@ -14,14 +20,9 @@ ARG LABEL_VCS_URL=
 ENV BASE_URL= \
     MORTY_KEY= \
     MORTY_URL=
-EXPOSE 8080
-VOLUME /etc/searx
-VOLUME /var/log/uwsgi
 
 WORKDIR /usr/local/searx
 
-RUN addgroup -g ${SEARX_GID} searx && \
-    adduser -u ${SEARX_UID} -D -h /usr/local/searx -s /bin/sh -G searx searx
 
 COPY requirements.txt ./requirements.txt
@@ -59,7 +60,6 @@ RUN su searx -c "/usr/bin/python3 -m compileall -q searx"; \
     echo "VERSION_STRING = VERSION_STRING + \"-$VERSION_GITCOMMIT\"" >> /usr/local/searx/searx/version.py; \
     fi
 
-ENTRYPOINT ["/sbin/tini","--","/usr/local/searx/dockerfiles/docker-entrypoint.sh"]
 
 # Keep this argument at the end since it change each time
 ARG LABEL_DATE=

From 86d1a4931fcdef88312b330bffa908afe5a70abf Mon Sep 17 00:00:00 2001
From: Robert O'Connor
Date: Sun, 18 Aug 2019 18:29:02 -0400
Subject: [PATCH 03/10] Create the searx user and group after the
 SEARX_UID/SEARX_GID ARGs are declared
--- Dockerfile | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/Dockerfile b/Dockerfile index c0e6cbc88..f336d4b3d 100644 --- a/Dockerfile +++ b/Dockerfile @@ -3,8 +3,6 @@ ENTRYPOINT ["/sbin/tini","--","/usr/local/searx/dockerfiles/docker-entrypoint.sh EXPOSE 8080 VOLUME /etc/searx VOLUME /var/log/uwsgi -RUN addgroup -g ${SEARX_GID} searx && \ - adduser -u ${SEARX_UID} -D -h /usr/local/searx -s /bin/sh -G searx searx ARG VERSION_GITCOMMIT=unknown ARG SEARX_GIT_VERSION=unknown @@ -12,6 +10,9 @@ ARG SEARX_GIT_VERSION=unknown ARG SEARX_GID=977 ARG SEARX_UID=977 +RUN addgroup -g ${SEARX_GID} searx && \ + adduser -u ${SEARX_UID} -D -h /usr/local/searx -s /bin/sh -G searx searx + ARG TIMESTAMP_SETTINGS=0 ARG TIMESTAMP_UWSGI=0 ARG LABEL_VCS_REF= From 3ea5ea8faebab3f5cecc05757dec4eced82af900 Mon Sep 17 00:00:00 2001 From: x250 <49014564+x250@users.noreply.github.com> Date: Sun, 3 Nov 2019 13:21:41 -0500 Subject: [PATCH 04/10] Mojeek engine hotfix --- searx/settings.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/searx/settings.yml b/searx/settings.yml index 835fbe5f6..0384052c1 100644 --- a/searx/settings.yml +++ b/searx/settings.yml @@ -744,7 +744,7 @@ engines: title_xpath : ./h2 content_xpath : ./p[@class="s"] suggestion_xpath : /html/body//div[@class="top-info"]/p[@class="top-info spell"]/a - first_page_num : 1 + first_page_num : 0 page_size : 10 disabled : True From 85b37233458c21b775bf98568c0a5c9260aa14fe Mon Sep 17 00:00:00 2001 From: Dalf Date: Fri, 15 Nov 2019 09:31:37 +0100 Subject: [PATCH 05/10] [mod] speed optimization compile XPath only once avoid redundant call to urlparse get_locale(webapp.py): avoid useless call to request.accept_languages.best_match --- searx/engines/bing.py | 20 ++++++++++---------- searx/engines/dictzone.py | 8 ++++---- searx/engines/doku.py | 15 ++++++++------- searx/engines/duckduckgo.py | 10 +++++----- searx/engines/duden.py | 15 ++++++++------- searx/engines/gigablast.py | 5 +++-- searx/engines/google.py | 32 ++++++++++++++++---------------- searx/engines/startpage.py | 9 +++++---- searx/engines/wikidata.py | 18 +----------------- searx/engines/xpath.py | 20 ++++++++++---------- searx/engines/yahoo.py | 18 +++++++++--------- searx/results.py | 8 +++++--- searx/utils.py | 15 +++++++++++++++ searx/webapp.py | 16 +++++++--------- 14 files changed, 106 insertions(+), 103 deletions(-) diff --git a/searx/engines/bing.py b/searx/engines/bing.py index 1e614867b..ed0b87dbd 100644 --- a/searx/engines/bing.py +++ b/searx/engines/bing.py @@ -18,7 +18,7 @@ from lxml import html from searx import logger, utils from searx.engines.xpath import extract_text from searx.url_utils import urlencode -from searx.utils import match_language, gen_useragent +from searx.utils import match_language, gen_useragent, eval_xpath logger = logger.getChild('bing engine') @@ -65,11 +65,11 @@ def response(resp): dom = html.fromstring(resp.text) # parse results - for result in dom.xpath('//div[@class="sa_cc"]'): - link = result.xpath('.//h3/a')[0] + for result in eval_xpath(dom, '//div[@class="sa_cc"]'): + link = eval_xpath(result, './/h3/a')[0] url = link.attrib.get('href') title = extract_text(link) - content = extract_text(result.xpath('.//p')) + content = extract_text(eval_xpath(result, './/p')) # append result results.append({'url': url, @@ -77,11 +77,11 @@ def response(resp): 'content': content}) # parse results again if nothing is found yet - for result in dom.xpath('//li[@class="b_algo"]'): - link = result.xpath('.//h2/a')[0] + for result in 
eval_xpath(dom, '//li[@class="b_algo"]'): + link = eval_xpath(result, './/h2/a')[0] url = link.attrib.get('href') title = extract_text(link) - content = extract_text(result.xpath('.//p')) + content = extract_text(eval_xpath(result, './/p')) # append result results.append({'url': url, @@ -89,7 +89,7 @@ def response(resp): 'content': content}) try: - result_len_container = "".join(dom.xpath('//span[@class="sb_count"]/text()')) + result_len_container = "".join(eval_xpath(dom, '//span[@class="sb_count"]/text()')) result_len_container = utils.to_string(result_len_container) if "-" in result_len_container: # Remove the part "from-to" for paginated request ... @@ -113,9 +113,9 @@ def response(resp): def _fetch_supported_languages(resp): supported_languages = [] dom = html.fromstring(resp.text) - options = dom.xpath('//div[@id="limit-languages"]//input') + options = eval_xpath(dom, '//div[@id="limit-languages"]//input') for option in options: - code = option.xpath('./@id')[0].replace('_', '-') + code = eval_xpath(option, './@id')[0].replace('_', '-') if code == 'nb': code = 'no' supported_languages.append(code) diff --git a/searx/engines/dictzone.py b/searx/engines/dictzone.py index 09db048cc..423af0971 100644 --- a/searx/engines/dictzone.py +++ b/searx/engines/dictzone.py @@ -11,7 +11,7 @@ import re from lxml import html -from searx.utils import is_valid_lang +from searx.utils import is_valid_lang, eval_xpath from searx.url_utils import urljoin categories = ['general'] @@ -47,14 +47,14 @@ def response(resp): dom = html.fromstring(resp.text) - for k, result in enumerate(dom.xpath(results_xpath)[1:]): + for k, result in enumerate(eval_xpath(dom, results_xpath)[1:]): try: - from_result, to_results_raw = result.xpath('./td') + from_result, to_results_raw = eval_xpath(result, './td') except: continue to_results = [] - for to_result in to_results_raw.xpath('./p/a'): + for to_result in eval_xpath(to_results_raw, './p/a'): t = to_result.text_content() if t.strip(): to_results.append(to_result.text_content()) diff --git a/searx/engines/doku.py b/searx/engines/doku.py index a391be444..d20e66026 100644 --- a/searx/engines/doku.py +++ b/searx/engines/doku.py @@ -11,6 +11,7 @@ from lxml.html import fromstring from searx.engines.xpath import extract_text +from searx.utils import eval_xpath from searx.url_utils import urlencode # engine dependent config @@ -45,16 +46,16 @@ def response(resp): # parse results # Quickhits - for r in doc.xpath('//div[@class="search_quickresult"]/ul/li'): + for r in eval_xpath(doc, '//div[@class="search_quickresult"]/ul/li'): try: - res_url = r.xpath('.//a[@class="wikilink1"]/@href')[-1] + res_url = eval_xpath(r, './/a[@class="wikilink1"]/@href')[-1] except: continue if not res_url: continue - title = extract_text(r.xpath('.//a[@class="wikilink1"]/@title')) + title = extract_text(eval_xpath(r, './/a[@class="wikilink1"]/@title')) # append result results.append({'title': title, @@ -62,13 +63,13 @@ def response(resp): 'url': base_url + res_url}) # Search results - for r in doc.xpath('//dl[@class="search_results"]/*'): + for r in eval_xpath(doc, '//dl[@class="search_results"]/*'): try: if r.tag == "dt": - res_url = r.xpath('.//a[@class="wikilink1"]/@href')[-1] - title = extract_text(r.xpath('.//a[@class="wikilink1"]/@title')) + res_url = eval_xpath(r, './/a[@class="wikilink1"]/@href')[-1] + title = extract_text(eval_xpath(r, './/a[@class="wikilink1"]/@title')) elif r.tag == "dd": - content = extract_text(r.xpath('.')) + content = extract_text(eval_xpath(r, '.')) # append result 
results.append({'title': title, diff --git a/searx/engines/duckduckgo.py b/searx/engines/duckduckgo.py index e77ef0126..0d2c0af2d 100644 --- a/searx/engines/duckduckgo.py +++ b/searx/engines/duckduckgo.py @@ -18,7 +18,7 @@ from json import loads from searx.engines.xpath import extract_text from searx.poolrequests import get from searx.url_utils import urlencode -from searx.utils import match_language +from searx.utils import match_language, eval_xpath # engine dependent config categories = ['general'] @@ -106,19 +106,19 @@ def response(resp): doc = fromstring(resp.text) # parse results - for i, r in enumerate(doc.xpath(result_xpath)): + for i, r in enumerate(eval_xpath(doc, result_xpath)): if i >= 30: break try: - res_url = r.xpath(url_xpath)[-1] + res_url = eval_xpath(r, url_xpath)[-1] except: continue if not res_url: continue - title = extract_text(r.xpath(title_xpath)) - content = extract_text(r.xpath(content_xpath)) + title = extract_text(eval_xpath(r, title_xpath)) + content = extract_text(eval_xpath(r, content_xpath)) # append result results.append({'title': title, diff --git a/searx/engines/duden.py b/searx/engines/duden.py index 444f18c1f..cf2f1a278 100644 --- a/searx/engines/duden.py +++ b/searx/engines/duden.py @@ -11,6 +11,7 @@ from lxml import html, etree import re from searx.engines.xpath import extract_text +from searx.utils import eval_xpath from searx.url_utils import quote, urljoin from searx import logger @@ -52,9 +53,9 @@ def response(resp): dom = html.fromstring(resp.text) try: - number_of_results_string = re.sub('[^0-9]', '', dom.xpath( - '//a[@class="active" and contains(@href,"/suchen/dudenonline")]/span/text()')[0] - ) + number_of_results_string =\ + re.sub('[^0-9]', '', + eval_xpath(dom, '//a[@class="active" and contains(@href,"/suchen/dudenonline")]/span/text()')[0]) results.append({'number_of_results': int(number_of_results_string)}) @@ -62,12 +63,12 @@ def response(resp): logger.debug("Couldn't read number of results.") pass - for result in dom.xpath('//section[not(contains(@class, "essay"))]'): + for result in eval_xpath(dom, '//section[not(contains(@class, "essay"))]'): try: - url = result.xpath('.//h2/a')[0].get('href') + url = eval_xpath(result, './/h2/a')[0].get('href') url = urljoin(base_url, url) - title = result.xpath('string(.//h2/a)').strip() - content = extract_text(result.xpath('.//p')) + title = eval_xpath(result, 'string(.//h2/a)').strip() + content = extract_text(eval_xpath(result, './/p')) # append result results.append({'url': url, 'title': title, diff --git a/searx/engines/gigablast.py b/searx/engines/gigablast.py index 6b0402233..a84f3f69d 100644 --- a/searx/engines/gigablast.py +++ b/searx/engines/gigablast.py @@ -15,6 +15,7 @@ from json import loads from time import time from lxml.html import fromstring from searx.url_utils import urlencode +from searx.utils import eval_xpath # engine dependent config categories = ['general'] @@ -99,9 +100,9 @@ def response(resp): def _fetch_supported_languages(resp): supported_languages = [] dom = fromstring(resp.text) - links = dom.xpath('//span[@id="menu2"]/a') + links = eval_xpath(dom, '//span[@id="menu2"]/a') for link in links: - href = link.xpath('./@href')[0].split('lang%3A') + href = eval_xpath(link, './@href')[0].split('lang%3A') if len(href) == 2: code = href[1].split('_') if len(code) == 2: diff --git a/searx/engines/google.py b/searx/engines/google.py index 03f0523e7..a82b1f1f7 100644 --- a/searx/engines/google.py +++ b/searx/engines/google.py @@ -14,7 +14,7 @@ from lxml import html, etree from 
searx.engines.xpath import extract_text, extract_url from searx import logger from searx.url_utils import urlencode, urlparse, parse_qsl -from searx.utils import match_language +from searx.utils import match_language, eval_xpath logger = logger.getChild('google engine') @@ -156,7 +156,7 @@ def parse_url(url_string, google_hostname): # returns extract_text on the first result selected by the xpath or None def extract_text_from_dom(result, xpath): - r = result.xpath(xpath) + r = eval_xpath(result, xpath) if len(r) > 0: return extract_text(r[0]) return None @@ -226,21 +226,21 @@ def response(resp): # convert the text to dom dom = html.fromstring(resp.text) - instant_answer = dom.xpath('//div[@id="_vBb"]//text()') + instant_answer = eval_xpath(dom, '//div[@id="_vBb"]//text()') if instant_answer: results.append({'answer': u' '.join(instant_answer)}) try: - results_num = int(dom.xpath('//div[@id="resultStats"]//text()')[0] + results_num = int(eval_xpath(dom, '//div[@id="resultStats"]//text()')[0] .split()[1].replace(',', '')) results.append({'number_of_results': results_num}) except: pass # parse results - for result in dom.xpath(results_xpath): + for result in eval_xpath(dom, results_xpath): try: - title = extract_text(result.xpath(title_xpath)[0]) - url = parse_url(extract_url(result.xpath(url_xpath), google_url), google_hostname) + title = extract_text(eval_xpath(result, title_xpath)[0]) + url = parse_url(extract_url(eval_xpath(result, url_xpath), google_url), google_hostname) parsed_url = urlparse(url, google_hostname) # map result @@ -249,7 +249,7 @@ def response(resp): continue # if parsed_url.path.startswith(maps_path) or parsed_url.netloc.startswith(map_hostname_start): # print "yooooo"*30 - # x = result.xpath(map_near) + # x = eval_xpath(result, map_near) # if len(x) > 0: # # map : near the location # results = results + parse_map_near(parsed_url, x, google_hostname) @@ -286,11 +286,11 @@ def response(resp): continue # parse suggestion - for suggestion in dom.xpath(suggestion_xpath): + for suggestion in eval_xpath(dom, suggestion_xpath): # append suggestion results.append({'suggestion': extract_text(suggestion)}) - for correction in dom.xpath(spelling_suggestion_xpath): + for correction in eval_xpath(dom, spelling_suggestion_xpath): results.append({'correction': extract_text(correction)}) # return results @@ -299,9 +299,9 @@ def response(resp): def parse_images(result, google_hostname): results = [] - for image in result.xpath(images_xpath): - url = parse_url(extract_text(image.xpath(image_url_xpath)[0]), google_hostname) - img_src = extract_text(image.xpath(image_img_src_xpath)[0]) + for image in eval_xpath(result, images_xpath): + url = parse_url(extract_text(eval_xpath(image, image_url_xpath)[0]), google_hostname) + img_src = extract_text(eval_xpath(image, image_img_src_xpath)[0]) # append result results.append({'url': url, @@ -388,10 +388,10 @@ def attributes_to_html(attributes): def _fetch_supported_languages(resp): supported_languages = {} dom = html.fromstring(resp.text) - options = dom.xpath('//*[@id="langSec"]//input[@name="lr"]') + options = eval_xpath(dom, '//*[@id="langSec"]//input[@name="lr"]') for option in options: - code = option.xpath('./@value')[0].split('_')[-1] - name = option.xpath('./@data-name')[0].title() + code = eval_xpath(option, './@value')[0].split('_')[-1] + name = eval_xpath(option, './@data-name')[0].title() supported_languages[code] = {"name": name} return supported_languages diff --git a/searx/engines/startpage.py b/searx/engines/startpage.py index 
0f0ec6e18..76567396f 100644 --- a/searx/engines/startpage.py +++ b/searx/engines/startpage.py @@ -16,6 +16,7 @@ from datetime import datetime, timedelta import re from searx.engines.xpath import extract_text from searx.languages import language_codes +from searx.utils import eval_xpath # engine dependent config categories = ['general'] @@ -70,8 +71,8 @@ def response(resp): dom = html.fromstring(resp.text) # parse results - for result in dom.xpath(results_xpath): - links = result.xpath(link_xpath) + for result in eval_xpath(dom, results_xpath): + links = eval_xpath(result, link_xpath) if not links: continue link = links[0] @@ -87,8 +88,8 @@ def response(resp): title = extract_text(link) - if result.xpath(content_xpath): - content = extract_text(result.xpath(content_xpath)) + if eval_xpath(result, content_xpath): + content = extract_text(eval_xpath(result, content_xpath)) else: content = '' diff --git a/searx/engines/wikidata.py b/searx/engines/wikidata.py index 5ea2b9958..e913b3915 100644 --- a/searx/engines/wikidata.py +++ b/searx/engines/wikidata.py @@ -16,7 +16,7 @@ from searx.poolrequests import get from searx.engines.xpath import extract_text from searx.engines.wikipedia import _fetch_supported_languages, supported_languages_url from searx.url_utils import urlencode -from searx.utils import match_language +from searx.utils import match_language, eval_xpath from json import loads from lxml.html import fromstring @@ -57,22 +57,6 @@ language_fallback_xpath = '//sup[contains(@class,"wb-language-fallback-indicator calendar_name_xpath = './/sup[contains(@class,"wb-calendar-name")]' media_xpath = value_xpath + '//div[contains(@class,"commons-media-caption")]//a' -# xpath_cache -xpath_cache = {} - - -def get_xpath(xpath_str): - result = xpath_cache.get(xpath_str, None) - if not result: - result = etree.XPath(xpath_str) - xpath_cache[xpath_str] = result - return result - - -def eval_xpath(element, xpath_str): - xpath = get_xpath(xpath_str) - return xpath(element) - def get_id_cache(result): id_cache = {} diff --git a/searx/engines/xpath.py b/searx/engines/xpath.py index 61494ce4e..b75896cc7 100644 --- a/searx/engines/xpath.py +++ b/searx/engines/xpath.py @@ -1,6 +1,6 @@ from lxml import html from lxml.etree import _ElementStringResult, _ElementUnicodeResult -from searx.utils import html_to_text +from searx.utils import html_to_text, eval_xpath from searx.url_utils import unquote, urlencode, urljoin, urlparse search_url = None @@ -104,15 +104,15 @@ def response(resp): results = [] dom = html.fromstring(resp.text) if results_xpath: - for result in dom.xpath(results_xpath): - url = extract_url(result.xpath(url_xpath), search_url) - title = extract_text(result.xpath(title_xpath)) - content = extract_text(result.xpath(content_xpath)) + for result in eval_xpath(dom, results_xpath): + url = extract_url(eval_xpath(result, url_xpath), search_url) + title = extract_text(eval_xpath(result, title_xpath)) + content = extract_text(eval_xpath(result, content_xpath)) tmp_result = {'url': url, 'title': title, 'content': content} # add thumbnail if available if thumbnail_xpath: - thumbnail_xpath_result = result.xpath(thumbnail_xpath) + thumbnail_xpath_result = eval_xpath(result, thumbnail_xpath) if len(thumbnail_xpath_result) > 0: tmp_result['img_src'] = extract_url(thumbnail_xpath_result, search_url) @@ -120,14 +120,14 @@ def response(resp): else: for url, title, content in zip( (extract_url(x, search_url) for - x in dom.xpath(url_xpath)), - map(extract_text, dom.xpath(title_xpath)), - map(extract_text, 
dom.xpath(content_xpath)) + x in eval_xpath(dom, url_xpath)), + map(extract_text, eval_xpath(dom, title_xpath)), + map(extract_text, eval_xpath(dom, content_xpath)) ): results.append({'url': url, 'title': title, 'content': content}) if not suggestion_xpath: return results - for suggestion in dom.xpath(suggestion_xpath): + for suggestion in eval_xpath(dom, suggestion_xpath): results.append({'suggestion': extract_text(suggestion)}) return results diff --git a/searx/engines/yahoo.py b/searx/engines/yahoo.py index 73b78bcf7..36c1a11f8 100644 --- a/searx/engines/yahoo.py +++ b/searx/engines/yahoo.py @@ -14,7 +14,7 @@ from lxml import html from searx.engines.xpath import extract_text, extract_url from searx.url_utils import unquote, urlencode -from searx.utils import match_language +from searx.utils import match_language, eval_xpath # engine dependent config categories = ['general'] @@ -109,21 +109,21 @@ def response(resp): dom = html.fromstring(resp.text) try: - results_num = int(dom.xpath('//div[@class="compPagination"]/span[last()]/text()')[0] + results_num = int(eval_xpath(dom, '//div[@class="compPagination"]/span[last()]/text()')[0] .split()[0].replace(',', '')) results.append({'number_of_results': results_num}) except: pass # parse results - for result in dom.xpath(results_xpath): + for result in eval_xpath(dom, results_xpath): try: - url = parse_url(extract_url(result.xpath(url_xpath), search_url)) - title = extract_text(result.xpath(title_xpath)[0]) + url = parse_url(extract_url(eval_xpath(result, url_xpath), search_url)) + title = extract_text(eval_xpath(result, title_xpath)[0]) except: continue - content = extract_text(result.xpath(content_xpath)[0]) + content = extract_text(eval_xpath(result, content_xpath)[0]) # append result results.append({'url': url, @@ -131,7 +131,7 @@ def response(resp): 'content': content}) # if no suggestion found, return results - suggestions = dom.xpath(suggestion_xpath) + suggestions = eval_xpath(dom, suggestion_xpath) if not suggestions: return results @@ -148,9 +148,9 @@ def response(resp): def _fetch_supported_languages(resp): supported_languages = [] dom = html.fromstring(resp.text) - options = dom.xpath('//div[@id="yschlang"]/span/label/input') + options = eval_xpath(dom, '//div[@id="yschlang"]/span/label/input') for option in options: - code_parts = option.xpath('./@value')[0][5:].split('_') + code_parts = eval_xpath(option, './@value')[0][5:].split('_') if len(code_parts) == 2: code = code_parts[0] + '-' + code_parts[1].upper() else: diff --git a/searx/results.py b/searx/results.py index a127024c8..3b1e4bd62 100644 --- a/searx/results.py +++ b/searx/results.py @@ -67,8 +67,9 @@ def merge_two_infoboxes(infobox1, infobox2): for url2 in infobox2.get('urls', []): unique_url = True - for url1 in infobox1.get('urls', []): - if compare_urls(urlparse(url1.get('url', '')), urlparse(url2.get('url', ''))): + parsed_url2 = urlparse(url2.get('url', '')) + for url1 in urls1: + if compare_urls(urlparse(url1.get('url', '')), parsed_url2): unique_url = False break if unique_url: @@ -188,8 +189,9 @@ class ResultContainer(object): add_infobox = True infobox_id = infobox.get('id', None) if infobox_id is not None: + parsed_url_infobox_id = urlparse(infobox_id) for existingIndex in self.infoboxes: - if compare_urls(urlparse(existingIndex.get('id', '')), urlparse(infobox_id)): + if compare_urls(urlparse(existingIndex.get('id', '')), parsed_url_infobox_id): merge_two_infoboxes(existingIndex, infobox) add_infobox = False diff --git a/searx/utils.py b/searx/utils.py index 
e61a134f7..5ea9dc89c 100644 --- a/searx/utils.py +++ b/searx/utils.py @@ -13,6 +13,7 @@ from numbers import Number from os.path import splitext, join from io import open from random import choice +from lxml.etree import XPath import sys import json @@ -51,6 +52,7 @@ ecma_unescape2_re = re.compile(r'%([0-9a-fA-F]{2})', re.UNICODE) useragents = json.loads(open(os.path.dirname(os.path.realpath(__file__)) + "/data/useragents.json", 'r', encoding='utf-8').read()) +xpath_cache = dict() lang_to_lc_cache = dict() @@ -450,3 +452,16 @@ def get_engine_from_settings(name): return engine return {} + + +def get_xpath(xpath_str): + result = xpath_cache.get(xpath_str, None) + if result is None: + result = XPath(xpath_str) + xpath_cache[xpath_str] = result + return result + + +def eval_xpath(element, xpath_str): + xpath = get_xpath(xpath_str) + return xpath(element) diff --git a/searx/webapp.py b/searx/webapp.py index 3bb29140a..00895d96c 100644 --- a/searx/webapp.py +++ b/searx/webapp.py @@ -154,20 +154,18 @@ outgoing_proxies = settings['outgoing'].get('proxies') or None @babel.localeselector def get_locale(): - locale = request.accept_languages.best_match(settings['locales'].keys()) - - if request.preferences.get_value('locale') != '': - locale = request.preferences.get_value('locale') + if 'locale' in request.form\ + and request.form['locale'] in settings['locales']: + return request.form['locale'] if 'locale' in request.args\ and request.args['locale'] in settings['locales']: - locale = request.args['locale'] + return request.args['locale'] - if 'locale' in request.form\ - and request.form['locale'] in settings['locales']: - locale = request.form['locale'] + if request.preferences.get_value('locale') != '': + return request.preferences.get_value('locale') - return locale + return request.accept_languages.best_match(settings['locales'].keys()) # code-highlighter From 9299355570e32c4d24d7274d716eca1a93119d13 Mon Sep 17 00:00:00 2001 From: Marc Abonce Seguin Date: Sun, 24 Nov 2019 20:21:37 -0700 Subject: [PATCH 06/10] add seedpeer again --- searx/engines/seedpeer.py | 78 +++++++++++++++++++ searx/settings.yml | 5 ++ .../courgette/result_templates/torrent.html | 2 +- .../legacy/result_templates/torrent.html | 2 +- .../oscar/result_templates/torrent.html | 2 +- .../simple/result_templates/torrent.html | 2 +- tests/unit/engines/test_seedpeer.py | 66 ++++++++++++++++ 7 files changed, 153 insertions(+), 4 deletions(-) create mode 100644 searx/engines/seedpeer.py create mode 100644 tests/unit/engines/test_seedpeer.py diff --git a/searx/engines/seedpeer.py b/searx/engines/seedpeer.py new file mode 100644 index 000000000..f9b1f99c8 --- /dev/null +++ b/searx/engines/seedpeer.py @@ -0,0 +1,78 @@ +# Seedpeer (Videos, Music, Files) +# +# @website https://seedpeer.me +# @provide-api no (nothing found) +# +# @using-api no +# @results HTML (using search portal) +# @stable yes (HTML can change) +# @parse url, title, content, seed, leech, magnetlink + +from lxml import html +from json import loads +from operator import itemgetter +from searx.url_utils import quote, urljoin +from searx.engines.xpath import extract_text + + +url = 'https://seedpeer.me/' +search_url = url + 'search/{search_term}?page={page_no}' +torrent_file_url = url + 'torrent/{torrent_hash}' + +# specific xpath variables +script_xpath = '//script[@type="text/javascript"][not(@src)]' +torrent_xpath = '(//table)[2]/tbody/tr' +link_xpath = '(./td)[1]/a/@href' +age_xpath = '(./td)[2]' +size_xpath = '(./td)[3]' + + +# do search-request +def request(query, 
params): + params['url'] = search_url.format(search_term=quote(query), + page_no=params['pageno']) + return params + + +# get response from search-request +def response(resp): + results = [] + dom = html.fromstring(resp.text) + result_rows = dom.xpath(torrent_xpath) + + try: + script_element = dom.xpath(script_xpath)[0] + json_string = script_element.text[script_element.text.find('{'):] + torrents_json = loads(json_string) + except: + return [] + + # parse results + for torrent_row, torrent_json in zip(result_rows, torrents_json['data']['list']): + title = torrent_json['name'] + seed = int(torrent_json['seeds']) + leech = int(torrent_json['peers']) + size = int(torrent_json['size']) + torrent_hash = torrent_json['hash'] + + torrentfile = torrent_file_url.format(torrent_hash=torrent_hash) + magnetlink = 'magnet:?xt=urn:btih:{}'.format(torrent_hash) + + age = extract_text(torrent_row.xpath(age_xpath)) + link = torrent_row.xpath(link_xpath)[0] + + href = urljoin(url, link) + + # append result + results.append({'url': href, + 'title': title, + 'content': age, + 'seed': seed, + 'leech': leech, + 'filesize': size, + 'torrentfile': torrentfile, + 'magnetlink': magnetlink, + 'template': 'torrent.html'}) + + # return results sorted by seeder + return sorted(results, key=itemgetter('seed'), reverse=True) diff --git a/searx/settings.yml b/searx/settings.yml index 835fbe5f6..25d90d4db 100644 --- a/searx/settings.yml +++ b/searx/settings.yml @@ -748,6 +748,11 @@ engines: page_size : 10 disabled : True + - name : seedpeer + shortcut : speu + engine : seedpeer + categories: files, music, videos + # - name : yacy # engine : yacy # shortcut : ya diff --git a/searx/templates/courgette/result_templates/torrent.html b/searx/templates/courgette/result_templates/torrent.html index d659064d9..7f94a221e 100644 --- a/searx/templates/courgette/result_templates/torrent.html +++ b/searx/templates/courgette/result_templates/torrent.html @@ -4,7 +4,7 @@ {% endif %}

{{ result.title|safe }}

{% if result.content %}{{ result.content|safe }}
{% endif %} - {% if result.seed %}{{ _('Seeder') }} : {{ result.seed }}, {{ _('Leecher') }} : {{ result.leech }}
{% endif %} + {% if result.seed is defined %}{{ _('Seeder') }} : {{ result.seed }}, {{ _('Leecher') }} : {{ result.leech }}
{% endif %} {% if result.magnetlink %}{{ _('magnet link') }}{% endif %} {% if result.torrentfile %}{{ _('torrent file') }}{% endif %} diff --git a/searx/templates/legacy/result_templates/torrent.html b/searx/templates/legacy/result_templates/torrent.html index 7a8ac33de..068e05373 100644 --- a/searx/templates/legacy/result_templates/torrent.html +++ b/searx/templates/legacy/result_templates/torrent.html @@ -8,6 +8,6 @@

{% if result.magnetlink %}{{ _('magnet link') }}{% endif %} {% if result.torrentfile %}{{ _('torrent file') }}{% endif %} - - {% if result.seed %}{{ _('Seeder') }} : {{ result.seed }}, {{ _('Leecher') }} : {{ result.leech }}{% endif %} + {% if result.seed is defined %}{{ _('Seeder') }} : {{ result.seed }}, {{ _('Leecher') }} : {{ result.leech }}{% endif %}

diff --git a/searx/templates/oscar/result_templates/torrent.html b/searx/templates/oscar/result_templates/torrent.html index f5ea415e2..089367e36 100644 --- a/searx/templates/oscar/result_templates/torrent.html +++ b/searx/templates/oscar/result_templates/torrent.html @@ -3,7 +3,7 @@ {{ result_header(result, favicons) }} {{ result_sub_header(result) }} -{% if result.seed %}

{{ icon('transfer') }} {{ _('Seeder') }} {{ result.seed }} • {{ _('Leecher') }} {{ result.leech }}{% endif %} +{% if result.seed is defined %}

{{ icon('transfer') }} {{ _('Seeder') }} {{ result.seed }} • {{ _('Leecher') }} {{ result.leech }}{% endif %} {% if result.filesize %}
{{ icon('floppy-disk') }} {{ _('Filesize') }} {% if result.filesize < 1024 %}{{ result.filesize }} {{ _('Bytes') }} diff --git a/searx/templates/simple/result_templates/torrent.html b/searx/templates/simple/result_templates/torrent.html index 3c7fd15e8..71c775bc9 100644 --- a/searx/templates/simple/result_templates/torrent.html +++ b/searx/templates/simple/result_templates/torrent.html @@ -6,7 +6,7 @@ {% if result.magnetlink %}

{% endif %} {% if result.torrentfile %}{% endif %} -{% if result.seed %}

• {{ icon('arrow-swap') }} {{ _('Seeder') }} {{ result.seed }} • {{ _('Leecher') }} {{ result.leech }}

{% endif %} +{% if result.seed is defined %}

• {{ icon('arrow-swap') }} {{ _('Seeder') }} {{ result.seed }} • {{ _('Leecher') }} {{ result.leech }}

{% endif %} {%- if result.filesize %}

{{ icon('floppy-disk') }} {{ _('Filesize') }} {%- if result.filesize < 1024 %}{{ result.filesize }} {{ _('Bytes') }} diff --git a/tests/unit/engines/test_seedpeer.py b/tests/unit/engines/test_seedpeer.py new file mode 100644 index 000000000..2057c1cb1 --- /dev/null +++ b/tests/unit/engines/test_seedpeer.py @@ -0,0 +1,66 @@ +# -*- coding: utf-8 -*- +from collections import defaultdict +import mock +from searx.engines import seedpeer +from searx.testing import SearxTestCase + + +class TestBtdiggEngine(SearxTestCase): + + def test_request(self): + query = 'test_query' + dicto = defaultdict(dict) + dicto['pageno'] = 1 + params = seedpeer.request(query, dicto) + self.assertIn('url', params) + self.assertIn(query, params['url']) + self.assertIn('seedpeer', params['url']) + + def test_response(self): + self.assertRaises(AttributeError, seedpeer.response, None) + self.assertRaises(AttributeError, seedpeer.response, []) + self.assertRaises(AttributeError, seedpeer.response, '') + self.assertRaises(AttributeError, seedpeer.response, '[]') + + response = mock.Mock(text='') + self.assertEqual(seedpeer.response(response), []) + + html = u""" + + + + + + + +
+        <html>
+            <head>
+                <script type="text/javascript">
+                    window.initialData = {"data": {"list": [{"name": "Title", "seeds": "10", "peers": "20", "size": "1024", "hash": "abc123"}]}}
+                </script>
+            </head>
+            <body>
+                <table><tbody></tbody></table>
+                <table>
+                    <tbody>
+                        <tr>
+                            <td><a href="/link">Title</a></td>
+                            <td>1 year</td>
+                            <td>1 KB</td>
+                            <td>10</td>
+                            <td>20</td>
+                        </tr>
+                    </tbody>
+                </table>
+            </body>
+        </html>
+        """
+        response = mock.Mock(text=html)
+        results = seedpeer.response(response)
+        self.assertEqual(type(results), list)
+        self.assertEqual(len(results), 1)
+        self.assertEqual(results[0]['title'], 'Title')
+        self.assertEqual(results[0]['url'], 'https://seedpeer.me/link')
+        self.assertEqual(results[0]['seed'], 10)
+        self.assertEqual(results[0]['leech'], 20)
+        self.assertEqual(results[0]['filesize'], 1024)
+        self.assertEqual(results[0]['torrentfile'], 'https://seedpeer.me/torrent/abc123')
+        self.assertEqual(results[0]['magnetlink'], 'magnet:?xt=urn:btih:abc123')

From 2aa95c16e31ff0d688a06492863b9f2a0c209bfb Mon Sep 17 00:00:00 2001
From: Markus Heiser
Date: Fri, 29 Nov 2019 14:47:43 +0100
Subject: [PATCH 07/10] [fix] soundcloud: URLs of JS sources have moved

The client_id is found under the (new) URL:
https://a-v2.sndcdn.com/assets/49-a0c01933-3.js

Signed-off-by: Markus Heiser
---
 searx/engines/soundcloud.py | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/searx/engines/soundcloud.py b/searx/engines/soundcloud.py
index 870998545..284689bf6 100644
--- a/searx/engines/soundcloud.py
+++ b/searx/engines/soundcloud.py
@@ -51,7 +51,9 @@ def get_client_id():
 
     if response.ok:
         tree = html.fromstring(response.content)
-        script_tags = tree.xpath("//script[contains(@src, '/assets/app')]")
+        # script_tags has been moved from /assets/app/ to /assets/ path.  I
+        # found client_id in https://a-v2.sndcdn.com/assets/49-a0c01933-3.js
+        script_tags = tree.xpath("//script[contains(@src, '/assets/')]")
         app_js_urls = [script_tag.get('src') for script_tag in script_tags if script_tag is not None]
 
         # extracts valid app_js urls from soundcloud.com content

From 4998e9ec856479d0c619e54f7100c295c7c5851c Mon Sep 17 00:00:00 2001
From: Markus Heiser
Date: Fri, 29 Nov 2019 18:56:29 +0100
Subject: [PATCH 08/10] [fix] duckduckgo_definitions - where 'AnswerType' is
 'calc'

Do not try to get text when 'AnswerType' is 'calc'.

Signed-off-by: Markus Heiser
---
 searx/engines/duckduckgo_definitions.py | 23 ++++++++++++++++++++---
 1 file changed, 20 insertions(+), 3 deletions(-)

diff --git a/searx/engines/duckduckgo_definitions.py b/searx/engines/duckduckgo_definitions.py
index 957a13ea6..2899b50fb 100644
--- a/searx/engines/duckduckgo_definitions.py
+++ b/searx/engines/duckduckgo_definitions.py
@@ -1,11 +1,25 @@
+"""
+DuckDuckGo (definitions)
+
+- `Instant Answer API`_
+- `DuckDuckGo query`_
+
+.. _Instant Answer API: https://duckduckgo.com/api
+.. _DuckDuckGo query: https://api.duckduckgo.com/?q=DuckDuckGo&format=json&pretty=1
+
+"""
+
 import json
 from lxml import html
 from re import compile
+import logging
 from searx.engines.xpath import extract_text
 from searx.engines.duckduckgo import _fetch_supported_languages, supported_languages_url, language_aliases
 from searx.url_utils import urlencode
 from searx.utils import html_to_text, match_language
 
+logger = logging.getLogger('searx.engines.'+ __name__)
+
 url = 'https://api.duckduckgo.com/'\
     + '?{query}&format=json&pretty=0&no_redirect=1&d=1'
 
@@ -25,7 +39,9 @@ def result_to_text(url, text, htmlResult):
 def request(query, params):
     params['url'] = url.format(query=urlencode({'q': query}))
     language = match_language(params['language'], supported_languages, language_aliases)
-    params['headers']['Accept-Language'] = language.split('-')[0]
+    language = language.split('-')[0]
+    params['headers']['Accept-Language'] = language
+    logger.debug("query %s: // headers: %s", params['url'], params['headers'])
     return params
 
@@ -43,8 +59,9 @@ def response(resp):
 
     # add answer if there is one
     answer = search_res.get('Answer', '')
-    if answer != '':
-        results.append({'answer': html_to_text(answer)})
+    if answer:
+        if search_res.get('AnswerType', '') not in ['calc']:
+            results.append({'answer': html_to_text(answer)})
 
     # add infobox
     if 'Definition' in search_res:

From b6d9f5aa71de43ff9a46f0d4d94baad429e2275d Mon Sep 17 00:00:00 2001
From: Markus Heiser
Date: Fri, 29 Nov 2019 19:55:16 +0100
Subject: [PATCH 09/10] [fix] duckduckgo_definition issues reported by
 'manage.sh test'

Fix this error while travis build::

  /home/travis/build/asciimoo/searx/searx/engines/duckduckgo_definitions.py:21:44: E225 missing whitespace around operator

Signed-off-by: Markus Heiser
---
 searx/engines/duckduckgo_definitions.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/searx/engines/duckduckgo_definitions.py b/searx/engines/duckduckgo_definitions.py
index 2899b50fb..49def2a2a 100644
--- a/searx/engines/duckduckgo_definitions.py
+++ b/searx/engines/duckduckgo_definitions.py
@@ -18,7 +18,7 @@ from searx.engines.duckduckgo import _fetch_supported_languages, supported_langu
 from searx.url_utils import urlencode
 from searx.utils import html_to_text, match_language
 
-logger = logging.getLogger('searx.engines.'+ __name__)
+logger = logging.getLogger('searx.engines.' + __name__)
 
 url = 'https://api.duckduckgo.com/'\
     + '?{query}&format=json&pretty=0&no_redirect=1&d=1'

From 30ad0c666df6644c4d05949f7bc5d7b1485f3fd6 Mon Sep 17 00:00:00 2001
From: Markus Heiser
Date: Mon, 2 Dec 2019 14:26:11 +0100
Subject: [PATCH 10/10] duckduckgo_definitions: remove the debug message

Signed-off-by: Markus Heiser
---
 searx/engines/duckduckgo_definitions.py | 4 ----
 1 file changed, 4 deletions(-)

diff --git a/searx/engines/duckduckgo_definitions.py b/searx/engines/duckduckgo_definitions.py
index 49def2a2a..79d10c303 100644
--- a/searx/engines/duckduckgo_definitions.py
+++ b/searx/engines/duckduckgo_definitions.py
@@ -12,14 +12,11 @@ DuckDuckGo (definitions)
 import json
 from lxml import html
 from re import compile
-import logging
 from searx.engines.xpath import extract_text
 from searx.engines.duckduckgo import _fetch_supported_languages, supported_languages_url, language_aliases
 from searx.url_utils import urlencode
 from searx.utils import html_to_text, match_language
 
-logger = logging.getLogger('searx.engines.' + __name__)
-
 url = 'https://api.duckduckgo.com/'\
     + '?{query}&format=json&pretty=0&no_redirect=1&d=1'
 
@@ -41,7 +38,6 @@ def request(query, params):
     language = match_language(params['language'], supported_languages, language_aliases)
     language = language.split('-')[0]
     params['headers']['Accept-Language'] = language
-    logger.debug("query %s: // headers: %s", params['url'], params['headers'])
     return params
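
The XPath cache that PATCH 05 adds to searx/utils.py is the core of its speed
optimization: engines call eval_xpath() instead of element.xpath(), so each
expression string is compiled by lxml only once per process. A minimal
standalone sketch of that mechanism follows; it mirrors get_xpath()/eval_xpath()
from the patch, while the sample markup and the demo query are illustrative
assumptions, not part of the series.

    from lxml import html
    from lxml.etree import XPath

    xpath_cache = dict()

    def get_xpath(xpath_str):
        # Compile the expression on first use; afterwards the compiled
        # XPath object is served from the cache.
        result = xpath_cache.get(xpath_str, None)
        if result is None:
            result = XPath(xpath_str)
            xpath_cache[xpath_str] = result
        return result

    def eval_xpath(element, xpath_str):
        # Evaluate a (cached) compiled expression against an lxml element.
        xpath = get_xpath(xpath_str)
        return xpath(element)

    # Demo (sample markup): the second call reuses the compiled expression.
    dom = html.fromstring('<div><p class="s">first</p><p class="s">second</p></div>')
    print(eval_xpath(dom, '//p[@class="s"]/text()'))  # ['first', 'second']
    print(eval_xpath(dom, '//p[@class="s"]/text()'))  # served from xpath_cache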