From b4c80d9ebba1a527f0e212e3864ff3ec5aeb4d0f Mon Sep 17 00:00:00 2001
From: Markus Heiser <markus.heiser@darmarit.de>
Date: Sat, 4 Feb 2023 17:58:53 +0100
Subject: [PATCH] [mod] make format.python : prepare python code for black
 23.1.0

Signed-off-by: Markus Heiser <markus.heiser@darmarit.de>
---
 searx/compat.py                              | 1 -
 searx/engines/apkmirror.py                   | 1 -
 searx/engines/artic.py                       | 3 ---
 searx/engines/bandcamp.py                    | 1 -
 searx/engines/bing.py                        | 5 -----
 searx/engines/bing_news.py                   | 2 --
 searx/engines/core.py                        | 1 -
 searx/engines/dailymotion.py                 | 2 --
 searx/engines/deezer.py                      | 1 +
 searx/engines/demo_online.py                 | 1 -
 searx/engines/deviantart.py                  | 3 ---
 searx/engines/docker_hub.py                  | 1 -
 searx/engines/doku.py                        | 1 -
 searx/engines/duckduckgo.py                  | 5 +----
 searx/engines/emojipedia.py                  | 1 -
 searx/engines/freesound.py                   | 1 +
 searx/engines/gigablast.py                   | 1 -
 searx/engines/google.py                      | 3 ---
 searx/engines/google_images.py               | 1 -
 searx/engines/google_news.py                 | 1 -
 searx/engines/google_scholar.py              | 1 -
 searx/engines/google_videos.py               | 1 -
 searx/engines/imdb.py                        | 3 ---
 searx/engines/json_engine.py                 | 1 -
 searx/engines/loc.py                         | 1 -
 searx/engines/mediathekviewweb.py            | 3 ---
 searx/engines/mixcloud.py                    | 1 -
 searx/engines/openverse.py                   | 1 -
 searx/engines/pdbe.py                        | 6 ++----
 searx/engines/petal_images.py                | 1 -
 searx/engines/photon.py                      | 1 -
 searx/engines/qwant.py                       | 4 ----
 searx/engines/reddit.py                      | 2 --
 searx/engines/scanr_structures.py            | 1 -
 searx/engines/searx_engine.py                | 1 -
 searx/engines/sjp.py                         | 2 +-
 searx/engines/soundcloud.py                  | 1 -
 searx/engines/spotify.py                     | 1 +
 searx/engines/sqlite.py                      | 1 -
 searx/engines/stackexchange.py               | 3 ---
 searx/engines/startpage.py                   | 2 --
 searx/engines/tineye.py                      | 3 ---
 searx/engines/torznab.py                     | 1 -
 searx/engines/wikidata.py                    | 3 ---
 searx/engines/wolframalpha_api.py            | 1 -
 searx/engines/xpath.py                       | 1 -
 searx/engines/yacy.py                        | 1 -
 searx/engines/yahoo.py                       | 1 -
 searx/engines/yahoo_news.py                  | 1 -
 searx/flaskfix.py                            | 2 --
 searx/locales.py                             | 1 -
 searx/metrics/__init__.py                    | 3 ---
 searx/metrics/error_recorder.py              | 1 -
 searx/metrics/models.py                      | 3 ---
 searx/network/network.py                     | 2 --
 searx/plugins/hostname_replace.py            | 4 +---
 searx/plugins/tor_check.py                   | 2 --
 searx/query.py                               | 1 -
 searx/search/checker/impl.py                 | 4 ----
 searx/webapp.py                              | 4 ----
 searx/webutils.py                            | 1 -
 searxng_extra/update/update_languages.py     | 2 --
 searxng_extra/update/update_osm_keys_tags.py | 1 -
 searxng_extra/update/update_pygments.py      | 1 -
 tests/unit/network/test_network.py           | 2 --
 tests/unit/test_external_bangs.py            | 1 -
 tests/unit/test_query.py                     | 1 -
 tests/unit/test_utils.py                     | 1 -
 68 files changed, 8 insertions(+), 115 deletions(-)
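
Notes on the changes below:

Nearly every hunk removes an empty line that directly followed a block
opener (a line ending in a colon, such as a ``def``, ``class``, ``if`` or
``for`` statement); the 2023 stable style shipped with black 23.1.0 strips
such leading blank lines from indented blocks. A minimal sketch of the
pattern, illustrative only and not a hunk from this patch:

    # accepted by black < 23.1.0
    def response(resp):

        results = []

    # black 23.1.0 style: no blank line after the block opener
    def response(resp):
        results = []

The remaining hunks are companion cleanups from the same formatting run:
redundant parentheses are dropped (``except (KeyError):`` becomes
``except KeyError:`` and ``for (a, b) in items:`` becomes
``for a, b in items:``), and a second blank line is inserted before
module-level comments that precede a ``def`` (deezer.py, freesound.py,
spotify.py, duckduckgo.py).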
diff --git a/searx/compat.py b/searx/compat.py
index 15e27d45d..5c7e6d7cb 100644
--- a/searx/compat.py
+++ b/searx/compat.py
@@ -14,7 +14,6 @@
 try:
     from functools import cached_property  # type: ignore
 except ImportError:
-
     # cache_property has been added in py3.8 [1]
     #
     # To support cache_property in py3.7 the implementation from 3.8 has been
diff --git a/searx/engines/apkmirror.py b/searx/engines/apkmirror.py
index ac7cd7431..e077c8c10 100644
--- a/searx/engines/apkmirror.py
+++ b/searx/engines/apkmirror.py
@@ -49,7 +49,6 @@ def response(resp):
 
     # parse results
     for result in eval_xpath_list(dom, "//div[@id='content']//div[@class='listWidget']/div/div[@class='appRow']"):
-
         link = eval_xpath_getindex(result, './/h5/a', 0)
 
         url = base_url + link.attrib.get('href') + '#downloads'
diff --git a/searx/engines/artic.py b/searx/engines/artic.py
index c0ae0a5e7..efec3307c 100644
--- a/searx/engines/artic.py
+++ b/searx/engines/artic.py
@@ -29,7 +29,6 @@ image_api = 'https://www.artic.edu/iiif/2/'
 
 
 def request(query, params):
-
     args = urlencode(
         {
             'q': query,
@@ -45,12 +44,10 @@ def request(query, params):
 
 
 def response(resp):
-
     results = []
 
     json_data = loads(resp.text)
 
     for result in json_data['data']:
-
         if not result['image_id']:
             continue
diff --git a/searx/engines/bandcamp.py b/searx/engines/bandcamp.py
index 8feff1fe0..2895c682d 100644
--- a/searx/engines/bandcamp.py
+++ b/searx/engines/bandcamp.py
@@ -63,7 +63,6 @@ def response(resp):
     dom = html.fromstring(resp.text)
 
     for result in eval_xpath_list(dom, '//li[contains(@class, "searchresult")]'):
-
         link = eval_xpath_getindex(result, './/div[@class="itemurl"]/a', 0, default=None)
         if link is None:
             continue
diff --git a/searx/engines/bing.py b/searx/engines/bing.py
index 783c0056a..b9541677e 100644
--- a/searx/engines/bing.py
+++ b/searx/engines/bing.py
@@ -45,7 +45,6 @@ def _get_offset_from_pageno(pageno):
 
 
 def request(query, params):
-
     offset = _get_offset_from_pageno(params.get('pageno', 1))
 
     # logger.debug("params['pageno'] --> %s", params.get('pageno'))
@@ -86,7 +85,6 @@ def response(resp):
     url_to_resolve_index = []
     i = 0
     for result in eval_xpath_list(dom, '//ol[@id="b_results"]/li[contains(@class, "b_algo")]'):
-
         link = eval_xpath_getindex(result, './/h2/a', 0, None)
         if link is None:
             continue
@@ -138,7 +136,6 @@ def response(resp):
     try:
         result_len_container = "".join(eval_xpath(dom, '//span[@class="sb_count"]//text()'))
         if "-" in result_len_container:
-
             # Remove the part "from-to" for paginated request ...
             result_len_container = result_len_container[result_len_container.find("-") * 2 + 2 :]
 
@@ -159,14 +156,12 @@ def response(resp):
 
 # get supported languages from their site
 def _fetch_supported_languages(resp):
-
     lang_tags = set()
 
     dom = html.fromstring(resp.text)
     lang_links = eval_xpath(dom, '//div[@id="language-section"]//li')
 
     for _li in lang_links:
-
         href = eval_xpath(_li, './/@href')[0]
         (_scheme, _netloc, _path, _params, query, _fragment) = urlparse(href)
         query = parse_qs(query, keep_blank_values=True)
diff --git a/searx/engines/bing_news.py b/searx/engines/bing_news.py
index 7eea17bb4..4f5e2ef34 100644
--- a/searx/engines/bing_news.py
+++ b/searx/engines/bing_news.py
@@ -90,7 +90,6 @@ def _get_url(query, language, offset, time_range):
 
 
 def request(query, params):
-
     if params['time_range'] and params['time_range'] not in time_range_dict:
         return params
 
@@ -105,7 +104,6 @@ def request(query, params):
 
 
 def response(resp):
-
     results = []
     rss = etree.fromstring(resp.content)
     namespaces = rss.nsmap
diff --git a/searx/engines/core.py b/searx/engines/core.py
index 2fa66e226..a56f258be 100644
--- a/searx/engines/core.py
+++ b/searx/engines/core.py
@@ -29,7 +29,6 @@ search_string = '{query}?page={page}&pageSize={nb_per_page}&apiKey={apikey}'
 
 
 def request(query, params):
-
     if api_key == 'unset':
         raise SearxEngineAPIException('missing CORE API key')
 
diff --git a/searx/engines/dailymotion.py b/searx/engines/dailymotion.py
index 7dd84dd27..f1721a9ba 100644
--- a/searx/engines/dailymotion.py
+++ b/searx/engines/dailymotion.py
@@ -77,7 +77,6 @@ def init(_engine_settings):
 
 
 def request(query, params):
-
     if not query:
         return False
 
@@ -127,7 +126,6 @@ def response(resp):
 
     # parse results
     for res in search_res.get('list', []):
-
         title = res['title']
         url = res['url']
 
diff --git a/searx/engines/deezer.py b/searx/engines/deezer.py
index 63c71e3cc..3dd787c48 100644
--- a/searx/engines/deezer.py
+++ b/searx/engines/deezer.py
@@ -25,6 +25,7 @@ url = 'https://api.deezer.com/'
 search_url = url + 'search?{query}&index={offset}'
 iframe_src = "https://www.deezer.com/plugins/player?type=tracks&id={audioid}"
 
+
 # do search-request
 def request(query, params):
     offset = (params['pageno'] - 1) * 25
diff --git a/searx/engines/demo_online.py b/searx/engines/demo_online.py
index 08add5371..858839865 100644
--- a/searx/engines/demo_online.py
+++ b/searx/engines/demo_online.py
@@ -81,7 +81,6 @@ def response(resp):
     json_data = loads(resp.text)
 
     for result in json_data['data']:
-
         if not result['image_id']:
             continue
 
diff --git a/searx/engines/deviantart.py b/searx/engines/deviantart.py
index e44ac28e5..8194dd09f 100644
--- a/searx/engines/deviantart.py
+++ b/searx/engines/deviantart.py
@@ -34,7 +34,6 @@ base_url = 'https://www.deviantart.com'
 
 
 def request(query, params):
-
     # https://www.deviantart.com/search/deviations?page=5&q=foo
 
     query = {
@@ -50,14 +49,12 @@ def request(query, params):
 
 
 def response(resp):
-
     results = []
 
     dom = html.fromstring(resp.text)
 
     for row in dom.xpath('//div[contains(@data-hook, "content_row")]'):
         for result in row.xpath('./div'):
-
             a_tag = result.xpath('.//a[@data-hook="deviation_link"]')[0]
             noscript_tag = a_tag.xpath('.//noscript')
 
diff --git a/searx/engines/docker_hub.py b/searx/engines/docker_hub.py
index 1e492b196..9a57c4236 100644
--- a/searx/engines/docker_hub.py
+++ b/searx/engines/docker_hub.py
@@ -25,7 +25,6 @@ search_url = base_url + "api/content/v1/products/search?{query}&type=image&page_size=25"
 
 
 def request(query, params):
-
     params['url'] = search_url.format(query=urlencode(dict(q=query, page=params["pageno"])))
     params["headers"]["Search-Version"] = "v3"
 
diff --git a/searx/engines/doku.py b/searx/engines/doku.py
index 08f56bbe7..29c113b46 100644
--- a/searx/engines/doku.py
+++ b/searx/engines/doku.py
@@ -37,7 +37,6 @@ search_url = (
 
 # do search-request
 def request(query, params):
-
     params['url'] = base_url + search_url.format(query=urlencode({'id': query}))
 
     return params
diff --git a/searx/engines/duckduckgo.py b/searx/engines/duckduckgo.py
index 2a7956ca8..f294d15a1 100644
--- a/searx/engines/duckduckgo.py
+++ b/searx/engines/duckduckgo.py
@@ -49,6 +49,7 @@ time_range_dict = {'day': 'd', 'week': 'w', 'month': 'm', 'year': 'y'}
 url = 'https://lite.duckduckgo.com/lite/'
 url_ping = 'https://duckduckgo.com/t/sl_l'
 
+
 # match query's language to a region code that duckduckgo will accept
 def get_region_code(lang, lang_list=None):
     if lang == 'all':
@@ -62,7 +63,6 @@ def get_region_code(lang, lang_list=None):
 
 
 def request(query, params):
-
     params['url'] = url
     params['method'] = 'POST'
 
@@ -118,7 +118,6 @@ def request(query, params):
 
 # get response from search-request
 def response(resp):
-
     headers_ping = dict_subset(resp.request.headers, ['User-Agent', 'Accept-Encoding', 'Accept', 'Cookie'])
     get(url_ping, headers=headers_ping)
 
@@ -143,7 +142,6 @@ def response(resp):
     offset = 0
 
     while len_tr_rows >= offset + 4:
-
         # assemble table rows we need to scrap
         tr_title = tr_rows[offset]
         tr_content = tr_rows[offset + 1]
@@ -174,7 +172,6 @@ def response(resp):
 
 # get supported languages from their site
 def _fetch_supported_languages(resp):
-
     # response is a js file with regions as an embedded object
     response_page = resp.text
     response_page = response_page[response_page.find('regions:{') + 8 :]
diff --git a/searx/engines/emojipedia.py b/searx/engines/emojipedia.py
index 020bf689b..b6c04f48a 100644
--- a/searx/engines/emojipedia.py
+++ b/searx/engines/emojipedia.py
@@ -48,7 +48,6 @@ def response(resp):
     dom = html.fromstring(resp.text)
 
     for result in eval_xpath_list(dom, "//ol[@class='search-results']/li"):
-
         extracted_desc = extract_text(eval_xpath_getindex(result, './/p', 0))
 
         if 'No results found.' in extracted_desc:
diff --git a/searx/engines/freesound.py b/searx/engines/freesound.py
index ea6666621..b56b5a414 100644
--- a/searx/engines/freesound.py
+++ b/searx/engines/freesound.py
@@ -29,6 +29,7 @@ search_url = (
     url + "search/text/?query={query}&page={page}&fields=name,url,download,created,description,type&token={api_key}"
 )
 
+
 # search request
 def request(query, params):
     params["url"] = search_url.format(
diff --git a/searx/engines/gigablast.py b/searx/engines/gigablast.py
index 1c40ff331..c53dd26a4 100644
--- a/searx/engines/gigablast.py
+++ b/searx/engines/gigablast.py
@@ -41,7 +41,6 @@ extra_param_expiration_delay = 3000
 
 
 def fetch_extra_param(query_args, headers):
-
     # example:
     #
     # var uxrl='/search?c=main&qlangcountry=en-us&q=south&s=10&rand=1590740241635&n';
diff --git a/searx/engines/google.py b/searx/engines/google.py
index bdb351432..4626be9e0 100644
--- a/searx/engines/google.py
+++ b/searx/engines/google.py
@@ -220,7 +220,6 @@ def get_lang_info(params, lang_list, custom_aliases, supported_any_language):
     # https://developers.google.com/custom-search/docs/xml_results_appendices#languageCollections
 
     if _any_language and supported_any_language:
-
         # interpretation is left up to Google (based on whoogle)
         #
         # - add parameter ``source=lnt``
@@ -230,7 +229,6 @@ def get_lang_info(params, lang_list, custom_aliases, supported_any_language):
         ret_val['params']['source'] = 'lnt'
 
     else:
-
         # restricts search results to documents written in a particular
         # language.
         ret_val['params']['lr'] = "lang_" + lang_list.get(lang_country, language)
@@ -323,7 +321,6 @@ def response(resp):
 
     # parse results
     for result in eval_xpath_list(dom, results_xpath):
-
         # google *sections*
         if extract_text(eval_xpath(result, g_section_with_header)):
             logger.debug("ignoring <g-section-with-header>")
diff --git a/searx/engines/google_images.py b/searx/engines/google_images.py
index 528f8d21d..46798f6c2 100644
--- a/searx/engines/google_images.py
+++ b/searx/engines/google_images.py
@@ -93,7 +93,6 @@ def response(resp):
     json_data = loads(resp.text[json_start:])
 
     for item in json_data["ischj"]["metadata"]:
-
         result_item = {
             'url': item["result"]["referrer_url"],
             'title': item["result"]["page_title"],
diff --git a/searx/engines/google_news.py b/searx/engines/google_news.py
index 1ada2d64d..2831fda09 100644
--- a/searx/engines/google_news.py
+++ b/searx/engines/google_news.py
@@ -119,7 +119,6 @@ def response(resp):
     dom = html.fromstring(resp.text)
 
     for result in eval_xpath_list(dom, '//div[@class="xrnccd"]'):
-
         # The first <a> tag in the <article> contains the link to the
         # article The href attribute of the <a> is a google internal link,
         # we can't use.  The real link is hidden in the jslog attribute:
diff --git a/searx/engines/google_scholar.py b/searx/engines/google_scholar.py
index c07cd4cea..f62255e10 100644
--- a/searx/engines/google_scholar.py
+++ b/searx/engines/google_scholar.py
@@ -151,7 +151,6 @@ def response(resp):  # pylint: disable=too-many-locals
 
     # parse results
     for result in eval_xpath_list(dom, '//div[@data-cid]'):
-
         title = extract_text(eval_xpath(result, './/h3[1]//a'))
 
         if not title:
diff --git a/searx/engines/google_videos.py b/searx/engines/google_videos.py
index fc574bd48..6d158b116 100644
--- a/searx/engines/google_videos.py
+++ b/searx/engines/google_videos.py
@@ -147,7 +147,6 @@ def response(resp):
 
     # parse results
     for result in eval_xpath_list(dom, '//div[contains(@class, "g ")]'):
-
         # ignore google *sections*
         if extract_text(eval_xpath(result, g_section_with_header)):
             logger.debug("ignoring <g-section-with-header>")
diff --git a/searx/engines/imdb.py b/searx/engines/imdb.py
index 0897b8dca..f2f4c1457 100644
--- a/searx/engines/imdb.py
+++ b/searx/engines/imdb.py
@@ -39,7 +39,6 @@ search_categories = {"nm": "name", "tt": "title", "kw": "keyword", "co": "company"}
 
 
 def request(query, params):
-
     query = query.replace(" ", "_").lower()
     params['url'] = suggestion_url.format(letter=query[0], query=query)
 
@@ -47,12 +46,10 @@ def request(query, params):
 
 
 def response(resp):
-
     suggestions = json.loads(resp.text)
     results = []
 
     for entry in suggestions.get('d', []):
-
         # https://developer.imdb.com/documentation/key-concepts#imdb-ids
         entry_id = entry['id']
         categ = search_categories.get(entry_id[:2])
diff --git a/searx/engines/json_engine.py b/searx/engines/json_engine.py
index 2dd3bc55e..9d3db4b6b 100644
--- a/searx/engines/json_engine.py
+++ b/searx/engines/json_engine.py
@@ -64,7 +64,6 @@ def do_query(data, q):
     qkey = q[0]
 
     for key, value in iterate(data):
-
         if len(q) == 1:
             if key == qkey:
                 ret.append(value)
diff --git a/searx/engines/loc.py b/searx/engines/loc.py
index 0b2f3a689..830b7e0aa 100644
--- a/searx/engines/loc.py
+++ b/searx/engines/loc.py
@@ -33,7 +33,6 @@ IMG_SRC_FIXES = {
 
 
 def request(query, params):
-
     search_path = search_string.format(query=urlencode({'q': query}), page=params['pageno'])
     params['url'] = base_url + search_path
 
diff --git a/searx/engines/mediathekviewweb.py b/searx/engines/mediathekviewweb.py
index 5570ebe24..16abaa503 100644
--- a/searx/engines/mediathekviewweb.py
+++ b/searx/engines/mediathekviewweb.py
@@ -24,7 +24,6 @@ safesearch = False
 
 
 def request(query, params):
-
     params['url'] = 'https://mediathekviewweb.de/api/query'
     params['method'] = 'POST'
     params['headers']['Content-type'] = 'text/plain'
@@ -50,7 +49,6 @@ def request(query, params):
 
 
 def response(resp):
-
     resp = loads(resp.text)
 
     mwv_result = resp['result']
@@ -59,7 +57,6 @@ def response(resp):
     results = []
 
     for item in mwv_result_list:
-
         item['hms'] = str(datetime.timedelta(seconds=item['duration']))
 
         results.append(
diff --git a/searx/engines/mixcloud.py b/searx/engines/mixcloud.py
index 3f255697e..e10ce0b63 100644
--- a/searx/engines/mixcloud.py
+++ b/searx/engines/mixcloud.py
@@ -38,7 +38,6 @@ def response(resp):
     search_res = resp.json()
 
     for result in search_res.get('data', []):
-
         r_url = result['url']
         publishedDate = parser.parse(result['created_time'])
         res = {
diff --git a/searx/engines/openverse.py b/searx/engines/openverse.py
index 9f4636e41..d47f196b7 100644
--- a/searx/engines/openverse.py
+++ b/searx/engines/openverse.py
@@ -28,7 +28,6 @@ search_string = '?page={page}&page_size={nb_per_page}&format=json&{query}'
 
 
 def request(query, params):
-
     search_path = search_string.format(query=urlencode({'q': query}), nb_per_page=nb_per_page, page=params['pageno'])
 
     params['url'] = base_url + search_path
diff --git a/searx/engines/pdbe.py b/searx/engines/pdbe.py
index 34c8d3227..2a8c2d9b9 100644
--- a/searx/engines/pdbe.py
+++ b/searx/engines/pdbe.py
@@ -31,7 +31,6 @@ pdbe_preview_url = 'https://www.ebi.ac.uk/pdbe/static/entry/{pdb_id}_deposited_c
 
 
 def request(query, params):
-
     params['url'] = pdbe_solr_url
     params['method'] = 'POST'
     params['data'] = {'q': query, 'wt': "json"}  # request response in parsable format
@@ -66,21 +65,20 @@ def construct_body(result):
             year=result['release_year'],
         )
         img_src = pdbe_preview_url.format(pdb_id=result['pdb_id'])
-    except (KeyError):
+    except KeyError:
         content = None
         img_src = None
 
     # construct url for preview image
     try:
         img_src = pdbe_preview_url.format(pdb_id=result['pdb_id'])
-    except (KeyError):
+    except KeyError:
         img_src = None
 
     return [title, content, img_src]
 
 
 def response(resp):
-
     results = []
 
     json = loads(resp.text)['response']['docs']
diff --git a/searx/engines/petal_images.py b/searx/engines/petal_images.py
index 88853c1bd..2a6826f8e 100644
--- a/searx/engines/petal_images.py
+++ b/searx/engines/petal_images.py
@@ -32,7 +32,6 @@ search_string = 'search?{query}&channel=image&ps=50&pn={page}&region={lang}&ss_m
 
 
 def request(query, params):
-
     search_path = search_string.format(
         query=urlencode({'query': query}),
         page=params['pageno'],
diff --git a/searx/engines/photon.py b/searx/engines/photon.py
index 2ea393679..25bce90ab 100644
--- a/searx/engines/photon.py
+++ b/searx/engines/photon.py
@@ -53,7 +53,6 @@ def response(resp):
 
     # parse results
     for r in json.get('features', {}):
-
         properties = r.get('properties')
 
         if not properties:
diff --git a/searx/engines/qwant.py b/searx/engines/qwant.py
index 6de2176d0..58e0fed95 100644
--- a/searx/engines/qwant.py
+++ b/searx/engines/qwant.py
@@ -149,7 +149,6 @@ def response(resp):
         return []
 
     for row in mainline:
-
         mainline_type = row.get('type', 'web')
         if mainline_type != qwant_categ:
             continue
@@ -160,7 +159,6 @@ def response(resp):
         mainline_items = row.get('items', [])
 
         for item in mainline_items:
-
             title = item.get('title', None)
             res_url = item.get('url', None)
 
@@ -175,7 +173,6 @@ def response(resp):
                 )
 
             elif mainline_type == 'news':
-
                 pub_date = item['date']
                 if pub_date is not None:
                     pub_date = datetime.fromtimestamp(pub_date)
@@ -244,7 +241,6 @@ def response(resp):
 
 
 def _fetch_supported_languages(resp):
-
     text = resp.text
     text = text[text.find('INITIAL_PROPS') :]
     text = text[text.find('{') : text.find('</script>')]
diff --git a/searx/engines/reddit.py b/searx/engines/reddit.py
index 36d92339d..73ed75249 100644
--- a/searx/engines/reddit.py
+++ b/searx/engines/reddit.py
@@ -27,7 +27,6 @@ search_url = base_url + 'search.json?{query}'
 
 
 def request(query, params):
-
     query = urlencode({'q': query, 'limit': page_size})
     params['url'] = search_url.format(query=query)
 
@@ -35,7 +34,6 @@ def request(query, params):
 
 
 def response(resp):
-
     img_results = []
     text_results = []
 
diff --git a/searx/engines/scanr_structures.py b/searx/engines/scanr_structures.py
index ad27079dd..7bdc30c43 100644
--- a/searx/engines/scanr_structures.py
+++ b/searx/engines/scanr_structures.py
@@ -28,7 +28,6 @@ search_url = url + 'api/structures/search'
 
 # do search-request
 def request(query, params):
-
     params['url'] = search_url
     params['method'] = 'POST'
     params['headers']['Content-type'] = "application/json"
diff --git a/searx/engines/searx_engine.py b/searx/engines/searx_engine.py
index 84a8e6449..ddf55b72d 100644
--- a/searx/engines/searx_engine.py
+++ b/searx/engines/searx_engine.py
@@ -45,7 +45,6 @@ def request(query, params):
 
 # get response from search-request
 def response(resp):
-
     response_json = loads(resp.text)
     results = response_json['results']
 
diff --git a/searx/engines/sjp.py b/searx/engines/sjp.py
index 6daa46e78..711fed736 100644
--- a/searx/engines/sjp.py
+++ b/searx/engines/sjp.py
@@ -80,7 +80,7 @@ def response(resp):
     for src in definitions:
         infobox += f"<div><small>{src[0]}</small>"
         infobox += "<ul>"
-        for (def_text, sub_def) in src[1]:
+        for def_text, sub_def in src[1]:
             infobox += f"<li>{def_text}</li>"
             if sub_def:
                 infobox += "<ul>"
diff --git a/searx/engines/soundcloud.py b/searx/engines/soundcloud.py
index 78947c69c..edb934aeb 100644
--- a/searx/engines/soundcloud.py
+++ b/searx/engines/soundcloud.py
@@ -85,7 +85,6 @@ def response(resp):
 
     # parse results
     for result in search_res.get('collection', []):
-
         if result['kind'] in ('track', 'playlist'):
             uri = quote_plus(result['uri'])
             res = {
diff --git a/searx/engines/spotify.py b/searx/engines/spotify.py
index 87edb7f1b..491a7058e 100644
--- a/searx/engines/spotify.py
+++ b/searx/engines/spotify.py
@@ -29,6 +29,7 @@ api_client_secret = None
 url = 'https://api.spotify.com/'
 search_url = url + 'v1/search?{query}&type=track&offset={offset}'
 
+
 # do search-request
 def request(query, params):
     offset = (params['pageno'] - 1) * 20
diff --git a/searx/engines/sqlite.py b/searx/engines/sqlite.py
index 6de12f5fe..0aa94a4c9 100644
--- a/searx/engines/sqlite.py
+++ b/searx/engines/sqlite.py
@@ -54,7 +54,6 @@ def search(query, params):
     query_to_run = query_str + ' LIMIT :limit OFFSET :offset'
 
     with sqlite_cursor() as cur:
-
         cur.execute(query_to_run, query_params)
         col_names = [cn[0] for cn in cur.description]
 
diff --git a/searx/engines/stackexchange.py b/searx/engines/stackexchange.py
index 99615b1a7..b93324f7d 100644
--- a/searx/engines/stackexchange.py
+++ b/searx/engines/stackexchange.py
@@ -31,7 +31,6 @@ search_api = 'https://api.stackexchange.com/2.3/search/advanced?'
 
 
 def request(query, params):
-
     args = urlencode(
         {
             'q': query,
@@ -48,12 +47,10 @@ def request(query, params):
 
 
 def response(resp):
-
     results = []
 
     json_data = loads(resp.text)
     for result in json_data['items']:
-
         content = "[%s]" % ", ".join(result['tags'])
         content += " %s" % result['owner']['display_name']
         if result['is_answered']:
diff --git a/searx/engines/startpage.py b/searx/engines/startpage.py
index f857f7b6d..d46595d40 100644
--- a/searx/engines/startpage.py
+++ b/searx/engines/startpage.py
@@ -60,7 +60,6 @@ sc_code = ''
 
 
 def raise_captcha(resp):
-
     if str(resp.url).startswith('https://www.startpage.com/sp/captcha'):
         raise SearxEngineCaptchaException()
 
@@ -104,7 +103,6 @@ def get_sc_code(headers):
 
 # do search-request
 def request(query, params):
-
     # pylint: disable=line-too-long
     # The format string from Startpage's FFox add-on [1]::
     #
diff --git a/searx/engines/tineye.py b/searx/engines/tineye.py
index 6c5ff134c..353a85747 100644
--- a/searx/engines/tineye.py
+++ b/searx/engines/tineye.py
@@ -114,7 +114,6 @@ def parse_tineye_match(match_json):
 
     backlinks = []
     if "backlinks" in match_json:
-
         for backlink_json in match_json["backlinks"]:
             if not isinstance(backlink_json, dict):
                 continue
@@ -164,7 +163,6 @@ def response(resp):
 
     if resp.is_error:
         if resp.status_code in (400, 422):
-
             message = 'HTTP status: %s' % resp.status_code
             error = json_data.get('error')
             s_key = json_data.get('suggestions', {}).get('key', '')
@@ -195,7 +193,6 @@ def response(resp):
 
     # append results from matches
     for match_json in json_data['matches']:
-
         tineye_match = parse_tineye_match(match_json)
         if not tineye_match['backlinks']:
             continue
diff --git a/searx/engines/torznab.py b/searx/engines/torznab.py
index a48017c13..7ddb79178 100644
--- a/searx/engines/torznab.py
+++ b/searx/engines/torznab.py
@@ -42,7 +42,6 @@ def init(engine_settings=None):  # pylint: disable=unused-argument
 
 
 def request(query, params):
-
     search_url = base_url + '?t=search&q={search_query}'
     if len(api_key) > 0:
         search_url += '&apikey={api_key}'
diff --git a/searx/engines/wikidata.py b/searx/engines/wikidata.py
index e0ad2e6c9..b270a8afc 100644
--- a/searx/engines/wikidata.py
+++ b/searx/engines/wikidata.py
@@ -522,7 +522,6 @@ class WDAmountAttribute(WDAttribute):
 
 
 class WDArticle(WDAttribute):
-
     __slots__ = 'language', 'kwargs'
 
     def __init__(self, language, kwargs=None):
@@ -568,7 +567,6 @@ class WDLabelAttribute(WDAttribute):
 
 
 class WDURLAttribute(WDAttribute):
-
     HTTP_WIKIMEDIA_IMAGE = 'http://commons.wikimedia.org/wiki/Special:FilePath/'
 
     __slots__ = 'url_id', 'kwargs'
@@ -623,7 +621,6 @@ class WDGeoAttribute(WDAttribute):
 
 
 class WDImageAttribute(WDURLAttribute):
-
     __slots__ = ('priority',)
 
     def __init__(self, name, url_id=None, priority=100):
diff --git a/searx/engines/wolframalpha_api.py b/searx/engines/wolframalpha_api.py
index 6a2423b51..f54ac6962 100644
--- a/searx/engines/wolframalpha_api.py
+++ b/searx/engines/wolframalpha_api.py
@@ -100,7 +100,6 @@ def response(resp):
             image = subpod.xpath(image_xpath)
 
             if content and pod_id not in image_pods:
-
                 if pod_is_result or not result_content:
                     if pod_id != "Input":
                         result_content = "%s: %s" % (pod_title, content)
diff --git a/searx/engines/xpath.py b/searx/engines/xpath.py
index 2dc22028f..3a18ec302 100644
--- a/searx/engines/xpath.py
+++ b/searx/engines/xpath.py
@@ -204,7 +204,6 @@ def response(resp):  # pylint: disable=too-many-branches
 
     if results_xpath:
         for result in eval_xpath_list(dom, results_xpath):
-
             url = extract_url(eval_xpath_list(result, url_xpath, min_len=1), search_url)
             title = extract_text(eval_xpath_list(result, title_xpath, min_len=1))
             content = extract_text(eval_xpath_list(result, content_xpath))
diff --git a/searx/engines/yacy.py b/searx/engines/yacy.py
index 12e7305db..de2b85c8f 100644
--- a/searx/engines/yacy.py
+++ b/searx/engines/yacy.py
@@ -79,7 +79,6 @@ def response(resp):
     for result in search_results[0].get('items', []):
         # parse image results
         if resp.search_params.get('category') == 'images':
-
             result_url = ''
             if 'url' in result:
                 result_url = result['url']
diff --git a/searx/engines/yahoo.py b/searx/engines/yahoo.py
index c13ce6d78..efdcf8952 100644
--- a/searx/engines/yahoo.py
+++ b/searx/engines/yahoo.py
@@ -69,7 +69,6 @@ lang2domain = {
 
 
 def _get_language(params):
-
     lang = language_aliases.get(params['language'])
     if lang is None:
         lang = match_language(params['language'], supported_languages, language_aliases)
diff --git a/searx/engines/yahoo_news.py b/searx/engines/yahoo_news.py
index 00f208b17..8bf406c97 100644
--- a/searx/engines/yahoo_news.py
+++ b/searx/engines/yahoo_news.py
@@ -71,7 +71,6 @@ def response(resp):
 
     # parse results
     for result in eval_xpath_list(dom, '//ol[contains(@class,"searchCenterMiddle")]//li'):
-
         url = eval_xpath_getindex(result, './/h4/a/@href', 0, None)
         if url is None:
             continue
diff --git a/searx/flaskfix.py b/searx/flaskfix.py
index 326c4b981..9d58a5491 100644
--- a/searx/flaskfix.py
+++ b/searx/flaskfix.py
@@ -33,14 +33,12 @@ class ReverseProxyPathFix:
     # pylint: disable=too-few-public-methods
 
     def __init__(self, wsgi_app):
-
         self.wsgi_app = wsgi_app
         self.script_name = None
         self.scheme = None
         self.server = None
 
         if settings['server']['base_url']:
-
             # If base_url is specified, then these values from are given
             # preference over any Flask's generics.
 
diff --git a/searx/locales.py b/searx/locales.py
index 9e06bf39d..3b4edb29f 100644
--- a/searx/locales.py
+++ b/searx/locales.py
@@ -246,7 +246,6 @@ def get_engine_locale(searxng_locale, engine_locales, default=None):
     # engine does support the searxng_lang in this other territory.
 
     if locale.language:
-
         searxng_lang = locale.language
         if locale.script:
             searxng_lang += '_' + locale.script
diff --git a/searx/metrics/__init__.py b/searx/metrics/__init__.py
index 18d2170df..7cefe7e37 100644
--- a/searx/metrics/__init__.py
+++ b/searx/metrics/__init__.py
@@ -179,7 +179,6 @@ def get_engines_stats(engine_name_list):
     max_time_total = max_result_count = None
 
     for engine_name in engine_name_list:
-
         sent_count = counter('engine', engine_name, 'search', 'count', 'sent')
         if sent_count == 0:
             continue
@@ -218,7 +217,6 @@ def get_engines_stats(engine_name_list):
         time_http_p80 = time_http_p95 = 0
 
         if time_http is not None:
-
             time_http_p80 = histogram('engine', engine_name, 'time', 'http').percentage(80)
             time_http_p95 = histogram('engine', engine_name, 'time', 'http').percentage(95)
 
@@ -227,7 +225,6 @@ def get_engines_stats(engine_name_list):
             stats['http_p95'] = round(time_http_p95, 1)
 
         if time_total is not None:
-
             time_total_p80 = histogram('engine', engine_name, 'time', 'total').percentage(80)
             time_total_p95 = histogram('engine', engine_name, 'time', 'total').percentage(95)
 
diff --git a/searx/metrics/error_recorder.py b/searx/metrics/error_recorder.py
index 1d0d6e7a3..5e6859268 100644
--- a/searx/metrics/error_recorder.py
+++ b/searx/metrics/error_recorder.py
@@ -17,7 +17,6 @@ errors_per_engines = {}
 
 
 class ErrorContext:
-
     __slots__ = (
         'filename',
         'function',
diff --git a/searx/metrics/models.py b/searx/metrics/models.py
index 900a7fa93..8ab17d605 100644
--- a/searx/metrics/models.py
+++ b/searx/metrics/models.py
@@ -12,7 +12,6 @@ logger = logger.getChild('searx.metrics')
 
 
 class Histogram:
-
     _slots__ = '_lock', '_size', '_sum', '_quartiles', '_count', '_width'
 
     def __init__(self, width=10, size=200):
@@ -101,7 +100,6 @@ class Histogram:
 
 
 class HistogramStorage:
-
     __slots__ = 'measures', 'histogram_class'
 
     def __init__(self, histogram_class=Histogram):
@@ -127,7 +125,6 @@ class HistogramStorage:
 
 
 class CounterStorage:
-
     __slots__ = 'counters', 'lock'
 
     def __init__(self):
diff --git a/searx/network/network.py b/searx/network/network.py
index 6e1825dd9..a7f9b7413 100644
--- a/searx/network/network.py
+++ b/searx/network/network.py
@@ -37,7 +37,6 @@ ADDRESS_MAPPING = {'ipv4': '0.0.0.0', 'ipv6': '::'}
 
 
 class Network:
-
     __slots__ = (
         'enable_http',
         'verify',
@@ -76,7 +75,6 @@ class Network:
         max_redirects=30,
         logger_name=None,
     ):
-
         self.enable_http = enable_http
         self.verify = verify
         self.enable_http2 = enable_http2
diff --git a/searx/plugins/hostname_replace.py b/searx/plugins/hostname_replace.py
index 039aadb91..ff0f63596 100644
--- a/searx/plugins/hostname_replace.py
+++ b/searx/plugins/hostname_replace.py
@@ -21,9 +21,7 @@ _url_fields = ['iframe_src', 'audio_src']
 
 
 def on_result(request, search, result):
-
-    for (pattern, replacement) in replacements.items():
-
+    for pattern, replacement in replacements.items():
         if parsed in result:
             if pattern.search(result[parsed].netloc):
                 # to keep or remove this result from the result list depends
diff --git a/searx/plugins/tor_check.py b/searx/plugins/tor_check.py
index 7d50bbcb5..ae85ae2da 100644
--- a/searx/plugins/tor_check.py
+++ b/searx/plugins/tor_check.py
@@ -45,12 +45,10 @@ reg = re.compile(r"(?<=ExitAddress )\S+")
 
 
 def post_search(request, search):
-
     if search.search_query.pageno > 1:
         return True
 
     if search.search_query.query.lower() == "tor-check":
-
         # Request the list of tor exit nodes.
         try:
             resp = get("https://check.torproject.org/exit-addresses")
diff --git a/searx/query.py b/searx/query.py
index b8e1c1275..6f5ec818c 100644
--- a/searx/query.py
+++ b/searx/query.py
@@ -12,7 +12,6 @@ from searx.webutils import VALID_LANGUAGE_CODE
 
 
 class QueryPartParser(ABC):
-
     __slots__ = "raw_text_query", "enable_autocomplete"
 
     @staticmethod
diff --git a/searx/search/checker/impl.py b/searx/search/checker/impl.py
index 37f145e1e..1136bc454 100644
--- a/searx/search/checker/impl.py
+++ b/searx/search/checker/impl.py
@@ -150,7 +150,6 @@ def _search_query_diff(
 
 
 class TestResults:
-
     __slots__ = 'errors', 'logs', 'languages'
 
     def __init__(self):
@@ -182,7 +181,6 @@ class TestResults:
 
 
 class ResultContainerTests:
-
     __slots__ = 'test_name', 'search_query', 'result_container', 'languages', 'stop_test', 'test_results'
 
     def __init__(
@@ -320,7 +318,6 @@ class ResultContainerTests:
 
 
 class CheckerTests:
-
     __slots__ = 'test_results', 'test_name', 'result_container_tests_list'
 
     def __init__(
@@ -352,7 +349,6 @@ class CheckerTests:
 
 
 class Checker:
-
     __slots__ = 'processor', 'tests', 'test_results'
 
     def __init__(self, processor: EngineProcessor):
diff --git a/searx/webapp.py b/searx/webapp.py
index d9ca3941c..a328ea6a8 100755
--- a/searx/webapp.py
+++ b/searx/webapp.py
@@ -268,7 +268,6 @@ def code_highlighter(codelines, language=None):
 
         # new codeblock is detected
         if last_line is not None and last_line + 1 != line:
-
             # highlight last codepart
             formatter = HtmlFormatter(linenos='inline', linenostart=line_code_start, cssclass="code-highlight")
             html_code = html_code + highlight(tmp_code, lexer, formatter)
@@ -334,7 +333,6 @@ def morty_proxify(url: str):
 
 
 def image_proxify(url: str):
-
     if url.startswith('//'):
         url = 'https:' + url
 
@@ -405,7 +403,6 @@ def get_client_settings():
 
 
 def render(template_name: str, **kwargs):
-
     kwargs['client_settings'] = str(
         base64.b64encode(
             bytes(
@@ -896,7 +893,6 @@ def autocompleter():
     # normal autocompletion results only appear if no inner results returned
     # and there is a query part
     if len(raw_text_query.autocomplete_list) == 0 and len(sug_prefix) > 0:
-
         # get language from cookie
         language = request.preferences.get_value('language')
         if not language or language == 'all':
diff --git a/searx/webutils.py b/searx/webutils.py
index 7b9a8045c..e8e5b7d5c 100644
--- a/searx/webutils.py
+++ b/searx/webutils.py
@@ -157,7 +157,6 @@ def regex_highlight_cjk(word: str) -> str:
 
 
 def highlight_content(content, query):
-
     if not content:
         return None
 
diff --git a/searxng_extra/update/update_languages.py b/searxng_extra/update/update_languages.py
index 87b13b276..946aec741 100755
--- a/searxng_extra/update/update_languages.py
+++ b/searxng_extra/update/update_languages.py
@@ -129,7 +129,6 @@ def join_language_lists(engines_languages):
     language_list = {}
     for engine_name in engines_languages:
         for lang_code in engines_languages[engine_name]:
-
             # apply custom fixes if necessary
             if lang_code in getattr(engines[engine_name], 'language_aliases', {}).values():
                 lang_code = next(
@@ -275,7 +274,6 @@ def write_languages_file(languages):
     language_codes = []
 
     for code in sorted(languages):
-
         name = languages[code]['name']
         if name is None:
             print("ERROR: languages['%s'] --> %s" % (code, languages[code]))
diff --git a/searxng_extra/update/update_osm_keys_tags.py b/searxng_extra/update/update_osm_keys_tags.py
index 72197498d..266421efa 100755
--- a/searxng_extra/update/update_osm_keys_tags.py
+++ b/searxng_extra/update/update_osm_keys_tags.py
@@ -208,7 +208,6 @@ def get_osm_tags_filename():
 
 
 if __name__ == '__main__':
-
     set_timeout_for_thread(60)
     result = {
         'keys': optimize_keys(get_keys()),
diff --git a/searxng_extra/update/update_pygments.py b/searxng_extra/update/update_pygments.py
index ca14868a2..966b6dbf4 100755
--- a/searxng_extra/update/update_pygments.py
+++ b/searxng_extra/update/update_pygments.py
@@ -58,7 +58,6 @@ def get_css(cssclass, style):
 
 
 def main():
-
     fname = 'static/themes/simple/src/generated/pygments.less'
     print("update: %s" % fname)
     with open(get_output_filename(fname), 'w') as f:
diff --git a/tests/unit/network/test_network.py b/tests/unit/network/test_network.py
index 905b981c1..0738fa7f0 100644
--- a/tests/unit/network/test_network.py
+++ b/tests/unit/network/test_network.py
@@ -122,7 +122,6 @@ class TestNetwork(SearxTestCase):
 
 
 class TestNetworkRequestRetries(SearxTestCase):
-
     TEXT = 'Lorem Ipsum'
 
     @classmethod
@@ -195,7 +194,6 @@ class TestNetworkRequestRetries(SearxTestCase):
 
 
 class TestNetworkStreamRetries(SearxTestCase):
-
     TEXT = 'Lorem Ipsum'
 
     @classmethod
diff --git a/tests/unit/test_external_bangs.py b/tests/unit/test_external_bangs.py
index 794edf159..258c178e8 100644
--- a/tests/unit/test_external_bangs.py
+++ b/tests/unit/test_external_bangs.py
@@ -32,7 +32,6 @@ TEST_DB = {
 
 
 class TestGetNode(SearxTestCase):
-
     DB = {
         'trie': {
             'exam': {
diff --git a/tests/unit/test_query.py b/tests/unit/test_query.py
index db25da8f3..8acb72ed3 100644
--- a/tests/unit/test_query.py
+++ b/tests/unit/test_query.py
@@ -228,7 +228,6 @@ class TestExternalBangParser(SearxTestCase):
 
 
 class TestBang(SearxTestCase):
-
     SPECIFIC_BANGS = ['!dummy_engine', '!du', '!general']
     THE_QUERY = 'the query'
 
diff --git a/tests/unit/test_utils.py b/tests/unit/test_utils.py
index 6f51f1ee3..100a9bcbd 100644
--- a/tests/unit/test_utils.py
+++ b/tests/unit/test_utils.py
@@ -154,7 +154,6 @@ class TestHTMLTextExtractor(SearxTestCase):
 
 
 class TestXPathUtils(SearxTestCase):
-
     TEST_DOC = """<ul>
         <li>Text in <b>bold</b> and <i>italic</i>
         <li>Another <b>text</b>