[mod] make format.python : prepare python code for black 23.1.0

Signed-off-by: Markus Heiser <markus.heiser@darmarit.de>
Markus Heiser 2023-02-04 17:58:53 +01:00
parent 233ee1bb15
commit b4c80d9ebb
68 changed files with 8 additions and 115 deletions
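
Most of the 115 deleted lines appear to be single blank lines between a leading comment and the def/class it documents, presumably removed to match how black 23.1.0 places blank lines around comments that sit directly above a definition; the few added lines apparently re-insert spacing above such comments instead. A handful of hunks also drop redundant parentheses around a lone exception type and around tuple targets in for loops. The sketch below is a hypothetical module stitched together from individual lines of the changed files purely to illustrate these patterns; it is not the real code of any single engine:

# before this commit (schematic)
iframe_src = "https://www.deezer.com/plugins/player?type=tracks&id={audioid}"

# do search-request

def request(query, params):
    try:
        offset = (params['pageno'] - 1) * 25
    except (KeyError):
        offset = 0
    for (key, value) in params.items():
        params[key] = value
    return params

# after this commit (schematic): the blank line between the comment and the
# def is dropped, and the redundant parentheses are removed
iframe_src = "https://www.deezer.com/plugins/player?type=tracks&id={audioid}"


# do search-request
def request(query, params):
    try:
        offset = (params['pageno'] - 1) * 25
    except KeyError:
        offset = 0
    for key, value in params.items():
        params[key] = value
    return params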

@@ -14,7 +14,6 @@ try:
from functools import cached_property # type: ignore
except ImportError:
# cache_property has been added in py3.8 [1]
#
# To support cache_property in py3.7 the implementation from 3.8 has been

@@ -49,7 +49,6 @@ def response(resp):
# parse results
for result in eval_xpath_list(dom, "//div[@id='content']//div[@class='listWidget']/div/div[@class='appRow']"):
link = eval_xpath_getindex(result, './/h5/a', 0)
url = base_url + link.attrib.get('href') + '#downloads'

@@ -29,7 +29,6 @@ image_api = 'https://www.artic.edu/iiif/2/'
def request(query, params):
args = urlencode(
{
'q': query,
@@ -45,12 +44,10 @@ def request(query, params):
def response(resp):
results = []
json_data = loads(resp.text)
for result in json_data['data']:
if not result['image_id']:
continue

@@ -63,7 +63,6 @@ def response(resp):
dom = html.fromstring(resp.text)
for result in eval_xpath_list(dom, '//li[contains(@class, "searchresult")]'):
link = eval_xpath_getindex(result, './/div[@class="itemurl"]/a', 0, default=None)
if link is None:
continue

@@ -45,7 +45,6 @@ def _get_offset_from_pageno(pageno):
def request(query, params):
offset = _get_offset_from_pageno(params.get('pageno', 1))
# logger.debug("params['pageno'] --> %s", params.get('pageno'))
@@ -86,7 +85,6 @@ def response(resp):
url_to_resolve_index = []
i = 0
for result in eval_xpath_list(dom, '//ol[@id="b_results"]/li[contains(@class, "b_algo")]'):
link = eval_xpath_getindex(result, './/h2/a', 0, None)
if link is None:
continue
@@ -138,7 +136,6 @@ def response(resp):
try:
result_len_container = "".join(eval_xpath(dom, '//span[@class="sb_count"]//text()'))
if "-" in result_len_container:
# Remove the part "from-to" for paginated request ...
result_len_container = result_len_container[result_len_container.find("-") * 2 + 2 :]
@@ -159,14 +156,12 @@ def response(resp):
# get supported languages from their site
def _fetch_supported_languages(resp):
lang_tags = set()
dom = html.fromstring(resp.text)
lang_links = eval_xpath(dom, '//div[@id="language-section"]//li')
for _li in lang_links:
href = eval_xpath(_li, './/@href')[0]
(_scheme, _netloc, _path, _params, query, _fragment) = urlparse(href)
query = parse_qs(query, keep_blank_values=True)

@@ -90,7 +90,6 @@ def _get_url(query, language, offset, time_range):
def request(query, params):
if params['time_range'] and params['time_range'] not in time_range_dict:
return params
@@ -105,7 +104,6 @@ def request(query, params):
def response(resp):
results = []
rss = etree.fromstring(resp.content)
namespaces = rss.nsmap

@@ -29,7 +29,6 @@ search_string = '{query}?page={page}&pageSize={nb_per_page}&apiKey={apikey}'
def request(query, params):
if api_key == 'unset':
raise SearxEngineAPIException('missing CORE API key')

@@ -77,7 +77,6 @@ def init(_engine_settings):
def request(query, params):
if not query:
return False
@@ -127,7 +126,6 @@ def response(resp):
# parse results
for res in search_res.get('list', []):
title = res['title']
url = res['url']

@@ -25,6 +25,7 @@ url = 'https://api.deezer.com/'
search_url = url + 'search?{query}&index={offset}'
iframe_src = "https://www.deezer.com/plugins/player?type=tracks&id={audioid}"
# do search-request
def request(query, params):
offset = (params['pageno'] - 1) * 25

@@ -81,7 +81,6 @@ def response(resp):
json_data = loads(resp.text)
for result in json_data['data']:
if not result['image_id']:
continue

@@ -34,7 +34,6 @@ base_url = 'https://www.deviantart.com'
def request(query, params):
# https://www.deviantart.com/search/deviations?page=5&q=foo
query = {
@@ -50,14 +49,12 @@ def request(query, params):
def response(resp):
results = []
dom = html.fromstring(resp.text)
for row in dom.xpath('//div[contains(@data-hook, "content_row")]'):
for result in row.xpath('./div'):
a_tag = result.xpath('.//a[@data-hook="deviation_link"]')[0]
noscript_tag = a_tag.xpath('.//noscript')

@@ -25,7 +25,6 @@ search_url = base_url + "api/content/v1/products/search?{query}&type=image&page_
def request(query, params):
params['url'] = search_url.format(query=urlencode(dict(q=query, page=params["pageno"])))
params["headers"]["Search-Version"] = "v3"

@@ -37,7 +37,6 @@ search_url = (
# do search-request
def request(query, params):
params['url'] = base_url + search_url.format(query=urlencode({'id': query}))
return params

@@ -49,6 +49,7 @@ time_range_dict = {'day': 'd', 'week': 'w', 'month': 'm', 'year': 'y'}
url = 'https://lite.duckduckgo.com/lite/'
url_ping = 'https://duckduckgo.com/t/sl_l'
# match query's language to a region code that duckduckgo will accept
def get_region_code(lang, lang_list=None):
if lang == 'all':
@@ -62,7 +63,6 @@ def get_region_code(lang, lang_list=None):
def request(query, params):
params['url'] = url
params['method'] = 'POST'
@@ -118,7 +118,6 @@ def request(query, params):
# get response from search-request
def response(resp):
headers_ping = dict_subset(resp.request.headers, ['User-Agent', 'Accept-Encoding', 'Accept', 'Cookie'])
get(url_ping, headers=headers_ping)
@@ -143,7 +142,6 @@ def response(resp):
offset = 0
while len_tr_rows >= offset + 4:
# assemble table rows we need to scrap
tr_title = tr_rows[offset]
tr_content = tr_rows[offset + 1]
@@ -174,7 +172,6 @@ def response(resp):
# get supported languages from their site
def _fetch_supported_languages(resp):
# response is a js file with regions as an embedded object
response_page = resp.text
response_page = response_page[response_page.find('regions:{') + 8 :]

@@ -48,7 +48,6 @@ def response(resp):
dom = html.fromstring(resp.text)
for result in eval_xpath_list(dom, "//ol[@class='search-results']/li"):
extracted_desc = extract_text(eval_xpath_getindex(result, './/p', 0))
if 'No results found.' in extracted_desc:

@@ -29,6 +29,7 @@ search_url = (
url + "search/text/?query={query}&page={page}&fields=name,url,download,created,description,type&token={api_key}"
)
# search request
def request(query, params):
params["url"] = search_url.format(

@@ -41,7 +41,6 @@ extra_param_expiration_delay = 3000
def fetch_extra_param(query_args, headers):
# example:
#
# var uxrl='/search?c=main&qlangcountry=en-us&q=south&s=10&rand=1590740241635&n';

@@ -220,7 +220,6 @@ def get_lang_info(params, lang_list, custom_aliases, supported_any_language):
# https://developers.google.com/custom-search/docs/xml_results_appendices#languageCollections
if _any_language and supported_any_language:
# interpretation is left up to Google (based on whoogle)
#
# - add parameter ``source=lnt``
@@ -230,7 +229,6 @@ def get_lang_info(params, lang_list, custom_aliases, supported_any_language):
ret_val['params']['source'] = 'lnt'
else:
# restricts search results to documents written in a particular
# language.
ret_val['params']['lr'] = "lang_" + lang_list.get(lang_country, language)
@@ -323,7 +321,6 @@ def response(resp):
# parse results
for result in eval_xpath_list(dom, results_xpath):
# google *sections*
if extract_text(eval_xpath(result, g_section_with_header)):
logger.debug("ignoring <g-section-with-header>")

@@ -93,7 +93,6 @@ def response(resp):
json_data = loads(resp.text[json_start:])
for item in json_data["ischj"]["metadata"]:
result_item = {
'url': item["result"]["referrer_url"],
'title': item["result"]["page_title"],

@@ -119,7 +119,6 @@ def response(resp):
dom = html.fromstring(resp.text)
for result in eval_xpath_list(dom, '//div[@class="xrnccd"]'):
# The first <a> tag in the <article> contains the link to the
# article The href attribute of the <a> is a google internal link,
# we can't use. The real link is hidden in the jslog attribute:

@@ -151,7 +151,6 @@ def response(resp): # pylint: disable=too-many-locals
# parse results
for result in eval_xpath_list(dom, '//div[@data-cid]'):
title = extract_text(eval_xpath(result, './/h3[1]//a'))
if not title:

@@ -147,7 +147,6 @@ def response(resp):
# parse results
for result in eval_xpath_list(dom, '//div[contains(@class, "g ")]'):
# ignore google *sections*
if extract_text(eval_xpath(result, g_section_with_header)):
logger.debug("ignoring <g-section-with-header>")

@@ -39,7 +39,6 @@ search_categories = {"nm": "name", "tt": "title", "kw": "keyword", "co": "compan
def request(query, params):
query = query.replace(" ", "_").lower()
params['url'] = suggestion_url.format(letter=query[0], query=query)
@@ -47,12 +46,10 @@ def request(query, params):
def response(resp):
suggestions = json.loads(resp.text)
results = []
for entry in suggestions.get('d', []):
# https://developer.imdb.com/documentation/key-concepts#imdb-ids
entry_id = entry['id']
categ = search_categories.get(entry_id[:2])

@@ -64,7 +64,6 @@ def do_query(data, q):
qkey = q[0]
for key, value in iterate(data):
if len(q) == 1:
if key == qkey:
ret.append(value)

@@ -33,7 +33,6 @@ IMG_SRC_FIXES = {
def request(query, params):
search_path = search_string.format(query=urlencode({'q': query}), page=params['pageno'])
params['url'] = base_url + search_path

@@ -24,7 +24,6 @@ safesearch = False
def request(query, params):
params['url'] = 'https://mediathekviewweb.de/api/query'
params['method'] = 'POST'
params['headers']['Content-type'] = 'text/plain'
@@ -50,7 +49,6 @@ def request(query, params):
def response(resp):
resp = loads(resp.text)
mwv_result = resp['result']
@@ -59,7 +57,6 @@ def response(resp):
results = []
for item in mwv_result_list:
item['hms'] = str(datetime.timedelta(seconds=item['duration']))
results.append(

@@ -38,7 +38,6 @@ def response(resp):
search_res = resp.json()
for result in search_res.get('data', []):
r_url = result['url']
publishedDate = parser.parse(result['created_time'])
res = {

@@ -28,7 +28,6 @@ search_string = '?page={page}&page_size={nb_per_page}&format=json&{query}'
def request(query, params):
search_path = search_string.format(query=urlencode({'q': query}), nb_per_page=nb_per_page, page=params['pageno'])
params['url'] = base_url + search_path

@@ -31,7 +31,6 @@ pdbe_preview_url = 'https://www.ebi.ac.uk/pdbe/static/entry/{pdb_id}_deposited_c
def request(query, params):
params['url'] = pdbe_solr_url
params['method'] = 'POST'
params['data'] = {'q': query, 'wt': "json"} # request response in parsable format
@@ -66,21 +65,20 @@ def construct_body(result):
year=result['release_year'],
)
img_src = pdbe_preview_url.format(pdb_id=result['pdb_id'])
-except (KeyError):
+except KeyError:
content = None
img_src = None
# construct url for preview image
try:
img_src = pdbe_preview_url.format(pdb_id=result['pdb_id'])
-except (KeyError):
+except KeyError:
img_src = None
return [title, content, img_src]
def response(resp):
results = []
json = loads(resp.text)['response']['docs']

@@ -32,7 +32,6 @@ search_string = 'search?{query}&channel=image&ps=50&pn={page}&region={lang}&ss_m
def request(query, params):
search_path = search_string.format(
query=urlencode({'query': query}),
page=params['pageno'],

@@ -53,7 +53,6 @@ def response(resp):
# parse results
for r in json.get('features', {}):
properties = r.get('properties')
if not properties:

@@ -149,7 +149,6 @@ def response(resp):
return []
for row in mainline:
mainline_type = row.get('type', 'web')
if mainline_type != qwant_categ:
continue
@@ -160,7 +159,6 @@ def response(resp):
mainline_items = row.get('items', [])
for item in mainline_items:
title = item.get('title', None)
res_url = item.get('url', None)
@@ -175,7 +173,6 @@ def response(resp):
)
elif mainline_type == 'news':
pub_date = item['date']
if pub_date is not None:
pub_date = datetime.fromtimestamp(pub_date)
@@ -244,7 +241,6 @@ def response(resp):
def _fetch_supported_languages(resp):
text = resp.text
text = text[text.find('INITIAL_PROPS') :]
text = text[text.find('{') : text.find('</script>')]

@@ -27,7 +27,6 @@ search_url = base_url + 'search.json?{query}'
def request(query, params):
query = urlencode({'q': query, 'limit': page_size})
params['url'] = search_url.format(query=query)
@@ -35,7 +34,6 @@ def request(query, params):
def response(resp):
img_results = []
text_results = []

@@ -28,7 +28,6 @@ search_url = url + 'api/structures/search'
# do search-request
def request(query, params):
params['url'] = search_url
params['method'] = 'POST'
params['headers']['Content-type'] = "application/json"

@@ -45,7 +45,6 @@ def request(query, params):
# get response from search-request
def response(resp):
response_json = loads(resp.text)
results = response_json['results']

@@ -80,7 +80,7 @@ def response(resp):
for src in definitions:
infobox += f"<div><small>{src[0]}</small>"
infobox += "<ul>"
-for (def_text, sub_def) in src[1]:
+for def_text, sub_def in src[1]:
infobox += f"<li>{def_text}</li>"
if sub_def:
infobox += "<ol>"

@@ -85,7 +85,6 @@ def response(resp):
# parse results
for result in search_res.get('collection', []):
if result['kind'] in ('track', 'playlist'):
uri = quote_plus(result['uri'])
res = {

@@ -29,6 +29,7 @@ api_client_secret = None
url = 'https://api.spotify.com/'
search_url = url + 'v1/search?{query}&type=track&offset={offset}'
# do search-request
def request(query, params):
offset = (params['pageno'] - 1) * 20

@@ -54,7 +54,6 @@ def search(query, params):
query_to_run = query_str + ' LIMIT :limit OFFSET :offset'
with sqlite_cursor() as cur:
cur.execute(query_to_run, query_params)
col_names = [cn[0] for cn in cur.description]

@@ -31,7 +31,6 @@ search_api = 'https://api.stackexchange.com/2.3/search/advanced?'
def request(query, params):
args = urlencode(
{
'q': query,
@@ -48,12 +47,10 @@ def response(resp):
def response(resp):
results = []
json_data = loads(resp.text)
for result in json_data['items']:
content = "[%s]" % ", ".join(result['tags'])
content += " %s" % result['owner']['display_name']
if result['is_answered']:

@@ -60,7 +60,6 @@ sc_code = ''
def raise_captcha(resp):
if str(resp.url).startswith('https://www.startpage.com/sp/captcha'):
raise SearxEngineCaptchaException()
@@ -104,7 +103,6 @@ def get_sc_code(headers):
# do search-request
def request(query, params):
# pylint: disable=line-too-long
# The format string from Startpage's FFox add-on [1]::
#

@@ -114,7 +114,6 @@ def parse_tineye_match(match_json):
backlinks = []
if "backlinks" in match_json:
for backlink_json in match_json["backlinks"]:
if not isinstance(backlink_json, dict):
continue
@@ -164,7 +163,6 @@ def response(resp):
if resp.is_error:
if resp.status_code in (400, 422):
message = 'HTTP status: %s' % resp.status_code
error = json_data.get('error')
s_key = json_data.get('suggestions', {}).get('key', '')
@@ -195,7 +193,6 @@ def response(resp):
# append results from matches
for match_json in json_data['matches']:
tineye_match = parse_tineye_match(match_json)
if not tineye_match['backlinks']:
continue

@@ -42,7 +42,6 @@ def init(engine_settings=None): # pylint: disable=unused-argument
def request(query, params):
search_url = base_url + '?t=search&q={search_query}'
if len(api_key) > 0:
search_url += '&apikey={api_key}'

@@ -522,7 +522,6 @@ class WDAmountAttribute(WDAttribute):
class WDArticle(WDAttribute):
__slots__ = 'language', 'kwargs'
def __init__(self, language, kwargs=None):
@@ -568,7 +567,6 @@ class WDLabelAttribute(WDAttribute):
class WDURLAttribute(WDAttribute):
HTTP_WIKIMEDIA_IMAGE = 'http://commons.wikimedia.org/wiki/Special:FilePath/'
__slots__ = 'url_id', 'kwargs'
@@ -623,7 +621,6 @@ class WDGeoAttribute(WDAttribute):
class WDImageAttribute(WDURLAttribute):
__slots__ = ('priority',)
def __init__(self, name, url_id=None, priority=100):

@@ -100,7 +100,6 @@ def response(resp):
image = subpod.xpath(image_xpath)
if content and pod_id not in image_pods:
if pod_is_result or not result_content:
if pod_id != "Input":
result_content = "%s: %s" % (pod_title, content)

@@ -204,7 +204,6 @@ def response(resp): # pylint: disable=too-many-branches
if results_xpath:
for result in eval_xpath_list(dom, results_xpath):
url = extract_url(eval_xpath_list(result, url_xpath, min_len=1), search_url)
title = extract_text(eval_xpath_list(result, title_xpath, min_len=1))
content = extract_text(eval_xpath_list(result, content_xpath))

@@ -79,7 +79,6 @@ def response(resp):
for result in search_results[0].get('items', []):
# parse image results
if resp.search_params.get('category') == 'images':
result_url = ''
if 'url' in result:
result_url = result['url']

@@ -69,7 +69,6 @@ lang2domain = {
def _get_language(params):
lang = language_aliases.get(params['language'])
if lang is None:
lang = match_language(params['language'], supported_languages, language_aliases)

@@ -71,7 +71,6 @@ def response(resp):
# parse results
for result in eval_xpath_list(dom, '//ol[contains(@class,"searchCenterMiddle")]//li'):
url = eval_xpath_getindex(result, './/h4/a/@href', 0, None)
if url is None:
continue

@@ -33,14 +33,12 @@ class ReverseProxyPathFix:
# pylint: disable=too-few-public-methods
def __init__(self, wsgi_app):
self.wsgi_app = wsgi_app
self.script_name = None
self.scheme = None
self.server = None
if settings['server']['base_url']:
# If base_url is specified, then these values from are given
# preference over any Flask's generics.

@@ -246,7 +246,6 @@ def get_engine_locale(searxng_locale, engine_locales, default=None):
# engine does support the searxng_lang in this other territory.
if locale.language:
searxng_lang = locale.language
if locale.script:
searxng_lang += '_' + locale.script

@@ -179,7 +179,6 @@ def get_engines_stats(engine_name_list):
max_time_total = max_result_count = None
for engine_name in engine_name_list:
sent_count = counter('engine', engine_name, 'search', 'count', 'sent')
if sent_count == 0:
continue
@@ -218,7 +217,6 @@ def get_engines_stats(engine_name_list):
time_http_p80 = time_http_p95 = 0
if time_http is not None:
time_http_p80 = histogram('engine', engine_name, 'time', 'http').percentage(80)
time_http_p95 = histogram('engine', engine_name, 'time', 'http').percentage(95)
@@ -227,7 +225,6 @@ def get_engines_stats(engine_name_list):
stats['http_p95'] = round(time_http_p95, 1)
if time_total is not None:
time_total_p80 = histogram('engine', engine_name, 'time', 'total').percentage(80)
time_total_p95 = histogram('engine', engine_name, 'time', 'total').percentage(95)

@@ -17,7 +17,6 @@ errors_per_engines = {}
class ErrorContext:
__slots__ = (
'filename',
'function',

@@ -12,7 +12,6 @@ logger = logger.getChild('searx.metrics')
class Histogram:
_slots__ = '_lock', '_size', '_sum', '_quartiles', '_count', '_width'
def __init__(self, width=10, size=200):
@@ -101,7 +100,6 @@ class Histogram:
class HistogramStorage:
__slots__ = 'measures', 'histogram_class'
def __init__(self, histogram_class=Histogram):
@@ -127,7 +125,6 @@ class HistogramStorage:
class CounterStorage:
__slots__ = 'counters', 'lock'
def __init__(self):

@@ -37,7 +37,6 @@ ADDRESS_MAPPING = {'ipv4': '0.0.0.0', 'ipv6': '::'}
class Network:
__slots__ = (
'enable_http',
'verify',
@@ -76,7 +75,6 @@ class Network:
max_redirects=30,
logger_name=None,
):
self.enable_http = enable_http
self.verify = verify
self.enable_http2 = enable_http2

@@ -21,9 +21,7 @@ _url_fields = ['iframe_src', 'audio_src']
def on_result(request, search, result):
-for (pattern, replacement) in replacements.items():
+for pattern, replacement in replacements.items():
if parsed in result:
if pattern.search(result[parsed].netloc):
# to keep or remove this result from the result list depends

@@ -45,12 +45,10 @@ reg = re.compile(r"(?<=ExitAddress )\S+")
def post_search(request, search):
if search.search_query.pageno > 1:
return True
if search.search_query.query.lower() == "tor-check":
# Request the list of tor exit nodes.
try:
resp = get("https://check.torproject.org/exit-addresses")

@@ -12,7 +12,6 @@ from searx.webutils import VALID_LANGUAGE_CODE
class QueryPartParser(ABC):
__slots__ = "raw_text_query", "enable_autocomplete"
@staticmethod

@@ -150,7 +150,6 @@ def _search_query_diff(
class TestResults:
__slots__ = 'errors', 'logs', 'languages'
def __init__(self):
@@ -182,7 +181,6 @@ class TestResults:
class ResultContainerTests:
__slots__ = 'test_name', 'search_query', 'result_container', 'languages', 'stop_test', 'test_results'
def __init__(
@@ -320,7 +318,6 @@ class ResultContainerTests:
class CheckerTests:
__slots__ = 'test_results', 'test_name', 'result_container_tests_list'
def __init__(
@@ -352,7 +349,6 @@ class CheckerTests:
class Checker:
__slots__ = 'processor', 'tests', 'test_results'
def __init__(self, processor: EngineProcessor):

@@ -268,7 +268,6 @@ def code_highlighter(codelines, language=None):
# new codeblock is detected
if last_line is not None and last_line + 1 != line:
# highlight last codepart
formatter = HtmlFormatter(linenos='inline', linenostart=line_code_start, cssclass="code-highlight")
html_code = html_code + highlight(tmp_code, lexer, formatter)
@@ -334,7 +333,6 @@ def morty_proxify(url: str):
def image_proxify(url: str):
if url.startswith('//'):
url = 'https:' + url
@@ -405,7 +403,6 @@ def get_client_settings():
def render(template_name: str, **kwargs):
kwargs['client_settings'] = str(
base64.b64encode(
bytes(
@@ -896,7 +893,6 @@ def autocompleter():
# normal autocompletion results only appear if no inner results returned
# and there is a query part
if len(raw_text_query.autocomplete_list) == 0 and len(sug_prefix) > 0:
# get language from cookie
language = request.preferences.get_value('language')
if not language or language == 'all':

@@ -157,7 +157,6 @@ def regex_highlight_cjk(word: str) -> str:
def highlight_content(content, query):
if not content:
return None

@@ -129,7 +129,6 @@ def join_language_lists(engines_languages):
language_list = {}
for engine_name in engines_languages:
for lang_code in engines_languages[engine_name]:
# apply custom fixes if necessary
if lang_code in getattr(engines[engine_name], 'language_aliases', {}).values():
lang_code = next(
@@ -275,7 +274,6 @@ def write_languages_file(languages):
language_codes = []
for code in sorted(languages):
name = languages[code]['name']
if name is None:
print("ERROR: languages['%s'] --> %s" % (code, languages[code]))

@@ -208,7 +208,6 @@ def get_osm_tags_filename():
if __name__ == '__main__':
set_timeout_for_thread(60)
result = {
'keys': optimize_keys(get_keys()),

@@ -58,7 +58,6 @@ def get_css(cssclass, style):
def main():
fname = 'static/themes/simple/src/generated/pygments.less'
print("update: %s" % fname)
with open(get_output_filename(fname), 'w') as f:

@@ -122,7 +122,6 @@ class TestNetwork(SearxTestCase):
class TestNetworkRequestRetries(SearxTestCase):
TEXT = 'Lorem Ipsum'
@classmethod
@@ -195,7 +194,6 @@ class TestNetworkRequestRetries(SearxTestCase):
class TestNetworkStreamRetries(SearxTestCase):
TEXT = 'Lorem Ipsum'
@classmethod

@@ -32,7 +32,6 @@ TEST_DB = {
class TestGetNode(SearxTestCase):
DB = {
'trie': {
'exam': {

@@ -228,7 +228,6 @@ class TestExternalBangParser(SearxTestCase):
class TestBang(SearxTestCase):
SPECIFIC_BANGS = ['!dummy_engine', '!du', '!general']
THE_QUERY = 'the query'

@@ -154,7 +154,6 @@ class TestHTMLTextExtractor(SearxTestCase):
class TestXPathUtils(SearxTestCase):
TEST_DOC = """<ul>
<li>Text in <b>bold</b> and <i>italic</i> </li>
<li>Another <b>text</b> <img src="data:image/gif;base64,R0lGODlhAQABAIAAAAUEBAAAACwAAAAAAQABAAACAkQBADs="></li>