Mirror of https://github.com/searxng/searxng, synced 2024-01-01 19:24:07 +01:00
cleanup
Commit e82516fb8d (parent 6c375c7d28)
4 changed files with 10 additions and 8 deletions
@@ -37,7 +37,7 @@ Communication with search engines.
   # Uncomment below if you want to make multiple request in parallel
   # through all the proxies at once:
   #
-  # proxy_request_redundancy: 10
+  # proxy_request_redundancy: 4
   #
   # Extra seconds to add in order to account for the time taken by the proxy
   #
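The documented example value for `proxy_request_redundancy` drops from 10 to 4; the option controls how many of the configured proxies receive the same request at once. As a rough illustration only, this is the kind of `settings_outgoing` mapping that `initialize(settings_engines=None, settings_outgoing=None)` (further down in this diff) might be handed; the key names follow the commented example above, but the surrounding structure and the proxy URLs are assumptions, not the canonical searxng settings schema.

```python
# Hypothetical outgoing settings fragment, expressed as the Python dict that
# initialize(..., settings_outgoing=...) could receive.  Proxy URLs are
# placeholders; only the key names come from the commented example above.
settings_outgoing = {
    "proxies": {
        "all://": [
            "socks5h://127.0.0.1:9050",
            "socks5h://127.0.0.1:9051",
        ],
    },
    # Send the same request through several proxies at once and keep the
    # first usable response (see AsyncParallelTransport below).
    "proxy_request_redundancy": 4,
}
```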
@@ -207,16 +207,16 @@ class AsyncParallelTransport(httpx.AsyncBaseTransport):
         network_logger: logging.Logger,
     ) -> None:
         """Init the parallel transport using a list of base `transports`."""
+        self._logger = network_logger or logger
         self._transports = list(transports)
         if len(self._transports) == 0:
             msg = "Got an empty list of (proxy) transports."
             raise ValueError(msg)
         if proxy_request_redundancy < 1:
-            logger.warning("Invalid proxy_request_redundancy specified: %d", proxy_request_redundancy)
+            self._logger.warning("Invalid proxy_request_redundancy specified: %d", proxy_request_redundancy)
             proxy_request_redundancy = 1
         self._proxy_request_redundancy = proxy_request_redundancy
         self._index = random.randrange(len(self._transports))  # noqa: S311
-        self._logger = network_logger or logger

     async def handle_async_request(
         self,
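This hunk moves the `self._logger = network_logger or logger` assignment from the end of the constructor to the top, so the redundancy warning is emitted on the per-network logger instead of the module-level `logger`. A reduced, self-contained sketch of that ordering follows; it is not the searxng class (the real `__init__` also takes the transports and other arguments), it only keeps the logger/redundancy handling.

```python
import logging
from typing import Optional

logger = logging.getLogger(__name__)  # module-level fallback, as in the diff


class ParallelTransportSketch:
    """Reduced constructor showing why the logger is assigned first."""

    def __init__(self, network_logger: Optional[logging.Logger] = None, proxy_request_redundancy: int = 1) -> None:
        # Assign the instance logger before any validation so the warning
        # below goes to the per-network logger when one was passed in.
        self._logger = network_logger or logger
        if proxy_request_redundancy < 1:
            self._logger.warning("Invalid proxy_request_redundancy specified: %d", proxy_request_redundancy)
            proxy_request_redundancy = 1
        self._proxy_request_redundancy = proxy_request_redundancy
```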
@@ -258,7 +258,7 @@ class AsyncParallelTransport(httpx.AsyncBaseTransport):
             if not result.is_error:
                 response = result
             elif result.status_code == 404 and response is None:
-                response = result
+                error_response = response = result
             elif not error_response:
                 self._logger.warning("Error response: %s for %s", result.status_code, request.url)
                 error_response = result
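The second client change adjusts how the racing proxy responses are reconciled: a 404 is now recorded as both the tentative response and the error response. The standalone sketch below replays just the priority visible in this elif chain (success beats 404, which beats other errors); the response type is a simplified stand-in, not httpx's, and the selection loop around it is assumed rather than copied from searxng.

```python
from __future__ import annotations

from dataclasses import dataclass


@dataclass
class FakeResponse:
    """Simplified stand-in for httpx.Response, only for this sketch."""

    status_code: int

    @property
    def is_error(self) -> bool:
        return self.status_code >= 400


def pick_response(results: list[FakeResponse]) -> tuple[FakeResponse | None, FakeResponse | None]:
    """Replay the priority from the elif chain above: success > 404 > other errors."""
    response = None
    error_response = None
    for result in results:
        if not result.is_error:
            response = result
        elif result.status_code == 404 and response is None:
            # after the change, a 404 counts both as the tentative response
            # and as an error response
            error_response = response = result
        elif not error_response:
            error_response = result
    return response, error_response


# One proxy answers 502 and another 404: the 404 becomes the response,
# and it also ends up as the error_response.
print(pick_response([FakeResponse(502), FakeResponse(404)]))
```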
@@ -351,6 +351,7 @@ def initialize(settings_engines=None, settings_outgoing=None):
     }

     def new_network(params, logger_name=None):
+        nonlocal default_params
         result = {}
         result.update(default_params)
         result.update(params)
@@ -358,7 +359,8 @@ def initialize(settings_engines=None, settings_outgoing=None):
         result['logger_name'] = logger_name
         return Network(**result)

-    def iter_engine_networks():
+    def iter_networks():
+        nonlocal settings_engines
         for engine_spec in settings_engines:
             engine_name = engine_spec['name']
             engine = engines.get(engine_name)
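Both nested helpers inside `initialize()` now declare the enclosing variables they use (`nonlocal default_params`, `nonlocal settings_engines`), and `iter_engine_networks` is shortened to `iter_networks`. Python only requires `nonlocal` when a nested function rebinds the name; for plain reads, as here, the declaration mainly documents the closure dependency. A small standalone sketch of the same pattern, with hypothetical names not taken from searxng:

```python
def build_registry(specs, defaults):
    # Same shape as initialize() in the diff: nested helpers close over
    # variables of the enclosing function.
    registry = {}

    def new_entry(params):
        nonlocal defaults  # read-only here; the declaration documents the dependency
        merged = {}
        merged.update(defaults)
        merged.update(params)
        return merged

    def iter_specs():
        nonlocal specs
        for spec in specs:
            yield spec["name"], spec.get("overrides")

    for name, overrides in iter_specs():
        registry[name] = new_entry(overrides or {})
    return registry


print(build_registry(
    [{"name": "example", "overrides": {"timeout": 5.0}}],
    {"timeout": 3.0, "retries": 1},
))
```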
@@ -379,7 +381,7 @@ def initialize(settings_engines=None, settings_outgoing=None):
         NETWORKS[network_name] = new_network(network, logger_name=network_name)

     # define networks from engines.[i].network (except references)
-    for engine_name, engine, network in iter_engine_networks():
+    for engine_name, engine, network in iter_networks():
         if network is None:
             network = {}
             for attribute_name, attribute_value in default_params.items():
@@ -392,7 +394,7 @@ def initialize(settings_engines=None, settings_outgoing=None):
             NETWORKS[engine_name] = new_network(network, logger_name=engine_name)

     # define networks from engines.[i].network (references)
-    for engine_name, engine, network in iter_engine_networks():
+    for engine_name, engine, network in iter_networks():
         if isinstance(network, str):
             NETWORKS[engine_name] = NETWORKS[network]

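These last two hunks only follow the rename: the two passes over `iter_networks()` are unchanged, first building per-engine networks from inline settings, then resolving string references to already-defined named networks. A toy illustration of that two-pass resolution with made-up data (the real first pass also merges `default_params` and engine attributes, which is omitted here):

```python
# Toy version of the two passes over engines.[i].network shown above:
# mappings become their own network, strings reference a named one.
NETWORKS = {"tor": object()}  # pretend a named network was already defined

engines = [
    {"name": "engine_a", "network": {"timeout": 5.0}},  # inline mapping
    {"name": "engine_b", "network": "tor"},             # reference by name
]

# pass 1: inline mappings (except references)
for spec in engines:
    network = spec["network"]
    if isinstance(network, dict):
        NETWORKS[spec["name"]] = ("new network from", network)

# pass 2: references
for spec in engines:
    network = spec["network"]
    if isinstance(network, str):
        NETWORKS[spec["name"]] = NETWORKS[network]

print(NETWORKS["engine_b"] is NETWORKS["tor"])  # True -- shared instance
```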
@@ -193,7 +193,7 @@ outgoing:
   # Uncomment below if you want to make multiple request in parallel
   # through all the proxies at once:
   #
-  # proxy_request_redundancy: 10
+  # proxy_request_redundancy: 4
   #
   # Extra seconds to add in order to account for the time taken by the proxy
   #