Mirror of https://github.com/searxng/searxng, synced 2024-01-01 19:24:07 +01:00

We have been using a static type checker (pyright) for a long time, but passing its check was not yet a prerequisite for the quality gate. The check ran in the CI, but its error messages were only logged. As is always the case with checks that you have to run but that have no consequences: you neglect them :-)

We did not activate the check back then because we have (even today) too much monkey patching in our code; not only the engines are affected, but httpx and other objects as well. We have wanted to replace monkey patching with clear interfaces for a long time. The basis for that is more complete typing, and we can only get there if we make type checking an integral part of the quality gate.

This PR activates the type check. To pass it, a few type annotations were corrected in the code, but most type inconsistencies were silenced via inline comments. This was necessary in particular where the code uses properties that are attached to objects at runtime (monkey patching). The attaching itself happens in only a few places, but access to these properties spreads over the entire code base, which is why there are many `# type: ignore` markers in the code ... which we will hopefully be able to remove again, step by step, in the future.

Signed-off-by: Markus Heiser <markus.heiser@darmarit.de>
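To make the pattern concrete, here is a minimal, hypothetical sketch (the names are invented, not taken from the SearXNG code base) of why a monkey-patched property forces an inline suppression at every access site:

# hypothetical sketch: an attribute attached at runtime is invisible to pyright
class EngineModule:
    name = 'example'

def configure(engine: EngineModule):
    # the attribute is "stuck onto" the object here (monkey patching) ...
    engine.timeout = 3.0  # type: ignore

def request(engine: EngineModule):
    # ... and every later access to it needs the same inline suppression
    return engine.timeout  # type: ignore

Declaring such attributes on a real interface (a class or a typed protocol) is what would eventually let these suppressions be removed.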
93 lines · 2.6 KiB · Python
# SPDX-License-Identifier: AGPL-3.0-or-later
# pylint: disable=missing-module-docstring

import math

from searx.data import EXTERNAL_URLS

IMDB_PREFIX_TO_URL_ID = {
    'tt': 'imdb_title',
    'mn': 'imdb_name',
    'ch': 'imdb_character',
    'co': 'imdb_company',
    'ev': 'imdb_event',
}
HTTP_WIKIMEDIA_IMAGE = 'http://commons.wikimedia.org/wiki/Special:FilePath/'


def get_imdb_url_id(imdb_item_id):
    id_prefix = imdb_item_id[:2]
    return IMDB_PREFIX_TO_URL_ID.get(id_prefix)


def get_wikimedia_image_id(url):
    if url.startswith(HTTP_WIKIMEDIA_IMAGE):
        return url[len(HTTP_WIKIMEDIA_IMAGE) :]
    if url.startswith('File:'):
        return url[len('File:') :]
    return url


def get_external_url(url_id, item_id, alternative="default"):
    """Return an external URL or None if url_id is not found.

    url_id can take a value from data/external_urls.json.
    The "imdb_id" value is automatically converted according to the item_id value.

    If item_id is None, the raw URL with the $1 placeholder is returned.
    """
    if item_id is not None:
        if url_id == 'imdb_id':
            url_id = get_imdb_url_id(item_id)
        elif url_id == 'wikimedia_image':
            item_id = get_wikimedia_image_id(item_id)

    url_description = EXTERNAL_URLS.get(url_id)
    if url_description:
        url_template = url_description["urls"].get(alternative)
        if url_template is not None:
            if item_id is not None:
                return url_template.replace('$1', item_id)
            return url_template
    return None
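# Illustration (not part of the original module): how get_external_url() is
# typically used. The concrete templates live in data/external_urls.json and
# are only assumed here; what is shown is the '$1' placeholder mechanics.
#
#   get_external_url('imdb_id', 'tt0133093')
#       -> 'imdb_id' is first mapped to 'imdb_title' via get_imdb_url_id()
#          ('tt' prefix), then the 'imdb_title' template, assuming such an
#          entry exists, has '$1' replaced by 'tt0133093'.
#
#   get_external_url('wikidata', 'Q42')
#       -> the 'wikidata' template with '$1' replaced by 'Q42', assuming such
#          an entry exists in data/external_urls.json.
#
#   get_external_url('no_such_id', 'x')
#       -> None, because the url_id is not found.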
def get_earth_coordinates_url(latitude, longitude, osm_zoom, alternative='default'):
    url = (
        get_external_url('map', None, alternative)
        .replace('${latitude}', str(latitude))  # type: ignore
        .replace('${longitude}', str(longitude))
        .replace('${zoom}', str(osm_zoom))
    )
    return url


def area_to_osm_zoom(area):
    """Convert an area in km² into an OSM zoom. Less reliable if the shape is not round.

    Logarithmic regression using these data points:

    * 9596961 -> 4 (China)
    * 3287263 -> 5 (India)
    * 643801 -> 6 (France)
    * 6028 -> 9
    * 1214 -> 10
    * 891 -> 12
    * 12 -> 13

    In WolframAlpha:

    >>> log fit {9596961,15},{3287263,14},{643801,13},{6028,10},{1214,9},{891,7},{12,6}

    with 15 = 19-4 (China), 14 = 19-5 (India) and so on.

    Args:
        area (int, float, str): area in km²

    Returns:
        int: OSM zoom, or 19 if area is not a number
    """
    try:
        amount = float(area)
        return max(0, min(19, round(19 - 0.688297 * math.log(226.878 * amount))))
    except ValueError:
        return 19
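A rough sketch (not part of the module) of how the two map helpers fit together; the numbers simply retrace the regression from the docstring, and the 'map' URL template with its ${latitude}/${longitude}/${zoom} placeholders is assumed to come from data/external_urls.json:

# Worked example for the zoom regression above:
#   area_to_osm_zoom(643801)            # France, ~643801 km²
#   -> round(19 - 0.688297 * math.log(226.878 * 643801))
#   -> round(19 - 12.94) -> 6, matching the "643801 -> 6 (France)" data point.
#
#   area_to_osm_zoom("not a number")    # float() raises ValueError
#   -> 19
#
# Combining both helpers to build a map link:
#   zoom = area_to_osm_zoom(105.4)      # e.g. roughly the area of Paris in km²
#   url = get_earth_coordinates_url(48.8566, 2.3522, zoom)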