[mod] one logger per engine - drop obsolete logger.getChild

Remove the no longer needed `logger = logger.getChild(...)` lines from the engines.

Signed-off-by: Markus Heiser <markus.heiser@darmarit.de>
Markus Heiser 2021-09-06 18:05:46 +02:00
parent 7b235a1c36
commit aecfb2300d
22 changed files with 0 additions and 66 deletions
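
Background: with this change each engine module is handed its own child logger by the engine loader, so the per-module `from searx import logger` / `logger.getChild(...)` boilerplate removed below is dead code. A minimal sketch of that injection pattern, assuming a loader-side helper (the name `attach_logger` is illustrative, not the actual searx API):

    import logging
    from types import ModuleType

    # parent logger shared by all engines
    logger = logging.getLogger('searx.engines')

    def attach_logger(engine: ModuleType, engine_name: str) -> None:
        # one logger per engine: the child is named after the engine,
        # so the engine module no longer calls logger.getChild() itself
        engine.logger = logger.getChild(engine_name)

Engine code then uses its injected `logger` directly, e.g. `logger.debug('no response')`, and every record carries the engine's name.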

@@ -8,15 +8,12 @@

 from urllib.parse import urlencode
 from lxml import html

-from searx import logger
 from searx.utils import (
     eval_xpath_list,
     eval_xpath_getindex,
     extract_text,
 )

-logger = logger.getChild('APKMirror engine')
-
 about = {
     "website": 'https://www.apkmirror.com',
     "wikidata_id": None,

@@ -13,9 +13,6 @@ Explore thousands of artworks from The Art Institute of Chicago.

 from json import loads
 from urllib.parse import urlencode

-from searx import logger
-
-logger = logger.getChild('APKMirror engine')

 about = {
     "website": 'https://www.artic.edu',
     "wikidata_id": 'Q239303',

@@ -6,11 +6,8 @@

 import re
 from urllib.parse import urlencode
 from lxml import html

-from searx import logger
 from searx.utils import eval_xpath, extract_text, match_language

-logger = logger.getChild('bing engine')
-
 # about
 about = {
     "website": 'https://www.bing.com',

@@ -9,11 +9,8 @@ from json import loads
 from datetime import datetime
 from urllib.parse import urlencode

-from searx import logger
 from searx.exceptions import SearxEngineAPIException

-logger = logger.getChild('CORE engine')
-
 about = {
     "website": 'https://core.ac.uk',
     "wikidata_id": 'Q22661180',
@@ -29,8 +26,6 @@ nb_per_page = 10
 api_key = 'unset'

-logger = logger.getChild('CORE engine')
-
 base_url = 'https://core.ac.uk:443/api-v2/search/'
 search_string = '{query}?page={page}&pageSize={nb_per_page}&apiKey={apikey}'

@@ -9,15 +9,12 @@ import json
 from urllib.parse import urlencode, urlparse, urljoin
 from lxml import html

-from searx import logger
 from searx.data import WIKIDATA_UNITS
 from searx.engines.duckduckgo import language_aliases
 from searx.engines.duckduckgo import _fetch_supported_languages, supported_languages_url  # NOQA # pylint: disable=unused-import
 from searx.utils import extract_text, html_to_text, match_language, get_string_replaces_function
 from searx.external_urls import get_external_url, get_earth_coordinates_url, area_to_osm_zoom

-logger = logger.getChild('duckduckgo_definitions')
-
 # about
 about = {
     "website": 'https://duckduckgo.com/',

@@ -7,11 +7,8 @@ from json import loads
 from time import time
 import re
 from urllib.parse import urlencode

-from searx.engines import logger
 from searx.utils import ecma_unescape, html_to_text

-logger = logger.getChild('flickr-noapi')
-
 # about
 about = {
     "website": 'https://www.flickr.com',

@@ -9,9 +9,6 @@ from json import loads
 from urllib.parse import urlencode
 from datetime import datetime

-from searx import logger
-
-logger = logger.getChild('genius engine')

 # about
 about = {
     "website": 'https://genius.com/',

@@ -8,7 +8,6 @@
 import re
 from json import loads
 from urllib.parse import urlencode
-# from searx import logger
 from searx.network import get

 # about

@@ -29,12 +29,9 @@ The google WEB engine itself has a special setup option:
 from urllib.parse import urlencode
 from lxml import html

-from searx import logger
 from searx.utils import match_language, extract_text, eval_xpath, eval_xpath_list, eval_xpath_getindex
 from searx.exceptions import SearxEngineCaptchaException

-logger = logger.getChild('google engine')
-
 # about
 about = {
     "website": 'https://www.google.com',

@@ -16,7 +16,6 @@
 from urllib.parse import urlencode, unquote
 from lxml import html

-from searx import logger
 from searx.utils import (
     eval_xpath,
     eval_xpath_list,
@@ -37,8 +36,6 @@ from searx.engines.google import (
 )
 # pylint: enable=unused-import

-logger = logger.getChild('google images')
-
 # about
 about = {
     "website": 'https://images.google.com',

@@ -20,7 +20,6 @@ from urllib.parse import urlencode
 from base64 import b64decode
 from lxml import html

-from searx import logger
 from searx.utils import (
     eval_xpath,
     eval_xpath_list,
@@ -50,8 +49,6 @@ about = {
     "results": 'HTML',
 }

-logger = logger.getChild('google news')
-
 # compared to other google engines google-news has a different time range
 # support. The time range is included in the search term.
 time_range_dict = {

@@ -14,7 +14,6 @@ Definitions`_.
 from urllib.parse import urlencode
 from datetime import datetime
 from lxml import html
-from searx import logger

 from searx.utils import (
     eval_xpath,
@@ -53,8 +52,6 @@ use_locale_domain = True
 time_range_support = True
 safesearch = False

-logger = logger.getChild('google scholar')
-
 def time_range_url(params):
     """Returns a URL query component for a google-Scholar time range based on
     ``params['time_range']``. Google-Scholar does only support ranges in years.

@@ -20,7 +20,6 @@ import re
 from urllib.parse import urlencode
 from lxml import html

-from searx import logger
 from searx.utils import (
     eval_xpath,
     eval_xpath_list,
@@ -59,8 +58,6 @@ about = {
     "results": 'HTML',
 }

-logger = logger.getChild('google video')
-
 # engine dependent config
 categories = ['videos']

@@ -8,9 +8,6 @@

 from json import loads
 from urllib.parse import urlencode

-from searx import logger
-
-logger = logger.getChild('solidtor engine')

 about = {
     "website": 'https://www.solidtorrents.net/',

@@ -8,7 +8,6 @@ from json import loads
 from lxml import html
 from dateutil import parser
 from urllib.parse import quote_plus, urlencode
-from searx import logger
 from searx.network import get as http_get

 # about

@@ -10,11 +10,8 @@ from datetime import datetime
 from json import loads
 from urllib.parse import urlencode

-from searx import logger
 from searx.exceptions import SearxEngineAPIException

-logger = logger.getChild('Springer Nature engine')
-
 about = {
     "website": 'https://www.springernature.com/',
     "wikidata_id": 'Q21096327',

@@ -9,11 +9,6 @@

 import sqlite3
 import contextlib

-from searx import logger
-
-logger = logger.getChild('SQLite engine')
-
 engine_type = 'offline'

 database = ""
 query_str = ""

@@ -8,9 +8,6 @@

 from urllib.parse import urlencode, urlparse, urlunparse, parse_qsl
 from json import loads

-from searx import logger
-
-logger = logger.getChild('unsplash engine')

 # about
 about = {
     "website": 'https://unsplash.com',

@@ -10,15 +10,12 @@ from json import loads
 from dateutil.parser import isoparse
 from babel.dates import format_datetime, format_date, format_time, get_datetime_format

-from searx import logger
 from searx.data import WIKIDATA_UNITS
 from searx.network import post, get
 from searx.utils import match_language, searx_useragent, get_string_replaces_function
 from searx.external_urls import get_external_url, get_earth_coordinates_url, area_to_osm_zoom
 from searx.engines.wikipedia import _fetch_supported_languages, supported_languages_url  # NOQA # pylint: disable=unused-import

-logger = logger.getChild('wikidata')
-
 # about
 about = {
     "website": 'https://wikidata.org/',

@@ -4,12 +4,9 @@
 """

 from lxml.html import fromstring

-from searx import logger
 from searx.utils import extract_text
 from searx.network import raise_for_httperror

-logger = logger.getChild('Wordnik engine')
-
 # about
 about = {
     "website": 'https://www.wordnik.com',

@@ -23,9 +23,6 @@ from urllib.parse import urlencode
 from lxml import html
 from searx.utils import extract_text, extract_url, eval_xpath, eval_xpath_list

-from searx import logger
-
-logger = logger.getChild('XPath engine')

 search_url = None
 """

@@ -14,7 +14,6 @@ from datetime import datetime, timedelta
 from dateutil import parser
 from lxml import html

-from searx import logger
 from searx.utils import (
     eval_xpath_list,
     eval_xpath_getindex,
@@ -23,8 +22,6 @@ from searx.utils import (
 from searx.engines.yahoo import parse_url

-logger = logger.getChild('yahoo_news engine')
-
 # about
 about = {
     "website": 'https://news.yahoo.com',