[mod] bing_news: use async API & upgrade to data_type: traits_v1

Signed-off-by: Markus Heiser <markus.heiser@darmarit.de>
Markus Heiser 2022-10-22 20:19:52 +02:00
parent ff80e7637e
commit 15eaf0f15f
1 changed file with 100 additions and 105 deletions
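
For orientation, a minimal sketch (not part of the commit) of the URL the reworked request() in the diff below assembles for the new infinitescrollajax endpoint. The query, page number and time range values are illustrative only.

# Sketch only: mirrors query_params as built in the new request() below.
from urllib.parse import urlencode

base_url = 'https://www.bing.com/news/infinitescrollajax'
query_params = {
    'q': 'london',                    # user query (example value)
    'InfiniteScroll': 1,
    'first': (2 - 1) * 10 + 1,        # pageno=2 -> first=11 (10 results per page)
    'qft': 'qft=interval="8"',        # time_range='week' resolved via time_map
}
print(base_url + '?' + urlencode(query_params))
# -> https://www.bing.com/news/infinitescrollajax?q=london&InfiniteScroll=1&first=11&qft=qft%3Dinterval%3D%228%22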

searx/engines/bing_news.py

@@ -1,20 +1,30 @@
 # SPDX-License-Identifier: AGPL-3.0-or-later
 # lint: pylint
-"""Bing (News)
+"""Bing-News: description see :py:obj:`searx.engines.bing`.
 """
-from urllib.parse import (
-    urlencode,
-    urlparse,
-    parse_qsl,
-    quote,
-)
-from datetime import datetime
-from dateutil import parser
-from lxml import etree
-from lxml.etree import XPath
-from searx.utils import match_language, eval_xpath_getindex
+
+# pylint: disable=invalid-name
+
+from typing import TYPE_CHECKING
+import uuid
+from urllib.parse import urlencode
+
+from lxml import html
+
 from searx.enginelib.traits import EngineTraits
+from searx.engines.bing import (
+    set_bing_cookies,
+    _fetch_traits,
+)
+from searx.engines.bing import send_accept_language_header  # pylint: disable=unused-import
+
+if TYPE_CHECKING:
+    import logging
+
+    logger: logging.Logger
+
+traits: EngineTraits
 
 # about
 about = {
@@ -30,126 +40,111 @@ about = {
 categories = ['news']
 paging = True
 time_range_support = True
-send_accept_language_header = True
-
-# search-url
-base_url = 'https://www.bing.com/'
-search_string = 'news/search?{query}&first={offset}&format=RSS'
-search_string_with_time = 'news/search?{query}&first={offset}&qft=interval%3d"{interval}"&format=RSS'
-time_range_dict = {'day': '7', 'week': '8', 'month': '9'}
+time_map = {
+    'day': '4',
+    'week': '8',
+    'month': '9',
+}
+"""A string '4' means *last hour*.  We use *last hour* for ``day`` here since the
+difference between *last day* and *last week* in the result list is just marginal.
+"""
+
+base_url = 'https://www.bing.com/news/infinitescrollajax'
+"""Bing (News) search URL"""
+
+bing_traits_url = 'https://learn.microsoft.com/en-us/bing/search-apis/bing-news-search/reference/market-codes'
+"""Bing (News) search API description"""
-def url_cleanup(url_string):
-    """remove click"""
-
-    parsed_url = urlparse(url_string)
-    if parsed_url.netloc == 'www.bing.com' and parsed_url.path == '/news/apiclick.aspx':
-        query = dict(parse_qsl(parsed_url.query))
-        url_string = query.get('url', None)
-    return url_string
-
-def image_url_cleanup(url_string):
-    """replace the http://*bing.com/th?id=... by https://www.bing.com/th?id=..."""
-
-    parsed_url = urlparse(url_string)
-    if parsed_url.netloc.endswith('bing.com') and parsed_url.path == '/th':
-        query = dict(parse_qsl(parsed_url.query))
-        url_string = "https://www.bing.com/th?id=" + quote(query.get('id'))
-    return url_string
+
+mkt_alias = {
+    'zh': 'en-WW',
+    'zh-CN': 'en-WW',
+}
+"""Bing News has an official market code 'zh-CN' but we won't get a result with
+this market code.  For 'zh' and 'zh-CN' we better use the *Worldwide aggregate*
+market code (en-WW).
+"""
-
-def _get_url(query, language, offset, time_range):
-    if time_range in time_range_dict:
-        search_path = search_string_with_time.format(
-            # fmt: off
-            query = urlencode({
-                'q': query,
-                'setmkt': language
-            }),
-            offset = offset,
-            interval = time_range_dict[time_range]
-            # fmt: on
-        )
-    else:
-        # e.g. setmkt=de-de&setlang=de
-        search_path = search_string.format(
-            # fmt: off
-            query = urlencode({
-                'q': query,
-                'setmkt': language
-            }),
-            offset = offset
-            # fmt: on
-        )
-    return base_url + search_path
 
 def request(query, params):
-    if params['time_range'] and params['time_range'] not in time_range_dict:
-        return params
-
-    offset = (params['pageno'] - 1) * 10 + 1
-    if params['language'] == 'all':
-        language = 'en-US'
-    else:
-        language = match_language(params['language'], supported_languages, language_aliases)
-    params['url'] = _get_url(query, language, offset, params['time_range'])
+    """Assemble a Bing-News request."""
+
+    sxng_locale = params['searxng_locale']
+    engine_region = traits.get_region(mkt_alias.get(sxng_locale, sxng_locale), traits.all_locale)
+    engine_language = traits.get_language(sxng_locale, 'en')
+
+    SID = uuid.uuid1().hex.upper()
+    set_bing_cookies(params, engine_language, engine_region, SID)
+
+    # build URL query
+    #
+    # example: https://www.bing.com/news/infinitescrollajax?q=london&first=1
+
+    query_params = {
+        # fmt: off
+        'q': query,
+        'InfiniteScroll': 1,
+        # to simplify the page count lets use the default of 10 images per page
+        'first' : (int(params.get('pageno', 1)) - 1) * 10 + 1,
+        # fmt: on
+    }
+
+    if params['time_range']:
+        # qft=interval:"7"
+        query_params['qft'] = 'qft=interval="%s"' % time_map.get(params['time_range'], '9')
+
+    params['url'] = base_url + '?' + urlencode(query_params)
 
     return params
 def response(resp):
+    """Get response from Bing-News"""
     results = []
-    rss = etree.fromstring(resp.content)
-    namespaces = rss.nsmap
-
-    for item in rss.xpath('./channel/item'):
-        # url / title / content
-        url = url_cleanup(eval_xpath_getindex(item, './link/text()', 0, default=None))
-        title = eval_xpath_getindex(item, './title/text()', 0, default=url)
-        content = eval_xpath_getindex(item, './description/text()', 0, default='')
-
-        # publishedDate
-        publishedDate = eval_xpath_getindex(item, './pubDate/text()', 0, default=None)
-        try:
-            publishedDate = parser.parse(publishedDate, dayfirst=False)
-        except TypeError:
-            publishedDate = datetime.now()
-        except ValueError:
-            publishedDate = datetime.now()
-
-        # thumbnail
-        thumbnail = eval_xpath_getindex(item, XPath('./News:Image/text()', namespaces=namespaces), 0, default=None)
-        if thumbnail is not None:
-            thumbnail = image_url_cleanup(thumbnail)
-
-        # append result
-        if thumbnail is not None:
-            results.append(
-                {'url': url, 'title': title, 'publishedDate': publishedDate, 'content': content, 'img_src': thumbnail}
-            )
-        else:
-            results.append({'url': url, 'title': title, 'publishedDate': publishedDate, 'content': content})
+
+    if not resp.ok or not resp.text:
+        return results
+
+    dom = html.fromstring(resp.text)
+
+    for newsitem in dom.xpath('//div[contains(@class, "newsitem")]'):
+
+        url = newsitem.xpath('./@url')[0]
+        title = ' '.join(newsitem.xpath('.//div[@class="caption"]//a[@class="title"]/text()')).strip()
+        content = ' '.join(newsitem.xpath('.//div[@class="snippet"]/text()')).strip()
+
+        thumbnail = None
+        author = newsitem.xpath('./@data-author')[0]
+        metadata = ' '.join(newsitem.xpath('.//div[@class="source"]/span/text()')).strip()
+
+        img_src = newsitem.xpath('.//a[@class="imagelink"]//img/@src')
+        if img_src:
+            thumbnail = 'https://www.bing.com/' + img_src[0]
+
+        results.append(
+            {
+                'url': url,
+                'title': title,
+                'content': content,
+                'img_src': thumbnail,
+                'author': author,
+                'metadata': metadata,
+            }
+        )
 
     return results
 def fetch_traits(engine_traits: EngineTraits):
-    """Fetch languages and regions from Bing-News."""
-    # pylint: disable=import-outside-toplevel
-    from searx.engines.bing import _fetch_traits
-
-    url = 'https://learn.microsoft.com/en-us/bing/search-apis/bing-news-search/reference/market-codes'
-
-    # The description of the first table says "query parameter when calling the
-    # Video Search API." .. thats why I use the 4. table "News Category API markets"
+    """Fetch languages and regions from Bing-News.
+
+    The :py:obj:`description <searx.engines.bing_news.bing_traits_url>` of the
+    first table says *"query parameter when calling the Video Search API."*
+    That's why I use the 4th table "News Category API markets" for the
+    ``xpath_market_codes``.
+    """
+
     xpath_market_codes = '//table[4]/tbody/tr/td[3]'
     # xpath_country_codes = '//table[2]/tbody/tr/td[2]'
     xpath_language_codes = '//table[3]/tbody/tr/td[2]'
 
-    _fetch_traits(engine_traits, url, xpath_language_codes, xpath_market_codes)
+    _fetch_traits(engine_traits, bing_traits_url, xpath_language_codes, xpath_market_codes)
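
To illustrate what the new HTML parser extracts, here is a minimal sketch (not part of the commit) that runs the XPath expressions from the new response() against a hand-written fragment imitating the "newsitem" markup of the ajax endpoint; the fragment is invented for illustration, not a captured Bing response.

# Sketch only: XPath expressions copied from the new response();
# the HTML fragment is made up and only mimics the expected markup.
from lxml import html

SAMPLE = '''
<div class="newsitem cardcommon" url="https://example.org/article" data-author="Example Press">
  <a class="imagelink"><img src="/th?id=OVFT.example" /></a>
  <div class="caption"><a class="title">Example headline</a></div>
  <div class="snippet">Example snippet text.</div>
  <div class="source"><span>Example Press</span><span>2h</span></div>
</div>
'''

dom = html.fromstring(SAMPLE)
for newsitem in dom.xpath('//div[contains(@class, "newsitem")]'):
    url = newsitem.xpath('./@url')[0]
    title = ' '.join(newsitem.xpath('.//div[@class="caption"]//a[@class="title"]/text()')).strip()
    content = ' '.join(newsitem.xpath('.//div[@class="snippet"]/text()')).strip()
    author = newsitem.xpath('./@data-author')[0]
    metadata = ' '.join(newsitem.xpath('.//div[@class="source"]/span/text()')).strip()
    print(url, title, content, author, metadata)
# -> https://example.org/article Example headline Example snippet text. Example Press Example Press 2h

Note that, unlike the RSS-based parser it replaces, the new parser reports the article age only as part of the metadata string and no longer sets publishedDate.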