searxngRebrandZaclys/searx/engines/bing_news.py
"""
Bing (News)
@website https://www.bing.com/news
@provide-api yes (http://datamarket.azure.com/dataset/bing/search),
max. 5000 query/month
@using-api no (because of query limit)
@results RSS (using search portal)
@stable yes (except perhaps for the images)
@parse url, title, content, publishedDate, thumbnail
"""
from urllib import urlencode
from urlparse import urlparse, parse_qsl
from datetime import datetime
from dateutil import parser
from lxml import etree
from searx.utils import list_get
# engine dependent config
categories = ['news']
paging = True
language_support = True
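# Note (assumed/illustrative, not part of this module): besides the settings
# above, an engine is only used if it is declared in searx's settings.yml.
# A minimal sketch; the exact keys and shortcut may differ per installation:
#
#   - name : bing news
#     engine : bing_news
#     shortcut : bin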
# search-url
base_url = 'https://www.bing.com/'
search_string = 'news/search?{query}&first={offset}&format=RSS'
# remove Bing's click-tracking redirect: result links point at
# /news/apiclick.aspx, with the real target in the 'url' query parameter
def url_cleanup(url_string):
    parsed_url = urlparse(url_string)

    if parsed_url.netloc == 'www.bing.com' and parsed_url.path == '/news/apiclick.aspx':
        query = dict(parse_qsl(parsed_url.query))
        return query.get('url', None)

    return url_string
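# illustrative example (made-up URL, not a captured response):
#   url_cleanup('https://www.bing.com/news/apiclick.aspx?ref=FexRss&url=https%3A%2F%2Fexample.org%2Fstory')
#   returns 'https://example.org/story' (parse_qsl decodes the percent-encoding)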
# replace the http://*bing4.com/th?id=... by https://www.bing.com/th?id=...
def image_url_cleanup(url_string):
    parsed_url = urlparse(url_string)

    if parsed_url.netloc.endswith('bing4.com') and parsed_url.path == '/th':
        query = dict(parse_qsl(parsed_url.query))
        return "https://www.bing.com/th?id=" + query.get('id')

    return url_string
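# illustrative example (made-up thumbnail URL):
#   image_url_cleanup('http://www.bing4.com/th?id=ON.abc123&pid=News')
#   returns 'https://www.bing.com/th?id=ON.abc123'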
# do search-request
def request(query, params):
    offset = (params['pageno'] - 1) * 10 + 1

    if params['language'] == 'all':
        language = 'en-US'
    else:
        language = params['language'].replace('_', '-')

    search_path = search_string.format(
        query=urlencode({'q': query, 'setmkt': language}),
        offset=offset)

    params['url'] = base_url + search_path
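    # illustrative result (not a captured request): for query 'solar eclipse',
    # page 1 and language 'en-US', params['url'] comes out roughly as
    # https://www.bing.com/news/search?q=solar+eclipse&setmkt=en-US&first=1&format=RSS
    # (urlencode() may emit the two query parameters in either order)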
    return params
# get response from search-request
def response(resp):
    results = []

    rss = etree.fromstring(resp.content)

    ns = rss.nsmap
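    # rss.nsmap holds the XML namespaces declared on the RSS root element; the
    # 'News' prefix is needed below to resolve the News:Image thumbnail nodes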
    # parse results
    for item in rss.xpath('./channel/item'):
        # url / title / content
        url = url_cleanup(item.xpath('./link/text()')[0])
        title = list_get(item.xpath('./title/text()'), 0, url)
        content = list_get(item.xpath('./description/text()'), 0, '')

        # publishedDate
        publishedDate = list_get(item.xpath('./pubDate/text()'), 0)
        try:
            publishedDate = parser.parse(publishedDate, dayfirst=False)
        except TypeError:
            publishedDate = datetime.now()
        except ValueError:
            publishedDate = datetime.now()

        # thumbnail
        thumbnail = list_get(item.xpath('./News:Image/text()', namespaces=ns), 0)
        if thumbnail is not None:
            thumbnail = image_url_cleanup(thumbnail)

        # append result
        if thumbnail is not None:
            results.append({'template': 'videos.html',
                            'url': url,
                            'title': title,
                            'publishedDate': publishedDate,
                            'content': content,
                            'thumbnail': thumbnail})
        else:
            results.append({'url': url,
                            'title': title,
                            'publishedDate': publishedDate,
                            'content': content})

    # return results
    return results
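# Illustrative shape of an RSS <item> this parser expects, reconstructed from
# the XPaths above (not a captured Bing response; element order and content vary):
#
#   <item>
#     <title>Example headline</title>
#     <link>https://www.bing.com/news/apiclick.aspx?...&amp;url=...</link>
#     <description>Short summary of the article</description>
#     <pubDate>Mon, 01 Sep 2014 12:38:59 GMT</pubDate>
#     <News:Image>http://www.bing4.com/th?id=...&amp;pid=News</News:Image>
#   </item>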