## Google (Images)
#
# @website     https://www.google.com
# @provide-api yes (https://developers.google.com/web-search/docs/),
#              deprecated!
#
# @using-api   yes
# @results     JSON
# @stable      yes (but deprecated)
# @parse       url, title, img_src
2014-12-16 16:26:16 +00:00
from urllib import urlencode, unquote
2013-10-19 21:12:18 +00:00
from json import loads
2013-10-19 20:19:14 +00:00
2014-09-01 13:10:05 +00:00
# engine dependent config
categories = ['images']          # searx category this engine serves
paging = True                    # supports the {offset} page parameter
safesearch = True                # supports the {safesearch} switch

# search-url (Google AJAX image search, deprecated API)
url = 'https://ajax.googleapis.com/'
search_url = (url + 'ajax/services/search/images'
              '?v=1.0&start={offset}&rsz=large'
              '&safe={safesearch}&filter=off&{query}')
2014-01-20 01:31:20 +00:00
2013-10-19 20:19:14 +00:00
2014-09-01 13:10:05 +00:00
# do search-request
def request(query, params):
    """Build the Google-Images request URL into params['url'].

    Reads params['pageno'] (1-based) and params['safesearch'] (0 = off);
    returns the mutated params dict.
    """
    # the API returns 8 results per page, so translate pageno to an offset
    offset = (params['pageno'] - 1) * 8

    # map searx's numeric safesearch level onto Google's on/off switch
    safesearch = 'off' if params['safesearch'] == 0 else 'on'

    params['url'] = search_url.format(query=urlencode({'q': query}),
                                      offset=offset,
                                      safesearch=safesearch)

    return params
2014-01-20 01:31:20 +00:00
2014-09-01 13:10:05 +00:00
# get response from search-request
def response(resp):
    """Parse the Google AJAX image-search JSON into searx result dicts.

    resp -- HTTP response object whose .text holds the JSON payload.
    Returns a list of result dicts ([] when the payload has no results).
    """
    results = []

    search_res = loads(resp.text)

    # return empty array if there are no results
    if not search_res.get('responseData', {}).get('results'):
        return []

    # parse results
    for result in search_res['responseData']['results']:
        # skip entries without an image URL *before* reading any other
        # field -- the original read originalContextUrl/title first and
        # could raise KeyError on incomplete entries
        if 'url' not in result:
            continue

        href = result['originalContextUrl']
        title = result['title']
        thumbnail_src = result['tbUrl']

        # http to https
        thumbnail_src = thumbnail_src.replace("http://", "https://")

        # append result ('content' may be absent -> default to empty string
        # instead of raising KeyError)
        results.append({'url': href,
                        'title': title,
                        'content': result.get('content', ''),
                        'thumbnail_src': thumbnail_src,
                        'img_src': unquote(result['url']),
                        'template': 'images.html'})

    # return results
    return results