forked from zaclys/searxng
Merge pull request #1259 from allendema/petal-images
[enh] Add Petalsearch Images engine
commit 96dc4369d5
@@ -0,0 +1,94 @@
# SPDX-License-Identifier: AGPL-3.0-or-later
"""Petalsearch Images"""

from json import loads
from urllib.parse import urlencode
from datetime import datetime

from lxml import html

from searx.utils import extract_text

about = {
    "website": 'https://petalsearch.com/',
    "wikidata_id": 'Q104399280',
    "official_api_documentation": False,
    "use_official_api": False,
    "require_api_key": False,
    "results": 'JSON',
}

categories = ['images']
paging = True
time_range_support = False

safesearch = True
safesearch_table = {0: 'off', 1: 'moderate', 2: 'on'}

base_url = 'https://petalsearch.com/'
search_string = 'search?{query}&channel=image&ps=50&pn={page}&region={lang}&ss_mode={safesearch}&ss_type=normal'


def request(query, params):
    search_path = search_string.format(
        query=urlencode({'query': query}),
        page=params['pageno'],
        lang=params['language'].lower(),
        safesearch=safesearch_table[params['safesearch']],
    )

    params['url'] = base_url + search_path

    return params


def response(resp):
    results = []

    tree = html.fromstring(resp.text)

    # The result data is embedded as JSON inside the third <script> tag
    root = tree.findall('.//script[3]')
    json_content = extract_text(root)

    data = loads(json_content)

    for result in data['newImages']:
        url = result['url']
        title = result['title']
        thumbnail_src = result['image']

        pic_dict = result.get('extrainfo') or {}

        date_from_api = pic_dict.get('publish_time')
        width = pic_dict.get('width')
        height = pic_dict.get('height')
        img_src = pic_dict.get('real_url')

        # Skip results without a full-size image URL
        if not img_src:
            continue

        # Convert the published date if the API provides one
        publishedDate = None
        if date_from_api is not None:
            publishedDate = datetime.fromtimestamp(int(date_from_api))

        results.append(
            {
                'template': 'images.html',
                'url': url,
                'title': title,
                'img_src': img_src,
                'thumbnail_src': thumbnail_src,
                'width': width,
                'height': height,
                'publishedDate': publishedDate,
            }
        )

    return results
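For quick local verification, the request() hook can be driven outside a full SearXNG run. The sketch below is not part of the patch: it assumes a SearXNG checkout is on the import path and that the module is importable as searx.engines.petal_images (matching the engine: petal_images entry added to settings.yml below); the params dict carries only the fields the engine actually reads.

# Minimal sketch (not part of the patch): build a Petalsearch image-search URL by hand.
from searx.engines import petal_images  # assumes a SearXNG checkout on PYTHONPATH

params = {
    'pageno': 1,           # first page of results
    'language': 'en-US',   # lowercased into the region= parameter
    'safesearch': 1,       # 0 = off, 1 = moderate, 2 = on
}
petal_images.request('sunset', params)
print(params['url'])
# Expected shape:
# https://petalsearch.com/search?query=sunset&channel=image&ps=50&pn=1&region=en-us&ss_mode=moderate&ss_type=normal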
@@ -1720,6 +1720,12 @@ engines:
     require_api_key: false
     results: HTML
 
+  - name: petalsearch images
+    engine: petal_images
+    shortcut: ptsi
+    disabled: true
+    timeout: 3.0
+
   - name: petalsearch news
     shortcut: ptsn
     categories: news
@@ -1163,7 +1163,9 @@ def image_proxy():
                 return '', resp.status_code
             return '', 400
 
-        if not resp.headers.get('Content-Type', '').startswith('image/'):
+        if not resp.headers.get('Content-Type', '').startswith('image/') and not resp.headers.get(
+            'Content-Type', ''
+        ).startswith('binary/octet-stream'):
             logger.debug('image-proxy: wrong content-type: %s', resp.headers.get('Content-Type', ''))
             return '', 400
 
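The webapp.py change relaxes the image-proxy content-type check: responses labelled binary/octet-stream are now accepted alongside image/* types instead of being rejected with HTTP 400. The standalone sketch below restates the new predicate with a hypothetical helper name used only for illustration; webapp.py keeps this logic inline.

# Hypothetical helper, for illustration only; not part of the patch.
def is_acceptable_image_content_type(headers):
    content_type = headers.get('Content-Type', '')
    return content_type.startswith('image/') or content_type.startswith('binary/octet-stream')

assert is_acceptable_image_content_type({'Content-Type': 'image/jpeg'})
assert is_acceptable_image_content_type({'Content-Type': 'binary/octet-stream'})
assert not is_acceptable_image_content_type({'Content-Type': 'text/html'})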