Merge pull request #2296 from return42/fix-1879

[fix] engine flickr: adapt to the new data model from flickr's response
Markus Heiser 2023-03-30 21:21:05 +02:00 committed by GitHub
commit 0311d634ae
1 changed file with 46 additions and 29 deletions


@@ -1,14 +1,22 @@
 # SPDX-License-Identifier: AGPL-3.0-or-later
-"""
-Flickr (Images)
+# lint: pylint
+"""Flickr (Images)
 """
-from json import loads
+from typing import TYPE_CHECKING
+import json
 from time import time
 import re
 from urllib.parse import urlencode
 from searx.utils import ecma_unescape, html_to_text
+if TYPE_CHECKING:
+    import logging
+    logger: logging.Logger
 # about
 about = {
     "website": 'https://www.flickr.com',
@@ -19,23 +27,24 @@ about = {
     "results": 'HTML',
 }
+# engine dependent config
 categories = ['images']
-url = 'https://www.flickr.com/'
-search_url = url + 'search?{query}&page={page}'
-time_range_url = '&min_upload_date={start}&max_upload_date={end}'
-photo_url = 'https://www.flickr.com/photos/{userid}/{photoid}'
-modelexport_re = re.compile(r"^\s*modelExport:\s*({.*}),$", re.M)
-image_sizes = ('o', 'k', 'h', 'b', 'c', 'z', 'n', 'm', 't', 'q', 's')
 paging = True
 time_range_support = True
+safesearch = False
 time_range_dict = {
     'day': 60 * 60 * 24,
     'week': 60 * 60 * 24 * 7,
     'month': 60 * 60 * 24 * 7 * 4,
     'year': 60 * 60 * 24 * 7 * 52,
 }
+image_sizes = ('o', 'k', 'h', 'b', 'c', 'z', 'm', 'n', 't', 'q', 's')
+search_url = 'https://www.flickr.com/search?{query}&page={page}'
+time_range_url = '&min_upload_date={start}&max_upload_date={end}'
+photo_url = 'https://www.flickr.com/photos/{userid}/{photoid}'
+modelexport_re = re.compile(r"^\s*modelExport:\s*({.*}),$", re.M)
 def build_flickr_url(user_id, photo_id):
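The request() function between these two hunks is untouched by the commit. For orientation only, here is a rough, self-contained sketch of how the URL templates and time_range_dict defined above can be combined into a search URL; the helper name build_search_url and the 'text' query parameter are assumptions for illustration, not code taken from the engine.

from time import time
from urllib.parse import urlencode

search_url = 'https://www.flickr.com/search?{query}&page={page}'
time_range_url = '&min_upload_date={start}&max_upload_date={end}'
time_range_dict = {'day': 60 * 60 * 24, 'week': 60 * 60 * 24 * 7}

def build_search_url(query, page, time_range=None):
    # Fill the search template; the query terms go into a 'text' parameter here.
    url = search_url.format(query=urlencode({'text': query}), page=page)
    if time_range in time_range_dict:
        # min/max upload dates are epoch seconds spanning the selected range
        now = int(time())
        url += time_range_url.format(start=now - time_range_dict[time_range], end=now)
    return url

print(build_search_url('lighthouse', 1, 'week'))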
@@ -55,51 +64,59 @@ def request(query, params):
     return params
-def response(resp):
+def response(resp):  # pylint: disable=too-many-branches
     results = []
     matches = modelexport_re.search(resp.text)
     if matches is None:
         return results
     match = matches.group(1)
-    model_export = loads(match)
+    model_export = json.loads(match)
     if 'legend' not in model_export:
         return results
     legend = model_export['legend']
     # handle empty page
     if not legend or not legend[0]:
         return results
-    for index in legend:
-        photo = model_export['main'][index[0]][int(index[1])][index[2]][index[3]][int(index[4])]
+    for x, index in enumerate(legend):
+        if len(index) != 8:
+            logger.debug("skip legend enty %s : %s", x, index)
+            continue
+        photo = model_export['main'][index[0]][int(index[1])][index[2]][index[3]][index[4]][index[5]][int(index[6])][
+            index[7]
+        ]
         author = ecma_unescape(photo.get('realname', ''))
-        source = ecma_unescape(photo.get('username', '')) + ' @ Flickr'
+        source = ecma_unescape(photo.get('username', ''))
+        if source:
+            source += ' @ Flickr'
         title = ecma_unescape(photo.get('title', ''))
         content = html_to_text(ecma_unescape(photo.get('description', '')))
         img_src = None
         # From the biggest to the lowest format
+        size_data = None
         for image_size in image_sizes:
-            if image_size in photo['sizes']:
-                img_src = photo['sizes'][image_size]['url']
-                img_format = (
-                    'jpg ' + str(photo['sizes'][image_size]['width']) + 'x' + str(photo['sizes'][image_size]['height'])
-                )
+            if image_size in photo['sizes']['data']:
+                size_data = photo['sizes']['data'][image_size]['data']
                 break
-        if not img_src:
-            logger.debug('cannot find valid image size: {0}'.format(repr(photo)))
+        if not size_data:
+            logger.debug('cannot find valid image size: {0}'.format(repr(photo['sizes']['data'])))
             continue
+        img_src = size_data['url']
+        img_format = f"{size_data['width']} x {size_data['height']}"
         # For a bigger thumbnail, keep only the url_z, not the url_n
-        if 'n' in photo['sizes']:
-            thumbnail_src = photo['sizes']['n']['url']
-        elif 'z' in photo['sizes']:
-            thumbnail_src = photo['sizes']['z']['url']
+        if 'n' in photo['sizes']['data']:
+            thumbnail_src = photo['sizes']['data']['n']['data']['url']
+        elif 'z' in photo['sizes']['data']:
+            thumbnail_src = photo['sizes']['data']['z']['data']['url']
         else:
             thumbnail_src = img_src
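To make the new traversal easier to follow, here is a small mock of the payload shape the updated parser assumes. Every key name in it is invented; only the structure is taken from the code above: an 8-element legend entry whose second and seventh elements are list indexes, and an extra 'data' wrapper around the size table and around each individual size.

photo = {
    'realname': 'Jane Doe',
    'username': 'jdoe',
    'title': 'Example photo',
    'description': 'An example.',
    'sizes': {'data': {'z': {'data': {'url': 'https://example.org/photo_z.jpg',
                                      'width': 640, 'height': 480}}}},
}
model_export = {
    # 8-element path into model_export['main']; positions 1 and 6 are list indexes
    'legend': [['models', '0', 'a', 'b', 'c', 'd', '0', 'e']],
    'main': {'models': [{'a': {'b': {'c': {'d': [{'e': photo}]}}}}]},
}

index = model_export['legend'][0]
assert len(index) == 8
found = model_export['main'][index[0]][int(index[1])][index[2]][index[3]][index[4]][index[5]][int(index[6])][index[7]]
assert found is photo
# the size lookup now goes through 'data' twice: once for the table, once per size
assert found['sizes']['data']['z']['data']['url'] == 'https://example.org/photo_z.jpg'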