forked from zaclys/searxng
[fix] engine deviantart: rework of the result scraper
The deviantart site changed its markup, and hence the deviantart engine is currently unusable.
This commit is contained in:
parent
00a98865b6
commit
7b396ccb7b
|
@ -1,12 +1,14 @@
|
||||||
# SPDX-License-Identifier: AGPL-3.0-or-later
|
# SPDX-License-Identifier: AGPL-3.0-or-later
|
||||||
# lint: pylint
|
# lint: pylint
|
||||||
"""
|
"""Deviantart (Images)
|
||||||
Deviantart (Images)
|
|
||||||
"""
|
"""
|
||||||
|
|
||||||
from urllib.parse import urlencode
|
import urllib.parse
|
||||||
from lxml import html
|
from lxml import html
|
||||||
|
|
||||||
|
from searx.utils import extract_text, eval_xpath, eval_xpath_list
|
||||||
|
|
||||||
# about
|
# about
|
||||||
about = {
|
about = {
|
||||||
"website": 'https://www.deviantart.com/',
|
"website": 'https://www.deviantart.com/',
|
||||||
|
@ -20,31 +22,30 @@ about = {
|
||||||
# engine dependent config
categories = ['images']
paging = True

# search-url
base_url = 'https://www.deviantart.com'

# XPath selectors for the search result page.  Each result anchor carries the
# deviation link, an aria-label title and a thumbnail <img> with a srcset.
results_xpath = '//div[@class="_2pZkk"]/div/div/a'
url_xpath = './@href'
thumbnail_src_xpath = './div/img/@src'
img_src_xpath = './div/img/@srcset'
title_xpath = './@aria-label'

# Selector and marker text for blurred "premium" deviations that are only
# visible to watchers of the artist.
premium_xpath = '../div/div/div/text()'
premium_keytext = 'Watch the artist to view this deviation'

# Link of the last pagination anchor; used as cursor for the next page.
cursor_xpath = '(//a[@class="_1OGeq"]/@href)[last()]'
def request(query, params):
    """Assemble the deviantart request URL.

    Resumes from the ``nextpage`` cursor stored in ``params['engine_data']``
    when the user moves past page 1; otherwise a fresh search is started at
    ``https://www.deviantart.com/search?q=...``.
    """
    cursor = params['engine_data'].get('nextpage')

    # A jump back to page 1 always starts a fresh search, even when a
    # cursor from a previous request is still around.
    if cursor is None or params['pageno'] <= 1:
        search_args = urllib.parse.urlencode({'q': query})
        params['url'] = f"{base_url}/search?{search_args}"
    else:
        params['url'] = cursor

    return params
||||||
|
@ -52,30 +53,36 @@ def request(query, params):
|
||||||
def response(resp):
    """Parse a deviantart search result page into searx image results.

    Blurred "premium" deviations are skipped.  If a pagination cursor is
    found, it is appended as an ``engine_data`` result so the next request
    can resume from it.
    """
    dom = html.fromstring(resp.text)
    results = []

    for node in eval_xpath_list(dom, results_xpath):

        # skip images that are blurred (watch-the-artist notice)
        notice = extract_text(eval_xpath(node, premium_xpath))
        if notice and premium_keytext in notice:
            continue

        img_src = extract_text(eval_xpath(node, img_src_xpath))
        if img_src:
            # srcset: keep the first candidate URL and strip the '/v1'
            # resizing suffix from its path to get the full-size image
            img_src = img_src.split(' ')[0]
            parsed = urllib.parse.urlparse(img_src)
            trimmed_path = parsed.path.split('/v1')[0]
            img_src = parsed._replace(path=trimmed_path).geturl()

        results.append(
            {
                'template': 'images.html',
                'url': extract_text(eval_xpath(node, url_xpath)),
                'img_src': img_src,
                'thumbnail_src': extract_text(eval_xpath(node, thumbnail_src_xpath)),
                'title': extract_text(eval_xpath(node, title_xpath)),
            }
        )

    cursor = extract_text(eval_xpath(dom, cursor_xpath))
    if cursor:
        # hand the cursor back so the follow-up request can resume here
        results.append(
            {
                'engine_data': cursor.replace("http://", "https://"),
                'key': 'nextpage',
            }
        )

    return results
||||||
|
|
Loading…
Reference in New Issue