forked from zaclys/searxng

[fix] engine deviantart: review of the result scraper

The deviantart site changed, and hence the engine is currently unusable.

parent 00a98865b6
commit 7b396ccb7b

1 changed file with 48 additions and 41 deletions
searx/engines/deviantart.py

@@ -1,12 +1,14 @@
 # SPDX-License-Identifier: AGPL-3.0-or-later
 # lint: pylint
-"""
- Deviantart (Images)
+"""Deviantart (Images)
+
 """
 
-from urllib.parse import urlencode
+import urllib.parse
 from lxml import html
 
+from searx.utils import extract_text, eval_xpath, eval_xpath_list
+
 # about
 about = {
     "website": 'https://www.deviantart.com/',
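The import change in this first hunk is deliberate: the bare `from urllib.parse import urlencode` becomes the namespaced `import urllib.parse`, because the rewritten engine uses two helpers from that module, `urlencode()` in `request()` and `urlparse()` in `response()` further down. A minimal standard-library sketch of both calls (the values are made-up examples):

```python
import urllib.parse

# build a query string, as the new request() does for a fresh search
print(urllib.parse.urlencode({'q': 'tree'}))   # q=tree

# split a URL into components, as the new response() does before trimming the path
parts = urllib.parse.urlparse('https://example.org/f/pic.jpg/v1/fill/pic.jpg')
print(parts.path)                              # /f/pic.jpg/v1/fill/pic.jpg
```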
@@ -20,31 +22,30 @@ about = {
 # engine dependent config
 categories = ['images']
 paging = True
-time_range_support = True
 
-time_range_dict = {
-    'day': 'popular-24-hours',
-    'week': 'popular-1-week',
-    'month': 'popular-1-month',
-    'year': 'most-recent',
-}
-
 # search-url
 base_url = 'https://www.deviantart.com'
 
+results_xpath = '//div[@class="_2pZkk"]/div/div/a'
+url_xpath = './@href'
+thumbnail_src_xpath = './div/img/@src'
+img_src_xpath = './div/img/@srcset'
+title_xpath = './@aria-label'
+premium_xpath = '../div/div/div/text()'
+premium_keytext = 'Watch the artist to view this deviation'
+cursor_xpath = '(//a[@class="_1OGeq"]/@href)[last()]'
+
 
 def request(query, params):
 
-    # https://www.deviantart.com/search/deviations?page=5&q=foo
+    # https://www.deviantart.com/search?q=foo
 
-    query = {
-        'page': params['pageno'],
-        'q': query,
-    }
-    if params['time_range'] in time_range_dict:
-        query['order'] = time_range_dict[params['time_range']]
-
-    params['url'] = base_url + '/search/deviations?' + urlencode(query)
+    nextpage_url = params['engine_data'].get('nextpage')
+    # don't use nextpage when user selected to jump back to page 1
+    if params['pageno'] > 1 and nextpage_url is not None:
+        params['url'] = nextpage_url
+    else:
+        params['url'] = f"{base_url}/search?{urllib.parse.urlencode({'q': query})}"
 
     return params
 
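DeviantArt's search apparently no longer honors a `page=N` parameter, so the rewritten `request()` pages by cursor instead: `response()` (next hunk) scrapes the "next page" link via `cursor_xpath` and returns it under the `nextpage` key of `engine_data`, and every request after page 1 simply follows that stored URL. The `time_range_dict` values disappear along with the old `/search/deviations` endpoint, which was the only place they were used. A standalone sketch of the new branch with a mocked `params` dict (the cursor URL is a made-up example):

```python
import urllib.parse

base_url = 'https://www.deviantart.com'

def build_url(query, params):
    # mirror of the new request() logic: follow the stored cursor when the
    # user is past page 1, otherwise start a fresh search
    nextpage_url = params['engine_data'].get('nextpage')
    if params['pageno'] > 1 and nextpage_url is not None:
        return nextpage_url
    return f"{base_url}/search?{urllib.parse.urlencode({'q': query})}"

print(build_url('tree', {'pageno': 1, 'engine_data': {}}))
# https://www.deviantart.com/search?q=tree
print(build_url('tree', {'pageno': 2, 'engine_data': {'nextpage': 'https://www.deviantart.com/search?q=tree&cursor=abc123'}}))
# https://www.deviantart.com/search?q=tree&cursor=abc123
```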
@@ -52,30 +53,36 @@ def request(query, params):
 def response(resp):
 
     results = []
 
     dom = html.fromstring(resp.text)
 
-    for row in dom.xpath('//div[contains(@data-hook, "content_row")]'):
-        for result in row.xpath('./div'):
+    for result in eval_xpath_list(dom, results_xpath):
+        # skip images that are blurred
+        _text = extract_text(eval_xpath(result, premium_xpath))
+        if _text and premium_keytext in _text:
+            continue
+        img_src = extract_text(eval_xpath(result, img_src_xpath))
+        if img_src:
+            img_src = img_src.split(' ')[0]
+            parsed_url = urllib.parse.urlparse(img_src)
+            img_src = parsed_url._replace(path=parsed_url.path.split('/v1')[0]).geturl()
 
-            a_tag = result.xpath('.//a[@data-hook="deviation_link"]')[0]
-            noscript_tag = a_tag.xpath('.//noscript')
-
-            if noscript_tag:
-                img_tag = noscript_tag[0].xpath('.//img')
-            else:
-                img_tag = a_tag.xpath('.//img')
-            if not img_tag:
-                continue
-            img_tag = img_tag[0]
-
-            results.append(
-                {
-                    'template': 'images.html',
-                    'url': a_tag.attrib.get('href'),
-                    'img_src': img_tag.attrib.get('src'),
-                    'title': img_tag.attrib.get('alt'),
-                }
-            )
+        results.append(
+            {
+                'template': 'images.html',
+                'url': extract_text(eval_xpath(result, url_xpath)),
+                'img_src': img_src,
+                'thumbnail_src': extract_text(eval_xpath(result, thumbnail_src_xpath)),
+                'title': extract_text(eval_xpath(result, title_xpath)),
+            }
+        )
+    nextpage_url = extract_text(eval_xpath(dom, cursor_xpath))
+    if nextpage_url:
+        results.append(
+            {
+                'engine_data': nextpage_url.replace("http://", "https://"),
+                'key': 'nextpage',
+            }
+        )
 
     return results
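The subtlest part of the new `response()` is the `img_src` clean-up: the first candidate URL is taken from the thumbnail's `srcset`, and everything from `/v1` onward is cut out of the path, which is where the CDN's image-transform (resize/fill) parameters live, leaving a URL for the full-size image. A standalone sketch with an invented `srcset` value in that URL shape, not real data:

```python
import urllib.parse

# invented example in DeviantArt's URL shape, not real data
srcset = ('https://images.example/f/pic.jpg/v1/fill/w_400/pic.jpg 400w,'
          'https://images.example/f/pic.jpg/v1/fill/w_600/pic.jpg 600w')

img_src = srcset.split(' ')[0]                 # first candidate of the srcset
parsed_url = urllib.parse.urlparse(img_src)
# drop the '/v1/...' transform suffix to address the original image
img_src = parsed_url._replace(path=parsed_url.path.split('/v1')[0]).geturl()
print(img_src)                                 # https://images.example/f/pic.jpg
```

Blurred watcher-only deviations are skipped up front via `premium_keytext`, and the scraped next-page cursor is upgraded to https before it is handed back through `engine_data`.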