searxngRebrandZaclys/searx/engines/vimeo.py


# SPDX-License-Identifier: AGPL-3.0-or-later
"""
Wikipedia (Web
"""
from urllib.parse import urlencode
from json import loads
from dateutil import parser

# about
about = {
    "website": 'https://vimeo.com/',
    "wikidata_id": 'Q156376',
    "official_api_documentation": 'http://developer.vimeo.com/api',
    "use_official_api": False,
    "require_api_key": False,
    "results": 'HTML',
}

# engine dependent config
categories = ['videos']
paging = True
# search-url
base_url = 'https://vimeo.com/'
search_url = base_url + '/search/page:{pageno}?{query}'
embedded_url = (
    '<iframe data-src="https://player.vimeo.com/video/{videoid}" '
    + 'width="540" height="304" frameborder="0" '
    + 'webkitallowfullscreen mozallowfullscreen allowfullscreen></iframe>'
)
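# {videoid} is filled in for each result in response() below
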
# do search-request
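# e.g. query 'searx' with pageno 1 yields
# 'https://vimeo.com//search/page:1?q=searx' (the double slash comes from base_url)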
def request(query, params):
    params['url'] = search_url.format(pageno=params['pageno'], query=urlencode({'q': query}))
    return params

# get response from search-request
def response(resp):
    results = []
    # extract the JSON blob embedded in the search page markup
    data_start_pos = resp.text.find('{"filtered"')
    data_end_pos = resp.text.find(';\n', data_start_pos + 1)
    data = loads(resp.text[data_start_pos:data_end_pos])
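    # inferred from the accesses below: each entry of data['filtered']['data'] looks
    # roughly like {'type': <kind>, <kind>: {'uri', 'name', 'created_time',
    # 'pictures': {'sizes': [...]}, ...}}
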
    # parse results
    for result in data['filtered']['data']:
        # the actual record is nested under its type key
        result = result[result['type']]
        videoid = result['uri'].split('/')[-1]
        url = base_url + videoid

        title = result['name']
        thumbnail = result['pictures']['sizes'][-1]['link']
        publishedDate = parser.parse(result['created_time'])
        embedded = embedded_url.format(videoid=videoid)

        # append result
        results.append(
            {
                'url': url,
                'title': title,
                'content': '',
                'template': 'videos.html',
                'publishedDate': publishedDate,
                'embedded': embedded,
                'thumbnail': thumbnail,
            }
        )
    # return results
    return results