Merge branch 'master' of https://github.com/asciimoo/searx into add-docs

Markus Heiser 2020-01-03 14:30:53 +01:00
commit 51b9295b84
7 changed files with 52 additions and 30 deletions

View File

@@ -114,6 +114,9 @@ content:
 # Module to import
 module = searx.webapp
 
+# Support running the module from a webserver subdirectory.
+route-run = fixpathinfo:
+
 # Virtualenv and python path
 virtualenv = /usr/local/searx/searx-ve/
 pythonpath = /usr/local/searx/
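
The new route-run = fixpathinfo: entry uses uWSGI's internal routing to strip the SCRIPT_NAME prefix from PATH_INFO, which is what lets searx run under a subdirectory such as /searx without the deprecated uwsgi_modifier1 trick removed further down. A minimal WSGI sketch of the same idea (illustrative, not part of this commit):

.. code:: python

    # Sketch of what uWSGI's fixpathinfo routing action does: strip the
    # SCRIPT_NAME prefix from PATH_INFO so the application only sees paths
    # relative to its mount point.
    def fixpathinfo(app):
        def middleware(environ, start_response):
            script_name = environ.get('SCRIPT_NAME', '')
            path_info = environ.get('PATH_INFO', '')
            if script_name and path_info.startswith(script_name):
                # e.g. SCRIPT_NAME=/searx turns PATH_INFO=/searx/search into /search
                environ['PATH_INFO'] = path_info[len(script_name):]
            return app(environ, start_response)
        return middleware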
@@ -151,7 +154,10 @@ content:
 server {
     listen 80;
     server_name searx.example.com;
-    root /usr/local/searx;
+    root /usr/local/searx/searx;
+
+    location /static {
+    }
 
     location / {
         include uwsgi_params;
@@ -180,14 +186,13 @@ Add this configuration in the server config file
 .. code:: nginx
 
-    location = /searx { rewrite ^ /searx/; }
-
-    location /searx {
-        try_files $uri @searx;
-    }
-
-    location @searx {
+    location /searx/static {
+        alias /usr/local/searx/searx/static;
+    }
+
+    location /searx {
         uwsgi_param SCRIPT_NAME /searx;
         include uwsgi_params;
-        uwsgi_modifier1 30;
         uwsgi_pass unix:/run/uwsgi/app/searx/socket;
     }
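
Together with route-run = fixpathinfo: above, this drops the rewrite/try_files/@searx indirection and the uwsgi_modifier1 30 line, which is deprecated in current uWSGI: SCRIPT_NAME is now passed straight through and the prefix stripping happens inside uWSGI. The new /searx/static location also lets nginx serve static files from disk instead of routing them through the application. A hypothetical smoke test (the host is the placeholder used in these docs; exact static paths vary per theme):

.. code:: python

    # Hypothetical check that both the app and the static alias answer.
    import requests

    base = 'http://searx.example.com'
    for path in ('/searx/', '/searx/static/'):
        r = requests.get(base + path, allow_redirects=True)
        print(path, r.status_code)   # expect 200 for the app; the bare alias may 403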
@@ -197,6 +202,10 @@ in case of single-user or low-traffic instances.)
 .. code:: nginx
 
+    location /searx/static {
+        alias /usr/local/searx/searx/static;
+    }
+
     location /searx {
         proxy_pass http://127.0.0.1:8888;
         proxy_set_header Host $host;
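
Same motivation for the plain-proxy variant: without this block, every static asset would be proxied through the searx process, so nginx now answers those requests from disk. An illustrative check that the backend itself is up before fronting it with nginx:

.. code:: python

    # Illustrative: the proxy_pass target is searx's built-in server,
    # which listens on 127.0.0.1:8888 by default (settings.yml).
    from urllib.request import urlopen
    print(urlopen('http://127.0.0.1:8888/').status)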
@ -338,4 +347,3 @@ References
* How to: `Setup searx in a couple of hours with a free SSL certificate * How to: `Setup searx in a couple of hours with a free SSL certificate
<https://www.reddit.com/r/privacytoolsIO/comments/366kvn/how_to_setup_your_own_privacy_respecting_search/>`__ <https://www.reddit.com/r/privacytoolsIO/comments/366kvn/how_to_setup_your_own_privacy_respecting_search/>`__

View File

@@ -89,8 +89,7 @@ def response(resp):
                         'content': content})
 
     try:
-        result_len_container = "".join(eval_xpath(dom, '//span[@class="sb_count"]/text()'))
-        result_len_container = utils.to_string(result_len_container)
+        result_len_container = "".join(eval_xpath(dom, '//span[@class="sb_count"]//text()'))
         if "-" in result_len_container:
             # Remove the part "from-to" for paginated request ...
             result_len_container = result_len_container[result_len_container.find("-") * 2 + 2:]
@@ -102,7 +101,7 @@ def response(resp):
             logger.debug('result error :\n%s', e)
             pass
 
-    if _get_offset_from_pageno(resp.search_params.get("pageno", 0)) > result_len:
+    if result_len and _get_offset_from_pageno(resp.search_params.get("pageno", 0)) > result_len:
         return []
 
     results.append({'number_of_results': result_len})
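
Two related fixes in bing.py: //text() also collects text nested inside the sb_count span, so the join no longer needs the utils.to_string() normalization, and the new result_len guard keeps a failed count parse (result_len left at 0) from discarding every page after the first. Illustrative, assuming the engine's offset helper maps page numbers to 1, 11, 21, ...:

.. code:: python

    # Why the "result_len and ..." guard matters. The helper below mirrors
    # bing.py's paging offset (an assumption about its exact body).
    def _get_offset_from_pageno(pageno):
        return (pageno - 1) * 10 + 1

    result_len = 0   # count parse failed
    pageno = 2
    print(_get_offset_from_pageno(pageno) > result_len)                        # True: results dropped
    print(bool(result_len and _get_offset_from_pageno(pageno) > result_len))   # False: results kept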

View File

@@ -109,14 +109,22 @@ def response(resp):
         else:
             url = build_flickr_url(photo['ownerNsid'], photo['id'])
 
-        results.append({'url': url,
-                        'title': title,
-                        'img_src': img_src,
-                        'thumbnail_src': thumbnail_src,
-                        'content': content,
-                        'author': author,
-                        'source': source,
-                        'img_format': img_format,
-                        'template': 'images.html'})
+        result = {
+            'url': url,
+            'img_src': img_src,
+            'thumbnail_src': thumbnail_src,
+            'source': source,
+            'img_format': img_format,
+            'template': 'images.html'
+        }
+        try:
+            result['author'] = author.encode('utf-8')
+            result['title'] = title.encode('utf-8')
+            result['content'] = content.encode('utf-8')
+        except:
+            result['author'] = ''
+            result['title'] = ''
+            result['content'] = ''
+        results.append(result)
 
     return results
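
The fields carrying arbitrary user text (author, title, content) are now encoded defensively: a missing field (None) or an odd type makes .encode('utf-8') raise, and the engine falls back to empty strings instead of dropping the whole result. Illustrative failure mode:

.. code:: python

    # Why the encode() calls are wrapped: None stands in for a missing field.
    for value in (u'Jane Doe', None):
        try:
            print(value.encode('utf-8'))
        except AttributeError:
            print('')   # fall back to an empty field, as the engine now does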

View File

@@ -32,7 +32,7 @@ base_url = 'https://www.ina.fr'
 search_url = base_url + '/layout/set/ajax/recherche/result?autopromote=&hf={ps}&b={start}&type=Video&r=&{query}'
 
 # specific xpath variables
-results_xpath = '//div[contains(@class,"search-results--list")]/div[@class="media"]'
+results_xpath = '//div[contains(@class,"search-results--list")]//div[@class="media-body"]'
 url_xpath = './/a/@href'
 title_xpath = './/h3[@class="h3--title media-heading"]'
 thumbnail_xpath = './/img/@src'
@@ -65,8 +65,11 @@ def response(resp):
         videoid = result.xpath(url_xpath)[0]
         url = base_url + videoid
         title = p.unescape(extract_text(result.xpath(title_xpath)))
-        thumbnail = extract_text(result.xpath(thumbnail_xpath)[0])
-        if thumbnail[0] == '/':
+        try:
+            thumbnail = extract_text(result.xpath(thumbnail_xpath)[0])
+        except:
+            thumbnail = ''
+        if thumbnail and thumbnail[0] == '/':
             thumbnail = base_url + thumbnail
         d = extract_text(result.xpath(publishedDate_xpath)[0])
         d = d.split('/')
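
With the results xpath now anchored on media-body, some results carry no img element at all, so result.xpath(thumbnail_xpath) returns an empty list and the [0] raises IndexError; the try/except defaults the thumbnail to an empty string, and the extended condition avoids indexing into it. A minimal reproduction:

.. code:: python

    # Indexing an empty xpath result raises IndexError.
    from lxml import html

    node = html.fromstring('<div class="media-body"><h3>title</h3></div>')
    try:
        thumbnail = node.xpath('.//img/@src')[0]
    except IndexError:
        thumbnail = ''          # same fallback the engine now uses
    print(repr(thumbnail))      # ''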

View File

@@ -45,6 +45,8 @@ def request(query, params):
 def response(resp):
     results = []
     response_data = loads(resp.text)
+    if not response_data:
+        return results
 
     for result in response_data['results']:
         url = _get_url(result)
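
This guards against an empty or null JSON payload: json.loads returns None for a body of "null", and subscripting None with ['results'] raises TypeError, so the engine now returns an empty result list instead. For example:

.. code:: python

    # Why the early return is needed.
    from json import loads

    response_data = loads('null')    # a null body parses to None
    if not response_data:
        print('no results')          # the new early-return path
    else:
        print(response_data['results'])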

View File

@@ -29,7 +29,7 @@ def request(query, params):
     params['url'] = search_url
     params['method'] = 'POST'
     params['headers']['Content-type'] = "application/json"
-    params['data'] = dumps({"query": query,
+    params['data'] = dumps({"query": query.decode('utf-8'),
                             "searchField": "ALL",
                             "sortDirection": "ASC",
                             "sortOrder": "RELEVANCY",

View File

@@ -79,9 +79,10 @@ engines:
     categories : science
     timeout : 4.0
 
-  - name : base
-    engine : base
-    shortcut : bs
+# tmp suspended: dh key too small
+#  - name : base
+#    engine : base
+#    shortcut : bs
 
   - name : wikipedia
     engine : wikipedia
@@ -552,10 +553,11 @@ engines:
     timeout : 10.0
     disabled : True
 
-  - name : scanr structures
-    shortcut: scs
-    engine : scanr_structures
-    disabled : True
+# tmp suspended: bad certificate
+#  - name : scanr structures
+#    shortcut: scs
+#    engine : scanr_structures
+#    disabled : True
 
   - name : soundcloud
     engine : soundcloud
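
Both suspensions are TLS failures on the remote side rather than engine bugs: base's server offers Diffie-Hellman parameters that modern OpenSSL rejects ("dh key too small"), and scanr's certificate does not validate. Commenting the engines out avoids hard errors on every query until the upstream services are fixed. The failure surfaces roughly like this (the URL is a stand-in for the engine's endpoint):

.. code:: python

    # Illustrative: what a suspended engine's failure looks like from
    # python-requests.
    import requests

    try:
        requests.get('https://api.base-search.net/', timeout=4.0)
    except requests.exceptions.SSLError as e:
        print(e)   # e.g. "... dh key too small ..." or a certificate verify failure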