mirror of https://github.com/searxng/searxng.git

commit 51b9295b84
Merge branch 'master' of https://github.com/asciimoo/searx into add-docs
@@ -114,6 +114,9 @@ content:
 # Module to import
 module = searx.webapp

+# Support running the module from a webserver subdirectory.
+route-run = fixpathinfo:
+
 # Virtualenv and python path
 virtualenv = /usr/local/searx/searx-ve/
 pythonpath = /usr/local/searx/
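The two added lines are what makes the subdirectory setup later in this commit work: according to the comment, `route-run = fixpathinfo:` lets uWSGI serve the app from a webserver subdirectory, the usual effect being that the SCRIPT_NAME prefix is stripped from PATH_INFO before the request reaches the WSGI application (that reading is an assumption, the diff itself does not spell it out). A minimal sketch of the environ the app should then see; the function name is illustrative, not from the commit:

    def wsgi_app(environ, start_response):
        # For a request to /searx/search routed through the nginx config below:
        #   SCRIPT_NAME = '/searx'   (set by "uwsgi_param SCRIPT_NAME /searx")
        #   PATH_INFO   = '/search'  (prefix removed by the fixpathinfo: route)
        start_response('200 OK', [('Content-Type', 'text/plain')])
        body = '%s %s' % (environ.get('SCRIPT_NAME', ''), environ.get('PATH_INFO', ''))
        return [body.encode('utf-8')]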
@@ -151,7 +154,10 @@ content:
 server {
     listen 80;
     server_name searx.example.com;
-    root /usr/local/searx;
+    root /usr/local/searx/searx;

+    location /static {
+    }
+
     location / {
         include uwsgi_params;
@@ -180,14 +186,13 @@ Add this configuration in the server config file

 .. code:: nginx

-    location = /searx { rewrite ^ /searx/; }
-    location /searx {
-        try_files $uri @searx;
+    location /searx/static {
+        alias /usr/local/searx/searx/static;
     }
-    location @searx {

+    location /searx {
+        uwsgi_param SCRIPT_NAME /searx;
         include uwsgi_params;
-        uwsgi_modifier1 30;
         uwsgi_pass unix:/run/uwsgi/app/searx/socket;
     }

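With `uwsgi_param SCRIPT_NAME /searx` replacing the old `uwsgi_modifier1 30` line, the application learns its URL prefix from the WSGI environ instead of from a uwsgi protocol modifier. A small sketch of the effect, assuming plain Flask/werkzeug behaviour (searx is a Flask app, but the route used here is made up for the example):

    from flask import Flask, url_for

    app = Flask(__name__)

    @app.route('/search')
    def search():
        return 'ok'

    # Simulate a request that arrived with the SCRIPT_NAME set by nginx/uwsgi:
    with app.test_request_context('/search', environ_overrides={'SCRIPT_NAME': '/searx'}):
        print(url_for('search'))   # -> /searx/search, generated links stay inside the subdirectory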
@@ -197,6 +202,10 @@ in case of single-user or low-traffic instances.)

 .. code:: nginx

+    location /searx/static {
+        alias /usr/local/searx/searx/static;
+    }
+
     location /searx {
         proxy_pass http://127.0.0.1:8888;
         proxy_set_header Host $host;
@@ -338,4 +347,3 @@ References

 * How to: `Setup searx in a couple of hours with a free SSL certificate
   <https://www.reddit.com/r/privacytoolsIO/comments/366kvn/how_to_setup_your_own_privacy_respecting_search/>`__
-
@@ -89,8 +89,7 @@ def response(resp):
                         'content': content})

     try:
-        result_len_container = "".join(eval_xpath(dom, '//span[@class="sb_count"]/text()'))
-        result_len_container = utils.to_string(result_len_container)
+        result_len_container = "".join(eval_xpath(dom, '//span[@class="sb_count"]//text()'))
         if "-" in result_len_container:
             # Remove the part "from-to" for paginated request ...
             result_len_container = result_len_container[result_len_container.find("-") * 2 + 2:]
@@ -102,7 +101,7 @@ def response(resp):
         logger.debug('result error :\n%s', e)
         pass

-    if _get_offset_from_pageno(resp.search_params.get("pageno", 0)) > result_len:
+    if result_len and _get_offset_from_pageno(resp.search_params.get("pageno", 0)) > result_len:
         return []

     results.append({'number_of_results': result_len})
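Two things change in this engine (the `sb_count` xpath marks it as the Bing result counter): the count string is now collected with `//text()` so that text inside nested tags is included, and the early return is guarded with `result_len and ...` so an unparsable count (0) no longer discards the results. A quick illustration of the xpath difference with lxml; the markup below is hypothetical, not Bing's actual HTML:

    from lxml import html

    snippet = '<div><span class="sb_count">1-10 of <strong>2,400,000</strong> results</span></div>'
    dom = html.fromstring(snippet)

    # /text() only returns text nodes that are direct children of the span:
    print("".join(dom.xpath('//span[@class="sb_count"]/text()')))    # '1-10 of  results'
    # //text() also descends into <strong>, keeping the number:
    print("".join(dom.xpath('//span[@class="sb_count"]//text()')))   # '1-10 of 2,400,000 results'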
@@ -109,14 +109,22 @@ def response(resp):
         else:
             url = build_flickr_url(photo['ownerNsid'], photo['id'])

-            results.append({'url': url,
-                'title': title,
+            result = {
+                'url': url,
                 'img_src': img_src,
                 'thumbnail_src': thumbnail_src,
-                'content': content,
-                'author': author,
                 'source': source,
                 'img_format': img_format,
-                'template': 'images.html'})
+                'template': 'images.html'
+            }
+            try:
+                result['author'] = author.encode('utf-8')
+                result['title'] = title.encode('utf-8')
+                result['content'] = content.encode('utf-8')
+            except:
+                result['author'] = ''
+                result['title'] = ''
+                result['content'] = ''
+            results.append(result)

     return results
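The rewritten Flickr block builds the result dict first and then fills in the free-text fields, falling back to empty strings if anything goes wrong (the engine itself uses a bare except:). A stripped-down sketch of that fallback, with Python 2 era semantics assumed and an invented field value:

    author = u'caf\u00e9 owner'          # scraped values may contain non-ASCII text
    try:
        author = author.encode('utf-8')  # store as UTF-8 bytes
    except (AttributeError, UnicodeError):
        author = ''                      # degrade to an empty field rather than fail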
@@ -32,7 +32,7 @@ base_url = 'https://www.ina.fr'
 search_url = base_url + '/layout/set/ajax/recherche/result?autopromote=&hf={ps}&b={start}&type=Video&r=&{query}'

 # specific xpath variables
-results_xpath = '//div[contains(@class,"search-results--list")]/div[@class="media"]'
+results_xpath = '//div[contains(@class,"search-results--list")]//div[@class="media-body"]'
 url_xpath = './/a/@href'
 title_xpath = './/h3[@class="h3--title media-heading"]'
 thumbnail_xpath = './/img/@src'
@@ -65,8 +65,11 @@ def response(resp):
         videoid = result.xpath(url_xpath)[0]
         url = base_url + videoid
         title = p.unescape(extract_text(result.xpath(title_xpath)))
+        try:
             thumbnail = extract_text(result.xpath(thumbnail_xpath)[0])
-        if thumbnail[0] == '/':
+        except:
+            thumbnail = ''
+        if thumbnail and thumbnail[0] == '/':
             thumbnail = base_url + thumbnail
         d = extract_text(result.xpath(publishedDate_xpath)[0])
         d = d.split('/')
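Alongside the tighter results_xpath, the thumbnail lookup is wrapped in try/except so an INA result without an <img> no longer breaks the whole response. The same pattern in isolation, with markup invented for the example:

    from lxml import html

    base_url = 'https://www.ina.fr'
    media = html.fromstring('<div class="media-body"><a href="/video/I123">some title</a></div>')
    try:
        thumbnail = media.xpath('.//img/@src')[0]   # raises IndexError when there is no <img>
    except IndexError:
        thumbnail = ''
    if thumbnail and thumbnail[0] == '/':
        thumbnail = base_url + thumbnail
    print(repr(thumbnail))                          # ''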
@@ -45,6 +45,8 @@ def request(query, params):
 def response(resp):
     results = []
     response_data = loads(resp.text)
+    if not response_data:
+        return results

     for result in response_data['results']:
         url = _get_url(result)
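The added guard covers the service answering with an empty or null JSON document, in which case indexing response_data['results'] would fail. A sketch of the case it protects against:

    from json import loads

    results = []
    response_data = loads('null')      # API returned "null" -> Python None
    if not response_data:
        print(results)                 # engine returns the empty result list instead of raising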
@@ -29,7 +29,7 @@ def request(query, params):
     params['url'] = search_url
     params['method'] = 'POST'
     params['headers']['Content-type'] = "application/json"
-    params['data'] = dumps({"query": query,
+    params['data'] = dumps({"query": query.decode('utf-8'),
                             "searchField": "ALL",
                             "sortDirection": "ASC",
                             "sortOrder": "RELEVANCY",
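Here the query reaches the engine as UTF-8 encoded bytes (Python 2 era), while json dumps() wants text, hence the added decode('utf-8'). Roughly, with an invented query value:

    from json import dumps

    raw_query = b'caf\xc3\xa9'                              # 'café' as UTF-8 bytes
    payload = dumps({"query": raw_query.decode('utf-8'),    # decode before serialising
                     "searchField": "ALL",
                     "sortDirection": "ASC",
                     "sortOrder": "RELEVANCY"})
    print(payload)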
@@ -79,9 +79,10 @@ engines:
     categories : science
     timeout : 4.0

-  - name : base
-    engine : base
-    shortcut : bs
+# tmp suspended: dh key too small
+# - name : base
+#   engine : base
+#   shortcut : bs

   - name : wikipedia
     engine : wikipedia
@@ -552,10 +553,11 @@ engines:
     timeout : 10.0
     disabled : True

-  - name : scanr structures
-    shortcut: scs
-    engine : scanr_structures
-    disabled : True
+# tmp suspended: bad certificate
+# - name : scanr structures
+#   shortcut: scs
+#   engine : scanr_structures
+#   disabled : True

   - name : soundcloud
     engine : soundcloud
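Both settings hunks retire an engine by commenting it out rather than deleting it (base: weak DH key, scanr structures: bad certificate). Commented entries simply vanish from the parsed engine list; a sketch of checking that, assuming the usual settings path:

    import yaml

    with open('searx/settings.yml') as f:               # path assumed, adjust to your checkout
        settings = yaml.safe_load(f)

    names = {engine['name'] for engine in settings['engines']}
    print('base' in names, 'scanr structures' in names)  # False False once both are commented out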