From 52e615dede8538c36f569d2cf07835427a9a0db6 Mon Sep 17 00:00:00 2001
From: Adam Tauber
Date: Wed, 30 Nov 2016 18:43:03 +0100
Subject: [PATCH] [enh] py3 compatibility
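
Make the code base run on both Python 2.7 and Python 3.6:

* introduce searx/url_utils.py as the single place importing the
  urllib/urlparse names that moved to urllib.parse on Python 3
* replace dict.iteritems() with dict.items() and wrap map()/filter()
  in list() wherever the result is indexed or reused
* switch response parsing from resp.content (bytes) to resp.text
  (str) where lxml/etree expects text
* treat raw queries as bytes, comparing answerer keywords and plugin
  triggers against b'...' literals, and alias unicode = str on
  Python 3 in modules that still call the Python 2 builtin
* replace the robotframework browser tests with splinter-based ones
  and add Python 3.6 to the Travis matrix

The diff of the new searx/url_utils.py module is not shown in this
patch excerpt; as a minimal sketch, it is expected to be an import
shim along these lines (listing only the names the engines below
import from it):

    # searx/url_utils.py -- sketch of the Python 2/3 import shim
    from sys import version_info

    if version_info[0] == 2:
        from urllib import quote, urlencode
        from urlparse import parse_qsl, urljoin, urlparse, ParseResult
    else:
        from urllib.parse import (parse_qsl, quote, urlencode,
                                  urljoin, urlparse, ParseResult)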
---
.travis.yml | 5 +-
requirements-dev.txt | 3 +-
searx/answerers/__init__.py | 12 +-
searx/answerers/random/answerer.py | 13 +-
searx/answerers/statistics/answerer.py | 16 +-
searx/autocomplete.py | 6 +-
searx/engines/1337x.py | 3 +-
searx/engines/__init__.py | 5 +-
searx/engines/archlinux.py | 3 +-
searx/engines/base.py | 6 +-
searx/engines/bing.py | 2 +-
searx/engines/bing_images.py | 2 +-
searx/engines/bing_news.py | 5 +-
searx/engines/blekko_images.py | 2 +-
searx/engines/btdigg.py | 5 +-
searx/engines/currency_convert.py | 14 +-
searx/engines/dailymotion.py | 3 +-
searx/engines/deezer.py | 5 +-
searx/engines/deviantart.py | 2 +-
searx/engines/dictzone.py | 6 +-
searx/engines/digbt.py | 8 +-
searx/engines/digg.py | 4 +-
searx/engines/doku.py | 2 +-
searx/engines/duckduckgo.py | 2 +-
searx/engines/duckduckgo_definitions.py | 6 +-
searx/engines/faroo.py | 2 +-
searx/engines/fdroid.py | 7 +-
searx/engines/filecrop.py | 11 +-
searx/engines/flickr.py | 2 +-
searx/engines/flickr_noapi.py | 2 +-
searx/engines/framalibre.py | 4 +-
searx/engines/frinkiac.py | 2 +-
searx/engines/gigablast.py | 3 +-
searx/engines/github.py | 2 +-
searx/engines/google.py | 5 +-
searx/engines/google_images.py | 2 +-
searx/engines/google_news.py | 3 +-
searx/engines/ina.py | 10 +-
searx/engines/json_engine.py | 11 +-
searx/engines/kickass.py | 3 +-
searx/engines/mediawiki.py | 2 +-
searx/engines/mixcloud.py | 2 +-
searx/engines/nyaa.py | 2 +-
searx/engines/openstreetmap.py | 4 -
searx/engines/photon.py | 2 +-
searx/engines/piratebay.py | 3 +-
searx/engines/qwant.py | 3 +-
searx/engines/reddit.py | 6 +-
searx/engines/scanr_structures.py | 4 +-
searx/engines/searchcode_code.py | 5 +-
searx/engines/searchcode_doc.py | 5 +-
searx/engines/seedpeer.py | 4 +-
searx/engines/soundcloud.py | 19 ++-
searx/engines/spotify.py | 5 +-
searx/engines/stackoverflow.py | 6 +-
searx/engines/startpage.py | 2 +-
searx/engines/subtitleseeker.py | 2 +-
searx/engines/swisscows.py | 27 ++--
searx/engines/tokyotoshokan.py | 11 +-
searx/engines/torrentz.py | 8 +-
searx/engines/translated.py | 4 +
searx/engines/twitter.py | 3 +-
searx/engines/vimeo.py | 2 +-
searx/engines/wikidata.py | 13 +-
searx/engines/wikipedia.py | 21 ++-
searx/engines/wolframalpha_api.py | 13 +-
searx/engines/wolframalpha_noapi.py | 9 +-
searx/engines/www1x.py | 6 +-
searx/engines/www500px.py | 3 +-
searx/engines/xpath.py | 4 +-
searx/engines/yacy.py | 2 +-
searx/engines/yahoo.py | 3 +-
searx/engines/yahoo_news.py | 6 +-
searx/engines/yandex.py | 4 +-
searx/engines/youtube_api.py | 2 +-
searx/engines/youtube_noapi.py | 2 +-
searx/plugins/__init__.py | 5 +-
searx/plugins/doai_rewrite.py | 2 +-
searx/plugins/https_rewrite.py | 5 +-
searx/plugins/self_info.py | 4 +-
searx/plugins/tracker_url_remover.py | 2 +-
searx/preferences.py | 18 +--
searx/query.py | 8 +-
searx/results.py | 6 +-
searx/search.py | 12 +-
searx/settings_robot.yml | 2 +-
searx/templates/courgette/404.html | 2 +-
searx/templates/legacy/404.html | 2 +-
searx/templates/oscar/404.html | 2 +-
searx/templates/pix-art/404.html | 2 +-
searx/testing.py | 42 ++++--
searx/url_utils.py | 28 ++++
searx/utils.py | 26 +++-
searx/webapp.py | 36 +++--
tests/robot/__init__.py | 75 ++++++++++
tests/robot/test_basic.robot | 153 --------------------
tests/unit/engines/test_archlinux.py | 4 +-
tests/unit/engines/test_bing.py | 6 +-
tests/unit/engines/test_bing_news.py | 12 +-
tests/unit/engines/test_btdigg.py | 12 +-
tests/unit/engines/test_currency_convert.py | 4 +-
tests/unit/engines/test_digbt.py | 4 +-
tests/unit/engines/test_duckduckgo.py | 3 +-
tests/unit/engines/test_frinkiac.py | 5 +-
tests/unit/engines/test_gigablast.py | 1 +
tests/unit/engines/test_soundcloud.py | 2 +-
tests/unit/engines/test_startpage.py | 6 +-
tests/unit/engines/test_swisscows.py | 8 +-
tests/unit/engines/test_tokyotoshokan.py | 2 +-
tests/unit/engines/test_wikidata.py | 3 +-
tests/unit/engines/test_wikipedia.py | 18 +--
tests/unit/engines/test_wolframalpha_api.py | 10 +-
tests/unit/test_plugins.py | 16 +-
tests/unit/test_utils.py | 8 +-
tests/unit/test_webapp.py | 46 +++---
115 files changed, 517 insertions(+), 513 deletions(-)
create mode 100644 searx/url_utils.py
delete mode 100644 tests/robot/test_basic.robot
diff --git a/.travis.yml b/.travis.yml
index 0a174ff66..b6017cd93 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -9,6 +9,7 @@ addons:
language: python
python:
- "2.7"
+ - "3.6"
before_install:
- "export DISPLAY=:99.0"
- "sh -e /etc/init.d/xvfb start"
@@ -24,9 +25,9 @@ script:
- ./manage.sh styles
- ./manage.sh grunt_build
- ./manage.sh tests
- - ./manage.sh py_test_coverage
after_success:
- coveralls
+ - ./manage.sh py_test_coverage
+ - coveralls
notifications:
irc:
channels:
diff --git a/requirements-dev.txt b/requirements-dev.txt
index 01d1e1497..691a1e7ba 100644
--- a/requirements-dev.txt
+++ b/requirements-dev.txt
@@ -3,8 +3,7 @@ mock==2.0.0
nose2[coverage-plugin]
pep8==1.7.0
plone.testing==5.0.0
-robotframework-selenium2library==1.8.0
-robotsuite==1.7.0
+splinter==0.7.5
transifex-client==0.12.2
unittest2==1.1.0
zope.testrunner==4.5.1
diff --git a/searx/answerers/__init__.py b/searx/answerers/__init__.py
index 8f5951c75..444316f11 100644
--- a/searx/answerers/__init__.py
+++ b/searx/answerers/__init__.py
@@ -1,8 +1,12 @@
from os import listdir
from os.path import realpath, dirname, join, isdir
+from sys import version_info
from searx.utils import load_module
from collections import defaultdict
+if version_info[0] == 3:
+ unicode = str
+
answerers_dir = dirname(realpath(__file__))
@@ -10,7 +14,7 @@ answerers_dir = dirname(realpath(__file__))
def load_answerers():
answerers = []
for filename in listdir(answerers_dir):
- if not isdir(join(answerers_dir, filename)):
+ if not isdir(join(answerers_dir, filename)) or filename.startswith('_'):
continue
module = load_module('answerer.py', join(answerers_dir, filename))
if not hasattr(module, 'keywords') or not isinstance(module.keywords, tuple) or not len(module.keywords):
@@ -30,12 +34,12 @@ def get_answerers_by_keywords(answerers):
def ask(query):
results = []
- query_parts = filter(None, query.query.split())
+ query_parts = list(filter(None, query.query.split()))
- if query_parts[0] not in answerers_by_keywords:
+ if query_parts[0].decode('utf-8') not in answerers_by_keywords:
return results
- for answerer in answerers_by_keywords[query_parts[0]]:
+ for answerer in answerers_by_keywords[query_parts[0].decode('utf-8')]:
result = answerer(query)
if result:
results.append(result)
diff --git a/searx/answerers/random/answerer.py b/searx/answerers/random/answerer.py
index 510d9f5be..f2b8bf3e5 100644
--- a/searx/answerers/random/answerer.py
+++ b/searx/answerers/random/answerer.py
@@ -1,5 +1,6 @@
import random
import string
+import sys
from flask_babel import gettext
# required answerer attribute
@@ -8,7 +9,11 @@ keywords = ('random',)
random_int_max = 2**31
-random_string_letters = string.lowercase + string.digits + string.uppercase
+if sys.version_info[0] == 2:
+ random_string_letters = string.lowercase + string.digits + string.uppercase
+else:
+ unicode = str
+ random_string_letters = string.ascii_lowercase + string.digits + string.ascii_uppercase
def random_string():
@@ -24,9 +29,9 @@ def random_int():
return unicode(random.randint(-random_int_max, random_int_max))
-random_types = {u'string': random_string,
- u'int': random_int,
- u'float': random_float}
+random_types = {b'string': random_string,
+ b'int': random_int,
+ b'float': random_float}
# required answerer function
diff --git a/searx/answerers/statistics/answerer.py b/searx/answerers/statistics/answerer.py
index a04695f56..73dd25cfd 100644
--- a/searx/answerers/statistics/answerer.py
+++ b/searx/answerers/statistics/answerer.py
@@ -1,8 +1,12 @@
+from sys import version_info
from functools import reduce
from operator import mul
from flask_babel import gettext
+if version_info[0] == 3:
+ unicode = str
+
keywords = ('min',
'max',
'avg',
@@ -19,22 +23,22 @@ def answer(query):
return []
try:
- args = map(float, parts[1:])
+ args = list(map(float, parts[1:]))
except:
return []
func = parts[0]
answer = None
- if func == 'min':
+ if func == b'min':
answer = min(args)
- elif func == 'max':
+ elif func == b'max':
answer = max(args)
- elif func == 'avg':
+ elif func == b'avg':
answer = sum(args) / len(args)
- elif func == 'sum':
+ elif func == b'sum':
answer = sum(args)
- elif func == 'prod':
+ elif func == b'prod':
answer = reduce(mul, args, 1)
if answer is None:
diff --git a/searx/autocomplete.py b/searx/autocomplete.py
index b360af9f6..de0623a8a 100644
--- a/searx/autocomplete.py
+++ b/searx/autocomplete.py
@@ -18,7 +18,6 @@ along with searx. If not, see < http://www.gnu.org/licenses/ >.
from lxml import etree
from json import loads
-from urllib import urlencode
from searx import settings
from searx.languages import language_codes
from searx.engines import (
@@ -26,6 +25,11 @@ from searx.engines import (
)
from searx.poolrequests import get as http_get
+try:
+ from urllib import urlencode
+except:
+ from urllib.parse import urlencode
+
def get(*args, **kwargs):
if 'timeout' not in kwargs:
diff --git a/searx/engines/1337x.py b/searx/engines/1337x.py
index c6bc3cb6d..0de04bd95 100644
--- a/searx/engines/1337x.py
+++ b/searx/engines/1337x.py
@@ -1,8 +1,7 @@
-from urllib import quote
from lxml import html
from searx.engines.xpath import extract_text
from searx.utils import get_torrent_size
-from urlparse import urljoin
+from searx.url_utils import quote, urljoin
url = 'https://1337x.to/'
search_url = url + 'search/{search_term}/{pageno}/'
diff --git a/searx/engines/__init__.py b/searx/engines/__init__.py
index 77184a282..023ec409a 100644
--- a/searx/engines/__init__.py
+++ b/searx/engines/__init__.py
@@ -72,12 +72,11 @@ def load_engine(engine_data):
if engine_data['categories'] == 'none':
engine.categories = []
else:
- engine.categories = map(
- str.strip, engine_data['categories'].split(','))
+ engine.categories = list(map(str.strip, engine_data['categories'].split(',')))
continue
setattr(engine, param_name, engine_data[param_name])
- for arg_name, arg_value in engine_default_args.iteritems():
+ for arg_name, arg_value in engine_default_args.items():
if not hasattr(engine, arg_name):
setattr(engine, arg_name, arg_value)
diff --git a/searx/engines/archlinux.py b/searx/engines/archlinux.py
index dca825790..cad06f8c6 100644
--- a/searx/engines/archlinux.py
+++ b/searx/engines/archlinux.py
@@ -11,10 +11,9 @@
@parse url, title
"""
-from urlparse import urljoin
-from urllib import urlencode
from lxml import html
from searx.engines.xpath import extract_text
+from searx.url_utils import urlencode, urljoin
# engine dependent config
categories = ['it']
diff --git a/searx/engines/base.py b/searx/engines/base.py
index a552453ce..ff006a3bc 100755
--- a/searx/engines/base.py
+++ b/searx/engines/base.py
@@ -14,10 +14,10 @@
"""
from lxml import etree
-from urllib import urlencode
-from searx.utils import searx_useragent
from datetime import datetime
import re
+from searx.url_utils import urlencode
+from searx.utils import searx_useragent
categories = ['science']
@@ -73,7 +73,7 @@ def request(query, params):
def response(resp):
results = []
- search_results = etree.XML(resp.content)
+ search_results = etree.XML(resp.text)
for entry in search_results.xpath('./result/doc'):
content = "No description available"
diff --git a/searx/engines/bing.py b/searx/engines/bing.py
index 4e7ead82d..052d567ea 100644
--- a/searx/engines/bing.py
+++ b/searx/engines/bing.py
@@ -13,9 +13,9 @@
@todo publishedDate
"""
-from urllib import urlencode
from lxml import html
from searx.engines.xpath import extract_text
+from searx.url_utils import urlencode
# engine dependent config
categories = ['general']
diff --git a/searx/engines/bing_images.py b/searx/engines/bing_images.py
index 97f6dca37..e79740e50 100644
--- a/searx/engines/bing_images.py
+++ b/searx/engines/bing_images.py
@@ -15,11 +15,11 @@
limited response to 10 images
"""
-from urllib import urlencode
from lxml import html
from json import loads
import re
from searx.engines.bing import _fetch_supported_languages, supported_languages_url
+from searx.url_utils import urlencode
# engine dependent config
categories = ['images']
diff --git a/searx/engines/bing_news.py b/searx/engines/bing_news.py
index 765bcd38e..8e3cc517e 100644
--- a/searx/engines/bing_news.py
+++ b/searx/engines/bing_news.py
@@ -11,13 +11,12 @@
@parse url, title, content, publishedDate, thumbnail
"""
-from urllib import urlencode
-from urlparse import urlparse, parse_qsl
from datetime import datetime
from dateutil import parser
from lxml import etree
from searx.utils import list_get
from searx.engines.bing import _fetch_supported_languages, supported_languages_url
+from searx.url_utils import urlencode, urlparse, parse_qsl
# engine dependent config
categories = ['news']
@@ -86,7 +85,7 @@ def request(query, params):
def response(resp):
results = []
- rss = etree.fromstring(resp.content)
+ rss = etree.fromstring(resp.text)
ns = rss.nsmap
diff --git a/searx/engines/blekko_images.py b/searx/engines/blekko_images.py
index c0664f390..f71645634 100644
--- a/searx/engines/blekko_images.py
+++ b/searx/engines/blekko_images.py
@@ -11,7 +11,7 @@
"""
from json import loads
-from urllib import urlencode
+from searx.url_utils import urlencode
# engine dependent config
categories = ['images']
diff --git a/searx/engines/btdigg.py b/searx/engines/btdigg.py
index 33c8355de..40438673f 100644
--- a/searx/engines/btdigg.py
+++ b/searx/engines/btdigg.py
@@ -10,11 +10,10 @@
@parse url, title, content, seed, leech, magnetlink
"""
-from urlparse import urljoin
-from urllib import quote
from lxml import html
from operator import itemgetter
from searx.engines.xpath import extract_text
+from searx.url_utils import quote, urljoin
from searx.utils import get_torrent_size
# engine dependent config
@@ -38,7 +37,7 @@ def request(query, params):
def response(resp):
results = []
- dom = html.fromstring(resp.content)
+ dom = html.fromstring(resp.text)
search_res = dom.xpath('//div[@id="search_res"]/table/tr')
diff --git a/searx/engines/currency_convert.py b/searx/engines/currency_convert.py
index bc839cfb5..1218d4849 100644
--- a/searx/engines/currency_convert.py
+++ b/searx/engines/currency_convert.py
@@ -1,21 +1,25 @@
-from datetime import datetime
+import json
import re
import os
-import json
+import sys
import unicodedata
+from datetime import datetime
+
+if sys.version_info[0] == 3:
+ unicode = str
categories = []
url = 'https://download.finance.yahoo.com/d/quotes.csv?e=.csv&f=sl1d1t1&s={query}=X'
weight = 100
-parser_re = re.compile(u'.*?(\\d+(?:\\.\\d+)?) ([^.0-9]+) (?:in|to) ([^.0-9]+)', re.I) # noqa
+parser_re = re.compile(b'.*?(\\d+(?:\\.\\d+)?) ([^.0-9]+) (?:in|to) ([^.0-9]+)', re.I)
db = 1
def normalize_name(name):
- name = name.lower().replace('-', ' ').rstrip('s')
+ name = name.decode('utf-8').lower().replace('-', ' ').rstrip('s')
name = re.sub(' +', ' ', name)
return unicodedata.normalize('NFKD', name).lower()
@@ -35,7 +39,7 @@ def iso4217_to_name(iso4217, language):
def request(query, params):
- m = parser_re.match(unicode(query, 'utf8'))
+ m = parser_re.match(query)
if not m:
# wrong query
return params
diff --git a/searx/engines/dailymotion.py b/searx/engines/dailymotion.py
index 8c69aafe0..fad7e596c 100644
--- a/searx/engines/dailymotion.py
+++ b/searx/engines/dailymotion.py
@@ -12,10 +12,9 @@
@todo set content-parameter with correct data
"""
-from urllib import urlencode
from json import loads
from datetime import datetime
-from requests import get
+from searx.url_utils import urlencode
# engine dependent config
categories = ['videos']
diff --git a/searx/engines/deezer.py b/searx/engines/deezer.py
index 3db1af3d2..af63478fb 100644
--- a/searx/engines/deezer.py
+++ b/searx/engines/deezer.py
@@ -11,7 +11,7 @@
"""
from json import loads
-from urllib import urlencode
+from searx.url_utils import urlencode
# engine dependent config
categories = ['music']
@@ -30,8 +30,7 @@ embedded_url = ''
diff --git a/searx/engines/soundcloud.py b/searx/engines/soundcloud.py
--- a/searx/engines/soundcloud.py
+++ b/searx/engines/soundcloud.py
+cid_re = re.compile(r'client_id:"([^"]*)"', re.I | re.U)
+
def get_client_id():
response = http_get("https://soundcloud.com")
- rx_namespace = {"re": "http://exslt.org/regular-expressions"}
if response.ok:
- tree = etree.parse(StringIO(response.content), etree.HTMLParser())
- script_tags = tree.xpath("//script[re:match(@src, '(.*app.*js)')]", namespaces=rx_namespace)
+ tree = html.fromstring(response.content)
+ script_tags = tree.xpath("//script[contains(@src, '/assets/app')]")
app_js_urls = [script_tag.get('src') for script_tag in script_tags if script_tag is not None]
# extracts valid app_js urls from soundcloud.com content
@@ -51,7 +56,7 @@ def get_client_id():
# gets app_js and searches for the clientid
response = http_get(app_js_url)
if response.ok:
- cids = re.search(r'client_id:"([^"]*)"', response.content, re.M | re.I)
+ cids = cid_re.search(response.text)
if cids is not None and len(cids.groups()):
return cids.groups()[0]
logger.warning("Unable to fetch guest client_id from SoundCloud, check parser!")
diff --git a/searx/engines/spotify.py b/searx/engines/spotify.py
index 249ba91ef..aed756be3 100644
--- a/searx/engines/spotify.py
+++ b/searx/engines/spotify.py
@@ -11,7 +11,7 @@
"""
from json import loads
-from urllib import urlencode
+from searx.url_utils import urlencode
# engine dependent config
categories = ['music']
@@ -29,8 +29,7 @@ embedded_url = '
diff --git a/tests/unit/engines/test_startpage.py b/tests/unit/engines/test_startpage.py
--- a/tests/unit/engines/test_startpage.py
+++ b/tests/unit/engines/test_startpage.py
"""
- response = mock.Mock(content=html)
+ response = mock.Mock(text=html.encode('utf-8'))
results = startpage.response(response)
self.assertEqual(type(results), list)
self.assertEqual(len(results), 1)
@@ -133,7 +133,7 @@ class TestStartpageEngine(SearxTestCase):
"""
- response = mock.Mock(content=html)
+ response = mock.Mock(text=html.encode('utf-8'))
results = startpage.response(response)
self.assertEqual(type(results), list)
self.assertEqual(len(results), 1)
diff --git a/tests/unit/engines/test_swisscows.py b/tests/unit/engines/test_swisscows.py
index 27f33d70a..53890be78 100644
--- a/tests/unit/engines/test_swisscows.py
+++ b/tests/unit/engines/test_swisscows.py
@@ -33,13 +33,13 @@ class TestSwisscowsEngine(SearxTestCase):
self.assertRaises(AttributeError, swisscows.response, '')
self.assertRaises(AttributeError, swisscows.response, '[]')
- response = mock.Mock(content='')
+ response = mock.Mock(text=b'')
self.assertEqual(swisscows.response(response), [])
- response = mock.Mock(content='')
+ response = mock.Mock(text=b'')
self.assertEqual(swisscows.response(response), [])
- html = u"""
+ html = b"""
"""
- response = mock.Mock(content=html)
+ response = mock.Mock(text=html)
results = swisscows.response(response)
self.assertEqual(type(results), list)
self.assertEqual(len(results), 3)
diff --git a/tests/unit/engines/test_tokyotoshokan.py b/tests/unit/engines/test_tokyotoshokan.py
index efe7dbfc2..b5c6fad17 100644
--- a/tests/unit/engines/test_tokyotoshokan.py
+++ b/tests/unit/engines/test_tokyotoshokan.py
@@ -91,7 +91,7 @@ class TestTokyotoshokanEngine(SearxTestCase):
self.assertEqual(r['title'], 'Koyomimonogatari')
self.assertEqual(r['magnetlink'], 'magnet:?xt=urn:btih:4c19eb46b5113685fbd2288ed2531b0b')
self.assertEqual(r['filesize'], int(1024 * 1024 * 10.5))
- self.assertEqual(r['publishedDate'], datetime(2016, 03, 26, 16, 41))
+ self.assertEqual(r['publishedDate'], datetime(2016, 3, 26, 16, 41))
self.assertEqual(r['content'], 'Comment: sample comment')
self.assertEqual(r['seed'], 53)
self.assertEqual(r['leech'], 18)
diff --git a/tests/unit/engines/test_wikidata.py b/tests/unit/engines/test_wikidata.py
index ec5f52ef9..aa69f116e 100644
--- a/tests/unit/engines/test_wikidata.py
+++ b/tests/unit/engines/test_wikidata.py
@@ -1,5 +1,4 @@
# -*- coding: utf-8 -*-
-from json import loads
from lxml.html import fromstring
from collections import defaultdict
import mock
@@ -31,7 +30,7 @@ class TestWikidataEngine(SearxTestCase):
self.assertRaises(AttributeError, wikidata.response, '')
self.assertRaises(AttributeError, wikidata.response, '[]')
- response = mock.Mock(content='', search_params={"language": "all"})
+ response = mock.Mock(text='', search_params={"language": "all"})
self.assertEqual(wikidata.response(response), [])
def test_getDetail(self):
diff --git a/tests/unit/engines/test_wikipedia.py b/tests/unit/engines/test_wikipedia.py
index 988080b6a..7a86514c7 100644
--- a/tests/unit/engines/test_wikipedia.py
+++ b/tests/unit/engines/test_wikipedia.py
@@ -13,15 +13,15 @@ class TestWikipediaEngine(SearxTestCase):
query = 'test_query'
dicto = defaultdict(dict)
dicto['language'] = 'fr-FR'
- params = wikipedia.request(query, dicto)
+ params = wikipedia.request(query.encode('utf-8'), dicto)
self.assertIn('url', params)
self.assertIn(query, params['url'])
self.assertIn('test_query', params['url'])
self.assertIn('Test_Query', params['url'])
self.assertIn('fr.wikipedia.org', params['url'])
- query = 'Test_Query'
- params = wikipedia.request(query, dicto)
+ query = u'Test_Query'
+ params = wikipedia.request(query.encode('utf-8'), dicto)
self.assertIn('Test_Query', params['url'])
self.assertNotIn('test_query', params['url'])
@@ -57,7 +57,7 @@ class TestWikipediaEngine(SearxTestCase):
}
}
}"""
- response = mock.Mock(content=json, search_params=dicto)
+ response = mock.Mock(text=json, search_params=dicto)
self.assertEqual(wikipedia.response(response), [])
# normal case
@@ -80,7 +80,7 @@ class TestWikipediaEngine(SearxTestCase):
}
}
}"""
- response = mock.Mock(content=json, search_params=dicto)
+ response = mock.Mock(text=json, search_params=dicto)
results = wikipedia.response(response)
self.assertEqual(type(results), list)
self.assertEqual(len(results), 2)
@@ -108,10 +108,10 @@ class TestWikipediaEngine(SearxTestCase):
}
}
}"""
- response = mock.Mock(content=json, search_params=dicto)
+ response = mock.Mock(text=json, search_params=dicto)
results = wikipedia.response(response)
self.assertEqual(type(results), list)
- self.assertEqual(len(results), 0)
+ self.assertEqual(len(results), 2)
# no image
json = """
@@ -130,7 +130,7 @@ class TestWikipediaEngine(SearxTestCase):
}
}
}"""
- response = mock.Mock(content=json, search_params=dicto)
+ response = mock.Mock(text=json, search_params=dicto)
results = wikipedia.response(response)
self.assertEqual(type(results), list)
self.assertEqual(len(results), 2)
@@ -158,7 +158,7 @@ class TestWikipediaEngine(SearxTestCase):
}
}
}"""
- response = mock.Mock(content=json, search_params=dicto)
+ response = mock.Mock(text=json, search_params=dicto)
results = wikipedia.response(response)
self.assertEqual(type(results), list)
self.assertEqual(len(results), 2)
diff --git a/tests/unit/engines/test_wolframalpha_api.py b/tests/unit/engines/test_wolframalpha_api.py
index 64a64ceb3..30d337645 100644
--- a/tests/unit/engines/test_wolframalpha_api.py
+++ b/tests/unit/engines/test_wolframalpha_api.py
@@ -35,11 +35,11 @@ class TestWolframAlphaAPIEngine(SearxTestCase):
xml = '''
'''
- response = mock.Mock(content=xml)
+ response = mock.Mock(text=xml.encode('utf-8'))
self.assertEqual(wolframalpha_api.response(response), [])
# test basic case
- xml = """
+ xml = b"""
"""
- response = mock.Mock(content=xml, request=request)
+ response = mock.Mock(text=xml, request=request)
results = wolframalpha_api.response(response)
self.assertEqual(type(results), list)
self.assertEqual(len(results), 2)
@@ -107,7 +107,7 @@ class TestWolframAlphaAPIEngine(SearxTestCase):
self.assertIn('result_plaintext', results[1]['content'])
# test calc
- xml = """
+ xml = b"""
"""
- response = mock.Mock(content=xml, request=request)
+ response = mock.Mock(text=xml, request=request)
results = wolframalpha_api.response(response)
self.assertEqual(type(results), list)
self.assertEqual(len(results), 2)
diff --git a/tests/unit/test_plugins.py b/tests/unit/test_plugins.py
index 78dcea478..e497371f8 100644
--- a/tests/unit/test_plugins.py
+++ b/tests/unit/test_plugins.py
@@ -48,11 +48,11 @@ class SelfIPTest(SearxTestCase):
# IP test
request = Mock(remote_addr='127.0.0.1')
request.headers.getlist.return_value = []
- search = get_search_mock(query='ip', pageno=1)
+ search = get_search_mock(query=b'ip', pageno=1)
store.call(store.plugins, 'post_search', request, search)
self.assertTrue('127.0.0.1' in search.result_container.answers)
- search = get_search_mock(query='ip', pageno=2)
+ search = get_search_mock(query=b'ip', pageno=2)
store.call(store.plugins, 'post_search', request, search)
self.assertFalse('127.0.0.1' in search.result_container.answers)
@@ -60,26 +60,26 @@ class SelfIPTest(SearxTestCase):
request = Mock(user_agent='Mock')
request.headers.getlist.return_value = []
- search = get_search_mock(query='user-agent', pageno=1)
+ search = get_search_mock(query=b'user-agent', pageno=1)
store.call(store.plugins, 'post_search', request, search)
self.assertTrue('Mock' in search.result_container.answers)
- search = get_search_mock(query='user-agent', pageno=2)
+ search = get_search_mock(query=b'user-agent', pageno=2)
store.call(store.plugins, 'post_search', request, search)
self.assertFalse('Mock' in search.result_container.answers)
- search = get_search_mock(query='user-agent', pageno=1)
+ search = get_search_mock(query=b'user-agent', pageno=1)
store.call(store.plugins, 'post_search', request, search)
self.assertTrue('Mock' in search.result_container.answers)
- search = get_search_mock(query='user-agent', pageno=2)
+ search = get_search_mock(query=b'user-agent', pageno=2)
store.call(store.plugins, 'post_search', request, search)
self.assertFalse('Mock' in search.result_container.answers)
- search = get_search_mock(query='What is my User-Agent?', pageno=1)
+ search = get_search_mock(query=b'What is my User-Agent?', pageno=1)
store.call(store.plugins, 'post_search', request, search)
self.assertTrue('Mock' in search.result_container.answers)
- search = get_search_mock(query='What is my User-Agent?', pageno=2)
+ search = get_search_mock(query=b'What is my User-Agent?', pageno=2)
store.call(store.plugins, 'post_search', request, search)
self.assertFalse('Mock' in search.result_container.answers)
diff --git a/tests/unit/test_utils.py b/tests/unit/test_utils.py
index 04480791d..eb40e62e2 100644
--- a/tests/unit/test_utils.py
+++ b/tests/unit/test_utils.py
@@ -1,8 +1,12 @@
# -*- coding: utf-8 -*-
import mock
+import sys
from searx.testing import SearxTestCase
from searx import utils
+if sys.version_info[0] == 3:
+ unicode = str
+
class TestUtils(SearxTestCase):
@@ -30,9 +34,9 @@ class TestUtils(SearxTestCase):
self.assertEqual(utils.highlight_content(content, None), content)
content = 'a'
- query = 'test'
+ query = b'test'
self.assertEqual(utils.highlight_content(content, query), content)
- query = 'a test'
+ query = b'a test'
self.assertEqual(utils.highlight_content(content, query), content)
def test_html_to_text(self):
diff --git a/tests/unit/test_webapp.py b/tests/unit/test_webapp.py
index 5e5f0b4bf..45a08c1ba 100644
--- a/tests/unit/test_webapp.py
+++ b/tests/unit/test_webapp.py
@@ -2,10 +2,10 @@
import json
from mock import Mock
-from urlparse import ParseResult
from searx import webapp
from searx.testing import SearxTestCase
from searx.search import Search
+from searx.url_utils import ParseResult
class ViewsTestCase(SearxTestCase):
@@ -57,37 +57,35 @@ class ViewsTestCase(SearxTestCase):
def test_index_empty(self):
result = self.app.post('/')
self.assertEqual(result.status_code, 200)
- self.assertIn('searx', result.data)
+ self.assertIn(b'searx', result.data)
def test_index_html(self):
result = self.app.post('/', data={'q': 'test'})
self.assertIn(
- '', # noqa
+ b'', # noqa
result.data
)
self.assertIn(
- 'first test content', # noqa
+ b'first test content', # noqa
result.data
)
def test_index_json(self):
result = self.app.post('/', data={'q': 'test', 'format': 'json'})
- result_dict = json.loads(result.data)
+ result_dict = json.loads(result.data.decode('utf-8'))
self.assertEqual('test', result_dict['query'])
- self.assertEqual(
- result_dict['results'][0]['content'], 'first test content')
- self.assertEqual(
- result_dict['results'][0]['url'], 'http://first.test.xyz')
+ self.assertEqual(result_dict['results'][0]['content'], 'first test content')
+ self.assertEqual(result_dict['results'][0]['url'], 'http://first.test.xyz')
def test_index_csv(self):
result = self.app.post('/', data={'q': 'test', 'format': 'csv'})
self.assertEqual(
- 'title,url,content,host,engine,score\r\n'
- 'First Test,http://first.test.xyz,first test content,first.test.xyz,startpage,\r\n' # noqa
- 'Second Test,http://second.test.xyz,second test content,second.test.xyz,youtube,\r\n', # noqa
+ b'title,url,content,host,engine,score\r\n'
+ b'First Test,http://first.test.xyz,first test content,first.test.xyz,startpage,\r\n' # noqa
+ b'Second Test,http://second.test.xyz,second test content,second.test.xyz,youtube,\r\n', # noqa
result.data
)
@@ -95,65 +93,65 @@ class ViewsTestCase(SearxTestCase):
result = self.app.post('/', data={'q': 'test', 'format': 'rss'})
self.assertIn(
- 'Search results for "test" - searx',
+ b'Search results for "test" - searx',
result.data
)
self.assertIn(
- '3',
+ b'3',
result.data
)
self.assertIn(
- 'First Test',
+ b'First Test',
result.data
)
self.assertIn(
- 'http://first.test.xyz',
+ b'http://first.test.xyz',
result.data
)
self.assertIn(
- 'first test content',
+ b'first test content',
result.data
)
def test_about(self):
result = self.app.get('/about')
self.assertEqual(result.status_code, 200)
- self.assertIn('', result.data)
+ self.assertIn(b'', result.data)
def test_preferences(self):
result = self.app.get('/preferences')
self.assertEqual(result.status_code, 200)
self.assertIn(
- '