Merge pull request #308 from dalf/versions_upgrade

update versions.cfg to use the current up-to-date packages
This commit is contained in:
Adam Tauber 2015-05-02 14:58:32 -04:00
commit 93fd1e4c76
40 changed files with 484 additions and 396 deletions

View File

@ -28,7 +28,7 @@ from searx.poolrequests import get as http_get
def get(*args, **kwargs): def get(*args, **kwargs):
if not 'timeout' in kwargs: if 'timeout' not in kwargs:
kwargs['timeout'] = settings['server']['request_timeout'] kwargs['timeout'] = settings['server']['request_timeout']
return http_get(*args, **kwargs) return http_get(*args, **kwargs)

View File

@ -1,15 +1,17 @@
## Bing (Web) """
# Bing (Web)
# @website https://www.bing.com
# @provide-api yes (http://datamarket.azure.com/dataset/bing/search), @website https://www.bing.com
# max. 5000 query/month @provide-api yes (http://datamarket.azure.com/dataset/bing/search),
# max. 5000 query/month
# @using-api no (because of query limit)
# @results HTML (using search portal) @using-api no (because of query limit)
# @stable no (HTML can change) @results HTML (using search portal)
# @parse url, title, content @stable no (HTML can change)
# @parse url, title, content
# @todo publishedDate
@todo publishedDate
"""
from urllib import urlencode from urllib import urlencode
from cgi import escape from cgi import escape

View File

@ -1,17 +1,19 @@
## Bing (Images) """
# Bing (Images)
# @website https://www.bing.com/images
# @provide-api yes (http://datamarket.azure.com/dataset/bing/search), @website https://www.bing.com/images
# max. 5000 query/month @provide-api yes (http://datamarket.azure.com/dataset/bing/search),
# max. 5000 query/month
# @using-api no (because of query limit)
# @results HTML (using search portal) @using-api no (because of query limit)
# @stable no (HTML can change) @results HTML (using search portal)
# @parse url, title, img_src @stable no (HTML can change)
# @parse url, title, img_src
# @todo currently there are up to 35 images received per page,
# because bing does not parse count=10. @todo currently there are up to 35 images received per page,
# limited response to 10 images because bing does not parse count=10.
limited response to 10 images
"""
from urllib import urlencode from urllib import urlencode
from lxml import html from lxml import html
@ -76,7 +78,7 @@ def response(resp):
title = link.attrib.get('t1') title = link.attrib.get('t1')
ihk = link.attrib.get('ihk') ihk = link.attrib.get('ihk')
#url = 'http://' + link.attrib.get('t3') # url = 'http://' + link.attrib.get('t3')
url = yaml_data.get('surl') url = yaml_data.get('surl')
img_src = yaml_data.get('imgurl') img_src = yaml_data.get('imgurl')

View File

@ -1,13 +1,15 @@
## Bing (News) """
# Bing (News)
# @website https://www.bing.com/news
# @provide-api yes (http://datamarket.azure.com/dataset/bing/search), @website https://www.bing.com/news
# max. 5000 query/month @provide-api yes (http://datamarket.azure.com/dataset/bing/search),
# max. 5000 query/month
# @using-api no (because of query limit)
# @results HTML (using search portal) @using-api no (because of query limit)
# @stable no (HTML can change) @results HTML (using search portal)
# @parse url, title, content, publishedDate @stable no (HTML can change)
@parse url, title, content, publishedDate
"""
from urllib import urlencode from urllib import urlencode
from cgi import escape from cgi import escape
@ -87,6 +89,8 @@ def response(resp):
publishedDate = parser.parse(publishedDate, dayfirst=False) publishedDate = parser.parse(publishedDate, dayfirst=False)
except TypeError: except TypeError:
publishedDate = datetime.now() publishedDate = datetime.now()
except ValueError:
publishedDate = datetime.now()
# append result # append result
results.append({'url': url, results.append({'url': url,

View File

@ -1,12 +1,14 @@
## Blekko (Images) """
# Blekko (Images)
# @website https://blekko.com
# @provide-api yes (unofficial) @website https://blekko.com
# @provide-api yes (unofficial)
# @using-api yes
# @results JSON @using-api yes
# @stable yes @results JSON
# @parse url, title, img_src @stable yes
@parse url, title, img_src
"""
from json import loads from json import loads
from urllib import urlencode from urllib import urlencode

View File

@ -1,12 +1,14 @@
## BTDigg (Videos, Music, Files) """
# BTDigg (Videos, Music, Files)
# @website https://btdigg.org
# @provide-api yes (on demand) @website https://btdigg.org
# @provide-api yes (on demand)
# @using-api no
# @results HTML (using search portal) @using-api no
# @stable no (HTML can change) @results HTML (using search portal)
# @parse url, title, content, seed, leech, magnetlink @stable no (HTML can change)
@parse url, title, content, seed, leech, magnetlink
"""
from urlparse import urljoin from urlparse import urljoin
from cgi import escape from cgi import escape

View File

@ -1,14 +1,16 @@
## Dailymotion (Videos) """
# Dailymotion (Videos)
# @website https://www.dailymotion.com
# @provide-api yes (http://www.dailymotion.com/developer) @website https://www.dailymotion.com
# @provide-api yes (http://www.dailymotion.com/developer)
# @using-api yes
# @results JSON @using-api yes
# @stable yes @results JSON
# @parse url, title, thumbnail, publishedDate, embedded @stable yes
# @parse url, title, thumbnail, publishedDate, embedded
# @todo set content-parameter with correct data
@todo set content-parameter with correct data
"""
from urllib import urlencode from urllib import urlencode
from json import loads from json import loads
@ -48,7 +50,7 @@ def response(resp):
search_res = loads(resp.text) search_res = loads(resp.text)
# return empty array if there are no results # return empty array if there are no results
if not 'list' in search_res: if 'list' not in search_res:
return [] return []
# parse results # parse results

View File

@ -1,12 +1,14 @@
## Deezer (Music) """
# Deezer (Music)
# @website https://deezer.com
# @provide-api yes (http://developers.deezer.com/api/) @website https://deezer.com
# @provide-api yes (http://developers.deezer.com/api/)
# @using-api yes
# @results JSON @using-api yes
# @stable yes @results JSON
# @parse url, title, content, embedded @stable yes
@parse url, title, content, embedded
"""
from json import loads from json import loads
from urllib import urlencode from urllib import urlencode

View File

@ -1,14 +1,16 @@
## Deviantart (Images) """
# Deviantart (Images)
# @website https://www.deviantart.com/
# @provide-api yes (https://www.deviantart.com/developers/) (RSS) @website https://www.deviantart.com/
# @provide-api yes (https://www.deviantart.com/developers/) (RSS)
# @using-api no (TODO, rewrite to api)
# @results HTML @using-api no (TODO, rewrite to api)
# @stable no (HTML can change) @results HTML
# @parse url, title, thumbnail_src, img_src @stable no (HTML can change)
# @parse url, title, thumbnail_src, img_src
# @todo rewrite to api
@todo rewrite to api
"""
from urllib import urlencode from urllib import urlencode
from urlparse import urljoin from urlparse import urljoin

View File

@ -1,12 +1,14 @@
## Digg (News, Social media) """
# Digg (News, Social media)
# @website https://digg.com/
# @provide-api no @website https://digg.com/
# @provide-api no
# @using-api no
# @results HTML (using search portal) @using-api no
# @stable no (HTML can change) @results HTML (using search portal)
# @parse url, title, content, publishedDate, thumbnail @stable no (HTML can change)
@parse url, title, content, publishedDate, thumbnail
"""
from urllib import quote_plus from urllib import quote_plus
from json import loads from json import loads

View File

@ -1,17 +1,19 @@
## DuckDuckGo (Web) """
# DuckDuckGo (Web)
# @website https://duckduckgo.com/
# @provide-api yes (https://duckduckgo.com/api), @website https://duckduckgo.com/
# but not all results from search-site @provide-api yes (https://duckduckgo.com/api),
# but not all results from search-site
# @using-api no
# @results HTML (using search portal) @using-api no
# @stable no (HTML can change) @results HTML (using search portal)
# @parse url, title, content @stable no (HTML can change)
# @parse url, title, content
# @todo rewrite to api
# @todo language support @todo rewrite to api
# (the current used site does not support language-change) @todo language support
(the current used site does not support language-change)
"""
from urllib import urlencode from urllib import urlencode
from lxml.html import fromstring from lxml.html import fromstring

View File

@ -1,7 +1,9 @@
## Dummy """
# Dummy
# @results empty array
# @stable yes @results empty array
@stable yes
"""
# do search-request # do search-request

View File

@ -1,12 +1,14 @@
## Faroo (Web, News) """
# Faroo (Web, News)
# @website http://www.faroo.com
# @provide-api yes (http://www.faroo.com/hp/api/api.html), require API-key @website http://www.faroo.com
# @provide-api yes (http://www.faroo.com/hp/api/api.html), require API-key
# @using-api yes
# @results JSON @using-api yes
# @stable yes @results JSON
# @parse url, title, content, publishedDate, img_src @stable yes
@parse url, title, content, publishedDate, img_src
"""
from urllib import urlencode from urllib import urlencode
from json import loads from json import loads

View File

@ -1,15 +1,17 @@
#!/usr/bin/env python #!/usr/bin/env python
## Flickr (Images) """
# Flickr (Images)
# @website https://www.flickr.com
# @provide-api yes (https://secure.flickr.com/services/api/flickr.photos.search.html) @website https://www.flickr.com
# @provide-api yes (https://secure.flickr.com/services/api/flickr.photos.search.html)
# @using-api yes
# @results JSON @using-api yes
# @stable yes @results JSON
# @parse url, title, thumbnail, img_src @stable yes
#More info on api-key : https://www.flickr.com/services/apps/create/ @parse url, title, thumbnail, img_src
More info on api-key : https://www.flickr.com/services/apps/create/
"""
from urllib import urlencode from urllib import urlencode
from json import loads from json import loads
@ -48,10 +50,10 @@ def response(resp):
search_results = loads(resp.text) search_results = loads(resp.text)
# return empty array if there are no results # return empty array if there are no results
if not 'photos' in search_results: if 'photos' not in search_results:
return [] return []
if not 'photo' in search_results['photos']: if 'photo' not in search_results['photos']:
return [] return []
photos = search_results['photos']['photo'] photos = search_results['photos']['photo']

View File

@ -1,14 +1,16 @@
#!/usr/bin/env python #!/usr/bin/env python
# Flickr (Images) """
# Flickr (Images)
# @website https://www.flickr.com
# @provide-api yes (https://secure.flickr.com/services/api/flickr.photos.search.html) @website https://www.flickr.com
# @provide-api yes (https://secure.flickr.com/services/api/flickr.photos.search.html)
# @using-api no
# @results HTML @using-api no
# @stable no @results HTML
# @parse url, title, thumbnail, img_src @stable no
@parse url, title, thumbnail, img_src
"""
from urllib import urlencode from urllib import urlencode
from json import loads from json import loads

View File

@ -1,14 +1,16 @@
## General Files (Files) """
# General Files (Files)
# @website http://www.general-files.org
# @provide-api no (nothing found) @website http://www.general-files.org
# @provide-api no (nothing found)
# @using-api no (because nothing found)
# @results HTML (using search portal) @using-api no (because nothing found)
# @stable no (HTML can change) @results HTML (using search portal)
# @parse url, title, content @stable no (HTML can change)
# @parse url, title, content
# @todo detect torrents?
@todo detect torrents?
"""
from lxml import html from lxml import html

View File

@ -1,12 +1,14 @@
## Gigablast (Web) """
# Gigablast (Web)
# @website http://gigablast.com
# @provide-api yes (http://gigablast.com/api.html) @website http://gigablast.com
# @provide-api yes (http://gigablast.com/api.html)
# @using-api yes
# @results XML @using-api yes
# @stable yes @results XML
# @parse url, title, content @stable yes
@parse url, title, content
"""
from urllib import urlencode from urllib import urlencode
from cgi import escape from cgi import escape

View File

@ -1,12 +1,14 @@
## Github (It) """
# Github (It)
# @website https://github.com/
# @provide-api yes (https://developer.github.com/v3/) @website https://github.com/
# @provide-api yes (https://developer.github.com/v3/)
# @using-api yes
# @results JSON @using-api yes
# @stable yes (using api) @results JSON
# @parse url, title, content @stable yes (using api)
@parse url, title, content
"""
from urllib import urlencode from urllib import urlencode
from json import loads from json import loads
@ -37,7 +39,7 @@ def response(resp):
search_res = loads(resp.text) search_res = loads(resp.text)
# check if items are received # check if items are received
if not 'items' in search_res: if 'items' not in search_res:
return [] return []
# parse results # parse results

View File

@ -1,13 +1,15 @@
## Google (Images) """
# Google (Images)
# @website https://www.google.com
# @provide-api yes (https://developers.google.com/web-search/docs/), @website https://www.google.com
# deprecated! @provide-api yes (https://developers.google.com/web-search/docs/),
# deprecated!
# @using-api yes
# @results JSON @using-api yes
# @stable yes (but deprecated) @results JSON
# @parse url, title, img_src @stable yes (but deprecated)
@parse url, title, img_src
"""
from urllib import urlencode, unquote from urllib import urlencode, unquote
from json import loads from json import loads

View File

@ -1,13 +1,15 @@
## Google (News) """
# Google (News)
# @website https://www.google.com
# @provide-api yes (https://developers.google.com/web-search/docs/), @website https://www.google.com
# deprecated! @provide-api yes (https://developers.google.com/web-search/docs/),
# deprecated!
# @using-api yes
# @results JSON @using-api yes
# @stable yes (but deprecated) @results JSON
# @parse url, title, content, publishedDate @stable yes (but deprecated)
@parse url, title, content, publishedDate
"""
from urllib import urlencode from urllib import urlencode
from json import loads from json import loads

View File

@ -6,7 +6,7 @@ search_url = None
url_query = None url_query = None
content_query = None content_query = None
title_query = None title_query = None
#suggestion_xpath = '' # suggestion_xpath = ''
def iterate(iterable): def iterate(iterable):

View File

@ -1,12 +1,14 @@
## Kickass Torrent (Videos, Music, Files) """
# Kickass Torrent (Videos, Music, Files)
# @website https://kickass.so
# @provide-api no (nothing found) @website https://kickass.so
# @provide-api no (nothing found)
# @using-api no
# @results HTML (using search portal) @using-api no
# @stable yes (HTML can change) @results HTML (using search portal)
# @parse url, title, content, seed, leech, magnetlink @stable yes (HTML can change)
@parse url, title, content, seed, leech, magnetlink
"""
from urlparse import urljoin from urlparse import urljoin
from cgi import escape from cgi import escape

View File

@ -1,14 +1,16 @@
## general mediawiki-engine (Web) """
# general mediawiki-engine (Web)
# @website websites built on mediawiki (https://www.mediawiki.org)
# @provide-api yes (http://www.mediawiki.org/wiki/API:Search) @website websites built on mediawiki (https://www.mediawiki.org)
# @provide-api yes (http://www.mediawiki.org/wiki/API:Search)
# @using-api yes
# @results JSON @using-api yes
# @stable yes @results JSON
# @parse url, title @stable yes
# @parse url, title
# @todo content
@todo content
"""
from json import loads from json import loads
from string import Formatter from string import Formatter

View File

@ -1,12 +1,14 @@
## Mixcloud (Music) """
# Mixcloud (Music)
# @website https://www.mixcloud.com/
# @provide-api yes (http://www.mixcloud.com/developers/ @website https://www.mixcloud.com/
# @provide-api yes (http://www.mixcloud.com/developers/
# @using-api yes
# @results JSON @using-api yes
# @stable yes @results JSON
# @parse url, title, content, embedded, publishedDate @stable yes
@parse url, title, content, embedded, publishedDate
"""
from json import loads from json import loads
from urllib import urlencode from urllib import urlencode

View File

@ -1,12 +1,14 @@
## OpenStreetMap (Map) """
# OpenStreetMap (Map)
# @website https://openstreetmap.org/
# @provide-api yes (http://wiki.openstreetmap.org/wiki/Nominatim) @website https://openstreetmap.org/
# @provide-api yes (http://wiki.openstreetmap.org/wiki/Nominatim)
# @using-api yes
# @results JSON @using-api yes
# @stable yes @results JSON
# @parse url, title @stable yes
@parse url, title
"""
from json import loads from json import loads
from searx.utils import searx_useragent from searx.utils import searx_useragent

View File

@ -1,12 +1,14 @@
## Photon (Map) """
# Photon (Map)
# @website https://photon.komoot.de
# @provide-api yes (https://photon.komoot.de/) @website https://photon.komoot.de
# @provide-api yes (https://photon.komoot.de/)
# @using-api yes
# @results JSON @using-api yes
# @stable yes @results JSON
# @parse url, title @stable yes
@parse url, title
"""
from urllib import urlencode from urllib import urlencode
from json import loads from json import loads

View File

@ -1,12 +1,14 @@
## Searchcode (It) """
# Searchcode (It)
# @website https://searchcode.com/
# @provide-api yes (https://searchcode.com/api/) @website https://searchcode.com/
# @provide-api yes (https://searchcode.com/api/)
# @using-api yes
# @results JSON @using-api yes
# @stable yes @results JSON
# @parse url, title, content @stable yes
@parse url, title, content
"""
from urllib import urlencode from urllib import urlencode
from json import loads from json import loads

View File

@ -1,12 +1,14 @@
## Searchcode (It) """
# Searchcode (It)
# @website https://searchcode.com/
# @provide-api yes (https://searchcode.com/api/) @website https://searchcode.com/
# @provide-api yes (https://searchcode.com/api/)
# @using-api yes
# @results JSON @using-api yes
# @stable yes @results JSON
# @parse url, title, content @stable yes
@parse url, title, content
"""
from urllib import urlencode from urllib import urlencode
from json import loads from json import loads

View File

@ -1,12 +1,14 @@
## Soundcloud (Music) """
# Soundcloud (Music)
# @website https://soundcloud.com
# @provide-api yes (https://developers.soundcloud.com/) @website https://soundcloud.com
# @provide-api yes (https://developers.soundcloud.com/)
# @using-api yes
# @results JSON @using-api yes
# @stable yes @results JSON
# @parse url, title, content, publishedDate, embedded @stable yes
@parse url, title, content, publishedDate, embedded
"""
from json import loads from json import loads
from urllib import urlencode, quote_plus from urllib import urlencode, quote_plus

View File

@ -1,12 +1,14 @@
## Spotify (Music) """
# Spotify (Music)
# @website https://spotify.com
# @provide-api yes (https://developer.spotify.com/web-api/search-item/) @website https://spotify.com
# @provide-api yes (https://developer.spotify.com/web-api/search-item/)
# @using-api yes
# @results JSON @using-api yes
# @stable yes @results JSON
# @parse url, title, content, embedded @stable yes
@parse url, title, content, embedded
"""
from json import loads from json import loads
from urllib import urlencode from urllib import urlencode

View File

@ -1,12 +1,14 @@
## Stackoverflow (It) """
# Stackoverflow (It)
# @website https://stackoverflow.com/
# @provide-api not clear (https://api.stackexchange.com/docs/advanced-search) @website https://stackoverflow.com/
# @provide-api not clear (https://api.stackexchange.com/docs/advanced-search)
# @using-api no
# @results HTML @using-api no
# @stable no (HTML can change) @results HTML
# @parse url, title, content @stable no (HTML can change)
@parse url, title, content
"""
from urlparse import urljoin from urlparse import urljoin
from cgi import escape from cgi import escape

View File

@ -1,12 +1,14 @@
## Subtitleseeker (Video) """
# Subtitleseeker (Video)
# @website http://www.subtitleseeker.com
# @provide-api no @website http://www.subtitleseeker.com
# @provide-api no
# @using-api no
# @results HTML @using-api no
# @stable no (HTML can change) @results HTML
# @parse url, title, content @stable no (HTML can change)
@parse url, title, content
"""
from cgi import escape from cgi import escape
from urllib import quote_plus from urllib import quote_plus

View File

@ -1,14 +1,16 @@
## Twitter (Social media) """
# Twitter (Social media)
# @website https://twitter.com/
# @provide-api yes (https://dev.twitter.com/docs/using-search) @website https://twitter.com/
# @provide-api yes (https://dev.twitter.com/docs/using-search)
# @using-api no
# @results HTML (using search portal) @using-api no
# @stable no (HTML can change) @results HTML (using search portal)
# @parse url, title, content @stable no (HTML can change)
# @parse url, title, content
# @todo publishedDate
@todo publishedDate
"""
from urlparse import urljoin from urlparse import urljoin
from urllib import urlencode from urllib import urlencode

View File

@ -1,13 +1,14 @@
## 1x (Images) """
# 1x (Images)
# @website http://1x.com/
# @provide-api no
#
# @using-api no
# @results HTML
# @stable no (HTML can change)
# @parse url, title, thumbnail, img_src, content
@website http://1x.com/
@provide-api no
@using-api no
@results HTML
@stable no (HTML can change)
@parse url, title, thumbnail, img_src, content
"""
from urllib import urlencode from urllib import urlencode
from urlparse import urljoin from urlparse import urljoin

View File

@ -1,14 +1,16 @@
## 500px (Images) """
# 500px (Images)
# @website https://500px.com
# @provide-api yes (https://developers.500px.com/) @website https://500px.com
# @provide-api yes (https://developers.500px.com/)
# @using-api no
# @results HTML @using-api no
# @stable no (HTML can change) @results HTML
# @parse url, title, thumbnail, img_src, content @stable no (HTML can change)
# @parse url, title, thumbnail, img_src, content
# @todo rewrite to api
@todo rewrite to api
"""
from urllib import urlencode from urllib import urlencode

View File

@ -1,4 +1,4 @@
## Yacy (Web, Images, Videos, Music, Files) # Yacy (Web, Images, Videos, Music, Files)
# #
# @website http://yacy.net # @website http://yacy.net
# @provide-api yes # @provide-api yes

View File

@ -1,13 +1,15 @@
## Yahoo (Web) """
# Yahoo (Web)
# @website https://search.yahoo.com/web
# @provide-api yes (https://developer.yahoo.com/boss/search/), @website https://search.yahoo.com/web
# $0.80/1000 queries @provide-api yes (https://developer.yahoo.com/boss/search/),
# $0.80/1000 queries
# @using-api no (because pricing)
# @results HTML (using search portal) @using-api no (because pricing)
# @stable no (HTML can change) @results HTML (using search portal)
# @parse url, title, content, suggestion @stable no (HTML can change)
@parse url, title, content, suggestion
"""
from urllib import urlencode from urllib import urlencode
from urlparse import unquote from urlparse import unquote

View File

@ -1,4 +1,4 @@
## Youtube (Videos) # Youtube (Videos)
# #
# @website https://www.youtube.com/ # @website https://www.youtube.com/
# @provide-api yes (http://gdata-samples-youtube-search-py.appspot.com/) # @provide-api yes (http://gdata-samples-youtube-search-py.appspot.com/)
@ -47,7 +47,7 @@ def response(resp):
search_results = loads(resp.text) search_results = loads(resp.text)
# return empty array if there are no results # return empty array if there are no results
if not 'feed' in search_results: if 'feed' not in search_results:
return [] return []
feed = search_results['feed'] feed = search_results['feed']

View File

@ -2,96 +2,115 @@
Babel = 1.3 Babel = 1.3
Flask = 0.10.1 Flask = 0.10.1
Flask-Babel = 0.9 Flask-Babel = 0.9
Jinja2 = 2.7.2 Jinja2 = 2.7.3
MarkupSafe = 0.18 MarkupSafe = 0.23
Pygments = 2.0.1 Pygments = 2.0.2
WebOb = 1.3.1 WebOb = 1.4.1
WebTest = 2.0.11 WebTest = 2.0.18
Werkzeug = 0.9.4 Werkzeug = 0.10.4
buildout-versions = 1.7 buildout-versions = 1.7
collective.recipe.omelette = 0.16 collective.recipe.omelette = 0.16
coverage = 3.7.1 coverage = 3.7.1
decorator = 3.4.0 decorator = 3.4.2
docutils = 0.11 docutils = 0.12
flake8 = 2.1.0 flake8 = 2.4.0
itsdangerous = 0.23 itsdangerous = 0.24
mccabe = 0.2.1 mccabe = 0.3
mock = 1.0.1 mock = 1.0.1
pep8 = 1.4.6 pep8 = 1.5.7
plone.testing = 4.0.8 plone.testing = 4.0.13
pyflakes = 0.7.3 pyflakes = 0.8.1
pytz = 2013b pytz = 2015.2
pyyaml = 3.10 pyyaml = 3.11
requests = 2.5.3 requests = 2.6.2
robotframework-debuglibrary = 0.3 robotframework-debuglibrary = 0.3
robotframework-httplibrary = 0.4.2 robotframework-httplibrary = 0.4.2
robotframework-selenium2library = 1.5.0 robotframework-selenium2library = 1.6.0
robotsuite = 1.4.2 robotsuite = 1.6.1
selenium = 2.39.0 selenium = 2.45.0
speaklater = 1.3 speaklater = 1.3
unittest2 = 0.5.1 unittest2 = 1.0.1
waitress = 0.8.8 waitress = 0.8.9
zc.recipe.testrunner = 2.0.0 zc.recipe.testrunner = 2.0.0
pyopenssl = 0.15.1 pyopenssl = 0.15.1
ndg-httpsclient = 0.3.3 ndg-httpsclient = 0.3.3
pyasn1 = 0.1.7 pyasn1 = 0.1.7
pyasn1-modules = 0.0.5 pyasn1-modules = 0.0.5
certifi = 14.05.14 certifi = 2015.04.28
#
cffi = 0.9.2
cryptography = 0.8.2
# Required by: # Required by:
# WebTest==2.0.11 # WebTest==2.0.18
beautifulsoup4 = 4.3.2 beautifulsoup4 = 4.3.2
# Required by: # Required by:
# robotframework-httplibrary==0.4.2 # cryptography==0.8.2
jsonpatch = 1.3 enum34 = 1.0.4
# Required by: # Required by:
# robotframework-httplibrary==0.4.2 # robotframework-httplibrary==0.4.2
jsonpointer = 1.1 jsonpatch = 1.9
# Required by:
# robotsuite==1.4.2
# searx==0.1
lxml = 3.2.5
# Required by: # Required by:
# robotframework-httplibrary==0.4.2 # robotframework-httplibrary==0.4.2
robotframework = 2.8.3 jsonpointer = 1.7
# Required by: # Required by:
# plone.testing==4.0.8 # traceback2==1.4.0
# robotsuite==1.4.2 linecache2 = 1.0.0
# searx==0.1
# zope.exceptions==4.0.6
# zope.interface==4.0.5
# zope.testrunner==4.4.1
setuptools = 2.1
# Required by: # Required by:
# zope.testrunner==4.4.1 # robotsuite==1.6.1
six = 1.6.1 # searx==0.7.0
lxml = 3.4.4
# Required by:
# cffi==0.9.2
pycparser = 2.12
# Required by:
# searx==0.7.0
python-dateutil = 2.4.2
# Required by:
# robotframework-httplibrary==0.4.2
robotframework = 2.8.7
# Required by:
# searx==0.7.0
# zope.exceptions==4.0.7
# zope.interface==4.1.2
# zope.testrunner==4.4.8
setuptools = 15.2
# Required by:
# robotsuite==1.6.1
# zope.testrunner==4.4.8
six = 1.9.0
# Required by:
# unittest2==1.0.1
traceback2 = 1.4.0
# Required by: # Required by:
# collective.recipe.omelette==0.16 # collective.recipe.omelette==0.16
zc.recipe.egg = 2.0.1 zc.recipe.egg = 2.0.1
# Required by: # Required by:
# zope.testrunner==4.4.1 # zope.testrunner==4.4.8
zope.exceptions = 4.0.6 zope.exceptions = 4.0.7
# Required by: # Required by:
# zope.testrunner==4.4.1 # zope.testrunner==4.4.8
zope.interface = 4.0.5 zope.interface = 4.1.2
# Required by: # Required by:
# plone.testing==4.0.8 # plone.testing==4.0.13
zope.testing = 4.1.2 zope.testing = 4.1.3
# Required by: # Required by:
# zc.recipe.testrunner==2.0.0 # zc.recipe.testrunner==2.0.0
zope.testrunner = 4.4.1 zope.testrunner = 4.4.8
# Required by:
# searx==0.3.0
python-dateutil = 2.2