diff --git a/searx/engines/searchcode_code.py b/searx/engines/searchcode_code.py
index ac9647fcd..c71832f46 100644
--- a/searx/engines/searchcode_code.py
+++ b/searx/engines/searchcode_code.py
@@ -10,8 +10,7 @@
from urllib import urlencode
from json import loads
-import cgi
-import re
+
# engine dependent config
categories = ['it']
@@ -21,17 +20,10 @@ paging = True
url = 'https://searchcode.com/'
search_url = url+'api/codesearch_I/?{query}&p={pageno}'
-code_endings = {'c': 'c',
- 'css': 'css',
- 'cpp': 'cpp',
- 'c++': 'cpp',
+# special code-endings which are not recognised by the file ending
+code_endings = {'cs': 'c#',
'h': 'c',
- 'html': 'html',
- 'hpp': 'cpp',
- 'js': 'js',
- 'lua': 'lua',
- 'php': 'php',
- 'py': 'python'}
+ 'hpp': 'cpp'}
# do search-request
@@ -45,7 +37,7 @@ def request(query, params):
# get response from search-request
def response(resp):
results = []
-
+
search_results = loads(resp.text)
# parse results
@@ -53,12 +45,14 @@ def response(resp):
href = result['url']
title = "" + result['name'] + " - " + result['filename']
repo = result['repo']
-
+
lines = dict()
for line, code in result['lines'].items():
lines[int(line)] = code
- code_language = code_endings.get(result['filename'].split('.')[-1].lower(), None)
+ code_language = code_endings.get(
+ result['filename'].split('.')[-1].lower(),
+ result['filename'].split('.')[-1].lower())
# append result
results.append({'url': href,
diff --git a/searx/engines/searchcode_doc.py b/searx/engines/searchcode_doc.py
index e07cbeab9..4d4d1ca5e 100644
--- a/searx/engines/searchcode_doc.py
+++ b/searx/engines/searchcode_doc.py
@@ -31,15 +31,18 @@ def request(query, params):
# get response from search-request
def response(resp):
results = []
-
+
search_results = loads(resp.text)
# parse results
for result in search_results['results']:
href = result['url']
- title = "[" + result['type'] + "] " + result['namespace'] + " " + result['name']
-    content = '[' + result['type'] + "] " + result['name'] + " " + result['synopsis'] + "<br />" + result['description']
-
+ title = "[" + result['type'] + "] " +\
+ result['namespace'] + " " + result['name']
+ content = '[' + result['type'] + "] " +\
+ result['name'] + " " + result['synopsis'] +\
+            "<br />" + result['description']
+
# append result
results.append({'url': href,
'title': title,
diff --git a/searx/webapp.py b/searx/webapp.py
index 11db1bf2e..ab1e2747f 100644
--- a/searx/webapp.py
+++ b/searx/webapp.py
@@ -99,9 +99,13 @@ def code_highlighter(codelines, language=None):
if not language:
language = 'text'
- # find lexer by programing language
- lexer = get_lexer_by_name(language, stripall=True)
-
+ try:
+ # find lexer by programing language
+ lexer = get_lexer_by_name(language, stripall=True)
+ except:
+ # if lexer is not found, using default one
+ lexer = get_lexer_by_name('text', stripall=True)
+
html_code = ''
tmp_code = ''
last_line = None
@@ -112,20 +116,21 @@ def code_highlighter(codelines, language=None):
line_code_start = line
# new codeblock is detected
- if last_line != None and\
- last_line +1 != line:
+ if last_line is not None and\
+ last_line + 1 != line:
# highlight last codepart
- formatter = HtmlFormatter(linenos='inline', linenostart=line_code_start)
+ formatter = HtmlFormatter(linenos='inline',
+ linenostart=line_code_start)
html_code = html_code + highlight(tmp_code, lexer, formatter)
-
+
# reset conditions for next codepart
tmp_code = ''
line_code_start = line
# add codepart
tmp_code += code + '\n'
-
+
# update line
last_line = line