Aol Engine Update

This commit is contained in:
JustSadY 2024-09-28 17:34:28 +03:00
parent 0226c1383f
commit 97cec586c0
7 changed files with 303 additions and 0 deletions

@@ -0,0 +1,34 @@
from lxml import html

from searx.utils import (
    extract_text,
    eval_xpath,
)

about = {
    "website": 'https://www.limetorrents.lol',
}

base_url = 'https://www.limetorrents.lol'


def request(query, params):
    params['url'] = f"{base_url}/search/all/{query}/"
    return params


def response(resp):
    results = []
    dom = html.fromstring(resp.text)

    # Result rows alternate between the two background colours of the listing table.
    for result in eval_xpath(
        dom, '//table[@class="table2"]//tr[@bgcolor="#F4F4F4"] | //table[@class="table2"]//tr[@bgcolor="#FFFFFF"]'
    ):
        title = result.xpath('.//td/div')
        title = extract_text(title[0]) if title else None

        url = result.xpath('.//td/div/a/@href')
        url = url[0] if url else None

        if url or title:
            results.append({'url': url, 'title': title})

    return results
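
The engine modules in this commit all follow the same two-hook contract: request() fills in params['url'] for the outgoing query, and response() turns the fetched page into a list of result dicts. A minimal sketch of driving such a pair by hand outside SearXNG, assuming a locally saved results page; FakeResponse and the file name are hypothetical stand-ins, not part of the codebase:

# Minimal sketch (not part of the commit): exercising request()/response() by hand.
class FakeResponse:
    """Hypothetical stand-in for the HTTP response object; only .text is used above."""
    def __init__(self, text):
        self.text = text


params = request('ubuntu', {})
print(params['url'])  # -> https://www.limetorrents.lol/search/all/ubuntu/

# Feed response() a saved copy of the results page (hypothetical file name).
with open('limetorrents_results.html', encoding='utf-8') as f:
    resp = FakeResponse(f.read())
for item in response(resp):
    print(item['title'], item['url'])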

@@ -0,0 +1,41 @@
"""LimeTorrents
"""
from urllib.parse import urlencode
from lxml import html
from searx.utils import (
extract_text,
eval_xpath,
)
about = {
"website": 'https://limetorrent.net',
}
base_url = 'https://limetorrent.net'
def request(query, params):
query_params = {
'q': query,
}
params['url'] = f"{base_url}/search/?{urlencode(query_params)}"
return params
def response(resp):
results = []
dom = html.fromstring(resp.text)
for result in eval_xpath(dom, '//tbody/tr[@bgcolor="#F4F4F4"]'):
title = result.xpath('.//td/div')
title = extract_text(title[0]) if title else None
url = result.xpath('.//td/div/a/@href')
url = extract_text(url[0]) if url else None
if url or title:
results.append({'url': url, 'title': title, })
return results

@@ -0,0 +1,39 @@
from urllib.parse import urlencode

from lxml import html

from searx.utils import extract_text

about = {
    "website": 'https://www.searchencrypt.com',
    "results": 'HTML',
}

safesearch = True
base_url = 'https://www.searchencrypt.com/search'


def request(query, params):
    query_params = {
        'q': query,
    }
    params['url'] = f'{base_url}?{urlencode(query_params)}'
    return params


def response(resp):
    results = []
    dom = html.fromstring(resp.text)

    # Each web result is wrapped in a serp__web-result__container block.
    for result in dom.xpath('//div[@class="serp__web-result__container"]'):
        link = result.xpath('.//div/h3/a/@href')
        link = link[0] if link else None

        title = result.xpath('.//div/h3/a/span')
        title = extract_text(title[0]) if title else None

        content = result.xpath('.//div/p/a/span')
        content = extract_text(content[0]) if content else ''

        if link or title or content:
            results.append({'url': link, 'title': title, 'content': content})

    return results

searx/engines/sogou.py Normal file
@@ -0,0 +1,72 @@
from urllib.parse import urlencode

from lxml import html

from searx.utils import extract_text

about = {
    "website": 'https://www.sogou.com/',
    "results": 'HTML',
}

paging = True
base_url = 'https://www.sogou.com/web'


def request(query, params):
    page = params.get('pageno', 1)
    query_params = {
        'query': query,
        'page': page,
    }
    params['url'] = f'{base_url}?{urlencode(query_params)}'

    # Custom headers for the request
    headers = {
        'Sec-Fetch-Mode': 'cors',
        'Sec-Fetch-Site': 'same-origin',
        'Origin': 'https://translate.sogou.com',
        'Accept-Encoding': 'gzip, deflate, br',
        'Accept-Language': 'en-US,en;q=0.9,zh-CN;q=0.8,zh;q=0.7',
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.90 Safari/537.36',
        'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
        'Accept': 'application/json',
        'Referer': 'https://translate.sogou.com/',
        'X-Requested-With': 'XMLHttpRequest',
        'Connection': 'keep-alive',
    }

    # Merge with any existing headers in params
    if 'headers' in params:
        params['headers'].update(headers)
    else:
        params['headers'] = headers

    return params


def response(resp):
    results = []
    dom = html.fromstring(resp.text)

    for result in dom.xpath('//div[@class="vrwrap"]'):
        # Extract the result URL from the data-url attribute
        link_divs = result.xpath('.//div[contains(@class, "r-sech") and (contains(@class, "click-better-sugg") or contains(@class, "result_list"))]')
        link = link_divs[0].xpath('./@data-url')[0] if link_divs else None

        # Extract the title
        title_elem = result.xpath('.//h3[@class="vr-title"]/a') or result.xpath('.//div/h3/a')
        title = title_elem[0].text_content().strip() if title_elem else None

        # Extract the snippet text
        content_elem = result.xpath('.//div[@class="fz-mid space-txt"]')
        content = content_elem[0].text_content().strip() if content_elem else ''

        if link or title:
            results.append({
                'url': link,
                'title': title,
                'content': content,
            })

    return results

searx/engines/torlock.py Normal file
@@ -0,0 +1,43 @@
from lxml import html

from searx.utils import (
    extract_text,
    eval_xpath,
)

about = {
    "website": 'https://www.torlock.com',
}

base_url = 'https://www.torlock.com'
paging = True


def request(query, params):
    params['url'] = f"{base_url}/all/torrents/{query}/{params.get('pageno', 1)}.html"
    return params


def response(resp):
    results = []
    dom = html.fromstring(resp.text)

    for result in eval_xpath(dom, '(.//tr)'):
        url_elem = result.xpath('.//div[@style="float:left"]/a/@href')
        if url_elem:
            url = url_elem[0]
            if not (url.startswith("www") or url.startswith("http")):
                # Site-relative link: prefix it with the base URL.
                url = f"{base_url}{url}"
            else:
                # Absolute links are dropped, so the row is skipped below.
                url = None
        else:
            url = None

        title_elem = result.xpath('.//div[@style="float:left"]/a/b')
        title = extract_text(title_elem[0]) if title_elem else None

        if title and url:
            results.append({
                'url': url,
                'title': title,
            })

    return results

@@ -0,0 +1,38 @@
from urllib.parse import urlencode

from lxml import html

from searx.utils import (
    extract_text,
    eval_xpath,
)

about = {
    "website": 'https://www.torrentdownloads.pro',
}

base_url = 'https://www.torrentdownloads.pro'


def request(query, params):
    # URL-encode the query so spaces and special characters survive the round trip.
    params['url'] = f"{base_url}/search/?{urlencode({'search': query})}"
    return params


def response(resp):
    results = []
    dom = html.fromstring(resp.text)

    for result in eval_xpath(dom, '(//div[@class="inner_container"])[2]/div[contains(@class, "grey_bar3")]'):
        url_elem = result.xpath('.//p/a/@href')
        url = url_elem[0] if url_elem else None
        if url and not (url.startswith('www') or url.startswith('http')):
            url = f"{base_url}{url}"

        title_elem = result.xpath('.//p/a')
        title = extract_text(title_elem[0]) if title_elem else None

        if title and url:
            results.append({
                'url': url,
                'title': title,
            })

    return results
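
Both torrent engines above (torlock and torrentdownloads) absolutize relative result links with the same startswith checks. As an aside, urllib.parse.urljoin covers the common cases as well, but it treats a scheme-less "www..." link as a relative path, which is presumably why the explicit checks are used; a small illustration, not part of this commit:

# Illustration only: how urljoin compares with the startswith checks above.
from urllib.parse import urljoin

base_url = 'https://www.torrentdownloads.pro'

# Site-relative path: same result as prefixing with base_url.
print(urljoin(base_url, '/search/?search=ubuntu'))
# -> https://www.torrentdownloads.pro/search/?search=ubuntu

# Fully qualified URL: left untouched, as with the startswith("http") check.
print(urljoin(base_url, 'https://example.org/file'))
# -> https://example.org/file

# Scheme-less link: urljoin prefixes it, while the startswith("www") check would keep it as-is.
print(urljoin(base_url, 'www.example.org/file'))
# -> https://www.torrentdownloads.pro/www.example.org/file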

@@ -326,6 +326,42 @@ engines:
    categories: [ 'general', 'web' ]
    disabled: true

  - name: sogou
    engine: sogou
    shortcut: sogo
    categories: [ 'general', 'web' ]
    disabled: true

  - name: searchencrypt
    engine: searchencrypt
    shortcut: srchen
    categories: [ 'general', 'web' ]
    disabled: true

  - name: torlock
    engine: torlock
    categories: [ files, torrent ]
    shortcut: trlck
    disabled: true

  - name: torrentdownloads
    engine: torrentdownloads
    categories: [ files, torrent ]
    shortcut: trrnd
    disabled: true

  - name: limetorrent.net
    engine: limetorrentnet
    categories: [ files, torrent ]
    shortcut: lmtn
    disabled: true

  - name: limetorrent.lol
    engine: limetorrentlol
    categories: [ files, torrent ]
    shortcut: lmtl
    disabled: true

  - name: 9gag
    engine: 9gag
    shortcut: 9g