# searx/engines/dictzone.py
"""
Dictzone
@website https://dictzone.com/
@provide-api no
@using-api no
@results HTML (using search portal)
@stable no (HTML can change)
@parse url, title, content
"""
import re
from lxml import html
from searx.utils import is_valid_lang
from searx.url_utils import urljoin
# Engine configuration ------------------------------------------------------

categories = ['general']

# URL template: English language names fill the path, the raw term the tail.
url = u'https://dictzone.com/{from_lang}-{to_lang}-dictionary/{query}'

# Relative ranking weight of this engine's results.
weight = 100

# Matches queries of the form "<from>-<to> <term>", e.g. b"en-hu apple".
# Bytes pattern: request() receives the query as bytes (see query.decode
# there) -- TODO confirm against the caller before changing to str.
parser_re = re.compile(b'.*?([a-z]+)-([a-z]+) ([^ ]+)$', re.I)

# XPath selecting one row of dictzone's HTML result table.
results_xpath = './/table[@id="r"]/tr'
def request(query, params):
    """Build the dictzone URL for a "<from>-<to> <term>" query.

    *query* arrives as bytes (it is matched against the bytes pattern
    ``parser_re`` and decoded below).  If the query does not look like a
    language-pair translation request, or either language code is unknown,
    *params* is returned unchanged so no request is made.
    """
    m = parser_re.match(query)
    if not m:
        return params

    from_lang, to_lang, query = m.groups()

    # is_valid_lang returns a falsy value for unknown codes; otherwise a
    # sequence whose element [2] is used in the URL -- presumably the
    # English language name (e.g. "english"), verify in searx.utils.
    from_lang = is_valid_lang(from_lang)
    to_lang = is_valid_lang(to_lang)

    if not from_lang or not to_lang:
        return params

    params['url'] = url.format(from_lang=from_lang[2],
                               to_lang=to_lang[2],
                               query=query.decode('utf-8'))

    return params
2016-09-06 09:47:27 +00:00
def response(resp):
    """Parse dictzone's HTML result table into a list of searx results.

    Each result dict carries 'url', 'title' (source-language term) and
    'content' (semicolon-joined translations).
    """
    results = []
    dom = html.fromstring(resp.text)

    # [1:] skips the first table row (the header, not a translation).
    for position, row in enumerate(dom.xpath(results_xpath)[1:]):
        try:
            from_result, to_results_raw = row.xpath('./td')
        except ValueError:
            # Row without exactly two cells (e.g. a separator) -- skip it.
            # Narrowed from a bare except: the unpack above is the only
            # statement expected to fail here.
            continue

        to_results = []
        for to_result in to_results_raw.xpath('./p/a'):
            text = to_result.text_content()
            if text.strip():
                to_results.append(text)

        results.append({
            # Results need distinct URLs; use the row index as a query tag.
            'url': urljoin(resp.url, '?%d' % position),
            'title': from_result.text_content(),
            'content': '; '.join(to_results)
        })

    return results