# searx/engines/dictzone.py
"""
Dictzone
@website https://dictzone.com/
@provide-api no
@using-api no
@results HTML (using search portal)
@stable no (HTML can change)
@parse url, title, content
"""
import re
from urlparse import urljoin
from lxml import html
from cgi import escape
from searx.engines.xpath import extract_text
from searx.languages import language_codes
# engine metadata: dictzone results go into the general category
categories = ['general']

# search URL template; dictzone serves plain HTML pages per language pair
url = 'http://dictzone.com/{from_lang}-{to_lang}-dictionary/{query}'
weight = 100

# matches queries of the form "<from>-<to> <term>", e.g. "en-de dog"
parser_re = re.compile(u'.*?([a-z]+)-([a-z]+) ([^ ]+)$', re.I)

# one translation row per <tr> of the result table with id "r"
results_xpath = './/table[@id="r"]/tr'
def is_valid_lang(lang):
    """Look up *lang* among the languages supported by searx.

    ``lang`` may be either a two-letter abbreviation or a full English
    language name.  Returns ``(True, english_name)`` on a match and
    ``False`` when the language is unknown.
    """
    needle = lang.lower()
    if len(lang) == 2:
        # two-letter form: compare against the locale code prefix
        for entry in language_codes:
            if entry[0][:2] == needle:
                return (True, entry[1].lower())
    else:
        # long form: compare against the English language name
        for entry in language_codes:
            if entry[1].lower() == needle:
                return (True, entry[1].lower())
    return False
def request(query, params):
    """Build the dictzone URL for a ``"<from>-<to> <word>"`` query.

    *params* is returned unchanged when the query does not look like a
    translation request or when either language is not recognised.
    """
    match = parser_re.match(unicode(query, 'utf8'))
    if not match:
        return params

    from_lang, to_lang, query = match.groups()

    from_lang = is_valid_lang(from_lang)
    to_lang = is_valid_lang(to_lang)

    if not (from_lang and to_lang):
        return params

    params['url'] = url.format(from_lang=from_lang[1],
                               to_lang=to_lang[1],
                               query=query)

    return params
def response(resp):
    """Extract translation results from a dictzone HTML page.

    Returns a list of result dicts with ``url``, ``title`` (source term)
    and ``content`` (semicolon-joined translations).
    """
    results = []
    dom = html.fromstring(resp.text)

    # skip the first <tr>, which is the table header
    for k, result in enumerate(dom.xpath(results_xpath)[1:]):
        try:
            from_result, to_results_raw = result.xpath('./td')
        except ValueError:
            # row does not contain exactly the two expected cells
            continue

        to_results = []
        for to_result in to_results_raw.xpath('./p/a'):
            t = to_result.text_content()
            if t.strip():
                # reuse the already-extracted text instead of re-walking the node
                to_results.append(t)

        results.append({
            'url': urljoin(resp.url, '?%d' % k),
            'title': escape(from_result.text_content()),
            'content': escape('; '.join(to_results))
        })

    return results