searxng/searx/engines/bing.py


"""
Bing (Web)
@website https://www.bing.com
@provide-api yes (http://datamarket.azure.com/dataset/bing/search),
max. 5000 query/month
@using-api no (because of query limit)
@results HTML (using search portal)
@stable no (HTML can change)
@parse url, title, content
@todo publishedDate
"""

from lxml import html
from searx.engines.xpath import extract_text
from searx.url_utils import urlencode
from searx.utils import match_language, gen_useragent

# engine dependent config
categories = ['general']
paging = True
language_support = True
supported_languages_url = 'https://www.bing.com/account/general'
language_aliases = {'zh-CN': 'zh-CHS', 'zh-TW': 'zh-CHT', 'zh-HK': 'zh-CHT'}

# search-url
base_url = 'https://www.bing.com/'
search_string = 'search?{query}&first={offset}'


# do search-request
def request(query, params):
    offset = (params['pageno'] - 1) * 10 + 1

    lang = match_language(params['language'], supported_languages, language_aliases)

    query = u'language:{} {}'.format(lang.split('-')[0].upper(), query.decode('utf-8')).encode('utf-8')

    search_path = search_string.format(
        query=urlencode({'q': query}),
        offset=offset)

    params['url'] = base_url + search_path
    params['headers']['User-Agent'] = gen_useragent('Windows NT 6.3; WOW64')

    return params
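
# Example (illustrative, assumed inputs): with params = {'pageno': 2,
# 'language': 'en-US', 'headers': {}} and query b'test', and assuming 'en-US'
# is present in supported_languages, request() sets params['url'] to
# 'https://www.bing.com/search?q=language%3AEN+test&first=11'
# (offset 11 for page 2, query prefixed with 'language:EN').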


# get response from search-request
def response(resp):
    results = []

    dom = html.fromstring(resp.text)

    # try to extract the total result count shown by Bing
    try:
        results.append({'number_of_results': int(dom.xpath('//span[@class="sb_count"]/text()')[0]
                        .split()[0].replace(',', ''))})
    except Exception:
        pass

    # parse results
    for result in dom.xpath('//div[@class="sa_cc"]'):
        link = result.xpath('.//h3/a')[0]
        url = link.attrib.get('href')
        title = extract_text(link)
        content = extract_text(result.xpath('.//p'))

        # append result
        results.append({'url': url,
                        'title': title,
                        'content': content})

    # parse results again if nothing is found yet
    for result in dom.xpath('//li[@class="b_algo"]'):
        link = result.xpath('.//h2/a')[0]
        url = link.attrib.get('href')
        title = extract_text(link)
        content = extract_text(result.xpath('.//p'))

        # append result
        results.append({'url': url,
                        'title': title,
                        'content': content})

    # return results
    return results
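
# Each returned entry is a plain dict with 'url', 'title' and 'content' keys;
# when the result-count span parses, the list additionally starts with a
# {'number_of_results': <int>} entry.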


# get supported languages from their site
def _fetch_supported_languages(resp):
    supported_languages = []
    dom = html.fromstring(resp.text)
    options = dom.xpath('//div[@id="limit-languages"]//input')
    for option in options:
        code = option.xpath('./@id')[0].replace('_', '-')
        if code == 'nb':
            code = 'no'
        supported_languages.append(code)
    return supported_languages
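

# Minimal manual-test sketch (illustrative only, not part of the searx engine
# API): in normal operation searx injects `supported_languages` via
# _fetch_supported_languages() and drives request()/response() itself.  The
# language list and query below are assumptions for a standalone run.
if __name__ == '__main__':
    import requests

    supported_languages = ['en-US', 'de-DE', 'fr-FR']  # normally set by the engine loader
    params = {'pageno': 1, 'language': 'en-US', 'headers': {}}
    params = request(b'searx', params)
    resp = requests.get(params['url'], headers=params['headers'])
    for item in response(resp):
        print(item)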