Compare commits


3 Commits

Author SHA1 Message Date
Alexandre Flament 3059a3d436 Merge 4398ce059f into 10d3af84b8 2024-11-18 09:11:30 +01:00
Markus Heiser 10d3af84b8 [fix] engine: duckduckgo - don't quote query string
The query string sent to DDG must not be quoted.

The query string was URL-quoted in #4011, but the URL-quoted query string results
in unexpected *URL decoded* and other garbage results, as reported in #4019
and #4020.  To test, compare the results of queries like::

    !ddg Häuser und Straßen :de
    !ddg Häuser und Straßen :all
    !ddg 房屋和街道 :all
    !ddg 房屋和街道 :zh

Closes:

- [#4019] https://github.com/searxng/searxng/issues/4019
- [#4020] https://github.com/searxng/searxng/issues/4020

Related:

- [#4011] https://github.com/searxng/searxng/pull/4011

Signed-off-by: Markus Heiser <markus.heiser@darmarit.de>
2024-11-17 18:14:22 +01:00
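
The failure mode is plain double-encoding: quote_plus() percent-encodes the
query once, and urlencode() then escapes the resulting '%' and '+' characters
a second time, which is what produced the *URL decoded* garbage above.  A
minimal sketch in plain Python (independent of SearXNG), using the first test
query::

    from urllib.parse import urlencode, quote_plus

    query = "Häuser und Straßen"

    # wrong: pre-quoting the string, urlencode() escapes '%' and '+' again
    print(urlencode({'q': quote_plus(query)}))
    # q=H%25C3%25A4user%2Bund%2BStra%25C3%259Fen

    # right: pass the raw string, urlencode() quotes it exactly once
    print(urlencode({'q': query}))
    # q=H%C3%A4user+und+Stra%C3%9Fen
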
Alexandre Flament 4398ce059f Add baidu engine (experimental) 2022-07-26 10:52:35 +02:00
3 changed files with 182 additions and 3 deletions

searx/engines/baidu.py · new file · 169 additions
@@ -0,0 +1,169 @@
# SPDX-License-Identifier: AGPL-3.0-or-later
"""Baidu (Web)

- https://github.com/searx/searx/issues/2019#issuecomment-648227442
"""

import re
from urllib.parse import urlencode

from lxml import html

from searx.utils import eval_xpath, extract_text, eval_xpath_list, eval_xpath_getindex
from searx.network import raise_for_httperror, multi_requests, get, Request
from searx.exceptions import SearxEngineCaptchaException

about = {
    "website": 'https://www.baidu.com',
    "wikidata_id": 'Q14772',
    "official_api_documentation": 'https://apis.baidu.com/',
    "use_official_api": False,
    "require_api_key": False,
    "results": 'HTML',
    "language": 'zh',
}

# engine dependent config
categories = ['general', 'web']
paging = False
time_range_support = False
safesearch = False

base_url = 'https://www.baidu.com/'
search_string = 's?{query}'

# Baidu result templates that carry no usable web result (images, videos, music, ...)
skip_tpls = ('img_normal', 'short_video', 'yl_music_song', 'dict3', 'recommend_list')

# XPath of the result description, per Baidu result template
desc_xpath_per_tpl = {
    'se_com_default': './/span[contains(@class, "content-right_8Zs40")]',
    'kaifa_pc_open_source_software': './/p[contains(@class, "c-color-text")]',
    'bk_polysemy': './/div/@aria-label',
    'se_st_single_video_zhanzhang': './/span[contains(@class, "c-span-last")]//p[2]',
}


def get_initial_parameters(params):
    """Fetch the Baidu start page and collect the hidden form parameters and cookies."""
    resp_index = get(base_url, headers=params['headers'], raise_for_httperror=True)
    dom = html.fromstring(resp_index.text)
    query_params = {}
    for ielement in eval_xpath_list(dom, '//form[@id="form"]//input[@name]'):
        name = ielement.attrib.get('name')
        value = ielement.attrib.get('value')
        query_params[name] = value
    return query_params, resp_index.cookies


def request(query, params):
    params['headers'].update(
        {
            'Accept-Language': 'en-US,en;q=0.5',
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
            'Sec-Fetch-Dest': 'document',
            'Sec-Fetch-Mode': 'navigate',
            'Sec-Fetch-Site': 'same-origin',
            'Sec-Fetch-User': '?1',
            'Sec-GPC': '1',
            'Upgrade-Insecure-Requests': '1',
            'TE': 'trailers',
        }
    )
    query_params, cookies = get_initial_parameters(params)
    query_params['wd'] = query
    params['url'] = base_url + search_string.format(query=urlencode(query_params))
    params['cookies'] = cookies
    params['raise_for_httperror'] = False
    return params


def response(resp):
    results = []

    if resp.url.host == 'wappass.baidu.com' or resp.url.path.startswith('/static/captcha'):
        raise SearxEngineCaptchaException()
    raise_for_httperror(resp)

    dom = html.fromstring(resp.text)

    # follow the redirect, but don't use the result page, to reduce the CAPTCHA issue
    redirect_element = eval_xpath_getindex(dom, '//noscript/meta[@http-equiv="refresh"]/@content', 0, default=None)
    if redirect_element and redirect_element.startswith('0; url='):
        # '0; url=' is 7 characters; [8:] also drops the leading '/' of the
        # site-relative target, since base_url already ends with '/'
        get(
            base_url + redirect_element[8:],
            headers=resp.search_params['headers'],
            cookies=resp.search_params['cookies'],
        )

    for result in eval_xpath_list(dom, '//div[contains(@id,"content_left")]/div[contains(@class, "c-container")]'):
        tpl = result.attrib.get('tpl')
        if tpl in skip_tpls:
            continue

        if tpl == 'kaifa_pc_blog_weak':
            # skip the link to kaifa.baidu.com (Baidu's search engine for IT topics)
            # itself, but keep the individual results from kaifa
            for r2 in eval_xpath_list(result, './/div[contains(@class, "c-gap-bottom-small")]'):
                title = extract_text(eval_xpath(r2, './/div[@class="c-row"]//a'))
                url = extract_text(eval_xpath(r2, './/div[@class="c-row"]//a/@href'))
                content = extract_text(eval_xpath(r2, './/span[@class="c-line-clamp2"]'))
                results.append(
                    {
                        'url': url,
                        'title': title,
                        'content': content,
                    }
                )
            continue

        # normal results
        title = extract_text(eval_xpath(result, './/h3/a'))
        url = extract_text(eval_xpath(result, './/h3/a/@href'))
        if not title or not url:
            continue

        content = None
        if tpl in desc_xpath_per_tpl:
            # try the XPath for the Baidu template
            content = extract_text(eval_xpath(result, desc_xpath_per_tpl[tpl]))
        if not content:
            # no content was found: try all the XPath from the Baidu templates
            for xp in desc_xpath_per_tpl.values():
                content = extract_text(eval_xpath(result, xp))
                if content:
                    break

        results.append(
            {
                'url': url,
                'title': title,
                'content': content,
            }
        )

    # resolve the Baidu redirections
    # note: Baidu does not support HTTP/2
    request_list = [
        Request.get(
            u['url'].replace('http://www.baidu.com/link?url=', 'https://www.baidu.com/link?url='),
            allow_redirects=False,
            headers=resp.search_params['headers'],
        )
        for u in results
    ]
    response_list = multi_requests(request_list)
    for i, redirect_response in enumerate(response_list):
        if not isinstance(redirect_response, Exception):
            results[i]['url'] = redirect_response.headers['location']

    return results


def debug_write_content_to_file(text):
    # debug helper, not called by the engine: dump a cleaned copy of the
    # result page to ./baidu.html
    RE_STYLE_ELEMENT = re.compile(r'<style[^>]*>[^<]+</style>')
    RE_SCRIPT_ELEMENT = re.compile(r'<script[^>]*>[^<]+</script>')
    RE_COMMENT_ELEMENT = re.compile(r'<!--[^-]+-->')
    with open('baidu.html', 'wt', encoding='utf-8') as f:
        text = RE_STYLE_ELEMENT.sub("", text)
        text = RE_SCRIPT_ELEMENT.sub("", text)
        text = RE_COMMENT_ELEMENT.sub("", text)
        text = "\n".join([ll.rstrip() for ll in text.splitlines() if ll.strip()])
        f.write(text)
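
A note on the redirect-resolution step at the end of response(): every Baidu
result links through www.baidu.com/link?url=..., and the real target only
appears in the Location header of the 302 answer, hence allow_redirects=False.
A minimal standalone sketch of the same pattern (the URL value is hypothetical;
the engine itself batches these lookups through searx.network.multi_requests)::

    import httpx

    # hypothetical Baidu indirection URL as found in a result page
    redirect_url = 'https://www.baidu.com/link?url=...'

    # fetch the indirection without following it; Baidu does not support
    # HTTP/2, so a plain HTTP/1.1 request is used here
    resp = httpx.get(redirect_url, follow_redirects=False)
    if resp.is_redirect:
        print(resp.headers['location'])  # the resolved target URL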

searx/engines/duckduckgo.py

@@ -6,7 +6,7 @@ DuckDuckGo Lite
 from typing import TYPE_CHECKING
 import re
-from urllib.parse import urlencode, quote_plus
+from urllib.parse import urlencode
 import json

 import babel
 import lxml.html
@@ -263,7 +263,7 @@ def request(query, params):
     params['url'] = url
     params['method'] = 'POST'
-    params['data']['q'] = quote_plus(query)
+    params['data']['q'] = query

     # The API is not documented, so we do some reverse engineering and emulate
     # what https://html.duckduckgo.com/html does when you press "next Page" link
@@ -381,7 +381,11 @@ def response(resp):
     zero_click_info_xpath = '//div[@id="zero_click_abstract"]'
     zero_click = extract_text(eval_xpath(doc, zero_click_info_xpath)).strip()

-    if zero_click and "Your IP address is" not in zero_click and "Your user agent:" not in zero_click:
+    if zero_click and (
+        "Your IP address is" not in zero_click
+        and "Your user agent:" not in zero_click
+        and "URL Decoded:" not in zero_click
+    ):
         current_query = resp.search_params["data"].get("q")
         results.append(

searx/settings.yml

@@ -422,6 +422,12 @@ engines:
     shortcut: bi
     disabled: true

+  - name: baidu
+    engine: baidu
+    shortcut: ba
+    timeout: 15
+    disabled: true
+
   - name: bing images
     engine: bing_images
     shortcut: bii
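
The new entry ships disabled, matching the "experimental" label on the commit.
To try the engine, one would flip the flag in a local settings.yml, e.g.::

    - name: baidu
      engine: baidu
      shortcut: ba
      timeout: 15
      disabled: false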