'''
searx is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

searx is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.

You should have received a copy of the GNU Affero General Public License
along with searx. If not, see < http://www.gnu.org/licenses/ >.

(C) 2013- by Adam Tauber, <asciimoo@gmail.com>
'''

from os.path import realpath, dirname, splitext, join
from os import listdir
from imp import load_source
import grequests
from itertools import izip_longest, chain
from operator import itemgetter
from urlparse import urlparse
from searx import settings

engine_dir = dirname(realpath(__file__))

# engine name -> loaded engine module
engines = {}

# category name -> list of engine modules; engines without their own
# `categories` attribute fall back to 'general'
categories = {'general': []}

# import every non-private .py module in this directory as a search engine,
# skipping blacklisted modules and modules that do not implement the
# request/response interface
for filename in listdir(engine_dir):
    if filename.startswith('_') or not filename.endswith('.py'):
        continue
    modname = splitext(filename)[0]
    if modname in settings.blacklist:
        continue
    filepath = join(engine_dir, filename)
    engine = load_source(modname, filepath)
    engine.name = modname
    if not hasattr(engine, 'request') or not hasattr(engine, 'response'):
        continue
    engines[modname] = engine
    if not hasattr(engine, 'categories'):
        categories['general'].append(engine)
    else:
        for category_name in engine.categories:
            categories.setdefault(category_name, []).append(engine)
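
# A minimal sketch of an engine module the loop above would accept
# (hypothetical example, not a real searx engine -- the URL and the parsed
# result are placeholders):
#
#     categories = ['general']
#
#     def request(query, params):
#         params['url'] = 'https://example.com/search?q=' + query
#         return params
#
#     def response(resp):
#         # must return a list of dicts with 'url', 'title' and 'content'
#         return [{'url': 'https://example.com/',
#                  'title': 'example',
#                  'content': ''}]
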

def default_request_params():
    return {'method': 'GET',
            'headers': {},
            'data': {},
            'url': '',
            'cookies': {}}
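
# Engines receive a copy of these defaults via request(query, params) below
# and are expected to fill in at least params['url']; POST engines also set
# params['method'] and params['data'].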


def make_callback(engine_name, results, callback):
    # wrap an engine's response parser in a grequests response hook that
    # tags each result with the engine's name and writes the parsed list
    # into the shared `results` dict once the HTTP response arrives
    def process_callback(response, **kwargs):
        cb_res = []
        for result in callback(response):
            result['engine'] = engine_name
            cb_res.append(result)
        results[engine_name] = cb_res
    return process_callback


def search(query, request, selected_engines):
    global engines
    requests = []
    results = {}
    user_agent = request.headers.get('User-Agent', '')
    for ename, engine in engines.items():
        if ename not in selected_engines:
            continue
        request_params = default_request_params()
        request_params['headers']['User-Agent'] = user_agent
        request_params = engine.request(query, request_params)
        callback = make_callback(ename, results, engine.response)
        if request_params['method'] == 'GET':
            req = grequests.get(request_params['url'],
                                headers=request_params['headers'],
                                hooks=dict(response=callback),
                                cookies=request_params['cookies'])
        else:
            req = grequests.post(request_params['url'],
                                 data=request_params['data'],
                                 headers=request_params['headers'],
                                 hooks=dict(response=callback),
                                 cookies=request_params['cookies'])
        requests.append(req)
    # send all engine requests concurrently; the response hooks fill `results`
    grequests.map(requests)
    # interleave the per-engine result lists round-robin (izip_longest pads
    # the shorter lists with None, which filter() drops)
    flat_res = filter(None,
                      chain.from_iterable(izip_longest(*results.values())))
    flat_len = len(flat_res)
    results = []
    # deduplication + scoring
    for i, res in enumerate(flat_res):
        res['parsed_url'] = urlparse(res['url'])
        score = (flat_len - i) * settings.weights.get(res['engine'], 1)
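        # e.g. with flat_len == 10 and an engine weight of 2, positions 0..9
        # score 20, 18, ..., 2; duplicated urls below add their scores up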
        duplicated = False
        for new_res in results:
            # same host, path and query string => duplicate
            if res['parsed_url'].netloc == new_res['parsed_url'].netloc and\
               res['parsed_url'].path == new_res['parsed_url'].path and\
               res['parsed_url'].query == new_res['parsed_url'].query:
                duplicated = new_res
                break
        if duplicated:
            # merge into the already collected result: keep the longer
            # content, add up the scores and record both engine names
            if len(res.get('content', '')) > len(duplicated.get('content', '')):
                duplicated['content'] = res['content']
            duplicated['score'] += score
            duplicated['engine'] += ', ' + res['engine']
            # prefer the https variant of the url
            if duplicated['parsed_url'].scheme == 'https':
                continue
            elif res['parsed_url'].scheme == 'https':
                # ParseResult is immutable, so build a new one with the
                # upgraded scheme
                duplicated['parsed_url'] = \
                    duplicated['parsed_url']._replace(scheme='https')
                duplicated['url'] = duplicated['parsed_url'].geturl()
        else:
            res['score'] = score
            results.append(res)

    return sorted(results, key=itemgetter('score'), reverse=True)
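
# Hedged usage sketch (hypothetical values; in practice `request` is the web
# frontend's incoming HTTP request object, used only for its User-Agent
# header):
#
#     results = search('free software', incoming_request, ['duckduckgo'])
#     for res in results:
#         print res['score'], res['url']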