libremiami-search/searx/search.py

'''
searx is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
searx is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with searx. If not, see <http://www.gnu.org/licenses/>.
(C) 2013- by Adam Tauber, <asciimoo@gmail.com>
'''
import grequests
import re
from itertools import izip_longest, chain
from datetime import datetime
from operator import itemgetter
from urlparse import urlparse, unquote
from searx.engines import (
categories, engines, engine_shortcuts
)
from searx.languages import language_codes
from searx.utils import gen_useragent

number_of_searches = 0


# get default request parameters
def default_request_params():
return {
'method': 'GET', 'headers': {}, 'data': {}, 'url': '', 'cookies': {}}
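
# each engine's request() later fills in 'url' and 'data', while
# Search.search() below adds 'category', 'started', 'pageno' and 'language'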


# create a callback wrapper for the search engine results
def make_callback(engine_name, results, suggestions, callback, params):
    # wrap the engine's response parser; grequests calls this hook with
    # every finished response
def process_callback(response, **kwargs):
cb_res = []
response.search_params = params
# update stats with current page-load-time
engines[engine_name].stats['page_load_time'] += \
(datetime.now() - params['started']).total_seconds()
try:
search_results = callback(response)
except Exception, e:
# increase errors stats
engines[engine_name].stats['errors'] += 1
results[engine_name] = cb_res
# print engine name and specific error message
print '[E] Error with engine "{0}":\n\t{1}'.format(
engine_name, str(e))
return
for result in search_results:
result['engine'] = engine_name
# if it is a suggestion, add it to list of suggestions
if 'suggestion' in result:
# TODO type checks
suggestions.add(result['suggestion'])
continue
# append result
cb_res.append(result)
results[engine_name] = cb_res
return process_callback


# return the meaningful length of the content for a result
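# e.g. a content of u'a, b; c.' reduces to u'abc', giving a meaningful
# length of 3 (illustrative value)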
def content_result_len(result):
if isinstance(result.get('content'), basestring):
        # strip punctuation and whitespace; the hyphen is escaped so that
        # ')-_' is not read as a character range
        content = re.sub('[,;:!?\./\\\\ ()\-_]', '', result.get('content'))
return len(content)
else:
return 0


# score results and remove duplications
def score_results(results):
# calculate scoring parameters
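    # izip_longest interleaves the per-engine result lists, so every
    # engine's first result precedes any second result; filter(None, ...)
    # drops the fill values padded in for engines with fewer results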
flat_res = filter(
None, chain.from_iterable(izip_longest(*results.values())))
flat_len = len(flat_res)
engines_len = len(results)
results = []
# pass 1: deduplication + scoring
for i, res in enumerate(flat_res):
res['parsed_url'] = urlparse(res['url'])
res['host'] = res['parsed_url'].netloc
if res['host'].startswith('www.'):
res['host'] = res['host'].replace('www.', '', 1)
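        # e.g. 'www.example.com' and 'example.com' now compare as the same
        # host during deduplication below (illustrative domain)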
res['engines'] = [res['engine']]
weight = 1.0
        # strip multiple spaces and carriage returns from content
if 'content' in res:
            res['content'] = re.sub(' +', ' ',
                                    res['content'].strip().replace('\n', ''))
# get weight of this engine if possible
if hasattr(engines[res['engine']], 'weight'):
weight = float(engines[res['engine']].weight)
# calculate score for that engine
score = int((flat_len - i) / engines_len) * weight + 1
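        # e.g. with 30 interleaved results from 3 engines and weight 1.0 the
        # first result scores int((30 - 0) / 3) * 1.0 + 1 = 11.0 and the
        # last one int((30 - 29) / 3) * 1.0 + 1 = 1.0 (illustrative numbers)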
# check for duplicates
duplicated = False
for new_res in results:
# remove / from the end of the url if required
p1 = res['parsed_url'].path[:-1] if res['parsed_url'].path.endswith('/') else res['parsed_url'].path # noqa
p2 = new_res['parsed_url'].path[:-1] if new_res['parsed_url'].path.endswith('/') else new_res['parsed_url'].path # noqa
# check if that result is a duplicate
if res['host'] == new_res['host'] and\
unquote(p1) == unquote(p2) and\
res['parsed_url'].query == new_res['parsed_url'].query and\
res.get('template') == new_res.get('template'):
duplicated = new_res
break
# merge duplicates together
if duplicated:
# using content with more text
if content_result_len(res) > content_result_len(duplicated):
duplicated['content'] = res['content']
# increase result-score
duplicated['score'] += score
# add engine to list of result-engines
duplicated['engines'].append(res['engine'])
# using https if possible
if duplicated['parsed_url'].scheme == 'https':
continue
elif res['parsed_url'].scheme == 'https':
duplicated['url'] = res['parsed_url'].geturl()
duplicated['parsed_url'] = res['parsed_url']
# if there is no duplicate found, append result
else:
res['score'] = score
results.append(res)
results = sorted(results, key=itemgetter('score'), reverse=True)
    # pass 2: group results by category and template
gresults = []
categoryPositions = {}
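    # categoryPositions maps a 'category:template' key to the insertion
    # point just behind that group in gresults and to how many more results
    # the group may still absorb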
for i, res in enumerate(results):
# FIXME : handle more than one category per engine
        category = (engines[res['engine']].categories[0] + ':'
                    + ('' if 'template' not in res else res['template']))
current = None if category not in categoryPositions else categoryPositions[category]
        # group this result with previous results of the same category if
        # that group can accept more results and is not too far from the
        # current position
        if current is not None and (current['count'] > 0)\
           and (len(gresults) - current['index'] < 20):
            # insert the result into the existing group for this category
index = current['index']
gresults.insert(index, res)
            # shift every stored index at or after the insertion point
for k in categoryPositions:
v = categoryPositions[k]['index']
if v >= index:
categoryPositions[k]['index'] = v+1
# update this category
current['count'] -= 1
else:
            # otherwise, start a new group with this result
gresults.append(res)
            # record the group's position and its remaining capacity
            categoryPositions[category] = {'index': len(gresults), 'count': 8}
# return gresults
return gresults


class Search(object):
"""Search information container"""
def __init__(self, request):
# init vars
super(Search, self).__init__()
self.query = None
self.engines = []
self.categories = []
self.paging = False
self.pageno = 1
self.lang = 'all'
# set blocked engines
if request.cookies.get('blocked_engines'):
self.blocked_engines = request.cookies['blocked_engines'].split(',') # noqa
else:
self.blocked_engines = []
self.results = []
self.suggestions = []
self.request_data = {}
# set specific language if set
if request.cookies.get('language')\
and request.cookies['language'] in (x[0] for x in language_codes):
self.lang = request.cookies['language']
# set request method
if request.method == 'POST':
self.request_data = request.form
else:
self.request_data = request.args
# TODO better exceptions
if not self.request_data.get('q'):
raise Exception('noquery')
# set query
self.query = self.request_data['q']
# set pagenumber
pageno_param = self.request_data.get('pageno', '1')
if not pageno_param.isdigit() or int(pageno_param) < 1:
raise Exception('wrong pagenumber')
self.pageno = int(pageno_param)
        # parse the query for tags that change the search engine or language
self.parse_query()
self.categories = []
        # if engines were selected by the query, derive the categories from
        # those engines
if self.engines:
self.categories = list(set(engine['category']
for engine in self.engines))
        # otherwise, use the selected categories to determine the engines
else:
# set used categories
for pd_name, pd in self.request_data.items():
if pd_name.startswith('category_'):
category = pd_name[9:]
# if category is not found in list, skip
                    if category not in categories:
continue
# add category to list
self.categories.append(category)
            # if no category is specified, use the user's default
            # configuration stored in the cookie
if not self.categories:
cookie_categories = request.cookies.get('categories', '')
cookie_categories = cookie_categories.split(',')
for ccateg in cookie_categories:
if ccateg in categories:
self.categories.append(ccateg)
            # if still no category is specified, use 'general' as default
if not self.categories:
self.categories = ['general']
            # use every non-blocked engine declared under these categories
for categ in self.categories:
self.engines.extend({'category': categ,
'name': x.name}
for x in categories[categ]
                                    if x.name not in self.blocked_engines)

    # parse the query for tags that change the search engine or language
def parse_query(self):
query_parts = self.query.split()
modified = False
# check if language-prefix is set
if query_parts[0].startswith(':'):
lang = query_parts[0][1:].lower()
            # compare it against the declared language codes
for lc in language_codes:
lang_id, lang_name, country = map(str.lower, lc)
# if correct language-code is found, set it as new search-language
if lang == lang_id\
or lang_id.startswith(lang)\
or lang == lang_name\
or lang == country:
self.lang = lang
modified = True
break
# check if category/engine prefix is set
elif query_parts[0].startswith('!'):
prefix = query_parts[0][1:].replace('_', ' ')
            # check if the prefix matches an engine shortcut
            if prefix in engine_shortcuts\
                    and engine_shortcuts[prefix] not in self.blocked_engines:
modified = True
self.engines.append({'category': 'none',
'name': engine_shortcuts[prefix]})
            # check if the prefix matches an engine name
            elif prefix in engines\
                    and prefix not in self.blocked_engines:
modified = True
self.engines.append({'category': 'none',
'name': prefix})
            # check if the prefix matches a category name
elif prefix in categories:
modified = True
                # use every non-blocked engine declared under that category
self.engines.extend({'category': prefix,
'name': engine.name}
for engine in categories[prefix]
                                    if engine.name not in self.blocked_engines)  # noqa
        # if a language, category or engine was specified, check the rest of
        # the query for further tags
if modified:
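            # only the first prefix was handled; strip it and recurse so
            # that several prefixes can be chained, e.g. ':de !ddg query'
            # (illustrative query)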
self.query = self.query.replace(query_parts[0], '', 1).strip()
self.parse_query()

    # do the search request
def search(self, request):
global number_of_searches
# init vars
requests = []
results = {}
suggestions = set()
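        # `results` maps each engine name to its parsed result list and
        # `suggestions` is shared by all callbacks, so duplicate suggestions
        # collapse into the set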
# increase number of searches
number_of_searches += 1
# set default useragent
#user_agent = request.headers.get('User-Agent', '')
user_agent = gen_useragent()
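        # one generated User-Agent is shared by all engine requests of this
        # search instead of forwarding the client's own (see the commented
        # line above)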
        # start a search request for every selected engine
for selected_engine in self.engines:
if selected_engine['name'] not in engines:
continue
engine = engines[selected_engine['name']]
# if paging is not supported, skip
if self.pageno > 1 and not engine.paging:
continue
            # skip engines without language support if a language is set
if self.lang != 'all' and not engine.language_support:
continue
# set default request parameters
request_params = default_request_params()
request_params['headers']['User-Agent'] = user_agent
request_params['category'] = selected_engine['category']
request_params['started'] = datetime.now()
request_params['pageno'] = self.pageno
request_params['language'] = self.lang
            # let the engine (see the engines folder) fill in its specific
            # request parameters
request_params = engine.request(self.query.encode('utf-8'),
request_params)
if request_params['url'] is None:
# TODO add support of offline engines
pass
# create a callback wrapper for the search engine results
callback = make_callback(
selected_engine['name'],
results,
suggestions,
engine.response,
request_params
)
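
            # registering the callback as a grequests response hook (below)
            # lets each engine deliver its parsed results asynchronously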
            # collect all arguments of the outgoing request
request_args = dict(
headers=request_params['headers'],
hooks=dict(response=callback),
cookies=request_params['cookies'],
timeout=engine.timeout
)
# specific type of request (GET or POST)
if request_params['method'] == 'GET':
req = grequests.get
else:
req = grequests.post
request_args['data'] = request_params['data']
# ignoring empty urls
if not request_params['url']:
continue
# append request to list
requests.append(req(request_params['url'], **request_args))
        # send all search requests; grequests.map runs them concurrently and
        # returns once every response hook (process_callback) has filled
        # `results` and `suggestions`
grequests.map(requests)
# update engine-specific stats
for engine_name, engine_results in results.items():
engines[engine_name].stats['search_count'] += 1
engines[engine_name].stats['result_count'] += len(engine_results)
# score results and remove duplications
results = score_results(results)
# update engine stats, using calculated score
for result in results:
for res_engine in result['engines']:
                # credit every engine that returned this result
                engines[res_engine].stats['score_count'] += result['score']
# return results and suggestions
return results, suggestions