2014-10-19 10:41:04 +00:00
|
|
|
# import htmlentitydefs
|
2015-01-11 12:26:40 +00:00
|
|
|
import locale
|
|
|
|
import dateutil.parser
|
|
|
|
import cStringIO
|
|
|
|
import csv
|
|
|
|
import os
|
|
|
|
import re
|
|
|
|
|
2014-01-20 01:31:20 +00:00
|
|
|
from codecs import getincrementalencoder
|
2014-04-24 23:46:40 +00:00
|
|
|
from HTMLParser import HTMLParser
|
|
|
|
from random import choice
|
|
|
|
|
2014-11-18 10:37:42 +00:00
|
|
|
from searx.version import VERSION_STRING
|
|
|
|
from searx import settings
|
2015-01-11 12:26:40 +00:00
|
|
|
from searx import logger
|
2014-11-18 10:37:42 +00:00
|
|
|
|
2015-01-11 12:26:40 +00:00
|
|
|
|
|
|
|
# Module-scoped child logger for searx.utils.
logger = logger.getChild('utils')

# Firefox release versions used when generating a randomized user agent.
ua_versions = ('33.0',
               '34.0',
               '35.0',
               '36.0',
               '37.0')

# Operating system tokens interpolated into the user agent template.
ua_os = ('Windows NT 6.3; WOW64',
         'X11; Linux x86_64',
         'X11; Linux x86')

# Firefox user agent template; {os} and {version} are filled by gen_useragent().
ua = "Mozilla/5.0 ({os}; rv:{version}) Gecko/20100101 Firefox/{version}"

# HTML tags whose text content is skipped by HTMLTextExtractor.
blocked_tags = ('script',
                'style')
|
|
|
|
|
2014-03-04 18:26:09 +00:00
|
|
|
|
2014-01-18 20:53:59 +00:00
|
|
|
def gen_useragent():
    """Return a randomized Firefox user agent string.

    A random OS token and Firefox version are picked from the
    module-level pools and substituted into the ``ua`` template.
    """
    os_string = choice(ua_os)
    version = choice(ua_versions)
    return ua.format(os=os_string, version=version)
|
2014-01-12 19:13:14 +00:00
|
|
|
|
2014-01-19 21:59:01 +00:00
|
|
|
|
2014-10-17 10:34:51 +00:00
|
|
|
def searx_useragent():
    """Return the user agent searx sends with its own outgoing requests."""
    suffix = settings['outgoing'].get('useragent_suffix', '')
    return 'searx/{searx_version} {suffix}'.format(
        searx_version=VERSION_STRING,
        suffix=suffix)
|
2014-10-19 10:41:04 +00:00
|
|
|
|
|
|
|
|
2014-01-10 22:38:08 +00:00
|
|
|
def highlight_content(content, query):
    """Wrap occurrences of *query* inside *content* in highlight spans.

    Returns ``None`` for empty content and the unmodified content when it
    looks like HTML (contains ``<``).  Otherwise, if the whole query occurs
    verbatim (case-insensitive) it is highlighted as one unit; else each
    word of the query is highlighted separately.  Matches are wrapped in
    ``<span class="highlight">...</span>``.
    """
    if not content:
        return None

    # ignoring html contents
    # TODO better html content detection
    if content.find('<') != -1:
        return content

    query = query.decode('utf-8')
    if content.lower().find(query.lower()) > -1:
        # whole query present: highlight it as a single unit
        query_regex = u'({0})'.format(re.escape(query))
    else:
        # highlight each query word on its own; single-character words
        # must stand alone (delimited by non-word characters)
        regex_parts = []
        for chunk in query.split():
            if len(chunk) == 1:
                regex_parts.append(u'\\W+{0}\\W+'.format(re.escape(chunk)))
            else:
                regex_parts.append(u'{0}'.format(re.escape(chunk)))
        query_regex = u'({0})'.format('|'.join(regex_parts))

    # single substitution point (the original duplicated this call in
    # both branches)
    return re.sub(query_regex, '<span class="highlight">\\1</span>',
                  content, flags=re.I | re.U)
|
2013-11-08 22:44:26 +00:00
|
|
|
|
2014-01-19 21:59:01 +00:00
|
|
|
|
2013-11-08 22:44:26 +00:00
|
|
|
class HTMLTextExtractor(HTMLParser):
    """HTML parser that accumulates the text content of a document.

    Text inside ``blocked_tags`` (script/style) is dropped; character
    references are decoded and entity references are appended by name.
    An unbalanced closing tag raises an ``Exception``.
    """

    def __init__(self):
        HTMLParser.__init__(self)
        # collected text fragments, joined by get_text()
        self.result = []
        # stack of currently open tag names
        self.tags = []

    def handle_starttag(self, tag, attrs):
        self.tags.append(tag)

    def handle_endtag(self, tag):
        # tolerate a stray closing tag on an empty stack
        if not self.tags:
            return
        if tag != self.tags[-1]:
            raise Exception("invalid html")
        self.tags.pop()

    def is_valid_tag(self):
        # text is kept unless the innermost open tag is blocked
        return not self.tags or self.tags[-1] not in blocked_tags

    def handle_data(self, d):
        if self.is_valid_tag():
            self.result.append(d)

    def handle_charref(self, number):
        if not self.is_valid_tag():
            return
        # numeric reference: hexadecimal when prefixed with x/X
        is_hex = number[0] in (u'x', u'X')
        codepoint = int(number[1:], 16) if is_hex else int(number)
        self.result.append(unichr(codepoint))

    def handle_entityref(self, name):
        # named entities are kept as their bare name
        if self.is_valid_tag():
            self.result.append(name)

    def get_text(self):
        return u''.join(self.result).strip()
|
2013-11-08 22:44:26 +00:00
|
|
|
|
2014-01-19 21:59:01 +00:00
|
|
|
|
2013-11-08 22:44:26 +00:00
|
|
|
def html_to_text(html):
    """Strip markup from *html* and return its plain-text content."""
    # collapse newlines and runs of whitespace into single spaces
    normalized = ' '.join(html.replace('\n', ' ').split())
    extractor = HTMLTextExtractor()
    extractor.feed(normalized)
    return extractor.get_text()
|
2013-11-15 17:55:18 +00:00
|
|
|
|
|
|
|
|
|
|
|
class UnicodeWriter:
    """
    A CSV writer which will write rows to CSV file "f",
    which is encoded in the given encoding.
    """

    def __init__(self, f, dialect=csv.excel, encoding="utf-8", **kwds):
        # Redirect output to a queue
        self.queue = cStringIO.StringIO()
        self.writer = csv.writer(self.queue, dialect=dialect, **kwds)
        self.stream = f
        # Stateful incremental encoder: re-encodes the UTF-8 queue output
        # into the target encoding across successive writerow() calls.
        self.encoder = getincrementalencoder(encoding)()

    def writerow(self, row):
        # Encode text cells to UTF-8 bytes first (the Python 2 csv module
        # cannot handle unicode objects directly); pass other cells through.
        # NOTE(review): encoding a byte str assumes it only holds ASCII —
        # non-ASCII str input would raise here; confirm against callers.
        unicode_row = []
        for col in row:
            if type(col) == str or type(col) == unicode:
                unicode_row.append(col.encode('utf-8').strip())
            else:
                unicode_row.append(col)
        self.writer.writerow(unicode_row)
        # Fetch UTF-8 output from the queue ...
        data = self.queue.getvalue()
        data = data.decode("utf-8")
        # ... and reencode it into the target encoding
        data = self.encoder.encode(data)
        # write to the target stream
        self.stream.write(data)
        # empty queue
        self.queue.truncate(0)

    def writerows(self, rows):
        # Write every row in *rows* through writerow().
        for row in rows:
            self.writerow(row)
|
2014-04-24 23:46:40 +00:00
|
|
|
|
|
|
|
|
|
|
|
def get_themes(root):
    """Return (static_path, templates_path, themes) for *root*.

    *themes* is the list of entries found below ``<root>/static/themes``.
    """
    static_path = os.path.join(root, 'static')
    templates_path = os.path.join(root, 'templates')
    theme_dir = os.path.join(static_path, 'themes')
    return static_path, templates_path, os.listdir(theme_dir)
|
2015-01-01 16:48:12 +00:00
|
|
|
|
|
|
|
|
|
|
|
def get_static_files(base_path):
    """Return the set of file paths found below ``<base_path>/static``,
    expressed relative to the static directory."""
    static_base = os.path.join(base_path, 'static')
    prefix_length = len(static_base) + 1
    static_files = set()
    for directory, _, files in os.walk(static_base):
        relative_dir = directory[prefix_length:]
        for filename in files:
            static_files.add(os.path.join(relative_dir, filename))
    return static_files
|
2015-01-01 17:59:53 +00:00
|
|
|
|
|
|
|
|
|
|
|
def get_result_templates(base_path):
    """Return the set of template paths below ``<base_path>/templates``
    that live inside a ``result_templates`` directory (paths relative to
    the templates directory)."""
    templates_base = os.path.join(base_path, 'templates')
    prefix_length = len(templates_base) + 1
    result_templates = set()
    for directory, _, files in os.walk(templates_base):
        if not directory.endswith('result_templates'):
            continue
        relative_dir = directory[prefix_length:]
        for filename in files:
            result_templates.add(os.path.join(relative_dir, filename))
    return result_templates
|
2015-01-11 12:26:40 +00:00
|
|
|
|
|
|
|
|
|
|
|
def format_date_by_locale(date_string, locale_string):
    """Parse *date_string* and format it per *locale_string*'s date format.

    Dates from 1900 or earlier are returned as an ISO date string because
    strftime only works on dates after 1900.  The process-wide locale is
    switched temporarily and restored afterwards; failures to switch are
    logged, not raised.
    """
    # strftime works only on dates after 1900
    parsed_date = dateutil.parser.parse(date_string)
    if parsed_date.year <= 1900:
        return parsed_date.isoformat().split('T')[0]

    orig_locale = locale.getlocale()[0]
    try:
        locale.setlocale(locale.LC_ALL, locale_string)
    # narrowed from a bare except: setlocale raises locale.Error on an
    # unknown/unsupported locale; a bare except would also swallow
    # SystemExit/KeyboardInterrupt
    except locale.Error:
        logger.warning('cannot set locale: {0}'.format(locale_string))
    formatted_date = parsed_date.strftime(locale.nl_langinfo(locale.D_FMT))
    try:
        locale.setlocale(locale.LC_ALL, orig_locale)
    except locale.Error:
        logger.warning('cannot set original locale: {0}'.format(orig_locale))
    return formatted_date
|
2015-01-17 20:54:40 +00:00
|
|
|
|
|
|
|
|
|
|
|
def dict_subset(d, properties):
    """Return a new dict containing only the keys from *properties*
    that are present in *d*."""
    return {key: d[key] for key in properties if key in d}
|
2015-01-29 18:44:52 +00:00
|
|
|
|
|
|
|
|
|
|
|
def prettify_url(url):
    """Shorten long URLs for display: keep the first and last 35
    characters with "[...]" in between; short URLs pass through."""
    if len(url) <= 74:
        return url
    return u'{0}[...]{1}'.format(url[:35], url[-35:])
|
2015-01-31 22:11:45 +00:00
|
|
|
|
|
|
|
|
2015-06-04 16:30:08 +00:00
|
|
|
# get element in list or default value
|
|
|
|
def list_get(a_list, index, default=None):
    """Return ``a_list[index]`` when the list is long enough,
    otherwise *default*."""
    return a_list[index] if len(a_list) > index else default
|
|
|
|
|
|
|
|
|
2015-01-31 22:11:45 +00:00
|
|
|
def get_blocked_engines(engines, cookies):
    """Return the list of (engine_name, category) pairs the user blocked.

    Without a 'blocked_engines' cookie, every engine disabled in the
    settings counts as blocked (once per category it belongs to).  The
    cookie is a comma-separated list of either plain engine names (all
    categories blocked) or 'engine__category' pairs (one category).
    Unknown engines/categories are ignored.
    """
    if 'blocked_engines' not in cookies:
        return [(engine_name, category) for engine_name in engines
                for category in engines[engine_name].categories if engines[engine_name].disabled]

    # str.split(',') always yields at least one element (possibly ''),
    # so the original "empty list" early-return was unreachable and has
    # been removed; '' never matches an engine name below.
    blocked_engine_strings = cookies.get('blocked_engines', '').split(',')
    blocked_engines = []

    for engine_string in blocked_engine_strings:
        if engine_string.find('__') > -1:
            # 'engine__category': block a single category of the engine
            engine, category = engine_string.split('__', 1)
            if engine in engines and category in engines[engine].categories:
                blocked_engines.append((engine, category))
        elif engine_string in engines:
            # plain engine name: block the engine in all its categories
            for category in engines[engine_string].categories:
                blocked_engines.append((engine_string, category))

    return blocked_engines
|