2013-11-08 22:44:26 +00:00
|
|
|
from HTMLParser import HTMLParser
|
2013-11-18 15:47:20 +00:00
|
|
|
#import htmlentitydefs
|
2013-11-15 17:55:18 +00:00
|
|
|
import csv
|
2014-01-20 01:31:20 +00:00
|
|
|
from codecs import getincrementalencoder
|
2013-11-15 17:55:18 +00:00
|
|
|
import cStringIO
|
2014-01-10 22:38:08 +00:00
|
|
|
import re
|
|
|
|
|
2014-01-19 21:59:01 +00:00
|
|
|
|
2014-01-18 20:53:59 +00:00
|
|
|
def gen_useragent():
    """Return a User-Agent header value imitating a desktop Firefox browser."""
    # TODO -- pick from a pool of real, current browser user agents
    return "Mozilla/5.0 (X11; Linux x86_64; rv:26.0) Gecko/20100101 Firefox/26.0"
|
2014-01-12 19:13:14 +00:00
|
|
|
|
2014-01-19 21:59:01 +00:00
|
|
|
|
2014-01-10 22:38:08 +00:00
|
|
|
def highlight_content(content, query):
    """Wrap matches of *query* inside *content* with ``<b>...</b>`` tags.

    content -- unicode text snippet to highlight; returned untouched when it
               appears to contain HTML markup, since inserting tags blindly
               could corrupt it. Returns None when content is empty/falsy.
    query -- the search query; a UTF-8 encoded byte string (Python 2),
             decoded to unicode before matching.
    """
    if not content:
        return None
    # ignoring html contents
    # TODO better html content detection
    if content.find('<') != -1:
        return content

    # Python 2: query arrives as a UTF-8 byte string; work in unicode.
    query = query.decode('utf-8')
    if content.lower().find(query.lower()) > -1:
        # The whole query occurs verbatim (case-insensitively): bold it
        # as a single span.
        query_regex = u'({0})'.format(re.escape(query))
        content = re.sub(query_regex, '<b>\\1</b>', content, flags=re.I | re.U)
    else:
        # Otherwise highlight each whitespace-separated query word on its own.
        regex_parts = []
        for chunk in query.split():
            if len(chunk) == 1:
                # Single characters only match as standalone tokens.
                # NOTE(review): the surrounding \W+ runs are captured too, so
                # adjacent punctuation/spaces end up inside the <b> tags, and
                # a chunk at the very start/end of content will not match --
                # confirm this is the intended behavior.
                regex_parts.append(u'\W+{0}\W+'.format(re.escape(chunk)))
            else:
                regex_parts.append(u'{0}'.format(re.escape(chunk)))
        query_regex = u'({0})'.format('|'.join(regex_parts))
        content = re.sub(query_regex, '<b>\\1</b>', content, flags=re.I | re.U)

    return content
|
2013-11-08 22:44:26 +00:00
|
|
|
|
2014-01-19 21:59:01 +00:00
|
|
|
|
2013-11-08 22:44:26 +00:00
|
|
|
class HTMLTextExtractor(HTMLParser):
    """HTML parser that discards markup and accumulates only textual content.

    Feed it HTML via ``feed()`` and read the collected text back with
    ``get_text()``.
    """

    def __init__(self):
        HTMLParser.__init__(self)
        # Text fragments gathered during parsing, joined by get_text().
        self.result = []

    def handle_data(self, d):
        # Plain character data between tags is kept as-is.
        self.result.append(d)

    def handle_charref(self, number):
        # Numeric character reference: "&#xNN;" / "&#XNN;" is hexadecimal,
        # "&#NN;" is decimal.
        is_hex = number[0] in (u'x', u'X')
        codepoint = int(number[1:], 16) if is_hex else int(number)
        self.result.append(unichr(codepoint))

    def handle_entityref(self, name):
        # NOTE(review): the bare entity name is appended, so "&amp;" becomes
        # "amp" -- the "&" and ";" delimiters are dropped rather than the
        # entity being resolved to its character. Confirm this is intended.
        self.result.append(name)

    def get_text(self):
        # Join all collected fragments into one unicode string.
        return u''.join(self.result)
|
|
|
|
|
2014-01-19 21:59:01 +00:00
|
|
|
|
2013-11-08 22:44:26 +00:00
|
|
|
def html_to_text(html):
    """Strip all markup from *html* and return only its textual content."""
    extractor = HTMLTextExtractor()
    extractor.feed(html)
    return extractor.get_text()
|
2013-11-15 17:55:18 +00:00
|
|
|
|
|
|
|
|
|
|
|
class UnicodeWriter:
    """
    A CSV writer which will write rows to CSV file "f",
    which is encoded in the given encoding.

    Python 2's csv module cannot handle unicode directly, so each row is
    first written UTF-8-encoded to an in-memory queue, then re-encoded to
    the requested target encoding and flushed to the real stream.
    """

    def __init__(self, f, dialect=csv.excel, encoding="utf-8", **kwds):
        # Redirect output to a queue
        self.queue = cStringIO.StringIO()
        self.writer = csv.writer(self.queue, dialect=dialect, **kwds)
        self.stream = f
        # Incremental encoder for the target encoding of the real stream.
        self.encoder = getincrementalencoder(encoding)()

    def writerow(self, row):
        """Encode and write a single row; cells may be unicode, str or other.

        Non-string cells (numbers, None, ...) are passed through to the csv
        writer unchanged.
        """
        encoded_row = []
        for col in row:
            if isinstance(col, unicode):
                # Encode unicode cells to UTF-8 for the intermediate queue.
                encoded_row.append(col.encode('utf-8').strip())
            elif isinstance(col, str):
                # Py2 byte strings are assumed to already be UTF-8.
                # The previous code called col.encode('utf-8') here too,
                # which implicitly ascii-decodes a str first and raises
                # UnicodeDecodeError for any non-ASCII bytes.
                encoded_row.append(col.strip())
            else:
                encoded_row.append(col)
        self.writer.writerow(encoded_row)
        # Fetch UTF-8 output from the queue ...
        data = self.queue.getvalue()
        data = data.decode("utf-8")
        # ... and reencode it into the target encoding
        data = self.encoder.encode(data)
        # write to the target stream
        self.stream.write(data)
        # empty queue
        self.queue.truncate(0)

    def writerows(self, rows):
        """Write every row in *rows* via writerow()."""
        for row in rows:
            self.writerow(row)
|