import sys
import os

sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
import scrape
from bs4 import BeautifulSoup
import parsedatetime as pdt
from time import mktime
import feedparser
import abc
import unicodedata
import re
import dateutil
from dateutil.parser import *
from datetime import *
import codecs
import difflib

from StringIO import StringIO

# Needed by GenericDOCXDisclogScraper below; assumes the legacy docx.py
# module (pre python-docx 0.3), which exposes getdocumenttext() and parses
# word/document.xml with lxml.
import zipfile
from lxml import etree
from docx import getdocumenttext

# Old pdfminer API (process_pdf was removed in later pdfminer releases).
from pdfminer.pdfparser import PDFDocument, PDFParser
from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter, process_pdf
from pdfminer.pdfdevice import PDFDevice, TagExtractor
from pdfminer.converter import TextConverter
from pdfminer.cmapdb import CMapDB
from pdfminer.layout import LAParams

class GenericDisclogScraper(object):
    __metaclass__ = abc.ABCMeta
    agencyID = None
    disclogURL = None

    def remove_control_chars(self, input):
        # keep printable ASCII only (this also drops any non-ASCII characters)
        return "".join([i for i in input if ord(i) in range(32, 127)])

    def getAgencyID(self):
        """ disclosr agency id """
        if self.agencyID is None:
            # default to the name of the calling script: fooscraper.py -> fooscraper
            self.agencyID = os.path.basename(sys.argv[0]).replace(".py", "")
        return self.agencyID

    def getURL(self):
        """ disclog URL """
        if self.disclogURL is None:
            # look the URL up from the agency record in the scrape database
            agency = scrape.agencydb.get(self.getAgencyID())
            self.disclogURL = agency['FOIDocumentsURL']
        return self.disclogURL

    @abc.abstractmethod
    def doScrape(self):
        """ do the scraping """
        return
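
# Each concrete scraper below implements doScrape(): fetch the disclosure
# log, derive a stable hash for what was found, and save a document into the
# disclosr-foidocuments CouchDB database only when that hash is unseen.
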
class GenericHTMLDisclogScraper(GenericDisclogScraper):

    def doScrape(self):
        foidocsdb = scrape.couch['disclosr-foidocuments']
        (url, mime_type, rcontent) = scrape.fetchURL(scrape.docsdb,
            self.getURL(), "foidocuments", self.getAgencyID())
        content = rcontent
        dochash = scrape.mkhash(content)
        doc = foidocsdb.get(dochash)
        if doc is None:
            print "saving " + dochash
            description = "This log may have updated but as it was not in a table last time we viewed it, we cannot extract what has changed. Please refer to the agency's website Disclosure Log to see the most recent entries"
            last_attach = scrape.getLastAttachment(scrape.docsdb, self.getURL())
            diff = ""  # no previous copy of the page to compare against
            if last_attach is not None:
                html_diff = difflib.HtmlDiff()
                diff = html_diff.make_table(last_attach.read().split('\n'),
                    content.split('\n'))
            edate = date.today().strftime("%Y-%m-%d")
            doc = {'_id': dochash, 'agencyID': self.getAgencyID(),
                'url': self.getURL(), 'docID': dochash,
                "date": edate, "title": "Disclosure Log Updated",
                "description": self.remove_control_chars(description),
                "diff": self.remove_control_chars(diff)}
            foidocsdb.save(doc)
        else:
            print "already saved"

class GenericPDFDisclogScraper(GenericDisclogScraper):

    def doScrape(self):
        foidocsdb = scrape.couch['disclosr-foidocuments']
        (url, mime_type, content) = scrape.fetchURL(scrape.docsdb,
            self.getURL(), "foidocuments", self.getAgencyID())
        laparams = LAParams()
        rsrcmgr = PDFResourceManager(caching=True)
        outfp = StringIO()
        device = TextConverter(rsrcmgr, outfp, codec='utf-8',
            laparams=laparams)
        # buffer the fetched bytes so pdfminer can read them like a file
        fp = StringIO(content)
        process_pdf(rsrcmgr, device, fp, set(), caching=True,
            check_extractable=True)
        description = outfp.getvalue()
        fp.close()
        device.close()
        outfp.close()
        dochash = scrape.mkhash(description)
        doc = foidocsdb.get(dochash)
        if doc is None:
            print "saving " + dochash
            edate = date.today().strftime("%Y-%m-%d")
            doc = {'_id': dochash, 'agencyID': self.getAgencyID(),
                'url': self.getURL(), 'docID': dochash,
                "date": edate, "title": "Disclosure Log Updated",
                "description": self.remove_control_chars(description)}
            foidocsdb.save(doc)
        else:
            print "already saved"

class GenericDOCXDisclogScraper(GenericDisclogScraper):

    def doScrape(self):
        foidocsdb = scrape.couch['disclosr-foidocuments']
        (url, mime_type, content) = scrape.fetchURL(scrape.docsdb,
            self.getURL(), "foidocuments", self.getAgencyID())
        # a .docx file is a zip archive; the body text lives in word/document.xml
        mydoc = zipfile.ZipFile(StringIO(content))
        xmlcontent = mydoc.read('word/document.xml')
        document = etree.fromstring(xmlcontent)
        # fetch all the text out of the document we just parsed
        paratextlist = getdocumenttext(document)
        # make an explicit utf-8 encoded version
        newparatextlist = []
        for paratext in paratextlist:
            newparatextlist.append(paratext.encode("utf-8"))
        # join the document's text with a blank line under each paragraph
        description = '\n\n'.join(newparatextlist).strip(' \t\n\r')
        dochash = scrape.mkhash(description)
        doc = foidocsdb.get(dochash)

        if doc is None:
            print "saving " + dochash
            edate = date.today().strftime("%Y-%m-%d")
            doc = {'_id': dochash, 'agencyID': self.getAgencyID(),
                'url': self.getURL(), 'docID': dochash,
                "date": edate, "title": "Disclosure Log Updated",
                "description": description}
            foidocsdb.save(doc)
        else:
            print "already saved"

class GenericRSSDisclogScraper(GenericDisclogScraper):

    def doScrape(self):
        foidocsdb = scrape.couch['disclosr-foidocuments']
        (url, mime_type, content) = scrape.fetchURL(scrape.docsdb,
            self.getURL(), "foidocuments", self.getAgencyID())
        feed = feedparser.parse(content)
        for entry in feed.entries:
            #print entry
            print entry.id
            dochash = scrape.mkhash(entry.id)
            doc = foidocsdb.get(dochash)
            #print doc
            if doc is None:
                print "saving " + dochash
                edate = datetime.fromtimestamp(
                    mktime(entry.published_parsed)).strftime("%Y-%m-%d")
                doc = {'_id': dochash, 'agencyID': self.getAgencyID(),
                    'url': entry.link, 'docID': entry.id,
                    "date": edate, "title": entry.title}
                self.getDescription(entry, entry, doc)
                foidocsdb.save(doc)
            else:
                print "already saved"

    def getDescription(self, content, entry, doc):
        """ get description from rss entry"""
        doc.update({'description': content.summary})
        return

class GenericOAICDisclogScraper(GenericDisclogScraper):
    __metaclass__ = abc.ABCMeta

    @abc.abstractmethod
    def getColumns(self, columns):
        """ rearranges columns if required """
        return

    def getColumnCount(self):
        return 5

    def getDescription(self, content, entry, doc):
        """ get description from a table cell """
        descriptiontxt = ""
        for string in content.stripped_strings:
            descriptiontxt = descriptiontxt + " \n" + string
        doc.update({'description': descriptiontxt})

    def getTitle(self, content, entry, doc):
        doc.update({'title': (''.join(content.stripped_strings))})

    def getTable(self, soup):
        return soup.table

    def getRows(self, table):
        return table.find_all('tr')

    def getDate(self, content, entry, doc):
        strdate = ''.join(content.stripped_strings).strip()
        (a, b, c) = strdate.partition("(")
        # patch up misspellings and typos commonly seen in agency logs
        strdate = self.remove_control_chars(a.replace("Octber", "October")
            .replace("Janrurary", "January").replace("1012", "2012"))
        print strdate
        try:
            edate = parse(strdate, dayfirst=True, fuzzy=True).strftime("%Y-%m-%d")
        except ValueError:
            print >> sys.stderr, "ERROR date invalid %s " % strdate
            print >> sys.stderr, "ERROR date originally %s " % ''.join(content.stripped_strings).strip()
            edate = date.today().strftime("%Y-%m-%d")
        print edate
        doc.update({'date': edate})
        return

    def getLinks(self, content, entry, doc):
        links = []
        for atag in entry.find_all("a"):
            if atag.has_attr('href'):
                links.append(scrape.fullurl(content, atag['href']))
        if links != []:
            doc.update({'links': links})
        return
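
    # The hooks above (getTable, getRows, getColumnCount, getColumns) exist
    # so per-agency subclasses can adapt to non-standard disclosure-log
    # markup. A minimal sketch (hypothetical agency, for illustration only):
    #
    #     class ExampleScraper(GenericOAICDisclogScraper):
    #         def getTable(self, soup):
    #             # the log lives in the second table on the page
    #             return soup.find_all('table')[1]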
|
|
|
    def doScrape(self):
        foidocsdb = scrape.couch['disclosr-foidocuments']
        (url, mime_type, content) = scrape.fetchURL(scrape.docsdb,
            self.getURL(), "foidocuments", self.getAgencyID())
        if content is not None:
            if mime_type == "text/html" or mime_type == "application/xhtml+xml" or mime_type == "application/xml":
                # http://www.crummy.com/software/BeautifulSoup/documentation.html
                print "parsing"
                soup = BeautifulSoup(content)
                table = self.getTable(soup)
                for row in self.getRows(table):
                    columns = row.find_all('td')
                    if len(columns) == self.getColumnCount():
                        (id, date, title,
                            description, notes) = self.getColumns(columns)
                        print self.remove_control_chars(
                            ''.join(id.stripped_strings))
                        # prefer the agency's own document id for hashing;
                        # fall back to the date cell when no id is given
                        if id.string is None:
                            dochash = scrape.mkhash(
                                self.remove_control_chars(
                                    url + (''.join(date.stripped_strings))))
                        else:
                            dochash = scrape.mkhash(
                                self.remove_control_chars(
                                    url + (''.join(id.stripped_strings))))
                        doc = foidocsdb.get(dochash)

                        if doc is None:
                            print "saving " + dochash
                            doc = {'_id': dochash,
                                'agencyID': self.getAgencyID(),
                                'url': self.getURL(),
                                'docID': (''.join(id.stripped_strings))}
                            self.getLinks(self.getURL(), row, doc)
                            self.getTitle(title, row, doc)
                            self.getDate(date, row, doc)
                            self.getDescription(description, row, doc)
                            if notes is not None:
                                doc.update({'notes': (
                                    ''.join(notes.stripped_strings))})
                            # skip rows whose "title" is really a column label
                            badtitles = ['-', 'Summary of FOI Request',
                                'FOI request(in summary form)',
                                'Summary of FOI request received by the ASC',
                                'Summary of FOI request received by agency/minister',
                                'Description of Documents Requested',
                                'FOI request', 'Description of FOI Request',
                                'Summary of request', 'Description', 'Summary',
                                'Summary of FOIrequest received by agency/minister',
                                'Summary of FOI request received',
                                'Results 1 to 67 of 67']
                            if doc['title'] not in badtitles and 'description' in doc.keys() and doc['description'] != '':
                                print "saving"
                                foidocsdb.save(doc)
                        else:
                            print "already saved " + dochash

                    elif len(row.find_all('th')) == self.getColumnCount():
                        print "header row"

                    else:
                        print >> sys.stderr, "ERROR number of columns incorrect"
                        print row
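
# Typical use from a per-agency scraper script (a sketch; assumes this module
# is importable as genericScrapers, and the agency id shown is hypothetical):
#
#     import genericScrapers
#
#     class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
#         def getColumns(self, columns):
#             (id, date, title, description, notes) = columns
#             return (id, date, title, description, notes)
#
#     if __name__ == '__main__':
#         ScraperImplementation().doScrape()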
|
|