import sys
import os

sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
import scrape
from bs4 import BeautifulSoup
from time import mktime
import feedparser
import abc
import unicodedata
import re
import dateutil
from dateutil.parser import *
from datetime import *
import codecs

import difflib

from StringIO import StringIO

from pdfminer.pdfparser import PDFDocument, PDFParser
from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter, process_pdf
from pdfminer.pdfdevice import PDFDevice, TagExtractor
from pdfminer.converter import TextConverter
from pdfminer.cmapdb import CMapDB
from pdfminer.layout import LAParams

# Imports needed by GenericDOCXDisclogScraper below (assumptions: lxml for the
# XML parsing and the legacy python-docx module, which exposes getdocumenttext()).
import zipfile
from lxml import etree
from docx import getdocumenttext


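# Abstract base class shared by the disclosure log scrapers below: the agency
# id defaults to the scraper's own filename and the disclosure log URL is
# looked up from the disclosr agency database (scrape.agencydb).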
class GenericDisclogScraper(object):
    __metaclass__ = abc.ABCMeta
    agencyID = None
    disclogURL = None

    def remove_control_chars(self, input):
        return "".join([i for i in input if ord(i) in range(32, 127)])

    def getAgencyID(self):
        """ disclosr agency id """
        if self.agencyID is None:
            self.agencyID = os.path.basename(sys.argv[0]).replace(".py", "")
        return self.agencyID

    def getURL(self):
        """ disclog URL """
        if self.disclogURL is None:
            agency = scrape.agencydb.get(self.getAgencyID())
            self.disclogURL = agency['FOIDocumentsURL']
        return self.disclogURL

    @abc.abstractmethod
    def doScrape(self):
        """ do the scraping """
        return


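# Fallback scraper for disclosure logs published as free-form HTML rather than
# a table: the record is keyed on a hash of the whole page and stores an HTML
# diff against the previously fetched copy, since individual entries cannot be
# extracted.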
class GenericHTMLDisclogScraper(GenericDisclogScraper):
    def doScrape(self):
        foidocsdb = scrape.couch['disclosr-foidocuments']
        (url, mime_type, rcontent) = scrape.fetchURL(scrape.docsdb,
            self.getURL(), "foidocuments", self.getAgencyID())
        content = rcontent
        dochash = scrape.mkhash(content)
        doc = foidocsdb.get(dochash)
        if doc is None:
            print "saving " + dochash
            description = "This log may have updated but as it was not in a table last time we viewed it, we cannot extract what has changed. Please refer to the agency's website Disclosure Log to see the most recent entries"
            last_attach = scrape.getLastAttachment(scrape.docsdb, self.getURL())
            # default to an empty diff so the save below works on the first fetch
            diff = ""
            if last_attach is not None:
                html_diff = difflib.HtmlDiff()
                diff = html_diff.make_table(last_attach.read().split('\n'),
                    content.split('\n'))
            edate = date.today().strftime("%Y-%m-%d")
            doc = {'_id': dochash, 'agencyID': self.getAgencyID()
                , 'url': self.getURL(), 'docID': dochash,
                "date": edate, "title": "Disclosure Log Updated",
                "description": self.remove_control_chars(description),
                "diff": self.remove_control_chars(diff)}
            foidocsdb.save(doc)
        else:
            print "already saved"


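# Scraper for disclosure logs published as a PDF: the text is extracted with
# pdfminer and saved as a single "Disclosure Log Updated" record keyed on a
# hash of the extracted text.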
class GenericPDFDisclogScraper(GenericDisclogScraper):
    def doScrape(self):
        foidocsdb = scrape.couch['disclosr-foidocuments']
        (url, mime_type, content) = scrape.fetchURL(scrape.docsdb,
            self.getURL(), "foidocuments", self.getAgencyID())
        laparams = LAParams()
        rsrcmgr = PDFResourceManager(caching=True)
        outfp = StringIO()
        device = TextConverter(rsrcmgr, outfp, codec='utf-8',
            laparams=laparams)
        fp = StringIO()
        fp.write(content)

        process_pdf(rsrcmgr, device, fp, set(), caching=True,
            check_extractable=True)
        description = outfp.getvalue()
        fp.close()
        device.close()
        outfp.close()
        dochash = scrape.mkhash(description)
        doc = foidocsdb.get(dochash)
        if doc is None:
            print "saving " + dochash
            edate = date.today().strftime("%Y-%m-%d")
            doc = {'_id': dochash, 'agencyID': self.getAgencyID()
                , 'url': self.getURL(), 'docID': dochash,
                "date": edate, "title": "Disclosure Log Updated",
                "description": self.remove_control_chars(description)}
            foidocsdb.save(doc)
        else:
            print "already saved"


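# Scraper for disclosure logs published as a Word (.docx) file: the paragraph
# text is read from word/document.xml and saved as a single record keyed on a
# hash of that text.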
class GenericDOCXDisclogScraper(GenericDisclogScraper):
    def doScrape(self):
        foidocsdb = scrape.couch['disclosr-foidocuments']
        (url, mime_type, content) = scrape.fetchURL(scrape.docsdb
            , self.getURL(), "foidocuments", self.getAgencyID())
        # wrap the fetched bytes so zipfile can read the .docx archive
        mydoc = zipfile.ZipFile(StringIO(content))
        xmlcontent = mydoc.read('word/document.xml')
        document = etree.fromstring(xmlcontent)
        ## Fetch all the text out of the document we just created
        paratextlist = getdocumenttext(document)
        # Make explicit unicode version
        newparatextlist = []
        for paratext in paratextlist:
            newparatextlist.append(paratext.encode("utf-8"))
        ## Join the document's text with two newlines under each paragraph
        description = '\n\n'.join(newparatextlist).strip(' \t\n\r')
        dochash = scrape.mkhash(description)
        doc = foidocsdb.get(dochash)

        if doc is None:
            print "saving " + dochash
            edate = date.today().strftime("%Y-%m-%d")
            doc = {'_id': dochash, 'agencyID': self.getAgencyID()
                , 'url': self.getURL(), 'docID': dochash,
                "date": edate, "title": "Disclosure Log Updated",
                "description": description}
            foidocsdb.save(doc)
        else:
            print "already saved"


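# Scraper for disclosure logs published as an RSS/Atom feed: one record is
# saved per feed entry, keyed on a hash of the entry id.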
class GenericRSSDisclogScraper(GenericDisclogScraper):
    def doScrape(self):
        foidocsdb = scrape.couch['disclosr-foidocuments']
        (url, mime_type, content) = scrape.fetchURL(scrape.docsdb,
            self.getURL(), "foidocuments", self.getAgencyID())
        feed = feedparser.parse(content)
        for entry in feed.entries:
            #print entry
            print entry.id
            dochash = scrape.mkhash(entry.id)
            doc = foidocsdb.get(dochash)
            #print doc
            if doc is None:
                print "saving " + dochash
                edate = datetime.fromtimestamp(
                    mktime(entry.published_parsed)).strftime("%Y-%m-%d")
                doc = {'_id': dochash, 'agencyID': self.getAgencyID(),
                    'url': entry.link, 'docID': entry.id,
                    "date": edate, "title": entry.title}
                self.getDescription(entry, entry, doc)
                foidocsdb.save(doc)
            else:
                print "already saved"

    def getDescription(self, content, entry, doc):
        """ get description from rss entry"""
        doc.update({'description': content.summary})
        return


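# Scraper for disclosure logs published as an HTML table: one record is saved
# per table row, and agency-specific subclasses describe their column layout
# by implementing getColumns().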
class GenericOAICDisclogScraper(GenericDisclogScraper):
    __metaclass__ = abc.ABCMeta

    @abc.abstractmethod
    def getColumns(self, columns):
        """ rearranges columns if required """
        return

    def getColumnCount(self):
        return 5

    def getDescription(self, content, entry, doc):
        """ get description from table cell """
        descriptiontxt = ""
        for string in content.stripped_strings:
            descriptiontxt = descriptiontxt + " \n" + string
        doc.update({'description': descriptiontxt})

    def getTitle(self, content, entry, doc):
        doc.update({'title': (''.join(content.stripped_strings))})

    def getTable(self, soup):
        return soup.table

    def getRows(self, table):
        return table.find_all('tr')

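    # Dates in agency tables are free text and frequently contain typos;
    # normalise the common misspellings before parsing and fall back to
    # today's date if the value still cannot be parsed.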
    def getDate(self, content, entry, doc):
        strdate = ''.join(content.stripped_strings).strip()
        (a, b, c) = strdate.partition("(")
        strdate = self.remove_control_chars(a.replace("Octber", "October")
            .replace("Janurary", "January").replace("Janrurary", "January")
            .replace("1012", "2012"))
        print strdate
        try:
            edate = parse(strdate, dayfirst=True, fuzzy=True).strftime("%Y-%m-%d")
        except ValueError:
            print >> sys.stderr, "ERROR date invalid %s " % strdate
            print >> sys.stderr, "ERROR date originally %s " % ''.join(content.stripped_strings).strip()
            edate = date.today().strftime("%Y-%m-%d")
        print edate
        doc.update({'date': edate})
        return

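    # Collect any hyperlinks found in the row, resolved against the disclosure
    # log URL.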
    def getLinks(self, content, entry, doc):
        links = []
        for atag in entry.find_all("a"):
            if atag.has_attr('href'):
                links.append(scrape.fullurl(content, atag['href']))
        if links != []:
            doc.update({'links': links})
        return

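    # Fetch the disclosure log page, walk the table row by row and save one
    # record in the disclosr-foidocuments database per FOI request, skipping
    # header rows and entries whose title is only a placeholder heading or
    # whose description is empty.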
    def doScrape(self):
        foidocsdb = scrape.couch['disclosr-foidocuments']
        (url, mime_type, content) = scrape.fetchURL(scrape.docsdb,
            self.getURL(), "foidocuments", self.getAgencyID())
        if content is not None:
            if mime_type == "text/html" or mime_type == "application/xhtml+xml" or mime_type == "application/xml":
                # http://www.crummy.com/software/BeautifulSoup/documentation.html
                print "parsing"
                soup = BeautifulSoup(content)
                table = self.getTable(soup)
                for row in self.getRows(table):
                    columns = row.find_all('td')
                    if len(columns) == self.getColumnCount():
                        (id, date, title,
                         description, notes) = self.getColumns(columns)
                        print self.remove_control_chars(
                            ''.join(id.stripped_strings))
                        if id.string is None:
                            dochash = scrape.mkhash(
                                self.remove_control_chars(
                                    url + (''.join(date.stripped_strings))))
                        else:
                            dochash = scrape.mkhash(
                                self.remove_control_chars(
                                    url + (''.join(id.stripped_strings))))
                        doc = foidocsdb.get(dochash)

                        if doc is None:
                            print "saving " + dochash
                            doc = {'_id': dochash,
                                'agencyID': self.getAgencyID(),
                                'url': self.getURL(),
                                'docID': (''.join(id.stripped_strings))}
                            self.getLinks(self.getURL(), row, doc)
                            self.getTitle(title, row, doc)
                            self.getDate(date, row, doc)
                            self.getDescription(description, row, doc)
                            if notes is not None:
                                doc.update({'notes': (
                                    ''.join(notes.stripped_strings))})
                            badtitles = ['-', 'Summary of FOI Request'
                                , 'FOI request(in summary form)'
                                , 'Summary of FOI request received by the ASC',
                                'Summary of FOI request received by agency/minister',
                                'Description of Documents Requested', 'FOI request',
                                'Description of FOI Request', 'Summary of request', 'Description', 'Summary',
                                'Summary of FOIrequest received by agency/minister',
                                'Summary of FOI request received', 'Description of FOI Request',
                                "FOI request", 'Results 1 to 67 of 67']
                            if doc['title'] not in badtitles and 'description' in doc.keys() and doc['description'] != '':
                                print "saving"
                                foidocsdb.save(doc)
                        else:
                            print "already saved " + dochash

                    elif len(row.find_all('th')) == self.getColumnCount():
                        print "header row"

                    else:
                        print >> sys.stderr, "ERROR number of columns incorrect"
                        print row

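
# A minimal sketch of how an agency-specific scraper would use this module,
# assuming it is importable as genericScrapers (the class name below is
# illustrative, not an actual scraper):
#
#   import genericScrapers
#
#   class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
#       def getColumns(self, columns):
#           # map the row's cells onto (id, date, title, description, notes)
#           (id, date, title, description, notes) = columns
#           return (id, date, title, description, notes)
#
#   if __name__ == '__main__':
#       ScraperImplementation().doScrape()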