refactor description parsing
[disclosr.git] / documents / genericScrapers.py
blob:a/documents/genericScrapers.py -> blob:b/documents/genericScrapers.py
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
import scrape
from bs4 import BeautifulSoup
from time import mktime
import feedparser
import abc
import unicodedata
import re
import dateutil
from dateutil.parser import *
from datetime import *
import codecs
import zipfile  # used by GenericDOCXDisclogScraper
from lxml import etree  # used by GenericDOCXDisclogScraper
from docx import getdocumenttext  # legacy python-docx helper (assumed dependency)

import difflib

from StringIO import StringIO

from pdfminer.pdfparser import PDFDocument, PDFParser
from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter, process_pdf
from pdfminer.pdfdevice import PDFDevice, TagExtractor
from pdfminer.converter import TextConverter
from pdfminer.cmapdb import CMapDB
from pdfminer.layout import LAParams
   
   
class GenericDisclogScraper(object):
    __metaclass__ = abc.ABCMeta
    agencyID = None
    disclogURL = None

    def remove_control_chars(self, input):
        return "".join([i for i in input if ord(i) in range(32, 127)])

    def getAgencyID(self):
        """ disclosr agency id """
        if self.agencyID is None:
            self.agencyID = os.path.basename(sys.argv[0]).replace(".py", "")
        return self.agencyID

    def getURL(self):
        """ disclog URL"""
        if self.disclogURL is None:
            agency = scrape.agencydb.get(self.getAgencyID())
            self.disclogURL = agency['FOIDocumentsURL']
        return self.disclogURL

    @abc.abstractmethod
    def doScrape(self):
        """ do the scraping """
        return

class GenericHTMLDisclogScraper(GenericDisclogScraper):

    def doScrape(self):
        foidocsdb = scrape.couch['disclosr-foidocuments']
        (url, mime_type, rcontent) = scrape.fetchURL(scrape.docsdb,
            self.getURL(), "foidocuments", self.getAgencyID())
        content = rcontent.read()
        dochash = scrape.mkhash(content)
        doc = foidocsdb.get(dochash)
        if doc is None:
            print "saving " + dochash
            description = "This log may have updated but as it was not in a table last time we viewed it, we cannot extract what has changed. Please refer to the agency's website Disclosure Log to see the most recent entries"
            last_attach = scrape.getLastAttachment(scrape.docsdb, self.getURL())
            if last_attach is not None:
                html_diff = difflib.HtmlDiff()
                description = description + "\nChanges: "
                description = description + html_diff.make_table(
                    last_attach.read().split('\n'), content.split('\n'))
            edate = date.today().strftime("%Y-%m-%d")
            doc = {'_id': dochash, 'agencyID': self.getAgencyID(),
                'url': self.getURL(), 'docID': dochash,
                "date": edate, "title": "Disclosure Log Updated",
                "description": description}
            foidocsdb.save(doc)
        else:
            print "already saved"
   
class GenericPDFDisclogScraper(GenericDisclogScraper):

    def doScrape(self):
        foidocsdb = scrape.couch['disclosr-foidocuments']
        (url, mime_type, content) = scrape.fetchURL(scrape.docsdb,
            self.getURL(), "foidocuments", self.getAgencyID())
        laparams = LAParams()
        rsrcmgr = PDFResourceManager(caching=True)
        outfp = StringIO()
        device = TextConverter(rsrcmgr, outfp, codec='utf-8',
            laparams=laparams)
        fp = StringIO()
        fp.write(content.read())

        process_pdf(rsrcmgr, device, fp, set(), caching=True,
            check_extractable=True)
        description = outfp.getvalue()
        fp.close()
        device.close()
        outfp.close()
        dochash = scrape.mkhash(description)
        doc = foidocsdb.get(dochash)
        if doc is None:
            print "saving " + dochash
            edate = date.today().strftime("%Y-%m-%d")
            doc = {'_id': dochash, 'agencyID': self.getAgencyID(),
                'url': self.getURL(), 'docID': dochash,
                "date": edate, "title": "Disclosure Log Updated",
                "description": description}
            foidocsdb.save(doc)
        else:
            print "already saved"
   
   
class GenericDOCXDisclogScraper(GenericDisclogScraper):

    def doScrape(self):
        foidocsdb = scrape.couch['disclosr-foidocuments']
        (url, mime_type, content) = scrape.fetchURL(scrape.docsdb,
            self.getURL(), "foidocuments", self.getAgencyID())
        # wrap the fetched bytes in a seekable buffer for ZipFile
        mydoc = zipfile.ZipFile(StringIO(content.read()))
        xmlcontent = mydoc.read('word/document.xml')
        document = etree.fromstring(xmlcontent)
        # fetch all the text out of the document we just created
        paratextlist = getdocumenttext(document)
        # make an explicit unicode version
        newparatextlist = []
        for paratext in paratextlist:
            newparatextlist.append(paratext.encode("utf-8"))
        # join the document's text with two newlines between paragraphs
        description = '\n\n'.join(newparatextlist).strip(' \t\n\r')
        dochash = scrape.mkhash(description)
        doc = foidocsdb.get(dochash)

        if doc is None:
            print "saving " + dochash
            edate = date.today().strftime("%Y-%m-%d")
            doc = {'_id': dochash, 'agencyID': self.getAgencyID(),
                'url': self.getURL(), 'docID': dochash,
                "date": edate, "title": "Disclosure Log Updated",
                "description": description}
            foidocsdb.save(doc)
        else:
            print "already saved"
   
   
class GenericRSSDisclogScraper(GenericDisclogScraper):

    def doScrape(self):
        foidocsdb = scrape.couch['disclosr-foidocuments']
        (url, mime_type, content) = scrape.fetchURL(scrape.docsdb,
            self.getURL(), "foidocuments", self.getAgencyID())
        feed = feedparser.parse(content)
        for entry in feed.entries:
            #print entry
            print entry.id
            dochash = scrape.mkhash(entry.id)
            doc = foidocsdb.get(dochash)
            #print doc
            if doc is None:
                print "saving " + dochash
                edate = datetime.fromtimestamp(
                    mktime(entry.published_parsed)).strftime("%Y-%m-%d")
                doc = {'_id': dochash, 'agencyID': self.getAgencyID(),
                    'url': entry.link, 'docID': entry.id,
                    "date": edate, "title": entry.title}
                self.getDescription(entry, entry, doc)
                foidocsdb.save(doc)
            else:
                print "already saved"

    def getDescription(self, content, entry, doc):
        """ get description from rss entry"""
        doc.update({'description': content.summary})
        return
   
   
class GenericOAICDisclogScraper(GenericDisclogScraper):
    __metaclass__ = abc.ABCMeta

    @abc.abstractmethod
    def getColumns(self, columns):
        """ rearranges columns if required """
        return

    def getColumnCount(self):
        return 5

    def getDescription(self, content, entry, doc):
        """ get description from table cell"""
        descriptiontxt = ""
        for string in content.stripped_strings:
            descriptiontxt = descriptiontxt + " \n" + string
        doc.update({'description': descriptiontxt})

    def getTitle(self, content, entry, doc):
        doc.update({'title': (''.join(content.stripped_strings))})

    def getTable(self, soup):
        return soup.table

    def getRows(self, table):
        return table.find_all('tr')

    def getDate(self, content, entry, doc):
        date = ''.join(content.stripped_strings).strip()
        (a, b, c) = date.partition("(")
        date = self.remove_control_chars(a.replace("Octber", "October"))
        print date
        edate = parse(date, dayfirst=True, fuzzy=True).strftime("%Y-%m-%d")
        print edate
        doc.update({'date': edate})
        return

    def getLinks(self, content, entry, doc):
        links = []
        for atag in entry.find_all("a"):
            if atag.has_key('href'):
                links.append(scrape.fullurl(content, atag['href']))
        if links != []:
            doc.update({'links': links})
        return

    def doScrape(self):
        foidocsdb = scrape.couch['disclosr-foidocuments']
        (url, mime_type, content) = scrape.fetchURL(scrape.docsdb,
            self.getURL(), "foidocuments", self.getAgencyID())
        if content is not None:
            if mime_type == "text/html" or mime_type == "application/xhtml+xml" or mime_type == "application/xml":
                # http://www.crummy.com/software/BeautifulSoup/documentation.html
                print "parsing"
                soup = BeautifulSoup(content)
                table = self.getTable(soup)
                for row in self.getRows(table):
                    columns = row.find_all('td')
                    if len(columns) == self.getColumnCount():
                        (id, date, title,
                            description, notes) = self.getColumns(columns)
                        print self.remove_control_chars(
                            ''.join(id.stripped_strings))
                        if id.string is None:
                            dochash = scrape.mkhash(
                                self.remove_control_chars(
                                    url + (''.join(date.stripped_strings))))
                        else:
                            dochash = scrape.mkhash(
                                self.remove_control_chars(
                                    url + (''.join(id.stripped_strings))))
                        doc = foidocsdb.get(dochash)

                        if doc is None:
                            print "saving " + dochash
                            doc = {'_id': dochash,
                                'agencyID': self.getAgencyID(),
                                'url': self.getURL(),
                                'docID': (''.join(id.stripped_strings))}
                            self.getLinks(self.getURL(), row, doc)
                            self.getTitle(title, row, doc)
                            self.getDate(date, row, doc)
                            self.getDescription(description, row, doc)
                            if notes is not None:
                                doc.update({'notes': (
                                    ''.join(notes.stripped_strings))})
                            badtitles = ['-', 'Summary of FOI Request',
                                'FOI request(in summary form)',
                                'Summary of FOI request received by the ASC',
                                'Summary of FOI request received by agency/minister',
                                'Description of Documents Requested',
                                'FOI request', 'Description of FOI Request',
                                'Summary of request', 'Description', 'Summary',
                                'Summary of FOIrequest received by agency/minister',
                                'Summary of FOI request received',
                                'Results 1 to 67 of 67']
                            if doc['title'] not in badtitles \
                                    and doc['description'] != '':
                                print "saving"
                                foidocsdb.save(doc)
                        else:
                            print "already saved " + dochash

                    elif len(row.find_all('th')) == self.getColumnCount():
                        print "header row"

                    else:
                        print "ERROR number of columns incorrect"
                        print row
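

# --- Illustrative usage sketch (an assumption, not part of the upstream file) ---
# A concrete agency scraper would typically subclass GenericOAICDisclogScraper,
# map the disclosure-log table's five cells onto (id, date, title, description,
# notes) in getColumns(), and call doScrape() when run directly. The class name
# and cell ordering below are hypothetical examples only.
class ExampleDisclogScraper(GenericOAICDisclogScraper):

    def getColumns(self, columns):
        # reorder the table cells as required by this (hypothetical) agency's log
        (id, date, title, description, notes) = columns
        return (id, date, title, description, notes)


if __name__ == '__main__':
    print 'Subclass of GenericOAICDisclogScraper'
    ExampleDisclogScraper().doScrape()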