fix agd scraper
[disclosr.git] / documents / genericScrapers.py
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
import scrape
from bs4 import BeautifulSoup
from time import mktime
import feedparser
import abc
import unicodedata
import re
import dateutil
from dateutil.parser import *
from datetime import *
import codecs

from StringIO import StringIO

from docx import *
from lxml import etree
import zipfile

from pdfminer.pdfparser import PDFDocument, PDFParser
from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter, process_pdf
from pdfminer.pdfdevice import PDFDevice, TagExtractor
from pdfminer.converter import TextConverter
from pdfminer.cmapdb import CMapDB
from pdfminer.layout import LAParams

   
class GenericDisclogScraper(object):
    __metaclass__ = abc.ABCMeta
    agencyID = None
    disclogURL = None

    def remove_control_chars(self, input):
        return "".join([i for i in input if ord(i) in range(32, 127)])

    def getAgencyID(self):
        """ disclosr agency id """
        if self.agencyID is None:
            self.agencyID = os.path.basename(sys.argv[0]).replace(".py", "")
        return self.agencyID

    def getURL(self):
        """ disclog URL"""
        if self.disclogURL is None:
            agency = scrape.agencydb.get(self.getAgencyID())
            self.disclogURL = agency['FOIDocumentsURL']
        return self.disclogURL

    @abc.abstractmethod
    def doScrape(self):
        """ do the scraping """
        return


class GenericPDFDisclogScraper(GenericDisclogScraper):

    def doScrape(self):
        foidocsdb = scrape.couch['disclosr-foidocuments']
        (url, mime_type, content) = scrape.fetchURL(scrape.docsdb,
            self.getURL(), "foidocuments", self.getAgencyID())
        # extract the plain text of the PDF disclosure log with pdfminer
        laparams = LAParams()
        rsrcmgr = PDFResourceManager(caching=True)
        outfp = StringIO()
        device = TextConverter(rsrcmgr, outfp, codec='utf-8',
            laparams=laparams)
        fp = StringIO(content)
        process_pdf(rsrcmgr, device, fp, set(), caching=True,
            check_extractable=True)
        description = outfp.getvalue()
        fp.close()
        device.close()
        outfp.close()
        # hash the extracted text so a changed log is saved as a new document
        dochash = scrape.mkhash(description)
        doc = foidocsdb.get(dochash)
        if doc is None:
            print "saving " + dochash
            edate = date.today().strftime("%Y-%m-%d")
            doc = {'_id': dochash, 'agencyID': self.getAgencyID(),
                'url': self.getURL(), 'docID': dochash,
                "date": edate, "title": "Disclosure Log Updated",
                "description": description}
            foidocsdb.save(doc)
        else:
            print "already saved"
   
   
class GenericDOCXDisclogScraper(GenericDisclogScraper):

    def doScrape(self):
        foidocsdb = scrape.couch['disclosr-foidocuments']
        (url, mime_type, content) = scrape.fetchURL(scrape.docsdb,
            self.getURL(), "foidocuments", self.getAgencyID())
        # a .docx is a zip archive; the body text lives in word/document.xml
        mydoc = zipfile.ZipFile(StringIO(content))
        xmlcontent = mydoc.read('word/document.xml')
        document = etree.fromstring(xmlcontent)
        # fetch all the text out of the document we just created
        paratextlist = getdocumenttext(document)
        # make an explicit unicode version
        newparatextlist = []
        for paratext in paratextlist:
            newparatextlist.append(paratext.encode("utf-8"))
        # join the paragraphs with two newlines under each paragraph
        description = '\n\n'.join(newparatextlist)
        dochash = scrape.mkhash(description)
        doc = foidocsdb.get(dochash)

        if doc is None:
            print "saving " + dochash
            edate = date.today().strftime("%Y-%m-%d")
            doc = {'_id': dochash, 'agencyID': self.getAgencyID(),
                'url': self.getURL(), 'docID': dochash,
                "date": edate, "title": "Disclosure Log Updated",
                "description": description}
            foidocsdb.save(doc)
        else:
            print "already saved"
   
   
class GenericRSSDisclogScraper(GenericDisclogScraper):

    def doScrape(self):
        foidocsdb = scrape.couch['disclosr-foidocuments']
        (url, mime_type, content) = scrape.fetchURL(scrape.docsdb,
            self.getURL(), "foidocuments", self.getAgencyID())
        feed = feedparser.parse(content)
        for entry in feed.entries:
            #print entry
            print entry.id
            dochash = scrape.mkhash(entry.id)
            doc = foidocsdb.get(dochash)
            #print doc
            if doc is None:
                print "saving " + dochash
                edate = datetime.fromtimestamp(
                    mktime(entry.published_parsed)).strftime("%Y-%m-%d")
                doc = {'_id': dochash, 'agencyID': self.getAgencyID(),
                    'url': entry.link, 'docID': entry.id,
                    "date": edate, "title": entry.title}
                self.getDescription(entry, entry, doc)
                foidocsdb.save(doc)
            else:
                print "already saved"

    def getDescription(self, content, entry, doc):
        """ get description from rss entry"""
        doc.update({'description': content.summary})
        return
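
# A minimal concrete scraper is a separate per-agency script that just
# subclasses one of the generics; the RSS, PDF and DOCX scrapers above need no
# overrides at all. The sketch below is an assumption about what such a script
# looks like (file location, class name and the RSS choice are illustrative,
# not part of this module):
#
#     import sys, os
#     sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
#     import genericScrapers
#
#     class ScraperImplementation(genericScrapers.GenericRSSDisclogScraper):
#         pass
#
#     if __name__ == '__main__':
#         ScraperImplementation().doScrape()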
   
   
class GenericOAICDisclogScraper(GenericDisclogScraper):
    __metaclass__ = abc.ABCMeta

    @abc.abstractmethod
    def getColumns(self, columns):
        """ rearranges columns if required """
        return

    def getColumnCount(self):
        return 5

    def getDescription(self, content, entry, doc):
        """ get description from table cell """
        descriptiontxt = ""
        for string in content.stripped_strings:
            descriptiontxt = descriptiontxt + " \n" + string
        doc.update({'description': descriptiontxt})

    def getTitle(self, content, entry, doc):
        doc.update({'title': (''.join(content.stripped_strings))})

    def getTable(self, soup):
        return soup.table

    def getRows(self, table):
        return table.find_all('tr')

    def getDate(self, content, entry, doc):
        date = ''.join(content.stripped_strings).strip()
        (a, b, c) = date.partition("(")
        # fix a common "Octber" typo in source tables before parsing
        date = self.remove_control_chars(a.replace("Octber", "October"))
        print date
        edate = parse(date, dayfirst=True, fuzzy=True).strftime("%Y-%m-%d")
        print edate
        doc.update({'date': edate})
        return

    def getLinks(self, content, entry, doc):
        links = []
        for atag in entry.find_all("a"):
            if atag.has_key('href'):
                links.append(scrape.fullurl(content, atag['href']))
        if links != []:
            doc.update({'links': links})
        return
   
    def doScrape(self):
        foidocsdb = scrape.couch['disclosr-foidocuments']
        (url, mime_type, content) = scrape.fetchURL(scrape.docsdb,
            self.getURL(), "foidocuments", self.getAgencyID())
        if content is not None:
            if mime_type == "text/html"\
                    or mime_type == "application/xhtml+xml"\
                    or mime_type == "application/xml":
                # http://www.crummy.com/software/BeautifulSoup/documentation.html
                soup = BeautifulSoup(content)
                table = self.getTable(soup)
                for row in self.getRows(table):
                    columns = row.find_all('td')
                    if len(columns) == self.getColumnCount():
                        (id, date, title,
                            description, notes) = self.getColumns(columns)
                        print self.remove_control_chars(
                            ''.join(id.stripped_strings))
                        # hash the url plus the row id (or the date cell when
                        # the id is empty) to give each entry a stable doc id
                        if id.string is None:
                            dochash = scrape.mkhash(
                                self.remove_control_chars(
                                    url + (''.join(date.stripped_strings))))
                        else:
                            dochash = scrape.mkhash(
                                self.remove_control_chars(
                                    url + (''.join(id.stripped_strings))))
                        doc = foidocsdb.get(dochash)

                        if doc is None:
                            print "saving " + dochash
                            doc = {'_id': dochash,
                                'agencyID': self.getAgencyID(),
                                'url': self.getURL(),
                                'docID': (''.join(id.stripped_strings))}
                            self.getLinks(self.getURL(), row, doc)
                            self.getTitle(title, row, doc)
                            self.getDate(date, row, doc)
                            self.getDescription(description, row, doc)
                            if notes is not None:
                                doc.update({'notes': (
                                    ''.join(notes.stripped_strings))})
                            # skip rows whose "title" is only a repeated column
                            # heading or other non-entry text
                            badtitles = ['-', 'Summary of FOI Request',
                                'FOI request(in summary form)',
                                'Summary of FOI request received by the ASC',
                                'Summary of FOI request received by agency/minister',
                                'Description of Documents Requested',
                                'FOI request', 'Description of FOI Request',
                                'Summary of request', 'Description', 'Summary',
                                'Summary of FOIrequest received by agency/minister',
                                'Summary of FOI request received',
                                'Results 1 to 67 of 67']
                            if doc['title'] not in badtitles\
                                    and doc['description'] != '':
                                print "saving"
                                foidocsdb.save(doc)
                        else:
                            print "already saved " + dochash

                    elif len(row.find_all('th')) == self.getColumnCount():
                        print "header row"

                    else:
                        print "ERROR number of columns incorrect"
                        print row
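
# For a table-based disclosure log, a concrete scraper implements getColumns()
# to map each row's cells onto (id, date, title, description, notes), and may
# also override getTable(), getRows() or getColumnCount() for unusual layouts.
# A sketch, assuming a five-column table already in that order (class name and
# column order are hypothetical):
#
#     import genericScrapers
#
#     class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
#         def getColumns(self, columns):
#             (id, date, title, description, notes) = columns
#             return (id, date, title, description, notes)
#
#     if __name__ == '__main__':
#         ScraperImplementation().doScrape()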