# documents/genericScrapers.py
import sys, os
sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
import scrape

from bs4 import BeautifulSoup
from time import mktime
import feedparser
import abc
import unicodedata, re
import dateutil
from dateutil.parser import *
from datetime import *
import codecs
   
  from StringIO import StringIO
   
  from docx import *
  from lxml import etree
  import zipfile
   
  from pdfminer.pdfparser import PDFDocument, PDFParser
  from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter, process_pdf
  from pdfminer.pdfdevice import PDFDevice, TagExtractor
  from pdfminer.converter import TextConverter
  from pdfminer.cmapdb import CMapDB
  from pdfminer.layout import LAParams
   
class GenericDisclogScraper(object):
    __metaclass__ = abc.ABCMeta
    agencyID = None
    disclogURL = None

    def remove_control_chars(self, input):
        return "".join([i for i in input if ord(i) in range(32, 127)])

    def getAgencyID(self):
        """ disclosr agency id """
        if self.agencyID == None:
            self.agencyID = os.path.basename(sys.argv[0]).replace(".py", "")
        return self.agencyID

    def getURL(self):
        """ disclog URL"""
        if self.disclogURL == None:
            agency = scrape.agencydb.get(self.getAgencyID())
            self.disclogURL = agency['FOIDocumentsURL']
        return self.disclogURL
   
    @abc.abstractmethod
    def doScrape(self):
        """ do the scraping """
        return
   
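# Usage note: getAgencyID() falls back to the name of the calling script (an
# agency scraper saved as "exampleagency.py" gets agencyID "exampleagency"),
# and getURL() reads that agency's FOIDocumentsURL from scrape.agencydb.
# A minimal sketch of the agency record this assumes (values are hypothetical;
# only the FOIDocumentsURL key is read above):
#
#   {"_id": "exampleagency",
#    "FOIDocumentsURL": "http://www.example.gov.au/foi/disclosure-log"}
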
class GenericPDFDisclogScraper(GenericDisclogScraper):

    def doScrape(self):
        foidocsdb = scrape.couch['disclosr-foidocuments']
        (url, mime_type, content) = scrape.fetchURL(scrape.docsdb,
            self.getURL(), "foidocuments", self.getAgencyID())

        # run the fetched PDF through pdfminer and keep the extracted text
        laparams = LAParams()
        rsrcmgr = PDFResourceManager(caching=True)
        outfp = StringIO()
        device = TextConverter(rsrcmgr, outfp, codec='utf-8', laparams=laparams)
        fp = StringIO()
        fp.write(content)
        process_pdf(rsrcmgr, device, fp, set(), caching=True, check_extractable=True)
        description = outfp.getvalue()
        fp.close()
        device.close()
        outfp.close()

        # hash the extracted text so an updated disclosure log gets a new record
        hash = scrape.mkhash(description)
        doc = foidocsdb.get(hash)
        if doc == None:
            print "saving " + hash
            edate = date.today().strftime("%Y-%m-%d")
            doc = {'_id': hash, 'agencyID': self.getAgencyID(), 'url': self.getURL(),
                'docID': hash, "date": edate, "title": "Disclosure Log Updated",
                "description": description}
            foidocsdb.save(doc)
        else:
            print "already saved"
   
   
class GenericDOCXDisclogScraper(GenericDisclogScraper):

    def doScrape(self):
        foidocsdb = scrape.couch['disclosr-foidocuments']
        (url, mime_type, content) = scrape.fetchURL(scrape.docsdb,
            self.getURL(), "foidocuments", self.getAgencyID())

        # a .docx file is a zip archive; pull the main document XML out of it
        mydoc = zipfile.ZipFile(StringIO(content))
        xmlcontent = mydoc.read('word/document.xml')
        document = etree.fromstring(xmlcontent)

        ## Fetch all the text out of the document we just created
        paratextlist = getdocumenttext(document)

        # Make explicit unicode version
        newparatextlist = []
        for paratext in paratextlist:
            newparatextlist.append(paratext.encode("utf-8"))

        ## Join the document's text with two newlines under each paragraph
        description = '\n\n'.join(newparatextlist)

        # hash the extracted text so an updated disclosure log gets a new record
        hash = scrape.mkhash(description)
        doc = foidocsdb.get(hash)
        if doc == None:
            print "saving " + hash
            edate = date.today().strftime("%Y-%m-%d")
            doc = {'_id': hash, 'agencyID': self.getAgencyID(), 'url': self.getURL(),
                'docID': hash, "date": edate, "title": "Disclosure Log Updated",
                "description": description}
            foidocsdb.save(doc)
        else:
            print "already saved"
   
   
class GenericRSSDisclogScraper(GenericDisclogScraper):

    def doScrape(self):
        foidocsdb = scrape.couch['disclosr-foidocuments']
        (url, mime_type, content) = scrape.fetchURL(scrape.docsdb,
            self.getURL(), "foidocuments", self.getAgencyID())
        feed = feedparser.parse(content)
        for entry in feed.entries:
            print entry.id
            hash = scrape.mkhash(entry.id)
            doc = foidocsdb.get(hash)
            if doc == None:
                print "saving " + hash
                edate = datetime.fromtimestamp(mktime(entry.published_parsed)).strftime("%Y-%m-%d")
                doc = {'_id': hash, 'agencyID': self.getAgencyID(), 'url': entry.link,
                    'docID': entry.id, "date": edate, "title": entry.title}
                self.getDescription(entry, entry, doc)
                foidocsdb.save(doc)
            else:
                print "already saved"

    def getDescription(self, content, entry, doc):
        """ get description from rss entry"""
        doc.update({'description': content.summary})
        return
   
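# Hypothetical usage sketch (not part of this module): an agency whose
# disclosure log is published as an RSS feed only needs to subclass the RSS
# scraper; the class name and entry point below are assumptions for
# illustration.
#
#   import genericScrapers
#
#   class ScraperImplementation(genericScrapers.GenericRSSDisclogScraper):
#       pass
#
#   if __name__ == '__main__':
#       ScraperImplementation().doScrape()
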
class GenericOAICDisclogScraper(GenericDisclogScraper):
    __metaclass__ = abc.ABCMeta

    @abc.abstractmethod
    def getColumns(self, columns):
        """ rearranges columns if required """
        return

    def getColumnCount(self):
        return 5

    def getDescription(self, content, entry, doc):
        """ get description from table cell """
        descriptiontxt = ""
        for string in content.stripped_strings:
            descriptiontxt = descriptiontxt + " \n" + string
        doc.update({'description': descriptiontxt})
        return

    def getTitle(self, content, entry, doc):
        doc.update({'title': (''.join(content.stripped_strings))})
        return

    def getTable(self, soup):
        return soup.table

    def getRows(self, table):
        return table.find_all('tr')

    def getDate(self, content, entry, doc):
        date = ''.join(content.stripped_strings).strip()
        (a, b, c) = date.partition("(")
        date = self.remove_control_chars(a.replace("Octber", "October"))
        print date
        edate = parse(date, dayfirst=True, fuzzy=True).strftime("%Y-%m-%d")
        print edate
        doc.update({'date': edate})
        return

    def getLinks(self, content, entry, doc):
        links = []
        for atag in entry.find_all("a"):
            if atag.has_attr('href'):
                links.append(scrape.fullurl(content, atag['href']))
        if links != []:
            doc.update({'links': links})
        return
   
    def doScrape(self):
        foidocsdb = scrape.couch['disclosr-foidocuments']
        (url, mime_type, content) = scrape.fetchURL(scrape.docsdb,
            self.getURL(), "foidocuments", self.getAgencyID())
        if content != None:
            if mime_type == "text/html" or mime_type == "application/xhtml+xml" or mime_type == "application/xml":
                # http://www.crummy.com/software/BeautifulSoup/documentation.html
                soup = BeautifulSoup(content)
                table = self.getTable(soup)
                for row in self.getRows(table):
                    columns = row.find_all('td')
                    if len(columns) == self.getColumnCount():
                        (id, date, title, description, notes) = self.getColumns(columns)
                        print self.remove_control_chars(''.join(id.stripped_strings))
                        if id.string == None:
                            hash = scrape.mkhash(self.remove_control_chars(url + (''.join(date.stripped_strings))))
                        else:
                            hash = scrape.mkhash(self.remove_control_chars(url + (''.join(id.stripped_strings))))

                        doc = foidocsdb.get(hash)

                        if doc == None:
                            print "saving " + hash
                            doc = {'_id': hash, 'agencyID': self.getAgencyID(),
                                'url': self.getURL(), 'docID': (''.join(id.stripped_strings))}
                            self.getLinks(self.getURL(), row, doc)
                            self.getTitle(title, row, doc)
                            self.getDate(date, row, doc)
                            self.getDescription(description, row, doc)
                            if notes != None:
                                doc.update({'notes': (''.join(notes.stripped_strings))})
                            badtitles = ['-', 'Summary of FOI Request',
                                'FOI request(in summary form)',
                                'Summary of FOI request received by the ASC',
                                'Summary of FOI request received by agency/minister',
                                'Description of Documents Requested', 'FOI request',
                                'Description of FOI Request', 'Summary of request',
                                'Description', 'Summary',
                                'Summary of FOIrequest received by agency/minister',
                                'Summary of FOI request received',
                                'Description of FOI Request', "FOI request",
                                'Results 1 to 67 of 67']
                            if doc['title'] not in badtitles and doc['description'] != '':
                                print "saving"
                                foidocsdb.save(doc)
                        else:
                            print "already saved " + hash
                    elif len(row.find_all('th')) == self.getColumnCount():
                        print "header row"
                    else:
                        print "ERROR number of columns incorrect"
                        print row
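
# Hypothetical usage sketch (not part of this module): a concrete agency
# scraper subclasses GenericOAICDisclogScraper and maps its disclosure-log
# table cells onto (id, date, title, description, notes). The class name,
# column order and entry point below are assumptions for illustration only.
#
#   import genericScrapers
#
#   class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
#       def getColumnCount(self):
#           return 5
#
#       def getColumns(self, columns):
#           (id, date, title, description, notes) = columns
#           return (id, date, title, description, notes)
#
#   if __name__ == '__main__':
#       ScraperImplementation().doScrape()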