# beginnings rss scraper
# [disclosr.git] / documents / genericScrapers.py
# (generic disclosure-log scraper base classes)
import abc
import codecs
import difflib
import os
import re
import sys
import unicodedata
import zipfile
from datetime import *
from StringIO import StringIO
from time import mktime

# make the parent directory importable so the local `scrape` module resolves
sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))

import scrape

import dateutil
import feedparser
from bs4 import BeautifulSoup
from dateutil.parser import *
from pdfminer.cmapdb import CMapDB
from pdfminer.converter import TextConverter
from pdfminer.layout import LAParams
from pdfminer.pdfdevice import PDFDevice, TagExtractor
from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter, process_pdf
from pdfminer.pdfparser import PDFDocument, PDFParser
class GenericDisclogScraper(object):
    """Base class for agency disclosure-log scrapers.

    Subclasses implement doScrape(). The agency ID defaults to the
    scraper script's own filename, and the disclosure-log URL is looked
    up once from the agency record in the scrape database.
    """
    __metaclass__ = abc.ABCMeta
    agencyID = None     # cached agency identifier
    disclogURL = None   # cached disclosure-log URL

    def remove_control_chars(self, input):
        """Strip every character outside printable ASCII (32-126)."""
        # direct range comparison instead of `ord(i) in range(32, 127)`,
        # which in Python 2 is an O(n) list-membership test per character
        return "".join([i for i in input if 32 <= ord(i) < 127])

    def getAgencyID(self):
        """ disclosr agency id — derived from the scraper script's filename """
        if self.agencyID is None:
            self.agencyID = os.path.basename(sys.argv[0]).replace(".py", "")
        return self.agencyID

    def getURL(self):
        """ disclog URL — fetched once from the agency database record """
        if self.disclogURL is None:
            agency = scrape.agencydb.get(self.getAgencyID())
            self.disclogURL = agency['FOIDocumentsURL']
        return self.disclogURL

    @abc.abstractmethod
    def doScrape(self):
        """ do the scraping """
        return
   
class GenericHTMLDisclogScraper(GenericDisclogScraper):
    """Scraper for disclosure logs published as free-form HTML pages.

    No table structure is assumed: the whole page is hashed and, when it
    changes, an HTML diff against the previously fetched copy is stored.
    """

    def doScrape(self):
        """Fetch the log page and save a change record if it is new."""
        foidocsdb = scrape.couch['disclosr-foidocuments']
        (url, mime_type, rcontent) = scrape.fetchURL(scrape.docsdb,
            self.getURL(), "foidocuments", self.getAgencyID())
        content = rcontent.read()
        dochash = scrape.mkhash(content)
        doc = foidocsdb.get(dochash)
        if doc is None:
            print("saving " + dochash)
            description = "This log may have updated but as it was not in a table last time we viewed it, we cannot extract what has changed. Please refer to the agency's website Disclosure Log to see the most recent entries"
            last_attach = scrape.getLastAttachment(scrape.docsdb, self.getURL())
            # only diff when a previous copy of the page exists
            # (was `!= None`; identity test is the correct None check)
            if last_attach is not None:
                html_diff = difflib.HtmlDiff()
                description = description + "\nChanges: "
                description = description + html_diff.make_table(
                    last_attach.read().split('\n'), content.split('\n'))
            edate = date.today().strftime("%Y-%m-%d")
            doc = {'_id': dochash, 'agencyID': self.getAgencyID(),
                   'url': self.getURL(), 'docID': dochash,
                   "date": edate, "title": "Disclosure Log Updated",
                   "description": description}
            foidocsdb.save(doc)
        else:
            print("already saved")
   
class GenericPDFDisclogScraper(GenericDisclogScraper):
    """Scraper for disclosure logs published as PDF documents."""

    def doScrape(self):
        """Extract the PDF's text with pdfminer, hash it, save when new."""
        foidocsdb = scrape.couch['disclosr-foidocuments']
        (url, mime_type, content) = scrape.fetchURL(scrape.docsdb,
            self.getURL(), "foidocuments", self.getAgencyID())
        # feed the downloaded bytes through pdfminer's text converter
        resources = PDFResourceManager(caching=True)
        text_out = StringIO()
        converter = TextConverter(resources, text_out, codec='utf-8',
            laparams=LAParams())
        pdf_in = StringIO()
        pdf_in.write(content.read())

        process_pdf(resources, converter, pdf_in, set(), caching=True,
            check_extractable=True)
        description = text_out.getvalue()
        pdf_in.close()
        converter.close()
        text_out.close()
        # the extracted text itself identifies this version of the log
        dochash = scrape.mkhash(description)
        if foidocsdb.get(dochash) is not None:
            print("already saved")
            return
        print("saving " + dochash)
        edate = date.today().strftime("%Y-%m-%d")
        foidocsdb.save({'_id': dochash, 'agencyID': self.getAgencyID(),
            'url': self.getURL(), 'docID': dochash,
            "date": edate, "title": "Disclosure Log Updated",
            "description": description})
   
   
class GenericDOCXDisclogScraper(GenericDisclogScraper):
    """Scraper for disclosure logs published as .docx documents."""

    def doScrape(self):
        """Extract the document's paragraph text, hash it, save when new."""
        foidocsdb = scrape.couch['disclosr-foidocuments']
        (url, mime_type, content) = scrape.fetchURL(scrape.docsdb
            , self.getURL(), "foidocuments", self.getAgencyID())
        # was zipfile.ZipFile(file): `file` is the Python 2 builtin type,
        # not the fetched attachment — open the downloaded bytes instead
        mydoc = zipfile.ZipFile(StringIO(content.read()))
        xmlcontent = mydoc.read('word/document.xml')
        # NOTE(review): `etree` and `getdocumenttext` are not imported
        # anywhere in this file — presumably lxml.etree and the docx helper
        # module; confirm and add the imports or this method raises NameError.
        document = etree.fromstring(xmlcontent)
        ## Fetch all the text out of the document we just created
        paratextlist = getdocumenttext(document)
        # Make explicit unicode version
        newparatextlist = []
        for paratext in paratextlist:
            newparatextlist.append(paratext.encode("utf-8"))
        ## Join the document's text with two newlines under each paragraph
        description = '\n\n'.join(newparatextlist).strip(' \t\n\r')
        dochash = scrape.mkhash(description)
        doc = foidocsdb.get(dochash)

        if doc is None:
            print("saving " + dochash)
            # was time().strftime(...): datetime.time() is midnight, so every
            # record was dated "1900-01-01" — use today's date like the
            # other scrapers in this file
            edate = date.today().strftime("%Y-%m-%d")
            doc = {'_id': dochash, 'agencyID': self.getAgencyID(),
                   'url': self.getURL(), 'docID': dochash,
                   "date": edate, "title": "Disclosure Log Updated",
                   "description": description}
            foidocsdb.save(doc)
        else:
            print("already saved")
   
   
class GenericRSSDisclogScraper(GenericDisclogScraper):
    """Scraper for disclosure logs published as RSS/Atom feeds."""

    def doScrape(self):
        """Walk the feed and save one document per previously unseen entry."""
        foidocsdb = scrape.couch['disclosr-foidocuments']
        (url, mime_type, content) = scrape.fetchURL(scrape.docsdb,
            self.getURL(), "foidocuments", self.getAgencyID())
        feed = feedparser.parse(content)
        for entry in feed.entries:
            #print entry
            print(entry.id)
            dochash = scrape.mkhash(entry.id)
            existing = foidocsdb.get(dochash)
            #print doc
            if existing is not None:
                print("already saved")
                continue
            print("saving " + dochash)
            edate = datetime.fromtimestamp(
                mktime(entry.published_parsed)).strftime("%Y-%m-%d")
            record = {'_id': dochash, 'agencyID': self.getAgencyID(),
                      'url': entry.link, 'docID': entry.id,
                      "date": edate, "title": entry.title}
            self.getDescription(entry, entry, record)
            foidocsdb.save(record)

    def getDescription(self, content, entry, doc):
        """ get description from rss entry"""
        doc.update({'description': content.summary})
        return
   
   
class GenericOAICDisclogScraper(GenericDisclogScraper):
    """Scraper for OAIC-style disclosure logs published as HTML tables.

    Each table row becomes one document; subclasses map the row's cells
    to (id, date, title, description, notes) via getColumns().
    """
    __metaclass__ = abc.ABCMeta

    @abc.abstractmethod
    def getColumns(self, columns):
        """ rearranges columns if required """
        return

    def getColumnCount(self):
        """Number of <td> cells expected in a data row."""
        return 5

    def getDescription(self, content, entry, doc):
        """ get description from rss entry"""
        descriptiontxt = ""
        for string in content.stripped_strings:
            descriptiontxt = descriptiontxt + " \n" + string
        doc.update({'description': descriptiontxt})

    def getTitle(self, content, entry, doc):
        """Store the cell's concatenated text as the document title."""
        doc.update({'title': (''.join(content.stripped_strings))})

    def getTable(self, soup):
        """Table holding the log; override when it is not the page's first."""
        return soup.table

    def getRows(self, table):
        """Rows of the log table; override for nested layouts."""
        return table.find_all('tr')

    def getDate(self, content, entry, doc):
        """Parse the date cell (fixing a known 'Octber' typo) into ISO form."""
        # local renamed from `date`, which shadowed datetime.date
        datestr = ''.join(content.stripped_strings).strip()
        # drop any parenthesised annotation following the date
        (a, b, c) = datestr.partition("(")
        datestr = self.remove_control_chars(a.replace("Octber", "October"))
        print(datestr)
        edate = parse(datestr, dayfirst=True, fuzzy=True).strftime("%Y-%m-%d")
        print(edate)
        doc.update({'date': edate})
        return

    def getLinks(self, content, entry, doc):
        """Collect absolute URLs for every link in the row."""
        links = []
        for atag in entry.find_all("a"):
            # was has_key(): deprecated in bs4 and removed under Python 3;
            # has_attr() is the supported form
            if atag.has_attr('href'):
                links.append(scrape.fullurl(content, atag['href']))
        if links:
            doc.update({'links': links})
        return

    def doScrape(self):
        """Fetch the log page and save one document per unseen table row."""
        foidocsdb = scrape.couch['disclosr-foidocuments']
        (url, mime_type, content) = scrape.fetchURL(scrape.docsdb,
            self.getURL(), "foidocuments", self.getAgencyID())
        if content is not None:
            if mime_type == "text/html" or mime_type == "application/xhtml+xml" or mime_type == "application/xml":
                # http://www.crummy.com/software/BeautifulSoup/documentation.html
                print("parsing")
                soup = BeautifulSoup(content)
                table = self.getTable(soup)
                for row in self.getRows(table):
                    columns = row.find_all('td')
                    # was `is`: identity comparison between ints only works
                    # by CPython's small-int caching accident — use ==
                    if len(columns) == self.getColumnCount():
                        (id, date, title,
                         description, notes) = self.getColumns(columns)
                        print(self.remove_control_chars(
                            ''.join(id.stripped_strings)))
                        # rows without a usable id are keyed by URL + date
                        if id.string is None:
                            dochash = scrape.mkhash(
                                self.remove_control_chars(
                                    url + (''.join(date.stripped_strings))))
                        else:
                            dochash = scrape.mkhash(
                                self.remove_control_chars(
                                    url + (''.join(id.stripped_strings))))
                        doc = foidocsdb.get(dochash)

                        if doc is None:
                            print("saving " + dochash)
                            doc = {'_id': dochash,
                                   'agencyID': self.getAgencyID(),
                                   'url': self.getURL(),
                                   'docID': (''.join(id.stripped_strings))}
                            self.getLinks(self.getURL(), row, doc)
                            self.getTitle(title, row, doc)
                            self.getDate(date, row, doc)
                            self.getDescription(description, row, doc)
                            if notes is not None:
                                doc.update({'notes': (
                                    ''.join(notes.stripped_strings))})
                            # placeholder titles that mean "no real entry"
                            badtitles = ['-', 'Summary of FOI Request'
                                , 'FOI request(in summary form)'
                                , 'Summary of FOI request received by the ASC',
                                'Summary of FOI request received by agency/minister',
                                'Description of Documents Requested', 'FOI request',
                                'Description of FOI Request', 'Summary of request',
                                'Description', 'Summary',
                                'Summary of FOIrequest received by agency/minister',
                                'Summary of FOI request received',
                                'Description of FOI Request', "FOI request",
                                'Results 1 to 67 of 67']
                            if doc['title'] not in badtitles \
                                    and doc['description'] != '':
                                print("saving")
                                foidocsdb.save(doc)
                        else:
                            print("already saved " + dochash)

                    elif len(row.find_all('th')) == self.getColumnCount():
                        print("header row")

                    else:
                        print("ERROR number of columns incorrect")
                        print(row)