beginning deewr scraper
[disclosr.git] / documents / genericScrapers.py
blob:a/documents/genericScrapers.py -> blob:b/documents/genericScrapers.py
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
import scrape
from bs4 import BeautifulSoup
from time import mktime
import feedparser
import abc
import unicodedata
import re
import dateutil
from dateutil.parser import *
from datetime import *
import codecs

from StringIO import StringIO

from docx import *
from lxml import etree
import zipfile

from pdfminer.pdfparser import PDFDocument, PDFParser
from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter, process_pdf
from pdfminer.pdfdevice import PDFDevice, TagExtractor
from pdfminer.converter import TextConverter
from pdfminer.cmapdb import CMapDB
from pdfminer.layout import LAParams


class GenericDisclogScraper(object):
    __metaclass__ = abc.ABCMeta
    agencyID = None
    disclogURL = None

    def remove_control_chars(self, input):
        return "".join([i for i in input if ord(i) in range(32, 127)])

    def getAgencyID(self):
        """ disclosr agency id """
        if self.agencyID is None:
            self.agencyID = os.path.basename(sys.argv[0]).replace(".py", "")
        return self.agencyID

    def getURL(self):
        """ disclog URL"""
        if self.disclogURL is None:
            agency = scrape.agencydb.get(self.getAgencyID())
            self.disclogURL = agency['FOIDocumentsURL']
        return self.disclogURL

    @abc.abstractmethod
    def doScrape(self):
        """ do the scraping """
        return
   
   
class GenericPDFDisclogScraper(GenericDisclogScraper):

    def doScrape(self):
        foidocsdb = scrape.couch['disclosr-foidocuments']
        (url, mime_type, content) = scrape.fetchURL(scrape.docsdb,
            self.getURL(), "foidocuments", self.getAgencyID())
        # extract the text of the PDF disclosure log with pdfminer
        laparams = LAParams()
        rsrcmgr = PDFResourceManager(caching=True)
        outfp = StringIO()
        device = TextConverter(rsrcmgr, outfp, codec='utf-8',
            laparams=laparams)
        fp = StringIO()
        fp.write(content)
        process_pdf(rsrcmgr, device, fp, set(), caching=True,
            check_extractable=True)
        description = outfp.getvalue()
        fp.close()
        device.close()
        outfp.close()
        dochash = scrape.mkhash(description)
        doc = foidocsdb.get(dochash)
        if doc is None:
            print "saving " + dochash
            edate = date.today().strftime("%Y-%m-%d")
            doc = {'_id': dochash, 'agencyID': self.getAgencyID(),
                'url': self.getURL(), 'docID': dochash,
                "date": edate, "title": "Disclosure Log Updated",
                "description": description}
            foidocsdb.save(doc)
        else:
            print "already saved"
   
   
class GenericDOCXDisclogScraper(GenericDisclogScraper):

    def doScrape(self):
        foidocsdb = scrape.couch['disclosr-foidocuments']
        (url, mime_type, content) = scrape.fetchURL(scrape.docsdb,
            self.getURL(), "foidocuments", self.getAgencyID())
        mydoc = zipfile.ZipFile(StringIO(content))
        xmlcontent = mydoc.read('word/document.xml')
        document = etree.fromstring(xmlcontent)
        ## Fetch all the text out of the document we just created
        paratextlist = getdocumenttext(document)
        # Make explicit unicode version
        newparatextlist = []
        for paratext in paratextlist:
            newparatextlist.append(paratext.encode("utf-8"))
        ## Join the document text with two newlines under each paragraph
        description = '\n\n'.join(newparatextlist)
        dochash = scrape.mkhash(description)
        doc = foidocsdb.get(dochash)

        if doc is None:
            print "saving " + dochash
            edate = date.today().strftime("%Y-%m-%d")
            doc = {'_id': dochash, 'agencyID': self.getAgencyID(),
                'url': self.getURL(), 'docID': dochash,
                "date": edate, "title": "Disclosure Log Updated",
                "description": description}
            foidocsdb.save(doc)
        else:
            print "already saved"
   
   
class GenericRSSDisclogScraper(GenericDisclogScraper):

    def doScrape(self):
        foidocsdb = scrape.couch['disclosr-foidocuments']
        (url, mime_type, content) = scrape.fetchURL(scrape.docsdb,
            self.getURL(), "foidocuments", self.getAgencyID())
        feed = feedparser.parse(content)
        for entry in feed.entries:
            #print entry
            print entry.id
            dochash = scrape.mkhash(entry.id)
            doc = foidocsdb.get(dochash)
            #print doc
            if doc is None:
                print "saving " + dochash
                edate = datetime.fromtimestamp(
                    mktime(entry.published_parsed)).strftime("%Y-%m-%d")
                doc = {'_id': dochash, 'agencyID': self.getAgencyID(),
                    'url': entry.link, 'docID': entry.id,
                    "date": edate, "title": entry.title}
                self.getDescription(entry, entry, doc)
                foidocsdb.save(doc)
            else:
                print "already saved"

    def getDescription(self, content, entry, doc):
        """ get description from rss entry"""
        doc.update({'description': content.summary})
        return
   
   
class GenericOAICDisclogScraper(GenericDisclogScraper):
    __metaclass__ = abc.ABCMeta

    @abc.abstractmethod
    def getColumns(self, columns):
        """ rearranges columns if required """
        return

    def getColumnCount(self):
        return 5

    def getDescription(self, content, entry, doc):
        """ get description from table cell """
        descriptiontxt = ""
        for string in content.stripped_strings:
            descriptiontxt = descriptiontxt + " \n" + string
        doc.update({'description': descriptiontxt})

    def getTitle(self, content, entry, doc):
        doc.update({'title': (''.join(content.stripped_strings))})

    def getTable(self, soup):
        return soup.table

    def getRows(self, table):
        return table.find_all('tr')

    def getDate(self, content, entry, doc):
        date = ''.join(content.stripped_strings).strip()
        (a, b, c) = date.partition("(")
        date = self.remove_control_chars(a.replace("Octber", "October"))
        print date
        edate = parse(date, dayfirst=True, fuzzy=True).strftime("%Y-%m-%d")
        print edate
        doc.update({'date': edate})
        return

    def getLinks(self, content, entry, doc):
        links = []
        for atag in entry.find_all("a"):
            if atag.has_key('href'):
                links.append(scrape.fullurl(content, atag['href']))
        if links != []:
            doc.update({'links': links})
        return
   
    def doScrape(self):
        foidocsdb = scrape.couch['disclosr-foidocuments']
        (url, mime_type, content) = scrape.fetchURL(scrape.docsdb,
            self.getURL(), "foidocuments", self.getAgencyID())
        if content is not None:
            if mime_type == "text/html" \
                    or mime_type == "application/xhtml+xml" \
                    or mime_type == "application/xml":
                # http://www.crummy.com/software/BeautifulSoup/documentation.html
                soup = BeautifulSoup(content)
                table = self.getTable(soup)
                for row in self.getRows(table):
                    columns = row.find_all('td')
                    if len(columns) == self.getColumnCount():
                        (id, date, title,
                            description, notes) = self.getColumns(columns)
                        print self.remove_control_chars(
                            ''.join(id.stripped_strings))
                        if id.string is None:
                            dochash = scrape.mkhash(
                                self.remove_control_chars(
                                    url + (''.join(date.stripped_strings))))
                        else:
                            dochash = scrape.mkhash(
                                self.remove_control_chars(
                                    url + (''.join(id.stripped_strings))))
                        doc = foidocsdb.get(dochash)

                        if doc is None:
                            print "saving " + dochash
                            doc = {'_id': dochash,
                                'agencyID': self.getAgencyID(),
                                'url': self.getURL(),
                                'docID': (''.join(id.stripped_strings))}
                            self.getLinks(self.getURL(), row, doc)
                            self.getTitle(title, row, doc)
                            self.getDate(date, row, doc)
                            self.getDescription(description, row, doc)
                            if notes is not None:
                                doc.update({'notes': (
                                    ''.join(notes.stripped_strings))})
                            badtitles = ['-', 'Summary of FOI Request',
                                'FOI request(in summary form)',
                                'Summary of FOI request received by the ASC',
                                'Summary of FOI request received by agency/minister',
                                'Description of Documents Requested', 'FOI request',
                                'Description of FOI Request', 'Summary of request',
                                'Description', 'Summary',
                                'Summary of FOIrequest received by agency/minister',
                                'Summary of FOI request received',
                                'Description of FOI Request', "FOI request",
                                'Results 1 to 67 of 67']
                            if doc['title'] not in badtitles \
                                    and doc['description'] != '':
                                print "saving"
                                foidocsdb.save(doc)
                        else:
                            print "already saved " + dochash

                    elif len(row.find_all('th')) == self.getColumnCount():
                        print "header row"

                    else:
                        print "ERROR number of columns incorrect"
                        print row
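# Example usage (a sketch, not part of this module): the "deewr scraper" this
# commit begins would live in its own script and subclass one of the generic
# scrapers above, overriding getColumns() to match that agency's table layout.
# The module/class names below (genericScrapers, ScraperImplementation) are
# illustrative assumptions only; note that getAgencyID() derives the agency id
# from the script's filename, so the script would be named after the agency.
#
#   import genericScrapers
#
#   class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
#       def getColumns(self, columns):
#           # map the five source columns to (id, date, title, description, notes)
#           (id, date, title, description, notes) = columns
#           return (id, date, title, description, notes)
#
#   if __name__ == '__main__':
#       ScraperImplementation().doScrape()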