made generic OAIC format table scraper class
[disclosr.git] / documents / genericScrapers.py
import sys, os
sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
import scrape
from bs4 import BeautifulSoup
from time import mktime
import feedparser
import abc
import unicodedata, re
import dateutil
from dateutil.parser import *
from datetime import *
   
class GenericDisclogScraper(object):
    __metaclass__ = abc.ABCMeta
    agencyID = None
    disclogURL = None

    def remove_control_chars(self, input):
        return "".join([i for i in input if ord(i) in range(32, 127)])

    def getAgencyID(self):
        """ disclosr agency id """
        if self.agencyID == None:
            self.agencyID = os.path.basename(sys.argv[0]).replace(".py", "")
        return self.agencyID

    def getURL(self):
        """ disclog URL """
        if self.disclogURL == None:
            agency = scrape.agencydb.get(self.getAgencyID())
            self.disclogURL = agency['FOIDocumentsURL']
        return self.disclogURL

    @abc.abstractmethod
    def doScrape(self):
        """ do the scraping """
        return

    @abc.abstractmethod
    def getDescription(self, content, entry, doc):
        """ get description """
        return
   
   
   
class GenericRSSDisclogScraper(GenericDisclogScraper):

    def doScrape(self):
        foidocsdb = scrape.couch['disclosr-foidocuments']
        (url, mime_type, content) = scrape.fetchURL(scrape.docsdb, self.getURL(), "foidocuments", self.getAgencyID())
        feed = feedparser.parse(content)
        for entry in feed.entries:
            #print entry
            print entry.id
            hash = scrape.mkhash(entry.id)
            #print hash
            doc = foidocsdb.get(hash)
            #print doc
            if doc == None:
                print "saving " + hash
                edate = datetime.fromtimestamp(mktime(entry.published_parsed)).strftime("%Y-%m-%d")
                doc = {'_id': hash, 'agencyID': self.getAgencyID(), 'url': entry.link, 'docID': entry.id,
                       "date": edate, "title": entry.title}
                self.getDescription(entry, entry, doc)
                foidocsdb.save(doc)
            else:
                print "already saved"

    def getDescription(self, content, entry, doc):
        """ get description from rss entry """
        doc.update({'description': content.summary})
        return
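
A per-agency scraper that uses this class only needs to subclass it and call doScrape(); the agency id comes from the script filename, the disclog URL from the agency record, and the description from the feed entry. A minimal sketch, assuming a hypothetical script sitting next to genericScrapers.py (the class and file names below are illustrative, not part of this commit):

# hypothetical per-agency script, e.g. exampleAgencyRSS.py
import genericScrapers

class ScraperImplementation(genericScrapers.GenericRSSDisclogScraper):
    # no overrides needed: id, link, title, date and summary all come
    # straight from the agency's disclosure log RSS feed
    pass

if __name__ == '__main__':
    ScraperImplementation().doScrape()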
   
class GenericOAICDisclogScraper(GenericDisclogScraper):
    __metaclass__ = abc.ABCMeta

    @abc.abstractmethod
    def getColumns(self, columns):
        """ rearranges columns if required """
        return

    def getColumnCount(self):
        return 5

    def getDescription(self, content, entry, doc):
        """ get description from table cell """
        descriptiontxt = ""
        for string in content.stripped_strings:
            descriptiontxt = descriptiontxt + " \n" + string
        doc.update({'description': descriptiontxt})
        return

    def getTitle(self, content, entry, doc):
        doc.update({'title': content.string})
        return

    def getTable(self, soup):
        return soup.table

    def getDate(self, content, entry, doc):
        edate = parse(''.join(content.stripped_strings).strip(), dayfirst=True, fuzzy=True).strftime("%Y-%m-%d")
        print edate
        doc.update({'date': edate})
        return

    def getLinks(self, content, entry, doc):
        links = []
        for atag in entry.find_all("a"):
            if atag.has_key('href'):
                links.append(scrape.fullurl(content, atag['href']))
        if links != []:
            doc.update({'links': links})
        return
   
    def doScrape(self):
        foidocsdb = scrape.couch['disclosr-foidocuments']
        (url, mime_type, content) = scrape.fetchURL(scrape.docsdb, self.getURL(), "foidocuments", self.getAgencyID())
        if content != None:
            if mime_type == "text/html" or mime_type == "application/xhtml+xml" or mime_type == "application/xml":
                # http://www.crummy.com/software/BeautifulSoup/documentation.html
                soup = BeautifulSoup(content)
                table = self.getTable(soup)
                for row in table.find_all('tr'):
                    columns = row.find_all('td')
                    if len(columns) == self.getColumnCount():
                        (id, date, description, title, notes) = self.getColumns(columns)
                        print ''.join(id.stripped_strings)
                        # hash on the id cell, falling back to the date cell when the id has no usable string
                        if id.string == None:
                            hash = scrape.mkhash(self.remove_control_chars(url + (''.join(date.stripped_strings))))
                        else:
                            hash = scrape.mkhash(self.remove_control_chars(url + (''.join(id.stripped_strings))))
                        doc = foidocsdb.get(hash)
                        if doc == None:
                            print "saving " + hash
                            doc = {'_id': hash, 'agencyID': self.getAgencyID(), 'url': self.getURL(), 'docID': id.string}
                            self.getLinks(self.getURL(), row, doc)
                            self.getTitle(title, row, doc)
                            self.getDate(date, row, doc)
                            self.getDescription(description, row, doc)
                            if notes != None:
                                doc.update({'notes': notes.string})
                            foidocsdb.save(doc)
                        else:
                            print "already saved " + hash
                    elif len(row.find_all('th')) == self.getColumnCount():
                        print "header row"
                    else:
                        print "ERROR number of columns incorrect"
                        print row
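
The point of the new GenericOAICDisclogScraper is that an agency scraper only has to describe its disclosure-log table: getColumns() maps the five td cells of a row into the (id, date, description, title, notes) order that doScrape() unpacks, getColumnCount() can change the expected cell count, and getTable() can be overridden when the log is not the first table on the page. A minimal sketch, assuming a hypothetical agency whose table is ordered reference / date / title / description / notes (the names and page layout below are illustrative, not part of this commit):

# hypothetical per-agency script, e.g. exampleAgencyOAIC.py
import genericScrapers

class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
    def getTable(self, soup):
        # assumed page layout: the disclosure log is the table inside <div id="content">
        return soup.find(id="content").table

    def getColumns(self, columns):
        # assumed agency order: reference, date, title, description, notes
        (id, date, title, description, notes) = columns
        # rearrange into the order doScrape() expects
        return (id, date, description, title, notes)

if __name__ == '__main__':
    ScraperImplementation().doScrape()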