beginning deewr scraper
[disclosr.git] / documents / genericScrapers.py
--- a/documents/genericScrapers.py
+++ b/documents/genericScrapers.py
 import sys,os
 sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
 import scrape
 from bs4 import BeautifulSoup
-import parsedatetime as pdt
 from time import mktime
-from datetime import datetime
 import feedparser
 import abc
+import unicodedata, re
+import dateutil
+from dateutil.parser import *
+from datetime import *
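The import change above (parsedatetime out, dateutil in) is what drives the new getDate() hook further down: dateutil's fuzzy parser can pull a date out of a table cell that contains extra words. An illustrative run, with made-up cell text:

from dateutil.parser import parse

# fuzzy=True skips the non-date tokens; dayfirst=True reads 21/9 as 21 September
edate = parse("released 21/9/2012", dayfirst=True, fuzzy=True).strftime("%Y-%m-%d")
print edate  # 2012-09-21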
   
-class GenericRSSDisclogScraper(object):
+class GenericDisclogScraper(object):
     __metaclass__ = abc.ABCMeta
-    @abc.abstractmethod
+    agencyID = None
+    disclogURL = None
+
+    def remove_control_chars(self, input):
+        # strip non-printable characters so document hashes stay stable
+        return "".join([i for i in input if ord(i) in range(32, 127)])
+
     def getAgencyID(self):
         """ disclosr agency id """
-        return
+        if self.agencyID == None:
+            # infer the agency id from the scraper script's own filename
+            self.agencyID = os.path.basename(sys.argv[0]).replace(".py", "")
+        return self.agencyID
 
-    @abc.abstractmethod
     def getURL(self):
         """ disclog URL"""
-        return
+        if self.disclogURL == None:
+            # look the disclosure log URL up in the disclosr agency database
+            agency = scrape.agencydb.get(self.getAgencyID())
+            self.disclogURL = agency['FOIDocumentsURL']
+        return self.disclogURL
 
-    def getDescription(self, entry, doc):
-        """ get description from rss entry"""
-        doc['description'] = entry.summary
-        return
+    @abc.abstractmethod
+    def doScrape(self):
+        """ do the scraping """
+        return
+
+    @abc.abstractmethod
+    def getDescription(self, content, entry, doc):
+        """ get description"""
+        return
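Because getAgencyID() falls back to the script's filename, a per-agency scraper normally never sets agencyID or disclogURL itself. A quick sketch of the resolution, using the deewr scraper this commit begins:

import os, sys

sys.argv = ['deewr.py']  # as if launched with "python deewr.py"
print os.path.basename(sys.argv[0]).replace(".py", "")  # prints: deewr
# getURL() then reads scrape.agencydb.get("deewr")['FOIDocumentsURL']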
   
   
   
+class GenericRSSDisclogScraper(GenericDisclogScraper):
+
     def doScrape(self):
         foidocsdb = scrape.couch['disclosr-foidocuments']
         (url, mime_type, content) = scrape.fetchURL(scrape.docsdb, self.getURL(), "foidocuments", self.getAgencyID())
         feed = feedparser.parse(content)
         for entry in feed.entries:
             #print entry
             print entry.id
             hash = scrape.mkhash(entry.id)
             #print hash
             doc = foidocsdb.get(hash)
             #print doc
             if doc == None:
-                print "saving"
+                print "saving " + hash
                 edate = datetime.fromtimestamp(mktime(entry.published_parsed)).strftime("%Y-%m-%d")
                 doc = {'_id': hash, 'agencyID': self.getAgencyID(), 'url': entry.link, 'docID': entry.id,
                        "date": edate, "title": entry.title}
-                self.getDescription(entry, doc)
+                # the feed entry doubles as both the content and entry arguments
+                self.getDescription(entry, entry, doc)
                 foidocsdb.save(doc)
             else:
                 print "already saved"
+
+    def getDescription(self, content, entry, doc):
+        """ get description from rss entry"""
+        doc.update({'description': content.summary})
+        return
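With getDescription() now concrete and the agency id and URL inferred from the base class, an RSS-based agency scraper script can be nearly empty. A minimal sketch, assuming the script sits one directory below genericScrapers.py (mirroring the sys.path trick at the top of this file):

import sys, os
sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
import genericScrapers

if __name__ == '__main__':
    genericScrapers.GenericRSSDisclogScraper().doScrape()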
   
-class GenericOAICDisclogScraper(object):
+class GenericOAICDisclogScraper(GenericDisclogScraper):
     __metaclass__ = abc.ABCMeta
-    @abc.abstractmethod
-    def getAgencyID(self):
-        """ disclosr agency id """
-        return
-
-    @abc.abstractmethod
-    def getURL(self):
-        """ disclog URL"""
-        return
 
     @abc.abstractmethod
     def getColumns(self, columns):
         """ rearranges columns if required """
         return
+
+    def getColumnCount(self):
+        return 5
+
+    def getDescription(self, content, entry, doc):
+        """ get description from table cell"""
+        descriptiontxt = ""
+        for string in content.stripped_strings:
+            descriptiontxt = descriptiontxt + " \n" + string
+        doc.update({'description': descriptiontxt})
+        return
+
+    def getTitle(self, content, entry, doc):
+        doc.update({'title': content.string})
+        return
+
+    def getTable(self, soup):
+        # default: first table on the page; override for busier layouts
+        return soup.table
+
+    def getDate(self, content, entry, doc):
+        # fuzzy parse tolerates stray text around the date in the cell
+        edate = parse(''.join(content.stripped_strings).strip(), dayfirst=True, fuzzy=True).strftime("%Y-%m-%d")
+        print edate
+        doc.update({'date': edate})
+        return
+
+    def getLinks(self, content, entry, doc):
+        links = []
+        for atag in entry.find_all("a"):
+            if atag.has_key('href'):
+                links.append(scrape.fullurl(content, atag['href']))
+        if links != []:
+            doc.update({'links': links})
+        return
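Each helper above is a small overridable hook, so a subclass only replaces the piece that differs per agency. For example, a disclog whose table has no notes column and sits inside a wrapper div might look like the following sketch; the id="content" selector and the column order are hypothetical:

import genericScrapers

class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
    def getTable(self, soup):
        # hypothetical layout: the disclog table sits inside <div id="content">
        return soup.find(id="content").table

    def getColumnCount(self):
        return 4  # this disclog has no notes column

    def getColumns(self, columns):
        (id, date, title, description) = columns
        # restore the (id, date, description, title, notes) order doScrape() expects
        return (id, date, description, title, None)

Returning None for notes is safe because doScrape() below guards it with "if notes != None".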
   
     def doScrape(self):
-        cal = pdt.Calendar()
         foidocsdb = scrape.couch['disclosr-foidocuments']
         (url, mime_type, content) = scrape.fetchURL(scrape.docsdb, self.getURL(), "foidocuments", self.getAgencyID())
         if content != None:
             if mime_type == "text/html" or mime_type == "application/xhtml+xml" or mime_type == "application/xml":
                 # http://www.crummy.com/software/BeautifulSoup/documentation.html
                 soup = BeautifulSoup(content)
-                for row in soup.table.find_all('tr'):
+                table = self.getTable(soup)
+                for row in table.find_all('tr'):
                     columns = row.find_all('td')
-                    if len(columns) == 5:
+                    if len(columns) == self.getColumnCount():
                         (id, date, description, title, notes) = self.getColumns(columns)
-                        print id.string
-                        hash = scrape.mkhash(url+id.string)
-                        links = []
-                        for atag in row.find_all("a"):
-                            if atag.has_key('href'):
-                                links.append(scrape.fullurl(url, atag['href']))
+                        print ''.join(id.stripped_strings)
+                        if id.string == None:
+                            # no usable id cell; fall back to hashing the date cell
+                            hash = scrape.mkhash(self.remove_control_chars(url + ''.join(date.stripped_strings)))
+                        else:
+                            hash = scrape.mkhash(self.remove_control_chars(url + ''.join(id.stripped_strings)))
                         doc = foidocsdb.get(hash)
-                        descriptiontxt = ""
-                        for string in description.stripped_strings:
-                            descriptiontxt = descriptiontxt + " \n" + string
                         if doc == None:
-                            print "saving"
-                            dtresult = cal.parseDateText(date.string)
-                            if len(dtresult) == 2:
-                                (dtdate, dtr) = dtresult
-                                print dtdate
-                                edate = ""+str(dtdate[0])+'-'+str(dtdate[1])+'-'+str(dtdate[2])
-                            else:
-                                edate = ""
-                            doc = {'_id': hash, 'agencyID': self.getAgencyID(), 'url': self.getURL(), "links": links, 'docID': id.string,
-                                   "date": edate, "description": descriptiontxt, "title": title.string, "notes": notes.string}
+                            print "saving " + hash
+                            doc = {'_id': hash, 'agencyID': self.getAgencyID(), 'url': self.getURL(), 'docID': id.string}
+                            # each field is filled in via an overridable hook method
+                            self.getLinks(self.getURL(), row, doc)
+                            self.getTitle(title, row, doc)
+                            self.getDate(date, row, doc)
+                            self.getDescription(description, row, doc)
+                            if notes != None:
+                                doc.update({'notes': notes.string})
                             foidocsdb.save(doc)
                         else:
-                            print "already saved"
-                    elif len(row.find_all('th')) == 5:
+                            print "already saved " + hash
+                    elif len(row.find_all('th')) == self.getColumnCount():
                         print "header row"
                     else:
                         print "ERROR number of columns incorrect"
                         print row
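Putting the pieces together, the deewr scraper this commit begins could start as small as this sketch; the column order is a placeholder until DEEWR's actual disclosure log table is inspected:

import sys, os
sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
import genericScrapers

class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
    def getColumns(self, columns):
        # placeholder ordering; rearrange once the real table is known
        (id, date, title, description, notes) = columns
        return (id, date, description, title, notes)

if __name__ == '__main__':
    ScraperImplementation().doScrape()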