[disclosr.git] / documents / genericScrapers.py
import sys,os
sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
import scrape
from bs4 import BeautifulSoup
import parsedatetime as pdt
from time import mktime
from datetime import datetime
import feedparser
import abc
import unicodedata, re

class GenericDisclogScraper(object):
    __metaclass__ = abc.ABCMeta
    agencyID = None
    disclogURL = None

    def remove_control_chars(self, input):
        return "".join([i for i in input if ord(i) in range(32, 127)])

    def getAgencyID(self):
        """ disclosr agency id """
        if self.agencyID == None:
            self.agencyID = os.path.basename(sys.argv[0]).replace(".py", "")
        return self.agencyID

    def getURL(self):
        """ disclog URL """
        if self.disclogURL == None:
            agency = scrape.agencydb.get(self.getAgencyID())
            self.disclogURL = agency['FOIDocumentsURL']
        return self.disclogURL

    @abc.abstractmethod
    def doScrape(self):
        """ do the scraping """
        return

    @abc.abstractmethod
    def getDescription(self, content, entry, doc):
        """ get description """
        return

class GenericRSSDisclogScraper(GenericDisclogScraper):

    def doScrape(self):
        foidocsdb = scrape.couch['disclosr-foidocuments']
        (url, mime_type, content) = scrape.fetchURL(scrape.docsdb, self.getURL(), "foidocuments", self.getAgencyID())
        feed = feedparser.parse(content)
        for entry in feed.entries:
            #print entry
            print entry.id
            hash = scrape.mkhash(entry.id)
            #print hash
            doc = foidocsdb.get(hash)
            #print doc
            if doc == None:
                print "saving"
                edate = datetime.fromtimestamp(mktime(entry.published_parsed)).strftime("%Y-%m-%d")
                doc = {'_id': hash, 'agencyID': self.getAgencyID(), 'url': entry.link, 'docID': entry.id,
                       "date": edate, "title": entry.title}
                self.getDescription(entry, entry, doc)
                foidocsdb.save(doc)
            else:
                print "already saved"

    def getDescription(self, content, entry, doc):
        """ get description from rss entry """
        doc.update({'description': content.summary})
        return
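
# --- Illustrative sketch, not part of the original file ----------------------
# A hypothetical agency scraper built on the RSS base class above. The class
# name is invented; the point is that an RSS-style disclosure log usually
# needs no overrides, because getAgencyID() is derived from the running
# script's filename and the feed URL is looked up in scrape.agencydb.
class ExampleRSSDisclogScraper(GenericRSSDisclogScraper):
    """ hypothetical scraper: uses entry id, link, title, date and summary as-is """
    pass
# A per-agency script would then call ExampleRSSDisclogScraper().doScrape()
# from its __main__ block.
# ------------------------------------------------------------------------------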
   
class GenericOAICDisclogScraper(GenericDisclogScraper):
    __metaclass__ = abc.ABCMeta

    @abc.abstractmethod
    def getColumns(self, columns):
        """ rearranges columns if required """
        return

    def getColumnCount(self):
        return 5

    def getDescription(self, content, entry, doc):
        """ get description from table cell """
        descriptiontxt = ""
        for string in content.stripped_strings:
            descriptiontxt = descriptiontxt + " \n" + string
        doc.update({'description': descriptiontxt})
        return

    def getTable(self, soup):
        return soup.table

    def doScrape(self):
        cal = pdt.Calendar()
        foidocsdb = scrape.couch['disclosr-foidocuments']
        (url, mime_type, content) = scrape.fetchURL(scrape.docsdb, self.getURL(), "foidocuments", self.getAgencyID())
        if content != None:
            if mime_type == "text/html" or mime_type == "application/xhtml+xml" or mime_type == "application/xml":
                # http://www.crummy.com/software/BeautifulSoup/documentation.html
                soup = BeautifulSoup(content)
                table = self.getTable(soup)
                for row in table.find_all('tr'):
                    columns = row.find_all('td')
                    if len(columns) == self.getColumnCount():
                        (id, date, description, title, notes) = self.getColumns(columns)
                        print id.string
                        if id.string == None:
                            hash = scrape.mkhash(self.remove_control_chars(url + date.string))
                        else:
                            hash = scrape.mkhash(self.remove_control_chars(url + id.string))
                        links = []
                        for atag in row.find_all("a"):
                            if atag.has_key('href'):
                                links.append(scrape.fullurl(url, atag['href']))
                        doc = foidocsdb.get(hash)
                        if doc == None:
                            print "saving"
                            dtresult = cal.parseDateText(date.string)
                            if len(dtresult) == 2:
                                (dtdate, dtr) = dtresult
                                print dtdate
                                edate = "" + str(dtdate[0]) + '-' + str(dtdate[1]) + '-' + str(dtdate[2])
                            else:
                                edate = datetime.strptime(date.string.strip(), "%d %B %Y").strftime("%Y-%m-%d")
                            doc = {'_id': hash, 'agencyID': self.getAgencyID(), 'url': self.getURL(), 'docID': id.string,
                                   "date": edate, "title": title.string}
                            if links != []:
                                doc.update({'links': links})
                            self.getDescription(description, row, doc)
                            if notes != None:
                                doc.update({'notes': notes.string})
                            foidocsdb.save(doc)
                        else:
                            print "already saved " + hash
                    elif len(row.find_all('th')) == self.getColumnCount():
                        print "header row"
                    else:
                        print "ERROR number of columns incorrect"
                        print row
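
# --- Illustrative sketch, not part of the original file ----------------------
# A hypothetical table-based scraper showing the hooks a concrete subclass of
# GenericOAICDisclogScraper typically supplies: getTable() to pick the right
# <table> out of the page and getColumns() to map a row's five <td> cells onto
# (id, date, description, title, notes). The "disclosure-log" id and the cell
# order below are assumptions for illustration only.
class ExampleOAICDisclogScraper(GenericOAICDisclogScraper):
    def getTable(self, soup):
        # assume the disclosure log is the table with id="disclosure-log"
        return soup.find(id="disclosure-log")

    def getColumns(self, columns):
        # reorder the cells into the order doScrape() unpacks them in
        (id, date, title, description, notes) = columns
        return (id, date, description, title, notes)
# A per-agency script would then call ExampleOAICDisclogScraper().doScrape()
# from its __main__ block.
# ------------------------------------------------------------------------------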