Made a generic OAIC-format table scraper class
[disclosr.git] / documents / scrapers / 3cd40b1240e987cbcd3f0e67054ce259.py
blob:a/documents/scrapers/3cd40b1240e987cbcd3f0e67054ce259.py -> blob:b/documents/scrapers/3cd40b1240e987cbcd3f0e67054ce259.py
# Module prelude for a disclosr FOI-documents scraper plugin.
# The parent directory is put on sys.path so the shared genericScrapers
# module (which lives one level up from documents/scrapers/) can be imported.
import sys, os
sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
import genericScrapers
# RSS feed not detailed
# http://www.doughellmann.com/PyMOTW/abc/
class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
    """OAIC-format disclosure-log scraper for the APVMA FOI page.

    Replaces the former inline BeautifulSoup scraping script: the generic
    base class now does the fetching/parsing, and this subclass only
    supplies the agency-specific bits (agency ID, source URL, and the
    mapping of the five table cells per row).
    """

    def getAgencyID(self):
        """Return the disclosr agency ID this scraper stores documents under."""
        return "3cd40b1240e987cbcd3f0e67054ce259"

    def getURL(self):
        """Return the URL of the agency's FOI disclosure-log table page."""
        return "http://www.apvma.gov.au/about/foi/disclosure/index.php"

    def getColumns(self, columns):
        """Map a row's five <td> cells to (id, date, description, title, notes).

        Unpacking validates the column count: a row without exactly five
        cells raises ValueError, which the caller is presumably expected
        to treat as a malformed row.  Renamed the first local from ``id``
        to ``doc_id`` to avoid shadowing the builtin.
        """
        (doc_id, date, description, title, notes) = columns
        return (doc_id, date, description, title, notes)
   
  if __name__ == '__main__':
  print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
  print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
  ScraperImplementation().doScrape()