beginnings of the RSS scraper
[disclosr.git] / documents / scrapers / be9996f0ac58f71f23d074e82d44ead3.py
blob:a/documents/scrapers/be9996f0ac58f71f23d074e82d44ead3.py -> blob:b/documents/scrapers/be9996f0ac58f71f23d074e82d44ead3.py
# Make the parent directory (documents/) importable so we can load the
# shared scraper base classes.
import sys, os
sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
import genericScrapers

# NOTE: this agency's RSS feed is not detailed, so the generic RSS
# disclosure-log scraper is used as-is.
# Subclassing pattern reference: http://www.doughellmann.com/PyMOTW/abc/
class ScraperImplementation(genericScrapers.GenericRSSDisclogScraper):
    """Disclosure-log scraper for the DEEWR RSS feed.

    The feed itself is not detailed, so all scraping behaviour comes
    from GenericRSSDisclogScraper; this subclass only supplies the
    agency identifier and the feed URL.
    """

    def getAgencyID(self):
        # Agency ID matches this scraper's filename under documents/scrapers/.
        return "be9996f0ac58f71f23d074e82d44ead3"

    def getURL(self):
        # RSS disclosure log published by the agency (foi.deewr.gov.au).
        return "http://foi.deewr.gov.au/disclosure-log/rss"
   
  if __name__ == '__main__':
  print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericRSSDisclogScraper)
  print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericRSSDisclogScraper)
  ScraperImplementation().doScrape()