# more scrapers
# [disclosr.git] / documents / scrapers / 8e874a2fde8aa0ccdc6d14573d766540.py
# (gitweb diff header residue: blob a -> blob b for this scraper file)
  import sys,os
  sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
  import genericScrapers
  import scrape
  from bs4 import BeautifulSoup
  import codecs
  #http://www.doughellmann.com/PyMOTW/abc/
class NewScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
    """Scraper for the current Treasury disclosure-log page layout.

    Parses a two-column (title, date) table and, for each entry, follows
    the entry's links to collect a text description and nested document
    links from the linked page's "divFullWidthColumn" region.
    """

    def getDescription(self, content, entry, doc):
        """Populate doc['description'] and doc['links'] from *entry*.

        For every <a href> in *entry*, fetch the linked page; when it is
        an HTML/XML document, accumulate the stripped text of the
        "divFullWidthColumn" element into the description and record any
        links found inside that element. *doc* is updated in place.
        """
        link = None
        links = []
        description = ""
        for atag in entry.find_all('a'):
            if atag.has_key('href'):
                link = scrape.fullurl(self.getURL(), atag['href'])
                (url, mime_type, htcontent) = scrape.fetchURL(scrape.docsdb, link, "foidocuments", self.getAgencyID(), False)
                if htcontent != None:
                    if mime_type == "text/html" or mime_type == "application/xhtml+xml" or mime_type == "application/xml":
                        # http://www.crummy.com/software/BeautifulSoup/documentation.html
                        soup = BeautifulSoup(htcontent)
                        # Look the container up once (it was previously
                        # searched twice) and skip pages where it is
                        # missing instead of raising AttributeError.
                        container = soup.find(id="divFullWidthColumn")
                        if container != None:
                            for text in container.stripped_strings:
                                # Non-ASCII characters are dropped deliberately.
                                description = description + text.encode('ascii', 'ignore')
                            for atag in container.find_all("a"):
                                if atag.has_key('href'):
                                    links.append(scrape.fullurl(link, atag['href']))
        if links != []:
            doc.update({'links': links})
        if description != "":
            doc.update({'description': description})

    def getColumnCount(self):
        """The new layout's log table has two columns."""
        return 2

    def getTable(self, soup):
        """Return the disclosure-log table element of the page."""
        return soup.find(id="TwoColumnSorting")

    def getColumns(self, columns):
        """Map the (title, date) cells onto the generic
        (id, date, title, description, notes) tuple; the title doubles
        as id and description, notes are unused."""
        (title, date) = columns
        return (title, date, title, title, None)
class OldScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
    """Scraper for the archived (old) Treasury disclosure-log layout.

    Parses a two-column (date, title) "doc-list" table and, for each
    entry, follows the entry's links to collect a text description and
    nested document links from the linked page's "content-item" region.
    """

    def getDescription(self, content, entry, doc):
        """Populate doc['description'] and doc['links'] from *entry*.

        Mirrors NewScraperImplementation.getDescription but reads the
        "content-item" element and keeps text as unicode with newline
        separators. *doc* is updated in place.  The original code
        applied the same doc.update() block twice; the redundant second
        copy has been removed (the final doc state is identical).
        """
        link = None
        links = []
        description = ""
        for atag in entry.find_all('a'):
            if atag.has_key('href'):
                link = scrape.fullurl(self.getURL(), atag['href'])
                (url, mime_type, htcontent) = scrape.fetchURL(scrape.docsdb, link, "foidocuments", self.getAgencyID(), False)
                if htcontent != None:
                    if mime_type == "text/html" or mime_type == "application/xhtml+xml" or mime_type == "application/xml":
                        # http://www.crummy.com/software/BeautifulSoup/documentation.html
                        soup = BeautifulSoup(htcontent)
                        # Look the container up once (it was previously
                        # searched twice) and skip pages where it is
                        # missing instead of raising AttributeError.
                        container = soup.find(id="content-item")
                        if container != None:
                            for text in container.stripped_strings:
                                description = description + text + " \n"
                            for atag in container.find_all("a"):
                                if atag.has_key('href'):
                                    links.append(scrape.fullurl(link, atag['href']))
        if links != []:
            doc.update({'links': links})
        if description != "":
            doc.update({'description': description})

    def getColumnCount(self):
        """The old layout's log table has two columns."""
        return 2

    def getTable(self, soup):
        """Return the disclosure-log table element of the page."""
        return soup.find(class_="doc-list")

    def getColumns(self, columns):
        """Map the (date, title) cells onto the generic
        (id, date, title, description, notes) tuple; note the old site
        lists the date first."""
        (date, title) = columns
        return (title, date, title, title, None)
   
if __name__ == '__main__':
    # Sanity checks: both implementations must plug into the generic
    # OAIC disclosure-log scraper framework.
    print 'Subclass:', issubclass(NewScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
    print 'Instance:', isinstance(NewScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
    # NOTE(review): the new-site scrape is disabled — only the archived
    # site below is actually scraped. Confirm this is intentional.
    #NewScraperImplementation().doScrape()
    print 'Subclass:', issubclass(OldScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
    print 'Instance:', isinstance(OldScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
    # Point the old-layout scraper at the archived Treasury disclosure
    # log and run it.
    osi = OldScraperImplementation()
    osi.disclogURL = "http://archive.treasury.gov.au/content/foi_publications.asp?year=-1&abstract=0&classification=&=&titl=Disclosure+Log+-+Documents+Released+Under+FOI"
    osi.doScrape()
    # old site too