more scrapers


Former-commit-id: 45b01acb63b33a260852f5090a74575c926822bc

import sys, os
sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
import genericScrapers
import dateutil
from dateutil.parser import *
from datetime import *


class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):

    def __init__(self):
        super(ScraperImplementation, self).__init__()

    def getDate(self, content, entry, doc):
        date = ''.join(entry.find('th').stripped_strings).strip()
        (a, b, c) = date.partition("(")
        # The site misspells "October" as "Octber"; normalise before parsing.
        date = self.remove_control_chars(a.replace("Octber", "October"))
        print date
        edate = parse(date, dayfirst=True, fuzzy=True).strftime("%Y-%m-%d")
        print edate
        doc.update({'date': edate})
        return

    def getColumnCount(self):
        return 4

    def getTable(self, soup):
        # "requets" appears to match the misspelling in the page's own
        # summary attribute, so it should not be "corrected" here.
        return soup.find(summary="List of Defence documents released under Freedom of Information requets")

    def getColumns(self, columns):
        (id, description, access, notes) = columns
        return (id, None, description, description, notes)
 
 
if __name__ == '__main__':
    print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
    print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)

    # Scrape each financial year's disclosure log in turn.
    nsi = ScraperImplementation()
    nsi.disclogURL = "http://www.defence.gov.au/foi/disclosure_log_201213.cfm"
    nsi.doScrape()

    nsi.disclogURL = "http://www.defence.gov.au/foi/disclosure_log_201112.cfm"
    nsi.doScrape()

    nsi.disclogURL = "http://www.defence.gov.au/foi/disclosure_log_201011.cfm"
    nsi.doScrape()
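The getDate hook above strips everything from the first "(" and normalises the site's "Octber" typo before handing the text to dateutil; a quick illustration, using a made-up heading string:

    from dateutil.parser import parse
    # Hypothetical heading text as it might appear in the table's <th>.
    heading = "3 Octber 2012 (152KB PDF)"
    (a, b, c) = heading.partition("(")
    date = a.replace("Octber", "October")
    print parse(date, dayfirst=True, fuzzy=True).strftime("%Y-%m-%d")  # 2012-10-03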
 
 
import sys, os
sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
import genericScrapers
import dateutil
from dateutil.parser import *
from datetime import *
import scrape
from bs4 import BeautifulSoup


class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):

    def __init__(self):
        super(ScraperImplementation, self).__init__()

    def getDescription(self, content, entry, doc):
        link = None
        links = []
        description = ""
        for atag in entry.find_all('a'):
            if atag.has_key('href'):
                link = scrape.fullurl(self.getURL(), atag['href'])
                (url, mime_type, htcontent) = scrape.fetchURL(scrape.docsdb, link, "foidocuments", self.getAgencyID(), False)
                if htcontent != None:
                    if mime_type == "text/html" or mime_type == "application/xhtml+xml" or mime_type == "application/xml":
                        soup = BeautifulSoup(htcontent)
                        row = soup.find(id="content_div_148050")
                        description = ''.join(row.stripped_strings)
                        # Collect the document links from the detail page.
                        for atag in row.find_all("a"):
                            if atag.has_key('href'):
                                links.append(scrape.fullurl(link, atag['href']))

        if links != []:
            doc.update({'links': links})
        if description != "":
            doc.update({'description': description})

    def getColumnCount(self):
        return 4

    def getColumns(self, columns):
        (id, date, datepub, title) = columns
        return (id, date, title, title, None)
 
 
if __name__ == '__main__':
    print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
    print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)

    # The disclosure log is split across five result pages.
    nsi = ScraperImplementation()
    nsi.disclogURL = "http://www.dbcde.gov.au/about_us/freedom_of_information_disclosure_log/foi_list?result_146858_result_page=1"
    nsi.doScrape()
    nsi.disclogURL = "http://www.dbcde.gov.au/about_us/freedom_of_information_disclosure_log/foi_list?result_146858_result_page=2"
    nsi.doScrape()
    nsi.disclogURL = "http://www.dbcde.gov.au/about_us/freedom_of_information_disclosure_log/foi_list?result_146858_result_page=3"
    nsi.doScrape()
    nsi.disclogURL = "http://www.dbcde.gov.au/about_us/freedom_of_information_disclosure_log/foi_list?result_146858_result_page=4"
    nsi.doScrape()
    nsi.disclogURL = "http://www.dbcde.gov.au/about_us/freedom_of_information_disclosure_log/foi_list?result_146858_result_page=5"
    nsi.doScrape()
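Since the five URLs above differ only in the result_page parameter, the same run could be written as a loop; a minimal sketch, reusing the nsi instance from above and assuming the page count stays fixed at five:

    # Equivalent pagination loop (page count assumed, not discovered).
    base = "http://www.dbcde.gov.au/about_us/freedom_of_information_disclosure_log/foi_list?result_146858_result_page=%d"
    for page in range(1, 6):
        nsi.disclogURL = base % page
        nsi.doScrape()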
 
import sys, os
sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
import genericScrapers
import dateutil
from dateutil.parser import *
from datetime import *


class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):

    def __init__(self):
        super(ScraperImplementation, self).__init__()

    def getColumnCount(self):
        return 6

    def getColumns(self, columns):
        # Drop the "date published" column; the generic scraper expects an
        # (id, date, title, description, notes) tuple.
        (id, date, title, description, datepub, notes) = columns
        return (id, date, title, description, notes)
 
 
if __name__ == '__main__':
    print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
    print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)

    # Separate disclosure logs per office and financial year.
    nsi = ScraperImplementation()
    nsi.disclogURL = "http://www.dpmc.gov.au/foi/ips/disclosure_logs/pmo/2011-12.cfm"
    nsi.doScrape()
    nsi.disclogURL = "http://www.dpmc.gov.au/foi/ips/disclosure_logs/dpmc/2011-12.cfm"
    nsi.doScrape()
    nsi.disclogURL = "http://www.dpmc.gov.au/foi/ips/disclosure_logs/dpmc/2012-13.cfm"
    nsi.doScrape()
    nsi.disclogURL = "http://www.dpmc.gov.au/foi/ips/disclosure_logs/omsi/2011-12.cfm"
    nsi.doScrape()
    nsi.disclogURL = "http://www.dpmc.gov.au/foi/ips/disclosure_logs/omps/2012-13.cfm"
    nsi.doScrape()
 
multiple pages  
 
import sys, os
sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
import genericScrapers
import scrape
from bs4 import BeautifulSoup
import codecs
# http://www.doughellmann.com/PyMOTW/abc/


class NewScraperImplementation(genericScrapers.GenericOAICDisclogScraper):

    def getDescription(self, content, entry, doc):
        link = None
        links = []
        description = ""
        for atag in entry.find_all('a'):
            if atag.has_key('href'):
                link = scrape.fullurl(self.getURL(), atag['href'])
                (url, mime_type, htcontent) = scrape.fetchURL(scrape.docsdb, link, "foidocuments", self.getAgencyID(), False)
                if htcontent != None:
                    if mime_type == "text/html" or mime_type == "application/xhtml+xml" or mime_type == "application/xml":
                        # http://www.crummy.com/software/BeautifulSoup/documentation.html
                        soup = BeautifulSoup(htcontent)
                        for text in soup.find(id="divFullWidthColumn").stripped_strings:
                            description = description + text.encode('ascii', 'ignore')
                        for atag in soup.find(id="divFullWidthColumn").find_all("a"):
                            if atag.has_key('href'):
                                links.append(scrape.fullurl(link, atag['href']))

        if links != []:
            doc.update({'links': links})
        if description != "":
            doc.update({'description': description})

    def getColumnCount(self):
        return 2

    def getTable(self, soup):
        return soup.find(id="TwoColumnSorting")

    def getColumns(self, columns):
        (title, date) = columns
        return (title, date, title, title, None)


class OldScraperImplementation(genericScrapers.GenericOAICDisclogScraper):

    def getDescription(self, content, entry, doc):
        link = None
        links = []
        description = ""
        for atag in entry.find_all('a'):
            if atag.has_key('href'):
                link = scrape.fullurl(self.getURL(), atag['href'])
                (url, mime_type, htcontent) = scrape.fetchURL(scrape.docsdb, link, "foidocuments", self.getAgencyID(), False)
                if htcontent != None:
                    if mime_type == "text/html" or mime_type == "application/xhtml+xml" or mime_type == "application/xml":
                        # http://www.crummy.com/software/BeautifulSoup/documentation.html
                        soup = BeautifulSoup(htcontent)
                        for text in soup.find(id="content-item").stripped_strings:
                            description = description + text + " \n"
                        for atag in soup.find(id="content-item").find_all("a"):
                            if atag.has_key('href'):
                                links.append(scrape.fullurl(link, atag['href']))

        if links != []:
            doc.update({'links': links})
        if description != "":
            doc.update({'description': description})

    def getColumnCount(self):
        return 2

    def getTable(self, soup):
        return soup.find(class_="doc-list")

    def getColumns(self, columns):
        (date, title) = columns
        return (title, date, title, title, None)


if __name__ == '__main__':
    print 'Subclass:', issubclass(NewScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
    print 'Instance:', isinstance(NewScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
    NewScraperImplementation().doScrape()
    print 'Subclass:', issubclass(OldScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
    print 'Instance:', isinstance(OldScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
    # Scrape the old archive site too.
    osi = OldScraperImplementation()
    osi.disclogURL = "http://archive.treasury.gov.au/content/foi_publications.asp?year=-1&abstract=0&classification=&=&titl=Disclosure+Log+-+Documents+Released+Under+FOI"
    osi.doScrape()
   
import sys, os
sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
import genericScrapers
import dateutil
from dateutil.parser import *
from datetime import *


class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):

    def __init__(self):
        super(ScraperImplementation, self).__init__()

    def getColumnCount(self):
        return 2

    def getColumns(self, columns):
        (date, title) = columns
        return (title, date, title, title, None)
 
 
if __name__ == '__main__':
    print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
    print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)

    # One disclosure page per year, 2009 through 2012.
    nsi = ScraperImplementation()
    nsi.disclogURL = "http://www.immi.gov.au/about/foi/foi-disclosures-2012.htm"
    nsi.doScrape()
    nsi.disclogURL = "http://www.immi.gov.au/about/foi/foi-disclosures-2011.htm"
    nsi.doScrape()
    nsi.disclogURL = "http://www.immi.gov.au/about/foi/foi-disclosures-2010.htm"
    nsi.doScrape()
    nsi.disclogURL = "http://www.immi.gov.au/about/foi/foi-disclosures-2009.htm"
    nsi.doScrape()
 
multipage immi
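For orientation, all of these scrapers override hooks on genericScrapers.GenericOAICDisclogScraper, which lives one directory up and is not part of this change. A minimal sketch of that hook surface, inferred purely from the overrides above (method bodies here are illustrative placeholders, not the real implementation):

    # Hypothetical skeleton of the base-class hooks used in this commit.
    class GenericOAICDisclogScraperSketch(object):
        disclogURL = None  # callers set this before each doScrape()

        def getTable(self, soup):
            # Locate the disclosure-log table in the parsed page.
            raise NotImplementedError

        def getColumnCount(self):
            # Number of cells expected in each table row.
            raise NotImplementedError

        def getColumns(self, columns):
            # Map a row's cells to an (id, date, title, description, notes) tuple.
            raise NotImplementedError

        def getDate(self, content, entry, doc):
            # Optional: parse the entry's date into doc['date'] as YYYY-MM-DD.
            pass

        def getDescription(self, content, entry, doc):
            # Optional: follow the entry's links and fill doc['description']
            # and doc['links'].
            pass

        def doScrape(self):
            # Fetch disclogURL, walk the table row by row, and store one
            # document per row via the hooks above.
            raise NotImplementedError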