more scrapers


Former-commit-id: 45b01acb63b33a260852f5090a74575c926822bc

--- /dev/null
+++ b/documents/scrapers/00a294de663db69062ca09aede7c0487.py
@@ -1,1 +1,47 @@
+import sys, os
+sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
+import genericScrapers
+from dateutil.parser import parse
 
+
+class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
+
+    def __init__(self):
+        super(ScraperImplementation, self).__init__()
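+
+    # getDate: the log's date cells carry parenthesised notes and an
+    # occasional "Octber" typo, so clean the text before parsing it.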
+    def getDate(self, content, entry, doc):
+        date = ''.join(entry.find('th').stripped_strings).strip()
+        date = date.partition("(")[0]
+        date = self.remove_control_chars(date.replace("Octber", "October"))
+        print date
+        edate = parse(date, dayfirst=True, fuzzy=True).strftime("%Y-%m-%d")
+        print edate
+        doc.update({'date': edate})
+
+    def getColumnCount(self):
+        return 4
+
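+    # getTable: the misspelling "requets" is deliberate; it matches the
+    # summary attribute as it appears in the source page's markup.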
+    def getTable(self, soup):
+        return soup.find(summary="List of Defence documents released under Freedom of Information requets")
+
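+    # Map the log's (id, description, access, notes) columns onto the generic
+    # (id, date, title, description, notes) tuple; the date is set by getDate.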
+    def getColumns(self, columns):
+        (id, description, access, notes) = columns
+        return (id, None, description, description, notes)
+
+
+if __name__ == '__main__':
+    print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
+    print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
+
+    nsi = ScraperImplementation()
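+    # the Defence log is split across one page per financial year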
+    nsi.disclogURL = "http://www.defence.gov.au/foi/disclosure_log_201213.cfm"
+    nsi.doScrape()
+
+    nsi.disclogURL = "http://www.defence.gov.au/foi/disclosure_log_201112.cfm"
+    nsi.doScrape()
+
+    nsi.disclogURL = "http://www.defence.gov.au/foi/disclosure_log_201011.cfm"
+    nsi.doScrape()
+
+

--- a/documents/scrapers/00a294de663db69062ca09aede7c0487.txt
+++ /dev/null
@@ -1,2 +1,1 @@
-multipage
 

--- a/documents/scrapers/0ae822d1a748e60d90f0b79b97d5a3e5.txt
+++ /dev/null
@@ -1,2 +1,1 @@
-ACMA style
 

--- /dev/null
+++ b/documents/scrapers/1803322b27286950cab0c543168b5f21.py
@@ -1,1 +1,58 @@
+import sys, os
+sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
+import genericScrapers
+import scrape
+from bs4 import BeautifulSoup
+
+
+class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
 
+    def __init__(self):
+        super(ScraperImplementation, self).__init__()
+
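+    # getDescription: follow each link in the row, fetch the linked page and
+    # pull the description text and any document links out of its content div.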
+    def getDescription(self, content, entry, doc):
+        links = []
+        description = ""
+        for atag in entry.find_all('a'):
+            if atag.has_key('href'):
+                link = scrape.fullurl(self.getURL(), atag['href'])
+                (url, mime_type, htcontent) = scrape.fetchURL(scrape.docsdb, link, "foidocuments", self.getAgencyID(), False)
+                if htcontent is not None:
+                    if mime_type in ("text/html", "application/xhtml+xml", "application/xml"):
+                        soup = BeautifulSoup(htcontent)
+                        row = soup.find(id="content_div_148050")
+                        if row is not None:  # some result pages lack the content div
+                            description = ''.join(row.stripped_strings)
+                            for atag in row.find_all("a"):
+                                if atag.has_key('href'):
+                                    links.append(scrape.fullurl(link, atag['href']))
+
+        if links:
+            doc.update({'links': links})
+        if description:
+            doc.update({'description': description})
+
+    def getColumnCount(self):
+        return 4
+
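+    # Map (id, date, datepub, title) onto the generic tuple; the title doubles
+    # as the description and the publication date is dropped.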
+    def getColumns(self, columns):
+        (id, date, datepub, title) = columns
+        return (id, date, title, title, None)
+
+
+if __name__ == '__main__':
+    print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
+    print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
+
+    nsi = ScraperImplementation()
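+    # the DBCDE log is paginated; walk result pages 1 through 5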
+    nsi.disclogURL = "http://www.dbcde.gov.au/about_us/freedom_of_information_disclosure_log/foi_list?result_146858_result_page=1"
+    nsi.doScrape()
+    nsi.disclogURL = "http://www.dbcde.gov.au/about_us/freedom_of_information_disclosure_log/foi_list?result_146858_result_page=2"
+    nsi.doScrape()
+    nsi.disclogURL = "http://www.dbcde.gov.au/about_us/freedom_of_information_disclosure_log/foi_list?result_146858_result_page=3"
+    nsi.doScrape()
+    nsi.disclogURL = "http://www.dbcde.gov.au/about_us/freedom_of_information_disclosure_log/foi_list?result_146858_result_page=4"
+    nsi.doScrape()
+    nsi.disclogURL = "http://www.dbcde.gov.au/about_us/freedom_of_information_disclosure_log/foi_list?result_146858_result_page=5"
+    nsi.doScrape()
+

--- a/documents/scrapers/1803322b27286950cab0c543168b5f21.txt
+++ /dev/null
@@ -1,2 +1,1 @@
-multipage log
 

--- /dev/null
+++ b/documents/scrapers/8796220032faf94501bd366763263685.py
@@ -1,1 +1,37 @@
+import sys, os
+sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
+import genericScrapers
 
+
+class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
+
+    def __init__(self):
+        super(ScraperImplementation, self).__init__()
+
+    def getColumnCount(self):
+        return 6
+
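+    # Six source columns; drop the publication date when mapping onto the
+    # generic (id, date, title, description, notes) tuple.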
+    def getColumns(self, columns):
+        (id, date, title, description, datepub, notes) = columns
+        return (id, date, title, description, notes)
+
+
+if __name__ == '__main__':
+    print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
+    print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
+
+    nsi = ScraperImplementation()
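+    # PM&C keeps separate logs per office and financial year; scrape each one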
+    nsi.disclogURL = "http://www.dpmc.gov.au/foi/ips/disclosure_logs/pmo/2011-12.cfm"
+    nsi.doScrape()
+    nsi.disclogURL = "http://www.dpmc.gov.au/foi/ips/disclosure_logs/dpmc/2011-12.cfm"
+    nsi.doScrape()
+    nsi.disclogURL = "http://www.dpmc.gov.au/foi/ips/disclosure_logs/dpmc/2012-13.cfm"
+    nsi.doScrape()
+    nsi.disclogURL = "http://www.dpmc.gov.au/foi/ips/disclosure_logs/omsi/2011-12.cfm"
+    nsi.doScrape()
+    nsi.disclogURL = "http://www.dpmc.gov.au/foi/ips/disclosure_logs/omps/2012-13.cfm"
+    nsi.doScrape()
+

--- a/documents/scrapers/8796220032faf94501bd366763263685.txt
+++ /dev/null
@@ -1,2 +1,1 @@
-multiple pages
 

--- a/documents/scrapers/8e874a2fde8aa0ccdc6d14573d766540.py
+++ b/documents/scrapers/8e874a2fde8aa0ccdc6d14573d766540.py
@@ -3,7 +3,7 @@
 import genericScrapers
 import scrape
 from bs4 import BeautifulSoup
-import codecs 
+import codecs
 #http://www.doughellmann.com/PyMOTW/abc/
 class NewScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
         def getDescription(self,content, entry,doc):
@@ -20,7 +20,7 @@
                                                 soup = BeautifulSoup(htcontent)
                                                 for text in soup.find(id="divFullWidthColumn").stripped_strings:
                                                     description = description + text.encode('ascii', 'ignore')
-                                                
+
                                                 for atag in soup.find(id="divFullWidthColumn").find_all("a"):
                                                       	if atag.has_key('href'):
                                                               	links.append(scrape.fullurl(link,atag['href']))
@@ -76,11 +76,10 @@
 if __name__ == '__main__':
     print 'Subclass:', issubclass(NewScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
     print 'Instance:', isinstance(NewScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
-    #NewScraperImplementation().doScrape()
+    NewScraperImplementation().doScrape()
     print 'Subclass:', issubclass(OldScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
     print 'Instance:', isinstance(OldScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
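+    # the old Treasury site still hosts earlier releases, so scrape it too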
     osi = OldScraperImplementation()
     osi.disclogURL = "http://archive.treasury.gov.au/content/foi_publications.asp?year=-1&abstract=0&classification=&=&titl=Disclosure+Log+-+Documents+Released+Under+FOI"
     osi.doScrape()
-# old site too
 

--- /dev/null
+++ b/documents/scrapers/b0a3281ba66efe173c5a33d5ef90ff76.py
@@ -1,1 +1,35 @@
+import sys, os
+sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
+import genericScrapers
 
+
+class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
+
+    def __init__(self):
+        super(ScraperImplementation, self).__init__()
+
+    def getColumnCount(self):
+        return 2
+
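+    # The log publishes only date and title columns; reuse the title as both
+    # the row id and the description since no separate identifier exists.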
+    def getColumns(self, columns):
+        (date, title) = columns
+        return (title, date, title, title, None)
+
+
+if __name__ == '__main__':
+    print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
+    print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
+
+    nsi = ScraperImplementation()
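+    # the immigration log is split into one page per year (2009 to 2012)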
+    nsi.disclogURL = "http://www.immi.gov.au/about/foi/foi-disclosures-2012.htm"
+    nsi.doScrape()
+    nsi.disclogURL = "http://www.immi.gov.au/about/foi/foi-disclosures-2011.htm"
+    nsi.doScrape()
+    nsi.disclogURL = "http://www.immi.gov.au/about/foi/foi-disclosures-2010.htm"
+    nsi.doScrape()
+    nsi.disclogURL = "http://www.immi.gov.au/about/foi/foi-disclosures-2009.htm"
+    nsi.doScrape()
+

--- a/documents/scrapers/b0a3281ba66efe173c5a33d5ef90ff76.txt
+++ /dev/null
@@ -1,2 +1,1 @@
-multipage immi