pagination buttons

--- a/documents/genericScrapers.py
+++ b/documents/genericScrapers.py
@@ -196,10 +196,9 @@
         (url, mime_type, content) = scrape.fetchURL(scrape.docsdb,
             self.getURL(), "foidocuments", self.getAgencyID())
         if content is not None:
-            if mime_type is "text/html"\
-            or mime_type is "application/xhtml+xml"\
-            or mime_type is"application/xml":
+            if mime_type == "text/html" or mime_type == "application/xhtml+xml" or mime_type == "application/xml":
             # http://www.crummy.com/software/BeautifulSoup/documentation.html
+                print "parsing"
                 soup = BeautifulSoup(content)
                 table = self.getTable(soup)
                 for row in self.getRows(table):
@@ -217,11 +216,11 @@
                             dochash = scrape.mkhash(
                                 self.remove_control_chars(
                                     url + (''.join(id.stripped_strings))))
-                        doc = foidocsdb.get(hash)
+                        doc = foidocsdb.get(dochash)
 
                         if doc is None:
-                            print "saving " + hash
-                            doc = {'_id': hash,
+                            print "saving " + dochash
+                            doc = {'_id': dochash,
                             'agencyID': self.getAgencyID(),
                             'url': self.getURL(),
                             'docID': (''.join(id.stripped_strings))}

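Note on the hunk above: "is" compares object identity in Python, not value, so a check like mime_type is "text/html" only passes when both strings happen to be the same interned object; "==" compares the actual text. A minimal sketch of the difference (CPython 2, matching this codebase):

    a = "text/html"
    b = "".join(["text/", "html"])  # same value, distinct object
    print a == b                    # True: value equality
    print a is b                    # False in CPython: different objects
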
 Binary files /dev/null and b/documents/img/feed-icon-14x14.png differ
--- a/documents/index.php
+++ b/documents/index.php
@@ -5,7 +5,13 @@
 include_once('../include/common.inc.php');
 $startkey = (isset($_REQUEST['start_key']) ? $_REQUEST['start_key'] : '9999-99-99');
 ?>
+<div class="headline">Read all the information released by Australian Federal Government agencies under the FOI Act in one place!</div>
+<a style='float:right' href="rss.xml.php"><img src="img/feed-icon-14x14.png" alt="RSS Icon"/> All Agencies RSS Feed</a><br>
 <?php
+if ($startkey != '9999-99-99') {
+    echo "<a class='btn btn-large btn-info' href='?start_key=$startkey'><i class='icon-circle-arrow-left icon-white'></i> previous page</a>";
+}
+echo "<a class='btn btn-large btn-primary' href='?start_key=$endkey' style='float:right;'>next page <i class='icon-circle-arrow-right icon-white'></i></a>";
 
 $agenciesdb = $server->get_db('disclosr-agencies');
 
@@ -25,7 +31,10 @@
 } catch (SetteeRestClientException $e) {
     setteErrorHandler($e);
 }
-echo "<a href='?start_key=$endkey'>next page</a>";
+if ($startkey != '9999-99-99') {
+    echo "<a class='btn btn-large btn-info' href='?start_key=$startkey'><i class='icon-circle-arrow-left icon-white'></i> previous page</a>";
+}
+echo "<a class='btn btn-large btn-primary' href='?start_key=$endkey' style='float:right;'>next page <i class='icon-circle-arrow-right icon-white'></i></a>";
 include_footer_documents();
 ?>
 
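For context, these buttons do keyset paging over a CouchDB view: each page is fetched starting at start_key, and $endkey (which appears to be set to the key of the last row emitted by the listing further down) is handed back as the next request's start_key. A rough sketch of the same pattern, assuming the couchdb-python client; the view name "docs/byDate" and the page size are illustrative, not taken from this code:

    # keyset pagination sketch; "docs/byDate" and page_size are assumptions
    def fetch_page(db, start_key, page_size=20):
        rows = list(db.view("docs/byDate", startkey=start_key,
                            descending=True, limit=page_size + 1))
        # fetch one extra row: its key becomes the next page's start_key
        next_key = rows[page_size].key if len(rows) > page_size else None
        return rows[:page_size], next_key
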

--- /dev/null
+++ b/documents/scrapers/00a294de663db69062ca09aede7c0487.py
@@ -1,1 +1,47 @@
+import sys,os
+sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
+import genericScrapers
+import dateutil
+from dateutil.parser import *
+from datetime import *
 
+
+class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
+
+    def __init__(self):
+        super(ScraperImplementation, self).__init__()
+
+    def getDate(self, content, entry, doc):
+        date = ''.join(entry.find('th').stripped_strings).strip()
+        (a, b, c) = date.partition("(")
+        # some dates in the source log appear to misspell October as "Octber"
+        date = self.remove_control_chars(a.replace("Octber", "October"))
+        print date
+        edate = parse(date, dayfirst=True, fuzzy=True).strftime("%Y-%m-%d")
+        print edate
+        doc.update({'date': edate})
+        return
+
+    def getColumnCount(self):
+        return 4
+
+    def getTable(self, soup):
+        return soup.find(summary="List of Defence documents released under Freedom of Information requets")
+
+    def getColumns(self, columns):
+        (id, description, access, notes) = columns
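+        # map the four source columns onto the generic
+        # (id, date, title, description, notes) tuple; the date is filled
+        # in by the getDate override above, so None is passed here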
+        return (id, None, description, description, notes)
+
+
+if __name__ == '__main__':
+    print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
+    print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
+
+    nsi = ScraperImplementation()
+    nsi.disclogURL = "http://www.defence.gov.au/foi/disclosure_log_201213.cfm"
+    nsi.doScrape()
+
+    nsi.disclogURL = "http://www.defence.gov.au/foi/disclosure_log_201112.cfm"
+    nsi.doScrape()
+
+    nsi.disclogURL = "http://www.defence.gov.au/foi/disclosure_log_201011.cfm"
+    nsi.doScrape()
+
+

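This scraper, like the DBCDE and DPMC ones below, covers a multipage log by reassigning disclogURL between doScrape() calls. The same driver could be written as a loop over the varying part of the URL:

    # equivalent loop form of the repeated assign-and-scrape calls above
    nsi = ScraperImplementation()
    for year in ("201213", "201112", "201011"):
        nsi.disclogURL = "http://www.defence.gov.au/foi/disclosure_log_%s.cfm" % year
        nsi.doScrape()
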
--- a/documents/scrapers/00a294de663db69062ca09aede7c0487.txt
+++ /dev/null
@@ -1,2 +1,1 @@
-multipage
 

--- a/documents/scrapers/0ae822d1a748e60d90f0b79b97d5a3e5.txt
+++ /dev/null
@@ -1,2 +1,1 @@
-ACMA style
 

--- /dev/null
+++ b/documents/scrapers/1803322b27286950cab0c543168b5f21.py
@@ -1,1 +1,58 @@
+import sys,os
+sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
+import genericScrapers
+import dateutil
+from dateutil.parser import *
+from datetime import *
+import scrape
+from bs4 import BeautifulSoup
+class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
 
+    def __init__(self):
+        super(ScraperImplementation, self).__init__()
+
+    def getDescription(self, content, entry, doc):
+        link = None
+        links = []
+        description = ""
+        for atag in entry.find_all('a'):
+            if atag.has_key('href'):
+                link = scrape.fullurl(self.getURL(), atag['href'])
+                (url, mime_type, htcontent) = scrape.fetchURL(scrape.docsdb, link, "foidocuments", self.getAgencyID(), False)
+                if htcontent is not None:
+                    if mime_type == "text/html" or mime_type == "application/xhtml+xml" or mime_type == "application/xml":
+                        soup = BeautifulSoup(htcontent)
+                        # page-specific container that holds this entry's detail text
+                        row = soup.find(id="content_div_148050")
+                        description = ''.join(row.stripped_strings)
+                        for atag in row.find_all("a"):
+                            if atag.has_key('href'):
+                                links.append(scrape.fullurl(link, atag['href']))
+
+        if links != []:
+            doc.update({'links': links})
+        if description != "":
+            doc.update({'description': description})
+
+    def getColumnCount(self):
+        return 4
+
+    def getColumns(self, columns):
+        (id, date, datepub, title) = columns
+        return (id, date, title, title, None)
+
+
+if __name__ == '__main__':
+    print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
+    print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
+
+    nsi = ScraperImplementation()
+    nsi.disclogURL = "http://www.dbcde.gov.au/about_us/freedom_of_information_disclosure_log/foi_list?result_146858_result_page=1"
+    nsi.doScrape()
+    nsi.disclogURL = "http://www.dbcde.gov.au/about_us/freedom_of_information_disclosure_log/foi_list?result_146858_result_page=2"
+    nsi.doScrape()
+    nsi.disclogURL = "http://www.dbcde.gov.au/about_us/freedom_of_information_disclosure_log/foi_list?result_146858_result_page=3"
+    nsi.doScrape()
+    nsi.disclogURL = "http://www.dbcde.gov.au/about_us/freedom_of_information_disclosure_log/foi_list?result_146858_result_page=4"
+    nsi.doScrape()
+    nsi.disclogURL = "http://www.dbcde.gov.au/about_us/freedom_of_information_disclosure_log/foi_list?result_146858_result_page=5"
+    nsi.doScrape()
+

--- a/documents/scrapers/1803322b27286950cab0c543168b5f21.txt
+++ /dev/null
@@ -1,2 +1,1 @@
-multipage log
 

--- /dev/null
+++ b/documents/scrapers/8796220032faf94501bd366763263685.py
@@ -1,1 +1,37 @@
+import sys,os
+sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
+import genericScrapers
+import dateutil
+from dateutil.parser import *
+from datetime import *
 
+
+class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
+
+    def __init__(self):
+        super(ScraperImplementation, self).__init__()
+
+    def getColumnCount(self):
+        return 6
+
+    def getColumns(self, columns):
+        (id, date, title, description, datepub, notes) = columns
+        return (id, date, title, description, notes)
+
+
+if __name__ == '__main__':
+    print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
+    print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
+
+    nsi = ScraperImplementation()
+    nsi.disclogURL = "http://www.dpmc.gov.au/foi/ips/disclosure_logs/pmo/2011-12.cfm"
+    nsi.doScrape()
+    nsi.disclogURL = "http://www.dpmc.gov.au/foi/ips/disclosure_logs/dpmc/2011-12.cfm"
+    nsi.doScrape()
+    nsi.disclogURL = "http://www.dpmc.gov.au/foi/ips/disclosure_logs/dpmc/2012-13.cfm"
+    nsi.doScrape()
+    nsi.disclogURL = "http://www.dpmc.gov.au/foi/ips/disclosure_logs/omsi/2011-12.cfm"
+    nsi.doScrape()
+    nsi.disclogURL = "http://www.dpmc.gov.au/foi/ips/disclosure_logs/omps/2012-13.cfm"
+    nsi.doScrape()
+

--- a/documents/scrapers/8796220032faf94501bd366763263685.txt
+++ /dev/null
@@ -1,2 +1,1 @@
-multiple pages
 

--- a/documents/scrapers/8e874a2fde8aa0ccdc6d14573d766540.py
+++ b/documents/scrapers/8e874a2fde8aa0ccdc6d14573d766540.py
@@ -3,7 +3,7 @@
 import genericScrapers
 import scrape
 from bs4 import BeautifulSoup
-import codecs 
+import codecs
 #http://www.doughellmann.com/PyMOTW/abc/
 class NewScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
         def getDescription(self,content, entry,doc):
@@ -20,7 +20,7 @@
                                                 soup = BeautifulSoup(htcontent)
                                                 for text in soup.find(id="divFullWidthColumn").stripped_strings:
                                                     description = description + text.encode('ascii', 'ignore')
-                                                
+
                                                 for atag in soup.find(id="divFullWidthColumn").find_all("a"):
                                                       	if atag.has_key('href'):
                                                               	links.append(scrape.fullurl(link,atag['href']))
@@ -76,11 +76,10 @@
 if __name__ == '__main__':
     print 'Subclass:', issubclass(NewScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
     print 'Instance:', isinstance(NewScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
-    #NewScraperImplementation().doScrape()
+    NewScraperImplementation().doScrape()
     print 'Subclass:', issubclass(OldScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
     print 'Instance:', isinstance(OldScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
     osi = OldScraperImplementation()
     osi.disclogURL = "http://archive.treasury.gov.au/content/foi_publications.asp?year=-1&abstract=0&classification=&=&titl=Disclosure+Log+-+Documents+Released+Under+FOI"
     osi.doScrape()
-# old site too
 

--- /dev/null
+++ b/documents/scrapers/b0a3281ba66efe173c5a33d5ef90ff76.py
@@ -1,1 +1,35 @@
+import sys,os
+sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
+import genericScrapers
+import dateutil
+from dateutil.parser import *
+from datetime import *
 
+
+class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
+
+    def __init__(self):
+        super(ScraperImplementation, self).__init__()
+
+    def getColumnCount(self):
+        return 2
+
+    def getColumns(self, columns):
+        (date, title) = columns
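+        # only date and title columns exist in this log, so title doubles
+        # as the id and the description in the generic tuple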
+        return (title, date, title, title, None)
+
+
+if __name__ == '__main__':
+    print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
+    print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
+
+    nsi = ScraperImplementation()
+    nsi.disclogURL = "http://www.immi.gov.au/about/foi/foi-disclosures-2012.htm"
+    nsi.doScrape()
+    nsi.disclogURL = "http://www.immi.gov.au/about/foi/foi-disclosures-2011.htm"
+    nsi.doScrape()
+    nsi.disclogURL = "http://www.immi.gov.au/about/foi/foi-disclosures-2010.htm"
+    nsi.doScrape()
+    nsi.disclogURL = "http://www.immi.gov.au/about/foi/foi-disclosures-2009.htm"
+    nsi.doScrape()
+

--- a/documents/scrapers/b0a3281ba66efe173c5a33d5ef90ff76.txt
+++ /dev/null
@@ -1,2 +1,1 @@
-multipage immi
 

--- a/documents/template.inc.php
+++ b/documents/template.inc.php
@@ -163,13 +163,13 @@
     if (isset($row->value->links)) {
         $result .= '<h3>Links/Documents</h3><ul itemprop="associatedMedia">';
         foreach ($row->value->links as $link) {
-            $result .= '<li itemscope itemtype="http://schema.org/MediaObject"><a href=' . $link . ' itemprop="url contentURL">' . urlencode($link) . "</a></li>";
+            $result .= '<li itemscope itemtype="http://schema.org/MediaObject"><a href="' . htmlspecialchars($link) . '" itemprop="url contentURL">' . htmlspecialchars($link) . "</a></li>";
         }
 
         $result .= "</ul>";
     }
     $result .= "<small><A itemprop='url' href='" . $row->value->url . "'>View original source...</a> ID: " . strip_tags($row->value->docID) . "</small>";
-    $result .= "</div>";
+    $result .= "</div>\n";
     return $result;
 }
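A note on the escaping change above: urlencode() encodes a value for use inside a URL query string, so applying it to a whole URL shown as link text garbles the display; htmlspecialchars() escapes the characters that are significant when a value is embedded in HTML, which is what both the href attribute and the visible link text need here.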