fix charts: split charts.php into separate by-date and by-agency graphs

Replace the single "employees" chart with Flotr charts for documents by
date (byDateMonthYear view) and by agency (byAgencyID view), fixing the
inverted key/value mapping when building the date series. Also set
per-page titles in the document headers, save extracted PDF and DOCX
descriptions in genericScrapers.py, paginate index.php by end_key,
reindent scrape.py with spaces, and add several new agency scrapers.


Former-commit-id: d6e49522e61927665c8ba633dad5a13344f34841

--- a/documents/about.php
+++ b/documents/about.php
@@ -1,7 +1,7 @@
 <?php
 
 include('template.inc.php');
-include_header_documents("");
+include_header_documents("About");
 include_once('../include/common.inc.php');
 ?>
 <h1>About</h1>

--- a/documents/charts.php
+++ b/documents/charts.php
@@ -1,6 +1,6 @@
 <?php
 include('template.inc.php');
-include_header_documents("");
+include_header_documents("Charts");
 include_once('../include/common.inc.php');
 $agenciesdb = $server->get_db('disclosr-agencies');
 
@@ -15,29 +15,28 @@
     <h1><a href="about.php">Charts</a></h1>
     <h4 class="subheader">Lorem ipsum.</h4>
 </div>
-<div id="employees" style="width:1000px;height:900px;"></div>
+<div id="bydate" style="width:1000px;height:300px;"></div>
+<div id="byagency" style="width:1200px;height:300px;"></div>
 <script id="source">
     window.onload = function() {
         $(document).ready(function() {
   var
     d1    = [],
-    start = new Date("2009/01/01 01:00").getTime(),
-    options,
-    graph,
-    i, x, o;
+    options1,
+    graph,
+    o1;
 
 <?php
     try {
-        $rows = $foidocsdb->get_view("app", "byDate?group=true", null, true)->rows;
+        $rows = $foidocsdb->get_view("app", "byDateMonthYear?group=true")->rows;
 
 
         $dataValues = Array();
         foreach ($rows as $row) {
-            $dataValues[$row->value] = $row->key;
+            $dataValues[$row->key] = $row->value;
         }
         $i = 0;
         ksort($dataValues);
-        foreach ($dataValues as $value => $key) {
+        foreach ($dataValues as $key => $value) {
 $date = date_create_from_format('Y-m-d', $key);
 if (date_format($date, 'U') != "") {
             echo "       d1.push([".date_format($date, 'U')."000, $value]);" . PHP_EOL;
@@ -52,7 +51,7 @@
 
 
         
-  options = {
+  options1 = {
     xaxis : {
       mode : 'time', 
       labelsAngle : 45
@@ -68,19 +67,19 @@
   function drawGraph (opts) {
 
     // Clone the options, so the 'options' variable always keeps intact.
-    o = Flotr._.extend(Flotr._.clone(options), opts || {});
+    o1 = Flotr._.extend(Flotr._.clone(options1), opts || {});
 
     // Return a new graph.
     return Flotr.draw(
-      document.getElementById("employees"),
+      document.getElementById("bydate"),
       [ d1 ],
-      o
+      o1
     );
   }
 
   graph = drawGraph();      
         
-  Flotr.EventAdapter.observe(container, 'flotr:select', function(area){
+  Flotr.EventAdapter.observe(document.getElementById("bydate"), 'flotr:select', function(area){
     // Draw selected area
     graph = drawGraph({
       xaxis : { min : area.x1, max : area.x2, mode : 'time', labelsAngle : 45 },
@@ -89,10 +88,74 @@
   });
         
   // When graph is clicked, draw the graph with default area.
-  Flotr.EventAdapter.observe(container, 'flotr:click', function () { graph = drawGraph(); });
+  Flotr.EventAdapter.observe(document.getElementById("bydate"), 'flotr:click', function () { graph = drawGraph(); });
 
         });
 }; 
+
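+// Data for the by-agency bar chart: d2 holds [index, value] pairs from the
+// byAgencyID view and agencylabels maps each index back to the agency name
+// used for the axis labels.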
+var d2 = [];
+var agencylabels = [];
+function agencytrackformatter(obj) {
+    return agencylabels[Math.floor(obj.x)] + " = " + obj.y;
+}
+function agencytickformatter(val, axis) {
+    if (agencylabels[Math.floor(val)]) {
+        return '<p style="margin-top:8em;-webkit-transform:rotate(-90deg);">' + agencylabels[Math.floor(val)] + "</p>";
+    } else {
+        return "";
+    }
+}
+<?php
+    try {
+        $rows = $foidocsdb->get_view("app", "byAgencyID?group=true")->rows;
+
+
+        $dataValues = Array();
+        $i = 0;
+        foreach ($rows as $row) {
+            echo "       d2.push([".$i.", $row->value]);" . PHP_EOL;
+            echo "       agencylabels.push(['".str_replace("'","",$idtoname[$row->key])."']);" . PHP_EOL;
+            
+            $i++;
+        }
+    } catch (SetteeRestClientException $e) {
+        setteErrorHandler($e);
+    }
+    ?>
+  // Draw the graph
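+  // Render the by-agency bar chart: one bar per agency, with agency names
+  // rotated along the x axis by agencytickformatter.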
+  Flotr.draw(
+   document.getElementById("byagency"),
+    [d2],
+    {
+      bars : {
+        show : true,
+        horizontal : false,
+        shadowSize : 0,
+        barWidth : 0.5
+      },
+      mouse : {
+        track : true,
+        relative : true,
+        trackFormatter: agencytrackformatter
+      },
+      yaxis : {
+        min : 0,
+        autoscaleMargin : 1
+      },
+      xaxis : {
+        minorTickFreq: 1,
+        noTicks: agencylabels.length,
+        showMinorLabels: true,
+        tickFormatter: agencytickformatter
+      },
+      legend : {
+        show: false
+      }
+    }
+  );
 </script>
 
 <?php

--- a/documents/disclogsList.php
+++ b/documents/disclogsList.php
@@ -1,7 +1,7 @@
 <?php
 
 include('template.inc.php');
-include_header_documents("");
+include_header_documents("List of Disclosure Logs");
 include_once('../include/common.inc.php');
 
 echo "<table>

--- /dev/null
+++ b/documents/disclosr-documents.nja
@@ -1,1 +1,7 @@
-
+{
+  "venv": "", 
+  "project-type": "Import from sources", 
+  "name": "disclosr-documents", 
+  "license": "GNU General Public License v3", 
+  "description": ""
+}

--- a/documents/genericScrapers.py
+++ b/documents/genericScrapers.py
@@ -15,10 +15,6 @@
 
 from StringIO import StringIO
 
-from docx import *
-from lxml import etree
-import zipfile
-
 from pdfminer.pdfparser import PDFDocument, PDFParser
 from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter, process_pdf
 from pdfminer.pdfdevice import PDFDevice, TagExtractor
@@ -39,14 +35,14 @@
         """ disclosr agency id """
         if self.agencyID is None:
             self.agencyID = os.path.basename(sys.argv[0]).replace(".py", "")
-            return self.agencyID
+        return self.agencyID
 
     def getURL(self):
         """ disclog URL"""
         if self.disclogURL is None:
             agency = scrape.agencydb.get(self.getAgencyID())
             self.disclogURL = agency['FOIDocumentsURL']
-            return self.disclogURL
+        return self.disclogURL
 
     @abc.abstractmethod
     def doScrape(self):
@@ -62,14 +58,15 @@
              self.getURL(), "foidocuments", self.getAgencyID())
         laparams = LAParams()
         rsrcmgr = PDFResourceManager(caching=True)
-        outfp = StringIO.StringIO()
+        outfp = StringIO()
         device = TextConverter(rsrcmgr, outfp, codec='utf-8',
              laparams=laparams)
-        fp = StringIO.StringIO()
-        fp.write(content)
-        description = output.getvalue()
+        fp = StringIO()
+        fp.write(content.read())
+
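+        # Run the PDF through pdfminer's TextConverter and keep the
+        # extracted text as the disclosure log description.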
         process_pdf(rsrcmgr, device, fp, set(), caching=True,
              check_extractable=True)
+        description = outfp.getvalue()
         fp.close()
         device.close()
         outfp.close()
@@ -77,11 +74,10 @@
         doc = foidocsdb.get(dochash)
         if doc is None:
             print "saving " + dochash
-            edate = datetime.fromtimestamp(mktime()).strftime("%Y-%m-%d")
+            edate = date.today().strftime("%Y-%m-%d")
             doc = {'_id': dochash, 'agencyID': self.getAgencyID()
             , 'url': self.getURL(), 'docID': dochash,
-            "date": edate, "title": "Disclosure Log Updated"}
-            self.getDescription(entry, entry, doc)
+            "date": edate, "title": "Disclosure Log Updated", "description": description}
             foidocsdb.save(doc)
         else:
             print "already saved"
@@ -103,17 +99,16 @@
         for paratext in paratextlist:
             newparatextlist.append(paratext.encode("utf-8"))
         ## Print our documnts test with two newlines under each paragraph
-        description = '\n\n'.join(newparatextlist)
+        description = '\n\n'.join(newparatextlist).strip(' \t\n\r')
         dochash = scrape.mkhash(description)
         doc = foidocsdb.get(dochash)
 
         if doc is None:
             print "saving " + dochash
-            edate = datetime.fromtimestamp(mktime()).strftime("%Y-%m-%d")
+            edate = date.today().strftime("%Y-%m-%d")
             doc = {'_id': dochash, 'agencyID': self.getAgencyID()
             , 'url': self.getURL(), 'docID': dochash,
-            "date": edate, "title": "Disclosure Log Updated"}
-            self.getDescription(entry, entry, doc)
+            "date": edate, "title": "Disclosure Log Updated", "description": description}
             foidocsdb.save(doc)
         else:
             print "already saved"
@@ -201,10 +196,9 @@
         (url, mime_type, content) = scrape.fetchURL(scrape.docsdb,
             self.getURL(), "foidocuments", self.getAgencyID())
         if content is not None:
-            if mime_type is "text/html"\
-            or mime_type is "application/xhtml+xml"\
-            or mime_type is"application/xml":
+            if mime_type == "text/html" or mime_type == "application/xhtml+xml" or mime_type == "application/xml":
             # http://www.crummy.com/software/BeautifulSoup/documentation.html
+                print "parsing"
                 soup = BeautifulSoup(content)
                 table = self.getTable(soup)
                 for row in self.getRows(table):
@@ -222,11 +216,11 @@
                             dochash = scrape.mkhash(
                                 self.remove_control_chars(
                                     url + (''.join(id.stripped_strings))))
-                        doc = foidocsdb.get(hash)
+                        doc = foidocsdb.get(dochash)
 
                         if doc is None:
-                            print "saving " + hash
-                            doc = {'_id': hash,
+                            print "saving " + dochash
+                            doc = {'_id': dochash,
                             'agencyID': self.getAgencyID(),
                             'url': self.getURL(),
                             'docID': (''.join(id.stripped_strings))}

 Binary files /dev/null and b/documents/img/feed-icon-14x14.png differ
--- a/documents/index.php
+++ b/documents/index.php
@@ -3,10 +3,11 @@
 include('template.inc.php');
 include_header_documents("");
 include_once('../include/common.inc.php');
-$startkey = (isset($_REQUEST['start_key']) ? $_REQUEST['start_key'] : '9999-99-99');
+$endkey = (isset($_REQUEST['end_key']) ? $_REQUEST['end_key'] : '9999-99-99');
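+// Pagination: list the byDate view starting from $endkey and remember the
+// last key shown as the end_key for the "next page" link below.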
 ?>
+<div class="headline">Read all the information released by Australian Federal Government agencies under the FOI Act in one place!</div>
+<a style='float:right' href="rss.xml.php"><img src="img/feed-icon-14x14.png" alt="RSS Icon"/> All Agencies RSS Feed</a><br>
 <?php
-
 $agenciesdb = $server->get_db('disclosr-agencies');
 
 $idtoname = Array();
@@ -15,17 +16,18 @@
 }
 $foidocsdb = $server->get_db('disclosr-foidocuments');
 try {
-    $rows = $foidocsdb->get_view("app", "byDate", Array($startkey, '0000-00-00'), true, 20)->rows;
+    $rows = $foidocsdb->get_view("app", "byDate", Array($endkey, '0000-00-00'), true, 20)->rows;
     if ($rows) {
         foreach ($rows as $key => $row) {
             echo displayLogEntry($row, $idtoname);
+            if (!isset($startkey)) $startkey = $row->key;
             $endkey = $row->key;
         }
     }
 } catch (SetteeRestClientException $e) {
     setteErrorHandler($e);
 }
-echo "<a href='?start_key=$endkey'>next page</a>";
+echo "<a class='btn btn-large btn-primary' href='?end_key=$endkey' style='float:right;'>next page <i class='icon-circle-arrow-right icon-white'></i></a>";
 include_footer_documents();
 ?>
 

--- a/documents/scrape.py
+++ b/documents/scrape.py
@@ -8,186 +8,188 @@
 import time
 import os
 import mimetypes
 import re
 import urllib
 import urlparse
 
 def mkhash(input):
-	return hashlib.md5(input).hexdigest().encode("utf-8")
+    return hashlib.md5(input).hexdigest().encode("utf-8")
 
 def canonurl(url):
-	r"""Return the canonical, ASCII-encoded form of a UTF-8 encoded URL, or ''
-	if the URL looks invalid.
-	>>> canonurl('\xe2\x9e\xa1.ws')  # tinyarro.ws
-	'http://xn--hgi.ws/'
-	"""
-	# strip spaces at the ends and ensure it's prefixed with 'scheme://'
-	url = url.strip()
-	if not url:
-		return ''
-	if not urlparse.urlsplit(url).scheme:
-		url = 'http://' + url
-
-	# turn it into Unicode
-	#try:
-	#    url = unicode(url, 'utf-8')
-	#except UnicodeDecodeError:
-	#    return ''  # bad UTF-8 chars in URL
-
-	# parse the URL into its components
-	parsed = urlparse.urlsplit(url)
-	scheme, netloc, path, query, fragment = parsed
-
-	# ensure scheme is a letter followed by letters, digits, and '+-.' chars
-	if not re.match(r'[a-z][-+.a-z0-9]*$', scheme, flags=re.I):
-		return ''
-	scheme = str(scheme)
-
-	# ensure domain and port are valid, eg: sub.domain.<1-to-6-TLD-chars>[:port]
-	match = re.match(r'(.+\.[a-z0-9]{1,6})(:\d{1,5})?$', netloc, flags=re.I)
-	if not match:
-		return ''
-	domain, port = match.groups()
-	netloc = domain + (port if port else '')
-	netloc = netloc.encode('idna')
-
-	# ensure path is valid and convert Unicode chars to %-encoded
-	if not path:
-		path = '/'  # eg: 'http://google.com' -> 'http://google.com/'
-	path = urllib.quote(urllib.unquote(path.encode('utf-8')), safe='/;')
-
-	# ensure query is valid
-	query = urllib.quote(urllib.unquote(query.encode('utf-8')), safe='=&?/')
-
-	# ensure fragment is valid
-	fragment = urllib.quote(urllib.unquote(fragment.encode('utf-8')))
-
-	# piece it all back together, truncating it to a maximum of 4KB
-	url = urlparse.urlunsplit((scheme, netloc, path, query, fragment))
-	return url[:4096]
+    r"""Return the canonical, ASCII-encoded form of a UTF-8 encoded URL, or ''
+    if the URL looks invalid.
+    >>> canonurl('\xe2\x9e\xa1.ws')  # tinyarro.ws
+    'http://xn--hgi.ws/'
+    """
+    # strip spaces at the ends and ensure it's prefixed with 'scheme://'
+    url = url.strip()
+    if not url:
+        return ''
+    if not urlparse.urlsplit(url).scheme:
+        url = 'http://' + url
+
+    # turn it into Unicode
+    #try:
+    #    url = unicode(url, 'utf-8')
+    #except UnicodeDecodeError:
+    #    return ''  # bad UTF-8 chars in URL
+
+    # parse the URL into its components
+    parsed = urlparse.urlsplit(url)
+    scheme, netloc, path, query, fragment = parsed
+
+    # ensure scheme is a letter followed by letters, digits, and '+-.' chars
+    if not re.match(r'[a-z][-+.a-z0-9]*$', scheme, flags=re.I):
+        return ''
+    scheme = str(scheme)
+
+    # ensure domain and port are valid, eg: sub.domain.<1-to-6-TLD-chars>[:port]
+    match = re.match(r'(.+\.[a-z0-9]{1,6})(:\d{1,5})?$', netloc, flags=re.I)
+    if not match:
+        return ''
+    domain, port = match.groups()
+    netloc = domain + (port if port else '')
+    netloc = netloc.encode('idna')
+
+    # ensure path is valid and convert Unicode chars to %-encoded
+    if not path:
+        path = '/'  # eg: 'http://google.com' -> 'http://google.com/'
+    path = urllib.quote(urllib.unquote(path.encode('utf-8')), safe='/;')
+
+    # ensure query is valid
+    query = urllib.quote(urllib.unquote(query.encode('utf-8')), safe='=&?/')
+
+    # ensure fragment is valid
+    fragment = urllib.quote(urllib.unquote(fragment.encode('utf-8')))
+
+    # piece it all back together, truncating it to a maximum of 4KB
+    url = urlparse.urlunsplit((scheme, netloc, path, query, fragment))
+    return url[:4096]
 
 def fullurl(url,href):
-	href = href.replace(" ","%20")
-	href = re.sub('#.*$','',href)
-	return urljoin(url,href)
+    href = href.replace(" ","%20")
+    href = re.sub('#.*$','',href)
+    return urljoin(url,href)
 
 #http://diveintopython.org/http_web_services/etags.html
-class NotModifiedHandler(urllib2.BaseHandler):  
-	def http_error_304(self, req, fp, code, message, headers):
-		addinfourl = urllib2.addinfourl(fp, headers, req.get_full_url())
-		addinfourl.code = code
-		return addinfourl
+class NotModifiedHandler(urllib2.BaseHandler):
+    def http_error_304(self, req, fp, code, message, headers):
+        addinfourl = urllib2.addinfourl(fp, headers, req.get_full_url())
+        addinfourl.code = code
+        return addinfourl
 
 def fetchURL(docsdb, url, fieldName, agencyID, scrape_again=True):
-	url = canonurl(url)
-	hash = mkhash(url)
-	req = urllib2.Request(url)
-	print "Fetching %s (%s)" % (url,hash)
-	if url.startswith("mailto") or url.startswith("javascript") or url.startswith("#") or url == None or url == "":
-		print "Not a valid HTTP url"
-		return (None,None,None)
-	doc = docsdb.get(hash) 
-	if doc == None:
-		doc = {'_id': hash, 'agencyID': agencyID, 'url': url, 'fieldName':fieldName}
-	else:
-		if (('page_scraped' in doc) and (time.time() - doc['page_scraped']) < 60*24*14*1000):
-			print "Uh oh, trying to scrape URL again too soon!"
-			last_attachment_fname = doc["_attachments"].keys()[-1]
-			last_attachment = docsdb.get_attachment(doc,last_attachment_fname)
-			content = last_attachment
-			return (doc['url'],doc['mime_type'],content)
-		if scrape_again == False:
-			print "Not scraping this URL again as requested"
-			return (None,None,None)
-
-	time.sleep(3) # wait 3 seconds to give webserver time to recover
-	
-	req.add_header("User-Agent", "Mozilla/4.0 (compatible; Prometheus webspider; owner maxious@lambdacomplex.org)")
-	#if there is a previous version stored in couchdb, load caching helper tags
-	if doc.has_key('etag'):
-		req.add_header("If-None-Match", doc['etag'])
-	if doc.has_key('last_modified'):
-		req.add_header("If-Modified-Since", doc['last_modified'])
-	 
-	opener = urllib2.build_opener(NotModifiedHandler())
-	try:
-		url_handle = opener.open(req)
-		doc['url'] = url_handle.geturl() # may have followed a redirect to a new url
-		headers = url_handle.info() # the addinfourls have the .info() too
-		doc['etag'] = headers.getheader("ETag")
-		doc['last_modified'] = headers.getheader("Last-Modified") 
-		doc['date'] = headers.getheader("Date") 
-		doc['page_scraped'] = time.time() 
-		doc['web_server'] = headers.getheader("Server") 
-		doc['via'] = headers.getheader("Via") 
-		doc['powered_by'] = headers.getheader("X-Powered-By") 
-		doc['file_size'] = headers.getheader("Content-Length") 
-		content_type = headers.getheader("Content-Type")
-		if content_type != None:
-			 doc['mime_type'] = content_type.split(";")[0]
-		else:
-			 (type,encoding) = mimetypes.guess_type(url)
-			 doc['mime_type'] = type
-		if hasattr(url_handle, 'code'):
-			if url_handle.code == 304:
-				print "the web page has not been modified"
-				return (None,None,None)
-			else: 
-				content = url_handle.read()
-				docsdb.save(doc)
-				doc = docsdb.get(hash) # need to get a _rev
-				docsdb.put_attachment(doc, content, str(time.time())+"-"+os.path.basename(url), doc['mime_type']) 
-				return (doc['url'], doc['mime_type'], content)
-				#store as attachment epoch-filename
-				
-	except urllib2.URLError as e:
-			error = ""
-			if hasattr(e, 'reason'):
-				error = "error %s in downloading %s" % (str(e.reason), url)
-			elif hasattr(e, 'code'):
-				error = "error %s in downloading %s" % (e.code, url)
-			print error
-			doc['error'] = error
-			docsdb.save(doc)
-			return (None,None,None)
+    url = canonurl(url)
+    hash = mkhash(url)
+    req = urllib2.Request(url)
+    print "Fetching %s (%s)" % (url,hash)
+    if url.startswith("mailto") or url.startswith("javascript") or url.startswith("#") or url == None or url == "":
+        print "Not a valid HTTP url"
+        return (None,None,None)
+    doc = docsdb.get(hash)
+    if doc == None:
+        doc = {'_id': hash, 'agencyID': agencyID, 'url': url, 'fieldName':fieldName}
+    else:
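+        # If this URL was scraped recently (within the guard window), reuse the
+        # last stored attachment rather than hitting the server again.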
+        if (('page_scraped' in doc) and (time.time() - doc['page_scraped']) < 60*24*14*1000):
+            print "Uh oh, trying to scrape URL again too soon!"+hash
+            last_attachment_fname = doc["_attachments"].keys()[-1]
+            last_attachment = docsdb.get_attachment(doc,last_attachment_fname)
+            content = last_attachment
+            return (doc['url'],doc['mime_type'],content)
+        if scrape_again == False:
+            print "Not scraping this URL again as requested"
+            return (None,None,None)
+
+    req.add_header("User-Agent", "Mozilla/4.0 (compatible; Prometheus webspider; owner maxious@lambdacomplex.org)")
+    #if there is a previous version stored in couchdb, load caching helper tags
+    if doc.has_key('etag'):
+        req.add_header("If-None-Match", doc['etag'])
+    if doc.has_key('last_modified'):
+        req.add_header("If-Modified-Since", doc['last_modified'])
+
+    opener = urllib2.build_opener(NotModifiedHandler())
+    try:
+        url_handle = opener.open(req)
+        doc['url'] = url_handle.geturl() # may have followed a redirect to a new url
+        headers = url_handle.info() # the addinfourls have the .info() too
+        doc['etag'] = headers.getheader("ETag")
+        doc['last_modified'] = headers.getheader("Last-Modified")
+        doc['date'] = headers.getheader("Date")
+        doc['page_scraped'] = time.time()
+        doc['web_server'] = headers.getheader("Server")
+        doc['via'] = headers.getheader("Via")
+        doc['powered_by'] = headers.getheader("X-Powered-By")
+        doc['file_size'] = headers.getheader("Content-Length")
+        content_type = headers.getheader("Content-Type")
+        if content_type != None:
+            doc['mime_type'] = content_type.split(";")[0]
+        else:
+            (type,encoding) = mimetypes.guess_type(url)
+            doc['mime_type'] = type
+        if hasattr(url_handle, 'code'):
+            if url_handle.code == 304:
+                print "the web page has not been modified"+hash
+                last_attachment_fname = doc["_attachments"].keys()[-1]
+                last_attachment = docsdb.get_attachment(doc,last_attachment_fname)
+                content = last_attachment
+                return (doc['url'],doc['mime_type'],content)
+            else:
+                print "new webpage loaded"
+                content = url_handle.read()
+                docsdb.save(doc)
+                doc = docsdb.get(hash) # need to get a _rev
+                docsdb.put_attachment(doc, content, str(time.time())+"-"+os.path.basename(url), doc['mime_type'])
+                return (doc['url'], doc['mime_type'], content)
+                #store as attachment epoch-filename
+
+    except urllib2.URLError as e:
+            print "error!"
+            error = ""
+            if hasattr(e, 'reason'):
+                error = "error %s in downloading %s" % (str(e.reason), url)
+            elif hasattr(e, 'code'):
+                error = "error %s in downloading %s" % (e.code, url)
+            print error
+            doc['error'] = error
+            docsdb.save(doc)
+            return (None,None,None)
 
 
 
 def scrapeAndStore(docsdb, url, depth, fieldName, agencyID):
-	(url,mime_type,content) = fetchURL(docsdb, url, fieldName, agencyID)
-	badURLs = ["http://www.ausport.gov.au/supporting/funding/grants_and_scholarships/grant_funding_report"]
-	if content != None and depth > 0 and url != "http://www.ausport.gov.au/supporting/funding/grants_and_scholarships/grant_funding_report":
-		if mime_type == "text/html" or mime_type == "application/xhtml+xml" or mime_type =="application/xml":
-				# http://www.crummy.com/software/BeautifulSoup/documentation.html
-				soup = BeautifulSoup(content)
-				navIDs = soup.findAll(id=re.compile('nav|Nav|menu|bar|left|right|sidebar|more-links|breadcrumb|footer|header'))
-				for nav in navIDs:
-					print "Removing element", nav['id']
-					nav.extract()
-					navClasses = soup.findAll(attrs={'class' : re.compile('nav|menu|bar|left|right|sidebar|more-links|breadcrumb|footer|header')})
-					for nav in navClasses:
-						print "Removing element", nav['class']
-						nav.extract()
-					links = soup.findAll('a') # soup.findAll('a', id=re.compile("^p-"))
-					linkurls = set([])
-					for link in links:
-						if link.has_key("href"):
-							if link['href'].startswith("http"):
-								# lets not do external links for now
-								# linkurls.add(link['href'])
-								None
-							if link['href'].startswith("mailto"):
-								# not http
-								None
-							if link['href'].startswith("javascript"):
-								# not http
-								None
-							else:
-								# remove anchors and spaces in urls
-								linkurls.add(fullurl(url,link['href']))
-					for linkurl in linkurls:
-							   #print linkurl
-							   scrapeAndStore(docsdb, linkurl, depth-1, fieldName, agencyID)    
+    (url,mime_type,content) = fetchURL(docsdb, url, fieldName, agencyID)
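+    # Walk HTML pages recursively: strip navigation elements, collect same-site
+    # links and follow them until the depth limit is reached.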
+    badURLs = ["http://www.ausport.gov.au/supporting/funding/grants_and_scholarships/grant_funding_report"]
+    if content != None and depth > 0 and url != "http://www.ausport.gov.au/supporting/funding/grants_and_scholarships/grant_funding_report":
+        if mime_type == "text/html" or mime_type == "application/xhtml+xml" or mime_type =="application/xml":
+                # http://www.crummy.com/software/BeautifulSoup/documentation.html
+                soup = BeautifulSoup(content)
+                navIDs = soup.findAll(id=re.compile('nav|Nav|menu|bar|left|right|sidebar|more-links|breadcrumb|footer|header'))
+                for nav in navIDs:
+                    print "Removing element", nav['id']
+                    nav.extract()
+                    navClasses = soup.findAll(attrs={'class' : re.compile('nav|menu|bar|left|right|sidebar|more-links|breadcrumb|footer|header')})
+                    for nav in navClasses:
+                        print "Removing element", nav['class']
+                        nav.extract()
+                    links = soup.findAll('a') # soup.findAll('a', id=re.compile("^p-"))
+                    linkurls = set([])
+                    for link in links:
+                        if link.has_key("href"):
+                            if link['href'].startswith("http"):
+                                # lets not do external links for now
+                                # linkurls.add(link['href'])
+                                pass
+                            elif link['href'].startswith("mailto"):
+                                # not http
+                                pass
+                            elif link['href'].startswith("javascript"):
+                                # not http
+                                pass
+                            else:
+                                # remove anchors and spaces in urls
+                                linkurls.add(fullurl(url,link['href']))
+                    for linkurl in linkurls:
+                               #print linkurl
+                               scrapeAndStore(docsdb, linkurl, depth-1, fieldName, agencyID)
 
 #couch = couchdb.Server('http://192.168.1.148:5984/')
 couch = couchdb.Server('http://127.0.0.1:5984/')
@@ -196,20 +198,20 @@
 docsdb = couch['disclosr-documents']
 
 if __name__ == "__main__":
-	for row in agencydb.view('app/getScrapeRequired'): #not recently scraped agencies view?
-		agency = agencydb.get(row.id)
-		print agency['name']
-		for key in agency.keys():
-			if key == "FOIDocumentsURL" and "status" not in agency.keys:
-				scrapeAndStore(docsdb, agency[key],0,key,agency['_id'])
-			if key == 'website' and False:
-				scrapeAndStore(docsdb, agency[key],0,key,agency['_id'])
-                                agency['metadata']['lastScraped'] = time.time()
-			if key.endswith('URL') and False:
-				print key 
-				depth = 1
-				if 'scrapeDepth' in agency.keys():
-					depth = agency['scrapeDepth']
-				scrapeAndStore(docsdb, agency[key],depth,key,agency['_id'])
-		agencydb.save(agency)
-
+    for row in agencydb.view('app/getScrapeRequired'): #not recently scraped agencies view?
+        agency = agencydb.get(row.id)
+        print agency['name']
+        for key in agency.keys():
+            if key == "FOIDocumentsURL" and "status" not in agency.keys():
+                scrapeAndStore(docsdb, agency[key],0,key,agency['_id'])
+            if key == 'website' and False:
+                scrapeAndStore(docsdb, agency[key],0,key,agency['_id'])
+                agency['metadata']['lastScraped'] = time.time()
+            if key.endswith('URL') and False:
+                print key
+                depth = 1
+                if 'scrapeDepth' in agency.keys():
+                    depth = agency['scrapeDepth']
+                scrapeAndStore(docsdb, agency[key],depth,key,agency['_id'])
+        agencydb.save(agency)
+

--- /dev/null
+++ b/documents/scrapers/0049d35216493c545ef5f7f000e6b252.py
@@ -1,1 +1,19 @@
+import sys
+import os
+sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
+import genericScrapers
 
+
+class ScraperImplementation(genericScrapers.GenericPDFDisclogScraper):
+
+    def __init__(self):
+        super(ScraperImplementation, self).__init__()
+
+
+if __name__ == '__main__':
+    print 'Subclass:', issubclass(ScraperImplementation,
+         genericScrapers.GenericOAICDisclogScraper)
+    print 'Instance:', isinstance(ScraperImplementation(),
+         genericScrapers.GenericOAICDisclogScraper)
+    ScraperImplementation().doScrape()
+

--- a/documents/scrapers/0049d35216493c545ef5f7f000e6b252.txt
+++ /dev/null
@@ -1,2 +1,1 @@
-pdf
 

--- /dev/null
+++ b/documents/scrapers/00a294de663db69062ca09aede7c0487.py
@@ -1,1 +1,47 @@
+import sys,os
+sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
+import genericScrapers
+import dateutil
+from dateutil.parser import *
+from datetime import *
 
+
+class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
+
+    def __init__(self):
+        super(ScraperImplementation, self).__init__()
+    def getDate(self, content, entry, doc):
+        date = ''.join(entry.find('th').stripped_strings).strip()
+        (a, b, c) = date.partition("(")
+        date = self.remove_control_chars(a.replace("Octber", "October"))
+        print date
+        edate = parse(date, dayfirst=True, fuzzy=True).strftime("%Y-%m-%d")
+        print edate
+        doc.update({'date': edate})
+        return
+    def getColumnCount(self):
+        return 4
+
+    def getTable(self, soup):
+        return soup.find(summary="List of Defence documents released under Freedom of Information requets")
+
+    def getColumns(self, columns):
+        (id, description, access, notes) = columns
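+        # Map onto the generic (id, date, title, description, notes) tuple;
+        # the date is filled in separately by getDate() above.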
+        return (id, None, description, description, notes)
+
+
+if __name__ == '__main__':
+    print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
+    print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
+
+    nsi = ScraperImplementation()
+    nsi.disclogURL = "http://www.defence.gov.au/foi/disclosure_log_201213.cfm"
+    nsi.doScrape()
+
+    nsi.disclogURL = "http://www.defence.gov.au/foi/disclosure_log_201112.cfm"
+    nsi.doScrape()
+
+    nsi.disclogURL = "http://www.defence.gov.au/foi/disclosure_log_201011.cfm"
+    nsi.doScrape()
+
+

--- a/documents/scrapers/00a294de663db69062ca09aede7c0487.txt
+++ /dev/null
@@ -1,2 +1,1 @@
-multipage
 

--- a/documents/scrapers/0ae822d1a748e60d90f0b79b97d5a3e5.txt
+++ /dev/null
@@ -1,2 +1,1 @@
-ACMA style
 

--- /dev/null
+++ b/documents/scrapers/1803322b27286950cab0c543168b5f21.py
@@ -1,1 +1,58 @@
+import sys,os
+sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
+import genericScrapers
+import dateutil
+from dateutil.parser import *
+from datetime import *
+import scrape
+from bs4 import BeautifulSoup
+class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
 
+    def __init__(self):
+        super(ScraperImplementation, self).__init__()
+
+    def getDescription(self,content, entry,doc):
+        link = None
+        links = []
+        description = ""
+        for atag in entry.find_all('a'):
+            if atag.has_key('href'):
+                link = scrape.fullurl(self.getURL(), atag['href'])
+                (url, mime_type, htcontent) = scrape.fetchURL(scrape.docsdb, link, "foidocuments", self.getAgencyID(), False)
+                if htcontent != None:
+                    if mime_type == "text/html" or mime_type == "application/xhtml+xml" or mime_type =="application/xml":
+                        soup = BeautifulSoup(htcontent)
+                        row  = soup.find(id="content_div_148050")
+                        description = ''.join(row.stripped_strings)
+                        for atag in row.find_all("a"):
+                            if atag.has_key('href'):
+                                links.append(scrape.fullurl(link, atag['href']))
+
+        if links != []:
+            doc.update({'links': links})
+        if description != "":
+            doc.update({'description': description})
+    def getColumnCount(self):
+        return 4
+
+    def getColumns(self, columns):
+        (id, date, datepub, title) = columns
+        return (id, date, title, title, None)
+
+
+if __name__ == '__main__':
+    print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
+    print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
+
+    nsi = ScraperImplementation()
+    nsi.disclogURL = "http://www.dbcde.gov.au/about_us/freedom_of_information_disclosure_log/foi_list?result_146858_result_page=1"
+    nsi.doScrape()
+    nsi.disclogURL = "http://www.dbcde.gov.au/about_us/freedom_of_information_disclosure_log/foi_list?result_146858_result_page=2"
+    nsi.doScrape()
+    nsi.disclogURL = "http://www.dbcde.gov.au/about_us/freedom_of_information_disclosure_log/foi_list?result_146858_result_page=3"
+    nsi.doScrape()
+    nsi.disclogURL = "http://www.dbcde.gov.au/about_us/freedom_of_information_disclosure_log/foi_list?result_146858_result_page=4"
+    nsi.doScrape()
+    nsi.disclogURL = "http://www.dbcde.gov.au/about_us/freedom_of_information_disclosure_log/foi_list?result_146858_result_page=5"
+    nsi.doScrape()
+

--- a/documents/scrapers/1803322b27286950cab0c543168b5f21.txt
+++ /dev/null
@@ -1,2 +1,1 @@
-multipage log
 

--- /dev/null
+++ b/documents/scrapers/6afdde1d4ff1ad8d8cfe1a8675ea83bd.py
@@ -1,1 +1,19 @@
+import sys
+import os
+sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
+import genericScrapers
 
+
+class ScraperImplementation(genericScrapers.GenericPDFDisclogScraper):
+
+    def __init__(self):
+        super(ScraperImplementation, self).__init__()
+
+
+if __name__ == '__main__':
+    print 'Subclass:', issubclass(ScraperImplementation,
+         genericScrapers.GenericOAICDisclogScraper)
+    print 'Instance:', isinstance(ScraperImplementation(),
+         genericScrapers.GenericOAICDisclogScraper)
+    ScraperImplementation().doScrape()
+

--- a/documents/scrapers/6afdde1d4ff1ad8d8cfe1a8675ea83bd.txt
+++ /dev/null
@@ -1,2 +1,1 @@
-PDF
 

--- /dev/null
+++ b/documents/scrapers/8317df630946937864d31a4728ad8ee8.py
@@ -1,1 +1,19 @@
+import sys
+import os
+sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
+import genericScrapers
 
+
+class ScraperImplementation(genericScrapers.GenericPDFDisclogScraper):
+
+    def __init__(self):
+        super(ScraperImplementation, self).__init__()
+
+
+if __name__ == '__main__':
+    print 'Subclass:', issubclass(ScraperImplementation,
+         genericScrapers.GenericOAICDisclogScraper)
+    print 'Instance:', isinstance(ScraperImplementation(),
+         genericScrapers.GenericOAICDisclogScraper)
+    ScraperImplementation().doScrape()
+

--- a/documents/scrapers/8317df630946937864d31a4728ad8ee8.txt
+++ /dev/null
@@ -1,2 +1,1 @@
-pdf
 

--- /dev/null
+++ b/documents/scrapers/8796220032faf94501bd366763263685.py
@@ -1,1 +1,37 @@
+import sys,os
+sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
+import genericScrapers
+import dateutil
+from dateutil.parser import *
+from datetime import *
 
+
+class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
+
+    def __init__(self):
+        super(ScraperImplementation, self).__init__()
+
+    def getColumnCount(self):
+        return 6
+
+    def getColumns(self, columns):
+        (id, date, title, description, datepub, notes) = columns
+        return (id, date, title, description, notes)
+
+
+if __name__ == '__main__':
+    print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
+    print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
+
+    nsi = ScraperImplementation()
+    nsi.disclogURL = "http://www.dpmc.gov.au/foi/ips/disclosure_logs/pmo/2011-12.cfm"
+    nsi.doScrape()
+    nsi.disclogURL = "http://www.dpmc.gov.au/foi/ips/disclosure_logs/dpmc/2011-12.cfm"
+    nsi.doScrape()
+    nsi.disclogURL = "http://www.dpmc.gov.au/foi/ips/disclosure_logs/dpmc/2012-13.cfm"
+    nsi.doScrape()
+    nsi.disclogURL = "http://www.dpmc.gov.au/foi/ips/disclosure_logs/omsi/2011-12.cfm"
+    nsi.doScrape()
+    nsi.disclogURL = "http://www.dpmc.gov.au/foi/ips/disclosure_logs/omps/2012-13.cfm"
+    nsi.doScrape()
+

--- a/documents/scrapers/8796220032faf94501bd366763263685.txt
+++ /dev/null
@@ -1,2 +1,1 @@
-multiple pages
 

--- a/documents/scrapers/8e874a2fde8aa0ccdc6d14573d766540.py
+++ b/documents/scrapers/8e874a2fde8aa0ccdc6d14573d766540.py
@@ -3,7 +3,7 @@
 import genericScrapers
 import scrape
 from bs4 import BeautifulSoup
-import codecs 
+import codecs
 #http://www.doughellmann.com/PyMOTW/abc/
 class NewScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
         def getDescription(self,content, entry,doc):
@@ -20,7 +20,7 @@
                                                 soup = BeautifulSoup(htcontent)
                                                 for text in soup.find(id="divFullWidthColumn").stripped_strings:
                                                     description = description + text.encode('ascii', 'ignore')
-                                                
+
                                                 for atag in soup.find(id="divFullWidthColumn").find_all("a"):
                                                       	if atag.has_key('href'):
                                                               	links.append(scrape.fullurl(link,atag['href']))
@@ -76,11 +76,10 @@
 if __name__ == '__main__':
     print 'Subclass:', issubclass(NewScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
     print 'Instance:', isinstance(NewScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
-    #NewScraperImplementation().doScrape()
+    NewScraperImplementation().doScrape()
     print 'Subclass:', issubclass(OldScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
     print 'Instance:', isinstance(OldScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
     osi = OldScraperImplementation()
     osi.disclogURL = "http://archive.treasury.gov.au/content/foi_publications.asp?year=-1&abstract=0&classification=&=&titl=Disclosure+Log+-+Documents+Released+Under+FOI"
     osi.doScrape()
-# old site too
 

--- /dev/null
+++ b/documents/scrapers/b0a3281ba66efe173c5a33d5ef90ff76.py
@@ -1,1 +1,35 @@
+import sys,os
+sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
+import genericScrapers
+import dateutil
+from dateutil.parser import *
+from datetime import *
 
+
+class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
+
+    def __init__(self):
+        super(ScraperImplementation, self).__init__()
+
+    def getColumnCount(self):
+        return 2
+
+    def getColumns(self, columns):
+        (date, title) = columns
+        return (title, date, title, title, None)
+
+
+if __name__ == '__main__':
+    print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
+    print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
+
+    nsi = ScraperImplementation()
+    nsi.disclogURL = "http://www.immi.gov.au/about/foi/foi-disclosures-2012.htm"
+    nsi.doScrape()
+    nsi.disclogURL = "http://www.immi.gov.au/about/foi/foi-disclosures-2011.htm"
+    nsi.doScrape()
+    nsi.disclogURL = "http://www.immi.gov.au/about/foi/foi-disclosures-2010.htm"
+    nsi.doScrape()
+    nsi.disclogURL = "http://www.immi.gov.au/about/foi/foi-disclosures-2009.htm"
+    nsi.doScrape()
+

--- a/documents/scrapers/b0a3281ba66efe173c5a33d5ef90ff76.txt
+++ /dev/null
@@ -1,2 +1,1 @@
-multipage immi
 

--- /dev/null
+++ b/documents/scrapers/c57c0bf315ce5977e730905707a2f6a3.py
@@ -1,1 +1,19 @@
+import sys
+import os
+sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
+import genericScrapers
 
+
+class ScraperImplementation(genericScrapers.GenericPDFDisclogScraper):
+
+    def __init__(self):
+        super(ScraperImplementation, self).__init__()
+
+
+if __name__ == '__main__':
+    print 'Subclass:', issubclass(ScraperImplementation,
+         genericScrapers.GenericOAICDisclogScraper)
+    print 'Instance:', isinstance(ScraperImplementation(),
+         genericScrapers.GenericOAICDisclogScraper)
+    ScraperImplementation().doScrape()
+

--- a/documents/scrapers/c57c0bf315ce5977e730905707a2f6a3.txt
+++ /dev/null
@@ -1,3 +1,1 @@
-# pdf
-http://www.awm.gov.au/about/AWM_Disclosure_Log.pdf
 

--- a/documents/template.inc.php
+++ b/documents/template.inc.php
@@ -68,7 +68,8 @@
 
                             </p>
                             <ul class="nav">
-                                <li><a href="index.php">Home</a></li>
+                                <li><a href="agency.php">By Agency</a></li>
+                                <li><a href="date.php">By Date</a></li>
                                 <li><a href="disclogsList.php">List of Disclosure Logs</a></li>
                                 <li><a href="about.php">About</a></li>
 
@@ -89,7 +90,9 @@
             <footer>
                 <p>Not affiliated with or endorsed by any government agency.</p>
             </footer>
-            <script type="text/javascript">
+              <?php
+            if ($ENV != "DEV") {
+                echo "<script type='text/javascript'>
 
                 var _gaq = _gaq || [];
                 _gaq.push(['_setAccount', 'UA-12341040-4']);
@@ -106,7 +109,9 @@
                     s.parentNode.insertBefore(ga, s);
                 })();
 
-            </script>
+            </script>";
+            }
+            ?>
             <!-- Le javascript
             ================================================== -->
             <!-- Placed at the end of the document so the pages load faster -->
@@ -150,10 +155,10 @@
     $result = "";
     $result .= '<div itemscope itemtype="http://schema.org/Article">';
     $result .= '<h2> <span itemprop="datePublished">' . $row->value->date . "</span>: <span itemprop='name headline'>" . truncate($row->value->title, 120) . "</span>";
-    $result .= '(<span itemprop="author publisher creator">' . $idtoname[$row->value->agencyID] . '</span>)</h2>';
-    $result .= "<p itemprop='description articleBody text'> Title" . $row->value->title . "<br/>";
+    $result .= ' (<span itemprop="author publisher creator">' . $idtoname[$row->value->agencyID] . '</span>)</h2>';
+    $result .= "<p itemprop='description articleBody text'> Title: " . $row->value->title . "<br/>";
     if (isset($row->value->description)) {
-        $result .= str_replace("\n", "<br>", $row->value->description);
+        $result .= str_replace("\n", "<br>", preg_replace("/(^[\r\n]*|[\r\n]+)[\s\t]*[\r\n]+/", "",trim($row->value->description)));
     }
     if (isset($row->value->notes)) {
         $result .= " <br>Note: " . $row->value->notes;
@@ -163,13 +168,13 @@
     if (isset($row->value->links)) {
         $result .= '<h3>Links/Documents</h3><ul itemprop="associatedMedia">';
         foreach ($row->value->links as $link) {
-            $result .= '<li itemscope itemtype="http://schema.org/MediaObject"><a href=' . $link . ' itemprop="url contentURL">' . urlencode($link) . "</a></li>";
+            $result .= '<li itemscope itemtype="http://schema.org/MediaObject"><a href="' . htmlspecialchars($link) . '" itemprop="url contentURL">' . htmlspecialchars($link) . "</a></li>";
         }
 
         $result .= "</ul>";
     }
     $result .= "<small><A itemprop='url' href='" . $row->value->url . "'>View original source...</a> ID: " . strip_tags($row->value->docID) . "</small>";
-    $result .= "</div>";
+    $result .= "</div>\n";
     return $result;
 }
 

--- a/documents/view.php
+++ b/documents/view.php
@@ -1,7 +1,6 @@
 <?php
 include('template.inc.php');
-include_header_documents("");
-include_once('../include/common.inc.php');
+
 ?>
 <?php
 
@@ -17,6 +16,8 @@
 try {
   $obj = new stdClass();
     $obj->value = $foidocsdb->get($_REQUEST['id']);
+    include_header_documents($obj->value->title);
+include_once('../include/common.inc.php');
 echo displayLogEntry($obj,$idtoname);
 
 } catch (SetteeRestClientException $e) {