Experimental organization support
Experimental organization support


Former-commit-id: 0c86e9a2a6e066dc3f1550e7915892d9649ec125

--- a/documents/datagov-export.py
+++ b/documents/datagov-export.py
@@ -2,14 +2,22 @@
 import couchdb
 from ckanclient import CkanApiError
 import re
+import html2text # aaronsw :(
+import ckanapi # https://github.com/open-data/ckanapi
+
 
 class LoaderError(Exception):
     pass
 
 # Instantiate the CKAN client.
-ckan = ckanclient.CkanClient(base_location='http://localhost:5000/api',
-    api_key='b47b24cd-591d-40c1-8677-d73101d56d1b')
-# (use your own api_key from http://thedatahub.org/user/me )
+#ckan = ckanclient.CkanClient(base_location='http://localhost:5000/api',    api_key='b47b24cd-591d-40c1-8677-d73101d56d1b')
+api_key = 'b3ab75e4-afbb-465b-a09d-8171c8c69a7a'
+ckan = ckanclient.CkanClient(base_location='http://data.disclosurelo.gs/api',
+    api_key=api_key)
+ckandirect = ckanapi.RemoteCKAN('http://data.disclosurelo.gs', api_key=api_key)
+couch = couchdb.Server('http://127.0.0.1:5984/')
+#couch = couchdb.Server('http://192.168.1.113:5984/')
+
 # http://code.activestate.com/recipes/578019-bytes-to-human-human-to-bytes-converter/
 SYMBOLS = {
     'customary': ('B', 'KB', 'MB', 'GB', 'T', 'P', 'E', 'Z', 'Y'),
@@ -83,54 +91,114 @@
     name = re.sub('__', '_', name).lower()
     return name
 
-
+#todo "{'name': [u'Url must be purely lowercase alphanumeric (ascii) characters and these symbols: -_']}"
+# http://data.gov.au/dataset/australian-domestic-regional-and-international-airline-activity-%E2%80%93-time-series/
 def name_munge(input_name):
-    return munge(input_name.replace(' ', '').replace('.', '_').replace('&', 'and'))
+    return  munge(input_name.replace(' ', '').replace('.', '_').replace('&', 'and'))
+    #[:100]
     #return input_name.replace(' ', '').replace('.', '_').replace('&', 'and')
 
-couch = couchdb.Server('http://127.0.0.1:5984/')
+
+def get_licence_id(licencename):
+    map = {
+        "Creative Commons - Attribution-Share Alike 2.0 Australia (CC-SA)\nThe downloadable version of the database is licensed under CC-BY-SA Creative Commons Attribution Share Alike and contains only the database fields that are released under that license. These fields are object title, object number, object description as well as temporal, spatial and dimension details. It also contains a persistent URL for each record.": 'cc-by-sa',
+        "CreativeCommonsAttributionNonCommercial30AustraliaCCBYNC30": 'cc-nc',
+        'Otherpleasespecify': 'notspecified',
+        '': 'notspecified',
+        "Publicly available data": 'notspecified',
+        "CreativeCommonsAttributionNoDerivativeWorks30AustraliaCCBYND30": "other-closed",
+        "CreativeCommonsAttributionNonCommercialNoDerivs30AustraliaCCBYNCND30": "other-closed",
+        'CreativeCommonsAttribution30AustraliaCCBY30': 'cc-by',
+        "Creative Commons - Attribution 2.5 Australia (CC-BY)": 'cc-by',
+        'CreativeCommonsAttributionCCBY25': 'cc-by',
+        "PublicDomain": 'other-pd',
+    }
+    if licencename not in map.keys():
+        raise Exception(licencename + " not found");
+    return map[licencename];
+
 docsdb = couch['disclosr-documents']
 
 if __name__ == "__main__":
+    orgs_list = []
     for doc in docsdb.view('app/datasets'):
         print doc.id
-        if doc.value['url'] != "http://data.gov.au/data/":
+
+
+        if doc.value['url'] != "http://data.gov.au/data/" and doc.value['agencyID'] != "qld":
+
+
+
             # Collect the package metadata.
-            pkg_name = name_munge(doc.value['metadata']['DCTERMS.Title'][:100])
-            tags = doc.value['metadata']["Keywords / Tags"]
-            if not hasattr(tags, '__iter__'):
-                tags = [tags]
-            [re.sub('[^a-zA-Z0-9-_]', '', tag).lower() for tag in tags]
-            package_entity = {
-                'name': pkg_name,
-                'title': doc.value['metadata']['DCTERMS.Title'],
-                'url': doc.value['metadata']['DCTERMS.Source.URI'],
-                'tags': tags,
-                'author': doc.value['metadata']["DCTERMS.Creator"],
-                'maintainer': doc.value['metadata']["DCTERMS.Creator"],
-                'licence_id': doc.value['metadata']['DCTERMS.License'], #todo licence id mapping
-                'notes': doc.value['metadata']['Description'],
-            }
+            pkg_name = filter( lambda x: x in '0123456789abcdefghijklmnopqrstuvwxyz-_',doc.value['url'].replace("http://data.gov.au/dataset/",'').replace('/','')[:100]);
+            print pkg_name
+            #add to or create organization using direct API
+            org_name = name_munge(doc.value['metadata']["Agency"][:100])
+            if org_name not in orgs_list:
+                orgs_list = ckandirect.action.organization_list()['result']
+                #print orgs_list
+                if org_name not in orgs_list:
+                    try:
+                        print "org not found, creating "+org_name
+                        ckandirect.action.organization_create(name = org_name,   title= doc.value['metadata']["Agency"],
+                                                              description= doc.value['metadata']["Agency"])
+                        orgs_list.append(org_name)
+                    except ckanapi.ValidationError, e:
+                        print e
+                        raise LoaderError('Unexpected status')
+                else:
+                    print "org found, adding dataset to "+org_name
+
+            org = ckandirect.action.organization_show(id=org_name)
+            # todo cache org names -> id mapping
+            tags = []
+            if doc.value['agencyID'] == "AGIMO":
+                if len(doc.value['metadata']["Keywords / Tags"]) > 0:
+                    if hasattr(doc.value['metadata']["Keywords / Tags"], '__iter__'):
+                        tags = tags + doc.value['metadata']["Keywords / Tags"]
+                    else:
+                        tags = tags + [doc.value['metadata']["Keywords / Tags"]]
+                if 'data.gov.au Category' in doc.value['metadata'].keys() and len(doc.value['metadata']['data.gov.au Category']) > 0:
+                    if hasattr(doc.value['metadata']['data.gov.au Category'], '__iter__'):
+                        tags = tags + doc.value['metadata']['data.gov.au Category']
+                    else:
+                        tags = tags + [doc.value['metadata']['data.gov.au Category']]
+                tags = [re.sub('[^a-zA-Z0-9-_.]', '', tag.replace('&', 'and')).lower() for tag in tags if tag]
+                #print tags
+                package_entity = {
+                    'name': pkg_name,
+                    'title': doc.value['metadata']['DCTERMS.Title'],
+                    'url': doc.value['metadata']['DCTERMS.Source.URI'],
+                    'tags': tags, #tags are mandatory?
+                    'author': doc.value['metadata']["DCTERMS.Creator"],
+                    'maintainer': doc.value['metadata']["DCTERMS.Creator"],
+                    'licence_id': get_licence_id(doc.value['metadata']['DCTERMS.License']),
+                    'notes': html2text.html2text(doc.value['metadata']['Description']),
+                    'owner_org': org["result"]["id"]
+                    #todo add missing key values like jurisdiction
+                }
+            if doc.value['agencyID'] == "qld":
+                package_entity = doc.value['metadata']
+
             try:
-                #print doc.id
+                #print package_entity
                 ckan.package_register_post(package_entity)
             except CkanApiError, e:
-                if ckan.last_status == 409:
-                    print "already exists"
+                if ckan.last_message == "{\"name\": [\"That URL is already in use.\"]}":
+                    print "package already exists"
                 else:
+                    print ckan.last_message
                     raise LoaderError('Unexpected status %s checking for package under \'%s\': %r' % (
                         ckan.last_status, pkg_name, e.args))
-
-            print package_entity
-            #todo add to organisation (author/creator/maintainer)
-            #if 'data.gov.au Category' in doc.value['metadata'].keys(): #todo add to group
+            pkg = ckan.package_entity_get(pkg_name)
+
             if 'Download' in doc.value['metadata'].keys():
                 try:
-                    pkg = ckan.package_entity_get(pkg_name)
+
                     resources = pkg.get('resources', [])
                     if len(resources) < len(doc.value['metadata']['Download']):
                         for resource in doc.value['metadata']['Download']:
-                            #print resource
+
                             # http://docs.ckan.org/en/ckan-1.7/domain-model-resource.html
                             # (KML/KMZ) / (Shapefile) /(Other)
                             format = "plain"
@@ -141,6 +209,7 @@
                             name = resource['href']
                             if 'name' in resource.keys():
                                 name = resource['name']
+                            print resource
                             ckan.add_package_resource(pkg_name, resource['href'], name=name, resource_type='data',
                                 format=format, size=human2bytes(resource['size'].replace(',', '')))
                     else:

--- /dev/null
+++ b/documents/datagov-merge.php
@@ -1,1 +1,26 @@
+<?php
 
+include_once("../include/common.inc.php");
+
+
+setlocale(LC_CTYPE, 'C');
+
+$db = $server->get_db('disclosr-documents');
+$datasets = Array();
+try {
+    $rows = $db->get_view("app", "datasets", null, true)->rows;
+
+    foreach ($rows as $row) {
+        //print_r($row);
+        if ($row->value->url != "http://data.gov.au/data/")
+        $datasets[str_replace(Array("http://data.gov.au/dataset/","/"),"",$row->value->url)] = $row->id;
+    }
+} catch (SetteeRestClientException $e) {
+    setteErrorHandler($e);
+}
+ksort($datasets);
+foreach ($datasets as $datasetname => $datasetkey) {
+    print "$datasetname => $datasetkey<br>\n";
+}
+?>
+

--- a/documents/datagov.py
+++ b/documents/datagov.py
@@ -13,7 +13,7 @@
     if atag.has_key('href'):
         url = scrape.fullurl(listurl, atag['href'])
         (url, mime_type, html) = scrape.fetchURL(scrape.docsdb,
-            url, "data", "AGIMO")
+            url, "data", "AGIMO", False)
         hash = scrape.mkhash(scrape.canonurl(url))
         doc = scrape.docsdb.get(hash)
         if "metadata" not in doc.keys() or True:

--- /dev/null
+++ b/documents/dataqld.py
@@ -1,1 +1,28 @@
+import sys, os
+import time
+import scrape
+from bs4 import BeautifulSoup
 
+from unidecode import unidecode
+import ckanclient
+
+# Instantiate the CKAN client.
+ckan = ckanclient.CkanClient(base_location='https://data.qld.gov.au/api')
+
+# Get the package list.
+package_list = ckan.package_register_get()
+for package_name in package_list:
+# Get the details of a package.
+    (url, mime_type, html) = scrape.fetchURL(scrape.docsdb,
+        "https://data.qld.gov.au/dataset/"+package_name , "data", "qld", False)
+    hash = scrape.mkhash(scrape.canonurl(url))
+    print hash
+    doc = scrape.docsdb.get(hash)
+    if "metadata" not in doc.keys() or True:
+        ckan.package_entity_get(package_name)
+        package_entity = ckan.last_message
+        doc['type'] = "dataset"
+        doc['metadata'] = package_entity
+        print package_entity
+        scrape.docsdb.save(doc)
+

--- a/documents/gazette.py
+++ b/documents/gazette.py
@@ -5,20 +5,53 @@
 
 from unidecode import unidecode
 
-listurl = "http://gazettes.ag.gov.au/portal/govgazonline.nsf/publications?OpenView&Start=3960"
-(url, mime_type, listhtml) = scrape.fetchURL(scrape.docsdb,
-    listurl, "gazette", "AGD")
-soup = BeautifulSoup(listhtml)
-for row in soup.find_all('tr'):
-    if row.has_key('valign'):
-	for col in tr.find_all('td'):
-		print col.string
-        #url = scrape.fullurl(listurl, atag['href'])
-        #(url, mime_type, html) = scrape.fetchURL(scrape.docsdb,
-        #    url, "data", "AGIMO")
-        #hash = scrape.mkhash(scrape.canonurl(url))
-        #doc = scrape.docsdb.get(hash)
-        #print doc['metadata']
-        #scrape.docsdb.save(doc)
-        #time.sleep(2)
+items = 3950
+items = 1
+while True:
+    print str(items) + " (" +str(items/25) +" screens to go)"
+    listurl = "http://gazettes.ag.gov.au/portal/govgazonline.nsf/publications?OpenView&Start=" + str(items)
+    (listurl, mime_type, listhtml) = scrape.fetchURL(scrape.docsdb,
+        listurl, "gazette", "AGD", False)
+    for line in listhtml.split('\n'):
+        soup = BeautifulSoup(line)
+        #print line
+        for row in soup.find_all('tr'):
+            print line
+            if row.has_key('valign'):
+                i = 0
+                date = ""
+                id = ""
+                type = ""
+                description = ""
+                name = ""
+                url = ""
+                for col in soup.find_all('td'):
+                    #print ''.join(col.stripped_strings)
+                    if i == 0:
+                        date = ''.join(col.stripped_strings)
+                    if i == 1:
+                        id = ''.join(col.stripped_strings)
+                    if i == 2:
+                        type = ''.join(col.stripped_strings)
+                    if i == 3:
+                        description = ''.join(col.stripped_strings)
+                        for link in col.findAll('a'):
+                            if link.has_key("href"):
+                                url = link['href']
+                                name = ''.join(link.stripped_strings)
+                                print str(items) + " (" +str(items/25) +" screens to go)"
+                                print [date, id, type, description, name, url]
+                                itemurl = scrape.fullurl(listurl, url)
+                                (itemurl, mime_type, html) = scrape.fetchURL(scrape.docsdb,
+                                    itemurl, "gazette", "AGD", False)
+                                hash = scrape.mkhash(scrape.canonurl(itemurl))
+                                doc = scrape.docsdb.get(hash)
+                                doc['metadata'] = {"date": date, "id": id, "type":type, "description":description,"name": name,"url": url}
+                                scrape.docsdb.save(doc)
+                                #time.sleep(2)
+                    i = i + 1;
 
+    items = items - 25
+    if items <= 0:
+        break
+

--- a/documents/genericScrapers.py
+++ b/documents/genericScrapers.py
@@ -72,7 +72,8 @@
             edate = date.today().strftime("%Y-%m-%d")
             doc = {'_id': dochash, 'agencyID': self.getAgencyID()
             , 'url': self.getURL(), 'docID': dochash,
-            "date": edate, "title": "Disclosure Log Updated", "description":  self.remove_control_chars(description), "diff": diff}
+            "date": edate, "title": "Disclosure Log Updated", 
+	    "description":  self.remove_control_chars(description), "diff": self.remove_control_chars(diff)}
             foidocsdb.save(doc)
         else:
             print "already saved"
@@ -199,11 +200,16 @@
         return table.find_all('tr')
 
     def getDate(self, content, entry, doc):
-        date = ''.join(content.stripped_strings).strip()
-        (a, b, c) = date.partition("(")
-        date = self.remove_control_chars(a.replace("Octber", "October").replace("1012","2012"))
-        print date
-        edate = parse(date, dayfirst=True, fuzzy=True).strftime("%Y-%m-%d")
+        strdate = ''.join(content.stripped_strings).strip()
+        (a, b, c) = strdate.partition("(")
+        strdate = self.remove_control_chars(a.replace("Octber", "October").replace("1012","2012").replace("Janrurary", "January"))
+        print strdate
+        try:
+		edate = parse(strdate, dayfirst=True, fuzzy=True).strftime("%Y-%m-%d")
+	except ValueError:
+		print >> sys.stderr, "ERROR date invalid %s " % strdate
+		print >> sys.stderr, "ERROR date originally %s " % ''.join(content.stripped_strings).strip()
+		edate = date.today().strftime("%Y-%m-%d")  
         print edate
         doc.update({'date': edate})
         return
@@ -266,8 +272,7 @@
                                          'Summary of FOIrequest received by agency/minister',
                                          'Summary of FOI request received', 'Description of    FOI Request',
                                          "FOI request", 'Results 1 to 67 of 67']
-                            if doc['title'] not in badtitles\
-                            and doc['description'] != '':
+                            if doc['title'] not in badtitles and 'description' in doc.keys() and doc['description'] != '':
                                 print "saving"
                                 foidocsdb.save(doc)
                         else:
@@ -277,6 +282,6 @@
                         print "header row"
 
                     else:
-                        print "ERROR number of columns incorrect"
+                        print >> sys.stderr, "ERROR number of columns incorrect"
                         print row
 

--- a/documents/runScrapers.sh
+++ b/documents/runScrapers.sh
@@ -1,10 +1,22 @@
-for f in scrapers/*.py; 
-	do echo "Processing $f file.."; 
-	python $f; 
+DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+cd $DIR
+echo "" > /tmp/disclosr-error
+for f in scrapers/*.py; do
+	echo "Processing $f file..";
+	md5=`md5sum /tmp/disclosr-error`
+	python $f 3>&1 1>&2 2>&3 | tee --append /tmp/disclosr-error;
+	md52=`md5sum /tmp/disclosr-error`
+	if [ "$md5" != "$md52" ]; then
+		echo "^^^^^^^^^^^^^^ $f" >> /tmp/disclosr-error;
+	fi
 	if [ "$?" -ne "0" ]; then
 		echo "error";
-		sleep 2; 
+		sleep 1;
 	fi
 done
+if [ -s /tmp/disclosr-error ] ; then
+    echo "emailling logs..";
+    mail -E -s "Disclosr errors" maxious@lambdacomplex.org < /tmp/disclosr-error ;
+fi
 
 

--- a/documents/scrape.py
+++ b/documents/scrape.py
@@ -7,10 +7,16 @@
 from urlparse import urljoin
 import time
 import os
+import sys
 import mimetypes
 import urllib
 import urlparse
 import socket
+
+#couch = couchdb.Server('http://192.168.1.148:5984/')
+#couch = couchdb.Server('http://192.168.1.113:5984/')
+couch = couchdb.Server('http://127.0.0.1:5984/')
+
 
 def mkhash(input):
     return hashlib.md5(input).hexdigest().encode("utf-8")
@@ -84,7 +90,7 @@
 def getLastAttachment(docsdb, url):
     hash = mkhash(url)
     doc = docsdb.get(hash)
-    if doc != None:
+    if doc != None and "_attachments" in doc.keys():
         last_attachment_fname = doc["_attachments"].keys()[-1]
         last_attachment = docsdb.get_attachment(doc, last_attachment_fname)
         return last_attachment
@@ -98,21 +104,23 @@
     req = urllib2.Request(url)
     print "Fetching %s (%s)" % (url, hash)
     if url.startswith("mailto") or url.startswith("javascript") or url.startswith("#") or url == None or url == "":
-        print "Not a valid HTTP url"
+        print >> sys.stderr, "Not a valid HTTP url"
         return (None, None, None)
     doc = docsdb.get(hash)
     if doc == None:
         doc = {'_id': hash, 'agencyID': agencyID, 'url': url, 'fieldName': fieldName, 'type': 'website'}
     else:
-        if (('page_scraped' in doc) and (time.time() - doc['page_scraped']) < 60 * 24 * 14):
+        if (('page_scraped' in doc) and ((time.time() - doc['page_scraped']) < 60 * 24 * 14) or (scrape_again == False)):
             print "Uh oh, trying to scrape URL again too soon!" + hash
-            last_attachment_fname = doc["_attachments"].keys()[-1]
-            last_attachment = docsdb.get_attachment(doc, last_attachment_fname)
-            content = last_attachment
-            return (doc['url'], doc['mime_type'], content.read())
-        if scrape_again == False:
-            print "Not scraping this URL again as requested"
-            return (doc['url'], doc['mime_type'], content.read())
+	    if "_attachments" in doc.keys():
+	            last_attachment_fname = doc["_attachments"].keys()[-1]
+	            last_attachment = docsdb.get_attachment(doc, last_attachment_fname)
+        	    content = last_attachment.read()
+		    mime_type = doc['mime_type']
+	    else:
+		    content = None
+		    mime_type = None
+            return (doc['url'], mime_type, content)
 
     req.add_header("User-Agent", "Mozilla/4.0 (compatible; Prometheus webspider; owner maxious@lambdacomplex.org)")
     #if there is a previous version stored in couchdb, load caching helper tags
@@ -157,13 +165,13 @@
                 #store as attachment epoch-filename
 
     except (urllib2.URLError, socket.timeout) as e:
-        print "error!"
+        print >> sys.stderr,"error!"
         error = ""
         if hasattr(e, 'reason'):
             error = "error %s in downloading %s" % (str(e.reason), url)
         elif hasattr(e, 'code'):
             error = "error %s in downloading %s" % (e.code, url)
-        print error
+        print >> sys.stderr, error
         doc['error'] = error
         docsdb.save(doc)
         return (None, None, None)
@@ -207,9 +215,6 @@
                     #print linkurl
                     scrapeAndStore(docsdb, linkurl, depth - 1, fieldName, agencyID)
 
-#couch = couchdb.Server('http://192.168.1.148:5984/')
-#couch = couchdb.Server('http://192.168.1.113:5984/')
-couch = couchdb.Server('http://127.0.0.1:5984/')
 # select database
 agencydb = couch['disclosr-agencies']
 docsdb = couch['disclosr-documents']

--- a/documents/scrapers/0049d35216493c545ef5f7f000e6b252.py
+++ b/documents/scrapers/0049d35216493c545ef5f7f000e6b252.py
@@ -42,7 +42,6 @@
                         'data': {'request': '', 'session': '', 'more': ''}
 
                 }
-
-	        amonpy.exception(data)
+		#amonpy.exception(data)
 	pass
 

--- a/documents/scrapers/227cb6eb7d2c9f8a6e846df7447d6caa.py
+++ b/documents/scrapers/227cb6eb7d2c9f8a6e846df7447d6caa.py
@@ -18,13 +18,13 @@
                                         if mime_type == "text/html" or mime_type == "application/xhtml+xml" or mime_type =="application/xml":
                                         # http://www.crummy.com/software/BeautifulSoup/documentation.html
                                                 soup = BeautifulSoup(htcontent)
-                                                for row in soup.find(class_ = "ms-rteTable-GreyAlternating").find_all('tr'):
+						rowtitle = soup.find(class_ = "wc-title").find("h1").string
+                                                if rowtitle != None:
+                                                   description = rowtitle + ": "
+                                                for row in soup.find(class_ ="wc-content").find_all('td'):
                                                         if row != None:
-								rowtitle = row.find('th').string
-                                                                if rowtitle != None:
-                                                                    description = description + "\n" + rowtitle + ": "
-                                                                for text in row.find('td').stripped_strings:
-                                                                    description = description + text
+                                                                for text in row.stripped_strings:
+                                                                    description = description + text + "\n"
                                                      		for atag in row.find_all("a"):
                                                                 	if atag.has_key('href'):
                                                                         	links.append(scrape.fullurl(link,atag['href']))
@@ -37,7 +37,7 @@
 	def getColumnCount(self):
 		return 2
 	def getTable(self,soup):
-		return soup.find(class_ = "ms-rteTable-GreyAlternating")
+		return soup.find(class_ = "ms-rteTable-default")
 	def getColumns(self,columns):
 		(date, title) = columns
 		return (title, date, title, title, None)

--- a/documents/scrapers/24bd71114d3975ed9a63ad29624c62c9.py
+++ b/documents/scrapers/24bd71114d3975ed9a63ad29624c62c9.py
@@ -7,7 +7,7 @@
 #http://www.doughellmann.com/PyMOTW/abc/
 class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
         def getTable(self,soup):
-                return soup.find(id = "inner_content")       
+                return soup.find(class_="tborder")       
         def getColumnCount(self):
                 return 2
         def getColumns(self,columns):

--- a/documents/scrapers/3d5871a44abbbc81ef5b3a420070755d.py
+++ b/documents/scrapers/3d5871a44abbbc81ef5b3a420070755d.py
@@ -8,40 +8,14 @@
 from datetime import *
 
 #http://www.doughellmann.com/PyMOTW/abc/
-class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
-        def getTable(self,soup):
-                return soup.find(class_ = "inner-column").table       
-        def getRows(self,table):
-                return table.tbody.find_all('tr',recursive=False)
+class ScraperImplementation(genericScrapers.GenericHTMLDisclogScraper):
         def getColumnCount(self):
-                return 3
-        def getColumns(self,columns):
-                (date, title, description) = columns
-                return (date, date, title, description, None)
-        def getDate(self, content, entry, doc):
-		i = 0
-		date = ""
-		for string in content.stripped_strings:
-    			if i ==1:
-				date = string
-			i = i+1
-                edate = parse(date, dayfirst=True, fuzzy=True).strftime("%Y-%m-%d")
-                print edate
-                doc.update({'date': edate})
-                return
-   	def getTitle(self, content, entry, doc):
-		i = 0
-		title = ""
-		for string in content.stripped_strings:
-    			if i < 2:
-				title = title + string
-			i = i+1
-                doc.update({'title': title})
-		#print title
-                return
+                return 0
 
 if __name__ == '__main__':
-    print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
-    print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
+#http://www.csiro.au/Portals/About-CSIRO/How-we-work/Governance/FOI-Request-Disclosure-Log-2012-13.aspx
+#http://www.csiro.au/Portals/About-CSIRO/How-we-work/Governance/FOI-Request-Disclosure-Log-2011-12.aspx
+    print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericHTMLDisclogScraper)
+    print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericHTMLDisclogScraper)
     ScraperImplementation().doScrape()
 

--- a/documents/scrapers/6fa04af95fbe7de96daa2c7560e0aad3.py
+++ b/documents/scrapers/6fa04af95fbe7de96daa2c7560e0aad3.py
@@ -6,8 +6,6 @@
 
 #http://www.doughellmann.com/PyMOTW/abc/
 class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
-        def getTable(self,soup):
-                return soup.find(id = "content_div_50269").table
         def getColumns(self,columns):
                 (id, date, title, description, notes) = columns
                 return (id, date, title, description, notes)

--- a/documents/scrapers/7c6adc1d41cf029bf1a0959e5156477a.py
+++ b/documents/scrapers/7c6adc1d41cf029bf1a0959e5156477a.py
@@ -21,11 +21,15 @@
         d.make_links_absolute(base_url = self.getURL())
         for table in d('table').items():
             title= table('thead').text()
-            print title
+            print self.remove_control_chars(title)
             (idate,descA,descB,link,deldate,notes) = table('tbody tr').map(lambda i, e: pq(e).children().eq(1).text())
             links = table('a').map(lambda i, e: pq(e).attr('href'))
             description = descA+" "+descB
-            edate = parse(idate[:12], dayfirst=True, fuzzy=True).strftime("%Y-%m-%d")
+	    try:
+	            edate = parse(idate[:12], dayfirst=True, fuzzy=True).strftime("%Y-%m-%d")
+	    except ValueError:
+		    edate = date.today().strftime("%Y-%m-%d")
+		    pass
             print edate
             dochash = scrape.mkhash(self.remove_control_chars(title))
             doc = foidocsdb.get(dochash)

--- a/documents/scrapers/8e874a2fde8aa0ccdc6d14573d766540.py
+++ b/documents/scrapers/8e874a2fde8aa0ccdc6d14573d766540.py
@@ -18,10 +18,10 @@
                                         if mime_type == "text/html" or mime_type == "application/x