error reporting features

Scrapers now print errors to stderr, and runScrapers.sh collects each
run's stderr into /tmp/disclosr-error, marks which scraper produced it
and emails the log when it is non-empty. Also reworks the data.gov.au
CKAN export, adds a dataset listing page (datagov-merge.php) and a
Queensland CKAN metadata fetcher (dataqld.py), and rewrites the gazette
scraper to page backwards through the whole archive.


Former-commit-id: 4458096bdd46a0e420126ab910fbf68cbdd986f0

--- a/documents/datagov-export.py
+++ b/documents/datagov-export.py
@@ -2,43 +2,18 @@
 import couchdb
 from ckanclient import CkanApiError
 import re
+import html2text # aaronsw :(
+
 
 class LoaderError(Exception):
     pass
 
 # Instantiate the CKAN client.
 #ckan = ckanclient.CkanClient(base_location='http://localhost:5000/api',    api_key='b47b24cd-591d-40c1-8677-d73101d56d1b')
-ckan = ckanclient.CkanClient(base_location='http://data.disclosurelo.gs/api',    api_key='72f90359-0396-438c-804f-a26a24336747')
+ckan = ckanclient.CkanClient(base_location='http://data.disclosurelo.gs/api',
+    api_key='482a9dd2-a976-4adf-ac77-d71d92a98a52')
 #couch = couchdb.Server('http://127.0.0.1:5984/')
 couch = couchdb.Server('http://192.168.1.113:5984/')
-
-# http://stackoverflow.com/a/7778368/684978
-from HTMLParser import HTMLParser
-import htmlentitydefs
-
-class HTMLTextExtractor(HTMLParser):
-    def __init__(self):
-        HTMLParser.__init__(self)
-        self.result = [ ]
-
-    def handle_data(self, d):
-        self.result.append(d)
-
-    def handle_charref(self, number):
-        codepoint = int(number[1:], 16) if number[0] in (u'x', u'X') else int(number)
-        self.result.append(unichr(codepoint))
-
-    def handle_entityref(self, name):
-        codepoint = htmlentitydefs.name2codepoint[name]
-        self.result.append(unichr(codepoint))
-
-    def get_text(self):
-        return u''.join(self.result)
-
-def html_to_text(html):
-    s = HTMLTextExtractor()
-    s.feed(html)
-    return s.get_text()
 
 # http://code.activestate.com/recipes/578019-bytes-to-human-human-to-bytes-converter/
 SYMBOLS = {
@@ -118,6 +93,7 @@
     return munge(input_name.replace(' ', '').replace('.', '_').replace('&', 'and'))
     #return input_name.replace(' ', '').replace('.', '_').replace('&', 'and')
 
+
 def get_licence_id(licencename):
     map = {
         "Creative Commons - Attribution-Share Alike 2.0 Australia (CC-SA)\nThe downloadable version of the database is licensed under CC-BY-SA Creative Commons Attribution Share Alike and contains only the database fields that are released under that license. These fields are object title, object number, object description as well as temporal, spatial and dimension details. It also contains a persistent URL for each record.": 'cc-by-sa',
@@ -131,9 +107,9 @@
         "Creative Commons - Attribution 2.5 Australia (CC-BY)": 'cc-by',
         'CreativeCommonsAttributionCCBY25': 'cc-by',
         "PublicDomain": 'other-pd',
-        }
+    }
     if licencename not in map.keys():
-          raise Exception(licencename + " not found");
+        raise Exception(licencename + " not found")
     return map[licencename];
 
 docsdb = couch['disclosr-documents']
@@ -143,44 +119,82 @@
         print doc.id
         if doc.value['url'] != "http://data.gov.au/data/":
             # Collect the package metadata.
-            pkg_name = name_munge(doc.value['metadata']['DCTERMS.Title'][:100])
-            tags = doc.value['metadata']["Keywords / Tags"]
-            if not hasattr(tags, '__iter__'):
-                tags = [tags]
-            [re.sub('[^a-zA-Z0-9-_()]', '', tag).replace('&', 'and').lower() for tag in tags]
-            package_entity = {
-                'name': pkg_name,
-                'title': doc.value['metadata']['DCTERMS.Title'],
-                'url': doc.value['metadata']['DCTERMS.Source.URI'],
-
-                'author': doc.value['metadata']["DCTERMS.Creator"],
-                'maintainer': doc.value['metadata']["DCTERMS.Creator"],
-                'licence_id': get_licence_id(doc.value['metadata']['DCTERMS.License']),
-                'notes': html_to_text(doc.value['metadata']['Description']),
-            }
-            if len(tags) > 0:
-                package_entity['tags'] = tags
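+            # the CKAN package name is the slug taken from the dataset URL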
+            pkg_name = doc.value['url'].replace("http://data.gov.au/dataset/", '').replace('/', '')
+            tags = []
+            if doc.value['agencyID'] == "AGIMO":
+                if len(doc.value['metadata']["Keywords / Tags"]) > 0:
+                    if hasattr(doc.value['metadata']["Keywords / Tags"], '__iter__'):
+                        tags = tags + doc.value['metadata']["Keywords / Tags"]
+                    else:
+                        tags = tags + [doc.value['metadata']["Keywords / Tags"]]
+                if 'data.gov.au Category' in doc.value['metadata'].keys() and len(doc.value['metadata']['data.gov.au Category']) > 0:
+                    if hasattr(doc.value['metadata']['data.gov.au Category'], '__iter__'):
+                        tags = tags + doc.value['metadata']['data.gov.au Category']
+                    else:
+                        tags = tags + [doc.value['metadata']['data.gov.au Category']]
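+                # CKAN tags allow only alphanumerics, "-", "_" and "."; drop anything else and lowercase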
+                tags = [re.sub('[^a-zA-Z0-9-_.]', '', tag.replace('&', 'and')).lower() for tag in tags if tag]
                 print tags
+                package_entity = {
+                    'name': pkg_name,
+                    'title': doc.value['metadata']['DCTERMS.Title'],
+                    'url': doc.value['metadata']['DCTERMS.Source.URI'],
+                    'tags': tags, #tags are mandatory?
+                    'author': doc.value['metadata']["DCTERMS.Creator"],
+                    'maintainer': doc.value['metadata']["DCTERMS.Creator"],
+                    'licence_id': get_licence_id(doc.value['metadata']['DCTERMS.License']),
+                    'notes': html2text.html2text(doc.value['metadata']['Description']),
+                }
+            if doc.value['agencyID'] == "qld":
+                package_entity = doc.value['metadata']
+
             try:
-                #print doc.id
+                print package_entity
                 ckan.package_register_post(package_entity)
             except CkanApiError, e:
                 if ckan.last_status == 409:
-                    print "already exists"
+                    print "package already exists"
                 else:
                     raise LoaderError('Unexpected status %s checking for package under \'%s\': %r' % (
                         ckan.last_status, pkg_name, e.args))
 
-            print package_entity
-            #todo add to organisation (author/creator/maintainer) http://docs.ckan.org/en/latest/apiv3.html#examples ckan.logic.action.update.package_owner_org_update
-            #if 'data.gov.au Category' in doc.value['metadata'].keys(): #todo add to group
+
+            #add to group
+
+            group_name = name_munge(doc.value['metadata']["Agency"][:100])
+            try:
+                print ckan.group_entity_get(group_name)
+
+                # Update the group details
+                group_entity = ckan.last_message
+                print "group exists"
+                if 'packages' in group_entity.keys():
+                    group_entity['packages'] = list(set(group_entity['packages'] + [pkg_name]))
+                else:
+                    group_entity['packages'] = [pkg_name]
+                ckan.group_entity_put(group_entity)
+            except CkanApiError, e:
+                if ckan.last_status == 404:
+                    print "group does not exist, creating"
+                    group_entity = {
+                        'name': group_name,
+                        'title': doc.value['metadata']["Agency"],
+                        'description': doc.value['metadata']["Agency"],
+                        'packages': [pkg_name],
+                        # 'type': "organization" # not allowed via API, use database query
+                        # update "group" set type = 'organization';
+                        }
+                    print group_entity
+                    ckan.group_register_post(group_entity)
+                else:
+                    raise LoaderError('Unexpected status %s adding to group under \'%s\': %r' % (
+                        ckan.last_status, pkg_name, e.args))
             if 'Download' in doc.value['metadata'].keys():
                 try:
                     pkg = ckan.package_entity_get(pkg_name)
                     resources = pkg.get('resources', [])
                     if len(resources) < len(doc.value['metadata']['Download']):
                         for resource in doc.value['metadata']['Download']:
-                            print resource
+
                             # http://docs.ckan.org/en/ckan-1.7/domain-model-resource.html
                             # (KML/KMZ) / (Shapefile) /(Other)
                             format = "plain"
@@ -191,6 +205,7 @@
                             name = resource['href']
                             if 'name' in resource.keys():
                                 name = resource['name']
+                            print resource
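+                            # strip thousands separators so human2bytes can parse the size string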
                             ckan.add_package_resource(pkg_name, resource['href'], name=name, resource_type='data',
                                 format=format, size=human2bytes(resource['size'].replace(',', '')))
                     else:

--- /dev/null
+++ b/documents/datagov-merge.php
@@ -1,1 +1,26 @@
+<?php
 
+include_once("../include/common.inc.php");
+
+
+setlocale(LC_CTYPE, 'C');
+
+$db = $server->get_db('disclosr-documents');
+$datasets = Array();
+try {
+    $rows = $db->get_view("app", "datasets", null, true)->rows;
+
+    foreach ($rows as $row) {
+        //print_r($row);
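+        // skip the data.gov.au landing page itself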
+        if ($row->value->url != "http://data.gov.au/data/") {
+            $datasets[str_replace(Array("http://data.gov.au/dataset/", "/"), "", $row->value->url)] = $row->id;
+        }
+    }
+} catch (SetteeRestClientException $e) {
+    setteErrorHandler($e);
+}
+ksort($datasets);
+foreach ($datasets as $datasetname => $datasetkey) {
+    print "$datasetname => $datasetkey<br>\n";
+}
+?>
+

--- /dev/null
+++ b/documents/dataqld.py
@@ -1,1 +1,28 @@
+import sys, os
+import time
+import scrape
+from bs4 import BeautifulSoup
 
+from unidecode import unidecode
+import ckanclient
+
+# Instantiate the CKAN client.
+ckan = ckanclient.CkanClient(base_location='https://data.qld.gov.au/api')
+
+# Get the package list.
+package_list = ckan.package_register_get()
+for package_name in package_list:
+    # Get the details of a package.
+    (url, mime_type, html) = scrape.fetchURL(scrape.docsdb,
+        "https://data.qld.gov.au/dataset/"+package_name , "data", "qld", False)
+    hash = scrape.mkhash(scrape.canonurl(url))
+    print hash
+    doc = scrape.docsdb.get(hash)
+    if "metadata" not in doc.keys() or True:
+        ckan.package_entity_get(package_name)
+        package_entity = ckan.last_message
+        doc['type'] = "dataset"
+        doc['metadata'] = package_entity
+        print package_entity
+        scrape.docsdb.save(doc)
+

--- a/documents/gazette.py
+++ b/documents/gazette.py
@@ -5,20 +5,53 @@
 
 from unidecode import unidecode
 
-listurl = "http://gazettes.ag.gov.au/portal/govgazonline.nsf/publications?OpenView&Start=3960"
-(url, mime_type, listhtml) = scrape.fetchURL(scrape.docsdb,
-    listurl, "gazette", "AGD")
-soup = BeautifulSoup(listhtml)
-for row in soup.find_all('tr'):
-    if row.has_key('valign'):
-	for col in tr.find_all('td'):
-		print col.string
-        #url = scrape.fullurl(listurl, atag['href'])
-        #(url, mime_type, html) = scrape.fetchURL(scrape.docsdb,
-        #    url, "data", "AGIMO")
-        #hash = scrape.mkhash(scrape.canonurl(url))
-        #doc = scrape.docsdb.get(hash)
-        #print doc['metadata']
-        #scrape.docsdb.save(doc)
-        #time.sleep(2)
+items = 3950  # offset to start paging backwards from (25 items per screen)
+items = 1  # overrides the line above: only the final screen is processed
+while True:
+    print str(items) + " (" + str(items/25) + " screens to go)"
+    listurl = "http://gazettes.ag.gov.au/portal/govgazonline.nsf/publications?OpenView&Start=" + str(items)
+    (listurl, mime_type, listhtml) = scrape.fetchURL(scrape.docsdb,
+        listurl, "gazette", "AGD", False)
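+    # parse the listing line-by-line: each <tr> sits on its own line of the source HTML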
+    for line in listhtml.split('\n'):
+        soup = BeautifulSoup(line)
+        #print line
+        for row in soup.find_all('tr'):
+            print line
+            if row.has_key('valign'):
+                i = 0
+                date = ""
+                id = ""
+                type = ""
+                description = ""
+                name = ""
+                url = ""
+                for col in row.find_all('td'):
+                    #print ''.join(col.stripped_strings)
+                    if i == 0:
+                        date = ''.join(col.stripped_strings)
+                    if i == 1:
+                        id = ''.join(col.stripped_strings)
+                    if i == 2:
+                        type = ''.join(col.stripped_strings)
+                    if i == 3:
+                        description = ''.join(col.stripped_strings)
+                        for link in col.findAll('a'):
+                            if link.has_key("href"):
+                                url = link['href']
+                                name = ''.join(link.stripped_strings)
+                                print str(items) + " (" + str(items/25) + " screens to go)"
+                                print [date, id, type, description, name, url]
+                                itemurl = scrape.fullurl(listurl, url)
+                                (itemurl, mime_type, html) = scrape.fetchURL(scrape.docsdb,
+                                    itemurl, "gazette", "AGD", False)
+                                hash = scrape.mkhash(scrape.canonurl(itemurl))
+                                doc = scrape.docsdb.get(hash)
+                                doc['metadata'] = {"date": date, "id": id, "type": type, "description": description, "name": name, "url": url}
+                                scrape.docsdb.save(doc)
+                                #time.sleep(2)
+                    i = i + 1
 
+    items = items - 25
+    if items <= 0:
+        break
+

--- a/documents/genericScrapers.py
+++ b/documents/genericScrapers.py
@@ -72,7 +72,8 @@
             edate = date.today().strftime("%Y-%m-%d")
             doc = {'_id': dochash, 'agencyID': self.getAgencyID()
             , 'url': self.getURL(), 'docID': dochash,
-            "date": edate, "title": "Disclosure Log Updated", "description":  self.remove_control_chars(description), "diff": diff}
+            "date": edate, "title": "Disclosure Log Updated", 
+	    "description":  self.remove_control_chars(description), "diff": self.remove_control_chars(diff)}
             foidocsdb.save(doc)
         else:
             print "already saved"
@@ -277,6 +278,6 @@
                         print "header row"
 
                     else:
-                        print "ERROR number of columns incorrect"
+                        print >> sys.stderr, "ERROR number of columns incorrect"
                         print row
 

--- a/documents/runScrapers.sh
+++ b/documents/runScrapers.sh
@@ -1,10 +1,20 @@
-for f in scrapers/*.py; 
-	do echo "Processing $f file.."; 
-	python $f; 
+echo "" > /tmp/disclosr-error
+for f in scrapers/*.py; do
+	echo "Processing $f file..";
+	md5=`md5sum /tmp/disclosr-error`
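+	# 3>&1 1>&2 2>&3 swaps stdout and stderr so only stderr is piped into tee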
+	python $f 3>&1 1>&2 2>&3 | tee --append /tmp/disclosr-error;
+	status=${PIPESTATUS[0]} # the scraper's exit code, not tee's (bash-specific)
+	md52=`md5sum /tmp/disclosr-error`
+	if [ "$md5" != "$md52" ]; then
+		echo "^^^^^^^^^^^^^^ $f" >> /tmp/disclosr-error;
+	fi
-	if [ "$?" -ne "0" ]; then
+	if [ "$status" -ne "0" ]; then
 		echo "error";
-		sleep 2; 
+		sleep 1;
 	fi
 done
+if [ -s /tmp/disclosr-error ] ; then
+    echo "emailling logs..";
+    mail -E -s "Disclosr errors" maxious@lambdacomplex.org < /tmp/disclosr-error ;
+fi
 
 

--- a/documents/scrape.py
+++ b/documents/scrape.py
@@ -7,14 +7,15 @@
 from urlparse import urljoin
 import time
 import os
+import sys
 import mimetypes
 import urllib
 import urlparse
 import socket
 
 #couch = couchdb.Server('http://192.168.1.148:5984/')
-couch = couchdb.Server('http://192.168.1.113:5984/')
-#couch = couchdb.Server('http://127.0.0.1:5984/')
+#couch = couchdb.Server('http://192.168.1.113:5984/')
+couch = couchdb.Server('http://127.0.0.1:5984/')
 
 
 def mkhash(input):
@@ -103,7 +104,7 @@
     req = urllib2.Request(url)
     print "Fetching %s (%s)" % (url, hash)
     if url.startswith("mailto") or url.startswith("javascript") or url.startswith("#") or url == None or url == "":
-        print "Not a valid HTTP url"
+        print >> sys.stderr, "Not a valid HTTP url"
         return (None, None, None)
     doc = docsdb.get(hash)
     if doc == None:
@@ -159,13 +160,13 @@
                 #store as attachment epoch-filename
 
     except (urllib2.URLError, socket.timeout) as e:
-        print "error!"
+        print >> sys.stderr,"error!"
         error = ""
         if hasattr(e, 'reason'):
             error = "error %s in downloading %s" % (str(e.reason), url)
         elif hasattr(e, 'code'):
             error = "error %s in downloading %s" % (e.code, url)
-        print error
+        print >> sys.stderr, error
         doc['error'] = error
         docsdb.save(doc)
         return (None, None, None)

--- a/documents/scrapers/227cb6eb7d2c9f8a6e846df7447d6caa.py
+++ b/documents/scrapers/227cb6eb7d2c9f8a6e846df7447d6caa.py
@@ -18,13 +18,13 @@
                                         if mime_type == "text/html" or mime_type == "application/xhtml+xml" or mime_type =="application/xml":
                                         # http://www.crummy.com/software/BeautifulSoup/documentation.html
                                                 soup = BeautifulSoup(htcontent)
-                                                for row in soup.find(class_ = "ms-rteTable-GreyAlternating").find_all('tr'):
+                                                rowtitle = soup.find(class_ = "wc-title").find("h1").string
+                                                if rowtitle != None:
+                                                    description = rowtitle + ": "
+                                                for row in soup.find(class_ = "wc-content").find_all('td'):
                                                         if row != None:
-								rowtitle = row.find('th').string
-                                                                if rowtitle != None:
-                                                                    description = description + "\n" + rowtitle + ": "
-                                                                for text in row.find('td').stripped_strings:
-                                                                    description = description + text
+                                                                for text in row.stripped_strings:
+                                                                    description = description + text + "\n"
                                                      		for atag in row.find_all("a"):
                                                                 	if atag.has_key('href'):
                                                                         	links.append(scrape.fullurl(link,atag['href']))
@@ -37,7 +37,7 @@
 	def getColumnCount(self):
 		return 2
 	def getTable(self,soup):
-		return soup.find(class_ = "ms-rteTable-GreyAlternating")
+		return soup.find(class_ = "ms-rteTable-default")
 	def getColumns(self,columns):
 		(date, title) = columns
 		return (title, date, title, title, None)

--- a/documents/scrapers/f2ab2908d8ee56ed8d995ef4187e75e6.py
+++ b/documents/scrapers/f2ab2908d8ee56ed8d995ef4187e75e6.py
@@ -10,7 +10,7 @@
                 (id, date, title, description, notes) = columns
                 return (id, date, title, description, notes)
         def getTable(self,soup):
-                return soup.find(id = "content").table
+                return soup.find("table")
 
 if __name__ == '__main__':
     print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)