beginning of export merge


Former-commit-id: 121a09aa3f9417f0512b4cb138190070dc9da890

--- a/documents/datagov-export.py
+++ b/documents/datagov-export.py
@@ -2,14 +2,80 @@
 import couchdb
 from ckanclient import CkanApiError
 import re
+import html2text # aaronsw :(
+
+
 class LoaderError(Exception):
     pass
+
+# Instantiate the CKAN client.
+#ckan = ckanclient.CkanClient(base_location='http://localhost:5000/api',    api_key='b47b24cd-591d-40c1-8677-d73101d56d1b')
+ckan = ckanclient.CkanClient(base_location='http://data.disclosurelo.gs/api',
+    api_key='482a9dd2-a976-4adf-ac77-d71d92a98a52')
+#couch = couchdb.Server('http://127.0.0.1:5984/')
+couch = couchdb.Server('http://192.168.1.113:5984/')
+
+# http://code.activestate.com/recipes/578019-bytes-to-human-human-to-bytes-converter/
+SYMBOLS = {
+    'customary': ('B', 'KB', 'MB', 'GB', 'T', 'P', 'E', 'Z', 'Y'),
+    'customary_ext': ('byte', 'kilo', 'mega', 'giga', 'tera', 'peta', 'exa',
+                      'zetta', 'iotta'),
+    'iec': ('Bi', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi', 'Yi'),
+    'iec_ext': ('byte', 'kibi', 'mebi', 'gibi', 'tebi', 'pebi', 'exbi',
+                'zebi', 'yobi'),
+}
+
+def human2bytes(s):
+    """
+    Attempts to guess the string format based on default symbols
+    set and return the corresponding bytes as an integer.
+    When unable to recognize the format ValueError is raised.
+
+      >>> human2bytes('0 B')
+      0
+      >>> human2bytes('1 K')
+      1024
+      >>> human2bytes('1 M')
+      1048576
+      >>> human2bytes('1 Gi')
+      1073741824
+      >>> human2bytes('1 tera')
+      1099511627776
+
+      >>> human2bytes('0.5kilo')
+      512
+      >>> human2bytes('0.1  byte')
+      0
+      >>> human2bytes('1 k')  # k is an alias for K
+      1024
+      >>> human2bytes('12 foo')
+      Traceback (most recent call last):
+          ...
+      ValueError: can't interpret '12 foo'
+    """
+    init = s
+    num = ""
+    while s and s[0:1].isdigit() or s[0:1] == '.':
+        num += s[0]
+        s = s[1:]
+    num = float(num)
+    letter = s.strip()
+    for name, sset in SYMBOLS.items():
+        if letter in sset:
+            break
+    else:
+        if letter == 'k':
+            # treat 'k' as an alias for 'K' as per: http://goo.gl/kTQMs
+            sset = SYMBOLS['customary']
+            letter = letter.upper()
+        else:
+            raise ValueError("can't interpret %r" % init)
+    prefix = {sset[0]: 1}
+    for i, s in enumerate(sset[1:]):
+        prefix[s] = 1 << (i + 1) * 10
+    return int(num * prefix[letter])
+
 # https://github.com/okfn/ckanext-importlib
-# Instantiate the CKAN client.
-ckan = ckanclient.CkanClient(base_location='http://localhost:5000/api',
-                             api_key='b47b24cd-591d-40c1-8677-d73101d56d1b')
-# (use your own api_key from http://thedatahub.org/user/me )
-
 def munge(name):
     # convert spaces to underscores
     name = re.sub(' ', '_', name).lower()
@@ -21,11 +87,31 @@
     # remove double underscores
     name = re.sub('__', '_', name).lower()
     return name
+
+
 def name_munge(input_name):
     return munge(input_name.replace(' ', '').replace('.', '_').replace('&', 'and'))
     #return input_name.replace(' ', '').replace('.', '_').replace('&', 'and')
 
-couch = couchdb.Server('http://127.0.0.1:5984/')
+
+def get_licence_id(licencename):
+    map = {
+        "Creative Commons - Attribution-Share Alike 2.0 Australia (CC-SA)\nThe downloadable version of the database is licensed under CC-BY-SA Creative Commons Attribution Share Alike and contains only the database fields that are released under that license. These fields are object title, object number, object description as well as temporal, spatial and dimension details. It also contains a persistent URL for each record.": 'cc-by-sa',
+        "CreativeCommonsAttributionNonCommercial30AustraliaCCBYNC30": 'cc-nc',
+        'Otherpleasespecify': 'notspecified',
+        '': 'notspecified',
+        "Publicly available data": 'notspecified',
+        "CreativeCommonsAttributionNoDerivativeWorks30AustraliaCCBYND30": "other-closed",
+        "CreativeCommonsAttributionNonCommercialNoDerivs30AustraliaCCBYNCND30": "other-closed",
+        'CreativeCommonsAttribution30AustraliaCCBY30': 'cc-by',
+        "Creative Commons - Attribution 2.5 Australia (CC-BY)": 'cc-by',
+        'CreativeCommonsAttributionCCBY25': 'cc-by',
+        "PublicDomain": 'other-pd',
+    }
+    if licencename not in map.keys():
+        raise Exception(licencename + " not found");
+    return map[licencename];
+
 docsdb = couch['disclosr-documents']
 
 if __name__ == "__main__":
@@ -33,26 +119,98 @@
         print doc.id
         if doc.value['url'] != "http://data.gov.au/data/":
             # Collect the package metadata.
-            pkg_name = name_munge(doc.value['metadata']['DCTERMS.Title'][:100])
+            pkg_name = doc.value['url'].replace("http://data.gov.au/dataset/",'').replace('/','');
+            tags = []
+            if len(doc.value['metadata']["Keywords / Tags"]) > 0:
+                if hasattr(doc.value['metadata']["Keywords / Tags"], '__iter__'):
+                    tags = tags + doc.value['metadata']["Keywords / Tags"]
+                else:
+                    tags = tags + [doc.value['metadata']["Keywords / Tags"]]
+            if 'data.gov.au Category' in doc.value['metadata'].keys() and len(doc.value['metadata']['data.gov.au Category']) > 0:
+                if hasattr(doc.value['metadata']['data.gov.au Category'], '__iter__'):
+                    tags = tags + doc.value['metadata']['data.gov.au Category']
+                else:
+                    tags = tags + [doc.value['metadata']['data.gov.au Category']]
+            tags = [re.sub('[^a-zA-Z0-9-_.]', '', tag.replace('&', 'and')).lower() for tag in tags if tag]
+            print tags
             package_entity = {
                 'name': pkg_name,
                 'title': doc.value['metadata']['DCTERMS.Title'],
                 'url': doc.value['metadata']['DCTERMS.Source.URI'],
-                'tags': doc.value['metadata']["Keywords / Tags"],   #todo   must be alphanumeric characters or symbols
-
+                'tags': tags, #tags are mandatory?
                 'author': doc.value['metadata']["DCTERMS.Creator"],
                 'maintainer': doc.value['metadata']["DCTERMS.Creator"],
-                'licence_id': doc.value['metadata']['DCTERMS.License'],
-                'notes': doc.value['metadata']['Description'],
-                }
+                'licence_id': get_licence_id(doc.value['metadata']['DCTERMS.License']),
+                'notes': html2text.html2text(doc.value['metadata']['Description']),
+            }
+
             try:
+                print package_entity
                 ckan.package_register_post(package_entity)
             except CkanApiError, e:
                 if ckan.last_status == 409:
-                    print "already exists"
-                else:
-                    raise LoaderError('Unexpected status %s checking for package under \'%s\': %r' % (ckan.last_status, doc.id, e.args))
-
-            print package_entity
-            ckan.add_package_resource(pkg_name, 'http://example.org/', name='Foo', resource_type='data', format='csv')
-
+                    print "package already exists"
+                else:
+                    raise LoaderError('Unexpected status %s checking for package under \'%s\': %r' % (
+                        ckan.last_status, pkg_name, e.args))
+
+
+            #add to group
+
+            group_name = name_munge(doc.value['metadata']["Agency"][:100])
+            try:
+                print ckan.group_entity_get(group_name)
+
+                # Update the group details
+                group_entity = ckan.last_message
+                print "group exists"
+                if 'packages' in group_entity.keys():
+                    group_entity['packages'] = list(set(group_entity['packages'] + [pkg_name]))
+                else:
+                    group_entity['packages'] = [pkg_name]
+                ckan.group_entity_put(group_entity)
+            except CkanApiError, e:
+                if ckan.last_status == 404:
+                    print "group does not exist, creating"
+                    group_entity = {
+                        'name': group_name,
+                        'title': doc.value['metadata']["Agency"],
+                        'description': doc.value['metadata']["Agency"],
+                        'packages': [pkg_name],
+                        # 'type': "organization" # not allowed via API, use database query
+                        # update "group" set type = 'organization';
+                        }
+                    print group_entity
+                    ckan.group_register_post(group_entity)
+                else:
+                    raise LoaderError('Unexpected status %s adding to group under \'%s\': %r' % (
+                        ckan.last_status, pkg_name, e.args))
+            if 'Download' in doc.value['metadata'].keys():
+                try:
+                    pkg = ckan.package_entity_get(pkg_name)
+                    resources = pkg.get('resources', [])
+                    if len(resources) < len(doc.value['metadata']['Download']):
+                        for resource in doc.value['metadata']['Download']:
+
+                            # http://docs.ckan.org/en/ckan-1.7/domain-model-resource.html
+                            # (KML/KMZ) / (Shapefile) /(Other)
+                            format = "plain"
+                            if resource['format'] == '(XML)':
+                                format = 'xml'
+                            if resource['format'] == '(CSV/XLS)':
+                                format = 'csv'
+                            name = resource['href']
+                            if 'name' in resource.keys():
+                                name = resource['name']
+                            print resource
+                            ckan.add_package_resource(pkg_name, resource['href'], name=name, resource_type='data',
+                                format=format, size=human2bytes(resource['size'].replace(',', '')))
+                    else:
+                        print "resources already exist"
+                except CkanApiError, e:
+                    if ckan.last_status == 404:
+                        print "parent dataset does not exist"
+                    else:
+                        raise LoaderError('Unexpected status %s checking for package under \'%s\': %r' % (
+                            ckan.last_status, pkg_name, e.args))
+

--- /dev/null
+++ b/documents/datagov-merge.php
@@ -1,1 +1,26 @@
+<?php
 
+include_once("../include/common.inc.php");
+
+
+setlocale(LC_CTYPE, 'C');
+
+$db = $server->get_db('disclosr-documents');
+$datasets = Array();
+try {
+    $rows = $db->get_view("app", "datasets", null, true)->rows;
+
+    foreach ($rows as $row) {
+        //print_r($row);
+        if ($row->value->url != "http://data.gov.au/data/")
+        $datasets[str_replace(Array("http://data.gov.au/dataset/","/"),"",$row->value->url)] = $row->id;
+    }
+} catch (SetteeRestClientException $e) {
+    setteErrorHandler($e);
+}
+ksort($datasets);
+foreach ($datasets as $datasetname => $datasetkey) {
+    print "$datasetname => $datasetkey<br>\n";
+}
+?>
+

--- a/documents/datagov.py
+++ b/documents/datagov.py
@@ -13,7 +13,7 @@
     if atag.has_key('href'):
         url = scrape.fullurl(listurl, atag['href'])
         (url, mime_type, html) = scrape.fetchURL(scrape.docsdb,
-            url, "data", "AGIMO")
+            url, "data", "AGIMO", False)
         hash = scrape.mkhash(scrape.canonurl(url))
         doc = scrape.docsdb.get(hash)
         if "metadata" not in doc.keys() or True:

--- a/documents/scrape.py
+++ b/documents/scrape.py
@@ -12,6 +12,11 @@
 import urlparse
 import socket
 
+#couch = couchdb.Server('http://192.168.1.148:5984/')
+couch = couchdb.Server('http://192.168.1.113:5984/')
+#couch = couchdb.Server('http://127.0.0.1:5984/')
+
+
 def mkhash(input):
     return hashlib.md5(input).hexdigest().encode("utf-8")
 
@@ -104,14 +109,11 @@
     if doc == None:
         doc = {'_id': hash, 'agencyID': agencyID, 'url': url, 'fieldName': fieldName, 'type': 'website'}
     else:
-        if (('page_scraped' in doc) and (time.time() - doc['page_scraped']) < 60 * 24 * 14):
+        if (('page_scraped' in doc) and ((time.time() - doc['page_scraped']) < 60 * 24 * 14) or (scrape_again == False)):
             print "Uh oh, trying to scrape URL again too soon!" + hash
             last_attachment_fname = doc["_attachments"].keys()[-1]
             last_attachment = docsdb.get_attachment(doc, last_attachment_fname)
             content = last_attachment
-            return (doc['url'], doc['mime_type'], content.read())
-        if scrape_again == False:
-            print "Not scraping this URL again as requested"
             return (doc['url'], doc['mime_type'], content.read())
 
     req.add_header("User-Agent", "Mozilla/4.0 (compatible; Prometheus webspider; owner maxious@lambdacomplex.org)")
@@ -207,9 +209,6 @@
                     #print linkurl
                     scrapeAndStore(docsdb, linkurl, depth - 1, fieldName, agencyID)
 
-#couch = couchdb.Server('http://192.168.1.148:5984/')
-#couch = couchdb.Server('http://192.168.1.113:5984/')
-couch = couchdb.Server('http://127.0.0.1:5984/')
 # select database
 agencydb = couch['disclosr-agencies']
 docsdb = couch['disclosr-documents']