tool to check datagov resources


Former-commit-id: f406384c3ba09ba04f639abb5731511ddf02b88b

--- a/documents/datagov-export-groups.py
+++ b/documents/datagov-export-groups.py
@@ -9,7 +9,7 @@
 
 # Instantiate the CKAN client.
 #ckan = ckanclient.CkanClient(base_location='http://localhost:5000/api',    api_key='b47b24cd-591d-40c1-8677-d73101d56d1b')
-api_key = 'b3ab75e4-afbb-465b-a09d-8171c8c69a7a'
+api_key = 'ff34526e-f794-4068-8235-fcbba38cd8bc'
 ckan = ckanclient.CkanClient(base_location='http://data.disclosurelo.gs/api',
                              api_key=api_key)
 couch = couchdb.Server('http://127.0.0.1:5984/')
@@ -39,41 +39,43 @@
     groups = {}
     for doc in docsdb.view('app/datasetGroups'):
             group_name = doc.key
-            pkg_name = filter(lambda x: x in '0123456789abcdefghijklmnopqrstuvwxyz-_',
-                              doc.value.replace("http://data.gov.au/dataset/", '').replace('/', '')[:100]);
-            if group_name in groups.keys():
-                groups[group_name] = list(set(groups[group_name] + [pkg_name]))
-            else:
-                groups[group_name] = [pkg_name]
+            if group_name != "Not specified":
+                pkg_name = filter(lambda x: x in '0123456789abcdefghijklmnopqrstuvwxyz-_',
+                                  doc.value.replace("http://data.gov.au/dataset/", '').replace('/', '')[:100]);
+                if group_name in groups.keys():
+                    groups[group_name] = list(set(groups[group_name] + [pkg_name]))
+                else:
+                    groups[group_name] = [pkg_name]
 
     # add dataset to group(s)
     for group_name in groups.keys():
-        group_url = name_munge(group_name[:100])
-        print group_name
-        print groups[group_name]
-        try:
-            # Update the group details
-            group_entity = ckan.group_entity_get(group_url)
-            print "group "+group_name+" exists"
-            if 'packages' in group_entity.keys():
-                group_entity['packages'] = list(set(group_entity['packages'] + groups[group_name]))
-            else:
-                group_entity['packages'] = groups[group_name]
-            ckan.group_entity_put(group_entity)
-        except CkanApiError, e:
-            if ckan.last_status == 404:
-                print "group "+group_name+" does not exist, creating"
-                group_entity = {
-                    'name': group_url,
-                    'title': group_name,
-                    'description': group_name,
-                    'packages': groups[group_name]
-                }
-                #print group_entity
-                ckan.group_register_post(group_entity)
-            elif ckan.last_status == 409:
-                print "group already exists"
-            else:
-                raise LoaderError('Unexpected status %s adding to group under \'%s\': %r' % (
-                    ckan.last_status, pkg_name, e.args))
+        if group_name != "Not specified":
+            group_url = name_munge(group_name[:100])
+            print group_name
+            print groups[group_name]
+            try:
+                # Update the group details
+                group_entity = ckan.group_entity_get(group_url)
+                print "group "+group_name+" exists"
+                if 'packages' in group_entity.keys():
+                    group_entity['packages'] = list(set(group_entity['packages'] + groups[group_name]))
+                else:
+                    group_entity['packages'] = groups[group_name]
+                ckan.group_entity_put(group_entity)
+            except CkanApiError, e:
+                if ckan.last_status == 404:
+                    print "group "+group_name+" does not exist, creating"
+                    group_entity = {
+                        'name': group_url,
+                        'title': group_name,
+                        'description': group_name,
+                        'packages': groups[group_name]
+                    }
+                    #print group_entity
+                    ckan.group_register_post(group_entity)
+                elif ckan.last_status == 409:
+                    print "group already exists"
+                else:
+                    raise LoaderError('Unexpected status %s adding to group under \'%s\': %r' % (
+                        ckan.last_status, pkg_name, e.args))
 

--- a/documents/datagov-export.py
+++ b/documents/datagov-export.py
@@ -10,13 +10,38 @@
     pass
 
 # Instantiate the CKAN client.
-#ckan = ckanclient.CkanClient(base_location='http://localhost:5000/api',    api_key='b47b24cd-591d-40c1-8677-d73101d56d1b')
-api_key = 'b3ab75e4-afbb-465b-a09d-8171c8c69a7a'
-ckan = ckanclient.CkanClient(base_location='http://data.disclosurelo.gs/api',
+api_key = 'ff34526e-f794-4068-8235-fcbba38cd8bc'
+server = 'data.disclosurelo.gs'
+
+ckan = ckanclient.CkanClient(base_location='http://'+server+'api',
                              api_key=api_key)
-ckandirect = ckanapi.RemoteCKAN('http://data.disclosurelo.gs', api_key=api_key)
+ckandirect = ckanapi.RemoteCKAN('http://'+server, api_key=api_key)
 couch = couchdb.Server('http://127.0.0.1:5984/')
 #couch = couchdb.Server('http://192.168.1.113:5984/')
+
+import urllib
+import urlparse
+
+def url_fix(s, charset='utf-8'):
+    """Sometimes you get an URL by a user that just isn't a real
+    URL because it contains unsafe characters like ' ' and so on.  This
+    function can fix some of the problems in a similar way browsers
+    handle data entered by the user:
+
+    >>> url_fix(u'http://de.wikipedia.org/wiki/Elf (Begriffsklärung)')
+    'http://de.wikipedia.org/wiki/Elf%20%28Begriffskl%C3%A4rung%29'
+
+    :param charset: The target charset for the URL if the url was
+                    given as unicode string.
+    """
+    if isinstance(s, unicode):
+        s = s.encode(charset, 'ignore')
+    if not urlparse.urlparse(s).scheme:
+   	s = "http://"+s
+    scheme, netloc, path, qs, anchor = urlparse.urlsplit(s)
+    path = urllib.quote(path, '/%')
+    qs = urllib.quote_plus(qs, ':&=')
+    return urlparse.urlunsplit((scheme, netloc, path, qs, anchor))
 
 # http://code.activestate.com/recipes/578019-bytes-to-human-human-to-bytes-converter/
 SYMBOLS = {
@@ -57,6 +82,9 @@
           ...
       ValueError: can't interpret '12 foo'
     """
+    if s == None:
+	return 0
+    s = s.replace(',', '')
     init = s
     num = ""
     while s and s[0:1].isdigit() or s[0:1] == '.':
@@ -133,15 +161,26 @@
                               doc.value['url'].replace("http://data.gov.au/dataset/", '').replace('/', '')[:100]);
             print pkg_name
             #add to or create organization using direct API
-            org_name = name_munge(doc.value['metadata']["Agency"][:100])
+            agency = doc.value['metadata']["Agency"]
+            if agency == "APS":
+                agency = "Australian Public Service Commission"
+            if agency == "Shared Services, Treasury Directorate":
+                agency = "Shared Services Procurement, Treasury Directorate"
+            if agency == "Treasury - Shared Services":
+                agency = "Shared Services Procurement, Treasury Directorate"
+            if agency == "Territory and Municipal Services (TAMS)":
+                agency = "Territory and Municipal Services Directorate"
+            if agency == "State Library of NSW":
+                agency = "State Library of New South Wales"
+            org_name = name_munge(agency[:100])
             if org_name not in orgs_list:
                 orgs_list = ckandirect.action.organization_list()['result']
                 #print orgs_list
                 if org_name not in orgs_list:
                     try:
                         print "org not found, creating " + org_name
-                        ckandirect.action.organization_create(name=org_name, title=doc.value['metadata']["Agency"],
-                                                              description=doc.value['metadata']["Agency"])
+                        ckandirect.action.organization_create(name=org_name, title=agency,
+                                                              description=agency)
                         orgs_list.append(org_name)
                     except ckanapi.ValidationError, e:
                         print e
@@ -156,6 +195,7 @@
             org_id = orgs_ids[org_name]
             print "org id is "+org_id
             tags = []
+            creator = doc.value['metadata']["DCTERMS.Creator"]
             if doc.value['agencyID'] == "AGIMO":
                 if len(doc.value['metadata']["Keywords / Tags"]) > 0:
                     if hasattr(doc.value['metadata']["Keywords / Tags"], '__iter__'):
@@ -165,20 +205,26 @@
 
                 tags = [re.sub('[^a-zA-Z0-9-_.]', '', tag.replace('&', 'and')).lower() for tag in tags if tag]
                 #print tags
+                extras = []
+
+                for extra_key in doc.value['metadata'].keys():
+                    if extra_key not in ["Description","Content-Language","DCTERMS.Description", "Keywords / Tags" ,"data.gov.au Category", "Download", "Permalink","DCTERMS.Identifier"]:
+			if doc.value['metadata'][extra_key] != None and doc.value['metadata'][extra_key] != "":
+	                        extras.append([extra_key, doc.value['metadata'][extra_key]])
+
                 package_entity = {
                     'name': pkg_name,
                     'title': doc.value['metadata']['DCTERMS.Title'],
                     'url': doc.value['metadata']['DCTERMS.Source.URI'],
                     'tags': tags, #tags are mandatory?
-                    'author': doc.value['metadata']["DCTERMS.Creator"],
-                    'maintainer': doc.value['metadata']["DCTERMS.Creator"],
+                    'author': creator,
+                    'maintainer': creator,
                     'licence_id': get_licence_id(doc.value['metadata']['DCTERMS.License']),
                     'notes': html2text.html2text(doc.value['metadata']['Description']),
-                    'owner_org': org_id
-                    #todo add missing key values like jurasdiction
+                    'owner_org': org_id,
+                    'extras': extras
                 }
-            if doc.value['agencyID'] == "qld":
-                package_entity = doc.value['metadata']
+
 
             try:
                 #print package_entity
@@ -192,6 +238,7 @@
                         ckan.last_status, pkg_name, e.args))
             pkg = ckan.package_entity_get(pkg_name)
 
+
             # add resources (downloadable data files)
             if 'Download' in doc.value['metadata'].keys():
                 try:
@@ -207,13 +254,17 @@
                                 format = 'xml'
                             if resource['format'] == '(CSV/XLS)':
                                 format = 'csv'
+                            if resource['format'] == '(Shapefile)':
+                                format = 'shp'
+                            if resource['format'] == '(KML/KMZ)':
+                                format = 'kml'
                             name = resource['href']
                             if 'name' in resource.keys():
                                 name = resource['name']
                             print resource
-                            ckan.add_package_resource(pkg_name, resource['href'], name=name, resource_type='data',
+                            ckan.add_package_resource(pkg_name, url_fix(resource['href']), name=name, resource_type='data',
                                                       format=format,
-                                                      size=human2bytes(resource['size'].replace(',', '')))
+                                                      size=human2bytes(resource.get('size','0B')))
                     else:
                         print "resources already exist"
                 except CkanApiError, e:

--- /dev/null
+++ b/documents/datagov-resourcereport.py
@@ -1,1 +1,79 @@
+import couchdb
+couch = couchdb.Server('http://127.0.0.1:5984/')
+#couch = couchdb.Server('http://192.168.1.113:5984/')
 
+import urllib
+import urlparse
+import httplib2
+import csv
+import ssl
+
+context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
+context.verify_mode = ssl.CERT_NONE
+
+def url_fix(s, charset='utf-8'):
+    """Sometimes you get an URL by a user that just isn't a real
+    URL because it contains unsafe characters like ' ' and so on.  This
+    function can fix some of the problems in a similar way browsers
+    handle data entered by the user:
+
+    :param charset: The target charset for the URL if the url was
+                    given as unicode string.
+    """
+    if isinstance(s, unicode):
+        s = s.encode(charset, 'ignore')
+    if not urlparse.urlparse(s).scheme:
+   	s = "http://"+s
+    scheme, netloc, path, qs, anchor = urlparse.urlsplit(s)
+    path = urllib.quote(path, '/%')
+    qs = urllib.quote_plus(qs, ':&=')
+    return urlparse.urlunsplit((scheme, netloc, path, qs, anchor))
+
+# http://code.activestate.com/recipes/578019-bytes-to-human-human-to-bytes-converter/
+SYMBOLS = {
+    'customary': ('B', 'KB', 'MB', 'GB', 'T', 'P', 'E', 'Z', 'Y'),
+    'customary_ext': ('byte', 'kilo', 'mega', 'giga', 'tera', 'peta', 'exa',
+                      'zetta', 'iotta'),
+    'iec': ('Bi', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi', 'Yi'),
+    'iec_ext': ('byte', 'kibi', 'mebi', 'gibi', 'tebi', 'pebi', 'exbi',
+                'zebi', 'yobi'),
+}
+
+
+docsdb = couch['disclosr-documents']
+out = csv.writer(open("output.csv","w"), delimiter=',',quoting=csv.QUOTE_ALL)
+if __name__ == "__main__":
+    for doc in docsdb.view('app/datasets'):
+        if doc.value['url'] != "http://data.gov.au/data/" and doc.value['agencyID'] != "qld":
+            # Collect the package metadata.
+            pkg_name = filter(lambda x: x in '0123456789abcdefghijklmnopqrstuvwxyz-_',
+                              doc.value['url'].replace("http://data.gov.au/dataset/", '').replace('/', '')[:100]);
+            if 'Download' in doc.value['metadata'].keys() and len(doc.value['metadata']['Download']) > 0:
+                        for resource in doc.value['metadata']['Download']:
+                            # http://docs.ckan.org/en/ckan-1.7/domain-model-resource.html
+                            # (KML/KMZ) / (Shapefile) /(Other)
+                            format = "plain"
+                            if resource['format'] == '(XML)':
+                                format = 'xml'
+                            if resource['format'] == '(CSV/XLS)':
+                                format = 'csv'
+                            if resource['format'] == '(Shapefile)':
+                                format = 'shp'
+                            if resource['format'] == '(KML/KMZ)':
+                                format = 'kml'
+                            name = resource['href']
+                            if 'name' in resource.keys():
+                                name = resource['name']
+			    if resource['href'].startswith("ftp"):
+				    out.writerow([pkg_name, url_fix(resource['href']), name,format, "ftp", ""])
+			    else:
+				    try:
+					h = httplib2.Http(disable_ssl_certificate_validation=True)
+  				        resp = h.request(url_fix(resource['href']), 'HEAD')
+					content_type = resp[0]['content-type'] if 'content-type' in resp[0].keys() else ""
+					out.writerow([pkg_name, url_fix(resource['href']), name,format, resp[0]['status'], content_type])
+				    except httplib2.ServerNotFoundError:
+					out.writerow([pkg_name, url_fix(resource['href']), name,format, "500","badurl"])
+	    else:
+		out.writerow([pkg_name])
+

--- a/documents/datagov.py
+++ b/documents/datagov.py
@@ -39,7 +39,9 @@
                                     link = item.find("a")
                                     format = item.find(property="dc:format")
                                     linkobj = {"href":link['href'].replace("/bye?","").strip(),
-                                            "format": format.string.strip(), "size": format.next_sibling.string.strip()}
+                                            "format": format.string.strip()}
+				    if format.next_sibling.string != None:
+					linkobj["size"] = format.next_sibling.string.strip()
                                     if link.string != None:
                                         linkobj["name"] = link.string.strip()
                                     doc['metadata'][last_title].append(linkobj)