From: Alex Sadleir
Date: Fri, 26 Apr 2013 12:41:45 +0000
Subject: tool to check datagov resources
X-Git-Url: https://maxious.lambdacomplex.org/git/?p=disclosr.git&a=commitdiff&h=f8e2c3836140f6e9b4f226b93c8b7a5a8cda035f
---
tool to check datagov resources

Former-commit-id: f406384c3ba09ba04f639abb5731511ddf02b88b
---

--- a/documents/datagov-export-groups.py
+++ b/documents/datagov-export-groups.py
@@ -9,7 +9,7 @@
 
 # Instantiate the CKAN client.
 #ckan = ckanclient.CkanClient(base_location='http://localhost:5000/api', api_key='b47b24cd-591d-40c1-8677-d73101d56d1b')
-api_key = 'b3ab75e4-afbb-465b-a09d-8171c8c69a7a'
+api_key = 'ff34526e-f794-4068-8235-fcbba38cd8bc'
 ckan = ckanclient.CkanClient(base_location='http://data.disclosurelo.gs/api',
                              api_key=api_key)
 couch = couchdb.Server('http://127.0.0.1:5984/')
--- a/documents/datagov-export.py
+++ b/documents/datagov-export.py
@@ -10,13 +10,38 @@
     pass
 
 # Instantiate the CKAN client.
-#ckan = ckanclient.CkanClient(base_location='http://localhost:5000/api', api_key='b47b24cd-591d-40c1-8677-d73101d56d1b')
-api_key = 'b3ab75e4-afbb-465b-a09d-8171c8c69a7a'
-ckan = ckanclient.CkanClient(base_location='http://data.disclosurelo.gs/api',
+api_key = 'ff34526e-f794-4068-8235-fcbba38cd8bc'
+server = 'data.disclosurelo.gs'
+
+ckan = ckanclient.CkanClient(base_location='http://'+server+'api',
                              api_key=api_key)
-ckandirect = ckanapi.RemoteCKAN('http://data.disclosurelo.gs', api_key=api_key)
+ckandirect = ckanapi.RemoteCKAN('http://'+server, api_key=api_key)
 couch = couchdb.Server('http://127.0.0.1:5984/')
 #couch = couchdb.Server('http://192.168.1.113:5984/')
+
+import urllib
+import urlparse
+
+def url_fix(s, charset='utf-8'):
+    """Sometimes you get an URL by a user that just isn't a real
+    URL because it contains unsafe characters like ' ' and so on. This
+    function can fix some of the problems in a similar way browsers
+    handle data entered by the user:
+
+    >>> url_fix(u'http://de.wikipedia.org/wiki/Elf (Begriffsklärung)')
+    'http://de.wikipedia.org/wiki/Elf%20%28Begriffskl%C3%A4rung%29'
+
+    :param charset: The target charset for the URL if the url was
+    given as unicode string.
+    """
+    if isinstance(s, unicode):
+        s = s.encode(charset, 'ignore')
+    if not urlparse.urlparse(s).scheme:
+        s = "http://"+s
+    scheme, netloc, path, qs, anchor = urlparse.urlsplit(s)
+    path = urllib.quote(path, '/%')
+    qs = urllib.quote_plus(qs, ':&=')
+    return urlparse.urlunsplit((scheme, netloc, path, qs, anchor))
 
 # http://code.activestate.com/recipes/578019-bytes-to-human-human-to-bytes-converter/
 SYMBOLS = {
@@ -57,6 +82,9 @@
           ...
       ValueError: can't interpret '12 foo'
     """
+    if s == None:
+        return 0
+    s = s.replace(',', '')
     init = s
     num = ""
     while s and s[0:1].isdigit() or s[0:1] == '.':
@@ -177,6 +205,13 @@
             tags = [re.sub('[^a-zA-Z0-9-_.]', '', tag.replace('&', 'and')).lower() for tag in tags if tag]
             #print tags
 
+            extras = []
+
+            for extra_key in doc.value['metadata'].keys():
+                if extra_key not in ["Description","Content-Language","DCTERMS.Description", "Keywords / Tags" ,"data.gov.au Category", "Download", "Permalink","DCTERMS.Identifier"]:
+                    if doc.value['metadata'][extra_key] != None and doc.value['metadata'][extra_key] != "":
+                        extras.append([extra_key, doc.value['metadata'][extra_key]])
+
             package_entity = {
                 'name': pkg_name,
                 'title': doc.value['metadata']['DCTERMS.Title'],
@@ -186,8 +221,8 @@
                 'maintainer': creator,
                 'licence_id': get_licence_id(doc.value['metadata']['DCTERMS.License']),
                 'notes': html2text.html2text(doc.value['metadata']['Description']),
-                'owner_org': org_id
-                #todo add missing key values like jurasdiction
+                'owner_org': org_id,
+                'extras': extras
             }
 
@@ -219,13 +254,17 @@
                         format = 'xml'
                     if resource['format'] == '(CSV/XLS)':
                         format = 'csv'
+                    if resource['format'] == '(Shapefile)':
+                        format = 'shp'
+                    if resource['format'] == '(KML/KMZ)':
+                        format = 'kml'
                     name = resource['href']
                     if 'name' in resource.keys():
                         name = resource['name']
                     print resource
-                    ckan.add_package_resource(pkg_name, resource['href'], name=name, resource_type='data',
+                    ckan.add_package_resource(pkg_name, url_fix(resource['href']), name=name, resource_type='data',
                                               format=format,
-                                              size=human2bytes(resource['size'].replace(',', '')))
+                                              size=human2bytes(resource.get('size','0B')))
                 else:
                     print "resources already exist"
             except CkanApiError, e:
--- /dev/null
+++ b/documents/datagov-resourcereport.py
@@ -1,1 +1,79 @@
+import couchdb
+couch = couchdb.Server('http://127.0.0.1:5984/')
+#couch = couchdb.Server('http://192.168.1.113:5984/')
+import urllib
+import urlparse
+import httplib2
+import csv
+import ssl
+
+context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
+context.verify_mode = ssl.CERT_NONE
+
+def url_fix(s, charset='utf-8'):
+    """Sometimes you get an URL by a user that just isn't a real
+    URL because it contains unsafe characters like ' ' and so on. This
+    function can fix some of the problems in a similar way browsers
+    handle data entered by the user:
+
+    :param charset: The target charset for the URL if the url was
+    given as unicode string.
+    """
+    if isinstance(s, unicode):
+        s = s.encode(charset, 'ignore')
+    if not urlparse.urlparse(s).scheme:
+        s = "http://"+s
+    scheme, netloc, path, qs, anchor = urlparse.urlsplit(s)
+    path = urllib.quote(path, '/%')
+    qs = urllib.quote_plus(qs, ':&=')
+    return urlparse.urlunsplit((scheme, netloc, path, qs, anchor))
+
+# http://code.activestate.com/recipes/578019-bytes-to-human-human-to-bytes-converter/
+SYMBOLS = {
+    'customary': ('B', 'KB', 'MB', 'GB', 'T', 'P', 'E', 'Z', 'Y'),
+    'customary_ext': ('byte', 'kilo', 'mega', 'giga', 'tera', 'peta', 'exa',
+                      'zetta', 'iotta'),
+    'iec': ('Bi', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi', 'Yi'),
+    'iec_ext': ('byte', 'kibi', 'mebi', 'gibi', 'tebi', 'pebi', 'exbi',
+                'zebi', 'yobi'),
+}
+
+
+docsdb = couch['disclosr-documents']
+out = csv.writer(open("output.csv","w"), delimiter=',',quoting=csv.QUOTE_ALL)
+if __name__ == "__main__":
+    for doc in docsdb.view('app/datasets'):
+        if doc.value['url'] != "http://data.gov.au/data/" and doc.value['agencyID'] != "qld":
+            # Collect the package metadata.
+            pkg_name = filter(lambda x: x in '0123456789abcdefghijklmnopqrstuvwxyz-_',
+                              doc.value['url'].replace("http://data.gov.au/dataset/", '').replace('/', '')[:100]);
+            if 'Download' in doc.value['metadata'].keys() and len(doc.value['metadata']['Download']) > 0:
+                for resource in doc.value['metadata']['Download']:
+                    # http://docs.ckan.org/en/ckan-1.7/domain-model-resource.html
+                    # (KML/KMZ) / (Shapefile) /(Other)
+                    format = "plain"
+                    if resource['format'] == '(XML)':
+                        format = 'xml'
+                    if resource['format'] == '(CSV/XLS)':
+                        format = 'csv'
+                    if resource['format'] == '(Shapefile)':
+                        format = 'shp'
+                    if resource['format'] == '(KML/KMZ)':
+                        format = 'kml'
+                    name = resource['href']
+                    if 'name' in resource.keys():
+                        name = resource['name']
+                    if resource['href'].startswith("ftp"):
+                        out.writerow([pkg_name, url_fix(resource['href']), name,format, "ftp", ""])
+                    else:
+                        try:
+                            h = httplib2.Http(disable_ssl_certificate_validation=True)
+                            resp = h.request(url_fix(resource['href']), 'HEAD')
+                            content_type = resp[0]['content-type'] if 'content-type' in resp[0].keys() else ""
+                            out.writerow([pkg_name, url_fix(resource['href']), name,format, resp[0]['status'], content_type])
+                        except httplib2.ServerNotFoundError:
+                            out.writerow([pkg_name, url_fix(resource['href']), name,format, "500","badurl"])
+            else:
+                out.writerow([pkg_name])
+
--- a/documents/datagov.py
+++ b/documents/datagov.py
@@ -39,7 +39,9 @@
                     link = item.find("a")
                     format = item.find(property="dc:format")
                     linkobj = {"href":link['href'].replace("/bye?","").strip(),
-                               "format": format.string.strip(), "size": format.next_sibling.string.strip()}
+                               "format": format.string.strip()}
+                    if format.next_sibling.string != None:
+                        linkobj["size"] = format.next_sibling.string.strip()
                    if link.string != None:
                        linkobj["name"] = link.string.strip()
                    doc['metadata'][last_title].append(linkobj)
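
Note on the new checker: datagov-resourcereport.py HEAD-requests each non-FTP resource URL (with certificate validation disabled) and appends one row to output.csv in the order package name, fixed URL, resource name, format, status ("ftp" for skipped FTP links, "500" for unresolvable hosts, otherwise the HTTP status), and content type; datasets with no Download entries get a row containing only the package name. The snippet below is a minimal sketch, not part of this commit, of one way that output.csv could be summarised afterwards; it assumes that column layout and the same Python 2.7 environment as the rest of the repository.

# Sketch: tally the status column of output.csv produced by
# documents/datagov-resourcereport.py (column order assumed from the
# writerow calls above).
import csv
from collections import Counter

status_counts = Counter()
with open("output.csv") as f:
    for row in csv.reader(f):
        if len(row) < 5:
            # Datasets with no Download entries are written as a bare package name.
            status_counts["no-downloads"] += 1
        else:
            # Column 5 holds the HTTP status, or "ftp" / "500" for skipped and bad URLs.
            status_counts[row[4]] += 1

for status, count in status_counts.most_common():
    print "%6d  %s" % (count, status)

Run it from the directory holding output.csv; a large count for anything other than a 2xx or 3xx status points at resources worth re-checking by hand.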