From: Maxious
Date: Mon, 11 Feb 2013 04:01:14 +0000
Subject: beginning of export merge
X-Git-Url: http://maxious.lambdacomplex.org/git/?p=disclosr.git&a=commitdiff&h=9c58159563f81ffc5ef77d816bea2ff4a8621c4f

---
beginning of export merge

Former-commit-id: 121a09aa3f9417f0512b4cb138190070dc9da890
---

--- a/documents/datagov-export.py
+++ b/documents/datagov-export.py
@@ -2,14 +2,19 @@
 import couchdb
 from ckanclient import CkanApiError
 import re
+import html2text # aaronsw :(
+

 class LoaderError(Exception):
     pass

 # Instantiate the CKAN client.
-ckan = ckanclient.CkanClient(base_location='http://localhost:5000/api',
-                             api_key='b47b24cd-591d-40c1-8677-d73101d56d1b')
-# (use your own api_key from http://thedatahub.org/user/me )
+#ckan = ckanclient.CkanClient(base_location='http://localhost:5000/api', api_key='b47b24cd-591d-40c1-8677-d73101d56d1b')
+ckan = ckanclient.CkanClient(base_location='http://data.disclosurelo.gs/api',
+                             api_key='482a9dd2-a976-4adf-ac77-d71d92a98a52')
+#couch = couchdb.Server('http://127.0.0.1:5984/')
+couch = couchdb.Server('http://192.168.1.113:5984/')
+
 # http://code.activestate.com/recipes/578019-bytes-to-human-human-to-bytes-converter/
 SYMBOLS = {
     'customary': ('B', 'KB', 'MB', 'GB', 'T', 'P', 'E', 'Z', 'Y'),
@@ -88,7 +93,25 @@
     return munge(input_name.replace(' ', '').replace('.', '_').replace('&', 'and'))
     #return input_name.replace(' ', '').replace('.', '_').replace('&', 'and')

-couch = couchdb.Server('http://127.0.0.1:5984/')
+
+def get_licence_id(licencename):
+    map = {
+        "Creative Commons - Attribution-Share Alike 2.0 Australia (CC-SA)\nThe downloadable version of the database is licensed under CC-BY-SA Creative Commons Attribution Share Alike and contains only the database fields that are released under that license. These fields are object title, object number, object description as well as temporal, spatial and dimension details. It also contains a persistent URL for each record.": 'cc-by-sa',
+        "CreativeCommonsAttributionNonCommercial30AustraliaCCBYNC30": 'cc-nc',
+        'Otherpleasespecify': 'notspecified',
+        '': 'notspecified',
+        "Publicly available data": 'notspecified',
+        "CreativeCommonsAttributionNoDerivativeWorks30AustraliaCCBYND30": "other-closed",
+        "CreativeCommonsAttributionNonCommercialNoDerivs30AustraliaCCBYNCND30": "other-closed",
+        'CreativeCommonsAttribution30AustraliaCCBY30': 'cc-by',
+        "Creative Commons - Attribution 2.5 Australia (CC-BY)": 'cc-by',
+        'CreativeCommonsAttributionCCBY25': 'cc-by',
+        "PublicDomain": 'other-pd',
+    }
+    if licencename not in map.keys():
+        raise Exception(licencename + " not found");
+    return map[licencename];
+
 docsdb = couch['disclosr-documents']

 if __name__ == "__main__":
@@ -96,41 +119,79 @@
         print doc.id
         if doc.value['url'] != "http://data.gov.au/data/":
             # Collect the package metadata.
-            pkg_name = name_munge(doc.value['metadata']['DCTERMS.Title'][:100])
-            tags = doc.value['metadata']["Keywords / Tags"]
-            if not hasattr(tags, '__iter__'):
-                tags = [tags]
-            [re.sub('[^a-zA-Z0-9-_]', '', tag).lower() for tag in tags]
+            pkg_name = doc.value['url'].replace("http://data.gov.au/dataset/",'').replace('/','');
+            tags = []
+            if len(doc.value['metadata']["Keywords / Tags"]) > 0:
+                if hasattr(doc.value['metadata']["Keywords / Tags"], '__iter__'):
+                    tags = tags + doc.value['metadata']["Keywords / Tags"]
+                else:
+                    tags = tags + [doc.value['metadata']["Keywords / Tags"]]
+            if 'data.gov.au Category' in doc.value['metadata'].keys() and len(doc.value['metadata']['data.gov.au Category']) > 0:
+                if hasattr(doc.value['metadata']['data.gov.au Category'], '__iter__'):
+                    tags = tags + doc.value['metadata']['data.gov.au Category']
+                else:
+                    tags = tags + [doc.value['metadata']['data.gov.au Category']]
+            tags = [re.sub('[^a-zA-Z0-9-_.]', '', tag.replace('&', 'and')).lower() for tag in tags if tag]
+            print tags
             package_entity = {
                 'name': pkg_name,
                 'title': doc.value['metadata']['DCTERMS.Title'],
                 'url': doc.value['metadata']['DCTERMS.Source.URI'],
-                'tags': tags,
+                'tags': tags, #tags are mandatory?
                 'author': doc.value['metadata']["DCTERMS.Creator"],
                 'maintainer': doc.value['metadata']["DCTERMS.Creator"],
-                'licence_id': doc.value['metadata']['DCTERMS.License'], #todo licence id mapping
-                'notes': doc.value['metadata']['Description'],
+                'licence_id': get_licence_id(doc.value['metadata']['DCTERMS.License']),
+                'notes': html2text.html2text(doc.value['metadata']['Description']),
             }
+
             try:
-                #print doc.id
+                print package_entity
                 ckan.package_register_post(package_entity)
             except CkanApiError, e:
                 if ckan.last_status == 409:
-                    print "already exists"
+                    print "package already exists"
                 else:
                     raise LoaderError('Unexpected status %s checking for package under \'%s\': %r' % (
                         ckan.last_status, pkg_name, e.args))
-            print package_entity
-            #todo add to organisation (author/creator/maintainer)
-            #if 'data.gov.au Category' in doc.value['metadata'].keys(): #todo add to group
+
+            #add to group
+
+            group_name = name_munge(doc.value['metadata']["Agency"][:100])
+            try:
+                print ckan.group_entity_get(group_name)
+
+                # Update the group details
+                group_entity = ckan.last_message
+                print "group exists"
+                if 'packages' in group_entity.keys():
+                    group_entity['packages'] = list(set(group_entity['packages'] + [pkg_name]))
+                else:
+                    group_entity['packages'] = [pkg_name]
+                ckan.group_entity_put(group_entity)
+            except CkanApiError, e:
+                if ckan.last_status == 404:
+                    print "group does not exist, creating"
+                    group_entity = {
+                        'name': group_name,
+                        'title': doc.value['metadata']["Agency"],
+                        'description': doc.value['metadata']["Agency"],
+                        'packages': [pkg_name],
+                        # 'type': "organization" # not allowed via API, use database query
+                        # update "group" set type = 'organization';
+                    }
+                    print group_entity
+                    ckan.group_register_post(group_entity)
+                else:
+                    raise LoaderError('Unexpected status %s adding to group under \'%s\': %r' % (
+                        ckan.last_status, pkg_name, e.args))
             if 'Download' in doc.value['metadata'].keys():
                 try:
                     pkg = ckan.package_entity_get(pkg_name)
                     resources = pkg.get('resources', [])
                     if len(resources) < len(doc.value['metadata']['Download']):
                         for resource in doc.value['metadata']['Download']:
-                            #print resource
+
                             # http://docs.ckan.org/en/ckan-1.7/domain-model-resource.html
                             # (KML/KMZ) / (Shapefile) /(Other)
                             format = "plain"
@@ -141,6 +202,7 @@
                             name = resource['href']
                             if 'name' in resource.keys():
                                 name = resource['name']
+                            print resource
                             ckan.add_package_resource(pkg_name, resource['href'], name=name, resource_type='data',
                                                       format=format, size=human2bytes(resource['size'].replace(',', '')))
                     else:
--- /dev/null
+++ b/documents/datagov-merge.php
@@ -1,1 +1,26 @@
+<?php
+$db = $server->get_db('disclosr-documents');
+$datasets = Array();
+try {
+    $rows = $db->get_view("app", "datasets", null, true)->rows;
+
+    foreach ($rows as $row) {
+        //print_r($row);
+        if ($row->value->url != "http://data.gov.au/data/")
+            $datasets[str_replace(Array("http://data.gov.au/dataset/","/"),"",$row->value->url)] = $row->id;
+    }
+} catch (SetteeRestClientException $e) {
+    setteErrorHandler($e);
+}
+ksort($datasets);
+foreach ($datasets as $datasetname => $datasetkey) {
+    print "$datasetname => $datasetkey\n";
+}
+?>
+
--- a/documents/datagov.py
+++ b/documents/datagov.py
@@ -13,7 +13,7 @@
     if atag.has_key('href'):
         url = scrape.fullurl(listurl, atag['href'])
         (url, mime_type, html) = scrape.fetchURL(scrape.docsdb,
-                                                 url, "data", "AGIMO")
+                                                 url, "data", "AGIMO", False)
         hash = scrape.mkhash(scrape.canonurl(url))
         doc = scrape.docsdb.get(hash)
         if "metadata" not in doc.keys() or True:
--- a/documents/scrape.py
+++ b/documents/scrape.py
@@ -12,6 +12,11 @@
 import urlparse
 import socket

+#couch = couchdb.Server('http://192.168.1.148:5984/')
+couch = couchdb.Server('http://192.168.1.113:5984/')
+#couch = couchdb.Server('http://127.0.0.1:5984/')
+
+
 def mkhash(input):
     return hashlib.md5(input).hexdigest().encode("utf-8")

@@ -104,14 +109,11 @@
     if doc == None:
         doc = {'_id': hash, 'agencyID': agencyID, 'url': url, 'fieldName': fieldName, 'type': 'website'}
     else:
-        if (('page_scraped' in doc) and (time.time() - doc['page_scraped']) < 60 * 24 * 14):
+        if (('page_scraped' in doc) and ((time.time() - doc['page_scraped']) < 60 * 24 * 14) or (scrape_again == False)):
             print "Uh oh, trying to scrape URL again too soon!" + hash
             last_attachment_fname = doc["_attachments"].keys()[-1]
             last_attachment = docsdb.get_attachment(doc, last_attachment_fname)
             content = last_attachment
-            return (doc['url'], doc['mime_type'], content.read())
-        if scrape_again == False:
-            print "Not scraping this URL again as requested"
             return (doc['url'], doc['mime_type'], content.read())

     req.add_header("User-Agent", "Mozilla/4.0 (compatible; Prometheus webspider; owner maxious@lambdacomplex.org)")
@@ -207,9 +209,6 @@
                 #print linkurl
                 scrapeAndStore(docsdb, linkurl, depth - 1, fieldName, agencyID)

-#couch = couchdb.Server('http://192.168.1.148:5984/')
-#couch = couchdb.Server('http://192.168.1.113:5984/')
-couch = couchdb.Server('http://127.0.0.1:5984/')
 # select database
 agencydb = couch['disclosr-agencies']
 docsdb = couch['disclosr-documents']