Experimental organization support


Former-commit-id: 0c86e9a2a6e066dc3f1550e7915892d9649ec125

import ckanclient import ckanclient
import couchdb import couchdb
from ckanclient import CkanApiError from ckanclient import CkanApiError
import re import re
  import html2text # aaronsw :(
  import ckanapi # https://github.com/open-data/ckanapi
   
   
class LoaderError(Exception):
    """Raised when a dataset or organization cannot be loaded into CKAN
    for a reason other than "it already exists"."""
    pass
   
# Instantiate the CKAN clients and the CouchDB connection.
# (De-fused from a two-column revision export; the newer, right-hand column is kept.)
#ckan = ckanclient.CkanClient(base_location='http://localhost:5000/api', api_key='b47b24cd-591d-40c1-8677-d73101d56d1b')
# SECURITY NOTE(review): an API key is committed in source here — it should be
# rotated and loaded from the environment or a config file instead.
api_key = 'b3ab75e4-afbb-465b-a09d-8171c8c69a7a'
# Classic ckanclient, used for package registration and resource upload below.
ckan = ckanclient.CkanClient(base_location='http://data.disclosurelo.gs/api',
                             api_key=api_key)
# ckanapi direct action client, used for the organization_* action API.
ckandirect = ckanapi.RemoteCKAN('http://data.disclosurelo.gs', api_key=api_key)
couch = couchdb.Server('http://127.0.0.1:5984/')
#couch = couchdb.Server('http://192.168.1.113:5984/')
   
# http://code.activestate.com/recipes/578019-bytes-to-human-human-to-bytes-converter/ # http://code.activestate.com/recipes/578019-bytes-to-human-human-to-bytes-converter/
SYMBOLS = { SYMBOLS = {
'customary': ('B', 'KB', 'MB', 'GB', 'T', 'P', 'E', 'Z', 'Y'), 'customary': ('B', 'KB', 'MB', 'GB', 'T', 'P', 'E', 'Z', 'Y'),
'customary_ext': ('byte', 'kilo', 'mega', 'giga', 'tera', 'peta', 'exa', 'customary_ext': ('byte', 'kilo', 'mega', 'giga', 'tera', 'peta', 'exa',
'zetta', 'iotta'), 'zetta', 'iotta'),
'iec': ('Bi', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi', 'Yi'), 'iec': ('Bi', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi', 'Yi'),
'iec_ext': ('byte', 'kibi', 'mebi', 'gibi', 'tebi', 'pebi', 'exbi', 'iec_ext': ('byte', 'kibi', 'mebi', 'gibi', 'tebi', 'pebi', 'exbi',
'zebi', 'yobi'), 'zebi', 'yobi'),
} }
   
def human2bytes(s): def human2bytes(s):
""" """
Attempts to guess the string format based on default symbols Attempts to guess the string format based on default symbols
set and return the corresponding bytes as an integer. set and return the corresponding bytes as an integer.
When unable to recognize the format ValueError is raised. When unable to recognize the format ValueError is raised.
   
>>> human2bytes('0 B') >>> human2bytes('0 B')
0 0
>>> human2bytes('1 K') >>> human2bytes('1 K')
1024 1024
>>> human2bytes('1 M') >>> human2bytes('1 M')
1048576 1048576
>>> human2bytes('1 Gi') >>> human2bytes('1 Gi')
1073741824 1073741824
>>> human2bytes('1 tera') >>> human2bytes('1 tera')
1099511627776 1099511627776
   
>>> human2bytes('0.5kilo') >>> human2bytes('0.5kilo')
512 512
>>> human2bytes('0.1 byte') >>> human2bytes('0.1 byte')
0 0
>>> human2bytes('1 k') # k is an alias for K >>> human2bytes('1 k') # k is an alias for K
1024 1024
>>> human2bytes('12 foo') >>> human2bytes('12 foo')
Traceback (most recent call last): Traceback (most recent call last):
... ...
ValueError: can't interpret '12 foo' ValueError: can't interpret '12 foo'
""" """
init = s init = s
num = "" num = ""
while s and s[0:1].isdigit() or s[0:1] == '.': while s and s[0:1].isdigit() or s[0:1] == '.':
num += s[0] num += s[0]
s = s[1:] s = s[1:]
num = float(num) num = float(num)
letter = s.strip() letter = s.strip()
for name, sset in SYMBOLS.items(): for name, sset in SYMBOLS.items():
if letter in sset: if letter in sset:
break break
else: else:
if letter == 'k': if letter == 'k':
# treat 'k' as an alias for 'K' as per: http://goo.gl/kTQMs # treat 'k' as an alias for 'K' as per: http://goo.gl/kTQMs
sset = SYMBOLS['customary'] sset = SYMBOLS['customary']
letter = letter.upper() letter = letter.upper()
else: else:
raise ValueError("can't interpret %r" % init) raise ValueError("can't interpret %r" % init)
prefix = {sset[0]: 1} prefix = {sset[0]: 1}
for i, s in enumerate(sset[1:]): for i, s in enumerate(sset[1:]):
prefix[s] = 1 << (i + 1) * 10 prefix[s] = 1 << (i + 1) * 10
return int(num * prefix[letter]) return int(num * prefix[letter])
   
# https://github.com/okfn/ckanext-importlib
def munge(name):
    """Lower-case *name* and strip it down to CKAN-safe URL characters.

    Spaces become underscores, ':' becomes '_-', '/' becomes '-', anything
    outside [a-zA-Z0-9-_] is dropped, and double underscores are collapsed.
    """
    # convert spaces to underscores
    name = re.sub(' ', '_', name).lower()
    # convert symbols to dashes
    name = re.sub('[:]', '_-', name).lower()
    name = re.sub('[/]', '-', name).lower()
    # take out not-allowed characters
    name = re.sub('[^a-zA-Z0-9-_]', '', name).lower()
    # remove double underscores
    name = re.sub('__', '_', name).lower()
    return name


#todo "{'name': [u'Url must be purely lowercase alphanumeric (ascii) characters and these symbols: -_']}"
# http://data.gov.au/dataset/australian-domestic-regional-and-international-airline-activity-%E2%80%93-time-series/
def name_munge(input_name):
    """Build a CKAN package name: drop spaces, map '.' to '_' and '&' to
    'and', then apply munge()."""
    return munge(input_name.replace(' ', '').replace('.', '_').replace('&', 'and'))
    #[:100]
#return input_name.replace(' ', '').replace('.', '_').replace('&', 'and')
   
# NOTE(review): leftover from the older revision (left column of the fused
# two-column export) — the newer revision creates `couch` near the top of the
# file instead. Harmless duplicate connection object; candidate for removal.
couch = couchdb.Server('http://127.0.0.1:5984/')
def get_licence_id(licencename):
    """Map a scraped licence description string to a CKAN licence id.

    Raises Exception (message "<name> not found") for unmapped strings so the
    caller notices new licence texts instead of silently mislabelling them.
    """
    # Renamed from `map` to avoid shadowing the builtin.
    licence_map = {
        "Creative Commons - Attribution-Share Alike 2.0 Australia (CC-SA)\nThe downloadable version of the database is licensed under CC-BY-SA Creative Commons Attribution Share Alike and contains only the database fields that are released under that license. These fields are object title, object number, object description as well as temporal, spatial and dimension details. It also contains a persistent URL for each record.": 'cc-by-sa',
        "CreativeCommonsAttributionNonCommercial30AustraliaCCBYNC30": 'cc-nc',
        'Otherpleasespecify': 'notspecified',
        '': 'notspecified',
        "Publicly available data": 'notspecified',
        "CreativeCommonsAttributionNoDerivativeWorks30AustraliaCCBYND30": "other-closed",
        "CreativeCommonsAttributionNonCommercialNoDerivs30AustraliaCCBYNCND30": "other-closed",
        'CreativeCommonsAttribution30AustraliaCCBY30': 'cc-by',
        "Creative Commons - Attribution 2.5 Australia (CC-BY)": 'cc-by',
        'CreativeCommonsAttributionCCBY25': 'cc-by',
        "PublicDomain": 'other-pd',
    }
    if licencename not in licence_map:
        raise Exception(licencename + " not found")
    return licence_map[licencename]
   
# CouchDB database that holds the scraped documents/datasets read below.
docsdb = couch['disclosr-documents']
   
if __name__ == "__main__":
    # Reconstructed from the newer (right-hand) column of the fused two-column
    # export. Python 2 script; written with single-argument print(...) and
    # `except ... as e` so it parses under Python 2.6+ and 3 alike.
    # NOTE(review): indentation of the AGIMO/qld branches was inferred — verify
    # against the original revision.
    orgs_list = []
    for doc in docsdb.view('app/datasets'):
        print(doc.id)

        # Skip the landing page itself and QLD docs (QLD metadata is handled
        # as a pre-built package_entity, but the guard below excludes it).
        if doc.value['url'] != "http://data.gov.au/data/" and doc.value['agencyID'] != "qld":

            # Collect the package metadata.
            # Package name = dataset URL slug, restricted to CKAN-legal chars.
            pkg_name = filter(lambda x: x in '0123456789abcdefghijklmnopqrstuvwxyz-_',
                              doc.value['url'].replace("http://data.gov.au/dataset/", '').replace('/', '')[:100])
            print(pkg_name)

            #add to or create organization using direct API
            org_name = name_munge(doc.value['metadata']["Agency"][:100])
            if org_name not in orgs_list:
                # Refresh the cached organization list before deciding to create.
                orgs_list = ckandirect.action.organization_list()['result']
                #print orgs_list
                if org_name not in orgs_list:
                    try:
                        print("org not found, creating " + org_name)
                        ckandirect.action.organization_create(name=org_name, title=doc.value['metadata']["Agency"],
                                                              description=doc.value['metadata']["Agency"])
                        orgs_list.append(org_name)
                    except ckanapi.ValidationError as e:
                        print(e)
                        raise LoaderError('Unexpected status')
                else:
                    print("org found, adding dataset to " + org_name)

            org = ckandirect.action.organization_show(id=org_name)
            # todo cache org names -> id mapping

            # Gather tags from "Keywords / Tags" and "data.gov.au Category",
            # each of which may be a single string or a list.
            tags = []
            if doc.value['agencyID'] == "AGIMO":
                if len(doc.value['metadata']["Keywords / Tags"]) > 0:
                    if hasattr(doc.value['metadata']["Keywords / Tags"], '__iter__'):
                        tags = tags + doc.value['metadata']["Keywords / Tags"]
                    else:
                        tags = tags + [doc.value['metadata']["Keywords / Tags"]]
                if 'data.gov.au Category' in doc.value['metadata'].keys() and len(doc.value['metadata']['data.gov.au Category']) > 0:
                    if hasattr(doc.value['metadata']['data.gov.au Category'], '__iter__'):
                        tags = tags + doc.value['metadata']['data.gov.au Category']
                    else:
                        tags = tags + [doc.value['metadata']['data.gov.au Category']]
                # Normalise tags to CKAN-safe lowercase tokens, dropping empties.
                tags = [re.sub('[^a-zA-Z0-9-_.]', '', tag.replace('&', 'and')).lower() for tag in tags if tag]
                #print tags
                package_entity = {
                    'name': pkg_name,
                    'title': doc.value['metadata']['DCTERMS.Title'],
                    'url': doc.value['metadata']['DCTERMS.Source.URI'],
                    'tags': tags,  #tags are mandatory?
                    'author': doc.value['metadata']["DCTERMS.Creator"],
                    'maintainer': doc.value['metadata']["DCTERMS.Creator"],
                    'licence_id': get_licence_id(doc.value['metadata']['DCTERMS.License']),
                    'notes': html2text.html2text(doc.value['metadata']['Description']),
                    'owner_org': org["result"]["id"]
                    #todo add missing key values like jurasdiction
                }
            # NOTE(review): unreachable given the outer `!= "qld"` guard above.
            if doc.value['agencyID'] == "qld":
                package_entity = doc.value['metadata']

            try:
                #print package_entity
                ckan.package_register_post(package_entity)
            except CkanApiError as e:
                if ckan.last_message == "{\"name\": [\"That URL is already in use.\"]}":
                    print("package already exists")
                else:
                    print(ckan.last_message)
                    raise LoaderError('Unexpected status %s checking for package under \'%s\': %r' % (
                        ckan.last_status, pkg_name, e.args))
            pkg = ckan.package_entity_get(pkg_name)

            # Attach download links as resources, but only when the stored
            # package has fewer resources than the scraped metadata lists.
            if 'Download' in doc.value['metadata'].keys():
                try:
                    resources = pkg.get('resources', [])
                    if len(resources) < len(doc.value['metadata']['Download']):
                        for resource in doc.value['metadata']['Download']:
                            # http://docs.ckan.org/en/ckan-1.7/domain-model-resource.html
                            # (KML/KMZ) / (Shapefile) /(Other)
                            format = "plain"
                            if resource['format'] == '(XML)':
                                format = 'xml'
                            if resource['format'] == '(CSV/XLS)':
                                format = 'csv'
                            name = resource['href']
                            if 'name' in resource.keys():
                                name = resource['name']
                            print(resource)
                            ckan.add_package_resource(pkg_name, resource['href'], name=name, resource_type='data',
                                                      format=format, size=human2bytes(resource['size'].replace(',', '')))
                    else:
                        print("resources already exist")
                except CkanApiError as e:
                    if ckan.last_status == 404:
                        print("parent dataset does not exist")
                    else:
                        raise LoaderError('Unexpected status %s checking for package under \'%s\': %r' % (
                            ckan.last_status, pkg_name, e.args))
   
<?php
// Lists the data.gov.au datasets recorded in the disclosr-documents CouchDB,
// printing "<slug> => <couch doc id>" lines sorted by slug.

include_once("../include/common.inc.php");


setlocale(LC_CTYPE, 'C');

$db = $server->get_db('disclosr-documents');
$datasets = Array();
try {
// Each view row: value->url is the dataset page URL, id is the couch doc id.
$rows = $db->get_view("app", "datasets", null, true)->rows;

foreach ($rows as $row) {
//print_r($row);
// Skip the catalogue landing page; key datasets by their URL slug.
if ($row->value->url != "http://data.gov.au/data/")
$datasets[str_replace(Array("http://data.gov.au/dataset/","/"),"",$row->value->url)] = $row->id;
}
} catch (SetteeRestClientException $e) {
setteErrorHandler($e);
}
ksort($datasets);
foreach ($datasets as $datasetname => $datasetkey) {
print "$datasetname => $datasetkey<br>\n";
}
?>
 
import sys, os import sys, os
import time import time
import scrape import scrape
from bs4 import BeautifulSoup from bs4 import BeautifulSoup
   
from unidecode import unidecode from unidecode import unidecode
   
# Fetch (and cache in couch via the project `scrape` module) the data.gov.au
# catalogue listing page, then parse it for dataset links.
listurl = "http://data.gov.au/data/"
(url, mime_type, datasetlisthtml) = scrape.fetchURL(scrape.docsdb,
                                                    listurl, "data", "AGIMO")
soup = BeautifulSoup(datasetlisthtml)
for atag in soup.find_all(class_='result-title'): for atag in soup.find_all(class_='result-title'):
if atag.has_key('href'): if atag.has_key('href'):
url = scrape.fullurl(listurl, atag['href']) url = scrape.fullurl(listurl, atag['href'])
(url, mime_type, html) = scrape.fetchURL(scrape.docsdb, (url, mime_type, html) = scrape.fetchURL(scrape.docsdb,
url, "data", "AGIMO") url, "data", "AGIMO", False)
hash = scrape.mkhash(scrape.canonurl(url)) hash = scrape.mkhash(scrape.canonurl(url))
doc = scrape.docsdb.get(hash) doc = scrape.docsdb.get(hash)
if "metadata" not in doc.keys() or True: if "metadata" not in doc.keys() or True:
doc['type'] = "dataset" doc['type'] = "dataset"
doc['metadata'] = {} doc['metadata'] = {}
soup = BeautifulSoup(html) soup = BeautifulSoup(html)
for metatag in soup.find_all('meta'): for metatag in soup.find_all('meta'):
if metatag.has_key('name'): if metatag.has_key('name'):
doc['metadata'][metatag['name']] = metatag['content'] doc['metadata'][metatag['name']] = metatag['content']
for list in soup.find_all('dl'): for list in soup.find_all('dl'):
last_title = "" last_title = ""
for child in list.children: for child in list.children:
if str(type(child)) != "<class 'bs4.element.NavigableString'>": if str(type(child)) != "<class 'bs4.element.NavigableString'>":
if child.name == 'dt' and child.string != None: if child.name == 'dt' and child.string != None:
last_title = child.string.strip() last_title = child.string.strip()
if child.name == 'dd': if child.name == 'dd':
#print last_title #print last_title