use tempfile for datagov resource upload

Former-commit-id: 80eae32f084aac5c6ff933def7e5cb072c21b8fc

<?php

require_once '../include/common.inc.php';
//function createFOIDocumentsDesignDoc() {

$foidb = $server->get_db('disclosr-foidocuments');
$obj = new stdClass();
$obj->_id = "_design/" . urlencode("app");
$obj->language = "javascript";
$obj->views->all->map = "function(doc) { emit(doc._id, doc); };";
$obj->views->byDate->map = "function(doc) { emit(doc.date, doc); };";
$obj->views->byDateMonthYear->map = "function(doc) { emit(doc.date, doc); };";
$obj->views->byDateMonthYear->reduce = "_count";
$obj->views->byAgencyID->map = "function(doc) { emit(doc.agencyID, doc); };";
$obj->views->byAgencyID->reduce = "_count";
$obj->views->fieldNames->map = '
function(doc) {
    for(var propName in doc) {
        emit(propName, doc._id);
    }
}';
$obj->views->fieldNames->reduce = 'function (key, values, rereduce) {
    return values.length;
}';
// allow safe updates (even if slightly slower due to the extra rev-detection check).
$foidb->save($obj, true);
   
   
//function createDocumentsDesignDoc() {
$docdb = $server->get_db('disclosr-documents');

$obj = new stdClass();
$obj->_id = "_design/" . urlencode("app");
$obj->language = "javascript";
$obj->views->web_server->map = "function(doc) {\n emit(doc.web_server, 1);\n}";
$obj->views->web_server->reduce = "_sum";
$obj->views->byAgency->map = "function(doc) {\n emit(doc.agencyID, 1);\n}";
$obj->views->byAgency->reduce = "_sum";
$obj->views->byURL->map = "function(doc) {\n emit(doc.url, doc);\n}";
$obj->views->agency->map = "function(doc) {\n emit(doc.agencyID, doc);\n}";
$obj->views->byWebServer->map = "function(doc) {\n emit(doc.web_server, doc);\n}";

$obj->views->datasets->map = "function(doc) {\nif (doc.fieldName == \"data\") {\n emit(doc._id, doc);\n}\n}";
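// datasetGroups emits one (category tag, dataset URL) row per data.gov.au
// category; the group importer script reads this view as app/datasetGroups.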
$obj->views->datasetGroups->map = "function(doc) {\nif (doc.fieldName == \"data\") {\n doc.metadata[\"data.gov.au Category\"] && doc.metadata[\"data.gov.au Category\"].forEach(function(tag) {\n emit(tag, doc.url); \n });\n}\n}";
$obj->views->getValidationRequired->map = "function(doc) {\nif (doc.mime_type == \"text/html\" \n&& typeof(doc.validation) == \"undefined\") {\n emit(doc._id, doc._attachments);\n}\n}";
$docdb->save($obj, true);
   
   
   
   
//function createAgencyDesignDoc() {
$db = $server->get_db('disclosr-agencies');
$obj = new stdClass();
$obj->_id = "_design/" . urlencode("app");
$obj->language = "javascript";
$obj->views->all->map = "function(doc) { emit(doc._id, doc); };";
$obj->views->byABN->map = "function(doc) { emit(doc.abn, doc); };";
$obj->views->byCanonicalName->map = "function(doc) {
    if (doc.parentOrg || doc.orgType == 'FMA-DepartmentOfState') {
        emit(doc.name, doc);
    }
};";
$obj->views->byDeptStateName->map = "function(doc) {
    if (doc.orgType == 'FMA-DepartmentOfState') {
        emit(doc.name, doc._id);
    }
};";
$obj->views->parentOrgs->map = "function(doc) {
    if (doc.parentOrg) {
        emit(doc._id, doc.parentOrg);
    }
};";
$obj->views->byName->map = 'function(doc) {
    if (typeof(doc["status"]) == "undefined" || doc["status"] != "suspended") {
        emit(doc.name, doc._id);
        if (typeof(doc.shortName) != "undefined" && doc.shortName != doc.name) {
            emit(doc.shortName, doc._id);
        }
        for (name in doc.otherNames) {
            if (doc.otherNames[name] != "" && doc.otherNames[name] != doc.name) {
                emit(doc.otherNames[name], doc._id);
            }
        }
        for (name in doc.foiBodies) {
            if (doc.foiBodies[name] != "" && doc.foiBodies[name] != doc.name) {
                emit(doc.foiBodies[name], doc._id);
            }
        }
        for (name in doc.positions) {
            if (doc.positions[name] != "" && doc.positions[name] != doc.name) {
                emit(doc.positions[name], doc._id);
            }
        }
    }
};';

$obj->views->foiEmails->map = "function(doc) {
    emit(doc._id, doc.foiEmail);
};";

$obj->views->byLastModified->map = "function(doc) { emit(doc.metadata.lastModified, doc); }";
$obj->views->getActive->map = 'function(doc) { if (doc.status == "active") { emit(doc._id, doc); } };';
$obj->views->getSuspended->map = 'function(doc) { if (doc.status == "suspended") { emit(doc._id, doc); } };';
$obj->views->getScrapeRequired->map = "function(doc) {

    var lastScrape = Date.parse(doc.metadata.lastScraped);

    var today = new Date();

    if (!lastScrape || lastScrape + 1000 != today.getTime()) {
        emit(doc._id, doc);
    }

};";
$obj->views->showNamesABNs->map = "function(doc) { emit(doc._id, {name: doc.name, abn: doc.abn}); };";
$obj->views->getConflicts->map = "function(doc) {
    if (doc._conflicts) {
        emit(null, [doc._rev].concat(doc._conflicts));
    }
}";
$obj->views->getStatistics->map =
    "function(doc) {
    if (doc.statistics) {
        for (var statisticSet in doc.statistics) {
            for (var statisticPeriod in doc.statistics[statisticSet]) {
                emit([statisticSet,statisticPeriod], doc.statistics[statisticSet][statisticPeriod]['value']);
            }
        }
    }
}";
$obj->views->getStatistics->reduce = '_sum';
// http://stackoverflow.com/questions/646628/javascript-startswith
$obj->views->score->map = 'if(!String.prototype.startsWith){
    String.prototype.startsWith = function (str) {
        return !this.indexOf(str);
    }
}

function(doc) {
    count = 0;
    if (doc["status"] != "suspended") {
        for(var propName in doc) {
            if(typeof(doc[propName]) != "undefined" && doc[propName] != "") {
                count++;
            }
        }
        portfolio = doc.parentOrg;
        if (doc.orgType == "FMA-DepartmentOfState") {
            portfolio = doc._id;
        }
        if (doc.orgType == "Court-Commonwealth" || doc.orgType == "FMA-DepartmentOfParliament") {
            portfolio = doc.orgType;
        }
        emit(count+doc._id, {id:doc._id, name: doc.name, score:count, orgType: doc.orgType, portfolio:portfolio});
    }
}';
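// score counts the non-empty fields of each non-suspended agency as a rough
// completeness score and emits count concatenated with doc._id as the key.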
$obj->views->scoreHas->map = 'if(!String.prototype.startsWith){
    String.prototype.startsWith = function (str) {
        return !this.indexOf(str);
    }
}
if(!String.prototype.endsWith){
    String.prototype.endsWith = function(suffix) {
        return this.indexOf(suffix, this.length - suffix.length) !== -1;
    };
}
function(doc) {
    if (typeof(doc["status"]) == "undefined" || doc["status"] != "suspended") {
        for(var propName in doc) {
            if(typeof(doc[propName]) != "undefined" && (propName.startsWith("has") || propName.endsWith("URL"))) {
                emit(propName, 1);
            }
        }
        emit("total", 1);
    }
}';
$obj->views->scoreHas->reduce = '_sum';
$obj->views->fieldNames->map = '
function(doc) {
    for(var propName in doc) {
        emit(propName, doc._id);
    }
}';
$obj->views->fieldNames->reduce = '_count';
// allow safe updates (even if slightly slower due to the extra rev-detection check).
$db->save($obj, true);
?>
   
file:b/disclosr.iml (new)
<?xml version="1.0" encoding="UTF-8"?>
<module type="WEB_MODULE" version="4">
  <component name="FacetManager">
    <facet type="Python" name="Python">
      <configuration sdkName="" />
    </facet>
  </component>
  <component name="NewModuleRootManager" inherit-compiler-output="true">
    <exclude-output />
    <content url="file://$MODULE_DIR$" />
    <orderEntry type="inheritedJdk" />
    <orderEntry type="sourceFolder" forTests="false" />
  </component>
</module>
 
 
import ckanclient
import couchdb
from ckanclient import CkanApiError
import re


class LoaderError(Exception):
    pass

# Instantiate the CKAN client.
#ckan = ckanclient.CkanClient(base_location='http://localhost:5000/api', api_key='b47b24cd-591d-40c1-8677-d73101d56d1b')
api_key = 'ff34526e-f794-4068-8235-fcbba38cd8bc'
ckan = ckanclient.CkanClient(base_location='http://data.disclosurelo.gs/api',
                             api_key=api_key)
couch = couchdb.Server('http://127.0.0.1:5984/')
#couch = couchdb.Server('http://192.168.1.113:5984/')

# https://github.com/okfn/ckanext-importlib
def munge(name):
    # convert spaces to underscores
    name = re.sub(' ', '_', name).lower()
    # convert symbols to dashes
    name = re.sub('[:]', '_-', name).lower()
    name = re.sub('[/]', '-', name).lower()
    # take out not-allowed characters
    name = re.sub('[^a-zA-Z0-9-_]', '', name).lower()
    # remove double underscores
    name = re.sub('__', '_', name).lower()
    return name
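# e.g. munge('Foo Bar: Baz/Qux') -> 'foo_bar_-_baz-qux'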
 
 
def name_munge(input_name):
    return munge(input_name.replace(' ', '').replace('.', '_').replace('&', 'and'))


docsdb = couch['disclosr-documents']

if __name__ == "__main__":
    groups = {}
    for doc in docsdb.view('app/datasetGroups'):
        group_name = doc.key
        if group_name != "Not specified":
            pkg_name = filter(lambda x: x in '0123456789abcdefghijklmnopqrstuvwxyz-_',
                              doc.value.replace("http://data.gov.au/dataset/", '').replace('/', '')[:100]);
            if group_name in groups.keys():
                groups[group_name] = list(set(groups[group_name] + [pkg_name]))
            else:
                groups[group_name] = [pkg_name]

    # add dataset to group(s)
    for group_name in groups.keys():
        if group_name != "Not specified":
            group_url = name_munge(group_name[:100])
            print group_name
            print groups[group_name]
            try:
                # Update the group details
                group_entity = ckan.group_entity_get(group_url)
                print "group " + group_name + " exists"
                if 'packages' in group_entity.keys():
                    group_entity['packages'] = list(set(group_entity['packages'] + groups[group_name]))
                else:
                    group_entity['packages'] = groups[group_name]
                ckan.group_entity_put(group_entity)
            except CkanApiError, e:
                if ckan.last_status == 404:
                    print "group " + group_name + " does not exist, creating"
                    group_entity = {
                        'name': group_url,
                        'title': group_name,
                        'description': group_name,
                        'packages': groups[group_name]
                    }
                    #print group_entity
                    ckan.group_register_post(group_entity)
                elif ckan.last_status == 409:
                    print "group already exists"
                else:
                    raise LoaderError('Unexpected status %s adding to group under \'%s\': %r' % (
                        ckan.last_status, group_name, e.args))
# coding=utf-8
import ckanclient
import couchdb
from ckanclient import CkanApiError
import re
import html2text # aaronsw :(
import ckanapi # https://github.com/open-data/ckanapi
import scrape
import datetime, os, hashlib
import tempfile


class LoaderError(Exception):
    pass


def add_package_resource_cachedurl(ckan, package_name, url, name, format, size, **kwargs):
    # Fetch the resource through the scraper cache, write it to a temporary
    # file, then upload that local copy to CKAN instead of hotlinking the URL.
    (url, mime_type, content) = scrape.fetchURL(scrape.docsdb,
                                                url, "dataset_resource", "AGIMO", False)
    tf = tempfile.NamedTemporaryFile()
    tfName = tf.name
    tf.write(content)
    tf.flush()
    ckan.add_package_resource(package_name, tfName, name=name, format=format,
                              size=size, **kwargs)
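# Illustrative call (package name and URL are hypothetical), matching how the
# main loop below hands each Download resource to this helper:
#   add_package_resource_cachedurl(ckan, 'sample-dataset',
#                                  url_fix('http://data.gov.au/storage/f/sample.csv'),
#                                  'sample.csv', 'csv', human2bytes('10 KB'),
#                                  resource_type='data')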
   
   
   
# Instantiate the CKAN client.
api_key = 'ff34526e-f794-4068-8235-fcbba38cd8bc'
server = 'data.disclosurelo.gs'

ckan = ckanclient.CkanClient(base_location='http://' + server + '/api',
                             api_key=api_key)
ckandirect = ckanapi.RemoteCKAN('http://' + server, api_key=api_key)
couch = couchdb.Server('http://127.0.0.1:5984/')
#couch = couchdb.Server('http://192.168.1.113:5984/')
import urllib
import urlparse


def url_fix(s, charset='utf-8'):
    """Sometimes you get an URL by a user that just isn't a real
    URL because it contains unsafe characters like ' ' and so on. This
    function can fix some of the problems in a similar way browsers
    handle data entered by the user:

    >>> url_fix(u'http://de.wikipedia.org/wiki/Elf (Begriffsklärung)')
    'http://de.wikipedia.org/wiki/Elf%20%28Begriffskl%C3%A4rung%29'

    :param charset: The target charset for the URL if the url was
    given as unicode string.
    """
    if isinstance(s, unicode):
        s = s.encode(charset, 'ignore')
    if not urlparse.urlparse(s).scheme:
        s = "http://" + s
    scheme, netloc, path, qs, anchor = urlparse.urlsplit(s)
    path = urllib.quote(path, '/%')
    qs = urllib.quote_plus(qs, ':&=')
    return urlparse.urlunsplit((scheme, netloc, path, qs, anchor))
# http://code.activestate.com/recipes/578019-bytes-to-human-human-to-bytes-converter/
SYMBOLS = {
    'customary': ('B', 'KB', 'MB', 'GB', 'T', 'P', 'E', 'Z', 'Y'),
    'customary_ext': ('byte', 'kilo', 'mega', 'giga', 'tera', 'peta', 'exa',
                      'zetta', 'iotta'),
    'iec': ('Bi', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi', 'Yi'),
    'iec_ext': ('byte', 'kibi', 'mebi', 'gibi', 'tebi', 'pebi', 'exbi',
                'zebi', 'yobi'),
}


def human2bytes(s):
    """
    Attempts to guess the string format based on default symbols
    set and return the corresponding bytes as an integer.
    When unable to recognize the format ValueError is raised.

    >>> human2bytes('0 B')
    0
    >>> human2bytes('1 KB')
    1024
    >>> human2bytes('1 MB')
    1048576
    >>> human2bytes('1 Gi')
    1073741824
    >>> human2bytes('1 tera')
    1099511627776

    >>> human2bytes('0.5kilo')
    512
    >>> human2bytes('0.1 byte')
    0
    >>> human2bytes('1 k')  # k is an alias for KB
    1024
    >>> human2bytes('12 foo')
    Traceback (most recent call last):
        ...
    ValueError: can't interpret '12 foo'
    """
    if s == None:
        return 0
    s = s.replace(',', '')
    init = s
    num = ""
    while s and s[0:1].isdigit() or s[0:1] == '.':
        num += s[0]
        s = s[1:]
    num = float(num)
    letter = s.strip()
    for name, sset in SYMBOLS.items():
        if letter in sset:
            break
    else:
        if letter == 'k':
            # treat 'k' as an alias for 'KB' as per: http://goo.gl/kTQMs
            sset = SYMBOLS['customary']
            letter = 'KB'
        else:
            raise ValueError("can't interpret %r" % init)
    prefix = {sset[0]: 1}
    for i, s in enumerate(sset[1:]):
        prefix[s] = 1 << (i + 1) * 10
    return int(num * prefix[letter])
   
# https://github.com/okfn/ckanext-importlib
def munge(name):
    # convert spaces to underscores
    name = re.sub(' ', '_', name).lower()
    # convert symbols to dashes
    name = re.sub('[:]', '_-', name).lower()
    name = re.sub('[/]', '-', name).lower()
    # take out not-allowed characters
    name = re.sub('[^a-zA-Z0-9-_]', '', name).lower()
    # remove double underscores
    name = re.sub('__', '_', name).lower()
    return name


def name_munge(input_name):
    return munge(input_name.replace(' ', '').replace('.', '_').replace('&', 'and'))
   
   
def get_licence_id(licencename):
    map = {
        "Creative Commons - Attribution-Share Alike 2.0 Australia (CC-SA)\nThe downloadable version of the database is licensed under CC-BY-SA Creative Commons Attribution Share Alike and contains only the database fields that are released under that license. These fields are object title, object number, object description as well as temporal, spatial and dimension details. It also contains a persistent URL for each record.": 'cc-by-sa',
        "CreativeCommonsAttributionNonCommercial30AustraliaCCBYNC30": 'cc-nc',
        'Otherpleasespecify': 'notspecified',
        '': 'notspecified',
        "Publicly available data": 'notspecified',
        "CreativeCommonsAttributionNoDerivativeWorks30AustraliaCCBYND30": "other-closed",
        "CreativeCommonsAttributionNonCommercialNoDerivs30AustraliaCCBYNCND30": "other-closed",
        'CreativeCommonsAttribution30AustraliaCCBY30': 'cc-by',
        "Creative Commons - Attribution 2.5 Australia (CC-BY)": 'cc-by',
        'CreativeCommonsAttributionCCBY25': 'cc-by',
        "PublicDomain": 'other-pd',
    }
    if licencename not in map.keys():
        raise Exception(licencename + " not found");
    return map[licencename];
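# e.g. get_licence_id('CreativeCommonsAttribution30AustraliaCCBY30') -> 'cc-by';
# an unmapped licence string raises an Exception so new licences surface early.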
   
   
gooddata = ["afl-in-victoria", "annual-budget-initiatives-by-suburb-brisbane-city-council"]
#athletics-in-victoria-gfyl,bicycle-racks-mosman-municipal-council,boat-ramps-brisbane-city-council,brisbane-access-ratings-database,bus-stops-brisbane-city-council,cemeteries-brisbane-city-council,cfa-locations,citycycle-stations-brisbane-city-council,community-gardens-brisbane-city-council,community-halls-brisbane-city-council,cooking-classes-gfyl,court-locations-victoria,customer-service-centres-brisbane-city-council,dance-in-victoria-gfyl,disability-activity-gfyl,dog-parks-brisbane-city-council,ferry-terminals-brisbane-city-council,fishing-club-in-victoria-gfyl,fitness-centres-in-victoria-gfyl,gardens-reserves-gfyl,golf-courses-brisbane-city-council,gymnastics-in-victoria-gfyl,historic-cemeteries-brisbane-city-council,ice-skating-centres-gfyl,immunisation-clinics-brisbane-city-council,libraries-brisbane-city-council,licenced-venues-victoria,lifesaving-locations-victoria,loading-zones-brisbane-city-council,major-projects-victoria,markets-in-victoria,martial-arts-in-victoria-gfyl,melbourne-water-use-by-postcode,members-of-parliament-both-houses-nsw,members-of-the-legislative-assembly-nsw,members-of-the-legislative-council-nsw,mfb-locations-vic,ministers-of-the-nsw-parliament,mosman-local-government-area,mosman-rider-route,mosman-wwii-honour-roll,neighbourhood-houses-gfyl,news-feeds-mosman-municipal-council,off-street-car-parks-mosman-municipal-council,orienteering-clubs-gfyl,parking-meter-areas-brisbane-city-council,parks-and-reserves-mosman-municipal-council,parks-brisbane-city-council,personal-training-gfyl,picnic-areas-brisbane-city-council,playgrounds-brisbane-city-council,playgrounds-mosman-municipal-council,police-region-crime-statistics-victoria,police-service-area-crime-statistics-victoria,pony-clubs-in-victoria-gfyl,prison-locations-victoria,public-amenities-maintained-by-mosman-council,public-art-brisbane-city-council,public-internet-locations-vic,public-toilets-brisbane-city-council,racecourse-locations-victoria,recent-development-applications-mosman-municipal-council,recreation-groups-gfyl,recreational-fishing-spots,regional-business-centres-brisbane-city-council,reports-of-swooping-birds-mosman-municipal-council,restricted-parking-areas-brisbane-city-council,rollerskating-centres-in-victoria-gfyl,sailing-clubs-gfyl,school-locations-victoria,shadow-ministers-of-the-nsw-parliament,skate-parks-gfyl,sporting-clubs-and-organisations-gfyl,stakeboard-parks-brisbane-city-council,state-bodies-gfyl,street-names-brisbane-city-council,suburbs-and-adjoining-suburbs-brisbane-city-council,swimming-pools-brisbane-city-council,swimming-pools-gfyl,tennis-courts-brisbane-city-council,top-40-book-club-reads-brisbane-city-council,tracks-and-trails-gfyl,triathlon-clubs-gfyl,urban-water-restrictions-victoria,veterinary-services-in-mosman,victorian-microbreweries,volunteering-centres-services-and-groups-victoria,walking-groups-gfyl,ward-offices-brisbane-city-council,waste-collection-days-brisbane-city-council,waste-transfer-stations-brisbane-city-council,water-consumption-in-melbourne,water-sports-in-victoria-gfyl,wifi-hot-spots-brisbane-city-council,yoga-pilates-and-tai-chi-in-victoria-gfyl,2809cycling-in-new-south-wales-what-the-data-tells-us2809-and-related-data,act-barbecue-bbq-locations,act-tafe-locations,ausindustry-locations,austender-contract-notice-export,austender-contract-notice-export,austender-contract-notice-export,austender-contract-notice-export,austender-contract-notice-export,austender-contract-notice-export,austender-contract-notice-export,austender-contract-notice-export,
#australian-gas-light-company-maps,australian-gas-light-company-maps,australian-ports,australian-public-service-statistical-bulletin-2011-12,australian-public-service-statistical-bulletin-snapshot-at-december-31-2011,australian-public-service-statistical-bulletin-tables-0910,austrics-timetable-set,capital-works-call-tender-schedule,collection-item-usage-state-library-of-victoria,country-and-commodity-trade-data-spreadsheet,country-and-commodity-trade-data-spreadsheet-2,country-by-level-of-processing-trade-data-spreadsheet,crime-incident-type-and-frequency-by-capital-city-and-nationally,csiro-locations,data-from-the-oaic-public-sector-information-survey-2012,data-from-the-oaic-public-sector-information-survey-2012,data-from-the-oaic-public-sector-information-survey-2012,department-of-finance-and-deregulation-office-locations,digitised-maps,diisr-division-locations-excluding-ausindustry-enterprise-connect-and-nmi,diisr-locations,diisr-portfolio-agency-locations-excluding-csiro,distance-to-legal-service-providers-from-disadvantaged-suburbs,enterprise-connect-locations,fire-insurance-maps-sydney-block-plans-1919-1940,fire-insurance-maps-sydney-block-plans-1919-1940,first-fleet-collection,first-fleet-collection,first-fleet-maps,first-fleet-maps,freedom-of-information-annual-estimated-costs-and-staff-time-statistical-data-2011-12,freedom-of-information-quarterly-request-and-review-statistical-data-2011-12,freedom-of-information-requests-estimated-costs-and-charges-collected-1982-83-to-2011-12,higher-education-course-completions,higher-education-enrolments,historical-australian-government-contract-data,historical-australian-government-contract-data,historical-australian-government-contract-data,historical-australian-government-contract-data,historical-australian-government-contract-data,historical-australian-government-contract-data,historical-australian-government-contract-data,historical-australian-government-contract-data,historical-australian-government-contract-data,historical-australian-government-contract-data,historical-australian-government-contract-data,historical-australian-government-contract-data,historical-australian-government-contract-data,historical-australian-government-contract-data,historical-australian-government-contract-data,journey-planner-data-nt,library-catalogue-search-terms-state-library-of-victoria,location-of-act-schools,location-of-centrelink-offices,location-of-european-wasps-nests,location-of-lawyers-and-legal-service-providers-by-town,location-of-legal-assistance-service-providers,location-of-medicare-offices,location-of-medicare-offices,maps-of-the-southern-hemisphere-16th-18th-centuries,maps-of-the-southern-hemisphere-16th-18th-centuries,music-queensland,national-measurement-institute-locations,new-south-wales-officers-and-men-of-the-australian-imperial-force-a-i-f-and-the-australian-naval-for,new-south-wales-officers-and-men-of-the-australian-imperial-force-a-i-f-and-the-australian-naval-for,photographs-of-nsw-life-pre-1955,photographs-of-nsw-life-pre-1955,photographs-of-sydney-before-1885,photographs-of-sydney-before-1885,picture-queensland,plgr-28093-playgrounds-act,police-station-locations,queensland-public-libraries,rare-printed-books,rare-printed-books,real-estate-maps,regional-australia-funding-projects,sa-memory-state-library-of-south-australia,search-engine-terms-state-library-of-victoria,south-australian-photographs-state-library-of-south-australia,south-australian-sheet-music-state-library-of-south-australia,sydney-bond-store-maps-1894,
#sydney-bond-store-maps-1894,sydney-maps-1917,sydney-maps-1917,tafe-institute-locations-victoria,tafe-sa-campus-locations,tolt-public-toilets-act,victorian-public-library-branches-state-library-of-victoria,western-australia-public-library-network,world-war-one-photographs-by-frank-hurley,world-war-one-photographs-by-frank-hurley,citycat-timetables-brisbane-city-council,cityferry-timetables-brisbane-city-council,cost-of-salinity-to-local-infrastructure-1996-97-summary-of-component-costs-of-salinity-by-reporting,cost-of-salinity-to-local-infrastructure-1996-97-summary-of-component-costs-of-salinity-by-reporting,downstream-cost-calculator-model-and-data-for-199697-or-2001-prices,economics-of-australian-soil-conditions-199697-limiting-factor-or-relative-yield-min-of-ry_salt2000-,geographical-names-register-gnr-of-nsw,victorian-dryland-salinity-assessment-2000-d01cac_ramsar_final-xls,victorian-dryland-salinity-assessment-2000-d02cac_fauna_final-xls,victorian-dryland-salinity-assessment-2000-d03cac_fauna_dist_final-xls,victorian-dryland-salinity-assessment-2000-dc04cac_hydrol_final-xls,victorian-dryland-salinity-assessment-2000-dc05cac_wetland_final-xls,victorian-dryland-salinity-assessment-2000-dc06cac_util_final-xls,victorian-dryland-salinity-assessment-2000-dc07cac_road_final-xls,victorian-dryland-salinity-assessment-2000-dc08cac_towns_final-xls,victorian-dryland-salinity-assessment-2000-dc09cac_flora_final-xls,victorian-dryland-salinity-assessment-2000-dc10cac_flora_dist_final-xls,victorian-dryland-salinity-assessment-2000-dc12cac_infrastructure-xls,victorian-dryland-salinity-assessment-2000-dc13cac_natural_envt-xls,victorian-dryland-salinity-assessment-2000-dc14cac_agriculture-xls,victorian-dryland-salinity-assessment-2000-dc16cac_agric_cost-xls,victorian-dryland-salinity-assessment-2000-dc17cac_shallow_wt-xls,victorian-dryland-salinity-assessment-2000-dc18cac_agric_cost_time-xls,victorian-dryland-salinity-assessment-2000-dc21cac_water_resources_new-xls,victorian-dryland-salinity-assessment-2000-dc22cac_risk-xls,licensed-broadcasting-transmitter-data,nsw-crime-data,recorded-crime-dataset-nsw,crime-statistics-in-nsw-by-month,2001-02-to-2007-08-local-government-survey-victoria,2009-green-light-report,annual-statistical-reports-fire-brigades-nsw-200304,annual-statistical-reports-fire-brigades-nsw-200405,annual-statistical-reports-fire-brigades-nsw-200506,annual-statistical-reports-fire-brigades-nsw-200607,arts-on-the-map,assets-and-liabilities-of-australian-located-operations,assets-of-australian-located-operations,assets-of-australian-located-operations-by-country,assets-of-financial-institutions,back-issues-of-monthly-banking-statistics,banks-assets,banks-consolidated-group-capital,banks-consolidated-group-impaired-assets,banks-consolidated-group-off-balance-sheet-business,banks-liabilities,building-societies-selected-assets-and-liabilities,byteback2842-locations-vic,cash-management-trusts,city-of-melbourne-street-furniture-database,community-services-nsw,consolidated-exposures-immediate-and-ultimate-risk-basis,consolidated-exposures-immediate-risk-basis-foreign-claims-by-country,consolidated-exposures-immediate-risk-basis-international-claims-by-country,consolidated-exposures-ultimate-risk-basis,consolidated-exposures-ultimate-risk-basis-foreign-claims-by-country,cosolidated-exposures-immediate-risk-basis,credit-unions-selected-assets-and-liabilities,daily-net-foreign-exchange-transactions,detox-your-home,education-national-assessment-program-literacy-and-numeracy-nsw,employment-data-by-nsw-regions,
#excise-beer-clearance-data-updated-each-month-beer-clearance-summary-data,finance-companies-and-general-financiers-selected-assets-and-liabilities,foreign-exchange-transactions-and-holdings-of-official-reserve-assets,half-yearly-life-insurance-bulletin-december-2010,health-behaviours-in-nsw,international-liabilities-by-country-of-the-australian-located-operations-of-banks-and-rfcs,liabilities-and-assets-monthly,liabilities-and-assets-weekly,liabilities-of-australian-located-operations,life-insurance-offices-statutory-funds,managed-funds,monetary-policy-changes,money-market-corporations-selected-assets-and-liabilities,monthly-airport-traffic-data-for-top-ten-airports-january-1985-to-december-2008,monthly-banking-statistics-april-2011,monthly-banking-statistics-june-2011,monthly-banking-statistics-may-2011,open-market-operations-2009-to-current,projected-households-vic-rvic-msd-2006-2056,projected-population-by-age-and-sex-vic-rvic-msd-2006-2056,public-unit-trust,quarterly-bank-performance-statistics,quarterly-general-insurance-performance-statistics-march-2011,quarterly-superannuation-performance-march-2011,recorded-crime-dataset-nsw,residential-land-bulletin,resourcesmart-retailers,resourcesmart-retailers-vic,road-fatalities-nsw,securitisation-vehicles,selected-asset-and-liabilities-of-the-private-non-financial-sectors,seperannuation-funds-outside-life-offices,solar-report-vic,towns-in-time-victoria,vif2008-projected-population-by-5-year-age-groups-and-sex-sla-lga-ssd-sd-2006-2026,vif2008-projected-population-totals-and-components-vic-rvic-msd-2006-2056,vif2008-projected-population-totals-sla-lga-ssd-sd-2006-2026,arts-festivals-victoria,arts-organisations-victoria,arts-spaces-and-places-victoria,ausgrid-average-electricity-use,collecting-institutions-victoria,indigenous-arts-organisations-victoria,latest-coastal-weather-observations-for-coolangatta-qld,top-10-fiction-books-brisbane-city-council];
   
   
docsdb = couch['disclosr-documents']

if __name__ == "__main__":
    orgs_list = []
    orgs_ids = {}
    for doc in docsdb.view('app/datasets'):
        print " --- "
        print doc.id

        if doc.value['url'] != "http://data.gov.au/data/" and doc.value['agencyID'] != "qld":

            # Collect the package metadata.
            pkg_name = filter(lambda x: x in '0123456789abcdefghijklmnopqrstuvwxyz-_',
                              doc.value['url'].replace("http://data.gov.au/dataset/", '').replace('/', '')[:100]);
            print pkg_name
            if pkg_name in gooddata:

                #add to or create organization using direct API
                agency = doc.value['metadata']["Agency"]
                if agency == "APS":
                    agency = "Australian Public Service Commission"
                if agency == "Shared Services, Treasury Directorate":
                    agency = "Shared Services Procurement, Treasury Directorate"
                if agency == "Treasury - Shared Services":
                    agency = "Shared Services Procurement, Treasury Directorate"
                if agency == "Territory and Municipal Services (TAMS)":
                    agency = "Territory and Municipal Services Directorate"
                if agency == "State Library of NSW":
                    agency = "State Library of New South Wales"
                org_name = name_munge(agency[:100])
                if org_name not in orgs_list:
                    orgs_list = ckandirect.action.organization_list()['result']
                    #print orgs_list
                    if org_name not in orgs_list:
                        try:
                            print "org not found, creating " + org_name
                            ckandirect.action.organization_create(name=org_name, title=agency,
                                                                  description=agency)
                            orgs_list.append(org_name)
                        except ckanapi.ValidationError, e:
                            print e
                            raise LoaderError('Unexpected status')
                    else:
                        print "org found, adding dataset to " + org_name

                # cache org names -> id mapping
                if org_name not in orgs_ids:
                    org = ckandirect.action.organization_show(id=org_name)
                    orgs_ids[org_name] = org["result"]["id"]
                org_id = orgs_ids[org_name]
                print "org id is " + org_id
                tags = []
                creator = doc.value['metadata']["DCTERMS.Creator"]
                if doc.value['agencyID'] == "AGIMO":
                    if len(doc.value['metadata']["Keywords / Tags"]) > 0:
                        if hasattr(doc.value['metadata']["Keywords / Tags"], '__iter__'):
                            tags = tags + doc.value['metadata']["Keywords / Tags"]
                        else:
                            tags = tags + [doc.value['metadata']["Keywords / Tags"]]

                    tags = [re.sub('[^a-zA-Z0-9-_.]', '', tag.replace('&', 'and')).lower() for tag in tags if tag]
                    #print tags
                    extras = []

                    for extra_key in doc.value['metadata'].keys():
                        if extra_key not in ["Description", "Content-Language", "DCTERMS.Description",
                                             "Keywords / Tags",
                                             "data.gov.au Category", "Download", "Permalink", "DCTERMS.Identifier"]:
                            if doc.value['metadata'][extra_key] != None and doc.value['metadata'][extra_key] != "":
                                extras.append([extra_key, doc.value['metadata'][extra_key]])

                    package_entity = {
                        'name': pkg_name,
                        'title': doc.value['metadata']['DCTERMS.Title'],
                        'url': doc.value['metadata']['DCTERMS.Source.URI'],
                        'tags': tags, #tags are mandatory?
                        'author': creator,
                        'maintainer': creator,
                        'licence_id': get_licence_id(doc.value['metadata']['DCTERMS.License']),
                        'notes': html2text.html2text(doc.value['metadata']['Description']),
                        'owner_org': org_id,
                        'extras': extras
                    }

                try:
                    #print package_entity
                    ckan.package_register_post(package_entity)
                except CkanApiError, e:
                    if ckan.last_message == "{\"name\": [\"That URL is already in use.\"]}":
                        print "package already exists"
                    else:
                        print ckan.last_message
                        raise LoaderError('Unexpected status %s checking for package under \'%s\': %r' % (
                            ckan.last_status, pkg_name, e.args))
                pkg = ckan.package_entity_get(pkg_name)

                # add resources (downloadable data files)
                if 'Download' in doc.value['metadata'].keys():
                    try:

                        resources = pkg.get('resources', [])
                        if len(resources) < len(doc.value['metadata']['Download']):
                            for resource in doc.value['metadata']['Download']:

                                # http://docs.ckan.org/en/ckan-1.7/domain-model-resource.html
                                # (KML/KMZ) / (Shapefile) /(Other)
                                format = "plain"
                                if resource['format'] == '(XML)':
                                    format = 'xml'
                                if resource['format'] == '(CSV/XLS)':
                                    format = 'csv'
                                if resource['format'] == '(Shapefile)':
                                    format = 'shp'
                                if resource['format'] == '(KML/KMZ)':
                                    format = 'kml'
                                name = resource['href']
                                if 'name' in resource.keys():
                                    name = resource['name']
                                print resource
                                add_package_resource_cachedurl(ckan, pkg_name, url_fix(resource['href']), name,
                                                               format,
                                                               human2bytes(resource.get('size', '0B')),
                                                               resource_type='data')
                        else:
                            print "resources already exist"
                    except CkanApiError, e:
                        if ckan.last_status == 404:
                            print "parent dataset does not exist"
                        else:
                            raise LoaderError('Unexpected status %s checking for package under \'%s\': %r' % (
                                ckan.last_status, pkg_name, e.args))
   
import couchdb
couch = couchdb.Server('http://127.0.0.1:5984/')
#couch = couchdb.Server('http://192.168.1.113:5984/')

import urllib
import urlparse
import httplib2
import httplib
import csv


def url_fix(s, charset='utf-8'):
    """Sometimes you get an URL by a user that just isn't a real
    URL because it contains unsafe characters like ' ' and so on. This
    function can fix some of the problems in a similar way browsers
    handle data entered by the user:

    :param charset: The target charset for the URL if the url was
    given as unicode string.
    """
    if isinstance(s, unicode):
        s = s.encode(charset, 'ignore')
    if not urlparse.urlparse(s).scheme:
        s = "http://" + s
    scheme, netloc, path, qs, anchor = urlparse.urlsplit(s)
    path = urllib.quote(path, '/%')
    qs = urllib.quote_plus(qs, ':&=')
    return urlparse.urlunsplit((scheme, netloc, path, qs, anchor))

# http://code.activestate.com/recipes/578019-bytes-to-human-human-to-bytes-converter/
SYMBOLS = {
    'customary': ('B', 'KB', 'MB', 'GB', 'T', 'P', 'E', 'Z', 'Y'),
    'customary_ext': ('byte', 'kilo', 'mega', 'giga', 'tera', 'peta', 'exa',
                      'zetta', 'iotta'),
    'iec': ('Bi', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi', 'Yi'),
    'iec_ext': ('byte', 'kibi', 'mebi', 'gibi', 'tebi', 'pebi', 'exbi',
                'zebi', 'yobi'),
}


docsdb = couch['disclosr-documents']
out = csv.writer(open("output.csv", "w"), delimiter=',', quoting=csv.QUOTE_ALL)
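# output.csv columns: package name, fixed URL, resource name, format,
# HTTP status ("ftp" for FTP links, "500" for bad URLs), content type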
if __name__ == "__main__":
    for doc in docsdb.view('app/datasets'):
        if doc.value['url'] != "http://data.gov.au/data/" and doc.value['agencyID'] != "qld":
            # Collect the package metadata.
            pkg_name = filter(lambda x: x in '0123456789abcdefghijklmnopqrstuvwxyz-_',
                              doc.value['url'].replace("http://data.gov.au/dataset/", '').replace('/', '')[:100]);
            if 'Download' in doc.value['metadata'].keys() and len(doc.value['metadata']['Download']) > 0:
                for resource in doc.value['metadata']['Download']:
                    # http://docs.ckan.org/en/ckan-1.7/domain-model-resource.html
                    # (KML/KMZ) / (Shapefile) /(Other)
                    format = "plain"
                    if resource['format'] == '(XML)':
                        format = 'xml'
                    if resource['format'] == '(CSV/XLS)':
                        format = 'csv'
                    if resource['format'] == '(Shapefile)':
                        format = 'shp'
                    if resource['format'] == '(KML/KMZ)':
                        format = 'kml'
                    name = resource['href']
                    if 'name' in resource.keys():
                        name = resource['name']
                    if resource['href'].startswith("ftp"):
                        out.writerow([pkg_name, url_fix(resource['href']), name, format, "ftp", ""])
                    else:
                        try:
                            h = httplib2.Http(disable_ssl_certificate_validation=True)
                            resp = h.request(url_fix(resource['href']), 'HEAD')
                            content_type = resp[0]['content-type'] if 'content-type' in resp[0].keys() else ""
                            out.writerow([pkg_name.encode('ascii', 'ignore'), url_fix(resource['href']).encode('ascii', 'ignore'), name.encode('ascii', 'ignore'), format, resp[0]['status'], content_type])
                        except httplib2.ServerNotFoundError:
                            out.writerow([pkg_name.encode('ascii', 'ignore'), url_fix(resource['href']).encode('ascii', 'ignore'), name.encode('ascii', 'ignore'), format, "500", "badurl"])
                        except httplib.InvalidURL:
                            out.writerow([pkg_name.encode('ascii', 'ignore'), url_fix(resource['href']).encode('ascii', 'ignore'), name.encode('ascii', 'ignore'), format, "500", "badurl"])
                        except httplib2.RelativeURIError:
                            out.writerow([pkg_name.encode('ascii', 'ignore'), url_fix(resource['href']).encode('ascii', 'ignore'), name.encode('ascii', 'ignore'), format, "500", "badurl"])
            else:
                out.writerow([pkg_name.encode('ascii', 'ignore')])
 
import sys, os
import time
import scrape
from bs4 import BeautifulSoup

from unidecode import unidecode

listurl = "http://data.gov.au/data/"
(url, mime_type, datasetlisthtml) = scrape.fetchURL(scrape.docsdb,
                                                    listurl, "data", "AGIMO")
soup = BeautifulSoup(datasetlisthtml)
for atag in soup.find_all(class_='result-title'):
    if atag.has_key('href'):
        url = scrape.fullurl(listurl, atag['href'])
        (url, mime_type, html) = scrape.fetchURL(scrape.docsdb,
                                                 url, "data", "AGIMO", False)
        hash = scrape.mkhash(scrape.canonurl(url))
        doc = scrape.docsdb.get(hash)
        if "metadata" not in doc.keys() or True:
            doc['type'] = "dataset"
            doc['metadata'] = {}
            soup = BeautifulSoup(html)
            for metatag in soup.find_all('meta'):
                if metatag.has_key('name'):
                    doc['metadata'][metatag['name']] = metatag['content']
            for list in soup.find_all('dl'):
                last_title = ""
                for child in list.children:
                    if str(type(child)) != "<class 'bs4.element.NavigableString'>":
                        if child.name == 'dt' and child.string != None:
                            last_title = child.string.strip()
                        if child.name == 'dd':
                            #print last_title
                            if last_title == "Description":
                                doc['metadata'][last_title] = unidecode(str(child)).encode('ascii', 'ignore')
                            elif last_title == "Download":
                                doc['metadata'][last_title] = []
                                for item in child.find_all("li"):
                                    link = item.find("a")
                                    format = item.find(property="dc:format")
                                    linkobj = {"href": link['href'].replace("/bye?", "").strip(),
                                               "format": format.string.strip()}
                                    if format.next_sibling.string != None:
                                        linkobj["size"] = format.next_sibling.string.strip()
                                    if link.string != None:
                                        linkobj["name"] = link.string.strip()
                                    doc['metadata'][last_title].append(linkobj)

                            else:
                                atags = child.find_all('a')
                                if len(atags) < 2:
                                    [s.extract() for s in child(class_='viewAll')]
                                    doc['metadata'][last_title] = ''.join(child.stripped_strings).strip()
                                else:
                                    doc['metadata'][last_title] = [item.string.replace(",", "").strip() for item in atags]
            print doc['metadata']
            scrape.docsdb.save(doc)
            #time.sleep(2)