<?php | <?php |
require_once '../include/common.inc.php'; | require_once '../include/common.inc.php'; |
//function createFOIDocumentsDesignDoc() {
// Create/refresh the CouchDB "_design/app" design document for the
// disclosr-foidocuments database ($server comes from common.inc.php).
$foidb = $server->get_db('disclosr-foidocuments');
$obj = new stdClass();
$obj->_id = "_design/" . urlencode("app");
$obj->language = "javascript";
// Build the views container explicitly. Assigning through an undefined
// property chain ($obj->views->all->map = ...) relies on implicit stdClass
// auto-vivification ("Creating default object from empty value"), which is
// warned about and deprecated in modern PHP. (object) array(...) encodes to
// the same JSON structure.
$obj->views = (object) array(
    'all' => (object) array(
        'map' => "function(doc) { emit(doc._id, doc); };",
    ),
    'byDate' => (object) array(
        'map' => "function(doc) { emit(doc.date, doc); };",
    ),
    // Same key as byDate; the _count reduce gives per-date document counts.
    'byDateMonthYear' => (object) array(
        'map' => "function(doc) { emit(doc.date, doc); };",
        'reduce' => "_count",
    ),
    'byAgencyID' => (object) array(
        'map' => "function(doc) { emit(doc.agencyID, doc); };",
        'reduce' => "_count",
    ),
    // Emits every top-level property name present on any document.
    'fieldNames' => (object) array(
        'map' => '
function(doc) {
    for (var propName in doc) {
        emit(propName, doc._id);
    }
}',
        'reduce' => 'function (key, values, rereduce) {
    return values.length;
}',
    ),
);
// allow safe updates (even if slightly slower due to extra: rev-detection check).
$foidb->save($obj, true);
//function createDocumentsDesignDoc() {
// Create/refresh the "_design/app" design document for disclosr-documents.
$docdb = $server->get_db('disclosr-documents');
$obj = new stdClass();
$obj->_id = "_design/" . urlencode("app");
$obj->language = "javascript";
// Build the views container explicitly rather than relying on deprecated
// stdClass auto-vivification through an undefined property chain.
$obj->views = (object) array(
    'web_server' => (object) array(
        'map' => "function(doc) {\n  emit(doc.web_server, 1);\n}",
        'reduce' => "_sum",
    ),
    'byAgency' => (object) array(
        'map' => "function(doc) {\n  emit(doc.agencyID, 1);\n}",
        'reduce' => "_sum",
    ),
    'byURL' => (object) array(
        'map' => "function(doc) {\n  emit(doc.url, doc);\n}",
    ),
    'agency' => (object) array(
        'map' => "function(doc) {\n  emit(doc.agencyID, doc);\n}",
    ),
    'byWebServer' => (object) array(
        'map' => "function(doc) {\n  emit(doc.web_server, doc);\n}",
    ),
    // Documents whose fieldName is "data" are dataset records.
    'datasets' => (object) array(
        'map' => "function(doc) {\nif (doc.fieldName == \"data\") {\n  emit(doc._id, doc);\n}\n}",
    ),
    // One row per (category tag, dataset URL) pair; consumed by the CKAN
    // group loader script via docsdb.view('app/datasetGroups').
    'datasetGroups' => (object) array(
        'map' => "function(doc) {\nif (doc.fieldName == \"data\") {\n  doc.metadata[\"data.gov.au Category\"] && doc.metadata[\"data.gov.au Category\"].forEach(function(tag) {\n  emit(tag, doc.url); \n  });\n}\n}",
    ),
    'getValidationRequired' => (object) array(
        'map' => "function(doc) {\nif (doc.mime_type == \"text/html\" \n&& typeof(doc.validation) == \"undefined\") {\n  emit(doc._id, doc._attachments);\n}\n}",
    ),
);
$docdb->save($obj, true);
//function createAgencyDesignDoc() {
// Create/refresh the "_design/app" design document for disclosr-agencies.
$db = $server->get_db('disclosr-agencies');
$obj = new stdClass();
$obj->_id = "_design/" . urlencode("app");
$obj->language = "javascript";
// Build the views container explicitly rather than relying on deprecated
// stdClass auto-vivification through an undefined property chain.
$obj->views = (object) array(
    'all' => (object) array(
        'map' => "function(doc) { emit(doc._id, doc); };",
    ),
    'byABN' => (object) array(
        'map' => "function(doc) { emit(doc.abn, doc); };",
    ),
    // Departments of state plus any agency with a parent org.
    'byCanonicalName' => (object) array(
        'map' => "function(doc) {
    if (doc.parentOrg || doc.orgType == 'FMA-DepartmentOfState') {
        emit(doc.name, doc);
    }
};",
    ),
    'byDeptStateName' => (object) array(
        'map' => "function(doc) {
    if (doc.orgType == 'FMA-DepartmentOfState') {
        emit(doc.name, doc._id);
    }
};",
    ),
    'parentOrgs' => (object) array(
        'map' => "function(doc) {
    if (doc.parentOrg) {
        emit(doc._id, doc.parentOrg);
    }
};",
    ),
    // Index every name a non-suspended agency is known by (primary name,
    // short name, other names, FOI bodies, positions) back to its _id.
    'byName' => (object) array(
        'map' => 'function(doc) {
    if (typeof(doc["status"]) == "undefined" || doc["status"] != "suspended") {
        emit(doc.name, doc._id);
        if (typeof(doc.shortName) != "undefined" && doc.shortName != doc.name) {
            emit(doc.shortName, doc._id);
        }
        for (name in doc.otherNames) {
            if (doc.otherNames[name] != "" && doc.otherNames[name] != doc.name) {
                emit(doc.otherNames[name], doc._id);
            }
        }
        for (name in doc.foiBodies) {
            if (doc.foiBodies[name] != "" && doc.foiBodies[name] != doc.name) {
                emit(doc.foiBodies[name], doc._id);
            }
        }
        for (name in doc.positions) {
            if (doc.positions[name] != "" && doc.positions[name] != doc.name) {
                emit(doc.positions[name], doc._id);
            }
        }
    }
};',
    ),
    'foiEmails' => (object) array(
        'map' => "function(doc) {
    emit(doc._id, doc.foiEmail);
};",
    ),
    'byLastModified' => (object) array(
        'map' => "function(doc) { emit(doc.metadata.lastModified, doc); }",
    ),
    'getActive' => (object) array(
        'map' => 'function(doc) { if (doc.status == "active") { emit(doc._id, doc); } };',
    ),
    'getSuspended' => (object) array(
        'map' => 'function(doc) { if (doc.status == "suspended") { emit(doc._id, doc); } };',
    ),
    // Bug fix: Date.parse() returns a millisecond timestamp (a Number), not
    // a Date object, so the original lastScrape.getTime() threw a TypeError
    // for any document with metadata.lastScraped set. Compare numeric
    // timestamps directly instead.
    'getScrapeRequired' => (object) array(
        'map' => "function(doc) {
    var lastScrape = Date.parse(doc.metadata.lastScraped);
    var today = new Date();
    if (!lastScrape || lastScrape + 1000 != today.getTime()) {
        emit(doc._id, doc);
    }
};",
    ),
    'showNamesABNs' => (object) array(
        'map' => "function(doc) { emit(doc._id, {name: doc.name, abn: doc.abn}); };",
    ),
    'getConflicts' => (object) array(
        'map' => "function(doc) {
    if (doc._conflicts) {
        emit(null, [doc._rev].concat(doc._conflicts));
    }
}",
    ),
    // Sums statistic values keyed by [set, period] across agencies.
    'getStatistics' => (object) array(
        'map' => "function(doc) {
    if (doc.statistics) {
        for (var statisticSet in doc.statistics) {
            for (var statisticPeriod in doc.statistics[statisticSet]) {
                emit([statisticSet, statisticPeriod], doc.statistics[statisticSet][statisticPeriod]['value']);
            }
        }
    }
}",
        'reduce' => '_sum',
    ),
    // http://stackoverflow.com/questions/646628/javascript-startswith
    // NOTE(review): the next two map strings place polyfill statements before
    // the function expression; confirm the CouchDB view server in use accepts
    // this form (classic couchjs expects a bare function expression).
    'score' => (object) array(
        'map' => 'if(!String.prototype.startsWith){
    String.prototype.startsWith = function (str) {
        return !this.indexOf(str);
    }
}
function(doc) {
    count = 0;
    if (doc["status"] != "suspended") {
        for(var propName in doc) {
            if(typeof(doc[propName]) != "undefined" && doc[propName] != "") {
                count++;
            }
        }
        portfolio = doc.parentOrg;
        if (doc.orgType == "FMA-DepartmentOfState") {
            portfolio = doc._id;
        }
        if (doc.orgType == "Court-Commonwealth" || doc.orgType == "FMA-DepartmentOfParliament") {
            portfolio = doc.orgType;
        }
        emit(count+doc._id, {id:doc._id, name: doc.name, score:count, orgType: doc.orgType, portfolio:portfolio});
    }
}',
    ),
    // Counts, per has*/‍*URL property name, how many active agencies set it;
    // "total" rows give the active-agency denominator.
    'scoreHas' => (object) array(
        'map' => 'if(!String.prototype.startsWith){
    String.prototype.startsWith = function (str) {
        return !this.indexOf(str);
    }
}
if(!String.prototype.endsWith){
    String.prototype.endsWith = function(suffix) {
        return this.indexOf(suffix, this.length - suffix.length) !== -1;
    };
}
function(doc) {
    if (typeof(doc["status"]) == "undefined" || doc["status"] != "suspended") {
        for(var propName in doc) {
            if(typeof(doc[propName]) != "undefined" && (propName.startsWith("has") || propName.endsWith("URL"))) {
                emit(propName, 1);
            }
        }
        emit("total", 1);
    }
}',
        'reduce' => '_sum',
    ),
    'fieldNames' => (object) array(
        'map' => '
function(doc) {
    for (var propName in doc) {
        emit(propName, doc._id);
    }
}',
        'reduce' => '_count',
    ),
);
// allow safe updates (even if slightly slower due to extra: rev-detection check).
$db->save($obj, true);
?> | ?> |
<?xml version="1.0" encoding="UTF-8"?>
<module type="WEB_MODULE" version="4">
  <component name="FacetManager">
    <facet type="Python" name="Python">
      <configuration sdkName="" />
    </facet>
  </component>
  <component name="NewModuleRootManager" inherit-compiler-output="true">
    <exclude-output />
    <content url="file://$MODULE_DIR$" />
    <orderEntry type="inheritedJdk" />
    <orderEntry type="sourceFolder" forTests="false" />
  </component>
</module>
import ckanclient | |
import couchdb | |
from ckanclient import CkanApiError | |
import re | |
class LoaderError(Exception):
    """Raised when a CKAN API interaction fails in an unexpected way."""
# Instantiate the CKAN client.
#ckan = ckanclient.CkanClient(base_location='http://localhost:5000/api', api_key='b47b24cd-591d-40c1-8677-d73101d56d1b')
# NOTE(review): the API key is a live credential committed to source
# control; prefer rotating it and loading it from the environment.
api_key = 'ff34526e-f794-4068-8235-fcbba38cd8bc'
ckan = ckanclient.CkanClient(base_location='http://data.disclosurelo.gs/api',
                             api_key=api_key)
# Local CouchDB holding the scraped disclosure documents.
couch = couchdb.Server('http://127.0.0.1:5984/')
#couch = couchdb.Server('http://192.168.1.113:5984/')
# https://github.com/okfn/ckanext-importlib
def munge(name):
    """Normalise a name into a CKAN-safe slug (lowercase, [a-z0-9-_])."""
    # Lower-case once up front; none of the substitutions below introduce
    # upper-case characters, so this matches the original per-step lowering.
    cleaned = name.lower()
    for pattern, replacement in (
            (' ', '_'),              # spaces -> underscores
            ('[:]', '_-'),           # colons -> underscore-dash
            ('[/]', '-'),            # slashes -> dashes
            ('[^a-zA-Z0-9-_]', ''),  # strip not-allowed characters
            ('__', '_'),             # collapse double underscores
    ):
        cleaned = re.sub(pattern, replacement, cleaned)
    return cleaned
def name_munge(input_name):
    """Pre-clean an agency/group name, then apply the generic munge()."""
    prepared = input_name.replace(' ', '').replace('.', '_').replace('&', 'and')
    return munge(prepared)
docsdb = couch['disclosr-documents'] | |
if __name__ == "__main__": | |
groups = {} | |
for doc in docsdb.view('app/datasetGroups'): | |
group_name = doc.key | |
if group_name != "Not specified": | |
pkg_name = filter(lambda x: x in '0123456789abcdefghijklmnopqrstuvwxyz-_', | |
doc.value.replace("http://data.gov.au/dataset/", '').replace('/', '')[:100]); | |
if group_name in groups.keys(): | |
groups[group_name] = list(set(groups[group_name] + [pkg_name])) | |
else: | |
groups[group_name] = [pkg_name] | |
# add dataset to group(s) | |
for group_name in groups.keys(): | |
if group_name != "Not specified": | |
group_url = name_munge(group_name[:100]) | |
print group_name | |
print groups[group_name] | |
try: | |
# Update the group details | |
group_entity = ckan.group_entity_get(group_url) | |
print "group "+group_name+" exists" | |
if 'packages' in group_entity.keys(): | |
group_entity['packages'] = list(set(group_entity['packages'] + groups[group_name])) | |
else: | |
group_entity['packages'] = groups[group_name] | |
ckan.group_entity_put(group_entity) | |
except CkanApiError, e: | |
if ckan.last_status == 404: | |
print "group "+group_name+" does not exist, creating" | |
group_entity = { | |
'name': group_url, | |
'title': group_name, | |
'description': group_name, | |
'packages': groups[group_name] | |
} | |
#print group_entity | |
ckan.group_register_post(group_entity) | |
elif ckan.last_status == 409: | |
print "group already exists" | |
else: | |
raise LoaderError('Unexpected status %s adding to group under \'%s\': %r' % ( | |
ckan.last_status, pkg_name, e.args)) | |
import ckanclient | import ckanclient |
import couchdb | import couchdb |
from ckanclient import CkanApiError | from ckanclient import CkanApiError |
import re | import re |
import html2text # aaronsw :( | import html2text # aaronsw :( |
import ckanapi # https://github.com/open-data/ckanapi | import ckanapi # https://github.com/open-data/ckanapi |
class LoaderError(Exception):
    """Signals an unexpected failure while talking to the CKAN API."""
# Instantiate the CKAN client.
#ckan = ckanclient.CkanClient(base_location='http://localhost:5000/api', api_key='b47b24cd-591d-40c1-8677-d73101d56d1b')
# NOTE(review): the API key is a live credential committed to source
# control; prefer rotating it and loading it from the environment.
api_key = 'ff34526e-f794-4068-8235-fcbba38cd8bc'
server = 'data.disclosurelo.gs'
# Bug fix: the base URL was built as 'http://' + server + 'api', which yields
# "http://data.disclosurelo.gsapi" (no path separator); restore the '/'.
ckan = ckanclient.CkanClient(base_location='http://' + server + '/api',
                             api_key=api_key)
ckandirect = ckanapi.RemoteCKAN('http://' + server, api_key=api_key)
couch = couchdb.Server('http://127.0.0.1:5984/')
#couch = couchdb.Server('http://192.168.1.113:5984/')
# http://code.activestate.com/recipes/578019-bytes-to-human-human-to-bytes-converter/
SYMBOLS = {
    'customary': ('B', 'KB', 'MB', 'GB', 'T', 'P', 'E', 'Z', 'Y'),
    'customary_ext': ('byte', 'kilo', 'mega', 'giga', 'tera', 'peta', 'exa',
                      'zetta', 'iotta'),
    'iec': ('Bi', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi', 'Yi'),
    'iec_ext': ('byte', 'kibi', 'mebi', 'gibi', 'tebi', 'pebi', 'exbi',
                'zebi', 'yobi'),
}


def human2bytes(s):
    """
    Attempts to guess the string format based on default symbols
    set and return the corresponding bytes as an integer.
    When unable to recognize the format ValueError is raised.

    >>> human2bytes('0 B')
    0
    >>> human2bytes('1 K')
    1024
    >>> human2bytes('1 M')
    1048576
    >>> human2bytes('1 Gi')
    1073741824
    >>> human2bytes('1 tera')
    1099511627776
    >>> human2bytes('0.5kilo')
    512
    >>> human2bytes('0.1 byte')
    0
    >>> human2bytes('1 k')  # k is an alias for K
    1024
    >>> human2bytes('12 foo')
    Traceback (most recent call last):
        ...
    ValueError: can't interpret '12 foo'
    """
    # Treat a missing size as zero bytes (scraped pages sometimes omit it).
    if s is None:
        return 0
    # Strip thousands separators, e.g. "1,024 KB".
    s = s.replace(',', '')
    init = s
    num = ""
    while (s and s[0:1].isdigit()) or s[0:1] == '.':
        num += s[0]
        s = s[1:]
    num = float(num)
    letter = s.strip()
    # Bug fix: this SYMBOLS table uses two-letter customary units (KB/MB/GB),
    # so the bare prefixes in the doctests above ('1 K', '1 M') raised
    # ValueError and the old 'k' alias hit a KeyError. Alias the bare forms
    # onto their two-letter equivalents before the lookup.
    letter = {'k': 'KB', 'K': 'KB', 'M': 'MB', 'G': 'GB'}.get(letter, letter)
    for name, sset in SYMBOLS.items():
        if letter in sset:
            break
    else:
        raise ValueError("can't interpret %r" % init)
    # sset[0] is the base unit; each following symbol is another factor 1024.
    prefix = {sset[0]: 1}
    for i, unit in enumerate(sset[1:]):
        prefix[unit] = 1 << (i + 1) * 10
    return int(num * prefix[letter])
# https://github.com/okfn/ckanext-importlib
def munge(name):
    """Normalise a name into a CKAN-safe slug (lowercase, [a-z0-9-_])."""
    # Lower-casing first is equivalent to the original's per-step .lower()
    # calls, since no substitution below introduces upper-case characters.
    slug = name.lower()
    slug = re.sub(' ', '_', slug)             # spaces -> underscores
    slug = re.sub('[:]', '_-', slug)          # colons -> underscore-dash
    slug = re.sub('[/]', '-', slug)           # slashes -> dashes
    slug = re.sub('[^a-zA-Z0-9-_]', '', slug) # drop not-allowed characters
    slug = re.sub('__', '_', slug)            # collapse double underscores
    return slug
#todo "{'name': [u'Url must be purely lowercase alphanumeric (ascii) characters and these symbols: -_']}"
# http://data.gov.au/dataset/australian-domestic-regional-and-international-airline-activity-%E2%80%93-time-series/
def name_munge(input_name):
    """Pre-clean an agency/group name, then apply the generic munge()."""
    prepared = input_name.replace(' ', '').replace('.', '_').replace('&', 'and')
    return munge(prepared)
def get_licence_id(licencename):
    """Map a data.gov.au licence description string to a CKAN licence id.

    Raises Exception (same type and message as before, so callers are
    unaffected) when the licence text is not in the known mapping.
    """
    # Renamed from `map`, which shadowed the builtin.
    licence_map = {
        "Creative Commons - Attribution-Share Alike 2.0 Australia (CC-SA)\nThe downloadable version of the database is licensed under CC-BY-SA Creative Commons Attribution Share Alike and contains only the database fields that are released under that license. These fields are object title, object number, object description as well as temporal, spatial and dimension details. It also contains a persistent URL for each record.": 'cc-by-sa',
        "CreativeCommonsAttributionNonCommercial30AustraliaCCBYNC30": 'cc-nc',
        'Otherpleasespecify': 'notspecified',
        '': 'notspecified',
        "Publicly available data": 'notspecified',
        "CreativeCommonsAttributionNoDerivativeWorks30AustraliaCCBYND30": "other-closed",
        "CreativeCommonsAttributionNonCommercialNoDerivs30AustraliaCCBYNCND30": "other-closed",
        'CreativeCommonsAttribution30AustraliaCCBY30': 'cc-by',
        "Creative Commons - Attribution 2.5 Australia (CC-BY)": 'cc-by',
        'CreativeCommonsAttributionCCBY25': 'cc-by',
        "PublicDomain": 'other-pd',
    }
    if licencename not in licence_map:
        raise Exception(licencename + " not found")
    return licence_map[licencename]
docsdb = couch['disclosr-documents'] | docsdb = couch['disclosr-documents'] |
if __name__ == "__main__": | if __name__ == "__main__": |
orgs_list = [] | orgs_list = [] |
orgs_ids = {} | |
for doc in docsdb.view('app/datasets'): | for doc in docsdb.view('app/datasets'): |
print " --- " | |
print doc.id | print doc.id |
if doc.value['url'] != "http://data.gov.au/data/" and doc.value['agencyID'] != "qld": | if doc.value['url'] != "http://data.gov.au/data/" and doc.value['agencyID'] != "qld": |
# Collect the package metadata. | # Collect the package metadata. |
pkg_name = filter(lambda x: x in '0123456789abcdefghijklmnopqrstuvwxyz-_', | pkg_name = filter(lambda x: x in '0123456789abcdefghijklmnopqrstuvwxyz-_', |
doc.value['url'].replace("http://data.gov.au/dataset/", '').replace('/', '')[:100]); | doc.value['url'].replace("http://data.gov.au/dataset/", '').replace('/', '')[:100]); |
print pkg_name | print pkg_name |
#add to or create organization using direct API | #add to or create organization using direct API |
org_name = name_munge(doc.value['metadata']["Agency"][:100]) | agency = doc.value['metadata']["Agency"] |
if agency == "APS": | |
agency = "Australian Public Service Commission" | |
if agency == "Shared Services, Treasury Directorate": | |
agency = "Shared Services Procurement, Treasury Directorate" | |
if agency == "Treasury - Shared Services": | |
agency = "Shared Services Procurement, Treasury Directorate" | |
if agency == "Territory and Municipal Services (TAMS)": | |
agency = "Territory and Municipal Services Directorate" | |
if agency == "State Library of NSW": | |
agency = "State Library of New South Wales" | |
org_name = name_munge(agency[:100]) | |
if org_name not in orgs_list: | if org_name not in orgs_list: |
orgs_list = ckandirect.action.organization_list()['result'] | orgs_list = ckandirect.action.organization_list()['result'] |
#print orgs_list | #print orgs_list |
if org_name not in orgs_list: | if org_name not in orgs_list: |
try: | try: |
print "org not found, creating " + org_name | print "org not found, creating " + org_name |
ckandirect.action.organization_create(name=org_name, title=doc.value['metadata']["Agency"], | ckandirect.action.organization_create(name=org_name, title=agency, |
description=doc.value['metadata']["Agency"]) | description=agency) |
orgs_list.append(org_name) | orgs_list.append(org_name) |
except ckanapi.ValidationError, e: | except ckanapi.ValidationError, e: |
print e | print e |
raise LoaderError('Unexpected status') | raise LoaderError('Unexpected status') |
else: | else: |
print "org found, adding dataset to " + org_name | print "org found, adding dataset to " + org_name |
org = ckandirect.action.organization_show(id=org_name) | # cache org names -> id mapping |
# todo cache org names -> id mapping | if org_name not in orgs_ids: |
org = ckandirect.action.organization_show(id=org_name) | |
orgs_ids[org_name] = org["result"]["id"] | |
org_id = orgs_ids[org_name] | |
print "org id is "+org_id | |
tags = [] | tags = [] |
creator = doc.value['metadata']["DCTERMS.Creator"] | |
if doc.value['agencyID'] == "AGIMO": | if doc.value['agencyID'] == "AGIMO": |
if len(doc.value['metadata']["Keywords / Tags"]) > 0: | if len(doc.value['metadata']["Keywords / Tags"]) > 0: |
if hasattr(doc.value['metadata']["Keywords / Tags"], '__iter__'): | if hasattr(doc.value['metadata']["Keywords / Tags"], '__iter__'): |
tags = tags + doc.value['metadata']["Keywords / Tags"] | tags = tags + doc.value['metadata']["Keywords / Tags"] |
else: | else: |
tags = tags + [doc.value['metadata']["Keywords / Tags"]] | tags = tags + [doc.value['metadata']["Keywords / Tags"]] |
tags = [re.sub('[^a-zA-Z0-9-_.]', '', tag.replace('&', 'and')).lower() for tag in tags if tag] | tags = [re.sub('[^a-zA-Z0-9-_.]', '', tag.replace('&', 'and')).lower() for tag in tags if tag] |
#print tags | #print tags |
extras = [] | |
for extra_key in doc.value['metadata'].keys(): | |
if extra_key not in ["Description","Content-Language","DCTERMS.Description", "Keywords / Tags" ,"data.gov.au Category", "Download", "Permalink","DCTERMS.Identifier"]: | |
if doc.value['metadata'][extra_key] != None and doc.value['metadata'][extra_key] != "": | |
extras.append([extra_key, doc.value['metadata'][extra_key]]) | |
package_entity = { | package_entity = { |
'name': pkg_name, | 'name': pkg_name, |
'title': doc.value['metadata']['DCTERMS.Title'], | 'title': doc.value['metadata']['DCTERMS.Title'], |
'url': doc.value['metadata']['DCTERMS.Source.URI'], | 'url': doc.value['metadata']['DCTERMS.Source.URI'], |
'tags': tags, #tags are mandatory? | 'tags': tags, #tags are mandatory? |
'author': doc.value['metadata']["DCTERMS.Creator"], | 'author': creator, |
'maintainer': doc.value['metadata']["DCTERMS.Creator"], | 'maintainer': creator, |
'licence_id': get_licence_id(doc.value['metadata']['DCTERMS.License']), | 'licence_id': get_licence_id(doc.value['metadata']['DCTERMS.License']), |
'notes': html2text.html2text(doc.value['metadata']['Description']), | 'notes': html2text.html2text(doc.value['metadata']['Description']), |
'owner_org': org["result"]["id"] | 'owner_org': org_id, |
#todo add missing key values like jurasdiction | 'extras': extras |
} | } |
if doc.value['agencyID'] == "qld": | |
package_entity = doc.value['metadata'] | |
try: | try: |
#print package_entity | #print package_entity |
ckan.package_register_post(package_entity) | ckan.package_register_post(package_entity) |
except CkanApiError, e: | except CkanApiError, e: |
if ckan.last_message == "{\"name\": [\"That URL is already in use.\"]}": | if ckan.last_message == "{\"name\": [\"That URL is already in use.\"]}": |
print "package already exists" | print "package already exists" |
else: | else: |
print ckan.last_message | print ckan.last_message |
raise LoaderError('Unexpected status %s checking for package under \'%s\': %r' % ( | raise LoaderError('Unexpected status %s checking for package under \'%s\': %r' % ( |
ckan.last_status, pkg_name, e.args)) | ckan.last_status, pkg_name, e.args)) |
        # Re-fetch the package so we have its current resource list below.
        pkg = ckan.package_entity_get(pkg_name)
        # add dataset to group(s)
        groups = []
        # 'data.gov.au Category' may be a single string or a list of strings;
        # normalise both shapes into the `groups` list.
        if 'data.gov.au Category' in doc.value['metadata'].keys() and len(
                doc.value['metadata']['data.gov.au Category']) > 0:
            if hasattr(doc.value['metadata']['data.gov.au Category'], '__iter__'):
                groups = groups + doc.value['metadata']['data.gov.au Category']
            else:
                groups = groups + [doc.value['metadata']['data.gov.au Category']]
        for group_name in groups:
            # Group URL slug is the munged name, capped at 100 chars.
            group_url = name_munge(group_name[:100])
            try:
                print ckan.group_entity_get(group_url)
                # Update the group details
                # group_entity_get stores the parsed body in last_message.
                group_entity = ckan.last_message
                print "group "+group_name+" exists"
                # Add this package to the group, de-duplicating via set().
                if 'packages' in group_entity.keys():
                    group_entity['packages'] = list(set(group_entity['packages'] + [pkg_name]))
                else:
                    group_entity['packages'] = [pkg_name]
                ckan.group_entity_put(group_entity)
            except CkanApiError, e:
                if ckan.last_status == 404:
                    # Group missing: create it with this package as first member.
                    print "group "+group_name+" does not exist, creating"
                    group_entity = {
                        'name': group_url,
                        'title': group_name,
                        'description': group_name,
                        'packages': [pkg_name]
                    }
                    #print group_entity
                    ckan.group_register_post(group_entity)
                elif ckan.last_status == 409:
                    # Conflict: created concurrently or name collision — ignore.
                    print "group already exists"
                else:
                    raise LoaderError('Unexpected status %s adding to group under \'%s\': %r' % (
                        ckan.last_status, pkg_name, e.args))
# add resources (downloadable data files) | # add resources (downloadable data files) |
if 'Download' in doc.value['metadata'].keys(): | if 'Download' in doc.value['metadata'].keys(): |
try: | try: |
resources = pkg.get('resources', []) | resources = pkg.get('resources', []) |
if len(resources) < len(doc.value['metadata']['Download']): | if len(resources) < len(doc.value['metadata']['Download']): |
for resource in doc.value['metadata']['Download']: | for resource in doc.value['metadata']['Download']: |
# http://docs.ckan.org/en/ckan-1.7/domain-model-resource.html | # http://docs.ckan.org/en/ckan-1.7/domain-model-resource.html |
# (KML/KMZ) / (Shapefile) /(Other) | # (KML/KMZ) / (Shapefile) /(Other) |
format = "plain" | format = "plain" |
if resource['format'] == '(XML)': | if resource['format'] == '(XML)': |
format = 'xml' | format = 'xml' |
if resource['format'] == '(CSV/XLS)': | if resource['format'] == '(CSV/XLS)': |
format = 'csv' | format = 'csv' |
if resource['format'] == '(Shapefile)': | |
format = 'shp' | |
if resource['format'] == '(KML/KMZ)': | |
format = 'kml' | |
name = resource['href'] | name = resource['href'] |
if 'name' in resource.keys(): | if 'name' in resource.keys(): |
name = resource['name'] | name = resource['name'] |
print resource | print resource |
ckan.add_package_resource(pkg_name, resource['href'], name=name, resource_type='data', | ckan.add_package_resource(pkg_name, resource['href'], name=name, resource_type='data', |
format=format, | format=format, |
size=human2bytes(resource['size'].replace(',', ''))) | size=human2bytes(resource.get('size','0B'))) |
else: | else: |
print "resources already exist" | print "resources already exist" |
except CkanApiError, e: | except CkanApiError, e: |
if ckan.last_status == 404: | if ckan.last_status == 404: |
print "parent dataset does not exist" | print "parent dataset does not exist" |
else: | else: |
raise LoaderError('Unexpected status %s checking for package under \'%s\': %r' % ( | raise LoaderError('Unexpected status %s checking for package under \'%s\': %r' % ( |
ckan.last_status, pkg_name, e.args)) | ckan.last_status, pkg_name, e.args)) |
import sys, os | import sys, os |
import time | import time |
import scrape | import scrape |
from bs4 import BeautifulSoup | from bs4 import BeautifulSoup |
from unidecode import unidecode | from unidecode import unidecode |
# Scrape the data.gov.au dataset listing and store per-dataset metadata
# into the scrape.docsdb CouchDB database.
listurl = "http://data.gov.au/data/"
# Fetch (and cache) the listing page; returns final URL, MIME type and body.
(url, mime_type, datasetlisthtml) = scrape.fetchURL(scrape.docsdb,
    listurl, "data", "AGIMO")
soup = BeautifulSoup(datasetlisthtml)
# Each search result title links to an individual dataset page.
for atag in soup.find_all(class_='result-title'):
    if atag.has_key('href'):
        url = scrape.fullurl(listurl, atag['href'])
        (url, mime_type, html) = scrape.fetchURL(scrape.docsdb,
            url, "data", "AGIMO", False)
        # Document id is the hash of the canonicalised URL.
        hash = scrape.mkhash(scrape.canonurl(url))
        doc = scrape.docsdb.get(hash)
        # NOTE(review): "or True" forces re-parsing of metadata on every run,
        # defeating the cache check — confirm this is still intended.
        if "metadata" not in doc.keys() or True:
            doc['type'] = "dataset"
            doc['metadata'] = {}
            soup = BeautifulSoup(html)
            # Copy every named <meta> tag into the metadata map.
            for metatag in soup.find_all('meta'):
                if metatag.has_key('name'):
                    doc['metadata'][metatag['name']] = metatag['content']
            # Dataset pages lay out fields as <dl> definition lists:
            # <dt> holds the field title, the following <dd> its value.
            for list in soup.find_all('dl'):
                last_title = ""
                for child in list.children:
                    # Skip bare whitespace/text nodes between tags.
                    if str(type(child)) != "<class 'bs4.element.NavigableString'>":
                        if child.name == 'dt' and child.string != None:
                            last_title = child.string.strip()
                        if child.name == 'dd':
                            #print last_title
                            if last_title == "Description":
                                # Keep raw markup, transliterated to ASCII.
                                doc['metadata'][last_title] = unidecode(str(child)).encode('ascii', 'ignore')
elif last_title == "Download": | elif last_title == "Download": |
doc['metadata'][last_title] = [] | doc['metadata'][last_title] = [] |
for item in child.find_all("li"): | for item in child.find_all("li"): |
link = item.find("a") | link = item.find("a") |
format = item.find(property="dc:format") | format = item.find(property="dc:format") |
linkobj = {"href":link['href'].replace("/bye?","").strip(), | linkobj = {"href":link['href'].replace("/bye?","").strip(), |
"format": format.string.strip(), "size": format.next_sibling.string.strip()} | "format": format.string.strip()} |
if format.next_sibling.string != None: | |
linkobj["size"] = format.next_sibling.string.strip() | |
if link.string != None: | if link.string != None: |
linkobj["name"] = link.string.strip() | linkobj["name"] = link.string.strip() |
doc['metadata'][last_title].append(linkobj) | doc['metadata'][last_title].append(linkobj) |
                            else:
                                # Generic field: either flattened text or, when
                                # it holds multiple links, a list of link texts.
                                atags = child.find_all('a')
                                if len(atags) < 2:
                                    # Drop "view all" expander links before
                                    # flattening the remaining text.
                                    [s.extract() for s in child(class_='viewAll')]
                                    doc['metadata'][last_title] = ''.join(child.stripped_strings).strip()
                                else:
                                    doc['metadata'][last_title] = [item.string.replace(",","").strip() for item in atags]
            print doc['metadata']
            # Persist the (re)parsed metadata back to CouchDB.
            scrape.docsdb.save(doc)
            #time.sleep(2)