export datasets from CouchDB to CKAN; store datagov download links as a list; start gazette scraper
Former-commit-id: 613905452e6bd6709f8810fd6b6ed709d2f4e5fb
--- a/admin/refreshDesignDoc.php
+++ b/admin/refreshDesignDoc.php
@@ -40,6 +40,8 @@
$obj->views->byURL->map = "function(doc) {\n emit(doc.url, doc);\n}";
$obj->views->agency->map = "function(doc) {\n emit(doc.agencyID, doc);\n}";
$obj->views->byWebServer->map = "function(doc) {\n emit(doc.web_server, doc);\n}";
+
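+// view of documents that represent datasets (fieldName == "data"), keyed by document id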
+$obj->views->datasets->map = "function(doc) {\nif (doc.fieldName == \"data\") {\n emit(doc._id, doc);\n}\n}";
$obj->views->getValidationRequired->map = "function(doc) {\nif (doc.mime_type == \"text/html\" \n&& typeof(doc.validation) == \"undefined\") {\n emit(doc._id, doc._attachments);\n}\n}";
$docdb->save($obj, true);
--- /dev/null
+++ b/documents/datagov-export.py
@@ -1,1 +1,58 @@
+import ckanclient
+import couchdb
+from ckanclient import CkanApiError
+import re
+class LoaderError(Exception):
+    pass
+# https://github.com/okfn/ckanext-importlib
+# Instantiate the CKAN client.
+ckan = ckanclient.CkanClient(base_location='http://localhost:5000/api',
+ api_key='b47b24cd-591d-40c1-8677-d73101d56d1b')
+# (use your own api_key from http://thedatahub.org/user/me )
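+# name munging (after the ckanext-importlib link above): make a title safe to use as a CKAN package name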
+def munge(name):
+    # convert spaces to underscores and lower-case the name
+    name = re.sub(' ', '_', name).lower()
+    # convert colons and slashes to dashes
+    name = re.sub('[:]', '_-', name)
+    name = re.sub('[/]', '-', name)
+    # take out not-allowed characters
+    name = re.sub('[^a-zA-Z0-9-_]', '', name)
+    # remove double underscores
+    name = re.sub('__', '_', name)
+    return name
+def name_munge(input_name):
+    return munge(input_name.replace(' ', '').replace('.', '_').replace('&', 'and'))
+    #return input_name.replace(' ', '').replace('.', '_').replace('&', 'and')
+
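+# CouchDB server holding the scraped disclosr documents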
+couch = couchdb.Server('http://127.0.0.1:5984/')
+docsdb = couch['disclosr-documents']
+
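+# walk the datasets view (added in refreshDesignDoc.php) and register each dataset as a CKAN package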
+if __name__ == "__main__":
+    for doc in docsdb.view('app/datasets'):
+        print doc.id
+        if doc.value['url'] != "http://data.gov.au/data/":
+            # Collect the package metadata.
+            pkg_name = name_munge(doc.value['metadata']['DCTERMS.Title'][:100])
+            package_entity = {
+                'name': pkg_name,
+                'title': doc.value['metadata']['DCTERMS.Title'],
+                'url': doc.value['metadata']['DCTERMS.Source.URI'],
+                'tags': doc.value['metadata']["Keywords / Tags"],  # todo: must be alphanumeric characters or symbols
+                'author': doc.value['metadata']["DCTERMS.Creator"],
+                'maintainer': doc.value['metadata']["DCTERMS.Creator"],
+                'license_id': doc.value['metadata']['DCTERMS.License'],
+                'notes': doc.value['metadata']['Description'],
+            }
+            try:
+                ckan.package_register_post(package_entity)
+            except CkanApiError, e:
+                if ckan.last_status == 409:
+                    print "already exists"
+                else:
+                    raise LoaderError('Unexpected status %s checking for package under \'%s\': %r' % (ckan.last_status, doc.id, e.args))
+
+            print package_entity
+            ckan.add_package_resource(pkg_name, 'http://example.org/', name='Foo', resource_type='data', format='csv')
+
--- a/documents/datagov.py
+++ b/documents/datagov.py
@@ -34,7 +34,7 @@
if last_title == "Description":
doc['metadata'][last_title] = unidecode(str(child)).encode('ascii', 'ignore')
elif last_title == "Download":
- doc['metadata'][last_title] = {}
+ doc['metadata'][last_title] = []
for item in child.find_all("li"):
link = item.find("a")
format = item.find(property="dc:format")
@@ -42,7 +42,7 @@
"format": format.string.strip(), "size": format.next_sibling.string.strip()}
if link.string != None:
linkobj["name"] = link.string.strip()
- doc['metadata'][last_title][] = linkobj
+ doc['metadata'][last_title].append(linkobj)
else:
atags = child.find_all('a')
--- a/documents/gazette.py
+++ b/documents/gazette.py
@@ -1,1 +1,24 @@
+import sys, os
+import time
+import scrape
+from bs4 import BeautifulSoup
+from unidecode import unidecode
+
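+# fetch the gazette publications listing page via the shared scrape module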
+listurl = "http://gazettes.ag.gov.au/portal/govgazonline.nsf/publications?OpenView&Start=3960"
+(url, mime_type, listhtml) = scrape.fetchURL(scrape.docsdb,
+ listurl, "gazette", "AGD")
+soup = BeautifulSoup(listhtml)
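+# only table rows carrying a valign attribute hold listing entries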
+for row in soup.find_all('tr'):
+    if row.has_attr('valign'):
+        for col in row.find_all('td'):
+            print col.string
+ #url = scrape.fullurl(listurl, atag['href'])
+ #(url, mime_type, html) = scrape.fetchURL(scrape.docsdb,
+ # url, "data", "AGIMO")
+ #hash = scrape.mkhash(scrape.canonurl(url))
+ #doc = scrape.docsdb.get(hash)
+ #print doc['metadata']
+ #scrape.docsdb.save(doc)
+ #time.sleep(2)
+