datagov fixes: rework the data.gov.au CKAN export, add dataset merge and Qld import scripts, rewrite the gazette scraper, mail scraper errors
Former-commit-id: ed3ba96db4beeb126f802a3168476e27f298aeb8
--- a/documents/datagov-export.py
+++ b/documents/datagov-export.py
@@ -2,43 +2,18 @@
import couchdb
from ckanclient import CkanApiError
import re
+import html2text # Aaron Swartz's html2text
+
class LoaderError(Exception):
pass
# Instantiate the CKAN client.
#ckan = ckanclient.CkanClient(base_location='http://localhost:5000/api', api_key='b47b24cd-591d-40c1-8677-d73101d56d1b')
-ckan = ckanclient.CkanClient(base_location='http://data.disclosurelo.gs/api', api_key='72f90359-0396-438c-804f-a26a24336747')
-#couch = couchdb.Server('http://127.0.0.1:5984/')
-couch = couchdb.Server('http://192.168.1.113:5984/')
-
-# http://stackoverflow.com/a/7778368/684978
-from HTMLParser import HTMLParser
-import htmlentitydefs
-
-class HTMLTextExtractor(HTMLParser):
- def __init__(self):
- HTMLParser.__init__(self)
- self.result = [ ]
-
- def handle_data(self, d):
- self.result.append(d)
-
- def handle_charref(self, number):
- codepoint = int(number[1:], 16) if number[0] in (u'x', u'X') else int(number)
- self.result.append(unichr(codepoint))
-
- def handle_entityref(self, name):
- codepoint = htmlentitydefs.name2codepoint[name]
- self.result.append(unichr(codepoint))
-
- def get_text(self):
- return u''.join(self.result)
-
-def html_to_text(html):
- s = HTMLTextExtractor()
- s.feed(html)
- return s.get_text()
+ckan = ckanclient.CkanClient(base_location='http://data.disclosurelo.gs/api',
+ api_key='482a9dd2-a976-4adf-ac77-d71d92a98a52')
+couch = couchdb.Server('http://127.0.0.1:5984/')
+#couch = couchdb.Server('http://192.168.1.113:5984/')
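+# The CKAN client registers packages/groups/resources below; CouchDB (local
+# by default) supplies the scraped dataset documents.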
# http://code.activestate.com/recipes/578019-bytes-to-human-human-to-bytes-converter/
SYMBOLS = {
@@ -116,7 +91,9 @@
def name_munge(input_name):
return munge(input_name.replace(' ', '').replace('.', '_').replace('&', 'and'))
+ # (optionally truncate with [:100], as the old title-based names did)
#return input_name.replace(' ', '').replace('.', '_').replace('&', 'and')
+
def get_licence_id(licencename):
map = {
@@ -131,9 +108,9 @@
"Creative Commons - Attribution 2.5 Australia (CC-BY)": 'cc-by',
'CreativeCommonsAttributionCCBY25': 'cc-by',
"PublicDomain": 'other-pd',
- }
+ }
if licencename not in map.keys():
- raise Exception(licencename + " not found");
+ raise Exception(licencename + " not found")
return map[licencename];
docsdb = couch['disclosr-documents']
@@ -141,46 +118,86 @@
if __name__ == "__main__":
for doc in docsdb.view('app/datasets'):
print doc.id
- if doc.value['url'] != "http://data.gov.au/data/":
+ if doc.value['url'] != "http://data.gov.au/data/" and doc.value['agencyID'] != "qld":
# Collect the package metadata.
- pkg_name = name_munge(doc.value['metadata']['DCTERMS.Title'][:100])
- tags = doc.value['metadata']["Keywords / Tags"]
- if not hasattr(tags, '__iter__'):
- tags = [tags]
- [re.sub('[^a-zA-Z0-9-_()]', '', tag).replace('&', 'and').lower() for tag in tags]
- package_entity = {
- 'name': pkg_name,
- 'title': doc.value['metadata']['DCTERMS.Title'],
- 'url': doc.value['metadata']['DCTERMS.Source.URI'],
-
- 'author': doc.value['metadata']["DCTERMS.Creator"],
- 'maintainer': doc.value['metadata']["DCTERMS.Creator"],
- 'licence_id': get_licence_id(doc.value['metadata']['DCTERMS.License']),
- 'notes': html_to_text(doc.value['metadata']['Description']),
- }
- if len(tags) > 0:
- package_entity['tags'] = tags
+ pkg_name = doc.value['url'].replace("http://data.gov.au/dataset/", '').replace('/', '')
+ tags = []
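+ # Merge AGIMO "Keywords / Tags" and "data.gov.au Category" values into one
+ # tag list, handling both scalar and list metadata values.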
+ if doc.value['agencyID'] == "AGIMO":
+ if len(doc.value['metadata']["Keywords / Tags"]) > 0:
+ if hasattr(doc.value['metadata']["Keywords / Tags"], '__iter__'):
+ tags = tags + doc.value['metadata']["Keywords / Tags"]
+ else:
+ tags = tags + [doc.value['metadata']["Keywords / Tags"]]
+ if 'data.gov.au Category' in doc.value['metadata'].keys() and len(doc.value['metadata']['data.gov.au Category']) > 0:
+ if hasattr(doc.value['metadata']['data.gov.au Category'], '__iter__'):
+ tags = tags + doc.value['metadata']['data.gov.au Category']
+ else:
+ tags = tags + [doc.value['metadata']['data.gov.au Category']]
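+ # Keep only alphanumerics, '-', '_' and '.' in tag names, lowercased.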
+ tags = [re.sub('[^a-zA-Z0-9-_.]', '', tag.replace('&', 'and')).lower() for tag in tags if tag]
print tags
+ package_entity = {
+ 'name': pkg_name,
+ 'title': doc.value['metadata']['DCTERMS.Title'],
+ 'url': doc.value['metadata']['DCTERMS.Source.URI'],
+ 'tags': tags, # tags appear to be mandatory
+ 'author': doc.value['metadata']["DCTERMS.Creator"],
+ 'maintainer': doc.value['metadata']["DCTERMS.Creator"],
+ 'licence_id': get_licence_id(doc.value['metadata']['DCTERMS.License']),
+ 'notes': html2text.html2text(doc.value['metadata']['Description']),
+ }
+ if doc.value['agencyID'] == "qld":
+ package_entity = doc.value['metadata']
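+ # Qld metadata is already a CKAN package dict (see dataqld.py), so pass it
+ # through unchanged; note the agencyID != "qld" check above currently skips this branch.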
+
try:
- #print doc.id
+ print package_entity
ckan.package_register_post(package_entity)
except CkanApiError, e:
if ckan.last_status == 409:
- print "already exists"
+ print "package already exists"
else:
raise LoaderError('Unexpected status %s checking for package under \'%s\': %r' % (
ckan.last_status, pkg_name, e.args))
- print package_entity
- #todo add to organisation (author/creator/maintainer) http://docs.ckan.org/en/latest/apiv3.html#examples ckan.logic.action.update.package_owner_org_update
- #if 'data.gov.au Category' in doc.value['metadata'].keys(): #todo add to group
+
+ # Add the package to its agency's group, creating the group if needed
+
+ group_name = name_munge(doc.value['metadata']["Agency"][:100])
+ try:
+ print ckan.group_entity_get(group_name)
+
+ # Update the group details
+ group_entity = ckan.last_message
+ print "group exists"
+ if 'packages' in group_entity.keys():
+ group_entity['packages'] = list(set(group_entity['packages'] + [pkg_name]))
+ else:
+ group_entity['packages'] = [pkg_name]
+ ckan.group_entity_put(group_entity)
+ except CkanApiError, e:
+ if ckan.last_status == 404:
+ print "group does not exist, creating"
+ group_entity = {
+ 'name': group_name,
+ 'title': doc.value['metadata']["Agency"],
+ 'description': doc.value['metadata']["Agency"],
+ 'packages': [pkg_name],
+ # 'type': "organization" # not allowed via API, use database query
+ # update "group" set type = 'organization';
+ }
+ print group_entity
+ ckan.group_register_post(group_entity)
+ elif ckan.last_status == 409:
+ print "group already exists"
+ else:
+ raise LoaderError('Unexpected status %s adding to group under \'%s\': %r' % (
+ ckan.last_status, pkg_name, e.args))
if 'Download' in doc.value['metadata'].keys():
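+ # Register any Download links as CKAN resources; format is guessed from the link text.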
try:
pkg = ckan.package_entity_get(pkg_name)
resources = pkg.get('resources', [])
if len(resources) < len(doc.value['metadata']['Download']):
for resource in doc.value['metadata']['Download']:
- print resource
+
# http://docs.ckan.org/en/ckan-1.7/domain-model-resource.html
# (KML/KMZ) / (Shapefile) /(Other)
format = "plain"
@@ -191,6 +208,7 @@
name = resource['href']
if 'name' in resource.keys():
name = resource['name']
+ print resource
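+ # human2bytes (from the recipe above) turns sizes like "1.5M" into bytes.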
ckan.add_package_resource(pkg_name, resource['href'], name=name, resource_type='data',
format=format, size=human2bytes(resource['size'].replace(',', '')))
else:
--- /dev/null
+++ b/documents/datagov-merge.php
@@ -1,1 +1,26 @@
+<?php
+include_once("../include/common.inc.php");
+
+
+setlocale(LC_CTYPE, 'C');
+
+$db = $server->get_db('disclosr-documents');
+$datasets = Array();
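+// Map each data.gov.au dataset slug to its CouchDB document id, via the app/datasets view.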
+try {
+ $rows = $db->get_view("app", "datasets", null, true)->rows;
+
+ foreach ($rows as $row) {
+ //print_r($row);
+ if ($row->value->url != "http://data.gov.au/data/")
+ $datasets[str_replace(Array("http://data.gov.au/dataset/","/"),"",$row->value->url)] = $row->id;
+ }
+} catch (SetteeRestClientException $e) {
+ setteErrorHandler($e);
+}
+ksort($datasets);
+foreach ($datasets as $datasetname => $datasetkey) {
+ print "$datasetname => $datasetkey<br>\n";
+}
+?>
+
--- /dev/null
+++ b/documents/dataqld.py
@@ -1,1 +1,28 @@
+import sys, os
+import time
+import scrape
+from bs4 import BeautifulSoup
+from unidecode import unidecode
+import ckanclient
+
+# Instantiate the CKAN client.
+ckan = ckanclient.CkanClient(base_location='https://data.qld.gov.au/api')
+
+# Get the package list.
+package_list = ckan.package_register_get()
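+# For each Qld dataset, fetch its page and store the CKAN package metadata
+# in CouchDB so datagov-export.py can re-register it.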
+for package_name in package_list:
+ # Get the details of each package.
+ (url, mime_type, html) = scrape.fetchURL(scrape.docsdb,
+ "https://data.qld.gov.au/dataset/" + package_name, "data", "qld", False)
+ hash = scrape.mkhash(scrape.canonurl(url))
+ print hash
+ doc = scrape.docsdb.get(hash)
+ if "metadata" not in doc.keys() or True:
+ ckan.package_entity_get(package_name)
+ package_entity = ckan.last_message
+ doc['type'] = "dataset"
+ doc['metadata'] = package_entity
+ print package_entity
+ scrape.docsdb.save(doc)
+
--- a/documents/gazette.py
+++ b/documents/gazette.py
@@ -5,20 +5,53 @@
from unidecode import unidecode
-listurl = "http://gazettes.ag.gov.au/portal/govgazonline.nsf/publications?OpenView&Start=3960"
-(url, mime_type, listhtml) = scrape.fetchURL(scrape.docsdb,
- listurl, "gazette", "AGD")
-soup = BeautifulSoup(listhtml)
-for row in soup.find_all('tr'):
- if row.has_key('valign'):
- for col in tr.find_all('td'):
- print col.string
- #url = scrape.fullurl(listurl, atag['href'])
- #(url, mime_type, html) = scrape.fetchURL(scrape.docsdb,
- # url, "data", "AGIMO")
- #hash = scrape.mkhash(scrape.canonurl(url))
- #doc = scrape.docsdb.get(hash)
- #print doc['metadata']
- #scrape.docsdb.save(doc)
- #time.sleep(2)
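+# Page backwards through the gazette list view, 25 items per screen,
+# scraping each gazette notice and saving its metadata to CouchDB.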
+items = 3950 # start at the deepest screen for a full backfill
+items = 1 # overrides the above for a short test run
+while True:
+ print str(items) + " (" + str(items/25) + " screens to go)"
+ listurl = "http://gazettes.ag.gov.au/portal/govgazonline.nsf/publications?OpenView&Start=" + str(items)
+ (listurl, mime_type, listhtml) = scrape.fetchURL(scrape.docsdb,
+ listurl, "gazette", "AGD", False)
+ for line in listhtml.split('\n'):
+ soup = BeautifulSoup(line)
+ #print line
+ for row in soup.find_all('tr'):
+ print line
+ if row.has_key('valign'):
+ i = 0
+ date = ""
+ id = ""
+ type = ""
+ description = ""
+ name = ""
+ url = ""
+ for col in row.find_all('td'):
+ #print ''.join(col.stripped_strings)
+ if i == 0:
+ date = ''.join(col.stripped_strings)
+ if i == 1:
+ id = ''.join(col.stripped_strings)
+ if i == 2:
+ type = ''.join(col.stripped_strings)
+ if i == 3:
+ description = ''.join(col.stripped_strings)
+ for link in col.findAll('a'):
+ if link.has_key("href"):
+ url = link['href']
+ name = ''.join(link.stripped_strings)
+ print str(items) + " (" + str(items/25) + " screens to go)"
+ print [date, id, type, description, name, url]
+ itemurl = scrape.fullurl(listurl, url)
+ (itemurl, mime_type, html) = scrape.fetchURL(scrape.docsdb,
+ itemurl, "gazette", "AGD", False)
+ hash = scrape.mkhash(scrape.canonurl(itemurl))
+ doc = scrape.docsdb.get(hash)
+ doc['metadata'] = {"date": date, "date": id, "type":type, "description":description,"name": name,"url": url}
+ scrape.docsdb.save(doc)
+ #time.sleep(2)
+ i = i + 1
+ items = items - 25
+ if items <= 0:
+ break
+
--- a/documents/genericScrapers.py
+++ b/documents/genericScrapers.py
@@ -201,7 +201,7 @@
def getDate(self, content, entry, doc):
date = ''.join(content.stripped_strings).strip()
(a, b, c) = date.partition("(")
- date = self.remove_control_chars(a.replace("Octber", "October").replace("1012","2012"))
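+ # Correct common source typos ("Octber", "1012", "Janurary") before fuzzy parsing.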
+ date = self.remove_control_chars(a.replace("Octber", "October").replace("1012","2012").replace("Janurary","January"))
print date
edate = parse(date, dayfirst=True, fuzzy=True).strftime("%Y-%m-%d")
print edate
--- a/documents/runScrapers.sh
+++ b/documents/runScrapers.sh
@@ -1,10 +1,14 @@
-for f in scrapers/*.py;
- do echo "Processing $f file..";
- python $f;
+rm -f /tmp/disclosr-error
+for f in scrapers/*.py; do
+ echo "Processing $f file..";
+ python $f 2>/tmp/disclosr-error;
if [ "$?" -ne "0" ]; then
echo "error";
- sleep 2;
+ sleep 2;
fi
done
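+# Mail the collected stderr output, if any (-s tests for a non-empty file;
+# mail -E skips sending when the body is empty).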
+if [ -s /tmp/disclosr-error ] ; then
+ mail -E -s "Disclosr errors" maxious@lambdacomplex.org < /tmp/disclosr-error ;
+fi
--- a/documents/scrape.py
+++ b/documents/scrape.py
@@ -13,8 +13,8 @@
import socket
#couch = couchdb.Server('http://192.168.1.148:5984/')
-couch = couchdb.Server('http://192.168.1.113:5984/')
-#couch = couchdb.Server('http://127.0.0.1:5984/')
+#couch = couchdb.Server('http://192.168.1.113:5984/')
+couch = couchdb.Server('http://127.0.0.1:5984/')
def mkhash(input):