Add tool to check data.gov.au resources; export dataset categories as CKAN groups and agencies as organisations
Former-commit-id: f406384c3ba09ba04f639abb5731511ddf02b88b
--- a/admin/refreshDesignDoc.php
+++ b/admin/refreshDesignDoc.php
@@ -42,6 +42,7 @@
$obj->views->byWebServer->map = "function(doc) {\n emit(doc.web_server, doc);\n}";
$obj->views->datasets->map = "function(doc) {\nif (doc.fieldName == \"data\") {\n emit(doc._id, doc);\n}\n}";
+$obj->views->datasetGroups->map = "function(doc) {\nif (doc.fieldName == \"data\") {\n doc.metadata[\"data.gov.au Category\"] && doc.metadata[\"data.gov.au Category\"].forEach(function(tag) {\n emit(tag, doc.url); \n });\n}\n}";
$obj->views->getValidationRequired->map = "function(doc) {\nif (doc.mime_type == \"text/html\" \n&& typeof(doc.validation) == \"undefined\") {\n emit(doc._id, doc._attachments);\n}\n}";
$docdb->save($obj, true);
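For reference, the new datasetGroups view emits one row per (data.gov.au category, dataset URL) pair for crawled documents whose fieldName is "data". A minimal sketch of consuming it with python-couchdb, assuming the design document is saved as _design/app (the scripts below query it under that name):

    import couchdb

    couch = couchdb.Server('http://127.0.0.1:5984/')
    docsdb = couch['disclosr-documents']
    for row in docsdb.view('app/datasetGroups'):
        print row.key, row.value  # category tag -> dataset URL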
--- /dev/null
+++ b/disclosr.iml
@@ -0,0 +1,16 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<module type="WEB_MODULE" version="4">
+ <component name="FacetManager">
+ <facet type="Python" name="Python">
+ <configuration sdkName="" />
+ </facet>
+ </component>
+ <component name="NewModuleRootManager" inherit-compiler-output="true">
+ <exclude-output />
+ <content url="file://$MODULE_DIR$" />
+ <orderEntry type="inheritedJdk" />
+ <orderEntry type="sourceFolder" forTests="false" />
+ </component>
+</module>
+
--- /dev/null
+++ b/documents/datagov-export-groups.py
@@ -0,0 +1,81 @@
+import ckanclient
+import couchdb
+from ckanclient import CkanApiError
+import re
+
+class LoaderError(Exception):
+ pass
+
+# Instantiate the CKAN client.
+#ckan = ckanclient.CkanClient(base_location='http://localhost:5000/api', api_key='b47b24cd-591d-40c1-8677-d73101d56d1b')
+api_key = 'ff34526e-f794-4068-8235-fcbba38cd8bc'
+ckan = ckanclient.CkanClient(base_location='http://data.disclosurelo.gs/api',
+ api_key=api_key)
+couch = couchdb.Server('http://127.0.0.1:5984/')
+#couch = couchdb.Server('http://192.168.1.113:5984/')
+
+# https://github.com/okfn/ckanext-importlib
+def munge(name):
+ # convert spaces to underscores
+ name = re.sub(' ', '_', name).lower()
+ # convert symbols to dashes
+ name = re.sub('[:]', '_-', name).lower()
+ name = re.sub('[/]', '-', name).lower()
+ # take out not-allowed characters
+ name = re.sub('[^a-zA-Z0-9-_]', '', name).lower()
+ # remove double underscores
+ name = re.sub('__', '_', name).lower()
+ return name
+
+
+def name_munge(input_name):
+ return munge(input_name.replace(' ', '').replace('.', '_').replace('&', 'and'))
+
+
+docsdb = couch['disclosr-documents']
+
+if __name__ == "__main__":
+ groups = {}
+ for doc in docsdb.view('app/datasetGroups'):
+ group_name = doc.key
+ if group_name != "Not specified":
+ pkg_name = filter(lambda x: x in '0123456789abcdefghijklmnopqrstuvwxyz-_',
+ doc.value.replace("http://data.gov.au/dataset/", '').replace('/', '')[:100]);
+ if group_name in groups.keys():
+ groups[group_name] = list(set(groups[group_name] + [pkg_name]))
+ else:
+ groups[group_name] = [pkg_name]
+
+ # add dataset to group(s)
+ for group_name in groups.keys():
+ if group_name != "Not specified":
+ group_url = name_munge(group_name[:100])
+ print group_name
+ print groups[group_name]
+ try:
+ # Update the group details
+ group_entity = ckan.group_entity_get(group_url)
+ print "group "+group_name+" exists"
+ if 'packages' in group_entity.keys():
+ group_entity['packages'] = list(set(group_entity['packages'] + groups[group_name]))
+ else:
+ group_entity['packages'] = groups[group_name]
+ ckan.group_entity_put(group_entity)
+ except CkanApiError, e:
+ if ckan.last_status == 404:
+ print "group "+group_name+" does not exist, creating"
+ group_entity = {
+ 'name': group_url,
+ 'title': group_name,
+ 'description': group_name,
+ 'packages': groups[group_name]
+ }
+ #print group_entity
+ ckan.group_register_post(group_entity)
+ elif ckan.last_status == 409:
+ print "group already exists"
+ else:
+ raise LoaderError('Unexpected status %s adding to group \'%s\': %r' % (
+ ckan.last_status, group_name, e.args))
+
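A quick sanity check of the munging above, assuming munge and name_munge are in scope; the expected strings are worked out by hand from the regexes, not taken from the repo:

    # CKAN-safe identifiers from free-text agency/category names
    assert munge('Keywords / Tags') == 'keywords_-_tags'
    assert name_munge('State Library of New South Wales') == 'statelibraryofnewsouthwales'
    assert name_munge('Territory and Municipal Services (TAMS)') == 'territoryandmunicipalservicestams'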
--- a/documents/datagov-export.py
+++ b/documents/datagov-export.py
@@ -3,17 +3,45 @@
from ckanclient import CkanApiError
import re
import html2text # aaronsw :(
+import ckanapi # https://github.com/open-data/ckanapi
class LoaderError(Exception):
pass
# Instantiate the CKAN client.
-#ckan = ckanclient.CkanClient(base_location='http://localhost:5000/api', api_key='b47b24cd-591d-40c1-8677-d73101d56d1b')
-ckan = ckanclient.CkanClient(base_location='http://data.disclosurelo.gs/api',
- api_key='482a9dd2-a976-4adf-ac77-d71d92a98a52')
-#couch = couchdb.Server('http://127.0.0.1:5984/')
-couch = couchdb.Server('http://192.168.1.113:5984/')
+api_key = 'ff34526e-f794-4068-8235-fcbba38cd8bc'
+server = 'data.disclosurelo.gs'
+
+ckan = ckanclient.CkanClient(base_location='http://'+server+'/api',
+ api_key=api_key)
+ckandirect = ckanapi.RemoteCKAN('http://'+server, api_key=api_key)
+couch = couchdb.Server('http://127.0.0.1:5984/')
+#couch = couchdb.Server('http://192.168.1.113:5984/')
+
+import urllib
+import urlparse
+
+def url_fix(s, charset='utf-8'):
+ """Sometimes you get an URL by a user that just isn't a real
+ URL because it contains unsafe characters like ' ' and so on. This
+ function can fix some of the problems in a similar way browsers
+ handle data entered by the user:
+
+ >>> url_fix(u'http://de.wikipedia.org/wiki/Elf (Begriffsklärung)')
+ 'http://de.wikipedia.org/wiki/Elf%20%28Begriffskl%C3%A4rung%29'
+
+ :param charset: The target charset for the URL if the url was
+ given as unicode string.
+ """
+ if isinstance(s, unicode):
+ s = s.encode(charset, 'ignore')
+ if not urlparse.urlparse(s).scheme:
+ s = "http://"+s
+ scheme, netloc, path, qs, anchor = urlparse.urlsplit(s)
+ path = urllib.quote(path, '/%')
+ qs = urllib.quote_plus(qs, ':&=')
+ return urlparse.urlunsplit((scheme, netloc, path, qs, anchor))
# http://code.activestate.com/recipes/578019-bytes-to-human-human-to-bytes-converter/
SYMBOLS = {
@@ -24,6 +52,7 @@
'iec_ext': ('byte', 'kibi', 'mebi', 'gibi', 'tebi', 'pebi', 'exbi',
'zebi', 'yobi'),
}
+
def human2bytes(s):
"""
@@ -53,6 +82,9 @@
...
ValueError: can't interpret '12 foo'
"""
+ if s is None:
+ return 0
+ s = s.replace(',', '')
init = s
num = ""
while s and s[0:1].isdigit() or s[0:1] == '.':
@@ -91,7 +123,6 @@
def name_munge(input_name):
return munge(input_name.replace(' ', '').replace('.', '_').replace('&', 'and'))
- #return input_name.replace(' ', '').replace('.', '_').replace('&', 'and')
def get_licence_id(licencename):
@@ -112,85 +143,106 @@
raise Exception(licencename + " not found");
return map[licencename];
+
docsdb = couch['disclosr-documents']
if __name__ == "__main__":
+ orgs_list = []
+ orgs_ids = {}
for doc in docsdb.view('app/datasets'):
+ print " --- "
print doc.id
- if doc.value['url'] != "http://data.gov.au/data/":
+
+ if doc.value['url'] != "http://data.gov.au/data/" and doc.value['agencyID'] != "qld":
+
+
# Collect the package metadata.
- pkg_name = doc.value['url'].replace("http://data.gov.au/dataset/",'').replace('/','');
+ pkg_name = filter(lambda x: x in '0123456789abcdefghijklmnopqrstuvwxyz-_',
+ doc.value['url'].replace("http://data.gov.au/dataset/", '').replace('/', '')[:100]);
+ print pkg_name
+ #add to or create organization using direct API
+ agency = doc.value['metadata']["Agency"]
+ if agency == "APS":
+ agency = "Australian Public Service Commission"
+ if agency == "Shared Services, Treasury Directorate":
+ agency = "Shared Services Procurement, Treasury Directorate"
+ if agency == "Treasury - Shared Services":
+ agency = "Shared Services Procurement, Treasury Directorate"
+ if agency == "Territory and Municipal Services (TAMS)":
+ agency = "Territory and Municipal Services Directorate"
+ if agency == "State Library of NSW":
+ agency = "State Library of New South Wales"
+ org_name = name_munge(agency[:100])
+ if org_name not in orgs_list:
+ orgs_list = ckandirect.action.organization_list()['result']
+ #print orgs_list
+ if org_name not in orgs_list:
+ try:
+ print "org not found, creating " + org_name
+ ckandirect.action.organization_create(name=org_name, title=agency,
+ description=agency)
+ orgs_list.append(org_name)
+ except ckanapi.ValidationError, e:
+ print e
+ raise LoaderError('Unexpected status')
+ else:
+ print "org found, adding dataset to " + org_name
+
+ # cache org names -> id mapping
+ if org_name not in orgs_ids:
+ org = ckandirect.action.organization_show(id=org_name)
+ orgs_ids[org_name] = org["result"]["id"]
+ org_id = orgs_ids[org_name]
+ print "org id is "+org_id
tags = []
+ creator = doc.value['metadata']["DCTERMS.Creator"]
if doc.value['agencyID'] == "AGIMO":
if len(doc.value['metadata']["Keywords / Tags"]) > 0:
if hasattr(doc.value['metadata']["Keywords / Tags"], '__iter__'):
tags = tags + doc.value['metadata']["Keywords / Tags"]
else:
tags = tags + [doc.value['metadata']["Keywords / Tags"]]
- if 'data.gov.au Category' in doc.value['metadata'].keys() and len(doc.value['metadata']['data.gov.au Category']) > 0:
- if hasattr(doc.value['metadata']['data.gov.au Category'], '__iter__'):
- tags = tags + doc.value['metadata']['data.gov.au Category']
- else:
- tags = tags + [doc.value['metadata']['data.gov.au Category']]
+
tags = [re.sub('[^a-zA-Z0-9-_.]', '', tag.replace('&', 'and')).lower() for tag in tags if tag]
- print tags
+ #print tags
+ extras = []
+
+ for extra_key in doc.value['metadata'].keys():
+ if extra_key not in ["Description","Content-Language","DCTERMS.Description", "Keywords / Tags" ,"data.gov.au Category", "Download", "Permalink","DCTERMS.Identifier"]:
+ if doc.value['metadata'][extra_key] is not None and doc.value['metadata'][extra_key] != "":
+ extras.append([extra_key, doc.value['metadata'][extra_key]])
+
package_entity = {
'name': pkg_name,
'title': doc.value['metadata']['DCTERMS.Title'],
'url': doc.value['metadata']['DCTERMS.Source.URI'],
'tags': tags, #tags are mandatory?
- 'author': doc.value['metadata']["DCTERMS.Creator"],
- 'maintainer': doc.value['metadata']["DCTERMS.Creator"],
+ 'author': creator,
+ 'maintainer': creator,
'licence_id': get_licence_id(doc.value['metadata']['DCTERMS.License']),
'notes': html2text.html2text(doc.value['metadata']['Description']),
+ 'owner_org': org_id,
+ 'extras': extras
}
- if doc.value['agencyID'] == "qld":
- package_entity = doc.value['metadata']
+
try:
- print package_entity
+ #print package_entity
ckan.package_register_post(package_entity)
except CkanApiError, e:
- if ckan.last_status == 409:
+ if ckan.last_message == "{\"name\": [\"That URL is already in use.\"]}":
print "package already exists"
else:
+ print ckan.last_message
raise LoaderError('Unexpected status %s checking for package under \'%s\': %r' % (
ckan.last_status, pkg_name, e.args))
-
-
- #add to group
-
- group_name = name_munge(doc.value['metadata']["Agency"][:100])
- try:
- print ckan.group_entity_get(group_name)
-
- # Update the group details
- group_entity = ckan.last_message
- print "group exists"
- if 'packages' in group_entity.keys():
- group_entity['packages'] = list(set(group_entity['packages'] + [pkg_name]))
- else:
- group_entity['packages'] = [pkg_name]
- ckan.group_entity_put(group_entity)
- except CkanApiError, e:
- if ckan.last_status == 404:
- print "group does not exist, creating"
- group_entity = {
- 'name': group_name,
- 'title': doc.value['metadata']["Agency"],
- 'description': doc.value['metadata']["Agency"],
- 'packages': [pkg_name],
- # 'type': "organization" # not allowed via API, use database query
- # update "group" set type = 'organization';
- }
- print group_entity
- ckan.group_register_post(group_entity)
- else:
- raise LoaderError('Unexpected status %s adding to group under \'%s\': %r' % (
- ckan.last_status, pkg_name, e.args))
+ pkg = ckan.package_entity_get(pkg_name)
+
+
+ # add resources (downloadable data files)
if 'Download' in doc.value['metadata'].keys():
try:
- pkg = ckan.package_entity_get(pkg_name)
+
resources = pkg.get('resources', [])
if len(resources) < len(doc.value['metadata']['Download']):
for resource in doc.value['metadata']['Download']:
@@ -202,12 +254,17 @@
format = 'xml'
if resource['format'] == '(CSV/XLS)':
format = 'csv'
+ if resource['format'] == '(Shapefile)':
+ format = 'shp'
+ if resource['format'] == '(KML/KMZ)':
+ format = 'kml'
name = resource['href']
if 'name' in resource.keys():
name = resource['name']
print resource
- ckan.add_package_resource(pkg_name, resource['href'], name=name, resource_type='data',
- format=format, size=human2bytes(resource['size'].replace(',', '')))
+ ckan.add_package_resource(pkg_name, url_fix(resource['href']), name=name, resource_type='data',
+ format=format,
+ size=human2bytes(resource.get('size','0B')))
else:
print "resources already exist"
except CkanApiError, e:
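Expected behaviour of the hardened helpers above, assuming url_fix and human2bytes as defined in this file (human2bytes follows the linked ActiveState recipe); the values are worked out by hand, not taken from the repo:

    print url_fix('data.gov.au/dataset/foo bar')  # http://data.gov.au/dataset/foo%20bar
    print human2bytes(None)                       # 0 (new None guard)
    print human2bytes('1,024 KB')                 # 1048576 (commas stripped first)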
--- /dev/null
+++ b/documents/datagov-resourcereport.py
@@ -0,0 +1,79 @@
+import couchdb
+couch = couchdb.Server('http://127.0.0.1:5984/')
+#couch = couchdb.Server('http://192.168.1.113:5984/')
+import urllib
+import urlparse
+import httplib2
+import csv
+import ssl
+
+# permissive SSL context (currently unused: the httplib2 request below
+# passes disable_ssl_certificate_validation=True instead)
+context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
+context.verify_mode = ssl.CERT_NONE
+
+def url_fix(s, charset='utf-8'):
+ """Sometimes you get an URL by a user that just isn't a real
+ URL because it contains unsafe characters like ' ' and so on. This
+ function can fix some of the problems in a similar way browsers
+ handle data entered by the user:
+
+ :param charset: The target charset for the URL if the url was
+ given as unicode string.
+ """
+ if isinstance(s, unicode):
+ s = s.encode(charset, 'ignore')
+ if not urlparse.urlparse(s).scheme:
+ s = "http://"+s
+ scheme, netloc, path, qs, anchor = urlparse.urlsplit(s)
+ path = urllib.quote(path, '/%')
+ qs = urllib.quote_plus(qs, ':&=')
+ return urlparse.urlunsplit((scheme, netloc, path, qs, anchor))
+
+# http://code.activestate.com/recipes/578019-bytes-to-human-human-to-bytes-converter/
+SYMBOLS = {
+ 'customary': ('B', 'KB', 'MB', 'GB', 'T', 'P', 'E', 'Z', 'Y'),
+ 'customary_ext': ('byte', 'kilo', 'mega', 'giga', 'tera', 'peta', 'exa',
+ 'zetta', 'iotta'),
+ 'iec': ('Bi', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi', 'Yi'),
+ 'iec_ext': ('byte', 'kibi', 'mebi', 'gibi', 'tebi', 'pebi', 'exbi',
+ 'zebi', 'yobi'),
+}
+
+
+docsdb = couch['disclosr-documents']
+out = csv.writer(open("output.csv","w"), delimiter=',',quoting=csv.QUOTE_ALL)
+if __name__ == "__main__":
+ for doc in docsdb.view('app/datasets'):
+ if doc.value['url'] != "http://data.gov.au/data/" and doc.value['agencyID'] != "qld":
+ # Collect the package metadata.
+ pkg_name = filter(lambda x: x in '0123456789abcdefghijklmnopqrstuvwxyz-_',
+ doc.value['url'].replace("http://data.gov.au/dataset/", '').replace('/', '')[:100]);
+ if 'Download' in doc.value['metadata'].keys() and len(doc.value['metadata']['Download']) > 0:
+ for resource in doc.value['metadata']['Download']:
+ # http://docs.ckan.org/en/ckan-1.7/domain-model-resource.html
+ # (KML/KMZ) / (Shapefile) /(Other)
+ format = "plain"
+ if resource['format'] == '(XML)':
+ format = 'xml'
+ if resource['format'] == '(CSV/XLS)':
+ format = 'csv'
+ if resource['format'] == '(Shapefile)':
+ format = 'shp'
+ if resource['format'] == '(KML/KMZ)':
+ format = 'kml'
+ name = resource['href']
+ if 'name' in resource.keys():
+ name = resource['name']
+ if resource['href'].startswith("ftp"):
+ out.writerow([pkg_name, url_fix(resource['href']), name,format, "ftp", ""])
+ else:
+ try:
+ h = httplib2.Http(disable_ssl_certificate_validation=True)
+ resp = h.request(url_fix(resource['href']), 'HEAD')
+ content_type = resp[0]['content-type'] if 'content-type' in resp[0].keys() else ""
+ out.writerow([pkg_name, url_fix(resource['href']), name,format, resp[0]['status'], content_type])
+ except httplib2.ServerNotFoundError:
+ out.writerow([pkg_name, url_fix(resource['href']), name,format, "500","badurl"])
+ else:
+ out.writerow([pkg_name])
+
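A minimal sketch of the per-resource link check the report performs, assuming httplib2 and the url_fix above; each CSV row records the same fields the loop writes:

    import httplib2

    h = httplib2.Http(disable_ssl_certificate_validation=True)
    resp, content = h.request(url_fix('data.gov.au/data/'), 'HEAD')
    print resp['status'], resp.get('content-type', '')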
--- a/documents/datagov.py
+++ b/documents/datagov.py
@@ -39,7 +39,9 @@
link = item.find("a")
format = item.find(property="dc:format")
linkobj = {"href":link['href'].replace("/bye?","").strip(),
- "format": format.string.strip(), "size": format.next_sibling.string.strip()}
+ "format": format.string.strip()}
+ if format.next_sibling.string != None:
+ linkobj["size"] = format.next_sibling.string.strip()
if link.string != None:
linkobj["name"] = link.string.strip()
doc['metadata'][last_title].append(linkobj)
--- a/documents/genericScrapers.py
+++ b/documents/genericScrapers.py
@@ -202,7 +202,7 @@
def getDate(self, content, entry, doc):
strdate = ''.join(content.stripped_strings).strip()
(a, b, c) = strdate.partition("(")
- strdate = self.remove_control_chars(a.replace("Octber", "October").replace("Janrurary", "January").replace("1012","2012"))
+ strdate = self.remove_control_chars(a.replace("Octber", "October").replace("Janrurary", "January").replace("1012", "2012"))
print strdate
try:
edate = parse(strdate, dayfirst=True, fuzzy=True).strftime("%Y-%m-%d")
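A hand-worked example of the corrected cleanup chain in getDate, assuming python-dateutil (the sample date is illustrative, not from a real disclosure log):

    from dateutil.parser import parse

    strdate = '3 Octber 1012'.replace('Octber', 'October') \
                             .replace('Janrurary', 'January') \
                             .replace('1012', '2012')
    print parse(strdate, dayfirst=True, fuzzy=True).strftime('%Y-%m-%d')  # 2012-10-03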
--- a/documents/runScrapers.sh
+++ b/documents/runScrapers.sh
@@ -1,3 +1,5 @@
+DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+cd "$DIR"
echo "" > /tmp/disclosr-error
for f in scrapers/*.py; do
echo "Processing $f file..";
--- a/documents/scrapers/0049d35216493c545ef5f7f000e6b252.py
+++ b/documents/scrapers/0049d35216493c545ef5f7f000e6b252.py
@@ -42,7 +42,6 @@
'data': {'request': '', 'session': '', 'more': ''}
}
-
- amonpy.exception(data)
+ #amonpy.exception(data)
pass
--- a/documents/scrapers/3d5871a44abbbc81ef5b3a420070755d.py
+++ b/documents/scrapers/3d5871a44abbbc81ef5b3a420070755d.py
@@ -8,42 +8,14 @@
from datetime import *
#http://www.doughellmann.com/PyMOTW/abc/
-class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
- def getTable(self,soup):
- return soup.find(class_ = "inner-column").table
- def getRows(self,table):
- return table.tbody.find_all('tr',recursive=False)
+class ScraperImplementation(genericScrapers.GenericHTMLDisclogScraper):
def getColumnCount(self):
- return 3
- def getColumns(self,columns):
- (date, title, description) = columns
- return (date, date, title, description, None)
- def getDate(self, content, entry, doc):
- i = 0
- date = ""
- for string in content.stripped_strings:
- if i ==1:
- date = string
- i = i+1
- edate = parse(date, dayfirst=True, fuzzy=True).strftime("%Y-%m-%d")
- print edate
- doc.update({'date': edate})
- return
- def getTitle(self, content, entry, doc):
- i = 0
- title = ""
- for string in content.stripped_strings:
- if i < 2:
- title = title + string
- i = i+1
- doc.update({'title': title})
- #print title
- return
+ return 0
if __name__ == '__main__':
#http://www.csiro.au/Portals/About-CSIRO/How-we-work/Governance/FOI-Request-Disclosure-Log-2012-13.aspx
#http://www.csiro.au/Portals/About-CSIRO/How-we-work/Governance/FOI-Request-Disclosure-Log-2011-12.aspx
- print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
- print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
+ print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericHTMLDisclogScraper)
+ print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericHTMLDisclogScraper)
ScraperImplementation().doScrape()
--- a/documents/scrapers/6fa04af95fbe7de96daa2c7560e0aad3.py
+++ b/documents/scrapers/6fa04af95fbe7de96daa2c7560e0aad3.py
@@ -6,8 +6,6 @@
#http://www.doughellmann.com/PyMOTW/abc/
class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
- def getTable(self,soup):
- return soup.find(id = "content_div_50269").table
def getColumns(self,columns):
(id, date, title, description, notes) = columns
return (id, date, title, description, notes)
--- a/documents/scrapers/7c6adc1d41cf029bf1a0959e5156477a.py
+++ b/documents/scrapers/7c6adc1d41cf029bf1a0959e5156477a.py
@@ -21,7 +21,7 @@
d.make_links_absolute(base_url = self.getURL())
for table in d('table').items():
title= table('thead').text()
- print title
+ print self.remove_control_chars(title)
(idate,descA,descB,link,deldate,notes) = table('tbody tr').map(lambda i, e: pq(e).children().eq(1).text())
links = table('a').map(lambda i, e: pq(e).attr('href'))
description = descA+" "+descB
--- /dev/null
+++ b/documents/scrapers/b0ca7fddcd1c965787daea47f2d32e0a.py
@@ -0,0 +1,17 @@
+import sys,os
+sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
+import genericScrapers
+import scrape
+from bs4 import BeautifulSoup
+#http://www.doughellmann.com/PyMOTW/abc/
+class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
+ def getColumns(self,columns):
+ (id, date, title, description, notes) = columns
+ return (id, date, title, description, notes)
+
+if __name__ == '__main__':
+ print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
+ print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
+ ScraperImplementation().doScrape()
+