Experimental organization support
Former-commit-id: 8d69f7e95445481fa0fff75ee0def014614268c8
--- a/documents/datagov-export.py
+++ b/documents/datagov-export.py
@@ -3,6 +3,7 @@
from ckanclient import CkanApiError
import re
import html2text # aaronsw :(
+import ckanapi
class LoaderError(Exception):
@@ -10,10 +11,12 @@
# Instantiate the CKAN client.
#ckan = ckanclient.CkanClient(base_location='http://localhost:5000/api', api_key='b47b24cd-591d-40c1-8677-d73101d56d1b')
+api_key = 'aeab80a6-0a58-4026-96a4-c07e57f9c434'
ckan = ckanclient.CkanClient(base_location='http://data.disclosurelo.gs/api',
- api_key='482a9dd2-a976-4adf-ac77-d71d92a98a52')
-#couch = couchdb.Server('http://127.0.0.1:5984/')
-couch = couchdb.Server('http://192.168.1.113:5984/')
+ api_key=api_key)
+ckandirect = ckanapi.RemoteCKAN('http://data.disclosurelo.gs', api_key=api_key)
+couch = couchdb.Server('http://127.0.0.1:5984/')
+#couch = couchdb.Server('http://192.168.1.113:5984/')
# http://code.activestate.com/recipes/578019-bytes-to-human-human-to-bytes-converter/
SYMBOLS = {
@@ -91,6 +94,7 @@
def name_munge(input_name):
return munge(input_name.replace(' ', '').replace('.', '_').replace('&', 'and'))
+ #[:100]
#return input_name.replace(' ', '').replace('.', '_').replace('&', 'and')
@@ -115,11 +119,12 @@
docsdb = couch['disclosr-documents']
if __name__ == "__main__":
+ orgs_list = []
for doc in docsdb.view('app/datasets'):
print doc.id
- if doc.value['url'] != "http://data.gov.au/data/":
+ if doc.value['url'] != "http://data.gov.au/data/" and doc.value['agencyID'] != "qld":
# Collect the package metadata.
- pkg_name = doc.value['url'].replace("http://data.gov.au/dataset/",'').replace('/',''); _
+ pkg_name = doc.value['url'].replace("http://data.gov.au/dataset/",'').replace('/','')[:100];
tags = []
if doc.value['agencyID'] == "AGIMO":
if len(doc.value['metadata']["Keywords / Tags"]) > 0:
@@ -133,7 +138,7 @@
else:
tags = tags + [doc.value['metadata']['data.gov.au Category']]
tags = [re.sub('[^a-zA-Z0-9-_.]', '', tag.replace('&', 'and')).lower() for tag in tags if tag]
- print tags
+ #print tags
package_entity = {
'name': pkg_name,
'title': doc.value['metadata']['DCTERMS.Title'],
@@ -148,49 +153,88 @@
package_entity = doc.value['metadata']
try:
- print package_entity
+ #print package_entity
ckan.package_register_post(package_entity)
except CkanApiError, e:
- if ckan.last_status == 409:
+ if ckan.last_message == "{\"name\": [\"That URL is already in use.\"]}":
print "package already exists"
else:
+ print ckan.last_message
raise LoaderError('Unexpected status %s checking for package under \'%s\': %r' % (
ckan.last_status, pkg_name, e.args))
-
+ pkg = ckan.package_entity_get(pkg_name)
#add to group
- group_name = name_munge(doc.value['metadata']["Agency"][:100])
+ # group_name = name_munge(doc.value['metadata']["Agency"][:100])
+ # try:
+ # print ckan.group_entity_get(group_name)
+ #
+ # # Update the group details
+ # group_entity = ckan.last_message
+ # print "group exists"
+ # if 'packages' in group_entity.keys():
+ # group_entity['packages'] = list(set(group_entity['packages'] + [pkg_name]))
+ # else:
+ # group_entity['packages'] = [pkg_name]
+ # ckan.group_entity_put(group_entity)
+ # except CkanApiError, e:
+ # if ckan.last_status == 404:
+ # print "group does not exist, creating"
+ # group_entity = {
+ # 'name': group_name,
+ # 'title': doc.value['metadata']["Agency"],
+ # 'description': doc.value['metadata']["Agency"],
+ # 'packages': [pkg_name],
+ # # 'type': "organization" # not allowed via API, use database query
+ # # update "group" set type = 'organization';
+ # }
+ # print group_entity
+ # ckan.group_register_post(group_entity)
+ # elif ckan.last_status == 409:
+ # print "group already exists"
+ # else:
+ # raise LoaderError('Unexpected status %s adding to group under \'%s\': %r' % (
+ # ckan.last_status, pkg_name, e.args))
+
+ #add to or create organization using direct API
+ org_name = name_munge(doc.value['metadata']["Agency"][:100])
+ if org_name not in orgs_list:
+ orgs_list = ckandirect.action.organization_list()['result']
+ print orgs_list
+ if org_name not in orgs_list:
+ try:
+ print "org not found, creating "+org_name
+ ckandirect.action.organization_create(name = org_name, title= doc.value['metadata']["Agency"],
+ description= doc.value['metadata']["Agency"])
+ orgs_list.append(org_name)
+ except ckanapi.ValidationError, e:
+ print e
+ raise LoaderError('Unexpected status')
+
try:
- print ckan.group_entity_get(group_name)
-
- # Update the group details
- group_entity = ckan.last_message
- print "group exists"
- if 'packages' in group_entity.keys():
- group_entity['packages'] = list(set(group_entity['packages'] + [pkg_name]))
- else:
- group_entity['packages'] = [pkg_name]
- ckan.group_entity_put(group_entity)
- except CkanApiError, e:
- if ckan.last_status == 404:
- print "group does not exist, creating"
- group_entity = {
- 'name': group_name,
- 'title': doc.value['metadata']["Agency"],
- 'description': doc.value['metadata']["Agency"],
- 'packages': [pkg_name],
- # 'type': "organization" # not allowed via API, use database query
- # update "group" set type = 'organization';
- }
- print group_entity
- ckan.group_register_post(group_entity)
- else:
- raise LoaderError('Unexpected status %s adding to group under \'%s\': %r' % (
- ckan.last_status, pkg_name, e.args))
+ print "linking package to "+org_name
+
+ #brute force
+ org = ckandirect.action.organization_show(id=org_name)
+ #print org["result"]["packages"]
+ #print pkg
+ org["result"]["packages"].append({"id":pkg['id'], "name": pkg_name})
+ #print org["result"]
+ ckandirect.call_action(action="organization_update", data_dict=org["result"])
+
+ # if only this worked
+ #print ckandirect.action.package_owner_org_update(id=pkg['id'],organization_id=org["result"]["id"])
+
+ except ckanapi.ValidationError, e:
+ print e
+ raise LoaderError('Unexpected status')
+ #except ckanapi.CKANAPIError, e:
+ # print "lol"
+
if 'Download' in doc.value['metadata'].keys():
try:
- pkg = ckan.package_entity_get(pkg_name)
+
resources = pkg.get('resources', [])
if len(resources) < len(doc.value['metadata']['Download']):
for resource in doc.value['metadata']['Download']:
--- a/documents/genericScrapers.py
+++ b/documents/genericScrapers.py
@@ -202,7 +202,7 @@
def getDate(self, content, entry, doc):
strdate = ''.join(content.stripped_strings).strip()
(a, b, c) = strdate.partition("(")
- strdate = self.remove_control_chars(a.replace("Octber", "October").replace("Janrurary", "January").replace("1012","2012"))
+ strdate = self.remove_control_chars(a.replace("Octber", "October").replace("Janrurary", "January").replace("1012","2012"))
print strdate
try:
edate = parse(strdate, dayfirst=True, fuzzy=True).strftime("%Y-%m-%d")
--- a/documents/runScrapers.sh
+++ b/documents/runScrapers.sh
@@ -1,3 +1,5 @@
+DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+cd "$DIR"
echo "" > /tmp/disclosr-error
for f in scrapers/*.py; do
echo "Processing $f file..";
--- a/documents/scrapers/0049d35216493c545ef5f7f000e6b252.py
+++ b/documents/scrapers/0049d35216493c545ef5f7f000e6b252.py
@@ -42,7 +42,6 @@
'data': {'request': '', 'session': '', 'more': ''}
}
-
- amonpy.exception(data)
+ #amonpy.exception(data)
pass
--- a/documents/scrapers/3d5871a44abbbc81ef5b3a420070755d.py
+++ b/documents/scrapers/3d5871a44abbbc81ef5b3a420070755d.py
@@ -8,42 +8,14 @@
from datetime import *
#http://www.doughellmann.com/PyMOTW/abc/
-class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
- def getTable(self,soup):
- return soup.find(class_ = "inner-column").table
- def getRows(self,table):
- return table.tbody.find_all('tr',recursive=False)
+class ScraperImplementation(genericScrapers.GenericHTMLDisclogScraper):
def getColumnCount(self):
- return 3
- def getColumns(self,columns):
- (date, title, description) = columns
- return (date, date, title, description, None)
- def getDate(self, content, entry, doc):
- i = 0
- date = ""
- for string in content.stripped_strings:
- if i ==1:
- date = string
- i = i+1
- edate = parse(date, dayfirst=True, fuzzy=True).strftime("%Y-%m-%d")
- print edate
- doc.update({'date': edate})
- return
- def getTitle(self, content, entry, doc):
- i = 0
- title = ""
- for string in content.stripped_strings:
- if i < 2:
- title = title + string
- i = i+1
- doc.update({'title': title})
- #print title
- return
+ return 0
if __name__ == '__main__':
#http://www.csiro.au/Portals/About-CSIRO/How-we-work/Governance/FOI-Request-Disclosure-Log-2012-13.aspx
#http://www.csiro.au/Portals/About-CSIRO/How-we-work/Governance/FOI-Request-Disclosure-Log-2011-12.aspx
- print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
- print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
+ print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericHTMLDisclogScraper)
+ print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericHTMLDisclogScraper)
ScraperImplementation().doScrape()
--- a/documents/scrapers/6fa04af95fbe7de96daa2c7560e0aad3.py
+++ b/documents/scrapers/6fa04af95fbe7de96daa2c7560e0aad3.py
@@ -6,8 +6,6 @@
#http://www.doughellmann.com/PyMOTW/abc/
class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
- def getTable(self,soup):
- return soup.find(id = "content_div_50269").table
def getColumns(self,columns):
(id, date, title, description, notes) = columns
return (id, date, title, description, notes)
--- a/documents/scrapers/7c6adc1d41cf029bf1a0959e5156477a.py
+++ b/documents/scrapers/7c6adc1d41cf029bf1a0959e5156477a.py
@@ -21,7 +21,7 @@
d.make_links_absolute(base_url = self.getURL())
for table in d('table').items():
title= table('thead').text()
- print title
+ print self.remove_control_chars(title)
(idate,descA,descB,link,deldate,notes) = table('tbody tr').map(lambda i, e: pq(e).children().eq(1).text())
links = table('a').map(lambda i, e: pq(e).attr('href'))
description = descA+" "+descB
--- /dev/null
+++ b/documents/scrapers/b0ca7fddcd1c965787daea47f2d32e0a.py
@@ -1,1 +1,17 @@
+import sys,os
+sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
+import genericScrapers
+import scrape
+from bs4 import BeautifulSoup
+#http://www.doughellmann.com/PyMOTW/abc/
+class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
+ def getColumns(self,columns):
+ (id, date, title, description, notes) = columns
+ return (id, date, title, description, notes)
+
+if __name__ == '__main__':
+ print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
+ print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
+ ScraperImplementation().doScrape()
+