enforce valid package URL
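
CKAN rejects dataset names that are not purely lowercase alphanumeric
(ascii) characters plus the symbols '-' and '_', so package names derived
from data.gov.au URLs are now filtered before registration. A rough sketch
of the sanitisation applied in datagov-export.py (illustrative only):

    url = "http://data.gov.au/dataset/australian-domestic-regional-and-international-airline-activity-%E2%80%93-time-series/"
    slug = url.replace("http://data.gov.au/dataset/", '').replace('/', '')
    pkg_name = filter(lambda x: x in '0123456789abcdefghijklmnopqrstuvwxyz-_', slug[:100])
    # only lowercase alphanumerics, '-' and '_' survive, capped at 100 characters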
Former-commit-id: 26accf46eff2958223ffae9a1234d4291379bce4
--- a/documents/datagov-export.py
+++ b/documents/datagov-export.py
@@ -3,6 +3,7 @@
from ckanclient import CkanApiError
import re
import html2text # aaronsw :(
+import ckanapi
class LoaderError(Exception):
@@ -10,10 +11,12 @@
# Instantiate the CKAN client.
#ckan = ckanclient.CkanClient(base_location='http://localhost:5000/api', api_key='b47b24cd-591d-40c1-8677-d73101d56d1b')
+api_key = 'aeab80a6-0a58-4026-96a4-c07e57f9c434'
ckan = ckanclient.CkanClient(base_location='http://data.disclosurelo.gs/api',
- api_key='482a9dd2-a976-4adf-ac77-d71d92a98a52')
-#couch = couchdb.Server('http://127.0.0.1:5984/')
-couch = couchdb.Server('http://192.168.1.113:5984/')
+ api_key=api_key)
+ckandirect = ckanapi.RemoteCKAN('http://data.disclosurelo.gs', api_key=api_key)
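+# second client via ckanapi for direct action-API calls (used by the organisation-handling code below, currently commented out)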
+couch = couchdb.Server('http://127.0.0.1:5984/')
+#couch = couchdb.Server('http://192.168.1.113:5984/')
# http://code.activestate.com/recipes/578019-bytes-to-human-human-to-bytes-converter/
SYMBOLS = {
@@ -88,9 +91,11 @@
name = re.sub('__', '_', name).lower()
return name
-
+#todo "{'name': [u'Url must be purely lowercase alphanumeric (ascii) characters and these symbols: -_']}"
+# http://data.gov.au/dataset/australian-domestic-regional-and-international-airline-activity-%E2%80%93-time-series/
def name_munge(input_name):
- return munge(input_name.replace(' ', '').replace('.', '_').replace('&', 'and'))
+ return munge(input_name.replace(' ', '').replace('.', '_').replace('&', 'and'))
+ #[:100]
#return input_name.replace(' ', '').replace('.', '_').replace('&', 'and')
@@ -115,11 +120,12 @@
docsdb = couch['disclosr-documents']
if __name__ == "__main__":
+ orgs_list = []
for doc in docsdb.view('app/datasets'):
print doc.id
- if doc.value['url'] != "http://data.gov.au/data/":
+ if doc.value['url'] != "http://data.gov.au/data/" and doc.value['agencyID'] != "qld":
# Collect the package metadata.
-            pkg_name = doc.value['url'].replace("http://data.gov.au/dataset/",'').replace('/','');
+ pkg_name = filter( lambda x: x in '0123456789abcdefghijklmnopqrstuvwxyz-_',doc.value['url'].replace("http://data.gov.au/dataset/",'').replace('/','')[:100]);
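+            # pkg_name keeps only lowercase alphanumerics, '-' and '_' (max 100 chars), the characters CKAN allows in dataset names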
tags = []
if doc.value['agencyID'] == "AGIMO":
if len(doc.value['metadata']["Keywords / Tags"]) > 0:
@@ -133,7 +139,7 @@
else:
tags = tags + [doc.value['metadata']['data.gov.au Category']]
tags = [re.sub('[^a-zA-Z0-9-_.]', '', tag.replace('&', 'and')).lower() for tag in tags if tag]
- print tags
+ #print tags
package_entity = {
'name': pkg_name,
'title': doc.value['metadata']['DCTERMS.Title'],
@@ -148,15 +154,16 @@
package_entity = doc.value['metadata']
try:
- print package_entity
+ #print package_entity
ckan.package_register_post(package_entity)
except CkanApiError, e:
- if ckan.last_status == 409:
+ if ckan.last_message == "{\"name\": [\"That URL is already in use.\"]}":
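+                    # this exact message is what CKAN returns when the package name is already taken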
print "package already exists"
else:
+ print ckan.last_message
raise LoaderError('Unexpected status %s checking for package under \'%s\': %r' % (
ckan.last_status, pkg_name, e.args))
-
+ pkg = ckan.package_entity_get(pkg_name)
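+            # fetch the package (newly created or pre-existing) so groups, organisations and resources can be attached below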
#add to group
@@ -185,12 +192,52 @@
}
print group_entity
ckan.group_register_post(group_entity)
+ elif ckan.last_status == 409:
+ print "group already exists"
else:
raise LoaderError('Unexpected status %s adding to group under \'%s\': %r' % (
ckan.last_status, pkg_name, e.args))
+
+ # #add to or create organization using direct API
+ # org_name = name_munge(doc.value['metadata']["Agency"][:100])
+ # if org_name not in orgs_list:
+ # orgs_list = ckandirect.action.organization_list()['result']
+ # print orgs_list
+ # if org_name not in orgs_list:
+ # try:
+ # print "org not found, creating "+org_name
+ # ckandirect.action.organization_create(name = org_name, title= doc.value['metadata']["Agency"],
+ # description= doc.value['metadata']["Agency"])
+ # orgs_list.append(org_name)
+ # except ckanapi.ValidationError, e:
+ # print e
+ # raise LoaderError('Unexpected status')
+ #
+ # try:
+ # print "linking package to "+org_name
+ #
+ # #brute force
+ # org = ckandirect.action.organization_show(id=org_name)
+ # #print org["result"]["packages"]
+ # #print pkg
+ # org["result"]["packages"].append({"id":pkg['id'], "name": pkg_name})
+ # #print org["result"]
+ # ckandirect.call_action(action="organization_update", data_dict=org["result"])
+ #
+ # # if only this worked
+ # print ckandirect.action.package_owner_org_update(id=pkg['id'],organization_id=org["result"]["id"])
+ #
+ # except ckanapi.ValidationError, e:
+ # print e
+ # raise LoaderError('Unexpected status')
+ # except ckanapi.CKANAPIError, e:
+ # print "lol"
+
+
+
if 'Download' in doc.value['metadata'].keys():
try:
- pkg = ckan.package_entity_get(pkg_name)
+
resources = pkg.get('resources', [])
if len(resources) < len(doc.value['metadata']['Download']):
for resource in doc.value['metadata']['Download']:
--- a/documents/genericScrapers.py
+++ b/documents/genericScrapers.py
@@ -202,7 +202,7 @@
def getDate(self, content, entry, doc):
strdate = ''.join(content.stripped_strings).strip()
(a, b, c) = strdate.partition("(")
- strdate = self.remove_control_chars(a.replace("Octber", "October").replace("Janrurary", "January").replace("1012","2012"))
+        strdate = self.remove_control_chars(a.replace("Octber", "October").replace("1012", "2012").replace("Janrurary", "January"))
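+        # the chained replaces above fix recurring typos in scraped dates ("Octber", "Janrurary", "1012") before parsing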
print strdate
try:
edate = parse(strdate, dayfirst=True, fuzzy=True).strftime("%Y-%m-%d")
--- a/documents/scrapers/0049d35216493c545ef5f7f000e6b252.py
+++ b/documents/scrapers/0049d35216493c545ef5f7f000e6b252.py
@@ -42,7 +42,6 @@
'data': {'request': '', 'session': '', 'more': ''}
}
-
- amonpy.exception(data)
+ #amonpy.exception(data)
pass
--- a/documents/scrapers/6fa04af95fbe7de96daa2c7560e0aad3.py
+++ b/documents/scrapers/6fa04af95fbe7de96daa2c7560e0aad3.py
@@ -6,8 +6,6 @@
#http://www.doughellmann.com/PyMOTW/abc/
class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
- def getTable(self,soup):
- return soup.find(id = "content_div_50269").table
def getColumns(self,columns):
(id, date, title, description, notes) = columns
return (id, date, title, description, notes)
--- /dev/null
+++ b/documents/scrapers/b0ca7fddcd1c965787daea47f2d32e0a.py
@@ -0,0 +1,17 @@
+import sys,os
+sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
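+# make the parent documents/ directory importable so genericScrapers and scrape resolve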
+import genericScrapers
+import scrape
+from bs4 import BeautifulSoup
+#http://www.doughellmann.com/PyMOTW/abc/
+class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
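+    # relies on the generic OAIC disclosure-log scraper defaults; columns pass through unchanged as (id, date, title, description, notes)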
+ def getColumns(self,columns):
+ (id, date, title, description, notes) = columns
+ return (id, date, title, description, notes)
+
+if __name__ == '__main__':
+ print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
+ print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
+ ScraperImplementation().doScrape()
+