load datasets into scraper cache then into ckan filestore


Former-commit-id: ef39f297007c1ad1e7edee2c2819723b076ae3f4
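
# --- group loader: builds group -> package mappings from the CouchDB
# 'app/datasetGroups' view and creates or updates the matching CKAN groups ---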

import ckanclient
import couchdb
from ckanclient import CkanApiError
import re
   
   
class LoaderError(Exception):
    pass
   
# Instantiate the CKAN client.
#ckan = ckanclient.CkanClient(base_location='http://localhost:5000/api', api_key='b47b24cd-591d-40c1-8677-d73101d56d1b')
api_key = 'ff34526e-f794-4068-8235-fcbba38cd8bc'
ckan = ckanclient.CkanClient(base_location='http://data.disclosurelo.gs/api',
                             api_key=api_key)
couch = couchdb.Server('http://127.0.0.1:5984/')
#couch = couchdb.Server('http://192.168.1.113:5984/')
   
# https://github.com/okfn/ckanext-importlib
def munge(name):
    # convert spaces to underscores
    name = re.sub(' ', '_', name).lower()
    # convert symbols to dashes
    name = re.sub('[:]', '_-', name).lower()
    name = re.sub('[/]', '-', name).lower()
    # take out not-allowed characters
    name = re.sub('[^a-zA-Z0-9-_]', '', name).lower()
    # remove double underscores
    name = re.sub('__', '_', name).lower()
    return name
   
   
def name_munge(input_name):
    return munge(input_name.replace(' ', '').replace('.', '_').replace('&', 'and'))
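
# For illustration (not from the source): name_munge("Health & Hospitals: Data / 2012")
# returns 'healthandhospitals_-data-2012' (spaces stripped, '&' -> 'and', ':' -> '_-', '/' -> '-').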
   
   
docsdb = couch['disclosr-documents']
   
if __name__ == "__main__":
    groups = {}
    for doc in docsdb.view('app/datasetGroups'):
        group_name = doc.key
        if group_name != "Not specified":
            pkg_name = filter(lambda x: x in '0123456789abcdefghijklmnopqrstuvwxyz-_',
                              doc.value.replace("http://data.gov.au/dataset/", '').replace('/', '')[:100])
            if group_name in groups.keys():
                groups[group_name] = list(set(groups[group_name] + [pkg_name]))
            else:
                groups[group_name] = [pkg_name]
   
    # add dataset to group(s)
    for group_name in groups.keys():
        if group_name != "Not specified":
            group_url = name_munge(group_name[:100])
            print group_name
            print groups[group_name]
            try:
                # Update the group details
                group_entity = ckan.group_entity_get(group_url)
                print "group " + group_name + " exists"
                if 'packages' in group_entity.keys():
                    group_entity['packages'] = list(set(group_entity['packages'] + groups[group_name]))
                else:
                    group_entity['packages'] = groups[group_name]
                ckan.group_entity_put(group_entity)
            except CkanApiError, e:
                if ckan.last_status == 404:
                    print "group " + group_name + " does not exist, creating"
                    group_entity = {
                        'name': group_url,
                        'title': group_name,
                        'description': group_name,
                        'packages': groups[group_name]
                    }
                    #print group_entity
                    ckan.group_register_post(group_entity)
                elif ckan.last_status == 409:
                    print "group already exists"
                else:
                    raise LoaderError('Unexpected status %s adding to group under \'%s\': %r' % (
                        ckan.last_status, group_name, e.args))
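
# --- dataset loader: creates CKAN organizations and packages from scraped
# data.gov.au metadata in CouchDB, then uploads a cached copy of each
# resource to the CKAN FileStore ---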
   
# coding=utf-8
import ckanclient
import couchdb
from ckanclient import CkanApiError
import re
import html2text # aaronsw :(
import ckanapi # https://github.com/open-data/ckanapi
import scrape
import datetime, os, hashlib
   
   
class LoaderError(Exception):
    pass
   
   
def add_package_resource_cachedurl(ckan, package_name, url, name, format, size, **kwargs):
    # fileupload: push the scraped copy of the resource into the CKAN FileStore
    ts = datetime.datetime.isoformat(datetime.datetime.now()).replace(':', '').split('.')[0]

    file_key = os.path.join(ts, name)

    auth_dict = ckan.storage_auth_get('/form/' + file_key, {})

    fields = [(kv['name'].encode('ascii'), kv['value'].encode('ascii'))
              for kv in auth_dict['fields']]
    # fetch the resource via the scraper cache rather than hitting the source again
    (url, mime_type, content) = scrape.fetchURL(scrape.docsdb,
                                                url, "dataset_resource", "AGIMO", False)

    files = [('file', os.path.basename(file_key), content)]

    errcode, body = ckan._post_multipart(auth_dict['action'].encode('ascii'), fields, files)

    if errcode == 200:
        file_metadata = ckan.storage_metadata_get(file_key)
        (url, msg) = file_metadata['_location'], ''
    else:
        (url, msg) = '', body
    # fileupload done

    if url == '':
        raise CkanApiError(msg)
    m = hashlib.sha1(content)
    #todo mime-type detection based on content
    r = dict(name=name,
             mimetype=mime_type,
             hash=m.hexdigest(), size=size, url=url)

    r.update(kwargs)
    if not r.has_key('name'): r['name'] = url

    p = ckan.package_entity_get(package_name)
    p['resources'].append(r)
    return ckan.package_entity_put(p)
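
# Illustrative call (values assumed, not from the source): fetch a CSV via the
# scraper cache, upload it to the FileStore and attach it to an existing package:
#   add_package_resource_cachedurl(ckan, 'example-package',
#                                  'http://www.example.gov.au/data.csv',
#                                  'data.csv', 'csv', 1024, resource_type='data')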
   
   
# Instantiate the CKAN client.
api_key = 'ff34526e-f794-4068-8235-fcbba38cd8bc'
server = 'data.disclosurelo.gs'

ckan = ckanclient.CkanClient(base_location='http://' + server + '/api',
                             api_key=api_key)
ckandirect = ckanapi.RemoteCKAN('http://' + server, api_key=api_key)
couch = couchdb.Server('http://127.0.0.1:5984/')
#couch = couchdb.Server('http://192.168.1.113:5984/')
   
  import urllib
  import urlparse
   
   
def url_fix(s, charset='utf-8'):
    """Sometimes you get a URL from a user that just isn't a real
    URL because it contains unsafe characters like ' ' and so on. This
    function can fix some of the problems in a similar way browsers
    handle data entered by the user:

    >>> url_fix(u'http://de.wikipedia.org/wiki/Elf (Begriffsklärung)')
    'http://de.wikipedia.org/wiki/Elf%20%28Begriffskl%C3%A4rung%29'

    :param charset: The target charset for the URL if the url was
        given as a unicode string.
    """
    if isinstance(s, unicode):
        s = s.encode(charset, 'ignore')
    if not urlparse.urlparse(s).scheme:
        s = "http://" + s
    scheme, netloc, path, qs, anchor = urlparse.urlsplit(s)
    path = urllib.quote(path, '/%')
    qs = urllib.quote_plus(qs, ':&=')
    return urlparse.urlunsplit((scheme, netloc, path, qs, anchor))
   
# http://code.activestate.com/recipes/578019-bytes-to-human-human-to-bytes-converter/
SYMBOLS = {
    'customary': ('B', 'KB', 'MB', 'GB', 'T', 'P', 'E', 'Z', 'Y'),
    'customary_ext': ('byte', 'kilo', 'mega', 'giga', 'tera', 'peta', 'exa',
                      'zetta', 'iotta'),
    'iec': ('Bi', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi', 'Yi'),
    'iec_ext': ('byte', 'kibi', 'mebi', 'gibi', 'tebi', 'pebi', 'exbi',
                'zebi', 'yobi'),
}
   
   
def human2bytes(s):
    """
    Attempts to guess the string format based on default symbols
    set and return the corresponding bytes as an integer.
    When unable to recognize the format ValueError is raised.

    >>> human2bytes('0 B')
    0
    >>> human2bytes('1 K')
    1024
    >>> human2bytes('1 M')
    1048576
    >>> human2bytes('1 Gi')
    1073741824
    >>> human2bytes('1 tera')
    1099511627776

    >>> human2bytes('0.5kilo')
    512
    >>> human2bytes('0.1 byte')
    0
    >>> human2bytes('1 k') # k is an alias for K
    1024
    >>> human2bytes('12 foo')
    Traceback (most recent call last):
        ...
    ValueError: can't interpret '12 foo'
    """
    if s is None:
        return 0
    s = s.replace(',', '')
    init = s
    num = ""
    while s and s[0:1].isdigit() or s[0:1] == '.':
        num += s[0]
        s = s[1:]
    num = float(num)
    letter = s.strip()
    for name, sset in SYMBOLS.items():
        if letter in sset:
            break
    else:
        if letter.upper() in ('K', 'M', 'G'):
            # treat bare 'K'/'M'/'G' (and lowercase 'k', as per http://goo.gl/kTQMs)
            # as aliases for the 'KB'/'MB'/'GB' entries in the customary set
            sset = SYMBOLS['customary']
            letter = letter.upper() + 'B'
        else:
            raise ValueError("can't interpret %r" % init)
    prefix = {sset[0]: 1}
    for i, s in enumerate(sset[1:]):
        prefix[s] = 1 << (i + 1) * 10
    return int(num * prefix[letter])
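
# Size strings scraped from data.gov.au look like '52KB' or '4.9MB' (illustrative
# values); with the symbol set above, human2bytes('4.9MB') == 5138022.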
   
# https://github.com/okfn/ckanext-importlib
def munge(name):
    # convert spaces to underscores
    name = re.sub(' ', '_', name).lower()
    # convert symbols to dashes
    name = re.sub('[:]', '_-', name).lower()
    name = re.sub('[/]', '-', name).lower()
    # take out not-allowed characters
    name = re.sub('[^a-zA-Z0-9-_]', '', name).lower()
    # remove double underscores
    name = re.sub('__', '_', name).lower()
    return name


def name_munge(input_name):
    return munge(input_name.replace(' ', '').replace('.', '_').replace('&', 'and'))
   
   
def get_licence_id(licencename):
    map = {
        "Creative Commons - Attribution-Share Alike 2.0 Australia (CC-SA)\nThe downloadable version of the database is licensed under CC-BY-SA Creative Commons Attribution Share Alike and contains only the database fields that are released under that license. These fields are object title, object number, object description as well as temporal, spatial and dimension details. It also contains a persistent URL for each record.": 'cc-by-sa',
        "CreativeCommonsAttributionNonCommercial30AustraliaCCBYNC30": 'cc-nc',
        'Otherpleasespecify': 'notspecified',
        '': 'notspecified',
        "Publicly available data": 'notspecified',
        "CreativeCommonsAttributionNoDerivativeWorks30AustraliaCCBYND30": "other-closed",
        "CreativeCommonsAttributionNonCommercialNoDerivs30AustraliaCCBYNCND30": "other-closed",
        'CreativeCommonsAttribution30AustraliaCCBY30': 'cc-by',
        "Creative Commons - Attribution 2.5 Australia (CC-BY)": 'cc-by',
        'CreativeCommonsAttributionCCBY25': 'cc-by',
        "PublicDomain": 'other-pd',
    }
    if licencename not in map.keys():
        raise Exception(licencename + " not found")
    return map[licencename]
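
# Example from the table above: get_licence_id('CreativeCommonsAttribution30AustraliaCCBY30') returns 'cc-by'.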
   
   
# gooddata: whitelist of dataset names to actually load; the full candidate
# list is kept commented out below for reference.
gooddata = ["afl-in-victoria", "annual-budget-initiatives-by-suburb-brisbane-city-council"]
#athletics-in-victoria-gfyl,bicycle-racks-mosman-municipal-council,boat-ramps-brisbane-city-council,brisbane-access-ratings-database,bus-stops-brisbane-city-council,cemeteries-brisbane-city-council,cfa-locations,citycycle-stations-brisbane-city-council,community-gardens-brisbane-city-council,community-halls-brisbane-city-council,cooking-classes-gfyl,court-locations-victoria,customer-service-centres-brisbane-city-council,dance-in-victoria-gfyl,disability-activity-gfyl,dog-parks-brisbane-city-council,ferry-terminals-brisbane-city-council,fishing-club-in-victoria-gfyl,fitness-centres-in-victoria-gfyl,gardens-reserves-gfyl,golf-courses-brisbane-city-council,gymnastics-in-victoria-gfyl,historic-cemeteries-brisbane-city-council,ice-skating-centres-gfyl,immunisation-clinics-brisbane-city-council,libraries-brisbane-city-council,licenced-venues-victoria,lifesaving-locations-victoria,loading-zones-brisbane-city-council,major-projects-victoria,markets-in-victoria,martial-arts-in-victoria-gfyl,melbourne-water-use-by-postcode,members-of-parliament-both-houses-nsw,members-of-the-legislative-assembly-nsw,members-of-the-legislative-council-nsw,mfb-locations-vic,ministers-of-the-nsw-parliament,mosman-local-government-area,mosman-rider-route,mosman-wwii-honour-roll,neighbourhood-houses-gfyl,news-feeds-mosman-municipal-council,off-street-car-parks-mosman-municipal-council,orienteering-clubs-gfyl,parking-meter-areas-brisbane-city-council,parks-and-reserves-mosman-municipal-council,parks-brisbane-city-council,personal-training-gfyl,picnic-areas-brisbane-city-council,playgrounds-brisbane-city-council,playgrounds-mosman-municipal-council,police-region-crime-statistics-victoria,police-service-area-crime-statistics-victoria,pony-clubs-in-victoria-gfyl,prison-locations-victoria,public-amenities-maintained-by-mosman-council,public-art-brisbane-city-council,public-internet-locations-vic,public-toilets-brisbane-city-council,racecourse-locations-victoria,recent-development-applications-mosman-municipal-council,recreation-groups-gfyl,recreational-fishing-spots,regional-business-centres-brisbane-city-council,reports-of-swooping-birds-mosman-municipal-council,restricted-parking-areas-brisbane-city-council,rollerskating-centres-in-victoria-gfyl,sailing-clubs-gfyl,school-locations-victoria,shadow-ministers-of-the-nsw-parliament,skate-parks-gfyl,sporting-clubs-and-organisations-gfyl,stakeboard-parks-brisbane-city-council,state-bodies-gfyl,street-names-brisbane-city-council,suburbs-and-adjoining-suburbs-brisbane-city-council,swimming-pools-brisbane-city-council,swimming-pools-gfyl,tennis-courts-brisbane-city-council,top-40-book-club-reads-brisbane-city-council,tracks-and-trails-gfyl,triathlon-clubs-gfyl,urban-water-restrictions-victoria,veterinary-services-in-mosman,victorian-microbreweries,volunteering-centres-services-and-groups-victoria,walking-groups-gfyl,ward-offices-brisbane-city-council,waste-collection-days-brisbane-city-council,waste-transfer-stations-brisbane-city-council,water-consumption-in-melbourne,water-sports-in-victoria-gfyl,wifi-hot-spots-brisbane-city-council,yoga-pilates-and-tai-chi-in-victoria-gfyl,2809cycling-in-new-south-wales-what-the-data-tells-us2809-and-related-data,act-barbecue-bbq-locations,act-tafe-locations,ausindustry-locations,austender-contract-notice-export,austender-contract-notice-export,austender-contract-notice-export,austender-contract-notice-export,austender-contract-notice-export,austender-contract-notice-export,austender-contract-notice-export,austender-contract-notice-export,
#australian-gas-light-company-maps,australian-gas-light-company-maps,australian-ports,australian-public-service-statistical-bulletin-2011-12,australian-public-service-statistical-bulletin-snapshot-at-december-31-2011,australian-public-service-statistical-bulletin-tables-0910,austrics-timetable-set,capital-works-call-tender-schedule,collection-item-usage-state-library-of-victoria,country-and-commodity-trade-data-spreadsheet,country-and-commodity-trade-data-spreadsheet-2,country-by-level-of-processing-trade-data-spreadsheet,crime-incident-type-and-frequency-by-capital-city-and-nationally,csiro-locations,data-from-the-oaic-public-sector-information-survey-2012,data-from-the-oaic-public-sector-information-survey-2012,data-from-the-oaic-public-sector-information-survey-2012,department-of-finance-and-deregulation-office-locations,digitised-maps,diisr-division-locations-excluding-ausindustry-enterprise-connect-and-nmi,diisr-locations,diisr-portfolio-agency-locations-excluding-csiro,distance-to-legal-service-providers-from-disadvantaged-suburbs,enterprise-connect-locations,fire-insurance-maps-sydney-block-plans-1919-1940,fire-insurance-maps-sydney-block-plans-1919-1940,first-fleet-collection,first-fleet-collection,first-fleet-maps,first-fleet-maps,freedom-of-information-annual-estimated-costs-and-staff-time-statistical-data-2011-12,freedom-of-information-quarterly-request-and-review-statistical-data-2011-12,freedom-of-information-requests-estimated-costs-and-charges-collected-1982-83-to-2011-12,higher-education-course-completions,higher-education-enrolments,historical-australian-government-contract-data,historical-australian-government-contract-data,historical-australian-government-contract-data,historical-australian-government-contract-data,historical-australian-government-contract-data,historical-australian-government-contract-data,historical-australian-government-contract-data,historical-australian-government-contract-data,historical-australian-government-contract-data,historical-australian-government-contract-data,historical-australian-government-contract-data,historical-australian-government-contract-data,historical-australian-government-contract-data,historical-australian-government-contract-data,historical-australian-government-contract-data,journey-planner-data-nt,library-catalogue-search-terms-state-library-of-victoria,location-of-act-schools,location-of-centrelink-offices,location-of-european-wasps-nests,location-of-lawyers-and-legal-service-providers-by-town,location-of-legal-assistance-service-providers,location-of-medicare-offices,location-of-medicare-offices,maps-of-the-southern-hemisphere-16th-18th-centuries,maps-of-the-southern-hemisphere-16th-18th-centuries,music-queensland,national-measurement-institute-locations,new-south-wales-officers-and-men-of-the-australian-imperial-force-a-i-f-and-the-australian-naval-for,new-south-wales-officers-and-men-of-the-australian-imperial-force-a-i-f-and-the-australian-naval-for,photographs-of-nsw-life-pre-1955,photographs-of-nsw-life-pre-1955,photographs-of-sydney-before-1885,photographs-of-sydney-before-1885,picture-queensland,plgr-28093-playgrounds-act,police-station-locations,queensland-public-libraries,rare-printed-books,rare-printed-books,real-estate-maps,regional-australia-funding-projects,sa-memory-state-library-of-south-australia,search-engine-terms-state-library-of-victoria,south-australian-photographs-state-library-of-south-australia,south-australian-sheet-music-state-library-of-south-australia,sydney-bond-store-maps-1894,
#sydney-bond-store-maps-1894,sydney-maps-1917,sydney-maps-1917,tafe-institute-locations-victoria,tafe-sa-campus-locations,tolt-public-toilets-act,victorian-public-library-branches-state-library-of-victoria,western-australia-public-library-network,world-war-one-photographs-by-frank-hurley,world-war-one-photographs-by-frank-hurley,citycat-timetables-brisbane-city-council,cityferry-timetables-brisbane-city-council,cost-of-salinity-to-local-infrastructure-1996-97-summary-of-component-costs-of-salinity-by-reporting,cost-of-salinity-to-local-infrastructure-1996-97-summary-of-component-costs-of-salinity-by-reporting,downstream-cost-calculator-model-and-data-for-199697-or-2001-prices,economics-of-australian-soil-conditions-199697-limiting-factor-or-relative-yield-min-of-ry_salt2000-,geographical-names-register-gnr-of-nsw,victorian-dryland-salinity-assessment-2000-d01cac_ramsar_final-xls,victorian-dryland-salinity-assessment-2000-d02cac_fauna_final-xls,victorian-dryland-salinity-assessment-2000-d03cac_fauna_dist_final-xls,victorian-dryland-salinity-assessment-2000-dc04cac_hydrol_final-xls,victorian-dryland-salinity-assessment-2000-dc05cac_wetland_final-xls,victorian-dryland-salinity-assessment-2000-dc06cac_util_final-xls,victorian-dryland-salinity-assessment-2000-dc07cac_road_final-xls,victorian-dryland-salinity-assessment-2000-dc08cac_towns_final-xls,victorian-dryland-salinity-assessment-2000-dc09cac_flora_final-xls,victorian-dryland-salinity-assessment-2000-dc10cac_flora_dist_final-xls,victorian-dryland-salinity-assessment-2000-dc12cac_infrastructure-xls,victorian-dryland-salinity-assessment-2000-dc13cac_natural_envt-xls,victorian-dryland-salinity-assessment-2000-dc14cac_agriculture-xls,victorian-dryland-salinity-assessment-2000-dc16cac_agric_cost-xls,victorian-dryland-salinity-assessment-2000-dc17cac_shallow_wt-xls,victorian-dryland-salinity-assessment-2000-dc18cac_agric_cost_time-xls,victorian-dryland-salinity-assessment-2000-dc21cac_water_resources_new-xls,victorian-dryland-salinity-assessment-2000-dc22cac_risk-xls,licensed-broadcasting-transmitter-data,nsw-crime-data,recorded-crime-dataset-nsw,crime-statistics-in-nsw-by-month,2001-02-to-2007-08-local-government-survey-victoria,2009-green-light-report,annual-statistical-reports-fire-brigades-nsw-200304,annual-statistical-reports-fire-brigades-nsw-200405,annual-statistical-reports-fire-brigades-nsw-200506,annual-statistical-reports-fire-brigades-nsw-200607,arts-on-the-map,assets-and-liabilities-of-australian-located-operations,assets-of-australian-located-operations,assets-of-australian-located-operations-by-country,assets-of-financial-institutions,back-issues-of-monthly-banking-statistics,banks-assets,banks-consolidated-group-capital,banks-consolidated-group-impaired-assets,banks-consolidated-group-off-balance-sheet-business,banks-liabilities,building-societies-selected-assets-and-liabilities,byteback2842-locations-vic,cash-management-trusts,city-of-melbourne-street-furniture-database,community-services-nsw,consolidated-exposures-immediate-and-ultimate-risk-basis,consolidated-exposures-immediate-risk-basis-foreign-claims-by-country,consolidated-exposures-immediate-risk-basis-international-claims-by-country,consolidated-exposures-ultimate-risk-basis,consolidated-exposures-ultimate-risk-basis-foreign-claims-by-country,cosolidated-exposures-immediate-risk-basis,credit-unions-selected-assets-and-liabilities,daily-net-foreign-exchange-transactions,detox-your-home,education-national-assessment-program-literacy-and-numeracy-nsw,employment-data-by-nsw-regions,excise-beer-clearance-data-updated-each-month-beer-clearance-summary-data,
#finance-companies-and-general-financiers-selected-assets-and-liabilities,foreign-exchange-transactions-and-holdings-of-official-reserve-assets,half-yearly-life-insurance-bulletin-december-2010,health-behaviours-in-nsw,international-liabilities-by-country-of-the-australian-located-operations-of-banks-and-rfcs,liabilities-and-assets-monthly,liabilities-and-assets-weekly,liabilities-of-australian-located-operations,life-insurance-offices-statutory-funds,managed-funds,monetary-policy-changes,money-market-corporations-selected-assets-and-liabilities,monthly-airport-traffic-data-for-top-ten-airports-january-1985-to-december-2008,monthly-banking-statistics-april-2011,monthly-banking-statistics-june-2011,monthly-banking-statistics-may-2011,open-market-operations-2009-to-current,projected-households-vic-rvic-msd-2006-2056,projected-population-by-age-and-sex-vic-rvic-msd-2006-2056,public-unit-trust,quarterly-bank-performance-statistics,quarterly-general-insurance-performance-statistics-march-2011,quarterly-superannuation-performance-march-2011,recorded-crime-dataset-nsw,residential-land-bulletin,resourcesmart-retailers,resourcesmart-retailers-vic,road-fatalities-nsw,securitisation-vehicles,selected-asset-and-liabilities-of-the-private-non-financial-sectors,seperannuation-funds-outside-life-offices,solar-report-vic,towns-in-time-victoria,vif2008-projected-population-by-5-year-age-groups-and-sex-sla-lga-ssd-sd-2006-2026,vif2008-projected-population-totals-and-components-vic-rvic-msd-2006-2056,vif2008-projected-population-totals-sla-lga-ssd-sd-2006-2026,arts-festivals-victoria,arts-organisations-victoria,arts-spaces-and-places-victoria,ausgrid-average-electricity-use,collecting-institutions-victoria,indigenous-arts-organisations-victoria,latest-coastal-weather-observations-for-coolangatta-qld,top-10-fiction-books-brisbane-city-council];
   
   
docsdb = couch['disclosr-documents']

if __name__ == "__main__":
    orgs_list = []
    orgs_ids = {}
    for doc in docsdb.view('app/datasets'):
        print " --- "
        print doc.id

        if doc.value['url'] != "http://data.gov.au/data/" and doc.value['agencyID'] != "qld":

            # Collect the package metadata.
            pkg_name = filter(lambda x: x in '0123456789abcdefghijklmnopqrstuvwxyz-_',
                              doc.value['url'].replace("http://data.gov.au/dataset/", '').replace('/', '')[:100])
            print pkg_name
            if pkg_name in gooddata:

                # add to or create organization using direct API
                agency = doc.value['metadata']["Agency"]
                if agency == "APS":
                    agency = "Australian Public Service Commission"
                if agency == "Shared Services, Treasury Directorate":
                    agency = "Shared Services Procurement, Treasury Directorate"
                if agency == "Treasury - Shared Services":
                    agency = "Shared Services Procurement, Treasury Directorate"
                if agency == "Territory and Municipal Services (TAMS)":
                    agency = "Territory and Municipal Services Directorate"
                if agency == "State Library of NSW":
                    agency = "State Library of New South Wales"
                org_name = name_munge(agency[:100])

                if org_name not in orgs_list:
                    orgs_list = ckandirect.action.organization_list()['result']
                    #print orgs_list
                    if org_name not in orgs_list:
                        try:
                            print "org not found, creating " + org_name
                            ckandirect.action.organization_create(name=org_name, title=agency,
                                                                  description=agency)
                            orgs_list.append(org_name)
                        except ckanapi.ValidationError, e:
                            print e
                            raise LoaderError('Unexpected status')
                    else:
                        print "org found, adding dataset to " + org_name

                # cache org names -> id mapping
                if org_name not in orgs_ids:
                    org = ckandirect.action.organization_show(id=org_name)
                    orgs_ids[org_name] = org["result"]["id"]
                org_id = orgs_ids[org_name]
                print "org id is " + org_id

                tags = []
                creator = doc.value['metadata']["DCTERMS.Creator"]
                if doc.value['agencyID'] == "AGIMO":
                    if len(doc.value['metadata']["Keywords / Tags"]) > 0:
                        if hasattr(doc.value['metadata']["Keywords / Tags"], '__iter__'):
                            tags = tags + doc.value['metadata']["Keywords / Tags"]
                        else:
                            tags = tags + [doc.value['metadata']["Keywords / Tags"]]

                tags = [re.sub('[^a-zA-Z0-9-_.]', '', tag.replace('&', 'and')).lower() for tag in tags if tag]
                #print tags

                # any metadata fields not handled above are passed through as package extras
                extras = []
                for extra_key in doc.value['metadata'].keys():
                    if extra_key not in ["Description", "Content-Language", "DCTERMS.Description",
                                         "Keywords / Tags",
                                         "data.gov.au Category", "Download", "Permalink", "DCTERMS.Identifier"]:
                        if doc.value['metadata'][extra_key] != None and doc.value['metadata'][extra_key] != "":
                            extras.append([extra_key, doc.value['metadata'][extra_key]])

                package_entity = {
                    'name': pkg_name,
                    'title': doc.value['metadata']['DCTERMS.Title'],
                    'url': doc.value['metadata']['DCTERMS.Source.URI'],
                    'tags': tags, #tags are mandatory?
                    'author': creator,
                    'maintainer': creator,
                    'licence_id': get_licence_id(doc.value['metadata']['DCTERMS.License']),
                    'notes': html2text.html2text(doc.value['metadata']['Description']),
                    'owner_org': org_id,
                    'extras': extras
                }

                try:
                    #print package_entity
                    ckan.package_register_post(package_entity)
                except CkanApiError, e:
                    if ckan.last_message == "{\"name\": [\"That URL is already in use.\"]}":
                        print "package already exists"
                    else:
                        print ckan.last_message
                        raise LoaderError('Unexpected status %s checking for package under \'%s\': %r' % (
                            ckan.last_status, pkg_name, e.args))
                pkg = ckan.package_entity_get(pkg_name)

                # add resources (downloadable data files)
                if 'Download' in doc.value['metadata'].keys():
                    try:
                        resources = pkg.get('resources', [])
                        if len(resources) < len(doc.value['metadata']['Download']):
                            for resource in doc.value['metadata']['Download']:

                                # http://docs.ckan.org/en/ckan-1.7/domain-model-resource.html
                                # (KML/KMZ) / (Shapefile) / (Other)
                                format = "plain"
                                if resource['format'] == '(XML)':
                                    format = 'xml'
                                if resource['format'] == '(CSV/XLS)':
                                    format = 'csv'
                                if resource['format'] == '(Shapefile)':
                                    format = 'shp'
                                if resource['format'] == '(KML/KMZ)':
                                    format = 'kml'
                                name = resource['href']
                                if 'name' in resource.keys():
                                    name = resource['name']
                                print resource
                                add_package_resource_cachedurl(ckan, pkg_name, url_fix(resource['href']), name,
                                                               format,
                                                               human2bytes(resource.get('size', '0B')),
                                                               resource_type='data')
                        else:
                            print "resources already exist"
                    except CkanApiError, e:
                        if ckan.last_status == 404:
                            print "parent dataset does not exist"
                        else:
                            raise LoaderError('Unexpected status %s checking for package under \'%s\': %r' % (
                                ckan.last_status, pkg_name, e.args))
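
# --- link checker: HEAD-requests every resource URL recorded in CouchDB and
# writes package name, fixed URL, resource name, format, HTTP status and
# content type to output.csv ---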
   
  import couchdb
  couch = couchdb.Server('http://127.0.0.1:5984/')
  #couch = couchdb.Server('http://192.168.1.113:5984/')
 
  import urllib
  import urlparse
  import httplib2
  import httplib
  import csv
 
 
def url_fix(s, charset='utf-8'):
    """Sometimes you get a URL from a user that just isn't a real
    URL because it contains unsafe characters like ' ' and so on. This
    function can fix some of the problems in a similar way browsers
    handle data entered by the user:

    :param charset: The target charset for the URL if the url was
        given as a unicode string.
    """
    if isinstance(s, unicode):
        s = s.encode(charset, 'ignore')
    if not urlparse.urlparse(s).scheme:
        s = "http://" + s
    scheme, netloc, path, qs, anchor = urlparse.urlsplit(s)
    path = urllib.quote(path, '/%')
    qs = urllib.quote_plus(qs, ':&=')
    return urlparse.urlunsplit((scheme, netloc, path, qs, anchor))
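
# Behaves like the url_fix in the loader script above, e.g. a space in the path
# becomes %20 and a bare hostname gains an http:// scheme.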
 
# http://code.activestate.com/recipes/578019-bytes-to-human-human-to-bytes-converter/
SYMBOLS = {
    'customary': ('B', 'KB', 'MB', 'GB', 'T', 'P', 'E', 'Z', 'Y'),
    'customary_ext': ('byte', 'kilo', 'mega', 'giga', 'tera', 'peta', 'exa',
                      'zetta', 'iotta'),
    'iec': ('Bi', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi', 'Yi'),
    'iec_ext': ('byte', 'kibi', 'mebi', 'gibi', 'tebi', 'pebi', 'exbi',
                'zebi', 'yobi'),
}
 
 
docsdb = couch['disclosr-documents']
out = csv.writer(open("output.csv", "w"), delimiter=',', quoting=csv.QUOTE_ALL)

if __name__ == "__main__":
    for doc in docsdb.view('app/datasets'):
        if doc.value['url'] != "http://data.gov.au/data/" and doc.value['agencyID'] != "qld":
            # Collect the package metadata.
            pkg_name = filter(lambda x: x in '0123456789abcdefghijklmnopqrstuvwxyz-_',
                              doc.value['url'].replace("http://data.gov.au/dataset/", '').replace('/', '')[:100])
            if 'Download' in doc.value['metadata'].keys() and len(doc.value['metadata']['Download']) > 0:
                for resource in doc.value['metadata']['Download']:
                    # http://docs.ckan.org/en/ckan-1.7/domain-model-resource.html
                    # (KML/KMZ) / (Shapefile) / (Other)
                    format = "plain"
                    if resource['format'] == '(XML)':
                        format = 'xml'
                    if resource['format'] == '(CSV/XLS)':
                        format = 'csv'
                    if resource['format'] == '(Shapefile)':
                        format = 'shp'
                    if resource['format'] == '(KML/KMZ)':
                        format = 'kml'
                    name = resource['href']
                    if 'name' in resource.keys():
                        name = resource['name']
                    if resource['href'].startswith("ftp"):
                        out.writerow([pkg_name, url_fix(resource['href']), name, format, "ftp", ""])
                    else:
                        try:
                            h = httplib2.Http(disable_ssl_certificate_validation=True)
                            resp = h.request(url_fix(resource['href']), 'HEAD')
                            content_type = resp[0]['content-type'] if 'content-type' in resp[0].keys() else ""
                            out.writerow([pkg_name.encode('ascii', 'ignore'), url_fix(resource['href']).encode('ascii', 'ignore'),
                                          name.encode('ascii', 'ignore'), format, resp[0]['status'], content_type])
                        except (httplib2.ServerNotFoundError, httplib.InvalidURL, httplib2.RelativeURIError):
                            out.writerow([pkg_name.encode('ascii', 'ignore'), url_fix(resource['href']).encode('ascii', 'ignore'),
                                          name.encode('ascii', 'ignore'), format, "500", "badurl"])
            else:
                out.writerow([pkg_name.encode('ascii', 'ignore')])
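
# --- scraper: walks the data.gov.au dataset listing, parses each dataset
# page's <meta> tags and definition lists (including Download links with
# format and size), and saves the metadata into CouchDB for the loaders above ---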
 
import sys, os
import time
import scrape
from bs4 import BeautifulSoup

from unidecode import unidecode
   
listurl = "http://data.gov.au/data/"
(url, mime_type, datasetlisthtml) = scrape.fetchURL(scrape.docsdb,
                                                    listurl, "data", "AGIMO")
soup = BeautifulSoup(datasetlisthtml)
for atag in soup.find_all(class_='result-title'):
    if atag.has_key('href'):
        url = scrape.fullurl(listurl, atag['href'])
        (url, mime_type, html) = scrape.fetchURL(scrape.docsdb,
                                                 url, "data", "AGIMO", False)
        hash = scrape.mkhash(scrape.canonurl(url))
        doc = scrape.docsdb.get(hash)
        if "metadata" not in doc.keys() or True:
            doc['type'] = "dataset"
            doc['metadata'] = {}
            soup = BeautifulSoup(html)
            for metatag in soup.find_all('meta'):
                if metatag.has_key('name'):
                    doc['metadata'][metatag['name']] = metatag['content']
            for dl in soup.find_all('dl'):
                last_title = ""
                for child in dl.children:
                    if str(type(child)) != "<class 'bs4.element.NavigableString'>":
                        if child.name == 'dt' and child.string != None:
                            last_title = child.string.strip()
                        if child.name == 'dd':
                            #print last_title
                            if last_title == "Description":
                                doc['metadata'][last_title] = unidecode(str(child)).encode('ascii', 'ignore')
                            elif last_title == "Download":
                                doc['metadata'][last_title] = []
                                for item in child.find_all("li"):
                                    link = item.find("a")
                                    format = item.find(property="dc:format")
                                    linkobj = {"href": link['href'].replace("/bye?", "").strip(),
                                               "format": format.string.strip()}
                                    # size is not always present on the page
                                    if format.next_sibling.string != None:
                                        linkobj["size"] = format.next_sibling.string.strip()
                                    if link.string != None:
                                        linkobj["name"] = link.string.strip()
                                    doc['metadata'][last_title].append(linkobj)

                            else:
                                atags = child.find_all('a')
                                if len(atags) < 2:
                                    [s.extract() for s in child(class_='viewAll')]
                                    doc['metadata'][last_title] = ''.join(child.stripped_strings).strip()
                                else:
                                    doc['metadata'][last_title] = [item.string.replace(",", "").strip() for item in atags]
            print doc['metadata']
            scrape.docsdb.save(doc)
            #time.sleep(2)