export no-html and licence


Former-commit-id: 39dafe9fefec609588df4f189c2364dae8edd246

import ckanclient
import couchdb
from ckanclient import CkanApiError
import re

class LoaderError(Exception):
    pass
   
  # Instantiate the CKAN client.
  #ckan = ckanclient.CkanClient(base_location='http://localhost:5000/api', api_key='b47b24cd-591d-40c1-8677-d73101d56d1b')
  ckan = ckanclient.CkanClient(base_location='http://data.disclosurelo.gs/api', api_key='72f90359-0396-438c-804f-a26a24336747')
  #couch = couchdb.Server('http://127.0.0.1:5984/')
  couch = couchdb.Server('http://192.168.1.113:5984/')
   
  # http://stackoverflow.com/a/7778368/684978
  from HTMLParser import HTMLParser
  import htmlentitydefs
   
  class HTMLTextExtractor(HTMLParser):
    def __init__(self):
        HTMLParser.__init__(self)
        self.result = []

    def handle_data(self, d):
        self.result.append(d)

    def handle_charref(self, number):
        codepoint = int(number[1:], 16) if number[0] in (u'x', u'X') else int(number)
        self.result.append(unichr(codepoint))

    def handle_entityref(self, name):
        codepoint = htmlentitydefs.name2codepoint[name]
        self.result.append(unichr(codepoint))

    def get_text(self):
        return u''.join(self.result)


def html_to_text(html):
    s = HTMLTextExtractor()
    s.feed(html)
    return s.get_text()
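# For illustration (hypothetical input): html_to_text() strips tags and decodes
# entities from the scraped Description markup before it is exported to CKAN, e.g.
#   html_to_text('<dd>Crime statistics &amp; trends</dd>')  # -> u'Crime statistics & trends'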
   
  # http://code.activestate.com/recipes/578019-bytes-to-human-human-to-bytes-converter/
SYMBOLS = {
    # note: 'customary' differs from the linked recipe ('K', 'M', 'G', ...),
    # presumably so the 'KB'/'MB'/'GB' size strings in the scraped metadata parse directly
    'customary': ('B', 'KB', 'MB', 'GB', 'T', 'P', 'E', 'Z', 'Y'),
    'customary_ext': ('byte', 'kilo', 'mega', 'giga', 'tera', 'peta', 'exa',
                      'zetta', 'iotta'),
    'iec': ('Bi', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi', 'Yi'),
    'iec_ext': ('byte', 'kibi', 'mebi', 'gibi', 'tebi', 'pebi', 'exbi',
                'zebi', 'yobi'),
}
   
def human2bytes(s):
    """
    Attempts to guess the string format based on the symbol table above
    and returns the corresponding number of bytes as an integer.
    When unable to recognize the format, ValueError is raised.

    >>> human2bytes('0 B')
    0
    >>> human2bytes('1 KB')
    1024
    >>> human2bytes('1 MB')
    1048576
    >>> human2bytes('1 Gi')
    1073741824
    >>> human2bytes('1 tera')
    1099511627776

    >>> human2bytes('0.5kilo')
    512
    >>> human2bytes('0.1 byte')
    0
    >>> human2bytes('1 kb')  # lowercase suffixes are aliases for their uppercase form
    1024
    >>> human2bytes('12 foo')
    Traceback (most recent call last):
        ...
    ValueError: can't interpret '12 foo'
    """
    init = s
    num = ""
    while s and s[0:1].isdigit() or s[0:1] == '.':
        num += s[0]
        s = s[1:]
    num = float(num)
    letter = s.strip()
    for name, sset in SYMBOLS.items():
        if letter in sset:
            break
    else:
        if letter.upper() in SYMBOLS['customary']:
            # treat lowercase suffixes as aliases for their uppercase form
            # as per: http://goo.gl/kTQMs
            sset = SYMBOLS['customary']
            letter = letter.upper()
        else:
            raise ValueError("can't interpret %r" % init)
    prefix = {sset[0]: 1}
    for i, s in enumerate(sset[1:]):
        prefix[s] = 1 << (i + 1) * 10
    return int(num * prefix[letter])
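# The Download 'size' strings from the scraped metadata (commas stripped; an
# illustrative value would be '4.9 MB') are converted with this in the export
# loop below, e.g. human2bytes('4.9 MB') == 5138022.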
   
# https://github.com/okfn/ckanext-importlib
   
def munge(name):
    # convert spaces to underscores
    name = re.sub(' ', '_', name).lower()
    # convert symbols to dashes
    name = re.sub('[:]', '_-', name).lower()
    name = re.sub('[/]', '-', name).lower()
    # take out not-allowed characters
    name = re.sub('[^a-zA-Z0-9-_]', '', name).lower()
    # remove double underscores
    name = re.sub('__', '_', name).lower()
    return name
   
   
def name_munge(input_name):
    return munge(input_name.replace(' ', '').replace('.', '_').replace('&', 'and'))
    #return input_name.replace(' ', '').replace('.', '_').replace('&', 'and')
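# For example (hypothetical title): name_munge('Crime Statistics - ACT & NSW')
# returns 'crimestatistics-actandnsw', a lowercase name safe to use as a CKAN package name.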
   
def get_licence_id(licencename):
    map = {
        "Creative Commons - Attribution-Share Alike 2.0 Australia (CC-SA)\nThe downloadable version of the database is licensed under CC-BY-SA Creative Commons Attribution Share Alike and contains only the database fields that are released under that license. These fields are object title, object number, object description as well as temporal, spatial and dimension details. It also contains a persistent URL for each record.": 'cc-by-sa',
        "CreativeCommonsAttributionNonCommercial30AustraliaCCBYNC30": 'cc-nc',
        'Otherpleasespecify': 'notspecified',
        '': 'notspecified',
        "Publicly available data": 'notspecified',
        "CreativeCommonsAttributionNoDerivativeWorks30AustraliaCCBYND30": "other-closed",
        "CreativeCommonsAttributionNonCommercialNoDerivs30AustraliaCCBYNCND30": "other-closed",
        'CreativeCommonsAttribution30AustraliaCCBY30': 'cc-by',
        "Creative Commons - Attribution 2.5 Australia (CC-BY)": 'cc-by',
        'CreativeCommonsAttributionCCBY25': 'cc-by',
        "PublicDomain": 'other-pd',
    }
    if licencename not in map.keys():
        raise Exception(licencename + " not found")
    return map[licencename]
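# For example, get_licence_id('PublicDomain') returns 'other-pd'; an unmapped
# licence string raises an Exception so it can be added to the map above.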
   
docsdb = couch['disclosr-documents']
   
if __name__ == "__main__":
    for doc in docsdb.view('app/datasets'):
        print doc.id
        if doc.value['url'] != "http://data.gov.au/data/":
            # Collect the package metadata.
            pkg_name = name_munge(doc.value['metadata']['DCTERMS.Title'][:100])
            tags = doc.value['metadata']["Keywords / Tags"]
            if not hasattr(tags, '__iter__'):
                tags = [tags]
            # clean tags (CKAN tag names must be alphanumeric); keep the cleaned list rather than discarding it
            tags = [re.sub('[^a-zA-Z0-9-_()]', '', tag).replace('&', 'and').lower() for tag in tags]
            package_entity = {
                'name': pkg_name,
                'title': doc.value['metadata']['DCTERMS.Title'],
                'url': doc.value['metadata']['DCTERMS.Source.URI'],
                'author': doc.value['metadata']["DCTERMS.Creator"],
                'maintainer': doc.value['metadata']["DCTERMS.Creator"],
                'licence_id': get_licence_id(doc.value['metadata']['DCTERMS.License']),
                'notes': html_to_text(doc.value['metadata']['Description']),
            }
            if len(tags) > 0:
                package_entity['tags'] = tags
                print tags
            try:
                #print doc.id
                ckan.package_register_post(package_entity)
            except CkanApiError, e:
                if ckan.last_status == 409:
                    print "already exists"
                else:
                    raise LoaderError('Unexpected status %s checking for package under \'%s\': %r' % (
                        ckan.last_status, pkg_name, e.args))

            print package_entity
            #todo add to organisation (author/creator/maintainer) http://docs.ckan.org/en/latest/apiv3.html#examples ckan.logic.action.update.package_owner_org_update
            #if 'data.gov.au Category' in doc.value['metadata'].keys(): #todo add to group
            if 'Download' in doc.value['metadata'].keys():
                try:
                    pkg = ckan.package_entity_get(pkg_name)
                    resources = pkg.get('resources', [])
                    if len(resources) < len(doc.value['metadata']['Download']):
                        for resource in doc.value['metadata']['Download']:
                            print resource
                            # http://docs.ckan.org/en/ckan-1.7/domain-model-resource.html
                            # (KML/KMZ) / (Shapefile) / (Other)
                            format = "plain"
                            if resource['format'] == '(XML)':
                                format = 'xml'
                            if resource['format'] == '(CSV/XLS)':
                                format = 'csv'
                            name = resource['href']
                            if 'name' in resource.keys():
                                name = resource['name']
                            ckan.add_package_resource(pkg_name, resource['href'], name=name, resource_type='data',
                                                      format=format, size=human2bytes(resource['size'].replace(',', '')))
                    else:
                        print "resources already exist"
                except CkanApiError, e:
                    if ckan.last_status == 404:
                        print "parent dataset does not exist"
                    else:
                        raise LoaderError('Unexpected status %s checking for package under \'%s\': %r' % (
                            ckan.last_status, pkg_name, e.args))
   
import sys, os
import time
import scrape
from bs4 import BeautifulSoup

from unidecode import unidecode

listurl = "http://data.gov.au/data/"
(url, mime_type, datasetlisthtml) = scrape.fetchURL(scrape.docsdb,
    listurl, "data", "AGIMO")
soup = BeautifulSoup(datasetlisthtml)
for atag in soup.find_all(class_='result-title'):
    if atag.has_key('href'):
        url = scrape.fullurl(listurl, atag['href'])
        (url, mime_type, html) = scrape.fetchURL(scrape.docsdb,
            url, "data", "AGIMO", False)
        hash = scrape.mkhash(scrape.canonurl(url))
        doc = scrape.docsdb.get(hash)
        if "metadata" not in doc.keys() or True:
            doc['type'] = "dataset"
            doc['metadata'] = {}
            soup = BeautifulSoup(html)
            for metatag in soup.find_all('meta'):
                if metatag.has_key('name'):
                    doc['metadata'][metatag['name']] = metatag['content']
            for list in soup.find_all('dl'):
                last_title = ""
                for child in list.children:
                    if str(type(child)) != "<class 'bs4.element.NavigableString'>":
                        if child.name == 'dt' and child.string != None:
                            last_title = child.string.strip()
                        if child.name == 'dd':
                            #print last_title
                            if last_title == "Description":
                                doc['metadata'][last_title] = unidecode(str(child)).encode('ascii', 'ignore')
                            elif last_title == "Download":
                                doc['metadata'][last_title] = []
                                for item in child.find_all("li"):
                                    link = item.find("a")
                                    format = item.find(property="dc:format")
                                    linkobj = {"href": link['href'].replace("/bye?", "").strip(),
                                               "format": format.string.strip(),
                                               "size": format.next_sibling.string.strip()}
                                    if link.string != None:
                                        linkobj["name"] = link.string.strip()
                                    doc['metadata'][last_title].append(linkobj)

                            else:
                                atags = child.find_all('a')
                                if len(atags) < 2:
                                    [s.extract() for s in child(class_='viewAll')]
                                    doc['metadata'][last_title] = ''.join(child.stripped_strings).strip()
                                else:
                                    doc['metadata'][last_title] = [item.string.replace(",", "").strip() for item in atags]
            print doc['metadata']
            scrape.docsdb.save(doc)
            #time.sleep(2)
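# After this runs, each dataset document in CouchDB carries roughly this metadata
# shape (field values illustrative only), which the CKAN export script above reads back out:
#   doc['metadata'] = {
#       'DCTERMS.Title': '...', 'DCTERMS.Creator': '...', 'DCTERMS.License': '...',
#       'DCTERMS.Source.URI': '...', 'Keywords / Tags': ['...', '...'],
#       'Description': '<dd>...</dd>',
#       'Download': [{'href': '...', 'format': '(CSV/XLS)', 'size': '4.9 MB', 'name': '...'}],
#   }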
   
#http://packages.python.org/CouchDB/client.html
import couchdb
import urllib2
from BeautifulSoup import BeautifulSoup
import re
import hashlib
from urlparse import urljoin
import time
import os
import mimetypes
import urllib
import urlparse
import socket
   
  #couch = couchdb.Server('http://192.168.1.148:5984/')
  couch = couchdb.Server('http://192.168.1.113:5984/')
  #couch = couchdb.Server('http://127.0.0.1:5984/')
   
   
def mkhash(input):
    return hashlib.md5(input).hexdigest().encode("utf-8")
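# mkhash() derives the CouchDB document id for a page, typically from the
# canonicalised URL, e.g. mkhash(canonurl('data.gov.au/data/')) -> 32-character hex MD5 string.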
   
   
def canonurl(url):
    r"""Return the canonical, ASCII-encoded form of a UTF-8 encoded URL, or ''
    if the URL looks invalid.
    >>> canonurl('\xe2\x9e\xa1.ws')  # tinyarro.ws
    'http://xn--hgi.ws/'
    """
    # strip spaces at the ends and ensure it's prefixed with 'scheme://'
    url = url.strip()
    if not url:
        return ''
    if not urlparse.urlsplit(url).scheme:
        url = 'http://' + url

    # turn it into Unicode
    #try:
    #    url = unicode(url, 'utf-8')
    #except UnicodeDecodeError:
    #    return ''  # bad UTF-8 chars in URL

    # parse the URL into its components
    parsed = urlparse.urlsplit(url)
    scheme, netloc, path, query, fragment = parsed

    # ensure scheme is a letter followed by letters, digits, and '+-.' chars
    if not re.match(r'[a-z][-+.a-z0-9]*$', scheme, flags=re.I):
        return ''
    scheme = str(scheme)