Merge branch 'master' of ssh://apples.lambdacomplex.org/git/disclosr

Conflicts:
documents/genericScrapers.py
documents/runScrapers.sh

Former-commit-id: a6f8697ed080934b51ab7b63a3d4428ff5ccdb2b

import ckanclient
import couchdb
from ckanclient import CkanApiError
import re
import html2text # aaronsw :(
   
   
class LoaderError(Exception):
    pass

# Instantiate the CKAN client.
#ckan = ckanclient.CkanClient(base_location='http://localhost:5000/api', api_key='b47b24cd-591d-40c1-8677-d73101d56d1b')
ckan = ckanclient.CkanClient(base_location='http://data.disclosurelo.gs/api',
                             api_key='482a9dd2-a976-4adf-ac77-d71d92a98a52')
couch = couchdb.Server('http://127.0.0.1:5984/')
#couch = couchdb.Server('http://192.168.1.113:5984/')
   
# http://code.activestate.com/recipes/578019-bytes-to-human-human-to-bytes-converter/
SYMBOLS = {
    'customary': ('B', 'KB', 'MB', 'GB', 'T', 'P', 'E', 'Z', 'Y'),
    'customary_ext': ('byte', 'kilo', 'mega', 'giga', 'tera', 'peta', 'exa',
                      'zetta', 'yotta'),
    'iec': ('Bi', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi', 'Yi'),
    'iec_ext': ('byte', 'kibi', 'mebi', 'gibi', 'tebi', 'pebi', 'exbi',
                'zebi', 'yobi'),
}
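# Each successive symbol in a tuple is a factor of 1024 (1 << 10) larger than
# the last; human2bytes() below builds its prefix table by indexing into them.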
   
def human2bytes(s):
    """
    Attempts to guess the string format based on default symbols
    set and return the corresponding bytes as an integer.
    When unable to recognize the format ValueError is raised.

    >>> human2bytes('0 B')
    0
    >>> human2bytes('1 KB')
    1024
    >>> human2bytes('1 MB')
    1048576
    >>> human2bytes('1 Gi')
    1073741824
    >>> human2bytes('1 tera')
    1099511627776

    >>> human2bytes('0.5kilo')
    512
    >>> human2bytes('0.1 byte')
    0
    >>> human2bytes('1 k') # k is an alias for KB
    1024
    >>> human2bytes('12 foo')
    Traceback (most recent call last):
        ...
    ValueError: can't interpret '12 foo'
    """
    init = s
    num = ""
    while s and (s[0:1].isdigit() or s[0:1] == '.'):
        num += s[0]
        s = s[1:]
    num = float(num)
    letter = s.strip()
    for name, sset in SYMBOLS.items():
        if letter in sset:
            break
    else:
        if letter == 'k':
            # treat 'k' as an alias for 'KB' as per: http://goo.gl/kTQMs
            sset = SYMBOLS['customary']
            letter = 'KB'
        else:
            raise ValueError("can't interpret %r" % init)
    prefix = {sset[0]: 1}
    for i, unit in enumerate(sset[1:]):
        prefix[unit] = 1 << (i + 1) * 10
    return int(num * prefix[letter])
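# Worked example of the lookup above: for '2 MB', num is 2.0 and letter is
# 'MB'; 'MB' sits at index 2 in the 'customary' tuple, so
# prefix['MB'] == 1 << (2 * 10) == 1048576 and the result is 2097152.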
   
# https://github.com/okfn/ckanext-importlib
def munge(name):
    # convert spaces to underscores
    name = re.sub(' ', '_', name).lower()
    # convert colons to underscore-dash
    name = re.sub('[:]', '_-', name)
    # convert slashes to dashes
    name = re.sub('[/]', '-', name)
    # take out not-allowed characters
    name = re.sub('[^a-zA-Z0-9-_]', '', name)
    # remove double underscores
    name = re.sub('__', '_', name)
    return name
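# For example, munge('Budget 2012/13: Summary') returns
# 'budget_2012-13_-_summary'.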
   
   
def name_munge(input_name):
    return munge(input_name.replace(' ', '').replace('.', '_').replace('&', 'and'))
    #[:100]
    #return input_name.replace(' ', '').replace('.', '_').replace('&', 'and')
   
   
def get_licence_id(licencename):
    licence_map = {
        "Creative Commons - Attribution-Share Alike 2.0 Australia (CC-SA)\nThe downloadable version of the database is licensed under CC-BY-SA Creative Commons Attribution Share Alike and contains only the database fields that are released under that license. These fields are object title, object number, object description as well as temporal, spatial and dimension details. It also contains a persistent URL for each record.": 'cc-by-sa',
        "CreativeCommonsAttributionNonCommercial30AustraliaCCBYNC30": 'cc-nc',
        'Otherpleasespecify': 'notspecified',
        '': 'notspecified',
        "Publicly available data": 'notspecified',
        "CreativeCommonsAttributionNoDerivativeWorks30AustraliaCCBYND30": "other-closed",
        "CreativeCommonsAttributionNonCommercialNoDerivs30AustraliaCCBYNCND30": "other-closed",
        'CreativeCommonsAttribution30AustraliaCCBY30': 'cc-by',
        "Creative Commons - Attribution 2.5 Australia (CC-BY)": 'cc-by',
        'CreativeCommonsAttributionCCBY25': 'cc-by',
        "PublicDomain": 'other-pd',
    }
    if licencename not in licence_map:
        raise Exception(licencename + " not found")
    return licence_map[licencename]
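# e.g. get_licence_id('PublicDomain') returns 'other-pd'; unknown licence
# strings raise so new licences surface instead of being silently dropped.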
   
docsdb = couch['disclosr-documents']

if __name__ == "__main__":
    for doc in docsdb.view('app/datasets'):
        print doc.id
        if doc.value['url'] != "http://data.gov.au/data/" and doc.value['agencyID'] != "qld":
            # Collect the package metadata.
            pkg_name = doc.value['url'].replace("http://data.gov.au/dataset/", '').replace('/', '')
            tags = []
            if doc.value['agencyID'] == "AGIMO":
                if len(doc.value['metadata']["Keywords / Tags"]) > 0:
                    if hasattr(doc.value['metadata']["Keywords / Tags"], '__iter__'):
                        tags = tags + doc.value['metadata']["Keywords / Tags"]
                    else:
                        tags = tags + [doc.value['metadata']["Keywords / Tags"]]
                if 'data.gov.au Category' in doc.value['metadata'].keys() and len(doc.value['metadata']['data.gov.au Category']) > 0:
                    if hasattr(doc.value['metadata']['data.gov.au Category'], '__iter__'):
                        tags = tags + doc.value['metadata']['data.gov.au Category']
                    else:
                        tags = tags + [doc.value['metadata']['data.gov.au Category']]
                tags = [re.sub('[^a-zA-Z0-9-_.]', '', tag.replace('&', 'and')).lower() for tag in tags if tag]
                print tags
                package_entity = {
                    'name': pkg_name,
                    'title': doc.value['metadata']['DCTERMS.Title'],
                    'url': doc.value['metadata']['DCTERMS.Source.URI'],
                    'tags': tags, # tags are mandatory?
                    'author': doc.value['metadata']["DCTERMS.Creator"],
                    'maintainer': doc.value['metadata']["DCTERMS.Creator"],
                    'licence_id': get_licence_id(doc.value['metadata']['DCTERMS.License']),
                    'notes': html2text.html2text(doc.value['metadata']['Description']),
                }
            if doc.value['agencyID'] == "qld":
                package_entity = doc.value['metadata']
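            # NOTE: given the != "qld" guard above, this branch appears
            # unreachable; qld metadata is already shaped as a package dict.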
   
            try:
                print package_entity
                ckan.package_register_post(package_entity)
            except CkanApiError, e:
                if ckan.last_status == 409:
                    print "package already exists"
                else:
                    raise LoaderError('Unexpected status %s checking for package under \'%s\': %r' % (
                        ckan.last_status, pkg_name, e.args))
   
   
            #add to group

            group_name = name_munge(doc.value['metadata']["Agency"][:100])
            try:
                print ckan.group_entity_get(group_name)

                # Update the group details
                group_entity = ckan.last_message
                print "group exists"
                if 'packages' in group_entity.keys():
                    group_entity['packages'] = list(set(group_entity['packages'] + [pkg_name]))
                else:
                    group_entity['packages'] = [pkg_name]
                ckan.group_entity_put(group_entity)
            except CkanApiError, e:
                if ckan.last_status == 404:
                    print "group does not exist, creating"
                    group_entity = {
                        'name': group_name,
                        'title': doc.value['metadata']["Agency"],
                        'description': doc.value['metadata']["Agency"],
                        'packages': [pkg_name],
                        # 'type': "organization" # not allowed via API, use database query
                        # update "group" set type = 'organization';
                    }
                    print group_entity
                    ckan.group_register_post(group_entity)
                elif ckan.last_status == 409:
                    print "group already exists"
                else:
                    raise LoaderError('Unexpected status %s adding to group under \'%s\': %r' % (
                        ckan.last_status, pkg_name, e.args))
            if 'Download' in doc.value['metadata'].keys():
                try:
                    pkg = ckan.package_entity_get(pkg_name)
                    resources = pkg.get('resources', [])
                    if len(resources) < len(doc.value['metadata']['Download']):
                        for resource in doc.value['metadata']['Download']:

                            # http://docs.ckan.org/en/ckan-1.7/domain-model-resource.html
                            # (KML/KMZ) / (Shapefile) / (Other)
                            format = "plain"
                            if resource['format'] == '(XML)':
                                format = 'xml'
                            if resource['format'] == '(CSV/XLS)':
                                format = 'csv'
                            name = resource['href']
                            if 'name' in resource.keys():
                                name = resource['name']
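                            # size strings can carry thousands separators
                            # (e.g. '1,024 KB'); strip the commas before
                            # human2bytes() parses the value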
                            print resource
                            ckan.add_package_resource(pkg_name, resource['href'], name=name, resource_type='data',
                                                      format=format, size=human2bytes(resource['size'].replace(',', '')))
                    else:
                        print "resources already exist"
                except CkanApiError, e:
                    if ckan.last_status == 404:
                        print "parent dataset does not exist"
                    else:
                        raise LoaderError('Unexpected status %s checking for package under \'%s\': %r' % (
                            ckan.last_status, pkg_name, e.args))
   
import sys
import os

sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
import scrape
from bs4 import BeautifulSoup
from time import mktime
import feedparser
import abc
import unicodedata
import re
import dateutil
from dateutil.parser import *
from datetime import *
import codecs

import difflib

from StringIO import StringIO

from pdfminer.pdfparser import PDFDocument, PDFParser
from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter, process_pdf
from pdfminer.pdfdevice import PDFDevice, TagExtractor
from pdfminer.converter import TextConverter
from pdfminer.cmapdb import CMapDB
from pdfminer.layout import LAParams
   
class GenericDisclogScraper(object):
    __metaclass__ = abc.ABCMeta
    agencyID = None
    disclogURL = None

    def remove_control_chars(self, input):
        return "".join([i for i in input if ord(i) in range(32, 127)])
   
    def getAgencyID(self):
        """ disclosr agency id """
        if self.agencyID is None:
            self.agencyID = os.path.basename(sys.argv[0]).replace(".py", "")
        return self.agencyID

    def getURL(self):
        """ disclog URL """
        if self.disclogURL is None:
            agency = scrape.agencydb.get(self.getAgencyID())
            self.disclogURL = agency['FOIDocumentsURL']
        return self.disclogURL

    @abc.abstractmethod
    def doScrape(self):
        """ do the scraping """
        return
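    # Per-agency scraper scripts subclass one of the Generic*DisclogScraper
    # classes; the agency ID falls out of the script's filename via
    # getAgencyID() above. A minimal sketch (ExampleScraper is illustrative,
    # not a real agency):
    #
    #     class ExampleScraper(GenericHTMLDisclogScraper):
    #         pass
    #
    #     if __name__ == '__main__':
    #         ExampleScraper().doScrape()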
   
   
class GenericHTMLDisclogScraper(GenericDisclogScraper):
    def doScrape(self):
        foidocsdb = scrape.couch['disclosr-foidocuments']
        (url, mime_type, rcontent) = scrape.fetchURL(scrape.docsdb,
            self.getURL(), "foidocuments", self.getAgencyID())
        content = rcontent
        dochash = scrape.mkhash(content)
        doc = foidocsdb.get(dochash)
        if doc is None:
            print "saving " + dochash
            description = "This log may have updated but as it was not in a table last time we viewed it, we cannot extract what has changed. Please refer to the agency's website Disclosure Log to see the most recent entries"
            last_attach = scrape.getLastAttachment(scrape.docsdb, self.getURL())
            if last_attach is not None:
                html_diff = difflib.HtmlDiff()
                diff = html_diff.make_table(last_attach.read().split('\n'),
                    content.split('\n'))
            edate = date.today().strftime("%Y-%m-%d")
            doc = {'_id': dochash, 'agencyID': self.getAgencyID()
                , 'url': self.getURL(), 'docID': dochash,
                "date": edate, "title": "