datagov fixes


Former-commit-id: ed3ba96db4beeb126f802a3168476e27f298aeb8

import ckanclient
import couchdb
from ckanclient import CkanApiError
import re
import html2text # aaronsw :(


class LoaderError(Exception):
    pass

# Instantiate the CKAN client.
#ckan = ckanclient.CkanClient(base_location='http://localhost:5000/api', api_key='b47b24cd-591d-40c1-8677-d73101d56d1b')
ckan = ckanclient.CkanClient(base_location='http://data.disclosurelo.gs/api',
                             api_key='482a9dd2-a976-4adf-ac77-d71d92a98a52')
couch = couchdb.Server('http://127.0.0.1:5984/')
#couch = couchdb.Server('http://192.168.1.113:5984/')

# http://code.activestate.com/recipes/578019-bytes-to-human-human-to-bytes-converter/
# note: the 'customary' symbols appear to have been changed from the recipe's single
# letters to the 'KB'/'MB'/'GB' strings used in data.gov.au size fields, so the
# '1 K', '1 M' and '1 k' doctests below will no longer pass
SYMBOLS = {
    'customary': ('B', 'KB', 'MB', 'GB', 'T', 'P', 'E', 'Z', 'Y'),
    'customary_ext': ('byte', 'kilo', 'mega', 'giga', 'tera', 'peta', 'exa',
                      'zetta', 'iotta'),
    'iec': ('Bi', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi', 'Yi'),
    'iec_ext': ('byte', 'kibi', 'mebi', 'gibi', 'tebi', 'pebi', 'exbi',
                'zebi', 'yobi'),
}
   
def human2bytes(s):
    """
    Attempts to guess the string format based on default symbols
    set and return the corresponding bytes as an integer.
    When unable to recognize the format ValueError is raised.

    >>> human2bytes('0 B')
    0
    >>> human2bytes('1 K')
    1024
    >>> human2bytes('1 M')
    1048576
    >>> human2bytes('1 Gi')
    1073741824
    >>> human2bytes('1 tera')
    1099511627776

    >>> human2bytes('0.5kilo')
    512
    >>> human2bytes('0.1 byte')
    0
    >>> human2bytes('1 k') # k is an alias for K
    1024
    >>> human2bytes('12 foo')
    Traceback (most recent call last):
        ...
    ValueError: can't interpret '12 foo'
    """
    init = s
    num = ""
    while s and s[0:1].isdigit() or s[0:1] == '.':
        num += s[0]
        s = s[1:]
    num = float(num)
    letter = s.strip()
    for name, sset in SYMBOLS.items():
        if letter in sset:
            break
    else:
        if letter == 'k':
            # treat 'k' as an alias for 'K' as per: http://goo.gl/kTQMs
            sset = SYMBOLS['customary']
            letter = letter.upper()
        else:
            raise ValueError("can't interpret %r" % init)
    prefix = {sset[0]: 1}
    for i, s in enumerate(sset[1:]):
        prefix[s] = 1 << (i + 1) * 10
    return int(num * prefix[letter])
   
# https://github.com/okfn/ckanext-importlib
def munge(name):
    # convert spaces to underscores
    name = re.sub(' ', '_', name).lower()
    # convert symbols to dashes
    name = re.sub('[:]', '_-', name).lower()
    name = re.sub('[/]', '-', name).lower()
    # take out not-allowed characters
    name = re.sub('[^a-zA-Z0-9-_]', '', name).lower()
    # remove double underscores
    name = re.sub('__', '_', name).lower()
    return name


def name_munge(input_name):
    return munge(input_name.replace(' ', '').replace('.', '_').replace('&', 'and'))
    #[:100]
    #return input_name.replace(' ', '').replace('.', '_').replace('&', 'and')
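
# Rough illustration of the munging (example agency names are made up; outputs
# were worked out by hand from the rules above, so treat this as a sketch, not a test):
#   name_munge("Attorney-General's Department")   -> 'attorney-generalsdepartment'
#   name_munge("Dept. of Finance & Deregulation") -> 'dept_offinanceandderegulation'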
   
   
def get_licence_id(licencename):
    map = {
        "Creative Commons - Attribution-Share Alike 2.0 Australia (CC-SA)\nThe downloadable version of the database is licensed under CC-BY-SA Creative Commons Attribution Share Alike and contains only the database fields that are released under that license. These fields are object title, object number, object description as well as temporal, spatial and dimension details. It also contains a persistent URL for each record.": 'cc-by-sa',
        "CreativeCommonsAttributionNonCommercial30AustraliaCCBYNC30": 'cc-nc',
        'Otherpleasespecify': 'notspecified',
        '': 'notspecified',
        "Publicly available data": 'notspecified',
        "CreativeCommonsAttributionNoDerivativeWorks30AustraliaCCBYND30": "other-closed",
        "CreativeCommonsAttributionNonCommercialNoDerivs30AustraliaCCBYNCND30": "other-closed",
        'CreativeCommonsAttribution30AustraliaCCBY30': 'cc-by',
        "Creative Commons - Attribution 2.5 Australia (CC-BY)": 'cc-by',
        'CreativeCommonsAttributionCCBY25': 'cc-by',
        "PublicDomain": 'other-pd',
    }
    if licencename not in map.keys():
        raise Exception(licencename + " not found")
    return map[licencename]
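
# For example (values taken straight from the map above):
#   get_licence_id('PublicDomain')                                 -> 'other-pd'
#   get_licence_id('CreativeCommonsAttribution30AustraliaCCBY30')  -> 'cc-by'
#   any licence string not in the map raises an Exception naming it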
   
docsdb = couch['disclosr-documents']

if __name__ == "__main__":
    for doc in docsdb.view('app/datasets'):
        print doc.id
        if doc.value['url'] != "http://data.gov.au/data/" and doc.value['agencyID'] != "qld":
            # Collect the package metadata.
            pkg_name = doc.value['url'].replace("http://data.gov.au/dataset/", '').replace('/', '')
            tags = []
            if doc.value['agencyID'] == "AGIMO":
                if len(doc.value['metadata']["Keywords / Tags"]) > 0:
                    if hasattr(doc.value['metadata']["Keywords / Tags"], '__iter__'):
                        tags = tags + doc.value['metadata']["Keywords / Tags"]
                    else:
                        tags = tags + [doc.value['metadata']["Keywords / Tags"]]
                if 'data.gov.au Category' in doc.value['metadata'].keys() and len(doc.value['metadata']['data.gov.au Category']) > 0:
                    if hasattr(doc.value['metadata']['data.gov.au Category'], '__iter__'):
                        tags = tags + doc.value['metadata']['data.gov.au Category']
                    else:
                        tags = tags + [doc.value['metadata']['data.gov.au Category']]
                tags = [re.sub('[^a-zA-Z0-9-_.]', '', tag.replace('&', 'and')).lower() for tag in tags if tag]
                print tags
                package_entity = {
                    'name': pkg_name,
                    'title': doc.value['metadata']['DCTERMS.Title'],
                    'url': doc.value['metadata']['DCTERMS.Source.URI'],
                    'tags': tags, # tags are mandatory?
                    'author': doc.value['metadata']["DCTERMS.Creator"],
                    'maintainer': doc.value['metadata']["DCTERMS.Creator"],
                    'licence_id': get_licence_id(doc.value['metadata']['DCTERMS.License']),
                    'notes': html2text.html2text(doc.value['metadata']['Description']),
                }
            if doc.value['agencyID'] == "qld":
                package_entity = doc.value['metadata']

            try:
                print package_entity
                ckan.package_register_post(package_entity)
            except CkanApiError, e:
                if ckan.last_status == 409:
                    print "package already exists"
                else:
                    raise LoaderError('Unexpected status %s checking for package under \'%s\': %r' % (
                        ckan.last_status, pkg_name, e.args))

            # add to group
            group_name = name_munge(doc.value['metadata']["Agency"][:100])
            try:
                print ckan.group_entity_get(group_name)

                # Update the group details
                group_entity = ckan.last_message
                print "group exists"
                if 'packages' in group_entity.keys():
                    group_entity['packages'] = list(set(group_entity['packages'] + [pkg_name]))
                else:
                    group_entity['packages'] = [pkg_name]
                ckan.group_entity_put(group_entity)
            except CkanApiError, e:
                if ckan.last_status == 404:
                    print "group does not exist, creating"
                    group_entity = {
                        'name': group_name,
                        'title': doc.value['metadata']["Agency"],
                        'description': doc.value['metadata']["Agency"],
                        'packages': [pkg_name],
                        # 'type': "organization" # not allowed via API, use database query
                        # update "group" set type = 'organization';
                    }
                    print group_entity
                    ckan.group_register_post(group_entity)
                elif ckan.last_status == 409:
                    print "group already exists"
                else:
                    raise LoaderError('Unexpected status %s adding to group under \'%s\': %r' % (
                        ckan.last_status, pkg_name, e.args))
            if 'Download' in doc.value['metadata'].keys():
                try:
                    pkg = ckan.package_entity_get(pkg_name)
                    resources = pkg.get('resources', [])
                    if len(resources) < len(doc.value['metadata']['Download']):
                        for resource in doc.value['metadata']['Download']:
                            # http://docs.ckan.org/en/ckan-1.7/domain-model-resource.html
                            # (KML/KMZ) / (Shapefile) / (Other)
                            format = "plain"
                            if resource['format'] == '(XML)':
                                format = 'xml'
                            if resource['format'] == '(CSV/XLS)':
                                format = 'csv'
                            name = resource['href']
                            if 'name' in resource.keys():
                                name = resource['name']
                            print resource
                            ckan.add_package_resource(pkg_name, resource['href'], name=name, resource_type='data',
                                                      format=format, size=human2bytes(resource['size'].replace(',', '')))
                    else:
                        print "resources already exist"
                except CkanApiError, e:
                    if ckan.last_status == 404:
                        print "parent dataset does not exist"
                    else:
                        raise LoaderError('Unexpected status %s checking for package under \'%s\': %r' % (
                            ckan.last_status, pkg_name, e.args))
   
import sys, os
import time
import scrape
from bs4 import BeautifulSoup

from unidecode import unidecode
import ckanclient

# Instantiate the CKAN client.
ckan = ckanclient.CkanClient(base_location='https://data.qld.gov.au/api')

# Get the package list.
package_list = ckan.package_register_get()
for package_name in package_list:
    # Get the details of a package.
    (url, mime_type, html) = scrape.fetchURL(scrape.docsdb,
        "https://data.qld.gov.au/dataset/" + package_name, "data", "qld", False)
    hash = scrape.mkhash(scrape.canonurl(url))
    print hash
    doc = scrape.docsdb.get(hash)
    if "metadata" not in doc.keys() or True:
        ckan.package_entity_get(package_name)
        package_entity = ckan.last_message
        doc['type'] = "dataset"
        doc['metadata'] = package_entity
        print package_entity
        scrape.docsdb.save(doc)
 
import sys, os
import time
import scrape
from bs4 import BeautifulSoup

from unidecode import unidecode

items = 3950
items = 1
while True:
    print str(items) + " (" + str(items / 25) + " screens to go)"
    listurl = "http://gazettes.ag.gov.au/portal/govgazonline.nsf/publications?OpenView&Start=" + str(items)
    (listurl, mime_type, listhtml) = scrape.fetchURL(scrape.docsdb,
        listurl, "gazette", "AGD", False)
    for line in listhtml.split('\n'):
        soup = BeautifulSoup(line)
        #print line
        for row in soup.find_all('tr'):
            print line
            if row.has_key('valign'):
                i = 0
                date = ""
                id = ""
                type = ""
                description = ""
                name = ""
                url = ""
                for col in soup.find_all('td'):
                    #print ''.join(col.stripped_strings)
                    if i == 0:
                        date = ''.join(col.stripped_strings)
                    if i == 1:
                        id = ''.join(col.stripped_strings)
                    if i == 2:
                        type = ''.join(col.stripped_strings)
                    if i == 3:
                        description = ''.join(col.stripped_strings)
                        for link in col.findAll('a'):
                            if link.has_key("href"):
                                url = link['href']
                                name = ''.join(link.stripped_strings)
                                print str(items) + " (" + str(items / 25) + " screens to go)"
                                print [date, id, type, description, name, url]
                                itemurl = scrape.fullurl(listurl, url)
                                (itemurl, mime_type, html) = scrape.fetchURL(scrape.docsdb,
                                    itemurl, "gazette", "AGD", False)
                                hash = scrape.mkhash(scrape.canonurl(itemurl))
                                doc = scrape.docsdb.get(hash)
                                doc['metadata'] = {"date": date, "id": id, "type": type, "description": description, "name": name, "url": url}
                                scrape.docsdb.save(doc)
                                #time.sleep(2)
                    i = i + 1

    items = items - 25
    if items <= 0:
        break
   
import sys
import os

sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
import scrape
from bs4 import BeautifulSoup
from time import mktime
import feedparser
import abc
import unicodedata
import re
import dateutil
from dateutil.parser import *
from datetime import *
import codecs
import zipfile  # needed by GenericDOCXDisclogScraper below
from lxml import etree  # needed by GenericDOCXDisclogScraper below (lxml assumed)

import difflib

from StringIO import StringIO

from pdfminer.pdfparser import PDFDocument, PDFParser
from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter, process_pdf
from pdfminer.pdfdevice import PDFDevice, TagExtractor
from pdfminer.converter import TextConverter
from pdfminer.cmapdb import CMapDB
from pdfminer.layout import LAParams
   
class GenericDisclogScraper(object):
    __metaclass__ = abc.ABCMeta
    agencyID = None
    disclogURL = None

    def remove_control_chars(self, input):
        return "".join([i for i in input if ord(i) in range(32, 127)])

    def getAgencyID(self):
        """ disclosr agency id """
        if self.agencyID is None:
            self.agencyID = os.path.basename(sys.argv[0]).replace(".py", "")
        return self.agencyID

    def getURL(self):
        """ disclog URL"""
        if self.disclogURL is None:
            agency = scrape.agencydb.get(self.getAgencyID())
            self.disclogURL = agency['FOIDocumentsURL']
        return self.disclogURL

    @abc.abstractmethod
    def doScrape(self):
        """ do the scraping """
        return
   
   
class GenericHTMLDisclogScraper(GenericDisclogScraper):
    def doScrape(self):
        foidocsdb = scrape.couch['disclosr-foidocuments']
        (url, mime_type, rcontent) = scrape.fetchURL(scrape.docsdb,
            self.getURL(), "foidocuments", self.getAgencyID())
        content = rcontent
        dochash = scrape.mkhash(content)
        doc = foidocsdb.get(dochash)
        if doc is None:
            print "saving " + dochash
            description = "This log may have updated but as it was not in a table last time we viewed it, we cannot extract what has changed. Please refer to the agency's website Disclosure Log to see the most recent entries"
            diff = ""  # default when there is no previous attachment to compare against
            last_attach = scrape.getLastAttachment(scrape.docsdb, self.getURL())
            if last_attach != None:
                html_diff = difflib.HtmlDiff()
                diff = html_diff.make_table(last_attach.read().split('\n'),
                                            content.split('\n'))
            edate = date.today().strftime("%Y-%m-%d")
            doc = {'_id': dochash, 'agencyID': self.getAgencyID()
                , 'url': self.getURL(), 'docID': dochash,
                "date": edate, "title": "Disclosure Log Updated", "description": self.remove_control_chars(description), "diff": diff}
            foidocsdb.save(doc)
        else:
            print "already saved"
   
   
class GenericPDFDisclogScraper(GenericDisclogScraper):
    def doScrape(self):
        foidocsdb = scrape.couch['disclosr-foidocuments']
        (url, mime_type, content) = scrape.fetchURL(scrape.docsdb,
            self.getURL(), "foidocuments", self.getAgencyID())
        laparams = LAParams()
        rsrcmgr = PDFResourceManager(caching=True)
        outfp = StringIO()
        device = TextConverter(rsrcmgr, outfp, codec='utf-8',
                               laparams=laparams)
        fp = StringIO()
        fp.write(content)

        process_pdf(rsrcmgr, device, fp, set(), caching=True,
                    check_extractable=True)
        description = outfp.getvalue()
        fp.close()
        device.close()
        outfp.close()
        dochash = scrape.mkhash(description)
        doc = foidocsdb.get(dochash)
        if doc is None:
            print "saving " + dochash
            edate = date.today().strftime("%Y-%m-%d")
            doc = {'_id': dochash, 'agencyID': self.getAgencyID()
                , 'url': self.getURL(), 'docID': dochash,
                "date": edate, "title": "Disclosure Log Updated", "description": self.remove_control_chars(description)}
            foidocsdb.save(doc)
        else:
            print "already saved"
   
   
class GenericDOCXDisclogScraper(GenericDisclogScraper):
    def doScrape(self):
        foidocsdb = scrape.couch['disclosr-foidocuments']
        (url, mime_type, content) = scrape.fetchURL(scrape.docsdb
            , self.getURL(), "foidocuments", self.getAgencyID())
        # wrap the fetched .docx bytes so ZipFile can read them
        mydoc = zipfile.ZipFile(StringIO(content))
        xmlcontent = mydoc.read('word/document.xml')
        document = etree.fromstring(xmlcontent)
        ## Fetch all the text out of the document we just created
        # NOTE: getdocumenttext() is not imported in this file; it appears to be the
        # helper of the same name from the old docx.py module
        paratextlist = getdocumenttext(document)
        # Make explicit unicode version
        newparatextlist = []
        for paratext in paratextlist:
            newparatextlist.append(paratext.encode("utf-8"))
        ## Join our document's text with two newlines under each paragraph
        description = '\n\n'.join(newparatextlist).strip(' \t\n\r')
        dochash = scrape.mkhash(description)
        doc = foidocsdb.get(dochash)

        if doc is None:
            print "saving " + dochash
            edate = date.today().strftime("%Y-%m-%d")
            doc = {'_id': dochash, 'agencyID': self.getAgencyID()
                , 'url': self.getURL(), 'docID': dochash,
                "date": edate, "title": "Disclosure Log Updated", "description": description}
            foidocsdb.save(doc)
        else:
            print "already saved"
   
   
class GenericRSSDisclogScraper(GenericDisclogScraper):
    def doScrape(self):
        foidocsdb = scrape.couch['disclosr-foidocuments']
        (url, mime_type, content) = scrape.fetchURL(scrape.docsdb,
            self.getURL(), "foidocuments", self.getAgencyID())
        feed = feedparser.parse(content)
        for entry in feed.entries:
            #print entry
            print entry.id
            dochash = scrape.mkhash(entry.id)
            doc = foidocsdb.get(dochash)
            #print doc
            if doc is None:
                print "saving " + dochash
                edate = datetime.fromtimestamp(
                    mktime(entry.published_parsed)).strftime("%Y-%m-%d")
                doc = {'_id': dochash, 'agencyID': self.getAgencyID(),
                       'url': entry.link, 'docID': entry.id,
                       "date": edate, "title": entry.title}
                self.getDescription(entry, entry, doc)
                foidocsdb.save(doc)
            else:
                print "already saved"

    def getDescription(self, content, entry, doc):
        """ get description from rss entry"""
        doc.update({'description': content.summary})

        return
   
   
class GenericOAICDisclogScraper(GenericDisclogScraper):
    __metaclass__ = abc.ABCMeta

    @abc.abstractmethod
    def getColumns(self, columns):
        """ rearranges columns if required """
        return

    def getColumnCount(self):
        return 5

    def getDescription(self, content, entry, doc):
        """ get description from rss entry"""
        descriptiontxt = ""
        for string in content.stripped_strings:
            descriptiontxt = descriptiontxt + " \n" + string
        doc.update({'description': descriptiontxt})

    def getTitle(self, content, entry, doc):
        doc.update({'title': (''.join(content.stripped_strings))})

    def getTable(self, soup):
        return soup.table

    def getRows(self, table):
        return table.find_all('tr')

    def getDate(self, content, entry, doc):
        date = ''.join(content.stripped_strings).strip()
        (a, b, c) = date.partition("(")
        date = self.remove_control_chars(a.replace("Octber", "October").replace("1012", "2012").replace("Janurary", "January"))
        print date
        edate = parse(date, dayfirst=True, fuzzy=True).strftime("%Y-%m-%d")
        print edate
        doc.update({'date': edate})
        return

    def getLinks(self, content, entry, doc):
        links = []
        for atag in entry.find_all("a"):
            if atag.has_key('href'):
                links.append(scrape.fullurl(content, atag['href']))
        if links != []:
            doc.update({'links': links})
        return
   
    def doScrape(self):
        foidocsdb = scrape.couch['disclosr-foidocuments']
        (url, mime_type, content) = scrape.fetchURL(scrape.docsdb,
            self.getURL(), "foidocuments", self.getAgencyID())
        if content is not None:
            if mime_type == "text/html" or mime_type == "application/xhtml+xml" or mime_type == "application/xml":
                # http://www.crummy.com/software/BeautifulSoup/documentation.html
                print "parsing"
                soup = BeautifulSoup(content)
                table = self.getTable(soup)
                for row in self.getRows(table):
                    columns = row.find_all('td')
                    if len(columns) == self.getColumnCount():
                        (id, date, title,
                         description, notes) = self.getColumns(columns)
                        print self.remove_control_chars(
                            ''.join(id.stripped_strings))
                        if id.string is None:
                            dochash = scrape.mkhash(
                                self.remove_control_chars(
                                    url + (''.join(date.stripped_strings))))
                        else:
                            dochash = scrape.mkhash(
                                self.remove_control_chars(
                                    url + (''.join(id.stripped_strings))))
                        doc = foidocsdb.get(dochash)

                        if doc is None:
                            print "saving " + dochash
                            doc = {'_id': dochash,
                                   'agencyID': self.getAgencyID(),
                                   'url': self.getURL(),
                                   'docID': (''.join(id.stripped_strings))}
                            self.getLinks(self.getURL(), row, doc)
                            self.getTitle(title, row, doc)
                            self.getDate(date, row, doc)
                            self.getDescription(description, row, doc)
                            if notes is not None:
                                doc.update({'notes': (
                                    ''.join(notes.stripped_strings))})
                            badtitles = ['-', 'Summary of FOI Request'
                                , 'FOI request(in summary form)'
                                , 'Summary of FOI request received by the ASC',
                                'Summary of FOI request received by agency/minister',
                                'Description of Documents Requested', 'FOI request',
                                'Description of FOI Request', 'Summary of request', 'Description', 'Summary',
                                'Summary of FOIrequest received by agency/minister',
                                'Summary of FOI request received', 'Description of FOI Request',
                                "FOI request", 'Results 1 to 67 of 67']
                            if doc['title'] not in badtitles \
                                    and doc['description'] != '':
                                print "saving"
                                foidocsdb.save(doc)
                        else:
                            print "already saved " + dochash

                    elif len(row.find_all('th')) == self.getColumnCount():
                        print "header row"

                    else:
                        print "ERROR number of columns incorrect"
                        print row
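
The table scrapers above are meant to be subclassed once per agency. A minimal sketch of such a scraper, assuming the classes above are importable as a module named genericScrapers, that the script lives under scrapers/ so runScrapers.sh picks it up (its filename becomes the agencyID), that the agency record has an FOIDocumentsURL, and that the column order shown is hypothetical:

import sys, os
sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
import genericScrapers  # assumed module name for the classes above


class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
    def getColumns(self, columns):
        # map the five <td> cells of each row to (id, date, title, description, notes);
        # the order depends on the agency's disclosure log table layout
        (id, date, title, description, notes) = columns
        return (id, date, title, description, notes)


if __name__ == '__main__':
    ScraperImplementation().doScrape()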
   
rm /tmp/disclosr-error
# append stderr from every scraper so the error report mailed below covers the whole run
for f in scrapers/*.py; do
    echo "Processing $f file..";
    python $f 2>>/tmp/disclosr-error;
    if [ "$?" -ne "0" ]; then
        echo "error";
        sleep 2;
    fi
done
if [ -s /tmp/disclosr-error ] ; then
    mail -E -s "Disclosr errors" maxious@lambdacomplex.org < /tmp/disclosr-error ;
fi