export no-html and licence
export no-html and licence


Former-commit-id: 39dafe9fefec609588df4f189c2364dae8edd246

import ckanclient import ckanclient
import couchdb import couchdb
from ckanclient import CkanApiError from ckanclient import CkanApiError
import re import re
   
class LoaderError(Exception):
    """Raised when the CKAN API returns a status the loader cannot handle."""
   
# Instantiate the CKAN client.
# (use your own api_key from http://thedatahub.org/user/me )
#ckan = ckanclient.CkanClient(base_location='http://localhost:5000/api', api_key='b47b24cd-591d-40c1-8677-d73101d56d1b')
ckan = ckanclient.CkanClient(base_location='http://data.disclosurelo.gs/api', api_key='72f90359-0396-438c-804f-a26a24336747')
# CouchDB server holding the scraped documents (local alternative kept for reference).
#couch = couchdb.Server('http://127.0.0.1:5984/')
couch = couchdb.Server('http://192.168.1.113:5984/')
   
  # http://stackoverflow.com/a/7778368/684978
  from HTMLParser import HTMLParser
  import htmlentitydefs
   
class HTMLTextExtractor(HTMLParser):
    """HTMLParser subclass that collects only the text content of a document,
    decoding numeric character references and named entities along the way.

    Feed it HTML, then call get_text() for the concatenated unicode text.
    """

    def __init__(self):
        HTMLParser.__init__(self)
        # accumulated text fragments, joined by get_text()
        self.result = [ ]

    def handle_data(self, d):
        # plain text between tags
        self.result.append(d)

    def handle_charref(self, number):
        # numeric reference, e.g. &#65; (decimal) or &#x41; (hex)
        codepoint = int(number[1:], 16) if number[0] in (u'x', u'X') else int(number)
        self.result.append(unichr(codepoint))

    def handle_entityref(self, name):
        # named entity, e.g. &amp; -- raises KeyError on unknown entity names
        codepoint = htmlentitydefs.name2codepoint[name]
        self.result.append(unichr(codepoint))

    def get_text(self):
        return u''.join(self.result)
   
def html_to_text(html):
    """Strip markup from *html* and return its text content as unicode."""
    s = HTMLTextExtractor()
    s.feed(html)
    return s.get_text()
   
# http://code.activestate.com/recipes/578019-bytes-to-human-human-to-bytes-converter/ # http://code.activestate.com/recipes/578019-bytes-to-human-human-to-bytes-converter/
# Symbol tables mapping size-suffix spellings to powers of 1024.
# NOTE(review): 'customary' mixes two-letter suffixes (KB/MB/GB) with
# single letters (T, P, ...); kept as-is because the scraped size strings
# use the KB/MB/GB forms.
SYMBOLS = {
    'customary': ('B', 'KB', 'MB', 'GB', 'T', 'P', 'E', 'Z', 'Y'),
    'customary_ext': ('byte', 'kilo', 'mega', 'giga', 'tera', 'peta', 'exa',
                      'zetta', 'iotta'),
    'iec': ('Bi', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi', 'Yi'),
    'iec_ext': ('byte', 'kibi', 'mebi', 'gibi', 'tebi', 'pebi', 'exbi',
                'zebi', 'yobi'),
}

def human2bytes(s):
    """
    Attempts to guess the string format based on default symbols
    set and return the corresponding bytes as an integer.
    When unable to recognize the format ValueError is raised.

    >>> human2bytes('0 B')
    0
    >>> human2bytes('1 KB')
    1024
    >>> human2bytes('1 MB')
    1048576
    >>> human2bytes('1 Gi')
    1073741824
    >>> human2bytes('1 tera')
    1099511627776

    >>> human2bytes('0.5kilo')
    512
    >>> human2bytes('0.1 byte')
    0
    >>> human2bytes('1 k') # k is an alias for KB
    1024
    >>> human2bytes('12 foo')
    Traceback (most recent call last):
        ...
    ValueError: can't interpret '12 foo'
    """
    init = s
    num = ""
    # consume the leading numeric part (digits and decimal point)
    while s and (s[0:1].isdigit() or s[0:1] == '.'):
        num += s[0]
        s = s[1:]
    num = float(num)
    letter = s.strip()
    for name, sset in SYMBOLS.items():
        if letter in sset:
            break
    else:
        if letter == 'k':
            # treat 'k' as an alias for KB as per: http://goo.gl/kTQMs
            # bugfix: letter.upper() gave 'K', which is not in the 'customary'
            # table (it holds 'KB'), so '1 k' raised KeyError below.
            sset = SYMBOLS['customary']
            letter = 'KB'
        else:
            raise ValueError("can't interpret %r" % init)
    # build {suffix: multiplier} for the matched symbol set
    prefix = {sset[0]: 1}
    for i, symbol in enumerate(sset[1:]):
        prefix[symbol] = 1 << (i + 1) * 10
    return int(num * prefix[letter])
   
# https://github.com/okfn/ckanext-importlib # https://github.com/okfn/ckanext-importlib
def munge(name):
    """Normalise *name* into a CKAN-safe identifier: lowercase, with only
    letters, digits, dashes and underscores."""
    munged = name.lower().replace(' ', '_')        # spaces -> underscores
    munged = munged.replace(':', '_-')             # colons -> '_-'
    munged = munged.replace('/', '-')              # slashes -> dashes
    munged = re.sub('[^a-zA-Z0-9-_]', '', munged)  # drop disallowed characters
    return munged.replace('__', '_')               # collapse double underscores


def name_munge(input_name):
    """Pre-clean a dataset title (strip spaces, dots -> underscores,
    '&' -> 'and') and munge it into a CKAN package name."""
    cleaned = input_name.replace(' ', '').replace('.', '_').replace('&', 'and')
    return munge(cleaned)
    #return input_name.replace(' ', '').replace('.', '_').replace('&', 'and')
   
def get_licence_id(licencename):
    """Map a licence name scraped from data.gov.au metadata to a CKAN licence id.

    Raises Exception when the licence name is unknown, so new licence
    strings fail loudly instead of being silently mis-filed.
    """
    # known scraped licence strings -> CKAN licence ids
    # (renamed from 'map', which shadowed the builtin)
    licence_map = {
        "Creative Commons - Attribution-Share Alike 2.0 Australia (CC-SA)\nThe downloadable version of the database is licensed under CC-BY-SA Creative Commons Attribution Share Alike and contains only the database fields that are released under that license. These fields are object title, object number, object description as well as temporal, spatial and dimension details. It also contains a persistent URL for each record.": 'cc-by-sa',
        "CreativeCommonsAttributionNonCommercial30AustraliaCCBYNC30": 'cc-nc',
        'Otherpleasespecify': 'notspecified',
        '': 'notspecified',
        "Publicly available data": 'notspecified',
        "CreativeCommonsAttributionNoDerivativeWorks30AustraliaCCBYND30": "other-closed",
        "CreativeCommonsAttributionNonCommercialNoDerivs30AustraliaCCBYNCND30": "other-closed",
        'CreativeCommonsAttribution30AustraliaCCBY30': 'cc-by',
        "Creative Commons - Attribution 2.5 Australia (CC-BY)": 'cc-by',
        'CreativeCommonsAttributionCCBY25': 'cc-by',
        "PublicDomain": 'other-pd',
    }
    try:
        return licence_map[licencename]
    except KeyError:
        raise Exception(licencename + " not found")
   
# CouchDB database holding the scraped documents/datasets
docsdb = couch['disclosr-documents']
   
if __name__ == "__main__":
    # Export every scraped dataset document into CKAN: register the package,
    # then attach its downloadable resources if any are listed.
    for doc in docsdb.view('app/datasets'):
        print doc.id
        if doc.value['url'] != "http://data.gov.au/data/":
            # Collect the package metadata.
            pkg_name = name_munge(doc.value['metadata']['DCTERMS.Title'][:100])
            tags = doc.value['metadata']["Keywords / Tags"]
            if not hasattr(tags, '__iter__'):
                tags = [tags]
            # NOTE(review): the result of this comprehension is discarded, so
            # the tags are never actually cleaned -- looks like a bug; confirm.
            [re.sub('[^a-zA-Z0-9-_()]', '', tag).replace('&', 'and').lower() for tag in tags]
            package_entity = {
                'name': pkg_name,
                'title': doc.value['metadata']['DCTERMS.Title'],
                'url': doc.value['metadata']['DCTERMS.Source.URI'],
                'author': doc.value['metadata']["DCTERMS.Creator"],
                'maintainer': doc.value['metadata']["DCTERMS.Creator"],
                'licence_id': get_licence_id(doc.value['metadata']['DCTERMS.License']),
                # strip markup from the scraped description before upload
                'notes': html_to_text(doc.value['metadata']['Description']),
            }
            if len(tags) > 0:
                package_entity['tags'] = tags
                print tags
            try:
                #print doc.id
                ckan.package_register_post(package_entity)
            except CkanApiError, e:
                # 409 means the package already exists; anything else is fatal
                if ckan.last_status == 409:
                    print "already exists"
                else:
                    raise LoaderError('Unexpected status %s checking for package under \'%s\': %r' % (
                        ckan.last_status, pkg_name, e.args))

            print package_entity
            #todo add to organisation (author/creator/maintainer) http://docs.ckan.org/en/latest/apiv3.html#examples ckan.logic.action.update.package_owner_org_update
            #if 'data.gov.au Category' in doc.value['metadata'].keys(): #todo add to group
            if 'Download' in doc.value['metadata'].keys():
                try:
                    # only add resources when CKAN has fewer than we scraped
                    pkg = ckan.package_entity_get(pkg_name)
                    resources = pkg.get('resources', [])
                    if len(resources) < len(doc.value['metadata']['Download']):
                        for resource in doc.value['metadata']['Download']:
                            print resource
                            # http://docs.ckan.org/en/ckan-1.7/domain-model-resource.html
                            # (KML/KMZ) / (Shapefile) /(Other)
                            format = "plain"
                            if resource['format'] == '(XML)':
                                format = 'xml'
                            if resource['format'] == '(CSV/XLS)':
                                format = 'csv'
                            name = resource['href']
                            if 'name' in resource.keys():
                                name = resource['name']
                            # sizes are scraped as human-readable strings, e.g. "1,024 KB"
                            ckan.add_package_resource(pkg_name, resource['href'], name=name, resource_type='data',
                                format=format, size=human2bytes(resource['size'].replace(',', '')))
                    else:
                        print "resources already exist"
                except CkanApiError, e:
                    if ckan.last_status == 404:
                        print "parent dataset does not exist"
                    else:
                        raise LoaderError('Unexpected status %s checking for package under \'%s\': %r' % (
                            ckan.last_status, pkg_name, e.args))
   
import sys, os import sys, os
import time import time
import scrape import scrape
from bs4 import BeautifulSoup from bs4 import BeautifulSoup
   
from unidecode import unidecode from unidecode import unidecode
   
# Crawl the data.gov.au catalogue listing, fetch each dataset page, and
# store its parsed metadata (meta tags plus the <dl> definition lists) on
# the corresponding CouchDB document.
listurl = "http://data.gov.au/data/"
(url, mime_type, datasetlisthtml) = scrape.fetchURL(scrape.docsdb,
    listurl, "data", "AGIMO")
soup = BeautifulSoup(datasetlisthtml)
for atag in soup.find_all(class_='result-title'):
    if atag.has_key('href'):
        url = scrape.fullurl(listurl, atag['href'])
        # scrape_again=False: reuse the stored copy of each dataset page
        (url, mime_type, html) = scrape.fetchURL(scrape.docsdb,
            url, "data", "AGIMO", False)
        # NOTE: 'hash', 'list' and 'format' below shadow Python builtins
        hash = scrape.mkhash(scrape.canonurl(url))
        doc = scrape.docsdb.get(hash)
        # 'or True' forces re-parsing of metadata on every run
        if "metadata" not in doc.keys() or True:
            doc['type'] = "dataset"
            doc['metadata'] = {}
            soup = BeautifulSoup(html)
            for metatag in soup.find_all('meta'):
                if metatag.has_key('name'):
                    doc['metadata'][metatag['name']] = metatag['content']
            # the dataset page lays out fields as <dl><dt>title</dt><dd>value</dd>
            for list in soup.find_all('dl'):
                last_title = ""
                for child in list.children:
                    if str(type(child)) != "<class 'bs4.element.NavigableString'>":
                        if child.name == 'dt' and child.string != None:
                            last_title = child.string.strip()
                        if child.name == 'dd':
                            #print last_title
                            if last_title == "Description":
                                # keep raw (ASCII-transliterated) HTML; stripped later at export
                                doc['metadata'][last_title] = unidecode(str(child)).encode('ascii', 'ignore')
                            elif last_title == "Download":
                                # one linkobj per listed file: href, format, size (+ optional name)
                                doc['metadata'][last_title] = []
                                for item in child.find_all("li"):
                                    link = item.find("a")
                                    format = item.find(property="dc:format")
                                    linkobj = {"href": link['href'].replace("/bye?", "").strip(),
                                        "format": format.string.strip(), "size": format.next_sibling.string.strip()}
                                    if link.string != None:
                                        linkobj["name"] = link.string.strip()
                                    doc['metadata'][last_title].append(linkobj)

                            else:
                                atags = child.find_all('a')
                                if len(atags) < 2:
                                    # drop "view all" widgets before flattening the text
                                    [s.extract() for s in child(class_='viewAll')]
                                    doc['metadata'][last_title] = ''.join(child.stripped_strings).strip()
                                else:
                                    doc['metadata'][last_title] = [item.string.replace(",", "").strip() for item in atags]
            print doc['metadata']
            scrape.docsdb.save(doc)
            #time.sleep(2)
   
#http://packages.python.org/CouchDB/client.html #http://packages.python.org/CouchDB/client.html
import couchdb import couchdb
import urllib2 import urllib2
from BeautifulSoup import BeautifulSoup from BeautifulSoup import BeautifulSoup
import re import re
import hashlib import hashlib
from urlparse import urljoin from urlparse import urljoin
import time import time
import os import os
import mimetypes import mimetypes
import urllib import urllib
import urlparse import urlparse
import socket import socket
   
# CouchDB server hosting the disclosr databases (alternate hosts kept for reference)
#couch = couchdb.Server('http://192.168.1.148:5984/')
couch = couchdb.Server('http://192.168.1.113:5984/')
#couch = couchdb.Server('http://127.0.0.1:5984/')
   
   
def mkhash(input):
    """Return the hex MD5 digest of *input*, used as a CouchDB document id.

    NOTE(review): .encode("utf-8") is applied to the hex digest (already
    ASCII), not to the input -- a no-op under Python 2; confirm intent.
    """
    return hashlib.md5(input).hexdigest().encode("utf-8")
   
   
def canonurl(url):
    r"""Return the canonical, ASCII-encoded form of a UTF-8 encoded URL, or ''
    if the URL looks invalid.
    >>> canonurl('\xe2\x9e\xa1.ws') # tinyarro.ws
    'http://xn--hgi.ws/'
    """
    # strip spaces at the ends and ensure it's prefixed with 'scheme://'
    url = url.strip()
    if not url:
        return ''
    if not urlparse.urlsplit(url).scheme:
        url = 'http://' + url

    # turn it into Unicode
    #try:
    #    url = unicode(url, 'utf-8')
    #except UnicodeDecodeError:
    #    return ''  # bad UTF-8 chars in URL

    # parse the URL into its components
    parsed = urlparse.urlsplit(url)
    scheme, netloc, path, query, fragment = parsed

    # ensure scheme is a letter followed by letters, digits, and '+-.' chars
    if not re.match(r'[a-z][-+.a-z0-9]*$', scheme, flags=re.I):
        return ''
    scheme = str(scheme)

    # ensure domain and port are valid, eg: sub.domain.<1-to-6-TLD-chars>[:port]
    match = re.match(r'(.+\.[a-z0-9]{1,6})(:\d{1,5})?$', netloc, flags=re.I)
    if not match:
        return ''
    domain, port = match.groups()
    netloc = domain + (port if port else '')
    # IDNA-encode internationalised domain names to ASCII
    netloc = netloc.encode('idna')

    # ensure path is valid and convert Unicode chars to %-encoded
    if not path:
        path = '/'  # eg: 'http://google.com' -> 'http://google.com/'
    path = urllib.quote(urllib.unquote(path.encode('utf-8')), safe='/;')

    # ensure query is valid
    query = urllib.quote(urllib.unquote(query.encode('utf-8')), safe='=&?/')

    # ensure fragment is valid
    fragment = urllib.quote(urllib.unquote(fragment.encode('utf-8')))

    # piece it all back together, truncating it to a maximum of 4KB
    url = urlparse.urlunsplit((scheme, netloc, path, query, fragment))
    return url[:4096]
   
   
def fullurl(url, href):
    """Resolve *href* against base *url*, percent-encoding spaces and
    dropping any fragment ('#...') suffix."""
    cleaned = re.sub('#.*$', '', href.replace(" ", "%20"))
    return urljoin(url, cleaned)
   
#http://diveintopython.org/http_web_services/etags.html #http://diveintopython.org/http_web_services/etags.html
class NotModifiedHandler(urllib2.BaseHandler):
    """urllib2 handler that turns HTTP 304 (Not Modified) responses into a
    normal response object instead of raising an error."""

    def http_error_304(self, req, fp, code, message, headers):
        addinfourl = urllib2.addinfourl(fp, headers, req.get_full_url())
        # callers check .code == 304 to decide to reuse cached content
        addinfourl.code = code
        return addinfourl
   
   
def getLastAttachment(docsdb, url):
    """Return the most recently stored attachment for *url*, or None if the
    URL has never been fetched.

    fetchURL names attachments "<epoch>-<filename>", so the last key is
    presumed to be the newest -- relies on attachment key ordering; confirm.
    """
    hash = mkhash(url)
    doc = docsdb.get(hash)
    if doc != None:
        last_attachment_fname = doc["_attachments"].keys()[-1]
        last_attachment = docsdb.get_attachment(doc, last_attachment_fname)
        return last_attachment
    else:
        return None
   
   
def fetchURL(docsdb, url, fieldName, agencyID, scrape_again=True):
    """Fetch *url* with ETag/Last-Modified caching backed by CouchDB.

    Returns (url, mime_type, content): the final URL after redirects, the
    detected mime type, and the page body.  Returns (None, None, None) for
    invalid URLs or download errors.  If the page was scraped recently, or
    scrape_again is False, the newest stored attachment is returned instead
    of re-downloading.
    """
    url = canonurl(url)
    hash = mkhash(url)
    req = urllib2.Request(url)
    print "Fetching %s (%s)" % (url, hash)
    if url.startswith("mailto") or url.startswith("javascript") or url.startswith("#") or url == None or url == "":
        print "Not a valid HTTP url"
        return (None, None, None)
    doc = docsdb.get(hash)
    if doc == None:
        # first time we see this URL: create the document skeleton
        doc = {'_id': hash, 'agencyID': agencyID, 'url': url, 'fieldName': fieldName, 'type': 'website'}
    else:
        # NOTE(review): 60 * 24 * 14 seconds is ~5.6 hours; if "14 days" was
        # intended a factor of 60 is missing -- confirm.  Precedence here is
        # ('page_scraped' in doc AND recent) OR (scrape_again == False).
        if (('page_scraped' in doc) and ((time.time() - doc['page_scraped']) < 60 * 24 * 14) or (scrape_again == False)):
            print "Uh oh, trying to scrape URL again too soon!" + hash
            # reuse the newest stored attachment rather than re-fetching
            last_attachment_fname = doc["_attachments"].keys()[-1]
            last_attachment = docsdb.get_attachment(doc, last_attachment_fname)
            content = last_attachment
            return (doc['url'], doc['mime_type'], content.read())

    req.add_header("User-Agent", "Mozilla/4.0 (compatible; Prometheus webspider; owner maxious@lambdacomplex.org)")
    #if there is a previous version stored in couchdb, load caching helper tags
    if doc.has_key('etag'):
        req.add_header("If-None-Match", doc['etag'])
    if doc.has_key('last_modified'):
        req.add_header("If-Modified-Since", doc['last_modified'])

    opener = urllib2.build_opener(NotModifiedHandler())
    try:
        # 20-second timeout
        url_handle = opener.open(req, None, 20)
        doc['url'] = url_handle.geturl()  # may have followed a redirect to a new url
        headers = url_handle.info()  # the addinfourls have the .info() too
        doc['etag'] = headers.getheader("ETag")
        doc['last_modified'] = headers.getheader("Last-Modified")
        doc['date'] = headers.getheader("Date")
        doc['page_scraped'] = time.time()
        doc['web_server'] = headers.getheader("Server")
        doc['via'] = headers.getheader("Via")
        doc['powered_by'] = headers.getheader("X-Powered-By")
        doc['file_size'] = headers.getheader("Content-Length")
        content_type = headers.getheader("Content-Type")
        if content_type != None:
            doc['mime_type'] = content_type.split(";")[0]
        else:
            # no Content-Type header: guess from the URL's file extension
            (type, encoding) = mimetypes.guess_type(url)
            doc['mime_type'] = type
        if hasattr(url_handle, 'code'):
            if url_handle.code == 304:
                # NotModifiedHandler produced a 304: serve the cached copy
                print "the web page has not been modified" + hash
                last_attachment_fname = doc["_attachments"].keys()[-1]
                last_attachment = docsdb.get_attachment(doc, last_attachment_fname)
                content = last_attachment
                return (doc['url'], doc['mime_type'], content.read())
            else:
                print "new webpage loaded"
                content = url_handle.read()
                docsdb.save(doc)
                doc = docsdb.get(hash)  # need to get a _rev
                docsdb.put_attachment(doc, content, str(time.time()) + "-" + os.path.basename(url), doc['mime_type'])
                return (doc['url'], doc['mime_type'], content)
                #store as attachment epoch-filename

    except (urllib2.URLError, socket.timeout) as e:
        print "error!"
        error = ""
        if hasattr(e, 'reason'):
            error = "error %s in downloading %s" % (str(e.reason), url)
        elif hasattr(e, 'code'):
            error = "error %s in downloading %s" % (e.code, url)
        print error
        # record the failure on the document so it is visible in CouchDB
        doc['error'] = error
        docsdb.save(doc)
        return (None, None, None)
   
   
def scrapeAndStore(docsdb, url, depth, fieldName, agencyID): def scrapeAndStore(docsdb, url, depth, fieldName, agencyID):
(url, mime_type, content) = fetchURL(docsdb, url, fieldName, agencyID) (url, mime_type, content) = fetchURL(docsdb, url, fieldName, agencyID)
badURLs = ["http://www.ausport.gov.au/supporting/funding/grants_and_scholarships/grant_funding_report"] badURLs = ["http://www.ausport.gov.au/supporting/funding/grants_and_scholarships/grant_funding_report"]
if content != None and depth > 0 and url != "http://www.ausport.gov.au/supporting/funding/grants_and_scholarships/grant_funding_report": if content != None and depth > 0 and url != "http://www.ausport.gov.au/supporting/funding/grants_and_scholarships/grant_funding_report":
if mime_type == "text/html" or mime_type == "application/xhtml+xml" or mime_type == "application/xml": if mime_type == "text/html" or mime_type == "application/xhtml+xml" or mime_type == "application/xml":
# http://www.crummy.com/software/BeautifulSoup/documentation.html # http://www.crummy.com/software/BeautifulSoup/documentation.html
soup = BeautifulSoup(content) soup = BeautifulSoup(content)
navIDs = soup.findAll( navIDs = soup.findAll(
id=re.compile('nav|Nav|menu|bar|left|right|sidebar|more-links|breadcrumb|footer|header')) id=re.compile('nav|Nav|menu|bar|left|right|sidebar|more-links|breadcrumb|footer|header'))
for nav in navIDs: for nav in navIDs:
print "Removing element", nav['id'] print "Removing element", nav['id']
nav.extract() nav.extract()
navClasses = soup.findAll( navClasses = soup.findAll(
attrs={'class': re.compile('nav|menu|bar|left|right|sidebar|more-links|breadcrumb|footer|header')}) attrs={'class': re.compile('nav|menu|bar|left|right|sidebar|more-links|breadcrumb|footer|header')})
for nav in navClasses: for nav in navClasses:
print "Removing element", nav['class'] print "Removing element", nav['class']
nav.extract() nav.extract()
links = soup.findAll('a') # soup.findAll('a', id=re.compile("^p-")) links = soup.findAll('a') # soup.findAll('a', id=re.compile("^p-"))
linkurls = set([]) linkurls = set([])
for link in links: for link in links:
if link.has_key("href"): if link.has_key("href"):
if link['href'].startswith("http"): if link['href'].startswith("http"):
# lets not do external links for now # lets not do external links for now
# linkurls.add(link['href']) # linkurls.add(link['href'])
None None
if link['href'].startswith("mailto"): if link['href'].startswith("mailto"):
# not http # not http
None None
if link['href'].startswith("javascript"): if link['href'].startswith("javascript"):
# not http # not http
None None
else: else:
# remove anchors and spaces in urls # remove anchors and spaces in urls
linkurls.add(fullurl(url, link['href'])) linkurls.add(fullurl(url, link['href']))
for linkurl in linkurls: for linkurl in linkurls:
#print linkurl #print linkurl
scrapeAndStore(docsdb, linkurl, depth - 1, fieldName, agencyID) scrapeAndStore(docsdb, linkurl, depth - 1, fieldName, agencyID)
   
#couch = couchdb.Server('http://192.168.1.148:5984/')  
#couch = couchdb.Server('http://192.168.1.113:5984/')  
couch = couchdb.Server('http://127.0.0.1:5984/')  
# select database
agencydb = couch['disclosr-agencies']
docsdb = couch['disclosr-documents']

if __name__ == "__main__":
    # Crawl each agency's recorded URLs.  The 'and False' / 'and True'
    # guards are manual switches selecting which URL kinds to scrape.
    for row in agencydb.view('app/all'):  #not recently scraped agencies view?
        agency = agencydb.get(row.id)
        print agency['name']
        for key in agency.keys():
            if key == "FOIDocumentsURL" and "status" not in agency.keys() and False:
                scrapeAndStore(docsdb, agency[key], 0, key, agency['_id'])
            if key == 'website' and True:
                scrapeAndStore(docsdb, agency[key], 0, key, agency['_id'])
                if "metadata" not in agency.keys():
                    agency['metadata'] = {}
                # record when this agency was last crawled
                agency['metadata']['lastScraped'] = time.time()
            if key.endswith('URL') and False:
                print key
                depth = 1
                if 'scrapeDepth' in agency.keys():
                    depth = agency['scrapeDepth']
                scrapeAndStore(docsdb, agency[key], depth, key, agency['_id'])
        agencydb.save(agency)