<?php
require_once '../include/common.inc.php';
//function createFOIDocumentsDesignDoc() {
$foidb = $server->get_db('disclosr-foidocuments');
$obj = new stdClass();
$obj->_id = "_design/" . urlencode("app");
$obj->language = "javascript";
$obj->views->all->map = "function(doc) { emit(doc._id, doc); };";
$obj->views->byDate->map = "function(doc) { emit(doc.date, doc); };";
$obj->views->byDateMonthYear->map = "function(doc) { emit(doc.date, doc); };";
$obj->views->byDateMonthYear->reduce = "_count";
$obj->views->byAgencyID->map = "function(doc) { emit(doc.agencyID, doc); };";
$obj->views->byAgencyID->reduce = "_count";
$obj->views->fieldNames->map = '
function(doc) {
    for (var propName in doc) {
        emit(propName, doc._id);
    }
}';
$obj->views->fieldNames->reduce = 'function (key, values, rereduce) {
    // on rereduce the values are already counts, so sum them rather than
    // counting them again
    if (rereduce) {
        return sum(values);
    }
    return values.length;
}';
// Allow safe updates (even if slightly slower due to the extra rev-detection check).
$foidb->save($obj, true);
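// Example (sketch): reading one of these views back with the same Settee-style
// client; the get_view() call mirrors the dataset-lister usage further below.
// $rows = $foidb->get_view("app", "byAgencyID", null, true)->rows;
// With the "_count" reduce this returns FOI document counts keyed by agencyID.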
//function createDocumentsDesignDoc() {
$docdb = $server->get_db('disclosr-documents');
$obj = new stdClass();
$obj->_id = "_design/" . urlencode("app");
$obj->language = "javascript";
$obj->views->web_server->map = "function(doc) {\n emit(doc.web_server, 1);\n}";
$obj->views->web_server->reduce = "_sum";
$obj->views->byAgency->map = "function(doc) {\n emit(doc.agencyID, 1);\n}";
$obj->views->byAgency->reduce = "_sum";
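// Example (sketch): because byAgency emits 1 per document with a "_sum" reduce,
// GET /disclosr-documents/_design/app/_view/byAgency?group=true
// returns one row per agencyID whose value is that agency's document count.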
$obj->views->byURL->map = "function(doc) {\n emit(doc.url, doc);\n}";
$obj->views->agency->map = "function(doc) {\n emit(doc.agencyID, doc);\n}";
$obj->views->byWebServer->map = "function(doc) {\n emit(doc.web_server, doc);\n}";
$obj->views->datasets->map = "function(doc) {\nif (doc.fieldName == \"data\") {\n emit(doc._id, doc);\n}\n}";
$obj->views->getValidationRequired->map = "function(doc) {\nif (doc.mime_type == \"text/html\" \n&& typeof(doc.validation) == \"undefined\") {\n emit(doc._id, doc._attachments);\n}\n}";
$docdb->save($obj, true);
//function createAgencyDesignDoc() {
$db = $server->get_db('disclosr-agencies');
$obj = new stdClass();
$obj->_id = "_design/" . urlencode("app");
$obj->language = "javascript";
$obj->views->all->map = "function(doc) { emit(doc._id, doc); };";
$obj->views->byABN->map = "function(doc) { emit(doc.abn, doc); };";
$obj->views->byCanonicalName->map = "function(doc) {
    if (doc.parentOrg || doc.orgType == 'FMA-DepartmentOfState') {
        emit(doc.name, doc);
    }
};";
$obj->views->byDeptStateName->map = "function(doc) {
    if (doc.orgType == 'FMA-DepartmentOfState') {
        emit(doc.name, doc._id);
    }
};";
$obj->views->parentOrgs->map = "function(doc) {
    if (doc.parentOrg) {
        emit(doc._id, doc.parentOrg);
    }
};";
$obj->views->byName->map = 'function(doc) {
    if (typeof(doc["status"]) == "undefined" || doc["status"] != "suspended") {
        emit(doc.name, doc._id);
        if (typeof(doc.shortName) != "undefined" && doc.shortName != doc.name) {
            emit(doc.shortName, doc._id);
        }
        for (name in doc.otherNames) {
            if (doc.otherNames[name] != "" && doc.otherNames[name] != doc.name) {
                emit(doc.otherNames[name], doc._id);
            }
        }
        for (name in doc.foiBodies) {
            if (doc.foiBodies[name] != "" && doc.foiBodies[name] != doc.name) {
                emit(doc.foiBodies[name], doc._id);
            }
        }
        for (name in doc.positions) {
            if (doc.positions[name] != "" && doc.positions[name] != doc.name) {
                emit(doc.positions[name], doc._id);
            }
        }
    }
};';
$obj->views->foiEmails->map = "function(doc) {
    emit(doc._id, doc.foiEmail);
};";
$obj->views->byLastModified->map = "function(doc) { emit(doc.metadata.lastModified, doc); }";
$obj->views->getActive->map = 'function(doc) { if (doc.status == "active") { emit(doc._id, doc); } };';
$obj->views->getSuspended->map = 'function(doc) { if (doc.status == "suspended") { emit(doc._id, doc); } };';
$obj->views->getScrapeRequired->map = "function(doc) {
    // Date.parse returns milliseconds since the epoch (or NaN), not a Date,
    // so compare numbers directly; re-scrape anything not fetched in a day.
    var lastScrape = Date.parse(doc.metadata.lastScraped);
    var today = new Date();
    if (!lastScrape || (today.getTime() - lastScrape) > 1000 * 60 * 60 * 24) {
        emit(doc._id, doc);
    }
};";
$obj->views->showNamesABNs->map = "function(doc) { emit(doc._id, {name: doc.name, abn: doc.abn}); };";
$obj->views->getConflicts->map = "function(doc) {
    if (doc._conflicts) {
        emit(null, [doc._rev].concat(doc._conflicts));
    }
}";
$obj->views->getStatistics->map =
    "function(doc) {
        if (doc.statistics) {
            for (var statisticSet in doc.statistics) {
                for (var statisticPeriod in doc.statistics[statisticSet]) {
                    emit([statisticSet, statisticPeriod], doc.statistics[statisticSet][statisticPeriod]['value']);
                }
            }
        }
    }";
$obj->views->getStatistics->reduce = '_sum';
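// Example (sketch): with the "_sum" reduce, a grouped query such as
// GET /disclosr-agencies/_design/app/_view/getStatistics?group=true
// returns one summed value per [statisticSet, statisticPeriod] key.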
// http://stackoverflow.com/questions/646628/javascript-startswith
$obj->views->score->map = 'function(doc) {
    // polyfill kept inside the map function: CouchDB expects the map source
    // to be a single function expression
    if (!String.prototype.startsWith) {
        String.prototype.startsWith = function (str) {
            return !this.indexOf(str);
        };
    }
    var count = 0;
    if (doc["status"] != "suspended") {
        for (var propName in doc) {
            if (typeof(doc[propName]) != "undefined" && doc[propName] != "") {
                count++;
            }
        }
        var portfolio = doc.parentOrg;
        if (doc.orgType == "FMA-DepartmentOfState") {
            portfolio = doc._id;
        }
        if (doc.orgType == "Court-Commonwealth" || doc.orgType == "FMA-DepartmentOfParliament") {
            portfolio = doc.orgType;
        }
        emit(count + doc._id, {id: doc._id, name: doc.name, score: count, orgType: doc.orgType, portfolio: portfolio});
    }
}';
$obj->views->scoreHas->map = 'function(doc) {
    if (!String.prototype.startsWith) {
        String.prototype.startsWith = function (str) {
            return !this.indexOf(str);
        };
    }
    if (!String.prototype.endsWith) {
        String.prototype.endsWith = function(suffix) {
            return this.indexOf(suffix, this.length - suffix.length) !== -1;
        };
    }
    if (typeof(doc["status"]) == "undefined" || doc["status"] != "suspended") {
        for (var propName in doc) {
            if (typeof(doc[propName]) != "undefined" && (propName.startsWith("has") || propName.endsWith("URL"))) {
                emit(propName, 1);
            }
        }
        emit("total", 1);
    }
}';
$obj->views->scoreHas->reduce = '_sum';
$obj->views->fieldNames->map = '
function(doc) {
    for (var propName in doc) {
        emit(propName, doc._id);
    }
}';
$obj->views->fieldNames->reduce = '_count';
// Allow safe updates (even if slightly slower due to the extra rev-detection check).
$db->save($obj, true);
?>
import ckanclient
import couchdb
from ckanclient import CkanApiError
import re
import html2text  # aaronsw :(


class LoaderError(Exception):
    pass

# Instantiate the CKAN client.
#ckan = ckanclient.CkanClient(base_location='http://localhost:5000/api', api_key='b47b24cd-591d-40c1-8677-d73101d56d1b')
ckan = ckanclient.CkanClient(base_location='http://data.disclosurelo.gs/api',
                             api_key='482a9dd2-a976-4adf-ac77-d71d92a98a52')
#couch = couchdb.Server('http://127.0.0.1:5984/')
couch = couchdb.Server('http://192.168.1.113:5984/')

# http://code.activestate.com/recipes/578019-bytes-to-human-human-to-bytes-converter/
SYMBOLS = {
    'customary': ('B', 'KB', 'MB', 'GB', 'T', 'P', 'E', 'Z', 'Y'),
    'customary_ext': ('byte', 'kilo', 'mega', 'giga', 'tera', 'peta', 'exa',
                      'zetta', 'iotta'),
    'iec': ('Bi', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi', 'Yi'),
    'iec_ext': ('byte', 'kibi', 'mebi', 'gibi', 'tebi', 'pebi', 'exbi',
                'zebi', 'yobi'),
}


def human2bytes(s):
    """
    Attempts to guess the string format based on default symbols
    set and return the corresponding bytes as an integer.
    When unable to recognize the format ValueError is raised.

    >>> human2bytes('0 B')
    0
    >>> human2bytes('1 K')
    1024
    >>> human2bytes('1 M')
    1048576
    >>> human2bytes('1 Gi')
    1073741824
    >>> human2bytes('1 tera')
    1099511627776
    >>> human2bytes('0.5kilo')
    512
    >>> human2bytes('0.1 byte')
    0
    >>> human2bytes('1 k')  # k is an alias for K
    1024
    >>> human2bytes('12 foo')
    Traceback (most recent call last):
        ...
    ValueError: can't interpret '12 foo'
    """
    init = s
    num = ""
    while s and s[0:1].isdigit() or s[0:1] == '.':
        num += s[0]
        s = s[1:]
    num = float(num)
    letter = s.strip()
    for name, sset in SYMBOLS.items():
        if letter in sset:
            break
    else:
        if letter == 'k':
            # treat 'k' as an alias for 'K' as per: http://goo.gl/kTQMs
            sset = SYMBOLS['customary']
            letter = letter.upper()
        else:
            raise ValueError("can't interpret %r" % init)
    prefix = {sset[0]: 1}
    for i, s in enumerate(sset[1:]):
        prefix[s] = 1 << (i + 1) * 10
    return int(num * prefix[letter])
# https://github.com/okfn/ckanext-importlib
def munge(name):
    # convert spaces to underscores
    name = re.sub(' ', '_', name).lower()
    # convert symbols to dashes
    name = re.sub('[:]', '_-', name).lower()
    name = re.sub('[/]', '-', name).lower()
    # take out not-allowed characters
    name = re.sub('[^a-zA-Z0-9-_]', '', name).lower()
    # remove double underscores
    name = re.sub('__', '_', name).lower()
    return name
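# Example (sketch): munge('Budget 2012: Tables') -> 'budget_2012_-_tables'
# (spaces become underscores, ':' becomes '_-', and everything is lowercased).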
def name_munge(input_name):
    return munge(input_name.replace(' ', '').replace('.', '_').replace('&', 'and'))
    #return input_name.replace(' ', '').replace('.', '_').replace('&', 'and')


def get_licence_id(licencename):
    map = {
        "Creative Commons - Attribution-Share Alike 2.0 Australia (CC-SA)\nThe downloadable version of the database is licensed under CC-BY-SA Creative Commons Attribution Share Alike and contains only the database fields that are released under that license. These fields are object title, object number, object description as well as temporal, spatial and dimension details. It also contains a persistent URL for each record.": 'cc-by-sa',
        "CreativeCommonsAttributionNonCommercial30AustraliaCCBYNC30": 'cc-nc',
        'Otherpleasespecify': 'notspecified',
        '': 'notspecified',
        "Publicly available data": 'notspecified',
        "CreativeCommonsAttributionNoDerivativeWorks30AustraliaCCBYND30": "other-closed",
        "CreativeCommonsAttributionNonCommercialNoDerivs30AustraliaCCBYNCND30": "other-closed",
        'CreativeCommonsAttribution30AustraliaCCBY30': 'cc-by',
        "Creative Commons - Attribution 2.5 Australia (CC-BY)": 'cc-by',
        'CreativeCommonsAttributionCCBY25': 'cc-by',
        "PublicDomain": 'other-pd',
    }
    if licencename not in map.keys():
        raise Exception(licencename + " not found")
    return map[licencename]
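# Example (sketch): get_licence_id('PublicDomain') -> 'other-pd'; an
# unrecognised licence string raises an Exception rather than guessing.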
docsdb = couch['disclosr-documents']

if __name__ == "__main__":
    for doc in docsdb.view('app/datasets'):
        print doc.id
        if doc.value['url'] != "http://data.gov.au/data/":
            # Collect the package metadata.
            pkg_name = doc.value['url'].replace("http://data.gov.au/dataset/", '').replace('/', '')
            tags = []
            if len(doc.value['metadata']["Keywords / Tags"]) > 0:
                if hasattr(doc.value['metadata']["Keywords / Tags"], '__iter__'):
                    tags = tags + doc.value['metadata']["Keywords / Tags"]
                else:
                    tags = tags + [doc.value['metadata']["Keywords / Tags"]]
            if 'data.gov.au Category' in doc.value['metadata'].keys() and len(doc.value['metadata']['data.gov.au Category']) > 0:
                if hasattr(doc.value['metadata']['data.gov.au Category'], '__iter__'):
                    tags = tags + doc.value['metadata']['data.gov.au Category']
                else:
                    tags = tags + [doc.value['metadata']['data.gov.au Category']]
            tags = [re.sub('[^a-zA-Z0-9-_.]', '', tag.replace('&', 'and')).lower() for tag in tags if tag]
            print tags
            package_entity = {
                'name': pkg_name,
                'title': doc.value['metadata']['DCTERMS.Title'],
                'url': doc.value['metadata']['DCTERMS.Source.URI'],
                'tags': tags,  # tags are mandatory?
                'author': doc.value['metadata']["DCTERMS.Creator"],
                'maintainer': doc.value['metadata']["DCTERMS.Creator"],
                'licence_id': get_licence_id(doc.value['metadata']['DCTERMS.License']),
                'notes': html2text.html2text(doc.value['metadata']['Description']),
            }
            try:
                print package_entity
                ckan.package_register_post(package_entity)
            except CkanApiError, e:
                if ckan.last_status == 409:
                    print "package already exists"
                else:
                    raise LoaderError('Unexpected status %s checking for package under \'%s\': %r' % (
                        ckan.last_status, pkg_name, e.args))

            # add to group
            group_name = name_munge(doc.value['metadata']["Agency"][:100])
            try:
                print ckan.group_entity_get(group_name)
                # Update the group details
                group_entity = ckan.last_message
                print "group exists"
                if 'packages' in group_entity.keys():
                    group_entity['packages'] = list(set(group_entity['packages'] + [pkg_name]))
                else:
                    group_entity['packages'] = [pkg_name]
                ckan.group_entity_put(group_entity)
            except CkanApiError, e:
                if ckan.last_status == 404:
                    print "group does not exist, creating"
                    group_entity = {
                        'name': group_name,
                        'title': doc.value['metadata']["Agency"],
                        'description': doc.value['metadata']["Agency"],
                        'packages': [pkg_name],
                        # 'type': "organization" # not allowed via API, use database query
                        # update "group" set type = 'organization';
                    }
                    print group_entity
                    ckan.group_register_post(group_entity)
                else:
                    raise LoaderError('Unexpected status %s adding to group under \'%s\': %r' % (
                        ckan.last_status, pkg_name, e.args))
            if 'Download' in doc.value['metadata'].keys():
                try:
                    pkg = ckan.package_entity_get(pkg_name)
                    resources = pkg.get('resources', [])
                    if len(resources) < len(doc.value['metadata']['Download']):
                        for resource in doc.value['metadata']['Download']:
                            # http://docs.ckan.org/en/ckan-1.7/domain-model-resource.html
                            # (KML/KMZ) / (Shapefile) / (Other)
                            format = "plain"
                            if resource['format'] == '(XML)':
                                format = 'xml'
                            if resource['format'] == '(CSV/XLS)':
                                format = 'csv'
                            name = resource['href']
                            if 'name' in resource.keys():
                                name = resource['name']
                            print resource
                            ckan.add_package_resource(pkg_name, resource['href'], name=name, resource_type='data',
                                                      format=format, size=human2bytes(resource['size'].replace(',', '')))
                    else:
                        print "resources already exist"
                except CkanApiError, e:
                    if ckan.last_status == 404:
                        print "parent dataset does not exist"
                    else:
                        raise LoaderError('Unexpected status %s checking for package under \'%s\': %r' % (
                            ckan.last_status, pkg_name, e.args))
<?php

include_once("../include/common.inc.php");

setlocale(LC_CTYPE, 'C');
$db = $server->get_db('disclosr-documents');
$datasets = Array();
try {
    $rows = $db->get_view("app", "datasets", null, true)->rows;
    foreach ($rows as $row) {
        //print_r($row);
        if ($row->value->url != "http://data.gov.au/data/")
            $datasets[str_replace(Array("http://data.gov.au/dataset/", "/"), "", $row->value->url)] = $row->id;
    }
} catch (SetteeRestClientException $e) {
    setteErrorHandler($e);
}
ksort($datasets);
foreach ($datasets as $datasetname => $datasetkey) {
    print "$datasetname => $datasetkey<br>\n";
}
?>
import sys, os
import time
import scrape
from bs4 import BeautifulSoup
from unidecode import unidecode

listurl = "http://data.gov.au/data/"
(url, mime_type, datasetlisthtml) = scrape.fetchURL(scrape.docsdb,
                                                    listurl, "data", "AGIMO")
soup = BeautifulSoup(datasetlisthtml)
for atag in soup.find_all(class_='result-title'):
    if atag.has_key('href'):
        url = scrape.fullurl(listurl, atag['href'])
        (url, mime_type, html) = scrape.fetchURL(scrape.docsdb,
                                                 url, "data", "AGIMO", False)
        hash = scrape.mkhash(scrape.canonurl(url))
        doc = scrape.docsdb.get(hash)
        if "metadata" not in doc.keys() or True:
            doc['type'] = "dataset"
            doc['metadata'] = {}
            soup = BeautifulSoup(html)
            for metatag in soup.find_all('meta'):
                if metatag.has_key('name'):
                    doc['metadata'][metatag['name']] = metatag['content']
            for list in soup.find_all('dl'):
                last_title = ""
                for child in list.children:
                    if str(type(child)) != "<class 'bs4.element.NavigableString'>":
                        if child.name == 'dt' and child.string != None:
                            last_title = child.string.strip()
                        if child.name == 'dd':
                            #print last_title
                            if last_title == "Description":
                                doc['metadata'][last_title] = unidecode(str(child)).encode('ascii', 'ignore')
                            elif last_title == "Download":
                                doc['metadata'][last_title] = []
                                for item in child.find_all("li"):
                                    link = item.find("a")
                                    format = item.find(property="dc:format")
                                    linkobj = {"href": link['href'].replace("/bye?", "").strip(),
                                               "format": format.string.strip(), "size": format.next_sibling.string.strip()}
                                    if link.string != None:
                                        linkobj["name"] = link.string.strip()
                                    doc['metadata'][last_title].append(linkobj)
                            else:
                                atags = child.find_all('a')
                                if len(atags) < 2:
                                    [s.extract() for s in child(class_='viewAll')]
                                    doc['metadata'][last_title] = ''.join(child.stripped_strings).strip()
                                else:
                                    doc['metadata'][last_title] = [item.string.replace(",", "").strip() for item in atags]
            print doc['metadata']
            scrape.docsdb.save(doc)
            #time.sleep(2)
import sys, os
import time
import scrape
from bs4 import BeautifulSoup
from unidecode import unidecode

items = 3950
items = 1
while True:
    print str(items) + " (" + str(items / 25) + " screens to go)"
    listurl = "http://gazettes.ag.gov.au/portal/govgazonline.nsf/publications?OpenView&Start=" + str(items)
    (listurl, mime_type, listhtml) = scrape.fetchURL(scrape.docsdb,
                                                     listurl, "gazette", "AGD", False)
    for line in listhtml.split('\n'):
        soup = BeautifulSoup(line)
        #print line
        for row in soup.find_all('tr'):
            print line
            if row.has_key('valign'):
                i = 0
                date = ""
                id = ""
                type = ""
                description = ""
                name = ""
                url = ""
                for col in row.find_all('td'):
                    #print ''.join(col.stripped_strings)
                    if i == 0:
                        date = ''.join(col.stripped_strings)
                    if i == 1:
                        id = ''.join(col.stripped_strings)
                    if i == 2:
                        type = ''.join(col.stripped_strings)
                    if i == 3:
                        description = ''.join(col.stripped_strings)
                        for link in col.findAll('a'):
                            if link.has_key("href"):
                                url = link['href']
                                name = ''.join(link.stripped_strings)
                                print str(items) + " (" + str(items / 25) + " screens to go)"
                                print [date, id, type, description, name, url]
                                itemurl = scrape.fullurl(listurl, url)
                                (itemurl, mime_type, html) = scrape.fetchURL(scrape.docsdb,
                                                                             itemurl, "gazette", "AGD", False)
                                hash = scrape.mkhash(scrape.canonurl(itemurl))
                                doc = scrape.docsdb.get(hash)
                                doc['metadata'] = {"date": date, "id": id, "type": type,
                                                   "description": description, "name": name, "url": url}
                                scrape.docsdb.save(doc)
                                #time.sleep(2)
                    i = i + 1
    items = items - 25
    if items <= 0:
        break
#http://packages.python.org/CouchDB/client.html
import couchdb
import urllib2
from BeautifulSoup import BeautifulSoup
import re
import hashlib
from urlparse import urljoin
import time
import os
import mimetypes
import urllib
import urlparse
import socket
def mkhash(input):
    return hashlib.md5(input).hexdigest().encode("utf-8")
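# Example (sketch): mkhash('http://example.gov.au/') returns the 32-character
# hex MD5 digest that is used below as the CouchDB _id for that URL's document.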
def canonurl(url):
    r"""Return the canonical, ASCII-encoded form of a UTF-8 encoded URL, or ''
    if the URL looks invalid.

    >>> canonurl('\xe2\x9e\xa1.ws')  # tinyarro.ws
    'http://xn--hgi.ws/'
    """
    # strip spaces at the ends and ensure it's prefixed with 'scheme://'
    url = url.strip()
    if not url:
        return ''
    if not urlparse.urlsplit(url).scheme:
        url = 'http://' + url
    # turn it into Unicode
    #try:
    #    url = unicode(url, 'utf-8')
    #except UnicodeDecodeError:
    #    return ''  # bad UTF-8 chars in URL
    # parse the URL into its components
    parsed = urlparse.urlsplit(url)
    scheme, netloc, path, query, fragment = parsed
    # ensure scheme is a letter followed by letters, digits, and '+-.' chars
    if not re.match(r'[a-z][-+.a-z0-9]*$', scheme, flags=re.I):
        return ''
    scheme = str(scheme)
    # ensure domain and port are valid, eg: sub.domain.<1-to-6-TLD-chars>[:port]
    match = re.match(r'(.+\.[a-z0-9]{1,6})(:\d{1,5})?$', netloc, flags=re.I)
    if not match:
        return ''
    domain, port = match.groups()
    netloc = domain + (port if port else '')
    netloc = netloc.encode('idna')
    # ensure path is valid and convert Unicode chars to %-encoded
    if not path:
        path = '/'  # eg: 'http://google.com' -> 'http://google.com/'
    path = urllib.quote(urllib.unquote(path.encode('utf-8')), safe='/;')
    # ensure query is valid
    query = urllib.quote(urllib.unquote(query.encode('utf-8')), safe='=&?/')
    # ensure fragment is valid
    fragment = urllib.quote(urllib.unquote(fragment.encode('utf-8')))
    # piece it all back together, truncating it to a maximum of 4KB
    url = urlparse.urlunsplit((scheme, netloc, path, query, fragment))
    return url[:4096]
def fullurl(url, href):
    href = href.replace(" ", "%20")
    href = re.sub('#.*$', '', href)
    return urljoin(url, href)
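# Example (sketch): fullurl('http://example.gov.au/a/', 'b c.html#frag')
# -> 'http://example.gov.au/a/b%20c.html' (space escaped, anchor stripped).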
#http://diveintopython.org/http_web_services/etags.html
class NotModifiedHandler(urllib2.BaseHandler):
    def http_error_304(self, req, fp, code, message, headers):
        addinfourl = urllib2.addinfourl(fp, headers, req.get_full_url())
        addinfourl.code = code
        return addinfourl
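# Installed via build_opener() in fetchURL() below so that a 304 Not Modified
# response comes back as a normal response object with .code == 304 (instead of
# raising HTTPError), which fetchURL() checks before reusing the cached copy.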
def getLastAttachment(docsdb, url):
    hash = mkhash(url)
    doc = docsdb.get(hash)
    if doc != None:
        # attachment names are prefixed with the epoch time they were saved,
        # so the lexically-last sorted name is the most recent version
        last_attachment_fname = sorted(doc["_attachments"].keys())[-1]
        last_attachment = docsdb.get_attachment(doc, last_attachment_fname)
        return last_attachment
    else:
        return None
def fetchURL(docsdb, url, fieldName, agencyID, scrape_again=True):
    url = canonurl(url)
    hash = mkhash(url)
    req = urllib2.Request(url)
    print "Fetching %s (%s)" % (url, hash)
    if url.startswith("mailto") or url.startswith("javascript") or url.startswith("#") or url == None or url == "":
        print "Not a valid HTTP url"
        return (None, None, None)
    doc = docsdb.get(hash)
    if doc == None:
        doc = {'_id': hash, 'agencyID': agencyID, 'url': url, 'fieldName': fieldName, 'type': 'website'}
    else:
        if ('page_scraped' in doc) and ((time.time() - doc['page_scraped']) < 60 * 24 * 14) or (scrape_again == False):
            print "Uh oh, trying to scrape URL again too soon!" + hash
            last_attachment_fname = sorted(doc["_attachments"].keys())[-1]
            last_attachment = docsdb.get_attachment(doc, last_attachment_fname)
            content = last_attachment
            return (doc['url'], doc['mime_type'], content.read())
req.add_header("User-Agent", "Mozilla/4.0 (compatible; Prometheus webspider; owner maxious@lambdacomplex.org)") | req.add_header("User-Agent", "Mozilla/4.0 (compatible; Prometheus webspider; owner maxious@lambdacomplex.org)") |
#if there is a previous version stored in couchdb, load caching helper tags | #if there is a previous version stored in couchdb, load caching helper tags |
if doc.has_key('etag'): | if doc.has_key('etag'): |
req.add_header("If-None-Match", doc['etag']) | req.add_header("If-None-Match", doc['etag']) |
if doc.has_key('last_modified'): | if doc.has_key('last_modified'): |
req.add_header("If-Modified-Since", doc['last_modified']) | req.add_header("If-Modified-Since", doc['last_modified']) |
opener = urllib2.build_opener(NotModifiedHandler()) | opener = urllib2.build_opener(NotModifiedHandler()) |
try: | try: |
url_handle = opener.open(req, None, 20) | url_handle = opener.open(req, None, 20) |
doc['url'] = url_handle.geturl() # may have followed a redirect to a new url | doc['url'] = url_handle.geturl() # may have followed a redirect to a new url |
headers = url_handle.info() # the addinfourls have the .info() too | headers = url_handle.info() # the addinfourls have the .info() too |
doc['etag'] = headers.getheader("ETag") | doc['etag'] = headers.getheader("ETag") |
doc['last_modified'] = headers.getheader("Last-Modified") | doc['last_modified'] = headers.getheader("Last-Modified") |
doc['date'] = headers.getheader("Date") | doc['date'] = headers.getheader("Date") |
doc['page_scraped'] = time.time() | doc['page_scraped'] = time.time() |
doc['web_server'] = headers.getheader("Server") | doc['web_server'] = headers.getheader("Server") |
doc['via'] = headers.getheader("Via") | doc['via'] = headers.getheader("Via") |
doc['powered_by'] = headers.getheader("X-Powered-By") | doc['powered_by'] = headers.getheader("X-Powered-By") |
doc['file_size'] = headers.getheader("Content-Length") | doc['file_size'] = headers.getheader("Content-Length") |
content_type = headers.getheader("Content-Type") | content_type = headers.getheader("Content-Type") |
if content_type != None: | if content_type != None: |
doc['mime_type'] = content_type.split(";")[0] | doc['mime_type'] = content_type.split(";")[0] |
else: | else: |
(type, encoding) = mimetypes.guess_type(url) | (type, encoding) = mimetypes.guess_type(url) |
doc['mime_type'] = type | doc['mime_type'] = type |
if hasattr(url_handle, 'code'): | if hasattr(url_handle, 'code'): |
if url_handle.code == 304: | if url_handle.code == 304: |
print "the web page has not been modified" + hash | print "the web page has not been modified" + hash |
last_attachment_fname = doc["_attachments"].keys()[-1] | last_attachment_fname = doc["_attachments"].keys()[-1] |
last_attachment = docsdb.get_attachment(doc, last_attachment_fname) | last_attachment = docsdb.get_attachment(doc, last_attachment_fname) |
content = last_attachment | content = last_attachment |
return (doc['url'], doc['mime_type'], content.read()) | return (doc['url'], doc['mime_type'], content.read()) |
else: | else: |
print "new webpage loaded" | print "new webpage loaded" |
content = url_handle.read() | content = url_handle.read() |
docsdb.save(doc) | docsdb.save(doc) |
doc = docsdb.get(hash) # need to get a _rev | doc = docsdb.get(hash) # need to get a _rev |
docsdb.put_attachment(doc, content, str(time.time()) + "-" + os.path.basename(url), doc['mime_type']) | docsdb.put_attachment(doc, content, str(time.time()) + "-" + os.path.basename(url), doc['mime_type']) |
return (doc['url'], doc['mime_type'], content) | return (doc['url'], doc['mime_type'], content) |
#store as attachment epoch-filename | #store as attachment epoch-filename |
    except (urllib2.URLError, socket.timeout) as e:
        print "error!"
        error = ""
        if hasattr(e, 'reason'):
            error = "error %s in downloading %s" % (str(e.reason), url)
        elif hasattr(e, 'code'):
            error = "error %s in downloading %s" % (e.code, url)
        print error
        doc['error'] = error
        docsdb.save(doc)
        return (None, None, None)
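# Usage sketch (hypothetical URL and agency values): fetch a page through the
# CouchDB-backed cache, honouring ETag/Last-Modified headers where available.
# (url, mime_type, content) = fetchURL(docsdb, 'http://example.gov.au/foi', 'FOIDocumentsURL', 'exampleAgencyID')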
def scrapeAndStore(docsdb, url, depth, fieldName, agencyID):
    (url, mime_type, content) = fetchURL(docsdb, url, fieldName, agencyID)
    badURLs = ["http://www.ausport.gov.au/supporting/funding/grants_and_scholarships/grant_funding_report"]
    if content != None and depth > 0 and url not in badURLs:
        if mime_type == "text/html" or mime_type == "application/xhtml+xml" or mime_type == "application/xml":
            # http://www.crummy.com/software/BeautifulSoup/documentation.html
            soup = BeautifulSoup(content)
            navIDs = soup.findAll(
                id=re.compile('nav|Nav|menu|bar|left|right|sidebar|more-links|breadcrumb|footer|header'))
            for nav in navIDs:
                print "Removing element", nav['id']
                nav.extract()
            navClasses = soup.findAll(
                attrs={'class': re.compile('nav|menu|bar|left|right|sidebar|more-links|breadcrumb|footer|header')})
            for nav in navClasses:
                print "Removing element", nav['class']
                nav.extract()
            links = soup.findAll('a')  # soup.findAll('a', id=re.compile("^p-"))
            linkurls = set([])
            for link in links:
                if link.has_key("href"):
                    if link['href'].startswith("http"):
                        # lets not do external links for now
                        # linkurls.add(link['href'])
                        pass
                    elif link['href'].startswith("mailto"):
                        # not http
                        pass
                    elif link['href'].startswith("javascript"):
                        # not http
                        pass
                    else:
                        # remove anchors and spaces in urls
                        linkurls.add(fullurl(url, link['href']))
            for linkurl in linkurls:
                #print linkurl
                scrapeAndStore(docsdb, linkurl, depth - 1, fieldName, agencyID)
#couch = couchdb.Server('http://192.168.1.148:5984/')
#couch = couchdb.Server('http://192.168.1.113:5984/')
couch = couchdb.Server('http://127.0.0.1:5984/')

# select database
agencydb = couch['disclosr-agencies']
docsdb = couch['disclosr-documents']
if __name__ == "__main__":
    for row in agencydb.view('app/all'):  # not recently scraped agencies view?
        agency = agencydb.get(row.id)
        print agency['name']
        for key in agency.keys():
            if key == "FOIDocumentsURL" and "status" not in agency.keys() and False:
                scrapeAndStore(docsdb, agency[key], 0, key, agency['_id'])
            if key == 'website' and True:
                scrapeAndStore(docsdb, agency[key], 0, key, agency['_id'])
                if "metadata" not in agency.keys():
                    agency['metadata'] = {}
                agency['metadata']['lastScraped'] = time.time()
            if key.endswith('URL') and False:
                print key
                depth = 1
                if 'scrapeDepth' in agency.keys():
                    depth = agency['scrapeDepth']
                scrapeAndStore(docsdb, agency[key], depth, key, agency['_id'])
        agencydb.save(agency)