tool to check datagov resources
Former-commit-id: f406384c3ba09ba04f639abb5731511ddf02b88b
--- a/admin/refreshDesignDoc.php
+++ b/admin/refreshDesignDoc.php
@@ -42,6 +42,7 @@
$obj->views->byWebServer->map = "function(doc) {\n emit(doc.web_server, doc);\n}";
$obj->views->datasets->map = "function(doc) {\nif (doc.fieldName == \"data\") {\n emit(doc._id, doc);\n}\n}";
+$obj->views->datasetGroups->map = "function(doc) {\nif (doc.fieldName == \"data\") {\n doc.metadata[\"data.gov.au Category\"] && doc.metadata[\"data.gov.au Category\"].forEach(function(tag) {\n emit(tag, doc.url); \n });\n}\n}";
$obj->views->getValidationRequired->map = "function(doc) {\nif (doc.mime_type == \"text/html\" \n&& typeof(doc.validation) == \"undefined\") {\n emit(doc._id, doc._attachments);\n}\n}";
$docdb->save($obj, true);
--- /dev/null
+++ b/disclosr.iml
@@ -1,1 +1,16 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<module type="WEB_MODULE" version="4">
+ <component name="FacetManager">
+ <facet type="Python" name="Python">
+ <configuration sdkName="" />
+ </facet>
+ </component>
+ <component name="NewModuleRootManager" inherit-compiler-output="true">
+ <exclude-output />
+ <content url="file://$MODULE_DIR$" />
+ <orderEntry type="inheritedJdk" />
+ <orderEntry type="sourceFolder" forTests="false" />
+ </component>
+</module>
+
--- /dev/null
+++ b/documents/datagov-export-groups.py
@@ -1,1 +1,81 @@
+import ckanclient
+import couchdb
+from ckanclient import CkanApiError
+import re
+
+class LoaderError(Exception):
+ pass
+
+# Instantiate the CKAN client.
+#ckan = ckanclient.CkanClient(base_location='http://localhost:5000/api', api_key='b47b24cd-591d-40c1-8677-d73101d56d1b')
+api_key = 'ff34526e-f794-4068-8235-fcbba38cd8bc'
+ckan = ckanclient.CkanClient(base_location='http://data.disclosurelo.gs/api',
+ api_key=api_key)
+couch = couchdb.Server('http://127.0.0.1:5984/')
+#couch = couchdb.Server('http://192.168.1.113:5984/')
+
+# https://github.com/okfn/ckanext-importlib
+def munge(name):
+ # convert spaces to underscores
+ name = re.sub(' ', '_', name).lower()
+ # convert symbols to dashes
+ name = re.sub('[:]', '_-', name).lower()
+ name = re.sub('[/]', '-', name).lower()
+ # take out not-allowed characters
+ name = re.sub('[^a-zA-Z0-9-_]', '', name).lower()
+ # remove double underscores
+ name = re.sub('__', '_', name).lower()
+ return name
+
+
+def name_munge(input_name):
+ return munge(input_name.replace(' ', '').replace('.', '_').replace('&', 'and'))
+
+
+docsdb = couch['disclosr-documents']
+
+if __name__ == "__main__":
+ groups = {}
+ for doc in docsdb.view('app/datasetGroups'):
+ group_name = doc.key
+ if group_name != "Not specified":
+ pkg_name = filter(lambda x: x in '0123456789abcdefghijklmnopqrstuvwxyz-_',
+ doc.value.replace("http://data.gov.au/dataset/", '').replace('/', '')[:100]);
+ if group_name in groups.keys():
+ groups[group_name] = list(set(groups[group_name] + [pkg_name]))
+ else:
+ groups[group_name] = [pkg_name]
+
+ # add dataset to group(s)
+ for group_name in groups.keys():
+ if group_name != "Not specified":
+ group_url = name_munge(group_name[:100])
+ print group_name
+ print groups[group_name]
+ try:
+ # Update the group details
+ group_entity = ckan.group_entity_get(group_url)
+ print "group "+group_name+" exists"
+ if 'packages' in group_entity.keys():
+ group_entity['packages'] = list(set(group_entity['packages'] + groups[group_name]))
+ else:
+ group_entity['packages'] = groups[group_name]
+ ckan.group_entity_put(group_entity)
+ except CkanApiError, e:
+ if ckan.last_status == 404:
+ print "group "+group_name+" does not exist, creating"
+ group_entity = {
+ 'name': group_url,
+ 'title': group_name,
+ 'description': group_name,
+ 'packages': groups[group_name]
+ }
+ #print group_entity
+ ckan.group_register_post(group_entity)
+ elif ckan.last_status == 409:
+ print "group already exists"
+ else:
+ raise LoaderError('Unexpected status %s adding to group under \'%s\': %r' % (
+ ckan.last_status, group_name, e.args))
+
--- a/documents/datagov-export.py
+++ b/documents/datagov-export.py
@@ -3,17 +3,45 @@
from ckanclient import CkanApiError
import re
import html2text # aaronsw :(
+import ckanapi # https://github.com/open-data/ckanapi
class LoaderError(Exception):
pass
# Instantiate the CKAN client.
-#ckan = ckanclient.CkanClient(base_location='http://localhost:5000/api', api_key='b47b24cd-591d-40c1-8677-d73101d56d1b')
-ckan = ckanclient.CkanClient(base_location='http://data.disclosurelo.gs/api',
- api_key='482a9dd2-a976-4adf-ac77-d71d92a98a52')
-#couch = couchdb.Server('http://127.0.0.1:5984/')
-couch = couchdb.Server('http://192.168.1.113:5984/')
+api_key = 'ff34526e-f794-4068-8235-fcbba38cd8bc'
+server = 'data.disclosurelo.gs'
+
+ckan = ckanclient.CkanClient(base_location='http://'+server+'/api',
+ api_key=api_key)
+ckandirect = ckanapi.RemoteCKAN('http://'+server, api_key=api_key)
+couch = couchdb.Server('http://127.0.0.1:5984/')
+#couch = couchdb.Server('http://192.168.1.113:5984/')
+
+import urllib
+import urlparse
+
+def url_fix(s, charset='utf-8'):
+ """Sometimes you get an URL by a user that just isn't a real
+ URL because it contains unsafe characters like ' ' and so on. This
+ function can fix some of the problems in a similar way browsers
+ handle data entered by the user:
+
+ >>> url_fix(u'http://de.wikipedia.org/wiki/Elf (Begriffsklärung)')
+ 'http://de.wikipedia.org/wiki/Elf%20%28Begriffskl%C3%A4rung%29'
+
+ :param charset: The target charset for the URL if the url was
+ given as unicode string.
+ """
+ if isinstance(s, unicode):
+ s = s.encode(charset, 'ignore')
+ if not urlparse.urlparse(s).scheme:
+ s = "http://"+s
+ scheme, netloc, path, qs, anchor = urlparse.urlsplit(s)
+ path = urllib.quote(path, '/%')
+ qs = urllib.quote_plus(qs, ':&=')
+ return urlparse.urlunsplit((scheme, netloc, path, qs, anchor))
# http://code.activestate.com/recipes/578019-bytes-to-human-human-to-bytes-converter/
SYMBOLS = {
@@ -24,6 +52,7 @@
'iec_ext': ('byte', 'kibi', 'mebi', 'gibi', 'tebi', 'pebi', 'exbi',
'zebi', 'yobi'),
}
+
def human2bytes(s):
"""
@@ -53,6 +82,9 @@
...
ValueError: can't interpret '12 foo'
"""
+ if s == None:
+ return 0
+ s = s.replace(',', '')
init = s
num = ""
while s and s[0:1].isdigit() or s[0:1] == '.':
@@ -91,7 +123,6 @@
def name_munge(input_name):
return munge(input_name.replace(' ', '').replace('.', '_').replace('&', 'and'))
- #return input_name.replace(' ', '').replace('.', '_').replace('&', 'and')
def get_licence_id(licencename):
@@ -112,82 +143,106 @@
raise Exception(licencename + " not found");
return map[licencename];
+
docsdb = couch['disclosr-documents']
if __name__ == "__main__":
+ orgs_list = []
+ orgs_ids = {}
for doc in docsdb.view('app/datasets'):
+ print " --- "
print doc.id
- if doc.value['url'] != "http://data.gov.au/data/":
+
+ if doc.value['url'] != "http://data.gov.au/data/" and doc.value['agencyID'] != "qld":
+
+
# Collect the package metadata.
- pkg_name = name_munge(doc.value['metadata']['DCTERMS.Title'][:100])
+ pkg_name = filter(lambda x: x in '0123456789abcdefghijklmnopqrstuvwxyz-_',
+ doc.value['url'].replace("http://data.gov.au/dataset/", '').replace('/', '')[:100]);
+ print pkg_name
+ #add to or create organization using direct API
+ agency = doc.value['metadata']["Agency"]
+ if agency == "APS":
+ agency = "Australian Public Service Commission"
+ if agency == "Shared Services, Treasury Directorate":
+ agency = "Shared Services Procurement, Treasury Directorate"
+ if agency == "Treasury - Shared Services":
+ agency = "Shared Services Procurement, Treasury Directorate"
+ if agency == "Territory and Municipal Services (TAMS)":
+ agency = "Territory and Municipal Services Directorate"
+ if agency == "State Library of NSW":
+ agency = "State Library of New South Wales"
+ org_name = name_munge(agency[:100])
+ if org_name not in orgs_list:
+ orgs_list = ckandirect.action.organization_list()['result']
+ #print orgs_list
+ if org_name not in orgs_list:
+ try:
+ print "org not found, creating " + org_name
+ ckandirect.action.organization_create(name=org_name, title=agency,
+ description=agency)
+ orgs_list.append(org_name)
+ except ckanapi.ValidationError, e:
+ print e
+ raise LoaderError('Unexpected status')
+ else:
+ print "org found, adding dataset to " + org_name
+
+ # cache org names -> id mapping
+ if org_name not in orgs_ids:
+ org = ckandirect.action.organization_show(id=org_name)
+ orgs_ids[org_name] = org["result"]["id"]
+ org_id = orgs_ids[org_name]
+ print "org id is "+org_id
tags = []
- if len(doc.value['metadata']["Keywords / Tags"]) > 0:
- if hasattr(doc.value['metadata']["Keywords / Tags"], '__iter__'):
- tags = tags + doc.value['metadata']["Keywords / Tags"]
- else:
- tags = tags + [doc.value['metadata']["Keywords / Tags"]]
- if 'data.gov.au Category' in doc.value['metadata'].keys() and len(doc.value['metadata']['data.gov.au Category']) > 0:
- if hasattr(doc.value['metadata']['data.gov.au Category'], '__iter__'):
- tags = tags + doc.value['metadata']['data.gov.au Category']
- else:
- tags = tags + [doc.value['metadata']['data.gov.au Category']]
- tags = [re.sub('[^a-zA-Z0-9-_.]', '', tag.replace('&', 'and')).lower() for tag in tags if tag]
- print tags
- package_entity = {
- 'name': pkg_name,
- 'title': doc.value['metadata']['DCTERMS.Title'],
- 'url': doc.value['metadata']['DCTERMS.Source.URI'],
- 'tags': tags, #tags are mandatory?
- 'author': doc.value['metadata']["DCTERMS.Creator"],
- 'maintainer': doc.value['metadata']["DCTERMS.Creator"],
- 'licence_id': get_licence_id(doc.value['metadata']['DCTERMS.License']),
- 'notes': html2text.html2text(doc.value['metadata']['Description']),
- }
+ creator = doc.value['metadata']["DCTERMS.Creator"]
+ if doc.value['agencyID'] == "AGIMO":
+ if len(doc.value['metadata']["Keywords / Tags"]) > 0:
+ if hasattr(doc.value['metadata']["Keywords / Tags"], '__iter__'):
+ tags = tags + doc.value['metadata']["Keywords / Tags"]
+ else:
+ tags = tags + [doc.value['metadata']["Keywords / Tags"]]
+
+ tags = [re.sub('[^a-zA-Z0-9-_.]', '', tag.replace('&', 'and')).lower() for tag in tags if tag]
+ #print tags
+ extras = []
+
+ for extra_key in doc.value['metadata'].keys():
+ if extra_key not in ["Description","Content-Language","DCTERMS.Description", "Keywords / Tags" ,"data.gov.au Category", "Download", "Permalink","DCTERMS.Identifier"]:
+ if doc.value['metadata'][extra_key] != None and doc.value['metadata'][extra_key] != "":
+ extras.append([extra_key, doc.value['metadata'][extra_key]])
+
+ package_entity = {
+ 'name': pkg_name,
+ 'title': doc.value['metadata']['DCTERMS.Title'],
+ 'url': doc.value['metadata']['DCTERMS.Source.URI'],
+ 'tags': tags, #tags are mandatory?
+ 'author': creator,
+ 'maintainer': creator,
+ 'licence_id': get_licence_id(doc.value['metadata']['DCTERMS.License']),
+ 'notes': html2text.html2text(doc.value['metadata']['Description']),
+ 'owner_org': org_id,
+ 'extras': extras
+ }
+
try:
- print package_entity
+ #print package_entity
ckan.package_register_post(package_entity)
except CkanApiError, e:
- if ckan.last_status == 409:
+ if ckan.last_message == "{\"name\": [\"That URL is already in use.\"]}":
print "package already exists"
else:
+ print ckan.last_message
raise LoaderError('Unexpected status %s checking for package under \'%s\': %r' % (
ckan.last_status, pkg_name, e.args))
-
-
- #add to group
-
- group_name = name_munge(doc.value['metadata']["Agency"][:100])
- try:
- print ckan.group_entity_get(group_name)
-
- # Update the group details
- group_entity = ckan.last_message
- print "group exists"
- if 'packages' in group_entity.keys():
- group_entity['packages'] = list(set(group_entity['packages'] + [pkg_name]))
- else:
- group_entity['packages'] = [pkg_name]
- ckan.group_entity_put(group_entity)
- except CkanApiError, e:
- if ckan.last_status == 404:
- print "group does not exist, creating"
- group_entity = {
- 'name': group_name,
- 'title': doc.value['metadata']["Agency"],
- 'description': doc.value['metadata']["Agency"],
- 'packages': [pkg_name],
- # 'type': "organization" # not allowed via API, use database query
- # update "group" set type = 'organization';
- }
- print group_entity
- ckan.group_register_post(group_entity)
- else:
- raise LoaderError('Unexpected status %s adding to group under \'%s\': %r' % (
- ckan.last_status, pkg_name, e.args))
+ pkg = ckan.package_entity_get(pkg_name)
+
+
+ # add resources (downloadable data files)
if 'Download' in doc.value['metadata'].keys():
try:
- pkg = ckan.package_entity_get(pkg_name)
+
resources = pkg.get('resources', [])
if len(resources) < len(doc.value['metadata']['Download']):
for resource in doc.value['metadata']['Download']:
@@ -199,12 +254,17 @@
format = 'xml'
if resource['format'] == '(CSV/XLS)':
format = 'csv'
+ if resource['format'] == '(Shapefile)':
+ format = 'shp'
+ if resource['format'] == '(KML/KMZ)':
+ format = 'kml'
name = resource['href']
if 'name' in resource.keys():
name = resource['name']
print resource
- ckan.add_package_resource(pkg_name, resource['href'], name=name, resource_type='data',
- format=format, size=human2bytes(resource['size'].replace(',', '')))
+ ckan.add_package_resource(pkg_name, url_fix(resource['href']), name=name, resource_type='data',
+ format=format,
+ size=human2bytes(resource.get('size','0B')))
else:
print "resources already exist"
except CkanApiError, e:
--- /dev/null
+++ b/documents/datagov-merge.php
@@ -1,1 +1,26 @@
+<?php
+include_once("../include/common.inc.php");
+
+
+setlocale(LC_CTYPE, 'C');
+
+$db = $server->get_db('disclosr-documents');
+$datasets = Array();
+try {
+ $rows = $db->get_view("app", "datasets", null, true)->rows;
+
+ foreach ($rows as $row) {
+ //print_r($row);
+ if ($row->value->url != "http://data.gov.au/data/")
+ $datasets[str_replace(Array("http://data.gov.au/dataset/","/"),"",$row->value->url)] = $row->id;
+ }
+} catch (SetteeRestClientException $e) {
+ setteErrorHandler($e);
+}
+ksort($datasets);
+foreach ($datasets as $datasetname => $datasetkey) {
+ print "$datasetname => $datasetkey<br>\n";
+}
+?>
+
--- /dev/null
+++ b/documents/datagov-resourcereport.py
@@ -1,1 +1,79 @@
+import couchdb
+couch = couchdb.Server('http://127.0.0.1:5984/')
+#couch = couchdb.Server('http://192.168.1.113:5984/')
+import urllib
+import urlparse
+import httplib2
+import csv
+import ssl
+
+context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
+context.verify_mode = ssl.CERT_NONE
+
+def url_fix(s, charset='utf-8'):
+ """Sometimes you get an URL by a user that just isn't a real
+ URL because it contains unsafe characters like ' ' and so on. This
+ function can fix some of the problems in a similar way browsers
+ handle data entered by the user:
+
+ :param charset: The target charset for the URL if the url was
+ given as unicode string.
+ """
+ if isinstance(s, unicode):
+ s = s.encode(charset, 'ignore')
+ if not urlparse.urlparse(s).scheme:
+ s = "http://"+s
+ scheme, netloc, path, qs, anchor = urlparse.urlsplit(s)
+ path = urllib.quote(path, '/%')
+ qs = urllib.quote_plus(qs, ':&=')
+ return urlparse.urlunsplit((scheme, netloc, path, qs, anchor))
+
+# http://code.activestate.com/recipes/578019-bytes-to-human-human-to-bytes-converter/
+SYMBOLS = {
+ 'customary': ('B', 'KB', 'MB', 'GB', 'T', 'P', 'E', 'Z', 'Y'),
+ 'customary_ext': ('byte', 'kilo', 'mega', 'giga', 'tera', 'peta', 'exa',
+ 'zetta', 'iotta'),
+ 'iec': ('Bi', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi', 'Yi'),
+ 'iec_ext': ('byte', 'kibi', 'mebi', 'gibi', 'tebi', 'pebi', 'exbi',
+ 'zebi', 'yobi'),
+}
+
+
+docsdb = couch['disclosr-documents']
+out = csv.writer(open("output.csv","w"), delimiter=',',quoting=csv.QUOTE_ALL)
+if __name__ == "__main__":
+ for doc in docsdb.view('app/datasets'):
+ if doc.value['url'] != "http://data.gov.au/data/" and doc.value['agencyID'] != "qld":
+ # Collect the package metadata.
+ pkg_name = filter(lambda x: x in '0123456789abcdefghijklmnopqrstuvwxyz-_',
+ doc.value['url'].replace("http://data.gov.au/dataset/", '').replace('/', '')[:100]);
+ if 'Download' in doc.value['metadata'].keys() and len(doc.value['metadata']['Download']) > 0:
+ for resource in doc.value['metadata']['Download']:
+ # http://docs.ckan.org/en/ckan-1.7/domain-model-resource.html
+ # (KML/KMZ) / (Shapefile) /(Other)
+ format = "plain"
+ if resource['format'] == '(XML)':
+ format = 'xml'
+ if resource['format'] == '(CSV/XLS)':
+ format = 'csv'
+ if resource['format'] == '(Shapefile)':
+ format = 'shp'
+ if resource['format'] == '(KML/KMZ)':
+ format = 'kml'
+ name = resource['href']
+ if 'name' in resource.keys():
+ name = resource['name']
+ if resource['href'].startswith("ftp"):
+ out.writerow([pkg_name, url_fix(resource['href']), name,format, "ftp", ""])
+ else:
+ try:
+ h = httplib2.Http(disable_ssl_certificate_validation=True)
+ resp = h.request(url_fix(resource['href']), 'HEAD')
+ content_type = resp[0]['content-type'] if 'content-type' in resp[0].keys() else ""
+ out.writerow([pkg_name, url_fix(resource['href']), name,format, resp[0]['status'], content_type])
+ except httplib2.ServerNotFoundError:
+ out.writerow([pkg_name, url_fix(resource['href']), name,format, "500","badurl"])
+ else:
+ out.writerow([pkg_name])
+
--- a/documents/datagov.py
+++ b/documents/datagov.py
@@ -39,7 +39,9 @@
link = item.find("a")
format = item.find(property="dc:format")
linkobj = {"href":link['href'].replace("/bye?","").strip(),
- "format": format.string.strip(), "size": format.next_sibling.string.strip()}
+ "format": format.string.strip()}
+ if format.next_sibling.string != None:
+ linkobj["size"] = format.next_sibling.string.strip()
if link.string != None:
linkobj["name"] = link.string.strip()
doc['metadata'][last_title].append(linkobj)
--- /dev/null
+++ b/documents/dataqld.py
@@ -1,1 +1,28 @@
+import sys, os
+import time
+import scrape
+from bs4 import BeautifulSoup
+from unidecode import unidecode
+import ckanclient
+
+# Instantiate the CKAN client.
+ckan = ckanclient.CkanClient(base_location='https://data.qld.gov.au/api')
+
+# Get the package list.
+package_list = ckan.package_register_get()
+for package_name in package_list:
+# Get the details of a package.
+ (url, mime_type, html) = scrape.fetchURL(scrape.docsdb,
+ "https://data.qld.gov.au/dataset/"+package_name , "data", "qld", False)
+ hash = scrape.mkhash(scrape.canonurl(url))
+ print hash
+ doc = scrape.docsdb.get(hash)
+ if "metadata" not in doc.keys() or True:
+ ckan.package_entity_get(package_name)
+ package_entity = ckan.last_message
+ doc['type'] = "dataset"
+ doc['metadata'] = package_entity
+ print package_entity
+ scrape.docsdb.save(doc)
+
--- a/documents/gazette.py
+++ b/documents/gazette.py
@@ -5,20 +5,53 @@
from unidecode import unidecode
-listurl = "http://gazettes.ag.gov.au/portal/govgazonline.nsf/publications?OpenView&Start=3960"
-(url, mime_type, listhtml) = scrape.fetchURL(scrape.docsdb,
- listurl, "gazette", "AGD")
-soup = BeautifulSoup(listhtml)
-for row in soup.find_all('tr'):
- if row.has_key('valign'):
- for col in tr.find_all('td'):
- print col.string
- #url = scrape.fullurl(listurl, atag['href'])
- #(url, mime_type, html) = scrape.fetchURL(scrape.docsdb,
- # url, "data", "AGIMO")
- #hash = scrape.mkhash(scrape.canonurl(url))
- #doc = scrape.docsdb.get(hash)
- #print doc['metadata']
- #scrape.docsdb.save(doc)
- #time.sleep(2)
+items = 3950
+items = 1
+while True:
+ print str(items) + " (" +str(items/25) +" screens to go)"
+ listurl = "http://gazettes.ag.gov.au/portal/govgazonline.nsf/publications?OpenView&Start=" + str(items)
+ (listurl, mime_type, listhtml) = scrape.fetchURL(scrape.docsdb,
+ listurl, "gazette", "AGD", False)
+ for line in listhtml.split('\n'):
+ soup = BeautifulSoup(line)
+ #print line
+ for row in soup.find_all('tr'):
+ print line
+ if row.has_key('valign'):
+ i = 0
+ date = ""
+ id = ""
+ type = ""
+ description = ""
+ name = ""
+ url = ""
+ for col in soup.find_all('td'):
+ #print ''.join(col.stripped_strings)
+ if i == 0:
+ date = ''.join(col.stripped_strings)
+ if i == 1:
+ id = ''.join(col.stripped_strings)
+ if i == 2:
+ type = ''.join(col.stripped_strings)
+ if i == 3:
+ description = ''.join(col.stripped_strings)
+ for link in col.findAll('a'):
+ if link.has_key("href"):
+ url = link['href']
+ name = ''.join(link.stripped_strings)
+ print str(items) + " (" +str(items/25) +" screens to go)"
+ print [date, id, type, description, name, url]
+ itemurl = scrape.fullurl(listurl, url)
+ (itemurl, mime_type, html) = scrape.fetchURL(scrape.docsdb,
+ itemurl, "gazette", "AGD", False)
+ hash = scrape.mkhash(scrape.canonurl(itemurl))
+ doc = scrape.docsdb.get(hash)
+ doc['metadata'] = {"date": date, "id": id, "type":type, "description":description,"name": name,"url": url}
+ scrape.docsdb.save(doc)
+ #time.sleep(2)
+ i = i + 1;
+ items = items - 25
+ if items <= 0:
+ break
+
--- a/documents/genericScrapers.py
+++ b/documents/genericScrapers.py
@@ -72,7 +72,8 @@
edate = date.today().strftime("%Y-%m-%d")
doc = {'_id': dochash, 'agencyID': self.getAgencyID()
, 'url': self.getURL(), 'docID': dochash,
- "date": edate, "title": "Disclosure Log Updated", "description": self.remove_control_chars(description), "diff": diff}
+ "date": edate, "title": "Disclosure Log Updated",
+ "description": self.remove_control_chars(description), "diff": self.remove_control_chars(diff)}
foidocsdb.save(doc)
else:
print "already saved"
@@ -199,11 +200,16 @@
return table.find_all('tr')
def getDate(self, content, entry, doc):
- date = ''.join(content.stripped_strings).strip()
- (a, b, c) = date.partition("(")
- date = self.remove_control_chars(a.replace("Octber", "October").replace("1012","2012"))
- print date
- edate = parse(date, dayfirst=True, fuzzy=True).strftime("%Y-%m-%d")
+ strdate = ''.join(content.stripped_strings).strip()
+ (a, b, c) = strdate.partition("(")
+ strdate = self.remove_control_chars(a.replace("Octber", "October").replace("Janrurary", "January").replace("1012","2012"))
+ print strdate
+ try:
+ edate = parse(strdate, dayfirst=True, fuzzy=True).strftime("%Y-%m-%d")
+ except ValueError:
+ print >> sys.stderr, "ERROR date invalid %s " % strdate
+ print >> sys.stderr, "ERROR date originally %s " % ''.join(content.stripped_strings).strip()
+ edate = date.today().strftime("%Y-%m-%d")
print edate
doc.update({'date': edate})
return
@@ -266,8 +272,7 @@
'Summary of FOIrequest received by agency/minister',
'Summary of FOI request received', 'Description of FOI Request',
"FOI request", 'Results 1 to 67 of 67']
- if doc['title'] not in badtitles\
- and doc['description'] != '':
+ if doc['title'] not in badtitles and 'description' in doc.keys() and doc['description'] != '':
print "saving"
foidocsdb.save(doc)
else:
@@ -277,6 +282,6 @@
print "header row"
else:
- print "ERROR number of columns incorrect"
+ print >> sys.stderr, "ERROR number of columns incorrect"
print row
--- a/documents/runScrapers.sh
+++ b/documents/runScrapers.sh
@@ -1,10 +1,22 @@
-for f in scrapers/*.py;
- do echo "Processing $f file..";
- python $f;
+DIR="$( cd "$( dirname &