Handling of minister/secretary names in FOI export
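
Ministerial and parliamentary-secretary titles are now shortened by
stripping filler words instead of being reduced to their initials;
ordinary agency names keep the old initials behaviour. For example
(derived from the new shortName() below):

    shortName("Minister for Foreign Affairs")  => "MinisterForeignAffairs"  (was "MFA")
    shortName("Australian Taxation Office")    => "ATO"                     (unchanged)

FOI body names are additionally transliterated to ASCII, CouchDB
errors now abort the export instead of emitting a partial CSV, and
the scraper prototype moves out of unimplemented/ with conditional
GET (ETag/If-Modified-Since) support sketched in.
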
Former-commit-id: 4d6a601bd2ae9012300836e1ddf12bc147981e10
--- a/alaveteli/exportAgencies.csv.php
+++ b/alaveteli/exportAgencies.csv.php
@@ -1,6 +1,21 @@
<?php
include_once("../include/common.inc.php");
+
+function shortName($name) {
+    $name = trim($name);
+    // ministerial titles: strip filler words/punctuation, e.g. "Minister for Foreign Affairs" => "MinisterForeignAffairs"
+    if (strpos($name, "Minister ") !== false || strpos($name, "Treasurer") !== false || strpos($name, "Parliamentary Secretary") !== false) {
+        $badWords = Array("Assisting the Prime Minister on", "Assisting on", " the ", " of ", " for ", " on ", " and ", " to ", ",", " ", "'", "`");
+        return str_replace($badWords, "", $name);
+    } else {
+        // other agency names keep the old behaviour: abbreviate to their capital letters
+        $out = Array();
+        preg_match_all('/[A-Z]/', $name, $out);
+        return implode("", $out[0]);
+    }
+}
+
setlocale(LC_CTYPE, 'C');
$headers = Array("#id", "name", "request_email", "short_name", "notes", "publication_scheme", "home_page", "tag_string");
@@ -16,6 +31,7 @@
}
} catch (SetteeRestClientException $e) {
setteErrorHandler($e);
+ die();
}
$foiEmail = Array();
@@ -27,11 +43,12 @@
}
} catch (SetteeRestClientException $e) {
setteErrorHandler($e);
+ die();
}
$fp = fopen('php://output', 'w');
if ($fp && $db) {
- header('Content-Type: text/csv');
+ header('Content-Type: text/csv; charset=utf-8');
header('Content-Disposition: attachment; filename="export.' . date("c") . '.csv"');
header('Pragma: no-cache');
header('Expires: 0');
@@ -40,8 +57,8 @@
$agencies = $db->get_view("app", "byCanonicalName", null, true)->rows;
//print_r($rows);
foreach ($agencies as $agency) {
- // print_r($agency);
-
+ // print_r($agency);
+
if (isset($agency->value->foiEmail) && $agency->value->foiEmail != "null" && !isset($agency->value->status)) {
$row = Array();
$row["#id"] = $agency->id;
@@ -58,9 +75,7 @@
if (isset($agency->value->shortName)) {
$row["short_name"] = $agency->value->shortName;
} else {
- $out = Array();
- preg_match_all('/[A-Z]/', trim($agency->value->name), $out);
- $row["short_name"] = implode("", $out[0]);
+ $row["short_name"] = shortName($agency->value->name);
}
$row["notes"] = "";
$row["publication_scheme"] = (isset($agency->value->infoPublicationSchemeURL) ? $agency->value->infoPublicationSchemeURL : "");
@@ -75,8 +90,8 @@
if (isset($agency->value->foiBodies)) {
foreach ($agency->value->foiBodies as $foiBody) {
- $row['name'] = $foiBody;
- $row['short_name'] = "";
+ $row['name'] = iconv("UTF-8", "ASCII//TRANSLIT", $foiBody); // transliterate so the CSV stays ASCII-safe
+ $row['short_name'] = shortName($foiBody);
fputcsv($fp, array_values($row));
}
}
--- a/include/common.inc.php
+++ b/include/common.inc.php
@@ -62,7 +62,3 @@
}
}
-?>
-
-
-
--- a/include/couchdb.inc.php
+++ b/include/couchdb.inc.php
@@ -86,5 +86,3 @@
function setteErrorHandler($e) {
echo $e->getMessage() . "<br>" . PHP_EOL;
}
-?>
-
--- a/include/template.inc.php
+++ b/include/template.inc.php
@@ -74,5 +74,3 @@
</html>
<?php }
-?>
-
--- /dev/null
+++ b/scrape.py
@@ -0,0 +1,67 @@
+#http://packages.python.org/CouchDB/client.html
+import couchdb
+import urllib2
+from BeautifulSoup import BeautifulSoup
+import re
+
+couch = couchdb.Server('http://192.168.1.148:5984/')
+
+# select database
+agencydb = couch['disclosr-agencies']
+
+for row in agencydb.view('app/getScrapeRequired'):  # view of agencies not recently scraped?
+    agency = agencydb.get(row.id)
+    print agency['agencyName']
+
+#http://diveintopython.org/http_web_services/etags.html
+class NotModifiedHandler(urllib2.BaseHandler):
+    # pass 304 responses through as normal results so the caller can inspect the code
+    def http_error_304(self, req, fp, code, message, headers):
+        addinfourl = urllib2.addinfourl(fp, headers, req.get_full_url())
+        addinfourl.code = code
+        return addinfourl
+
+def scrapeAndStore(URL, depth, agency):
+    URL = "http://www.hole.fi/jajvirta/weblog/"  # hardcoded test URL for now
+    req = urllib2.Request(URL)
+
+    # if there is a previous version stored in CouchDB, load caching helper tags
+    # (assumes the agency doc carries 'etag'/'last_modified' fields)
+    etag = agency.get('etag')
+    last_modified = agency.get('last_modified')
+    if etag:
+        req.add_header("If-None-Match", etag)
+    if last_modified:
+        req.add_header("If-Modified-Since", last_modified)
+
+    opener = urllib2.build_opener(NotModifiedHandler())
+    url_handle = opener.open(req)
+    headers = url_handle.info()  # the addinfourls have the .info() too
+    etag = headers.getheader("ETag")
+    last_modified = headers.getheader("Last-Modified")
+    web_server = headers.getheader("Server")
+    file_size = headers.getheader("Content-Length")
+    mime_type = headers.getheader("Content-Type")
+
+    if hasattr(url_handle, 'code'):
+        if url_handle.code == 304:
+            print "the web page has not been modified"
+        else:
+            # do scraping
+            html = url_handle.read()
+            # http://www.crummy.com/software/BeautifulSoup/documentation.html
+            soup = BeautifulSoup(html)
+            links = soup.findAll('a')  # soup.findAll('a', id=re.compile("^p-"))
+            for link in links:
+                print link['href']
+                # for each unique link
+                # if html mimetype
+                #     go down X levels,
+                #     diff with last stored attachment, store in document
+                # if not
+                #     remember to save parentURL and title (link text that lead to document)
+
+                # store as attachment epoch-filename
+    else:
+        # no response code available; record/alert error to error database
+        print "error in downloading %s" % URL
--- a/unimplemented/scrape.py
+++ /dev/null
@@ -1,64 +0,0 @@
-#http://packages.python.org/CouchDB/client.html
-import couchdb
-import urllib2
-from BeautifulSoup import BeautifulSoup
-import re
-couch = couchdb.Server() # Assuming localhost:5984
-# If your CouchDB server is running elsewhere, set it up like this:
-# couch = couchdb.Server('http://example.com:5984/')
-
-# select database
-agencydb = couch['disclosr-agencies']
-
-for row in agencydb.view('app/getScrapeRequired'): #not recently scraped agencies view?
- agency = agencydb.get(row.id)
- print agency['agencyName']
-
-#http://diveintopython.org/http_web_services/etags.html
-class NotModifiedHandler(urllib2.BaseHandler):
- def http_error_304(self, req, fp, code, message, headers):
- addinfourl = urllib2.addinfourl(fp, headers, req.get_full_url())
- addinfourl.code = code
- return addinfourl
-
-def scrapeAndStore(URL, depth, agency):
- URL = "http://www.hole.fi/jajvirta/weblog/"
- req = urllib2.Request(URL)
-
- #if there is a previous version sotred in couchdb, load caching helper tags
- if etag:
- req.add_header("If-None-Match", etag)
- if last_modified:
- req.add_header("If-Modified-Since", last_modified)
-
- opener = urllib2.build_opener(NotModifiedHandler())
- url_handle = opener.open(req)
- headers = url_handle.info() # the addinfourls have the .info() too
- etag = headers.getheader("ETag")
- last_modified = headers.getheader("Last-Modified")
- web_server = headers.getheader("Server")
- file_size = headers.getheader("Content-Length")
- mime_type = headers.getheader("Content-Type")
-
- if hasattr(url_handle, 'code') and url_handle.code == 304:
- print "the web page has not been modified"
- else:
- print "error %s in downloading %s", url_handle.code, URL
- #record/alert error to error database
-
- #do scraping
- html = ?
- # http://www.crummy.com/software/BeautifulSoup/documentation.html
- soup = BeautifulSoup(html)
-links = soup.findAll('a') # soup.findAll('a', id=re.compile("^p-"))
-for link in links:
- print link['href']
- #for each unique link
- #if html mimetype
- # go down X levels,
- # diff with last stored attachment, store in document
- #if not
- # remember to save parentURL and title (link text that lead to document)
-
- #store as attachment epoch-filename