prod fixes: optional reduce param for settee get_view, new agency/date pages, per-agency chart, genericScrapers HTML/PDF/DOCX support
Former-commit-id: 130b8c05fff32afd5b4e3f8a9faadac5381bd456
--- a/couchdb/settee/src/classes/SetteeDatabase.class.php
+++ b/couchdb/settee/src/classes/SetteeDatabase.class.php
@@ -251,7 +251,7 @@
*
* @return void
*/
- function get_view($design_doc, $view_name, $key = null, $descending = false, $limit = false) {
+ function get_view($design_doc, $view_name, $key = null, $descending = false, $limit = false, $reduce = null) {
$id = "_design/" . urlencode($design_doc);
$view_name = urlencode($view_name);
$id .= "/_view/$view_name";
@@ -269,6 +269,13 @@
if ($descending) {
$data .= "&descending=true";
}
+ if ($reduce !== null) {
+ // strict comparison: an explicit $reduce = false must still emit reduce=false
+ $data .= "&reduce=" . ($reduce ? "true" : "false");
+ }
if ($limit) {
$data .= "&limit=".$limit;
}
@@ -281,9 +288,11 @@
}
$full_uri = $this->dbname . "/" . $this->safe_urlencode($id);
+
$full_uri = str_replace("%253Fgroup%253D","?group=",$full_uri);
$full_uri = str_replace("%253Flimit%253D","?limit=",$full_uri);
$ret = $this->rest_client->http_get($full_uri, $data);
+ //$ret['decoded'] = str_replace("?k","&k",$ret['decoded']);
return $ret['decoded'];
}
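
The new $reduce argument is three-valued: null leaves the view's default
behaviour untouched, while true or false appends an explicit reduce=
parameter to the query string. A minimal sketch of the intended call sites
(the database and view names below are taken from the pages later in this
patch; $agencyID is a hypothetical key):

    $foidocsdb = $server->get_db('disclosr-foidocuments');
    // grouped counts per agency: ?group=true rides inside the view name, reduce forced on
    $counts = $foidocsdb->get_view("app", "byAgencyID?group=true", null, false, false, true)->rows;
    // raw map rows for one agency: reduce forced off so individual documents come back
    $docs = $foidocsdb->get_view("app", "byAgencyID", $agencyID, false, false, false)->rows;
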
--- a/documents/about.php
+++ b/documents/about.php
@@ -1,7 +1,7 @@
<?php
include('template.inc.php');
-include_header_documents("");
+include_header_documents("About");
include_once('../include/common.inc.php');
?>
<h1>About</h1>
--- /dev/null
+++ b/documents/agency.php
@@ -1,1 +1,41 @@
+<?php
+include('template.inc.php');
+include_once('../include/common.inc.php');
+$agenciesdb = $server->get_db('disclosr-agencies');
+$idtoname = Array();
+foreach ($agenciesdb->get_view("app", "byCanonicalName")->rows as $row) {
+ $idtoname[$row->id] = trim($row->value->name);
+}
+$foidocsdb = $server->get_db('disclosr-foidocuments');
+
+include_header_documents((isset($_REQUEST['id']) ? $idtoname[$_REQUEST['id']] : 'Entries by Agency'));
+$endkey = (isset($_REQUEST['end_key']) ? $_REQUEST['end_key'] : '9999-99-99');
+?>
+<div class="headline">Read all the information released by Australian Federal Government agencies under the FOI Act in one place!</div>
+<a style='float:right' href="rss.xml.php"><img src="img/feed-icon-14x14.png" alt="RSS Icon"/> All Agencies RSS Feed</a><br>
+<?php
+try {
+ if (isset($_REQUEST['id'])) {
+ $rows = $foidocsdb->get_view("app", "byAgencyID", $_REQUEST['id'], false, false, false)->rows;
+ foreach ($rows as $row) {
+ //print_r($rows);
+ echo displayLogEntry($row, $idtoname);
+ if (!isset($startkey))
+ $startkey = $row->key;
+ $endkey = $row->key;
+ }
+ } else {
+ $rows = $foidocsdb->get_view("app", "byAgencyID?group=true", null, false, false, true)->rows;
+ if ($rows) {
+ foreach ($rows as $row) {
+ echo '<a href="agency.php?id=' . $row->key . '">' . $idtoname[$row->key] . " (" . $row->value . " records)</a> <br>\n";
+ }
+ }
+ }
+} catch (SetteeRestClientException $e) {
+ setteErrorHandler($e);
+}
+echo "<a class='btn btn-large btn-primary' href='?end_key=$endkey' style='float:right;'>next page <i class='icon-circle-arrow-right icon-white'></i></a>";
+include_footer_documents();
+?>
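
Settee url-encodes the whole _design/... path, which is why agency.php above
smuggles ?group=true inside the view name and relies on the str_replace in
SetteeDatabase::get_view to restore it. A sketch of that round trip in plain
PHP (hypothetical standalone values):

    $view = "byAgencyID?group=true";
    $encoded = urlencode(urlencode($view));                    // byAgencyID%253Fgroup%253Dtrue
    echo str_replace("%253Fgroup%253D", "?group=", $encoded);  // byAgencyID?group=true
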
--- a/documents/charts.php
+++ b/documents/charts.php
@@ -1,6 +1,6 @@
<?php
include('template.inc.php');
-include_header_documents("");
+include_header_documents("Charts");
include_once('../include/common.inc.php');
$agenciesdb = $server->get_db('disclosr-agencies');
@@ -15,29 +15,28 @@
<h1><a href="about.php">Charts</a></h1>
<h4 class="subheader">Lorem ipsum.</h4>
</div>
-<div id="employees" style="width:1000px;height:900px;"></div>
+<div id="bydate" style="width:1000px;height:300px;"></div>
+<div id="byagency" style="width:1200px;height:300px;"></div>
<script id="source">
window.onload = function() {
$(document).ready(function() {
var
d1 = [],
- start = new Date("2009/01/01 01:00").getTime(),
- options,
- graph,
- i, x, o;
+ options1,
+ o1;
<?php
try {
- $rows = $foidocsdb->get_view("app", "byDate?group=true", null, true)->rows;
+ $rows = $foidocsdb->get_view("app", "byDateMonthYear?group=true",null, false,false,true)->rows;
$dataValues = Array();
foreach ($rows as $row) {
- $dataValues[$row->value] = $row->key;
+ $dataValues[$row->key] = $row->value;
}
$i = 0;
ksort($dataValues);
- foreach ($dataValues as $value => $key) {
+ foreach ($dataValues as $key => $value) {
$date = date_create_from_format('Y-m-d', $key);
if (date_format($date, 'U') != "") {
echo " d1.push([".date_format($date, 'U')."000, $value]);" . PHP_EOL;
@@ -52,7 +51,7 @@
- options = {
+ options1 = {
xaxis : {
mode : 'time',
labelsAngle : 45
@@ -68,19 +67,19 @@
function drawGraph (opts) {
// Clone the options, so the 'options' variable always keeps intact.
- o = Flotr._.extend(Flotr._.clone(options), opts || {});
+ o1 = Flotr._.extend(Flotr._.clone(options1), opts || {});
// Return a new graph.
return Flotr.draw(
- document.getElementById("employees"),
+ document.getElementById("bydate"),
[ d1 ],
- o
+ o1
);
}
graph = drawGraph();
- Flotr.EventAdapter.observe(container, 'flotr:select', function(area){
+ Flotr.EventAdapter.observe(document.getElementById("bydate"), 'flotr:select', function(area){
// Draw selected area
graph = drawGraph({
xaxis : { min : area.x1, max : area.x2, mode : 'time', labelsAngle : 45 },
@@ -89,10 +88,74 @@
});
// When graph is clicked, draw the graph with default area.
- Flotr.EventAdapter.observe(container, 'flotr:click', function () { graph = drawGraph(); });
+ Flotr.EventAdapter.observe(document.getElementById("bydate"), 'flotr:click', function () { graph = drawGraph(); });
});
};
+
+var d2 = [];
+var agencylabels = [];
+function agencytrackformatter(obj) {
+
+ return agencylabels[Math.floor(obj.x)] +" = "+obj.y;
+
+ }
+ function agencytickformatter(val, axis) {
+ if (agencylabels[Math.floor(val)]) {
+ return '<p style="margin-top:8em;-webkit-transform:rotate(-90deg);">'+(agencylabels[Math.floor(val)])+"</p>";
+
+ } else {
+ return "";
+ }
+ }
+<?php
+ try {
+ $rows = $foidocsdb->get_view("app", "byAgencyID?group=true",null, false,false,true)->rows;
+
+
+ $dataValues = Array();
+ $i = 0;
+ foreach ($rows as $row) {
+ echo " d2.push([".$i.", $row->value]);" . PHP_EOL;
+ echo " agencylabels.push(['".str_replace("'","",$idtoname[$row->key])."']);" . PHP_EOL;
+
+ $i++;
+ }
+ } catch (SetteeRestClientException $e) {
+ setteErrorHandler($e);
+ }
+ ?>
+ // Draw the graph
+ Flotr.draw(
+ document.getElementById("byagency"),
+ [d2],
+ {
+ bars : {
+ show : true,
+ horizontal : false,
+ shadowSize : 0,
+ barWidth : 0.5
+ },
+ mouse : {
+ track : true,
+ relative : true,
+ trackFormatter: agencytrackformatter
+ },
+ yaxis : {
+ min : 0,
+ autoscaleMargin : 1
+ },
+ xaxis: {
+ minorTickFreq: 1,
+ noTicks: agencylabels.length,
+ showMinorLabels: true,
+ tickFormatter: agencytickformatter
+ },
+ legend: {
+ show: false
+ }
+ }
+ );
</script>
<?php
--- /dev/null
+++ b/documents/date.php
@@ -1,1 +1,34 @@
+<?php
+include('template.inc.php');
+include_header_documents("Entries by Date");
+include_once('../include/common.inc.php');
+$endkey = (isset($_REQUEST['end_key']) ? $_REQUEST['end_key'] : '9999-99-99');
+?>
+<div class="headline">Read all the information released by Australian Federal Government agencies under the FOI Act in one place!</div>
+<a style='float:right' href="rss.xml.php"><img src="img/feed-icon-14x14.png" alt="RSS Icon"/> All Agencies RSS Feed</a><br>
+<?php
+/*$agenciesdb = $server->get_db('disclosr-agencies');
+
+$idtoname = Array();
+foreach ($agenciesdb->get_view("app", "byCanonicalName")->rows as $row) {
+ $idtoname[$row->id] = trim($row->value->name);
+}
+$foidocsdb = $server->get_db('disclosr-foidocuments');
+try {
+ $rows = $foidocsdb->get_view("app", "byDate", Array($endkey, '0000-00-00'), true, 20)->rows;
+ if ($rows) {
+ foreach ($rows as $key => $row) {
+ echo displayLogEntry($row, $idtoname);
+ if (!isset($startkey)) $startkey = $row->key;
+ $endkey = $row->key;
+ }
+ }
+} catch (SetteeRestClientException $e) {
+ setteErrorHandler($e);
+}
+echo "<a class='btn btn-large btn-primary' href='?end_key=$endkey' style='float:right;'>next page <i class='icon-circle-arrow-right icon-white'></i></a>";
+*/
+include_footer_documents();
+?>
+
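
date.php ships with its body commented out, but the commented block and
index.php at the end of this patch share the same paging contract; a sketch,
assuming the byDate view emits YYYY-MM-DD strings as keys:

    $endkey = isset($_REQUEST['end_key']) ? $_REQUEST['end_key'] : '9999-99-99';
    // walk descending from $endkey down to '0000-00-00', 20 entries per page
    $rows = $foidocsdb->get_view("app", "byDate", Array($endkey, '0000-00-00'), true, 20)->rows;
    foreach ($rows as $row) {
        $endkey = $row->key;  // last key on the page seeds the next-page link
    }
    echo "<a href='?end_key=$endkey'>next page</a>";
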
--- a/documents/disclogsList.php
+++ b/documents/disclogsList.php
@@ -1,7 +1,7 @@
<?php
include('template.inc.php');
-include_header_documents("");
+include_header_documents("List of Disclosure Logs");
include_once('../include/common.inc.php');
echo "<table>
--- /dev/null
+++ b/documents/disclosr-documents.nja
@@ -1,1 +1,7 @@
-
+{
+ "venv": "",
+ "project-type": "Import from sources",
+ "name": "disclosr-documents",
+ "license": "GNU General Public License v3",
+ "description": ""
+}
--- a/documents/genericScrapers.py
+++ b/documents/genericScrapers.py
@@ -1,155 +1,281 @@
-import sys,os
+import sys
+import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
import scrape
from bs4 import BeautifulSoup
from time import mktime
import feedparser
import abc
-import unicodedata, re
+import unicodedata
+import re
import dateutil
from dateutil.parser import *
from datetime import *
import codecs
+import difflib
+import zipfile
+from lxml import etree  # assumed: used to parse the DOCX word/document.xml payload
+from docx import getdocumenttext  # assumed: legacy docx.py helper for paragraph extraction
+
+from StringIO import StringIO
+
+from pdfminer.pdfparser import PDFDocument, PDFParser
+from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter, process_pdf
+from pdfminer.pdfdevice import PDFDevice, TagExtractor
+from pdfminer.converter import TextConverter
+from pdfminer.cmapdb import CMapDB
+from pdfminer.layout import LAParams
+
+
class GenericDisclogScraper(object):
- __metaclass__ = abc.ABCMeta
- agencyID = None
- disclogURL = None
- def remove_control_chars(self, input):
- return "".join([i for i in input if ord(i) in range(32, 127)])
- def getAgencyID(self):
- """ disclosr agency id """
- if self.agencyID == None:
- self.agencyID = os.path.basename(sys.argv[0]).replace(".py","")
- return self.agencyID
-
- def getURL(self):
- """ disclog URL"""
- if self.disclogURL == None:
- agency = scrape.agencydb.get(self.getAgencyID())
- self.disclogURL = agency['FOIDocumentsURL']
- return self.disclogURL
-
- @abc.abstractmethod
- def doScrape(self):
- """ do the scraping """
- return
-
- @abc.abstractmethod
- def getDescription(self, content, entry, doc):
- """ get description"""
- return
-
+ __metaclass__ = abc.ABCMeta
+ agencyID = None
+ disclogURL = None
+
+ def remove_control_chars(self, input):
+ return "".join([i for i in input if ord(i) in range(32, 127)])
+
+ def getAgencyID(self):
+ """ disclosr agency id """
+ if self.agencyID is None:
+ self.agencyID = os.path.basename(sys.argv[0]).replace(".py", "")
+ return self.agencyID
+
+ def getURL(self):
+ """ disclog URL"""
+ if self.disclogURL is None:
+ agency = scrape.agencydb.get(self.getAgencyID())
+ self.disclogURL = agency['FOIDocumentsURL']
+ return self.disclogURL
+
+ @abc.abstractmethod
+ def doScrape(self):
+ """ do the scraping """
+ return
+
+class GenericHTMLDisclogScraper(GenericDisclogScraper):
+
+ def doScrape(self):
+ foidocsdb = scrape.couch['disclosr-foidocuments']
+ (url, mime_type, rcontent) = scrape.fetchURL(scrape.docsdb,
+ self.getURL(), "foidocuments", self.getAgencyID())
+ content = rcontent
+ dochash = scrape.mkhash(content)
+ doc = foidocsdb.get(dochash)
+ if doc is None:
+ print "saving " + dochash
+ description = "This log may have updated but as it was not in a table last time we viewed it, we cannot extract what has changed. Please refer to the agency's website Disclosure Log to see the most recent entries"
+ last_attach = scrape.getLastAttachment(scrape.docsdb, self.getURL())
+ if last_attach is not None:
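+ # difflib.HtmlDiff().make_table renders an HTML table of old-snapshot vs new-page lines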
+ html_diff = difflib.HtmlDiff()
+ description = description + "\nChanges: "
+ description = description + html_diff.make_table(last_attach.read().split('\n'),
+ content.split('\n'))
+ edate = date.today().strftime("%Y-%m-%d")
+ doc = {'_id': dochash, 'agencyID': self.getAgencyID()
+ , 'url': self.getURL(), 'docID': dochash,
+ "date": edate, "title": "Disclosure Log Updated", "description": description}
+ foidocsdb.save(doc)
+ else:
+ print "already saved"
+
+class GenericPDFDisclogScraper(GenericDisclogScraper):
+
+ def doScrape(self):
+ foidocsdb = scrape.couch['disclosr-foidocuments']
+ (url, mime_type, content) = scrape.fetchURL(scrape.docsdb,
+ self.getURL(), "foidocuments", self.getAgencyID())
+ laparams = LAParams()
+ rsrcmgr = PDFResourceManager(caching=True)
+ outfp = StringIO()
+ device = TextConverter(rsrcmgr, outfp, codec='utf-8',
+ laparams=laparams)
+ fp = StringIO()
+ fp.write(content)
+
+ process_pdf(rsrcmgr, device, fp, set(), caching=True,
+ check_extractable=True)
+ description = outfp.getvalue()
+ fp.close()
+ device.close()
+ outfp.close()
+ dochash = scrape.mkhash(description)
+ doc = foidocsdb.get(dochash)
+ if doc is None:
+ print "saving " + dochash
+ edate = date.today().strftime("%Y-%m-%d")
+ doc = {'_id': dochash, 'agencyID': self.getAgencyID()
+ , 'url': self.getURL(), 'docID': dochash,
+ "date": edate, "title": "Disclosure Log Updated", "description": description}
+ foidocsdb.save(doc)
+ else:
+ print "already saved"
+
+
+class GenericDOCXDisclogScraper(GenericDisclogScraper):
+
+ def doScrape(self):
+ foidocsdb = scrape.couch['disclosr-foidocuments']
+ (url, mime_type, content) = scrape.fetchURL(scrape.docsdb
+ , self.getURL(), "foidocuments", self.getAgencyID())
+ mydoc = zipfile.ZipFile(StringIO(content))
+ xmlcontent = mydoc.read('word/document.xml')
+ document = etree.fromstring(xmlcontent)
+ ## Fetch all the text out of the document we just created
+ paratextlist = getdocumenttext(document)
+ # Make explicit unicode version
+ newparatextlist = []
+ for paratext in paratextlist:
+ newparatextlist.append(paratext.encode("utf-8"))
+ ## Join the paragraphs' text with two newlines under each paragraph
+ description = '\n\n'.join(newparatextlist).strip(' \t\n\r')
+ dochash = scrape.mkhash(description)
+ doc = foidocsdb.get(dochash)
+
+ if doc is None:
+ print "saving " + dochash
+ edate = date.today().strftime("%Y-%m-%d")
+ doc = {'_id': dochash, 'agencyID': self.getAgencyID()
+ , 'url': self.getURL(), 'docID': dochash,
+ "date": edate, "title": "Disclosure Log Updated", "description": description}
+ foidocsdb.save(doc)
+ else:
+ print "already saved"
class GenericRSSDisclogScraper(GenericDisclogScraper):
- def doScrape(self):
- foidocsdb = scrape.couch['disclosr-foidocuments']
- (url,mime_type,content) = scrape.fetchURL(scrape.docsdb, self.getURL(), "foidocuments", self.getAgencyID())
- feed = feedparser.parse(content)
- for entry in feed.entries:
- #print entry
- print entry.id
- hash = scrape.mkhash(entry.id)
- #print hash
- doc = foidocsdb.get(hash)
- #print doc
- if doc == None:
- print "saving "+ hash
- edate = datetime.fromtimestamp(mktime( entry.published_parsed)).strftime("%Y-%m-%d")
- doc = {'_id': hash, 'agencyID': self.getAgencyID(), 'url': entry.link, 'docID': entry.id,
- "date": edate,"title": entry.title}
- self.getDescription(entry,entry, doc)
- foidocsdb.save(doc)
+ def doScrape(self):
+ foidocsdb = scrape.couch['disclosr-foidocuments']
+ (url, mime_type, content) = scrape.fetchURL(scrape.docsdb,
+ self.getURL(), "foidocuments", self.getAgencyID())
+ feed = feedparser.parse(content)
+ for entry in feed.entries:
+ #print entry
+ print entry.id
+ dochash = scrape.mkhash(entry.id)
+ doc = foidocsdb.get(dochash)
+ #print doc
+ if doc is None:
+ print "saving " + dochash
+ edate = datetime.fromtimestamp(
+ mktime(entry.published_parsed)).strftime("%Y-%m-%d")
+ doc = {'_id': dochash, 'agencyID': self.getAgencyID(),
+ 'url': entry.link, 'docID': entry.id,
+ "date": edate, "title": entry.title}
+ self.getDescription(entry, entry, doc)
+ foidocsdb.save(doc)
+ else:
+ print "already saved"
+
+ def getDescription(self, content, entry, doc):
+ """ get description from rss entry"""
+ doc.update({'description': content.summary})
+ return
+
+
+class GenericOAICDisclogScraper(GenericDisclogScraper):
+ __metaclass__ = abc.ABCMeta
+
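+ # template-method hooks: subclasses override getTable/getRows/getColumns to match each agency's HTML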
+ @abc.abstractmethod
+ def getColumns(self, columns):
+ """ rearranges columns if required """
+ return
+
+ def getColumnCount(self):
+ return 5
+
+ def getDescription(self, content, entry, doc):
+ """ get description from rss entry"""
+ descriptiontxt = ""
+ for string in content.stripped_strings:
+ descriptiontxt = descriptiontxt + " \n" + string
+ doc.update({'description': descriptiontxt})
+
+ def getTitle(self, content, entry, doc):
+ doc.update({'title': (''.join(content.stripped_strings))})
+
+ def getTable(self, soup):
+ return soup.table
+
+ def getRows(self, table):
+ return table.find_all('tr')
+
+ def getDate(self, content, entry, doc):
+ date = ''.join(content.stripped_strings).strip()
+ (a, b, c) = date.partition("(")
+ date = self.remove_control_chars(a.replace("Octber", "October"))
+ print date
+ edate = parse(date, dayfirst=True, fuzzy=True).strftime("%Y-%m-%d")
+ print edate
+ doc.update({'date': edate})
+ return
+
+ def getLinks(self, content, entry, doc):
+ links = []
+ for atag in entry.find_all("a"):
+ if atag.has_attr('href'):
+ links.append(scrape.fullurl(content, atag['href']))
+ if links != []:
+ doc.update({'links': links})
+ return
+
+ def doScrape(self):
+ foidocsdb = scrape.couch['disclosr-foidocuments']
+ (url, mime_type, content) = scrape.fetchURL(scrape.docsdb,
+ self.getURL(), "foidocuments", self.getAgencyID())
+ if content is not None:
+ if mime_type == "text/html" or mime_type == "application/xhtml+xml" or mime_type == "application/xml":
+ # http://www.crummy.com/software/BeautifulSoup/documentation.html
+ print "parsing"
+ soup = BeautifulSoup(content)
+ table = self.getTable(soup)
+ for row in self.getRows(table):
+ columns = row.find_all('td')
+ if len(columns) == self.getColumnCount():
+ (id, date, title,
+ description, notes) = self.getColumns(columns)
+ print self.remove_control_chars(
+ ''.join(id.stripped_strings))
+ if id.string is None:
+ dochash = scrape.mkhash(
+ self.remove_control_chars(
+ url + (''.join(date.stripped_strings))))
else:
- print "already saved"
- def getDescription(self, content, entry, doc):
- """ get description from rss entry"""
- doc.update({'description': content.summary})
- return
-
-class GenericOAICDisclogScraper(GenericDisclogScraper):
- __metaclass__ = abc.ABCMeta
- @abc.abstractmethod
- def getColumns(self,columns):
- """ rearranges columns if required """
- return
- def getColumnCount(self):
- return 5
- def getDescription(self, content, entry, doc):
- """ get description from rss entry"""
- descriptiontxt = ""
- for string in content.stripped_strings:
- descriptiontxt = descriptiontxt + " \n" + string
- doc.update({'description': descriptiontxt})
- return
- def getTitle(self, content, entry, doc):
- doc.update({'title': (''.join(content.stripped_strings))})
- return
- def getTable(self, soup):
- return soup.table
- def getRows(self, table):
- return table.find_all('tr')
- def getDate(self, content, entry, doc):
- date = ''.join(content.stripped_strings).strip()
- (a,b,c) = date.partition("(")
- date = self.remove_control_chars(a.replace("Octber","October"))
- print date
- edate = parse(date, dayfirst=True, fuzzy=True).strftime("%Y-%m-%d")
- print edate
- doc.update({'date': edate})
- return
- def getLinks(self, content, entry, doc):
- links = []
- for atag in entry.find_all("a"):
- if atag.has_key('href'):
- links.append(scrape.fullurl(content,atag['href']))
- if links != []:
- doc.update({'links': links})
- return
-
- def doScrape(self):
- foidocsdb = scrape.couch['disclosr-foidocuments']
- (url,mime_type,content) = scrape.fetchURL(scrape.docsdb, self.getURL(), "foidocuments", self.getAgencyID())
- if content != None:
- if mime_type == "text/html" or mime_type == "application/xhtml+xml" or mime_type =="application/xml":
- # http://www.crummy.com/software/BeautifulSoup/documentation.html
- soup = BeautifulSoup(content)
- table = self.getTable(soup)
- for row in self.getRows(table):
- columns = row.find_all('td')
- if len(columns) == self.getColumnCount():
- (id, date, title, description, notes) = self.getColumns(columns)
- print self.remove_control_chars(''.join(id.stripped_strings))
- if id.string == None:
- hash = scrape.mkhash(self.remove_control_chars(url+(''.join(date.stripped_strings))))
- else:
- hash = scrape.mkhash(self.remove_control_chars(url+(''.join(id.stripped_strings))))
- doc = foidocsdb.get(hash)
-
- if doc == None:
- print "saving " +hash
- doc = {'_id': hash, 'agencyID': self.getAgencyID(), 'url': self.getURL(), 'docID': (''.join(id.stripped_strings))}
- self.getLinks(self.getURL(),row,doc)
- self.getTitle(title,row, doc)
- self.getDate(date,row, doc)
- self.getDescription(description,row, doc)
- if notes != None:
- doc.update({ 'notes': (''.join(notes.stripped_strings))})
- badtitles = ['-','Summary of FOI Request','FOI request(in summary form)','Summary of FOI request received by the ASC',
-'Summary of FOI request received by agency/minister','Description of Documents Requested','FOI request','Description of FOI Request','Summary of request','Description','Summary',
+ dochash = scrape.mkhash(
+ self.remove_control_chars(
+ url + (''.join(id.stripped_strings))))
+ doc = foidocsdb.get(dochash)
+
+ if doc is None:
+ print "saving " + dochash
+ doc = {'_id': dochash,
+ 'agencyID': self.getAgencyID(),
+ 'url': self.getURL(),
+ 'docID': (''.join(id.stripped_strings))}
+ self.getLinks(self.getURL(), row, doc)
+ self.getTitle(title, row, doc)
+ self.getDate(date, row, doc)
+ self.getDescription(description, row, doc)
+ if notes is not None:
+ doc.update({ 'notes': (
+ ''.join(notes.stripped_strings))})
+ badtitles = ['-','Summary of FOI Request'
+ , 'FOI request(in summary form)'
+ , 'Summary of FOI request received by the ASC',
+'Summary of FOI request received by agency/minister',
+'Description of Documents Requested','FOI request',
+'Description of FOI Request','Summary of request','Description','Summary',
'Summary of FOIrequest received by agency/minister','Summary of FOI request received','Description of FOI Request',"FOI request",'Results 1 to 67 of 67']
- if doc['title'] not in badtitles and doc['description'] != '':
+ if doc['title'] not in badtitles\
+ and doc['description'] != '':
print "saving"
foidocsdb.save(doc)
- else:
- print "already saved "+hash
-
- elif len(row.find_all('th')) == self.getColumnCount():
- print "header row"
-
- else:
- print "ERROR number of columns incorrect"
- print row
-
+ else:
+ print "already saved " + dochash
+
+ elif len(row.find_all('th')) == self.getColumnCount():
+ print "header row"
+
+ else:
+ print "ERROR number of columns incorrect"
+ print row
+
Binary files /dev/null and b/documents/img/feed-icon-14x14.png differ
--- a/documents/index.php
+++ b/documents/index.php
@@ -3,10 +3,11 @@
include('template.inc.php');
include_header_documents("");
include_once('../include/common.inc.php');
-$startkey = (isset($_REQUEST['start_key']) ? $_REQUEST['start_key'] : '9999-99-99');
+$endkey = (isset($_REQUEST['end_key']) ? $_REQUEST['end_key'] : '9999-99-99');
?>
+<div class="headline">Read all the information released by Australian Federal Government agencies under the FOI Act in one place!</div>
+<a style='float:right' href="rss.xml.php"><img src="img/feed-icon-14x14.png" alt="RSS Icon"/> All Agencies RSS Feed</a><br>
<?php
-
$agenciesdb = $server->get_db('disclosr-agencies');
$idtoname = Array();
@@ -15,17 +16,18 @@
}
$foidocsdb = $server->get_db('disclosr-foidocuments');
try {
- $rows = $foidocsdb->get_view("app", "byDate", Array($startkey, '0000-00-00'), true, 20)->rows;
+ $rows = $foidocsdb->get_view("app", "byDate", Array($endkey, '0000-00-00'), true, 20)->rows;
if ($rows) {
foreach ($rows as $key => $row) {
echo displayLogEntry($row, $idtoname);
+ if (!isset($startkey)) $startkey = $row->key;
$endkey = $row->key;