Merge branch 'master' of ssh://apples.lambdacomplex.org/git/disclosr
Former-commit-id: c29a791360b1cd169ea3062bf04c20cb4e7dced8
--- a/.gitmodules
+++ b/.gitmodules
@@ -28,4 +28,7 @@
[submodule "lib/amon-php"]
path = lib/amon-php
url = https://github.com/martinrusev/amon-php.git
+[submodule "documents/lib/parsedatetime"]
+ path = documents/lib/parsedatetime
+ url = git://github.com/bear/parsedatetime.git
--- a/admin/refreshDesignDoc.php
+++ b/admin/refreshDesignDoc.php
@@ -1,7 +1,177 @@
<?php
require_once '../include/common.inc.php';
+//function createFOIDocumentsDesignDoc() {
+
+$foidb = $server->get_db('disclosr-foidocuments');
+$obj = new stdClass();
+$obj->_id = "_design/" . urlencode("app");
+$obj->language = "javascript";
+$obj->views->all->map = "function(doc) { emit(doc._id, doc); };";
+$obj->views->byDate->map = "function(doc) { emit(doc.date, doc); };";
+
+// allow safe updates (even if slightly slower due to the extra rev-detection check)
+$foidb->save($obj, true);
+
+
+function createDocumentsDesignDoc() {
+ /*
+ global $db;
+ $obj = new stdClass();
+ $obj->_id = "_design/" . urlencode("app");
+ $obj->language = "javascript";
+ $obj->views->all->map = "function(doc) { emit(doc._id, doc); };";
+ $obj->views->byABN->map = "function(doc) { emit(doc.abn, doc); };";
+ "views": {
+ "web_server": {
+ "map": "function(doc) {\n emit(doc.web_server, 1);\n}",
+ "reduce": "function (key, values, rereduce) {\n return sum(values);\n}"
+ },
+ "byAgency": {
+ "map": "function(doc) {\n emit(doc.agencyID, 1);\n}",
+ "reduce": "function (key, values, rereduce) {\n return sum(values);\n}"
+ },
+ "byURL": {
+ "map": "function(doc) {\n emit(doc.url, doc);\n}"
+ },
+ "agency": {
+ "map": "function(doc) {\n emit(doc.agencyID, doc);\n}"
+ },
+ "byWebServer": {
+ "map": "function(doc) {\n emit(doc.web_server, doc);\n}"
+ },
+ "getValidationRequired": {
+ "map": "function(doc) {\nif (doc.mime_type == \"text/html\" \n&& typeof(doc.validation) == \"undefined\") {\n emit(doc._id, doc._attachments);\n}\n}"
+ }
+ } */
+}
+
+//function createAgencyDesignDoc() {
$db = $server->get_db('disclosr-agencies');
-createAgencyDesignDoc();
+ $obj = new stdClass();
+ $obj->_id = "_design/" . urlencode("app");
+ $obj->language = "javascript";
+ $obj->views->all->map = "function(doc) { emit(doc._id, doc); };";
+ $obj->views->byABN->map = "function(doc) { emit(doc.abn, doc); };";
+ $obj->views->byCanonicalName->map = "function(doc) {
+ if (doc.parentOrg || doc.orgType == 'FMA-DepartmentOfState') {
+ emit(doc.name, doc);
+ }
+};";
+ $obj->views->byDeptStateName->map = "function(doc) {
+ if (doc.orgType == 'FMA-DepartmentOfState') {
+ emit(doc.name, doc._id);
+ }
+};";
+ $obj->views->parentOrgs->map = "function(doc) {
+ if (doc.parentOrg) {
+ emit(doc._id, doc.parentOrg);
+ }
+};";
+    $obj->views->byName->map = 'function(doc) {
+    if (typeof(doc["status"]) == "undefined" || doc["status"] != "suspended") {
+        emit(doc.name, doc._id);
+        if (typeof(doc.shortName) != "undefined" && doc.shortName != doc.name) {
+            emit(doc.shortName, doc._id);
+        }
+        for (var name in doc.otherNames) {
+            if (doc.otherNames[name] != "" && doc.otherNames[name] != doc.name) {
+                emit(doc.otherNames[name], doc._id);
+            }
+        }
+        for (var name in doc.foiBodies) {
+            if (doc.foiBodies[name] != "" && doc.foiBodies[name] != doc.name) {
+                emit(doc.foiBodies[name], doc._id);
+            }
+        }
+    }
+};';
+
+ $obj->views->foiEmails->map = "function(doc) {
+ emit(doc._id, doc.foiEmail);
+};";
+
+ $obj->views->byLastModified->map = "function(doc) { emit(doc.metadata.lastModified, doc); }";
+ $obj->views->getActive->map = 'function(doc) { if (doc.status == "active") { emit(doc._id, doc); } };';
+ $obj->views->getSuspended->map = 'function(doc) { if (doc.status == "suspended") { emit(doc._id, doc); } };';
+    $obj->views->getScrapeRequired->map = "function(doc) {
+
+// Date.parse returns a millisecond timestamp (or NaN), not a Date object
+var lastScrape = Date.parse(doc.metadata.lastScraped);
+
+var today = new Date();
+
+if (!lastScrape || today.getTime() - lastScrape > 1000) {
+    emit(doc._id, doc);
+}
+
+};";
+ $obj->views->showNamesABNs->map = "function(doc) { emit(doc._id, {name: doc.name, abn: doc.abn}); };";
+ $obj->views->getConflicts->map = "function(doc) {
+ if (doc._conflicts) {
+ emit(null, [doc._rev].concat(doc._conflicts));
+ }
+}";
+ // http://stackoverflow.com/questions/646628/javascript-startswith
+    $obj->views->score->map = 'function(doc) {
+    // polyfill kept inside the function so the view definition stays a
+    // single function expression, as CouchDB map strings require
+    if (!String.prototype.startsWith) {
+        String.prototype.startsWith = function (str) {
+            return !this.indexOf(str);
+        }
+    }
+    var count = 0;
+    if (doc["status"] != "suspended") {
+        for (var propName in doc) {
+            if (typeof(doc[propName]) != "undefined" && doc[propName] != "") {
+                count++;
+            }
+        }
+        var portfolio = doc.parentOrg;
+        if (doc.orgType == "FMA-DepartmentOfState") {
+            portfolio = doc._id;
+        }
+        if (doc.orgType == "Court-Commonwealth" || doc.orgType == "FMA-DepartmentOfParliament") {
+            portfolio = doc.orgType;
+        }
+        emit(count + doc._id, {id: doc._id, name: doc.name, score: count, orgType: doc.orgType, portfolio: portfolio});
+    }
+}';
+    $obj->views->scoreHas->map = 'function(doc) {
+    if (!String.prototype.startsWith) {
+        String.prototype.startsWith = function (str) {
+            return !this.indexOf(str);
+        }
+    }
+    if (!String.prototype.endsWith) {
+        String.prototype.endsWith = function (suffix) {
+            return this.indexOf(suffix, this.length - suffix.length) !== -1;
+        };
+    }
+    if (typeof(doc["status"]) == "undefined" || doc["status"] != "suspended") {
+        for (var propName in doc) {
+            if (typeof(doc[propName]) != "undefined" && (propName.startsWith("has") || propName.endsWith("URL"))) {
+                emit(propName, 1);
+            }
+        }
+        emit("total", 1);
+    }
+}';
+ $obj->views->scoreHas->reduce = 'function (key, values, rereduce) {
+ return sum(values);
+}';
+    $obj->views->fieldNames->map = 'function(doc) {
+    for (var propName in doc) {
+        emit(propName, doc._id);
+    }
+}';
+ $obj->views->fieldNames->reduce = 'function (key, values, rereduce) {
+ return values.length;
+}';
+    // allow safe updates (even if slightly slower due to the extra rev-detection check)
+ $db->save($obj, true);
+
+
?>
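
A minimal sketch of querying the byDate view defined above from Python with
couchdb-python (the library documents/scrape.py already uses); the database
and view names come from this commit, the query options are illustrative:

    import couchdb
    couch = couchdb.Server('http://127.0.0.1:5984/')
    foidb = couch['disclosr-foidocuments']
    # newest disclosure-log entries first; limit is only for the example
    for row in foidb.view('app/byDate', descending=True, limit=10):
        print row.key, row.value['title']
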
--- a/documents/genericScrapers.py
+++ b/documents/genericScrapers.py
@@ -1,29 +1,87 @@
import sys,os
sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
import scrape
+from bs4 import BeautifulSoup
+import parsedatetime as pdt
+from time import mktime
+from datetime import datetime
+import feedparser
+import abc
-from bs4 import BeautifulSoup
-import abc
-import dateutil.parser
+class GenericDisclogScraper(object):
+ __metaclass__ = abc.ABCMeta
+ agencyID = None
+ disclogURL = None
+ def getAgencyID(self):
+ """ disclosr agency id """
+ if self.agencyID == None:
+ self.agencyID = os.path.basename(sys.argv[0]).replace(".py","")
+ return self.agencyID
-class GenericOAICDisclogScraper(object):
- __metaclass__ = abc.ABCMeta
+ def getURL(self):
+ """ disclog URL"""
+ if self.disclogURL == None:
+ agency = scrape.agencydb.get(self.getAgencyID())
+ self.disclogURL = agency['FOIDocumentsURL']
+ return self.disclogURL
+
@abc.abstractmethod
- def getAgencyID(self):
- """ disclosr agency id """
+ def doScrape(self):
+ """ do the scraping """
return
@abc.abstractmethod
- def getURL(self):
- """ disclog URL"""
+ def getDescription(self, content, entry, doc):
+ """ get description"""
return
+
+
+class GenericRSSDisclogScraper(GenericDisclogScraper):
+
+ def doScrape(self):
+ foidocsdb = scrape.couch['disclosr-foidocuments']
+ (url,mime_type,content) = scrape.fetchURL(scrape.docsdb, self.getURL(), "foidocuments", self.getAgencyID())
+ feed = feedparser.parse(content)
+ for entry in feed.entries:
+ #print entry
+ print entry.id
+ hash = scrape.mkhash(entry.id)
+ #print hash
+ doc = foidocsdb.get(hash)
+ #print doc
+ if doc == None:
+ print "saving"
+ edate = datetime.fromtimestamp(mktime( entry.published_parsed)).strftime("%Y-%m-%d")
+ doc = {'_id': hash, 'agencyID': self.getAgencyID(), 'url': entry.link, 'docID': entry.id,
+ "date": edate,"title": entry.title}
+ self.getDescription(entry,entry, doc)
+ foidocsdb.save(doc)
+ else:
+ print "already saved"
+ def getDescription(self, content, entry, doc):
+ """ get description from rss entry"""
+ doc.update({'description': content.summary})
+ return
+
+class GenericOAICDisclogScraper(GenericDisclogScraper):
+ __metaclass__ = abc.ABCMeta
@abc.abstractmethod
def getColumns(self,columns):
""" rearranges columns if required """
return
+ def getColumnCount(self):
+ return 5
+	def getDescription(self, content, entry, doc):
+		""" get description from table row content """
+ descriptiontxt = ""
+ for string in content.stripped_strings:
+ descriptiontxt = descriptiontxt + " \n" + string
+ doc.update({'description': descriptiontxt})
+ return
def doScrape(self):
+ cal = pdt.Calendar()
foidocsdb = scrape.couch['disclosr-foidocuments']
(url,mime_type,content) = scrape.fetchURL(scrape.docsdb, self.getURL(), "foidocuments", self.getAgencyID())
if content != None:
@@ -32,7 +90,7 @@
soup = BeautifulSoup(content)
for row in soup.table.find_all('tr'):
columns = row.find_all('td')
- if len(columns) == 5:
+ if len(columns) == self.getColumnCount():
(id, date, description, title, notes) = self.getColumns(columns)
print id.string
hash = scrape.mkhash(url+id.string)
@@ -41,20 +99,29 @@
if atag.has_key('href'):
links.append(scrape.fullurl(url,atag['href']))
doc = foidocsdb.get(hash)
- descriptiontxt = ""
- for string in description.stripped_strings:
- descriptiontxt = descriptiontxt + string
if doc == None:
print "saving"
- edate = dateutil.parser.parse(date.string).date().strftime("%Y-%m-%d")
- doc = {'_id': hash, 'agencyID': self.getAgencyID(), 'url': self.getURL(), "links": links, 'docID': id.string,
- "date": edate, "description": descriptiontxt,"title": title.string,"notes": notes.string}
+				dtresult = cal.parseDateText(date.string)
+				if len(dtresult) == 2:
+					(dtdate,dtr) = dtresult
+					print dtdate
+					# zero-pad so this branch sorts like the strptime format below
+					edate = "%04d-%02d-%02d" % (dtdate[0], dtdate[1], dtdate[2])
+				else:
+					edate = datetime.strptime(date.string, "%d %B %Y").strftime("%Y-%m-%d")
+ doc = {'_id': hash, 'agencyID': self.getAgencyID(), 'url': self.getURL(), 'docID': id.string,
+ "date": edate,"title": title.string}
+ self.getDescription(description,row, doc)
+
+ if links != []:
+ doc.update({'links': links})
+ if notes != None:
+ doc.update({ 'notes': notes.string})
foidocsdb.save(doc)
else:
- print "already saved"
+ print "already saved "+hash
- elif len(row.find_all('th')) == 5:
+ elif len(row.find_all('th')) == self.getColumnCount():
print "header row"
else:
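
A hedged sketch of the subclass contract the refactor above establishes: a
concrete scraper now only names its columns (and, when the table is not five
columns wide, overrides getColumnCount()); the agency id is derived by the
base class from the script's file name and the disclog URL is looked up in
the agency database. The class name below is hypothetical:

    import genericScrapers

    class ExampleScraper(genericScrapers.GenericOAICDisclogScraper):
        def getColumns(self, columns):
            # rearrange into the (id, date, description, title, notes)
            # order that doScrape() expects
            (id, date, title, description, notes) = columns
            return (id, date, description, title, notes)

    if __name__ == '__main__':
        ExampleScraper().doScrape()
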
--- a/documents/index.php
+++ b/documents/index.php
@@ -10,8 +10,8 @@
$agenciesdb = $server->get_db('disclosr-agencies');
$idtoname = Array();
-foreach ($agenciesdb->get_view("app", "byName")->rows as $row) {
- $idtoname[$row->value] = trim($row->key);
+foreach ($agenciesdb->get_view("app", "byCanonicalName")->rows as $row) {
+ $idtoname[$row->id] = trim($row->value->name);
}
$foidocsdb = $server->get_db('disclosr-foidocuments');
try {
@@ -34,3 +34,4 @@
}
include_footer_documents();
?>
+
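
The view swap above also changes the lookup shape: byName emitted
(name, _id), so the old loop mapped value to key, while byCanonicalName
emits (name, doc), so the map is now built from each row's document id to
its canonical name. The same lookup as a Python sketch, with the agency
database opened as in documents/scrape.py:

    idtoname = {}
    for row in agencydb.view('app/byCanonicalName'):
        idtoname[row.id] = row.value['name'].strip()
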
--- /dev/null
+++ b/documents/lib/parsedatetime
--- a/documents/scrape.py
+++ b/documents/scrape.py
@@ -189,7 +189,7 @@
scrapeAndStore(docsdb, linkurl, depth-1, fieldName, agencyID)
#couch = couchdb.Server('http://192.168.1.148:5984/')
-couch = couchdb.Server('http://192.168.1.148:5984/')
+couch = couchdb.Server('http://127.0.0.1:5984/')
# select database
agencydb = couch['disclosr-agencies']
docsdb = couch['disclosr-documents']
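
Hard-coding the server address (swapped above from a LAN host to localhost)
is fragile; a hedged alternative, not part of this commit, is an environment
override. COUCH_URL is a hypothetical variable name:

    import os
    import couchdb
    # fall back to the localhost default this commit introduces
    couch = couchdb.Server(os.environ.get('COUCH_URL', 'http://127.0.0.1:5984/'))
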
--- /dev/null
+++ b/documents/scrapers/227cb6eb7d2c9f8a6e846df7447d6caa.py
@@ -1,1 +1,48 @@
+import sys,os
+sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
+import genericScrapers
+import scrape
+from bs4 import BeautifulSoup
+#RSS feed not detailed
+#http://www.doughellmann.com/PyMOTW/abc/
+class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
+	def getDescription(self,content, entry,doc):
+		link = None
+		for atag in entry.find_all('a'):
+			if atag.has_key('href'):
+				# cells carry relative hrefs; resolve against the disclog page URL
+				link = scrape.fullurl(self.getURL(),atag['href'])
+		if link == None:
+			return
+		(url,mime_type,htcontent) = scrape.fetchURL(scrape.docsdb, link, "foidocuments", self.getAgencyID(), False)
+ if htcontent != None:
+ if mime_type == "text/html" or mime_type == "application/xhtml+xml" or mime_type =="application/xml":
+ # http://www.crummy.com/software/BeautifulSoup/documentation.html
+				soup = BeautifulSoup(htcontent)
+ links = []
+ description = ""
+ dldivs = soup.find('div',class_="download")
+ if dldivs != None:
+ for atag in dldivs.find_all("a"):
+ if atag.has_key('href'):
+ links.append(scrape.fullurl(url,atag['href']))
+ nodldivs = soup.find('div',class_="incompleteNotification")
+ if nodldivs != None and nodldivs.stripped_strings != None:
+ for text in nodldivs.stripped_strings:
+ description = description + text
+				for row in soup.table.find_all('tr'):
+					th = row.find('th')
+					div = row.find('div')
+					if th != None and div != None:
+						description = description + "\n" + th.string + ": "
+						for text in div.stripped_strings:
+							description = description + text
+ if links != []:
+ doc.update({'links': links})
+ if description != "":
+ doc.update({ 'description': description})
+ def getColumnCount(self):
+ return 2
+ def getColumns(self,columns):
+ (date, title) = columns
+ return (title, date, title, title, None)
+
+if __name__ == '__main__':
+ print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
+ print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
+ ScraperImplementation().doScrape()
+
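
A small sketch of the bs4 calls the detail-page parser above relies on; the
HTML fragment is invented for illustration:

    from bs4 import BeautifulSoup
    html = '<div class="download"><a href="/doc.pdf">document</a></div>'
    soup = BeautifulSoup(html)
    dldiv = soup.find('div', class_="download")
    if dldiv != None:
        for atag in dldiv.find_all('a'):
            if atag.has_key('href'):
                print atag['href']  # prints /doc.pdf
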
--- a/documents/scrapers/3cd40b1240e987cbcd3f0e67054ce259.py
+++ b/documents/scrapers/3cd40b1240e987cbcd3f0e67054ce259.py
@@ -5,12 +5,6 @@
#http://www.doughellmann.com/PyMOTW/abc/
class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
- def getAgencyID(self):
- return "3cd40b1240e987cbcd3f0e67054ce259"
-
- def getURL(self):
- return "http://www.apvma.gov.au/about/foi/disclosure/index.php"
-
def getColumns(self,columns):
(id, date, description, title, notes) = columns
return (id, date, description, title, notes)
@@ -19,3 +13,4 @@
print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
ScraperImplementation().doScrape()
+
--- a/documents/scrapers/6847d7d95fda5ea58a7cd9a2620c673a.txt
+++ b/documents/scrapers/6847d7d95fda5ea58a7cd9a2620c673a.txt
@@ -1,1 +1,3 @@
+# multiple pages need to be scraped initially, each entry has a subpage
http://www.ipaustralia.gov.au/about-us/freedom-of-information/foi-disclosure-log/?page=35&sessionId=3644188
+
--- /dev/null
+++ b/documents/scrapers/820c3df09aa62f6ee7468c73bea0e323.py
@@ -1,1 +1,18 @@
+import sys,os
+sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
+import genericScrapers
+#RSS feed not detailed
+#http://www.doughellmann.com/PyMOTW/abc/
+class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
+ def getColumnCount(self):
+ return 2
+ def getColumns(self,columns):
+ (date, title) = columns
+ return (title, date, title, title, None)
+
+if __name__ == '__main__':
+ print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
+ print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
+ ScraperImplementation().doScrape()
+
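
These stub scrapers are meant to run standalone: the base class takes the
agency id from the file name, so the script name doubles as the CouchDB
agency document id. A usage sketch (the two checks print True by
construction; scraping output follows):

    $ python documents/scrapers/820c3df09aa62f6ee7468c73bea0e323.py
    Subclass: True
    Instance: True
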
--- a/documents/scrapers/8c9421f852c441910bf1d93a57b31d64.py
+++ b/documents/scrapers/8c9421f852c441910bf1d93a57b31d64.py
@@ -1,8 +1,17 @@
import sys,os
sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
-import scrape
-foidocsdb = scrape.couch['disclosr-foidocuments']
+import genericScrapers
+#RSS feed not detailed
-#rss feed has only one entry
-http://www.daff.gov.au/about/foi/ips/disclosure-log
+#http://www.doughellmann.com/PyMOTW/abc/
+class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
+ def getColumns(self,columns):
+ (id, date, title, description, notes) = columns
+ return (id, date, description, title, notes)
+if __name__ == '__main__':
+ print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
+ print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
+ ScraperImplementation().doScrape()
+
+
--- /dev/null
+++ b/documents/scrapers/bb96fe4065afb7e0872136dd657f9369.txt
@@ -1,1 +1,2 @@
+# does not have any disclog entries or table
--- a/documents/scrapers/be9996f0ac58f71f23d074e82d44ead3.py
+++ b/documents/scrapers/be9996f0ac58f71f23d074e82d44ead3.py
@@ -1,11 +1,42 @@
import sys,os
sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
+import genericScrapers
+#RSS feed not detailed
import scrape
-foidocsdb = scrape.couch['disclosr-foidocuments']
-
-import feedparser
-feed = feedparser.parse( "http://foi.deewr.gov.au/disclosure-log/rss")
-print feed.entries[0]
-#foreach feed.entries
+from bs4 import BeautifulSoup
+#http://www.doughellmann.com/PyMOTW/abc/
+class ScraperImplementation(genericScrapers.GenericRSSDisclogScraper):
+ def getDescription(self,content, entry,doc):
+ (url,mime_type,htcontent) = scrape.fetchURL(scrape.docsdb, entry.link, "foidocuments", self.getAgencyID(), False)
+ if htcontent != None:
+ if mime_type == "text/html" or mime_type == "application/xhtml+xml" or mime_type =="application/xml":
+ # http://www.crummy.com/software/BeautifulSoup/documentation.html
+			soup = BeautifulSoup(htcontent)
+ links = []
+ description = ""
+ dldivs = soup.find('div',class_="download")
+ if dldivs != None:
+ for atag in dldivs.find_all("a"):
+ if atag.has_key('href'):
+ links.append(scrape.fullurl(url,atag['href']))
+ nodldivs = soup.find('div',class_="incompleteNotification")
+ if nodldivs != None and nodldivs.stripped_strings != None:
+ for text in nodldivs.stripped_strings:
+ description = description + text
+			for row in soup.table.find_all('tr'):
+				th = row.find('th')
+				div = row.find('div')
+				if th != None and div != None:
+					description = description + "\n" + th.string + ": "
+					for text in div.stripped_strings:
+						description = description + text
+ if links != []:
+ doc.update({'links': links})
+ if description != "":
+ doc.update({ 'description': description})
+if __name__ == '__main__':
+ print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericRSSDisclogScraper)
+ print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericRSSDisclogScraper)
+ ScraperImplementation().doScrape()
+
+
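
A hedged sketch of the feedparser fields the RSS path above depends on; the
feed URL is the one the removed prototype code in this file used:

    import feedparser
    feed = feedparser.parse("http://foi.deewr.gov.au/disclosure-log/rss")
    for entry in feed.entries:
        # the generic RSS scraper hashes entry.id and stores entry.link,
        # entry.title and the parsed publish date; entry.summary becomes
        # the default description
        print entry.id, entry.title, entry.link
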
--- /dev/null
+++ b/documents/scrapers/c43ca6780764f4e61918e8836be74420.py
@@ -1,1 +1,16 @@
+import sys,os
+sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
+import genericScrapers
+#RSS feed not detailed
+#http://www.doughellmann.com/PyMOTW/abc/
+class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
+ def getColumns(self,columns):
+		(id, date, title, description, notes) = columns
+ return (id, date, description, title, notes)
+
+if __name__ == '__main__':
+ print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
+ print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
+ ScraperImplementation().doScrape()
+
--- a/documents/scrapers/c57c0bf315ce5977e730905707a2f6a3.txt
+++ b/documents/scrapers/c57c0bf315ce5977e730905707a2f6a3.txt
@@ -1,1 +1,3 @@
+# pdf
http://www.awm.gov.au/about/AWM_Disclosure_Log.pdf
+
--- a/documents/scrapers/e2a845e55bc9986e6c75c5ad2c508b8d.py
+++ b/documents/scrapers/e2a845e55bc9986e6c75c5ad2c508b8d.py
@@ -1,1 +1,18 @@
+import sys,os
+sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
+import genericScrapers
+#RSS feed not detailed
+
+#http://www.doughellmann.com/PyMOTW/abc/
+class ScraperImplementation(genericScrapers.GenericRSSDisclogScraper):
+ def getColumns(self,columns):
+ (id, date, title, description, notes) = columns
+ return (id, date, description, title, notes)
+
+if __name__ == '__main__':
+ print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericRSSDisclogScraper)
+ print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericRSSDisclogScraper)
+ ScraperImplementation().doScrape()
+
-www.finance.gov.au/foi/disclosure-log/foi-rss.xml
+# www.finance.gov.au/foi/disclosure-log/foi-rss.xml
+
--- a/documents/scrapers/rtk.py
+++ b/documents/scrapers/rtk.py
@@ -1,1 +1,18 @@
+import sys,os
+sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
+import genericScrapers
+#RSS feed not detailed
+
+#http://www.doughellmann.com/PyMOTW/abc/
+class ScraperImplementation(genericScrapers.GenericRSSDisclogScraper):
+ def getColumns(self,columns):
+ (id, date, title, description, notes) = columns
+ return (id, date, description, title, notes)
+
+if __name__ == '__main__':
+ print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericRSSDisclogScraper)
+ print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericRSSDisclogScraper)
+ ScraperImplementation().doScrape()
+
-http://www.righttoknow.org.au/feed/search/%20(latest_status:successful%20OR%20latest_status:partially_successful)
+# http://www.righttoknow.org.au/feed/search/%20(latest_status:successful%20OR%20latest_status:partially_successful)
+
--- a/documents/template.inc.php
+++ b/documents/template.inc.php
@@ -69,7 +69,7 @@
</p>
<ul class="nav">
- <li class="active"><a href="#">Home</a></li>
+ <li><a href="index.php">Home</a></li>
<li><a href="disclogsList.php">List of Disclosure Logs</a></li>
<li><a href="about.php">About</a></li>
@@ -127,13 +127,21 @@
}
function displayLogEntry($row, $idtoname) {
- echo "<div><h2>".$row->value->date.": ".$row->value->title." (".$idtoname[$row->value->agencyID].")</h2>
- <p>".$row->value->description." <br>Note: ".$row->value->notes."</p>";
+    echo "<div><h2>".$row->value->date.": ".$row->value->title." (".$idtoname[$row->value->agencyID].")</h2> <p>".str_replace("\n", "<br>", $row->value->description);
+    if (isset($row->value->notes)) {
+        echo " <br>Note: ".$row->value->notes;
+    }
+    echo "</p>";
+
+    if (isset($row->value->links)) {
echo "<h3>Links/Documents</h3><ul>";
foreach ($row->value->links as $link) {
echo "<li><a href='$link'>".$link."</a></li>";
}
+
echo "</ul>";
+    }
echo "<small><A href='".$row->value->url."'>View original source...</a> ID: ".$row->value->docID."</small>";
echo"</div>";
}
+