From: Maxious About
--- /dev/null
+++ b/documents/agency.php
@@ -1,1 +1,41 @@
+get_db('disclosr-agencies');
+$idtoname = Array();
+foreach ($agenciesdb->get_view("app", "byCanonicalName")->rows as $row) {
+ $idtoname[$row->id] = trim($row->value->name);
+}
+$foidocsdb = $server->get_db('disclosr-foidocuments');
+
+include_header_documents((isset($_REQUEST['id']) ? $idtoname[$_REQUEST['id']] : 'Entries by Agency'));
+$endkey = (isset($_REQUEST['end_key']) ? $_REQUEST['end_key'] : '9999-99-99');
+?>
+
+get_view("app", "byAgencyID", $_REQUEST['id'], false, false, false)->rows;
+ foreach ($rows as $row) {
+ //print_r($rows);
+ echo displayLogEntry($row, $idtoname);
+ if (!isset($startkey))
+ $startkey = $row->key;
+ $endkey = $row->key;
+ }
+ } else {
+ $rows = $foidocsdb->get_view("app", "byAgencyID?group=true", null, false, false, true)->rows;
+ if ($rows) {
+ foreach ($rows as $row) {
+ echo '' . $idtoname[$row->key] . " (" . $row->value . " records)
\n";
+ }
+ }
+ }
+} catch (SetteeRestClientException $e) {
+ setteErrorHandler($e);
+}
+echo "next page ";
+include_footer_documents();
+?>
--- a/documents/charts.php
+++ b/documents/charts.php
@@ -1,6 +1,6 @@
get_db('disclosr-agencies');
@@ -15,29 +15,28 @@
Charts
Lorem ipsum.
-
+
+
+
+get_db('disclosr-agencies');
+
+$idtoname = Array();
+foreach ($agenciesdb->get_view("app", "byCanonicalName")->rows as $row) {
+ $idtoname[$row->id] = trim($row->value->name);
+}
+$foidocsdb = $server->get_db('disclosr-foidocuments');
+try {
+ $rows = $foidocsdb->get_view("app", "byDate", Array($endkey, '0000-00-00'), true, 20)->rows;
+ if ($rows) {
+ foreach ($rows as $key => $row) {
+ echo displayLogEntry($row, $idtoname);
+ if (!isset($startkey)) $startkey = $row->key;
+ $endkey = $row->key;
+ }
+ }
+} catch (SetteeRestClientException $e) {
+ setteErrorHandler($e);
+}
+echo "next page ";
+*/
+include_footer_documents();
+?>
+
--- a/documents/disclogsList.php
+++ b/documents/disclogsList.php
@@ -1,7 +1,7 @@
--- a/documents/genericScrapers.py
+++ b/documents/genericScrapers.py
@@ -196,10 +196,9 @@
(url, mime_type, content) = scrape.fetchURL(scrape.docsdb,
self.getURL(), "foidocuments", self.getAgencyID())
if content is not None:
- if mime_type is "text/html"\
- or mime_type is "application/xhtml+xml"\
- or mime_type is"application/xml":
+ if mime_type == "text/html" or mime_type == "application/xhtml+xml" or mime_type == "application/xml":
# http://www.crummy.com/software/BeautifulSoup/documentation.html
+ print "parsing"
soup = BeautifulSoup(content)
table = self.getTable(soup)
for row in self.getRows(table):
@@ -217,11 +216,11 @@
dochash = scrape.mkhash(
self.remove_control_chars(
url + (''.join(id.stripped_strings))))
- doc = foidocsdb.get(hash)
+ doc = foidocsdb.get(dochash)
if doc is None:
- print "saving " + hash
- doc = {'_id': hash,
+ print "saving " + dochash
+ doc = {'_id': dochash,
'agencyID': self.getAgencyID(),
'url': self.getURL(),
'docID': (''.join(id.stripped_strings))}
--- a/documents/index.php
+++ b/documents/index.php
@@ -3,10 +3,11 @@
include('template.inc.php');
include_header_documents("");
include_once('../include/common.inc.php');
-$startkey = (isset($_REQUEST['start_key']) ? $_REQUEST['start_key'] : '9999-99-99');
+$endkey = (isset($_REQUEST['end_key']) ? $_REQUEST['end_key'] : '9999-99-99');
?>
+
get_db('disclosr-agencies');
$idtoname = Array();
@@ -15,17 +16,18 @@
}
$foidocsdb = $server->get_db('disclosr-foidocuments');
try {
- $rows = $foidocsdb->get_view("app", "byDate", Array($startkey, '0000-00-00'), true, 20)->rows;
+ $rows = $foidocsdb->get_view("app", "byDate", Array($endkey, '0000-00-00'), true, 20)->rows;
if ($rows) {
foreach ($rows as $key => $row) {
echo displayLogEntry($row, $idtoname);
+ if (!isset($startkey)) $startkey = $row->key;
$endkey = $row->key;
}
}
} catch (SetteeRestClientException $e) {
setteErrorHandler($e);
}
-echo "next page";
+echo "next page ";
include_footer_documents();
?>
--- a/documents/rss.xml.php
+++ b/documents/rss.xml.php
@@ -8,21 +8,28 @@
//Creating an instance of FeedWriter class.
$TestFeed = new RSS2FeedWriter();
//Setting the channel elements
-//Use wrapper functions for common channelelements
-$TestFeed->setTitle('disclosurelo.gs Newest Entries - All');
-$TestFeed->setLink('http://disclosurelo.gs/rss.xml.php');
-$TestFeed->setDescription('disclosurelo.gs Newest Entries - All Agencies');
-$TestFeed->setChannelElement('language', 'en-us');
-$TestFeed->setChannelElement('pubDate', date(DATE_RSS, time()));
-
-//Retriving informations from database
+////Retrieving information from database
$idtoname = Array();
$agenciesdb = $server->get_db('disclosr-agencies');
foreach ($agenciesdb->get_view("app", "byCanonicalName")->rows as $row) {
$idtoname[$row->id] = trim($row->value->name);
}
$foidocsdb = $server->get_db('disclosr-foidocuments');
-$rows = $foidocsdb->get_view("app", "byDate", Array('9999-99-99', '0000-00-00', 50), true)->rows;
+if (isset($_REQUEST['id'])) {
+ $rows = $foidocsdb->get_view("app", "byAgencyID", $_REQUEST['id'], false, false, false)->rows;
+ $title = $idtoname[$_REQUEST['id']];
+} else {
+ $rows = $foidocsdb->get_view("app", "byDate", Array('9999-99-99', '0000-00-00', 50), true)->rows;
+ $title = 'All Agencies';
+}
+//Use wrapper functions for common channel elements
+$TestFeed->setTitle('disclosurelo.gs Newest Entries - '.$title);
+$TestFeed->setLink('http://disclosurelo.gs/rss.xml.php'.(isset($_REQUEST['id'])? '?id='.$_REQUEST['id'] : ''));
+$TestFeed->setDescription('disclosurelo.gs Newest Entries - '.$title);
+$TestFeed->setChannelElement('language', 'en-us');
+$TestFeed->setChannelElement('pubDate', date(DATE_RSS, time()));
+
+
//print_r($rows);
foreach ($rows as $row) {
//Create an empty FeedItem
--- /dev/null
+++ b/documents/scrapers/00a294de663db69062ca09aede7c0487.py
@@ -1,1 +1,47 @@
+import sys,os
+sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
+import genericScrapers
+import dateutil
+from dateutil.parser import *
+from datetime import *
+
+class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
+
+ def __init__(self):
+ super(ScraperImplementation, self).__init__()
+ def getDate(self, content, entry, doc):
+ date = ''.join(entry.find('th').stripped_strings).strip()
+ (a, b, c) = date.partition("(")
+ date = self.remove_control_chars(a.replace("Octber", "October"))
+ print date
+ edate = parse(date, dayfirst=True, fuzzy=True).strftime("%Y-%m-%d")
+ print edate
+ doc.update({'date': edate})
+ return
+ def getColumnCount(self):
+ return 4
+
+ def getTable(self, soup):
+ return soup.find(summary="List of Defence documents released under Freedom of Information requets")
+
+ def getColumns(self, columns):
+ (id, description, access, notes) = columns
+ return (id, None, description, description, notes)
+
+
+if __name__ == '__main__':
+ print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
+ print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
+
+ nsi = ScraperImplementation()
+ nsi.disclogURL = "http://www.defence.gov.au/foi/disclosure_log_201213.cfm"
+ nsi.doScrape()
+
+ nsi.disclogURL = "http://www.defence.gov.au/foi/disclosure_log_201112.cfm"
+ nsi.doScrape()
+
+ nsi.disclogURL = "http://www.defence.gov.au/foi/disclosure_log_201011.cfm"
+ nsi.doScrape()
+
+
--- a/documents/scrapers/00a294de663db69062ca09aede7c0487.txt
+++ /dev/null
@@ -1,2 +1,1 @@
-multipage
--- a/documents/scrapers/0ae822d1a748e60d90f0b79b97d5a3e5.txt
+++ /dev/null
@@ -1,2 +1,1 @@
-ACMA style
--- /dev/null
+++ b/documents/scrapers/1803322b27286950cab0c543168b5f21.py
@@ -1,1 +1,58 @@
+import sys,os
+sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
+import genericScrapers
+import dateutil
+from dateutil.parser import *
+from datetime import *
+import scrape
+from bs4 import BeautifulSoup
+class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
+ def __init__(self):
+ super(ScraperImplementation, self).__init__()
+
+ def getDescription(self,content, entry,doc):
+ link = None
+ links = []
+ description = ""
+ for atag in entry.find_all('a'):
+ if atag.has_key('href'):
+ link = scrape.fullurl(self.getURL(), atag['href'])
+ (url, mime_type, htcontent) = scrape.fetchURL(scrape.docsdb, link, "foidocuments", self.getAgencyID(), False)
+ if htcontent != None:
+ if mime_type == "text/html" or mime_type == "application/xhtml+xml" or mime_type =="application/xml":
+ soup = BeautifulSoup(htcontent)
+ row = soup.find(id="content_div_148050")
+ description = ''.join(row.stripped_strings)
+ for atag in row.find_all("a"):
+ if atag.has_key('href'):
+ links.append(scrape.fullurl(link, atag['href']))
+
+ if links != []:
+ doc.update({'links': links})
+ if description != "":
+ doc.update({ 'description': description})
+ def getColumnCount(self):
+ return 4
+
+ def getColumns(self, columns):
+ (id, date, datepub, title) = columns
+ return (id, date, title, title, None)
+
+
+if __name__ == '__main__':
+ print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
+ print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
+
+ nsi = ScraperImplementation()
+ nsi.disclogURL = "http://www.dbcde.gov.au/about_us/freedom_of_information_disclosure_log/foi_list?result_146858_result_page=1"
+ nsi.doScrape()
+ nsi.disclogURL = "http://www.dbcde.gov.au/about_us/freedom_of_information_disclosure_log/foi_list?result_146858_result_page=2"
+ nsi.doScrape()
+ nsi.disclogURL = "http://www.dbcde.gov.au/about_us/freedom_of_information_disclosure_log/foi_list?result_146858_result_page=3"
+ nsi.doScrape()
+ nsi.disclogURL = "http://www.dbcde.gov.au/about_us/freedom_of_information_disclosure_log/foi_list?result_146858_result_page=4"
+ nsi.doScrape()
+ nsi.disclogURL = "http://www.dbcde.gov.au/about_us/freedom_of_information_disclosure_log/foi_list?result_146858_result_page=5"
+ nsi.doScrape()
+
--- a/documents/scrapers/1803322b27286950cab0c543168b5f21.txt
+++ /dev/null
@@ -1,2 +1,1 @@
-multipage log
--- /dev/null
+++ b/documents/scrapers/7c6adc1d41cf029bf1a0959e5156477a.py
@@ -1,1 +1,42 @@
+import sys
+import os
+sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
+import genericScrapers
+import scrape
+from datetime import date
+from pyquery import PyQuery as pq
+from lxml import etree
+import urllib
+class ACMADisclogScraper(genericScrapers.GenericDisclogScraper):
+
+ def doScrape(self):
+ foidocsdb = scrape.couch['disclosr-foidocuments']
+ (url, mime_type, content) = scrape.fetchURL(scrape.docsdb,
+ self.getURL(), "foidocuments", self.getAgencyID())
+
+ d = pq(content)
+ d.make_links_absolute()
+ d.table.filter('.ncTAF_DataTABLE')
+ print [i.text() for i in d.items('span')]
+ description = ""
+ dochash = scrape.mkhash(description)
+ doc = foidocsdb.get(dochash)
+ if doc is None:
+ print "saving " + dochash
+ edate = date.today().strftime("%Y-%m-%d")
+ doc = {'_id': dochash, 'agencyID': self.getAgencyID()
+ , 'url': self.getURL(), 'docID': dochash,
+ "date": edate, "title": "Disclosure Log Updated", "description": description}
+ #foidocsdb.save(doc)
+ else:
+ print "already saved"
+
+
+if __name__ == '__main__':
+ print 'Subclass:', issubclass(ACMADisclogScraper,
+ genericScrapers.GenericDisclogScraper)
+ print 'Instance:', isinstance(ACMADisclogScraper(),
+ genericScrapers.GenericDisclogScraper)
+ ACMADisclogScraper().doScrape()
+
--- /dev/null
+++ b/documents/scrapers/8796220032faf94501bd366763263685.py
@@ -1,1 +1,37 @@
+import sys,os
+sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
+import genericScrapers
+import dateutil
+from dateutil.parser import *
+from datetime import *
+
+class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
+
+ def __init__(self):
+ super(ScraperImplementation, self).__init__()
+
+ def getColumnCount(self):
+ return 6
+
+ def getColumns(self, columns):
+ (id, date, title, description, datepub, notes) = columns
+ return (id, date, title, description, notes)
+
+
+if __name__ == '__main__':
+ print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
+ print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
+
+ nsi = ScraperImplementation()
+ nsi.disclogURL = "http://www.dpmc.gov.au/foi/ips/disclosure_logs/pmo/2011-12.cfm"
+ nsi.doScrape()
+ nsi.disclogURL = "http://www.dpmc.gov.au/foi/ips/disclosure_logs/dpmc/2011-12.cfm"
+ nsi.doScrape()
+ nsi.disclogURL = "http://www.dpmc.gov.au/foi/ips/disclosure_logs/dpmc/2012-13.cfm"
+ nsi.doScrape()
+ nsi.disclogURL = "http://www.dpmc.gov.au/foi/ips/disclosure_logs/omsi/2011-12.cfm"
+ nsi.doScrape()
+ nsi.disclogURL = "http://www.dpmc.gov.au/foi/ips/disclosure_logs/omps/2012-13.cfm"
+ nsi.doScrape()
+
--- a/documents/scrapers/8796220032faf94501bd366763263685.txt
+++ /dev/null
@@ -1,2 +1,1 @@
-multiple pages
--- a/documents/scrapers/8e874a2fde8aa0ccdc6d14573d766540.py
+++ b/documents/scrapers/8e874a2fde8aa0ccdc6d14573d766540.py
@@ -3,7 +3,7 @@
import genericScrapers
import scrape
from bs4 import BeautifulSoup
-import codecs
+import codecs
#http://www.doughellmann.com/PyMOTW/abc/
class NewScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
def getDescription(self,content, entry,doc):
@@ -20,7 +20,7 @@
soup = BeautifulSoup(htcontent)
for text in soup.find(id="divFullWidthColumn").stripped_strings:
description = description + text.encode('ascii', 'ignore')
-
+
for atag in soup.find(id="divFullWidthColumn").find_all("a"):
if atag.has_key('href'):
links.append(scrape.fullurl(link,atag['href']))
@@ -76,11 +76,10 @@
if __name__ == '__main__':
print 'Subclass:', issubclass(NewScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
print 'Instance:', isinstance(NewScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
- #NewScraperImplementation().doScrape()
+ NewScraperImplementation().doScrape()
print 'Subclass:', issubclass(OldScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
print 'Instance:', isinstance(OldScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
osi = OldScraperImplementation()
osi.disclogURL = "http://archive.treasury.gov.au/content/foi_publications.asp?year=-1&abstract=0&classification=&=&titl=Disclosure+Log+-+Documents+Released+Under+FOI"
osi.doScrape()
-# old site too
--- /dev/null
+++ b/documents/scrapers/b0a3281ba66efe173c5a33d5ef90ff76.py
@@ -1,1 +1,35 @@
+import sys,os
+sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
+import genericScrapers
+import dateutil
+from dateutil.parser import *
+from datetime import *
+
+class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
+
+ def __init__(self):
+ super(ScraperImplementation, self).__init__()
+
+ def getColumnCount(self):
+ return 2
+
+ def getColumns(self, columns):
+ (date, title) = columns
+ return (title, date, title, title, None)
+
+
+if __name__ == '__main__':
+ print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
+ print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
+
+ nsi = ScraperImplementation()
+ nsi.disclogURL = "http://www.immi.gov.au/about/foi/foi-disclosures-2012.htm"
+ nsi.doScrape()
+ nsi.disclogURL = "http://www.immi.gov.au/about/foi/foi-disclosures-2011.htm"
+ nsi.doScrape()
+ nsi.disclogURL = "http://www.immi.gov.au/about/foi/foi-disclosures-2010.htm"
+ nsi.doScrape()
+ nsi.disclogURL = "http://www.immi.gov.au/about/foi/foi-disclosures-2009.htm"
+ nsi.doScrape()
+
--- a/documents/scrapers/b0a3281ba66efe173c5a33d5ef90ff76.txt
+++ /dev/null
@@ -1,2 +1,1 @@
-multipage immi
--- a/documents/sitemap.xml.php
+++ b/documents/sitemap.xml.php
@@ -10,10 +10,18 @@
if (strpos($file, ".php") !== false && $file != "index.php" && $file != "sitemap.xml.php")
echo "