beginning datagov scraper
beginning datagov scraper


Former-commit-id: a8775a64a3cdda480e4433742ed7ea6ca6a437ef

<!doctype html> <!doctype html>
<html lang="en"> <html lang="en">
<head> <head>
<meta charset="utf-8"> <meta charset="utf-8">
<title>Page Not Found :(</title> <title>Page Not Found :(</title>
<style> <style>
::-moz-selection { background: #fe57a1; color: #fff; text-shadow: none; } ::-moz-selection {
::selection { background: #fe57a1; color: #fff; text-shadow: none; } background: #fe57a1;
html { padding: 30px 10px; font-size: 20px; line-height: 1.4; color: #737373; background: #f0f0f0; -webkit-text-size-adjust: 100%; -ms-text-size-adjust: 100%; } color: #fff;
html, input { font-family: "Helvetica Neue", Helvetica, Arial, sans-serif; } text-shadow: none;
body { max-width: 500px; _width: 500px; padding: 30px 20px 50px; border: 1px solid #b3b3b3; border-radius: 4px; margin: 0 auto; box-shadow: 0 1px 10px #a7a7a7, inset 0 1px 0 #fff; background: #fcfcfc; } }
h1 { margin: 0 10px; font-size: 50px; text-align: center; }  
h1 span { color: #bbb; } ::selection {
h3 { margin: 1.5em 0 0.5em; } background: #fe57a1;
p { margin: 1em 0; } color: #fff;
ul { padding: 0 0 0 40px; margin: 1em 0; } text-shadow: none;
.container { max-width: 380px; _width: 380px; margin: 0 auto; } }
/* google search */  
#goog-fixurl ul { list-style: none; padding: 0; margin: 0; } html {
#goog-fixurl form { margin: 0; } padding: 30px 10px;
#goog-wm-qt, #goog-wm-sb { border: 1px solid #bbb; font-size: 16px; line-height: normal; vertical-align: top; color: #444; border-radius: 2px; } font-size: 20px;
#goog-wm-qt { width: 220px; height: 20px; padding: 5px; margin: 5px 10px 0 0; box-shadow: inset 0 1px 1px #ccc; } line-height: 1.4;
#goog-wm-sb { display: inline-block; height: 32px; padding: 0 10px; margin: 5px 0 0; white-space: nowrap; cursor: pointer; background-color: #f5f5f5; background-image: -webkit-linear-gradient(rgba(255,255,255,0), #f1f1f1); background-image: -moz-linear-gradient(rgba(255,255,255,0), #f1f1f1); background-image: -ms-linear-gradient(rgba(255,255,255,0), #f1f1f1); background-image: -o-linear-gradient(rgba(255,255,255,0), #f1f1f1); -webkit-appearance: none; -moz-appearance: none; appearance: none; *overflow: visible; *display: inline; *zoom: 1; } color: #737373;
#goog-wm-sb:hover, #goog-wm-sb:focus { border-color: #aaa; box-shadow: 0 1px 1px rgba(0, 0, 0, 0.1); background-color: #f8f8f8; } background: #f0f0f0;
#goog-wm-qt:focus, #goog-wm-sb:focus { border-color: #105cb6; outline: 0; color: #222; } -webkit-text-size-adjust: 100%;
input::-moz-focus-inner { padding: 0; border: 0; } -ms-text-size-adjust: 100%;
</style> }
   
  html, input {
  font-family: "Helvetica Neue", Helvetica, Arial, sans-serif;
  }
   
  body {
  max-width: 500px;
  _width: 500px;
  padding: 30px 20px 50px;
  border: 1px solid #b3b3b3;
  border-radius: 4px;
  margin: 0 auto;
  box-shadow: 0 1px 10px #a7a7a7, inset 0 1px 0 #fff;
  background: #fcfcfc;
  }
   
  h1 {
  margin: 0 10px;
  font-size: 50px;
  text-align: center;
  }
   
  h1 span {
  color: #bbb;
  }
   
  h3 {
  margin: 1.5em 0 0.5em;
  }
   
  p {
  margin: 1em 0;
  }
   
  ul {
  padding: 0 0 0 40px;
  margin: 1em 0;
  }
   
  .container {
  max-width: 380px;
  _width: 380px;
  margin: 0 auto;
  }
   
  /* google search */
  #goog-fixurl ul {
  list-style: none;
  padding: 0;
  margin: 0;
  }
   
  #goog-fixurl form {
  margin: 0;
  }
   
  #goog-wm-qt, #goog-wm-sb {
  border: 1px solid #bbb;
  font-size: 16px;
  line-height: normal;
  vertical-align: top;
  color: #444;
  border-radius: 2px;
  }
   
  #goog-wm-qt {
  width: 220px;
  height: 20px;
  padding: 5px;
  margin: 5px 10px 0 0;
  box-shadow: inset 0 1px 1px #ccc;
  }
   
  #goog-wm-sb {
  display: inline-block;
  height: 32px;
  padding: 0 10px;
  margin: 5px 0 0;
  white-space: nowrap;
  cursor: pointer;
  background-color: #f5f5f5;
  background-image: -webkit-linear-gradient(rgba(255, 255, 255, 0), #f1f1f1);
  background-image: -moz-linear-gradient(rgba(255, 255, 255, 0), #f1f1f1);
  background-image: -ms-linear-gradient(rgba(255, 255, 255, 0), #f1f1f1);
  background-image: -o-linear-gradient(rgba(255, 255, 255, 0), #f1f1f1);
  -webkit-appearance: none;
  -moz-appearance: none;
  appearance: none;
  *overflow: visible;
  *display: inline;
  *zoom: 1;
  }
   
  #goog-wm-sb:hover, #goog-wm-sb:focus {
  border-color: #aaa;
  box-shadow: 0 1px 1px rgba(0, 0, 0, 0.1);
  background-color: #f8f8f8;
  }
   
  #goog-wm-qt:focus, #goog-wm-sb:focus {
  border-color: #105cb6;
  outline: 0;
  color: #222;
  }
   
  input::-moz-focus-inner {
  padding: 0;
  border: 0;
  }
  </style>
</head> </head>
<body> <body>
<div class="container"> <div class="container">
<h1>Not found <span>:(</span></h1> <h1>Not found <span>:(</span></h1>
   
<p>Sorry, but the page you were trying to view does not exist.</p> <p>Sorry, but the page you were trying to view does not exist.</p>
   
<p>It looks like this was the result of either:</p> <p>It looks like this was the result of either:</p>
<ul> <ul>
<li>a mistyped address</li> <li>a mistyped address</li>
<li>an out-of-date link</li> <li>an out-of-date link</li>
</ul> </ul>
<script> <script>
var GOOG_FIXURL_LANG = (navigator.language || '').slice(0,2),GOOG_FIXURL_SITE = location.host; var GOOG_FIXURL_LANG = (navigator.language || '').slice(0, 2), GOOG_FIXURL_SITE = location.host;
</script> </script>
<script src="http://linkhelp.clients.google.com/tbproxy/lh/wm/fixurl.js"></script> <script src="http://linkhelp.clients.google.com/tbproxy/lh/wm/fixurl.js"></script>
</div> </div>
   
   
<?php <?php
include('template.inc.php'); include('template.inc.php');
include_once('../include/common.inc.php'); include_once('../include/common.inc.php');
$agenciesdb = $server->get_db('disclosr-agencies'); $agenciesdb = $server->get_db('disclosr-agencies');
   
$idtoname = Array(); $idtoname = Array();
foreach ($agenciesdb->get_view("app", "byCanonicalName")->rows as $row) { foreach ($agenciesdb->get_view("app", "byCanonicalName")->rows as $row) {
$idtoname[$row->id] = trim($row->value->name); $idtoname[$row->id] = trim($row->value->name);
} }
$foidocsdb = $server->get_db('disclosr-foidocuments'); $foidocsdb = $server->get_db('disclosr-foidocuments');
   
include_header_documents((isset($_REQUEST['id']) ? $idtoname[$_REQUEST['id']] : 'Entries by Agency')); include_header_documents((isset($_REQUEST['id']) ? $idtoname[$_REQUEST['id']] : 'Entries by Agency'));
$endkey = (isset($_REQUEST['end_key']) ? $_REQUEST['end_key'] : '9999-99-99'); $endkey = (isset($_REQUEST['end_key']) ? $_REQUEST['end_key'] : '9999-99-99');
?> ?>
<div class="headline">Read all the information released by Australian Federal Government agencies under the FOI Act in one place!</div> <div class="headline">Read all the information released by Australian Federal Government agencies under the FOI Act
<a style='float:right' href="rss.xml.php"><img src="img/feed-icon-14x14.png" alt="RSS Icon"/> All Agencies RSS Feed</a><br> in one place!
  </div>
  <a style='float:right' href="rss.xml.php"><img src="img/feed-icon-14x14.png" alt="RSS Icon"/> All Agencies RSS Feed</a>
  <br>
<?php <?php
try { try {
if ($_REQUEST['id']) { if ($_REQUEST['id']) {
$rows = $foidocsdb->get_view("app", "byAgencyID", $_REQUEST['id'], false, false, false)->rows; $rows = $foidocsdb->get_view("app", "byAgencyID", $_REQUEST['id'], false, false, false)->rows;
foreach ($rows as $row) { foreach ($rows as $row) {
//print_r($rows); //print_r($rows);
echo displayLogEntry($row, $idtoname); echo displayLogEntry($row, $idtoname);
if (!isset($startkey)) if (!isset($startkey))
$startkey = $row->key; $startkey = $row->key;
$endkey = $row->key; $endkey = $row->key;
} }
} else { } else {
$rows = $foidocsdb->get_view("app", "byAgencyID?group=true", null, false, false, true)->rows; $rows = $foidocsdb->get_view("app", "byAgencyID?group=true", null, false, false, true)->rows;
if ($rows) { if ($rows) {
foreach ($rows as $row) { foreach ($rows as $row) {
echo '<a href="agency.php?id=' . $row->key . '">' . $idtoname[$row->key] . " (" . $row->value . " records)</a> <br>\n"; echo '<a href="agency.php?id=' . $row->key . '">' . $idtoname[$row->key] . " (" . $row->value . " records)</a> <br>\n";
} }
} }
} }
} catch (SetteeRestClientException $e) { } catch (SetteeRestClientException $e) {
setteErrorHandler($e); setteErrorHandler($e);
} }
echo "<a class='btn btn-large btn-primary' href='?end_key=$endkey' style='float:right;'>next page <i class='icon-circle-arrow-right icon-white'></i></a>"; echo "<a class='btn btn-large btn-primary' href='?end_key=$endkey' style='float:right;'>next page <i class='icon-circle-arrow-right icon-white'></i></a>";
include_footer_documents(); include_footer_documents();
?> ?>
<?php <?php
include('template.inc.php'); include('template.inc.php');
include_header_documents("Charts"); include_header_documents("Charts");
include_once('../include/common.inc.php'); include_once('../include/common.inc.php');
$agenciesdb = $server->get_db('disclosr-agencies'); $agenciesdb = $server->get_db('disclosr-agencies');
   
$idtoname = Array(); $idtoname = Array();
foreach ($agenciesdb->get_view("app", "byCanonicalName")->rows as $row) { foreach ($agenciesdb->get_view("app", "byCanonicalName")->rows as $row) {
$idtoname[$row->id] = trim($row->value->name); $idtoname[$row->id] = trim($row->value->name);
} }
$foidocsdb = $server->get_db('disclosr-foidocuments'); $foidocsdb = $server->get_db('disclosr-foidocuments');
   
?> ?>
<div class="foundation-header"> <div class="foundation-header">
<h1><a href="about.php">Charts</a></h1> <h1><a href="about.php">Charts</a></h1>
<h4 class="subheader">Lorem ipsum.</h4> <h4 class="subheader">Lorem ipsum.</h4>
</div> </div>
<div id="bydate" style="width:1000px;height:300px;"></div> <div id="bydate" style="width:1000px;height:300px;"></div>
<div id="byagency" style="width:1200px;height:300px;"></div> <div id="byagency" style="width:1200px;height:300px;"></div>
<script id="source"> <script id="source">
window.onload = function() { window.onload = function () {
$(document).ready(function() { $(document).ready(function () {
var var
d1 = [], d1 = [],
options1, options1,
o1; o1;
   
<?php <?php
try { try {
$rows = $foidocsdb->get_view("app", "byDateMonthYear?group=true",null, false,false,true)->rows; $rows = $foidocsdb->get_view("app", "byDateMonthYear?group=true",null, false,false,true)->rows;
   
   
$dataValues = Array(); $dataValues = Array();
foreach ($rows as $row) { foreach ($rows as $row) {
$dataValues[$row->key] = $row->value; $dataValues[$row->key] = $row->value;
} }
$i = 0; $i = 0;
ksort($dataValues); ksort($dataValues);
foreach ($dataValues as $key => $value) { foreach ($dataValues as $key => $value) {
$date = date_create_from_format('Y-m-d', $key); $date = date_create_from_format('Y-m-d', $key);
if (date_format($date, 'U') != "") { if (date_format($date, 'U') != "") {
echo " d1.push([".date_format($date, 'U')."000, $value]);" . PHP_EOL; echo " d1.push([".date_format($date, 'U')."000, $value]);" . PHP_EOL;
// echo " emplabels.push('$key');" . PHP_EOL; // echo " emplabels.push('$key');" . PHP_EOL;
$i++; $i++;
} }
} }
} catch (SetteeRestClientException $e) { } catch (SetteeRestClientException $e) {
setteErrorHandler($e); setteErrorHandler($e);
} }
?> ?>
   
   
  options1 = {
options1 = { xaxis: {
xaxis : { mode: 'time',
mode : 'time', labelsAngle: 45
labelsAngle : 45 },
}, selection: {
selection : { mode: 'x'
mode : 'x' },
}, HtmlText: false,
HtmlText : false, title: 'Time'
title : 'Time' };
};  
   
// Draw graph with default options, overwriting with passed options  
function drawGraph (opts) {  
   
// Clone the options, so the 'options' variable always keeps intact. // Draw graph with default options, overwriting with passed options
o1 = Flotr._.extend(Flotr._.clone(options1), opts || {}); function drawGraph(opts) {
   
// Return a new graph. // Clone the options, so the 'options' variable always keeps intact.
return Flotr.draw( o1 = Flotr._.extend(Flotr._.clone(options1), opts || {});
document.getElementById("bydate"),  
[ d1 ],  
o1  
);  
}  
   
graph = drawGraph(); // Return a new graph.
  return Flotr.draw(
Flotr.EventAdapter.observe(document.getElementById("bydate"), 'flotr:select', function(area){ document.getElementById("bydate"),
// Draw selected area [ d1 ],
graph = drawGraph({ o1
xaxis : { min : area.x1, max : area.x2, mode : 'time', labelsAngle : 45 }, );
yaxis : { min : area.y1, max : area.y2 } }
});  
}); graph = drawGraph();
   
// When graph is clicked, draw the graph with default area. Flotr.EventAdapter.observe(document.getElementById("bydate"), 'flotr:select', function (area) {
Flotr.EventAdapter.observe(document.getElementById("bydate"), 'flotr:click', function () { graph = drawGraph(); }); // Draw selected area
  graph = drawGraph({
  xaxis: { min: area.x1, max: area.x2, mode: 'time', labelsAngle: 45 },
  yaxis: { min: area.y1, max: area.y2 }
  });
  });
   
  // When graph is clicked, draw the graph with default area.
  Flotr.EventAdapter.observe(document.getElementById("bydate"), 'flotr:click', function () {
  graph = drawGraph();
  });
   
}); });
}; };
   
var d2 = []; var d2 = [];
var agencylabels = []; var agencylabels = [];
function agencytrackformatter(obj) { function agencytrackformatter(obj) {
   
return agencylabels[Math.floor(obj.x)] +" = "+obj.y; return agencylabels[Math.floor(obj.x)] + " = " + obj.y;
   
} }
function agencytickformatter(val, axis) { function agencytickformatter(val, axis) {
if (agencylabels[Math.floor(val)]) { if (agencylabels[Math.floor(val)]) {
return '<p style="margin-top:8em;-webkit-transform:rotate(-90deg);">'+(agencylabels[Math.floor(val)])+"</b>"; return '<p style="margin-top:8em;-webkit-transform:rotate(-90deg);">' + (agencylabels[Math.floor(val)]) + "</b>";
   
} else { } else {
return ""; return "";
} }
} }
<?php <?php
try { try {
$rows = $foidocsdb->get_view("app", "byAgencyID?group=true",null, false,false,true)->rows; $rows = $foidocsdb->get_view("app", "byAgencyID?group=true",null, false,false,true)->rows;
   
   
$dataValues = Array(); $dataValues = Array();
$i = 0; $i = 0;
foreach ($rows as $row) { foreach ($rows as $row) {
echo " d2.push([".$i.", $row->value]);" . PHP_EOL; echo " d2.push([".$i.", $row->value]);" . PHP_EOL;
echo " agencylabels.push(['".str_replace("'","",$idtoname[$row->key])."']);" . PHP_EOL; echo " agencylabels.push(['".str_replace("'","",$idtoname[$row->key])."']);" . PHP_EOL;
   
$i++; $i++;
  }
  } catch (SetteeRestClientException $e) {
  setteErrorHandler($e);
} }
} catch (SetteeRestClientException $e) { ?>
setteErrorHandler($e); // Draw the graph
} Flotr.draw(
?> document.getElementById("byagency"),
// Draw the graph [d2],
Flotr.draw( {
document.getElementById("byagency"), bars: {
[d2], show: true,
{ horizontal: false,
bars : { shadowSize: 0,
show : true, barWidth: 0.5
horizontal : false, },
shadowSize : 0, mouse: {
barWidth : 0.5 track: true,
}, relative: true,
mouse : { trackFormatter: agencytrackformatter
track : true, },
relative : true, yaxis: {
trackFormatter: agencytrackformatter min: 0,
}, autoscaleMargin: 1
yaxis : { },
min : 0, xaxis: {
autoscaleMargin : 1 minorTickFreq: 1,
}, noTicks: agencylabels.length,
xaxis: { showMinorLabels: true,
minorTickFreq: 1, tickFormatter: agencytickformatter
noTicks: agencylabels.length, },
showMinorLabels: true, legend: {
tickFormatter: agencytickformatter show: false
}, }
legend: { }
show: false );
}  
}  
);  
</script> </script>
   
<?php <?php
include_footer_documents(); include_footer_documents();
?> ?>
   
   
<?xml version="1.0"?> <?xml version="1.0"?>
<!DOCTYPE cross-domain-policy SYSTEM "http://www.adobe.com/xml/dtds/cross-domain-policy.dtd"> <!DOCTYPE cross-domain-policy SYSTEM "http://www.adobe.com/xml/dtds/cross-domain-policy.dtd">
<cross-domain-policy> <cross-domain-policy>
   
   
<!-- Read this: www.adobe.com/devnet/articles/crossdomain_policy_file_spec.html --> <!-- Read this: www.adobe.com/devnet/articles/crossdomain_policy_file_spec.html -->
   
<!-- Most restrictive policy: --> <!-- Most restrictive policy: -->
<site-control permitted-cross-domain-policies="none"/> <site-control permitted-cross-domain-policies="none"/>
   
   
  <!-- Least restrictive policy: -->
<!-- Least restrictive policy: --> <!--
<!-- <site-control permitted-cross-domain-policies="all"/>
<site-control permitted-cross-domain-policies="all"/> <allow-access-from domain="*" to-ports="*" secure="false"/>
<allow-access-from domain="*" to-ports="*" secure="false"/> <allow-http-request-headers-from domain="*" headers="*" secure="false"/>
<allow-http-request-headers-from domain="*" headers="*" secure="false"/> -->
--> <!--
<!-- If you host a crossdomain.xml file with allow-access-from domain="*"
If you host a crossdomain.xml file with allow-access-from domain="*" and don’t understand all of the points described here, you probably
and don’t understand all of the points described here, you probably have a nasty security vulnerability. ~ simon willison
have a nasty security vulnerability. ~ simon willison -->
-->  
   
</cross-domain-policy> </cross-domain-policy>
   
  import sys, os
 
  import scrape
  from bs4 import BeautifulSoup
 
 
  listurl = "http://data.gov.au/data/"
  (url, mime_type, datasetlisthtml) = scrape.fetchURL(scrape.docsdb,
  listurl, "data", "AGIMO")
  soup = BeautifulSoup(datasetlisthtml)
  for atag in soup.find_all(class_='result-title'):
  if atag.has_key('href'):
  url = scrape.fullurl(listurl, atag['href'])
  (url, mime_type, html) = scrape.fetchURL(scrape.docsdb,
  url, "data", "AGIMO")
  hash = scrape.mkhash(scrape.canonurl(url))
  doc = scrape.docsdb.get(hash)
  if "metadata" not in doc.keys():
  doc['metadata'] = {}
  soup = BeautifulSoup(html)
  for metatag in soup.find_all('meta'):
  if metatag.has_key('name'):
  doc['metadata'][metatag['name']] = metatag['content']
  for list in soup.find_all('dl'):
  last_title = ""
  for child in list.children:
  if str(type(child)) != "<class 'bs4.element.NavigableString'>":
  if child.name == 'dt' and child.string != None:
  last_title = child.string.strip()
  if child.name == 'dd':
  #print last_title
  if last_title == "Download":
  for item in child.find_all("li"):
  link = item.find("a")
  format = item.find(property="dc:format")
  linkobj = {"href":link['href'].replace("/bye?","").strip(), "name": link.string.strip(),
  "format": format.string.strip(), "size": format.next_sibling.string.strip()}
  doc['metadata'][last_title] = linkobj
 
  else:
  atags = child.find_all('a')
  if len(atags) < 2:
  [s.extract() for s in child(class_='viewAll')]
  doc['metadata'][last_title] = ''.join(child.stripped_strings).strip()
  else:
  doc['metadata'][last_title] = [item.string.replace(",","").strip() for item in atags]
  print doc['metadata']
  sys.exit("ggg")
<?php <?php
   
include('template.inc.php'); include('template.inc.php');
include_header_documents("Entries by Date"); include_header_documents("Entries by Date");
include_once('../include/common.inc.php'); include_once('../include/common.inc.php');
$endkey = (isset($_REQUEST['end_key']) ? $_REQUEST['end_key'] : '9999-99-99'); $endkey = (isset($_REQUEST['end_key']) ? $_REQUEST['end_key'] : '9999-99-99');
?> ?>
<div class="headline">Read all the information released by Australian Federal Government agencies under the FOI Act in one place!</div> <div class="headline">Read all the information released by Australian Federal Government agencies under the FOI Act in
<a style='float:right' href="rss.xml.php"><img src="img/feed-icon-14x14.png" alt="RSS Icon"/> All Agencies RSS Feed</a><br> one place!
  </div>
  <a style='float:right' href="rss.xml.php"><img src="img/feed-icon-14x14.png" alt="RSS Icon"/> All Agencies RSS Feed</a>
  <br>
<?php <?php
/*$agenciesdb = $server->get_db('disclosr-agencies'); /*$agenciesdb = $server->get_db('disclosr-agencies');
   
$idtoname = Array(); $idtoname = Array();
foreach ($agenciesdb->get_view("app", "byCanonicalName")->rows as $row) { foreach ($agenciesdb->get_view("app", "byCanonicalName")->rows as $row) {
$idtoname[$row->id] = trim($row->value->name); $idtoname[$row->id] = trim($row->value->name);
} }
$foidocsdb = $server->get_db('disclosr-foidocuments'); $foidocsdb = $server->get_db('disclosr-foidocuments');
try { try {
$rows = $foidocsdb->get_view("app", "byDate", Array($endkey, '0000-00-00'), true, 20)->rows; $rows = $foidocsdb->get_view("app", "byDate", Array($endkey, '0000-00-00'), true, 20)->rows;
if ($rows) { if ($rows) {
foreach ($rows as $key => $row) { foreach ($rows as $key => $row) {
echo displayLogEntry($row, $idtoname); echo displayLogEntry($row, $idtoname);
if (!isset($startkey)) $startkey = $row->key; if (!isset($startkey)) $startkey = $row->key;
$endkey = $row->key; $endkey = $row->key;
} }
} }
} catch (SetteeRestClientException $e) { } catch (SetteeRestClientException $e) {
setteErrorHandler($e); setteErrorHandler($e);
} }
echo "<a class='btn btn-large btn-primary' href='?end_key=$endkey' style='float:right;'>next page <i class='icon-circle-arrow-right icon-white'></i></a>"; echo "<a class='btn btn-large btn-primary' href='?end_key=$endkey' style='float:right;'>next page <i class='icon-circle-arrow-right icon-white'></i></a>";
*/ */
include_footer_documents(); include_footer_documents();
?> ?>
   
<?php <?php
   
include('template.inc.php'); include('template.inc.php');
include_header_documents("List of Disclosure Logs"); include_header_documents("List of Disclosure Logs");
include_once('../include/common.inc.php'); include_once('../include/common.inc.php');
   
echo "<table> echo "<table>
<tr><th>Agency Name</th><th>Disclosure Log URL recorded?</th><th>Do we monitor this URL?</th></tr>"; <tr><th>Agency Name</th><th>Disclosure Log URL recorded?</th><th>Do we monitor this URL?</th></tr>";
$agenciesdb = $server->get_db('disclosr-agencies'); $agenciesdb = $server->get_db('disclosr-agencies');
$docsdb = $server->get_db('disclosr-documents'); $docsdb = $server->get_db('disclosr-documents');
$agencies = 0; $agencies = 0;
$disclogs = 0; $disclogs = 0;
$red = 0; $red = 0;
$green = 0; $green = 0;
$yellow = 0; $yellow = 0;
$orange = 0; $orange = 0;
try { try {
$rows = $agenciesdb->get_view("app", "byCanonicalName", null, true)->rows; $rows = $agenciesdb->get_view("app", "byCanonicalName", null, true)->rows;
   
   
if ($rows) { if ($rows) {
foreach ($rows as $row) { foreach ($rows as $row) {
if ((!isset($row->value->status) || $row->value->status != "suspended") && isset($row->value->foiEmail)) { if ((!isset($row->value->status) || $row->value->status != "suspended") && isset($row->value->foiEmail)) {
echo "<tr><td>"; echo "<tr><td>";
if (isset($row->value->website)) echo "<a href='" . $row->value->website . "'>"; if (isset($row->value->website)) echo "<a href='" . $row->value->website . "'>";
echo "<b>" . $row->value->name . "</b>"; echo "<b>" . $row->value->name . "</b>";
if (isset($row->value->website)) echo "</a>"; if (isset($row->value->website)) echo "</a>";
if ($ENV == "DEV") if ($ENV == "DEV")
echo "<br>(" . $row->id . ")"; echo "<br>(" . $row->id . ")";
echo "</td>\n"; echo "</td>\n";
$agencies++; $agencies++;
   
echo "<td>"; echo "<td>";
if (isset($row->value->FOIDocumentsURL)) { if (isset($row->value->FOIDocumentsURL)) {
$disclogs++; $disclogs++;
echo '<a href="' . $row->value->FOIDocumentsURL . '">' echo '<a href="' . $row->value->FOIDocumentsURL . '">'
. $row->value->FOIDocumentsURL . '</a>'; . $row->value->FOIDocumentsURL . '</a>';
if ($ENV == "DEV") if ($ENV == "DEV")
echo '<br><small>(<a href="viewDocument.php?hash=' . md5($row->value->FOIDocumentsURL) . '">' echo '<br><small>(<a href="viewDocument.php?hash=' . md5($row->value->FOIDocumentsURL) . '">'
. 'view local copy</a>)</small>'; . 'view local copy</a>)</small>';
} else { } else {
echo "<font color='red'><abbr title='No'>✘</abbr></font>"; echo "<font color='red'><abbr title='No'>✘</abbr></font>";
} }
echo "</td>\n<td>"; echo "</td>\n<td>";
if (isset($row->value->FOIDocumentsURL)) { if (isset($row->value->FOIDocumentsURL)) {
if (file_exists("./scrapers/" . $row->id . '.py')) { if (file_exists("./scrapers/" . $row->id . '.py')) {
echo "<font color='green'><abbr title='Yes'>✔</abbr></font>"; echo "<font color='green'><abbr title='Yes'>✔</abbr></font>";
$green++; $green++;
} else if (file_exists("./scrapers/" . $row->id . '.txt')) { } else if (file_exists("./scrapers/" . $row->id . '.txt')) {
if (trim(file_get_contents("./scrapers/" . $row->id . '.txt')) == "no disclog") { if (trim(file_get_contents("./scrapers/" . $row->id . '.txt')) == "no disclog") {
echo "<font color='yellow'><abbr title='No log table exists at URL to scrape'><b>◎</b></abbr></font>"; echo "<font color='yellow'><abbr title='No log table exists at URL to scrape'><b>◎</b></abbr></font>";
$yellow++; $yellow++;
} else { } else {
echo file_get_contents("./scrapers/" . $row->id . '.txt'); echo file_get_contents("./scrapers/" . $row->id . '.txt');
echo "<font color='orange'><abbr title='Work in progress'><b>▬</b></abbr></font>"; echo "<font color='orange'><abbr title='Work in progress'><b>▬</b></abbr></font>";
$orange++; $orange++;
} }
} else { } else {
echo "<font color='red'><abbr title='No'>✘</abbr></font>"; echo "<font color='red'><abbr title='No'>✘</abbr></font>";
$red++; $red++;
} }
} }
echo "</td></tr>\n"; echo "</td></tr>\n";
} }
} }
} }
} catch (SetteeRestClientException $e) { } catch (SetteeRestClientException $e) {
setteErrorHandler($e); setteErrorHandler($e);
} }
echo "</table>"; echo "</table>";
echo $agencies . " agencies, " . round(($disclogs / $agencies) * 100) . "% with disclosure logs; " echo $agencies . " agencies, " . round(($disclogs / $agencies) * 100) . "% with disclosure logs; "
. round(($green / $disclogs) * 100) . "% logs with scrapers " . round(($red / $disclogs) * 100) . "% logs without scrapers " . round(($orange / $disclogs) * 100) . "% logs Work-In-Progress scrapers "; . round(($green / $disclogs) * 100) . "% logs with scrapers " . round(($red / $disclogs) * 100) . "% logs without scrapers " . round(($orange / $disclogs) * 100) . "% logs Work-In-Progress scrapers ";
   
include_footer_documents(); include_footer_documents();
?> ?>
   
<?php <?php
   
include_once("../include/common.inc.php"); include_once("../include/common.inc.php");
   
   
setlocale(LC_CTYPE, 'C'); setlocale(LC_CTYPE, 'C');
   
$db = $server->get_db('disclosr-foidocuments'); $db = $server->get_db('disclosr-foidocuments');
$headers = Array(); $headers = Array();
try { try {
$rows = $db->get_view("app", "fieldNames?group=true", null, true)->rows; $rows = $db->get_view("app", "fieldNames?group=true", null, true)->rows;
   
$dataValues = Array(); $dataValues = Array();
foreach ($rows as $row) { foreach ($rows as $row) {
$headers[] = $row->key; $headers[] = $row->key;
} }
} catch (SetteeRestClientException $e) { } catch (SetteeRestClientException $e) {
setteErrorHandler($e); setteErrorHandler($e);
} }
   
$fp = fopen('php://output', 'w'); $fp = fopen('php://output', 'w');
if ($fp && $db) { if ($fp && $db) {
header('Content-Type: text/csv; charset=utf-8'); header('Content-Type: text/csv; charset=utf-8');
header('Content-Disposition: attachment; filename="export.' . date("c") . '.csv"'); header('Content-Disposition: attachment; filename="export.' . date("c") . '.csv"');
header('Pragma: no-cache'); header('Pragma: no-cache');
header('Expires: 0'); header('Expires: 0');
fputcsv($fp, $headers); fputcsv($fp, $headers);
try { try {
$agencies = $db->get_view("app", "all", null, true)->rows; $agencies = $db->get_view("app", "all", null, true)->rows;
//print_r($rows); //print_r($rows);
foreach ($agencies as $agency) { foreach ($agencies as $agency) {
// print_r($agency); // print_r($agency);
   
if (!isset($agency->value->status)) { if (!isset($agency->value->status)) {
$row = Array(); $row = Array();
$agencyArray = object_to_array($agency->value); $agencyArray = object_to_array($agency->value);
foreach ($headers as $fieldName) { foreach ($headers as $fieldName) {
if (isset($agencyArray[$fieldName])) { if (isset($agencyArray[$fieldName])) {
if (is_array($agencyArray[$fieldName])) { if (is_array($agencyArray[$fieldName])) {
$row[] = implode(";", $agencyArray[$fieldName]); $row[] = implode(";", $agencyArray[$fieldName]);
} else { } else {
$row[] = str_replace(Array("\n", '"', "\t"),"",$agencyArray[$fieldName]); $row[] = str_replace(Array("\n", '"', "\t"), "", $agencyArray[$fieldName]);
} }
} else { } else {
$row[] = ""; $row[] = "";
} }
} }
   
fputcsv($fp, array_values($row)); fputcsv($fp, array_values($row));
   
   
} }
} }
} catch (SetteeRestClientException $e) { } catch (SetteeRestClientException $e) {
setteErrorHandler($e); setteErrorHandler($e);
} }
   
die; die;
} }
?> ?>
   
 
import sys import sys
import os import os
   
sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../')) sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
import scrape import scrape
from bs4 import BeautifulSoup from bs4 import BeautifulSoup
from time import mktime from time import mktime
import feedparser import feedparser
import abc import abc
import unicodedata import unicodedata
import re import re
import dateutil import dateutil
from dateutil.parser import * from dateutil.parser import *
from datetime import * from datetime import *
import codecs import codecs
   
import difflib import difflib
   
from StringIO import StringIO from StringIO import StringIO
   
from pdfminer.pdfparser import PDFDocument, PDFParser from pdfminer.pdfparser import PDFDocument, PDFParser
from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter, process_pdf from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter, process_pdf
from pdfminer.pdfdevice import PDFDevice, TagExtractor from pdfminer.pdfdevice import PDFDevice, TagExtractor
from pdfminer.converter import TextConverter from pdfminer.converter import TextConverter
from pdfminer.cmapdb import CMapDB from pdfminer.cmapdb import CMapDB
from pdfminer.layout import LAParams from pdfminer.layout import LAParams
   
   
class GenericDisclogScraper(object): class GenericDisclogScraper(object):
__metaclass__ = abc.ABCMeta __metaclass__ = abc.ABCMeta
agencyID = None agencyID = None
disclogURL = None disclogURL = None
   
def remove_control_chars(self, input): def remove_control_chars(self, input):
return "".join([i for i in input if ord(i) in range(32, 127)]) return "".join([i for i in input if ord(i) in range(32, 127)])
   
def getAgencyID(self): def getAgencyID(self):
""" disclosr agency id """ """ disclosr agency id """
if self.agencyID is None: if self.agencyID is None:
self.agencyID = os.path.basename(sys.argv[0]).replace(".py", "") self.agencyID = os.path.basename(sys.argv[0]).replace(".py", "")
return self.agencyID return self.agencyID
   
def getURL(self): def getURL(self):
""" disclog URL""" """ disclog URL"""
if self.disclogURL is None: if self.disclogURL is None:
agency = scrape.agencydb.get(self.getAgencyID()) agency = scrape.agencydb.get(self.getAgencyID())
self.disclogURL = agency['FOIDocumentsURL'] self.disclogURL = agency['FOIDocumentsURL']
return self.disclogURL return self.disclogURL
   
@abc.abstractmethod @abc.abstractmethod
def doScrape(self): def doScrape(self):
""" do the scraping """ """ do the scraping """
return return
   
   
class GenericHTMLDisclogScraper(GenericDisclogScraper):
    """Scraper for disclosure logs published as free-form HTML pages.

    Saves a generic "log updated" record keyed by the hash of the page
    content, with an HTML diff against the previously stored copy.
    """

    def doScrape(self):
        foidocsdb = scrape.couch['disclosr-foidocuments']
        (url, mime_type, rcontent) = scrape.fetchURL(scrape.docsdb,
             self.getURL(), "foidocuments", self.getAgencyID())
        content = rcontent
        dochash = scrape.mkhash(content)
        doc = foidocsdb.get(dochash)
        if doc is None:
            print("saving " + dochash)
            description = "This log may have updated but as it was not in a table last time we viewed it, we cannot extract what has changed. Please refer to the agency's website Disclosure Log to see the most recent entries"
            last_attach = scrape.getLastAttachment(scrape.docsdb, self.getURL())
            # Bug fix: "diff" was undefined when there was no previous
            # attachment, raising NameError on first scrape of a URL.
            diff = ""
            if last_attach is not None:
                html_diff = difflib.HtmlDiff()
                diff = html_diff.make_table(last_attach.read().split('\n'),
                    content.split('\n'))
            edate = date.today().strftime("%Y-%m-%d")
            doc = {'_id': dochash, 'agencyID': self.getAgencyID()
                , 'url': self.getURL(), 'docID': dochash,
                "date": edate, "title": "Disclosure Log Updated",
                "description": description, "diff": diff}
            foidocsdb.save(doc)
        else:
            print("already saved")
   
   
class GenericPDFDisclogScraper(GenericDisclogScraper):
    """Scraper for disclosure logs published as PDF documents.

    Extracts the plain text with pdfminer and stores a single record
    keyed by the hash of the extracted text.
    """

    def doScrape(self):
        foidocsdb = scrape.couch['disclosr-foidocuments']
        (url, mime_type, content) = scrape.fetchURL(scrape.docsdb,
             self.getURL(), "foidocuments", self.getAgencyID())
        # Run pdfminer over an in-memory copy of the downloaded bytes.
        laparams = LAParams()
        rsrcmgr = PDFResourceManager(caching=True)
        outfp = StringIO()
        device = TextConverter(rsrcmgr, outfp, codec='utf-8',
             laparams=laparams)
        fp = StringIO()
        fp.write(content)

        process_pdf(rsrcmgr, device, fp, set(), caching=True,
             check_extractable=True)
        description = outfp.getvalue()
        fp.close()
        device.close()
        outfp.close()
        # The extracted text (not the raw PDF) keys the document, so a
        # re-rendered but textually identical log is not saved twice.
        dochash = scrape.mkhash(description)
        doc = foidocsdb.get(dochash)
        if doc is None:
            print("saving " + dochash)
            edate = date.today().strftime("%Y-%m-%d")
            doc = {'_id': dochash, 'agencyID': self.getAgencyID()
                , 'url': self.getURL(), 'docID': dochash,
                "date": edate, "title": "Disclosure Log Updated",
                "description": description}
            foidocsdb.save(doc)
        else:
            print("already saved")
   
   
class GenericDOCXDisclogScraper(GenericDisclogScraper):
    """Scraper for disclosure logs published as Word (.docx) documents.

    Unzips the docx payload, extracts the paragraph text and stores a
    single record keyed by the hash of the extracted text.
    """

    def doScrape(self):
        foidocsdb = scrape.couch['disclosr-foidocuments']
        (url, mime_type, content) = scrape.fetchURL(scrape.docsdb
            , self.getURL(), "foidocuments", self.getAgencyID())
        # Bug fix: previously passed the "file" builtin to ZipFile instead
        # of the downloaded payload; wrap the bytes in a file-like object.
        mydoc = zipfile.ZipFile(StringIO(content))
        xmlcontent = mydoc.read('word/document.xml')
        document = etree.fromstring(xmlcontent)
        ## Fetch all the text out of the document we just created
        paratextlist = getdocumenttext(document)
        # Make explicit unicode version
        newparatextlist = []
        for paratext in paratextlist:
            newparatextlist.append(paratext.encode("utf-8"))
        ## Join paragraphs with blank lines between them
        description = '\n\n'.join(newparatextlist).strip(' \t\n\r')
        dochash = scrape.mkhash(description)
        doc = foidocsdb.get(dochash)

        if doc is None:
            print("saving " + dochash)
            # Bug fix: "time()" called the module object and raised
            # TypeError; use date.today() like the other scrapers.
            edate = date.today().strftime("%Y-%m-%d")
            doc = {'_id': dochash, 'agencyID': self.getAgencyID()
                , 'url': self.getURL(), 'docID': dochash,
                "date": edate, "title": "Disclosure Log Updated",
                "description": description}
            foidocsdb.save(doc)
        else:
            print("already saved")
   
   
class GenericRSSDisclogScraper(GenericDisclogScraper): class GenericRSSDisclogScraper(GenericDisclogScraper):
  def doScrape(self):
def doScrape(self): foidocsdb = scrape.couch['disclosr-foidocuments']
foidocsdb = scrape.couch['disclosr-foidocuments'] (url, mime_type, content) = scrape.fetchURL(scrape.docsdb,
(url, mime_type, content) = scrape.fetchURL(scrape.docsdb, self.getURL(), "foidocuments", self.getAgencyID())
self.getURL(), "foidocuments", self.getAgencyID()) feed = feedparser.parse(content)
feed = feedparser.parse(content) for entry in feed.entries:
for entry in feed.entries: #print entry
#print entry print entry.id
print entry.id dochash = scrape.mkhash(entry.id)
dochash = scrape.mkhash(entry.id) doc = foidocsdb.get(dochash)
doc = foidocsdb.get(dochash) #print doc
#print doc if doc is None:
if doc is None: print "saving " + dochash
print "saving " + dochash edate = datetime.fromtimestamp(
edate = datetime.fromtimestamp( mktime(entry.published_parsed)).strftime("%Y-%m-%d")
mktime(entry.published_parsed)).strftime("%Y-%m-%d") doc = {'_id': dochash, 'agencyID': self.getAgencyID(),
doc = {'_id': dochash, 'agencyID': self.getAgencyID(), 'url': entry.link, 'docID': entry.id,
'url': entry.link, 'docID': entry.id, "date": edate, "title": entry.title}
"date": edate, "title": entry.title} self.getDescription(entry, entry, doc)
self.getDescription(entry, entry, doc) foidocsdb.save(doc)
foidocsdb.save(doc) else:
else: print "already saved"
print "already saved"  
  def getDescription(self, content, entry, doc):
def getDescription(self, content, entry, doc): """ get description from rss entry"""
""" get description from rss entry""" doc.update({'description': content.summary})
doc.update({'description': content.summary})  
return return
   
   
class GenericOAICDisclogScraper(GenericDisclogScraper):
    """Scraper for OAIC-style disclosure logs presented as HTML tables.

    Subclasses describe the table layout via getColumns()/getColumnCount();
    each qualifying row becomes one couchdb document keyed by the hash of
    url + row id (or url + date when the id cell has no plain string).
    """
    __metaclass__ = abc.ABCMeta

    @abc.abstractmethod
    def getColumns(self, columns):
        """ rearranges columns if required """
        return

    def getColumnCount(self):
        # Default OAIC layout: id, date, title, description, notes.
        return 5

    def getDescription(self, content, entry, doc):
        """ get description from rss entry"""
        descriptiontxt = ""
        for string in content.stripped_strings:
            descriptiontxt = descriptiontxt + " \n" + string
        doc.update({'description': descriptiontxt})

    def getTitle(self, content, entry, doc):
        doc.update({'title': (''.join(content.stripped_strings))})

    def getTable(self, soup):
        # Overridable hook: default to the first table on the page.
        return soup.table

    def getRows(self, table):
        return table.find_all('tr')

    def getDate(self, content, entry, doc):
        date = ''.join(content.stripped_strings).strip()
        # Drop any parenthesised trailer before parsing.
        (a, b, c) = date.partition("(")
        # Work around a recurring source-data typo.
        date = self.remove_control_chars(a.replace("Octber", "October"))
        print(date)
        edate = parse(date, dayfirst=True, fuzzy=True).strftime("%Y-%m-%d")
        print(edate)
        doc.update({'date': edate})
        return

    def getLinks(self, content, entry, doc):
        links = []
        for atag in entry.find_all("a"):
            if atag.has_key('href'):
                links.append(scrape.fullurl(content, atag['href']))
        if links != []:
            doc.update({'links': links})
        return

    def doScrape(self):
        foidocsdb = scrape.couch['disclosr-foidocuments']
        (url, mime_type, content) = scrape.fetchURL(scrape.docsdb,
             self.getURL(), "foidocuments", self.getAgencyID())
        if content is not None:
            if mime_type == "text/html" or mime_type == "application/xhtml+xml" or mime_type == "application/xml":
                # http://www.crummy.com/software/BeautifulSoup/documentation.html
                print("parsing")
                soup = BeautifulSoup(content)
                table = self.getTable(soup)
                for row in self.getRows(table):
                    columns = row.find_all('td')
                    # Bug fix: compare ints with ==, not "is" (identity
                    # only happens to work for interned small ints).
                    if len(columns) == self.getColumnCount():
                        (id, date, title,
                         description, notes) = self.getColumns(columns)
                        print(self.remove_control_chars(
                            ''.join(id.stripped_strings)))
                        if id.string is None:
                            dochash = scrape.mkhash(
                                self.remove_control_chars(
                                    url + (''.join(date.stripped_strings))))
                        else:
                            dochash = scrape.mkhash(
                                self.remove_control_chars(
                                    url + (''.join(id.stripped_strings))))
                        doc = foidocsdb.get(dochash)

                        if doc is None:
                            print("saving " + dochash)
                            doc = {'_id': dochash,
                                'agencyID': self.getAgencyID(),
                                'url': self.getURL(),
                                'docID': (''.join(id.stripped_strings))}
                            self.getLinks(self.getURL(), row, doc)
                            self.getTitle(title, row, doc)
                            self.getDate(date, row, doc)
                            self.getDescription(description, row, doc)
                            if notes is not None:
                                doc.update({'notes': (
                                    ''.join(notes.stripped_strings))})
                            # Placeholder titles seen in the wild; rows
                            # carrying them are headers, not entries.
                            badtitles = ['-', 'Summary of FOI Request'
                                , 'FOI request(in summary form)'
                                , 'Summary of FOI request received by the ASC',
                                'Summary of FOI request received by agency/minister',
                                'Description of Documents Requested', 'FOI request',
                                'Description of FOI Request', 'Summary of request', 'Description', 'Summary',
                                'Summary of FOIrequest received by agency/minister',
                                'Summary of FOI request received', 'Description of FOI Request',
                                "FOI request", 'Results 1 to 67 of 67']
                            if doc['title'] not in badtitles\
                               and doc['description'] != '':
                                print("saving")
                                foidocsdb.save(doc)
                        else:
                            print("already saved " + dochash)

                    elif len(row.find_all('th')) == self.getColumnCount():
                        print("header row")

                    else:
                        print("ERROR number of columns incorrect")
                        print(row)
   
<?php
include('template.inc.php');
include_header_documents("");
include_once('../include/common.inc.php');
// Paging cursor for the byDate view, descending from end_key/end_docid.
$endkey = (isset($_REQUEST['end_key']) ? $_REQUEST['end_key'] : '9999-99-99');
$enddocid = (isset($_REQUEST['end_docid']) ? $_REQUEST['end_docid'] : null);
?>
<div class="headline">Read all the information released by Australian Federal Government agencies under the FOI Act in
    one place!
</div>
<a style='float:right' href="rss.xml.php"><img src="img/feed-icon-14x14.png" alt="RSS Icon"/> All Agencies RSS Feed</a>
<br>
<?php
$agenciesdb = $server->get_db('disclosr-agencies');

// Map agency ids to display names for log entry rendering.
$idtoname = Array();
foreach ($agenciesdb->get_view("app", "byCanonicalName")->rows as $row) {
    $idtoname[$row->id] = trim($row->value->name);
}
$foidocsdb = $server->get_db('disclosr-foidocuments');
try {
    $rows = $foidocsdb->get_view("app", "byDate", Array($endkey, '0000-00-00'), true, 20, null, $enddocid)->rows;
    if ($rows) {
        foreach ($rows as $key => $row) {
            echo displayLogEntry($row, $idtoname);
            if (!isset($startkey))
                $startkey = $row->key;
            $endkey = $row->key;
            $enddocid = $row->value->_id;
        }
    }
} catch (SetteeRestClientException $e) {
    setteErrorHandler($e);
}
// Security fix: the cursor values can still hold raw request input when
// no rows came back; urlencode() them before echoing into the link to
// prevent reflected XSS / broken markup.
echo "<a class='btn btn-large btn-primary' href='?end_key=" . urlencode($endkey) . "&amp;end_docid=" . urlencode($enddocid) . "' style='float:right;'>next page <i class='icon-circle-arrow-right icon-white'></i></a>";
include_footer_documents();
?>
   
<?php
// Redirect legacy disclo.gs traffic to disclosurelo.gs, preserving any
// subdomain prefix and the requested path. The Google site-verification
// files are answered inline so ownership checks keep working.
$subdomain = str_replace('disclo.gs', '', $_SERVER['SERVER_NAME']);
$script = $_SERVER['REQUEST_URI'];

$verifications = array(
    '/google676a414ad086cefb.html' => 'google676a414ad086cefb.html',
    '/googlebcce906c6b666bb8.html' => 'googlebcce906c6b666bb8.html',
);
if (isset($verifications[$script])) {
    echo 'google-site-verification: ' . $verifications[$script];
    exit();
}

header('HTTP/1.1 301 Moved Permanently');
header('Location: http://' . $subdomain . 'disclosurelo.gs' . $script);
exit();
?>
   
   
<?php

// Agency X updated Y, new files, diff of plain text/link text,
// feed for just one agency (?id=...) or all agencies.
// Uses the Universal Feed Generator library.
include("../lib/FeedWriter/FeedTypes.php");
include_once('../include/common.inc.php');

$feedWriter = new RSS2FeedWriter();

// Map agency ids to display names for item titles/authors.
$idtoname = Array();
$agenciesdb = $server->get_db('disclosr-agencies');
foreach ($agenciesdb->get_view("app", "byCanonicalName")->rows as $row) {
    $idtoname[$row->id] = trim($row->value->name);
}
$foidocsdb = $server->get_db('disclosr-foidocuments');
if (isset($_REQUEST['id'])) {
    $rows = $foidocsdb->get_view("app", "byAgencyID", $_REQUEST['id'], false, false, false)->rows;
    $title = $idtoname[$_REQUEST['id']];
} else {
    $rows = $foidocsdb->get_view("app", "byDate", Array('9999-99-99', '0000-00-00', 50), true)->rows;
    $title = 'All Agencies';
}
// Channel metadata.
$feedWriter->setTitle('disclosurelo.gs Newest Entries - ' . $title);
$feedWriter->setLink('http://disclosurelo.gs/rss.xml.php' . (isset($_REQUEST['id']) ? '?id=' . $_REQUEST['id'] : ''));
$feedWriter->setDescription('disclosurelo.gs Newest Entries - ' . $title);
$feedWriter->setChannelElement('language', 'en-us');
$feedWriter->setChannelElement('pubDate', date(DATE_RSS, time()));

// One feed item per document row.
foreach ($rows as $row) {
    $item = $feedWriter->createNewItem();
    $item->setTitle($row->value->title);
    $item->setLink("http://disclosurelo.gs/view.php?id=" . $row->value->_id);
    $item->setDate(strtotime($row->value->date));
    $item->setDescription(displayLogEntry($row, $idtoname));
    $item->setAuthor($idtoname[$row->value->agencyID]);
    $item->addElement('guid', "http://disclosurelo.gs/view.php?id=" . $row->value->_id, array('isPermaLink' => 'true'));
    $feedWriter->addItem($item);
}
// Everything is set up; emit the feed.
$feedWriter->generateFeed();
?>
   
#http://packages.python.org/CouchDB/client.html #http://packages.python.org/CouchDB/client.html
import couchdb import couchdb
import urllib2 import urllib2
from BeautifulSoup import BeautifulSoup from BeautifulSoup import BeautifulSoup
import re import re
import hashlib import hashlib
from urlparse import urljoin from urlparse import urljoin
import time import time
import os import os
import mimetypes import mimetypes
import urllib import urllib
import urlparse import urlparse
import socket import socket
   
   
def mkhash(input):
    """Return the hex MD5 digest of input, used as a couchdb document id."""
    digest = hashlib.md5(input).hexdigest()
    return digest.encode("utf-8")
   
   
def canonurl(url):
    r"""Return the canonical, ASCII-encoded form of a UTF-8 encoded URL, or ''
    if the URL looks invalid.
    >>> canonurl('\xe2\x9e\xa1.ws') # tinyarro.ws
    'http://xn--hgi.ws/'
    """
    # Strip surrounding whitespace and default the scheme to http://.
    url = url.strip()
    if not url:
        return ''
    if not urlparse.urlsplit(url).scheme:
        url = 'http://' + url

    # Split the URL into its components.
    scheme, netloc, path, query, fragment = urlparse.urlsplit(url)

    # Scheme must be a letter followed by letters, digits and '+-.' chars.
    if not re.match(r'[a-z][-+.a-z0-9]*$', scheme, flags=re.I):
        return ''
    scheme = str(scheme)

    # Host must look like sub.domain.<1-to-6-TLD-chars>[:port]; the host
    # part is IDNA-encoded down to plain ASCII.
    match = re.match(r'(.+\.[a-z0-9]{1,6})(:\d{1,5})?$', netloc, flags=re.I)
    if not match:
        return ''
    domain, port = match.groups()
    netloc = domain + (port if port else '')
    netloc = netloc.encode('idna')

    # %-encode non-ASCII characters in path, query and fragment
    # (unquote first so already-escaped input is not double-escaped).
    if not path:
        path = '/'  # eg: 'http://google.com' -> 'http://google.com/'
    path = urllib.quote(urllib.unquote(path.encode('utf-8')), safe='/;')
    query = urllib.quote(urllib.unquote(query.encode('utf-8')), safe='=&?/')
    fragment = urllib.quote(urllib.unquote(fragment.encode('utf-8')))

    # Reassemble, capped at 4KB.
    return urlparse.urlunsplit((scheme, netloc, path, query, fragment))[:4096]
   
def fullurl(url,href):  
href = href.replace(" ","%20") def fullurl(url, href):
href = re.sub('#.*$','',href) href = href.replace(" ", "%20")
return urljoin(url,href) href = re.sub('#.*$', '', href)
  return urljoin(url, href)
   
#http://diveintopython.org/http_web_services/etags.html #http://diveintopython.org/http_web_services/etags.html
class NotModifiedHandler(urllib2.BaseHandler):
    """urllib2 handler that turns HTTP 304 into a normal response object.

    See http://diveintopython.org/http_web_services/etags.html
    """

    def http_error_304(self, req, fp, code, message, headers):
        # Wrap the response so callers can inspect .code == 304.
        resp = urllib2.addinfourl(fp, headers, req.get_full_url())
        resp.code = code
        return resp
   
def getLastAttachment(docsdb,url):  
  def getLastAttachment(docsdb, url):
hash = mkhash(url) hash = mkhash(url)
doc = docsdb.get(hash) doc = docsdb.get(hash)
if doc != None: if doc != None:
last_attachment_fname = doc["_attachments"].keys()[-1] last_attachment_fname = doc["_attachments"].keys()[-1]
last_attachment = docsdb.get_attachment(doc,last_attachment_fname) last_attachment = docsdb.get_attachment(doc, last_attachment_fname)
return last_attachment return last_attachment
else: else:
return None return None
   
   
def fetchURL(docsdb, url, fieldName, agencyID, scrape_again=True):
    """Fetch url with couchdb-backed ETag/Last-Modified caching.

    Returns (url, mime_type, content); (None, None, None) for invalid
    URLs or download errors. The fetched body is stored as a couchdb
    attachment named "<epoch>-<basename>" on a per-URL document.
    """
    url = canonurl(url)
    hash = mkhash(url)
    print("Fetching %s (%s)" % (url, hash))
    # Bug fix: test for None/empty BEFORE calling startswith, which
    # would raise AttributeError on a None url.
    if url is None or url == "" or url.startswith("mailto") or url.startswith("javascript") or url.startswith("#"):
        print("Not a valid HTTP url")
        return (None, None, None)
    req = urllib2.Request(url)
    doc = docsdb.get(hash)
    if doc is None:
        doc = {'_id': hash, 'agencyID': agencyID, 'url': url, 'fieldName': fieldName, 'type': 'website'}
    else:
        if (('page_scraped' in doc) and (time.time() - doc['page_scraped']) < 60 * 24 * 14 * 1000):
            # Too soon since the last scrape: serve the stored copy.
            print("Uh oh, trying to scrape URL again too soon!" + hash)
            last_attachment_fname = doc["_attachments"].keys()[-1]
            last_attachment = docsdb.get_attachment(doc, last_attachment_fname)
            return (doc['url'], doc['mime_type'], last_attachment.read())
        if scrape_again == False:
            print("Not scraping this URL again as requested")
            # Bug fix: "content" was undefined on this path (NameError);
            # return the stored attachment instead.
            last_attachment_fname = doc["_attachments"].keys()[-1]
            last_attachment = docsdb.get_attachment(doc, last_attachment_fname)
            return (doc['url'], doc['mime_type'], last_attachment.read())

    req.add_header("User-Agent", "Mozilla/4.0 (compatible; Prometheus webspider; owner maxious@lambdacomplex.org)")
    # If there is a previous version stored in couchdb, send its caching
    # validators so the server can answer 304 Not Modified.
    if 'etag' in doc:
        req.add_header("If-None-Match", doc['etag'])
    if 'last_modified' in doc:
        req.add_header("If-Modified-Since", doc['last_modified'])

    opener = urllib2.build_opener(NotModifiedHandler())
    try:
        url_handle = opener.open(req, None, 20)
        doc['url'] = url_handle.geturl()  # may have followed a redirect to a new url
        headers = url_handle.info()  # the addinfourls have the .info() too
        doc['etag'] = headers.getheader("ETag")
        doc['last_modified'] = headers.getheader("Last-Modified")
        doc['date'] = headers.getheader("Date")
        doc['page_scraped'] = time.time()
        doc['web_server'] = headers.getheader("Server")
        doc['via'] = headers.getheader("Via")
        doc['powered_by'] = headers.getheader("X-Powered-By")
        doc['file_size'] = headers.getheader("Content-Length")
        content_type = headers.getheader("Content-Type")
        if content_type is not None:
            doc['mime_type'] = content_type.split(";")[0]
        else:
            # No Content-Type header: fall back to guessing from the URL.
            (type, encoding) = mimetypes.guess_type(url)
            doc['mime_type'] = type
        if hasattr(url_handle, 'code'):
            if url_handle.code == 304:
                print("the web page has not been modified" + hash)
                last_attachment_fname = doc["_attachments"].keys()[-1]
                last_attachment = docsdb.get_attachment(doc, last_attachment_fname)
                return (doc['url'], doc['mime_type'], last_attachment.read())
            else:
                print("new webpage loaded")
                content = url_handle.read()
                docsdb.save(doc)
                doc = docsdb.get(hash)  # need to get a _rev
                # store as attachment epoch-filename
                docsdb.put_attachment(doc, content, str(time.time()) + "-" + os.path.basename(url), doc['mime_type'])
                return (doc['url'], doc['mime_type'], content)

    except (urllib2.URLError, socket.timeout) as e:
        print("error!")
        error = ""
        if hasattr(e, 'reason'):
            error = "error %s in downloading %s" % (str(e.reason), url)
        elif hasattr(e, 'code'):
            error = "error %s in downloading %s" % (e.code, url)
        print(error)
        doc['error'] = error
        docsdb.save(doc)
        return (None, None, None)
   
   
   
def scrapeAndStore(docsdb, url, depth, fieldName, agencyID): def scrapeAndStore(docsdb, url, depth, fieldName, agencyID):
(url,mime_type,content) = fetchURL(docsdb, url, fieldName, agencyID) (url, mime_type, content) = fetchURL(docsdb, url, fieldName, agencyID)
badURLs = ["http://www.ausport.gov.au/supporting/funding/grants_and_scholarships/grant_funding_report"] badURLs = ["http://www.ausport.gov.au/supporting/funding/grants_and_scholarships/grant_funding_report"]
if content != None and depth > 0 and url != "http://www.ausport.gov.au/supporting/funding/grants_and_scholarships/grant_funding_report": if content != None and depth > 0 and url != "http://www.ausport.gov.au/supporting/funding/grants_and_scholarships/grant_funding_report":
if mime_type == "text/html" or mime_type == "application/xhtml+xml" or mime_type =="application/xml": if mime_type == "text/html" or mime_type == "application/xhtml+xml" or mime_type == "application/xml":
# http://www.crummy.com/software/BeautifulSoup/documentation.html # http://www.crummy.com/software/BeautifulSoup/documentation.html
soup = BeautifulSoup(content) soup = BeautifulSoup(content)
navIDs = soup.findAll(id=re.compile('nav|Nav|menu|bar|left|right|sidebar|more-links|breadcrumb|footer|header')) navIDs = soup.findAll(
for nav in navIDs: id=re.compile('nav|Nav|menu|bar|left|right|sidebar|more-links|breadcrumb|footer|header'))
print "Removing element", nav['id'] for nav in navIDs:
  print "Removing element", nav['id']
  nav.extract()
  navClasses = soup.findAll(
  attrs={'class': re.compile('nav|menu|bar|left|right|sidebar|more-links|breadcrumb|footer|header')})
  for nav in navClasses:
  print "Removing element", nav['class']
nav.extract() nav.extract()
navClasses = soup.findAll(attrs={'class' : re.compile('nav|menu|bar|left|right|sidebar|more-links|breadcrumb|footer|header')}) links = soup.findAll('a') # soup.findAll('a', id=re.compile("^p-"))
for nav in navClasses: linkurls = set([])
print "Removing element", nav['class'] for link in links:
nav.extract() if link.has_key("href"):
links = soup.findAll('a') # soup.findAll('a', id=re.compile("^p-")) if link['href'].startswith("http"):
linkurls = set([]) # lets not do external links for now
for link in links: # linkurls.add(link['href'])
if link.has_key("href"): None
if link['href'].startswith("http"): if link['href'].startswith("mailto"):
# lets not do external links for now # not http
# linkurls.add(link['href']) None
None if link['href'].startswith("javascript"):
if link['href'].startswith("mailto"): # not http
# not http None
None else:
if link['href'].startswith("javascript"): # remove anchors and spaces in urls
# not http linkurls.add(fullurl(url, link['href']))
None for linkurl in linkurls:
else: #print linkurl
# remove anchors and spaces in urls scrapeAndStore(docsdb, linkurl, depth - 1, fieldName, agencyID)
linkurls.add(fullurl(url,link['href']))  
for linkurl in linkurls:  
#print linkurl  
scrapeAndStore(docsdb, linkurl, depth-1, fieldName, agencyID)  
   
# CouchDB connection — earlier host addresses kept commented for reference.
#couch = couchdb.Server('http://192.168.1.148:5984/')
couch = couchdb.Server('http://192.168.1.113:5984/')  # NOTE(review): hard-coded LAN address — confirm before deploying elsewhere
#couch = couchdb.Server('http://127.0.0.1:5984/')
# select database
agencydb = couch['disclosr-agencies']
docsdb = couch['disclosr-documents']
   
if __name__ == "__main__":
    # Walk every agency document and scrape the URLs recorded on it.
    for row in agencydb.view('app/all'):  #not recently scraped agencies view?
        agency = agencydb.get(row.id)
        print agency['name']
        for key in agency.keys():
            # NOTE(review): the trailing "and False" makes this branch dead —
            # FOI-document scraping is switched off here on purpose, it seems.
            if key == "FOIDocumentsURL" and "status" not in agency.keys() and False:
                scrapeAndStore(docsdb, agency[key], 0, key, agency['_id'])
            # "and True" is a toggle left in the condition; only the website
            # key is scraped (depth 0 = just the page itself).
            if key == 'website' and True:
                scrapeAndStore(docsdb, agency[key], 0, key, agency['_id'])
                if "metadata" not in agency.keys():
                    agency['metadata'] = {}
                # record when this agency was last scraped
                agency['metadata']['lastScraped'] = time.time()
            # NOTE(review): dead branch ("and False") — generic *URL scraping
            # with configurable depth is disabled.
            if key.endswith('URL') and False:
                print key
                depth = 1
                if 'scrapeDepth' in agency.keys():
                    depth = agency['scrapeDepth']
                scrapeAndStore(docsdb, agency[key], depth, key, agency['_id'])
        agencydb.save(agency)
   
<?php
include_once('include/common.inc.php');
include_header('Search');
?>
<div class="foundation-header">
    <h1><a href="search.php">Search</a></h1>
</div>
<form>
    <!-- SECURITY FIX: escape the user-supplied query before echoing it into
         the attribute; the original emitted $_REQUEST['q'] raw (reflected XSS). -->
    <input type="text" name="q" value="<?php if (isset($_REQUEST['q'])) echo htmlspecialchars($_REQUEST['q'], ENT_QUOTES); ?>"/>
    <input type="submit"/>
</form>

<?php
// Full-text search of the documents database via couchdb-lucene.
if (isset($_REQUEST['q'])) {
    // urlencode so spaces/ampersands in the query cannot break the request URL
    $request = Requests::get($serverAddr . "disclosr-documents/_fti/_design/lucene/by_all?include_docs=true&q=" . urlencode($_REQUEST['q']));
    $results = json_decode($request->body);
    $db = $server->get_db('disclosr-documents');
    foreach ($results->rows as $result) {
        //print_r($result);
        //$row = $db->get($result->id);
        // escape stored fields before emitting them into the page
        echo htmlspecialchars($result->doc->_id) . " " . htmlspecialchars($result->doc->url) . "<br>" . PHP_EOL;
    }
}
include_footer();
?>
<?php

function include_header_documents($title)
{
    // Emit the shared <head>, styles and top navbar for document pages,
    // leaving an open <div class="container"> for the caller's content.
    // $title: page-specific suffix appended to the <title>; "" for none.
    header('X-UA-Compatible: IE=edge,chrome=1');
    ?>
    <!doctype html>
    <!-- paulirish.com/2008/conditional-stylesheets-vs-css-hacks-answer-neither/ -->
    <!--[if lt IE 7]>
    <html class="no-js lt-ie9 lt-ie8 lt-ie7" lang="en"> <![endif]-->
    <!--[if IE 7]>
    <html class="no-js lt-ie9 lt-ie8" lang="en"> <![endif]-->
    <!--[if IE 8]>
    <html class="no-js lt-ie9" lang="en"> <![endif]-->
    <!-- Consider adding a manifest.appcache: h5bp.com/d/Offline -->
    <!--[if gt IE 8]><!--> <html class="no-js" lang="en"> <!--<![endif]-->
    <head>
        <meta charset="utf-8">

        <title>Australian Disclosure Logs<?php if ($title != "") echo " - $title"; ?></title>
        <meta name="description" content="">

        <!-- Mobile viewport optimized: h5bp.com/viewport -->
        <meta name="viewport" content="width=device-width">
        <link rel="alternate" type="application/rss+xml" title="Latest Disclosure Log Entries" href="rss.xml.php"/>
        <!-- Place favicon.ico and apple-touch-icon.png in the root directory: mathiasbynens.be/notes/touch-icons -->
        <meta name="google-site-verification" content="jkknX5g2FCpQvrW030b1Nq2hyoa6mb3EDiA7kCoHNj8"/>

        <!-- Le styles -->
        <link href="css/bootstrap.min.css" rel="stylesheet">
        <style type="text/css">
            body {
                padding-top: 60px;
                padding-bottom: 40px;
            }

            .sidebar-nav {
                padding: 9px 0;
            }
        </style>
        <link href="css/bootstrap-responsive.min.css" rel="stylesheet">

        <!-- HTML5 shim, for IE6-8 support of HTML5 elements -->
        <!--[if lt IE 9]>
        <script src="http://html5shim.googlecode.com/svn/trunk/html5.js"></script>
        <![endif]-->
        <!-- More ideas for your <head> here: h5bp.com/d/head-Tips -->

        <!-- All JavaScript at the bottom, except this Modernizr build.
     Modernizr enables HTML5 elements & feature detects for optimal performance.
     Create your own custom Modernizr build: www.modernizr.com/download/
        <script src="js/libs/modernizr-2.5.3.min.js"></script>-->
        <script src="js/jquery.js"></script>
        <script type="text/javascript" src="js/flotr2.min.js"></script>

    </head>
    <body>
    <div class="navbar navbar-inverse navbar-fixed-top">
        <div class="navbar-inner">
            <div class="container-fluid">
                <a class="btn btn-navbar" data-toggle="collapse" data-target=".nav-collapse">
                    <span class="icon-bar"></span>
                    <span class="icon-bar"></span>
                    <span class="icon-bar"></span>
                </a>
                <a class="brand" href="#">Australian Disclosure Logs</a>

                <div class="nav-collapse collapse">
                    <p class="navbar-text pull-right">
                        <small>
                            Subsites on:
                        </small>
                        <a href="http://orgs.disclosurelo.gs">Government Agencies</a>
                        • <a href="http://lobbyists.disclosurelo.gs">Political Lobbyists</a>
                        • <a href="http://contracts.disclosurelo.gs">Government Contracts and Spending</a>

                    </p>
                    <ul class="nav">
                        <li><a href="agency.php">By Agency</a></li>
                        <li><a href="date.php">By Date</a></li>
                        <li><a href="disclogsList.php">List of Disclosure Logs</a></li>
                        <li><a href="about.php">About</a></li>

                    </ul>
                </div>
                <!--/.nav-collapse -->
            </div>
        </div>
    </div>
    <div class="container">
    <?php
}

function include_footer_documents()
{
    // Close the container opened by include_header_documents() and emit the
    // page footer. Adds Google Analytics except in the DEV environment.
    global $ENV;
    ?>
    </div> <!-- /container -->
    <hr>

    <footer>
        <p>Not affiliated with or endorsed by any government agency.</p>
    </footer>
    <?php
    if ($ENV != "DEV") {
        // async Google Analytics snippet (runtime output — do not reformat)
        echo "<script type='text/javascript'>

    var _gaq = _gaq || [];
    _gaq.push(['_setAccount', 'UA-12341040-4']);
    _gaq.push(['_setDomainName', 'disclosurelo.gs']);
    _gaq.push(['_setAllowLinker', true]);
    _gaq.push(['_trackPageview']);

    (function() {
        var ga = document.createElement('script');
        ga.type = 'text/javascript';
        ga.async = true;
        ga.src = ('https:' == document.location.protocol ? 'https://ssl' : 'http://www') + '.google-analytics.com/ga.js';
        var s = document.getElementsByTagName('script')[0];
        s.parentNode.insertBefore(ga, s);
    })();

</script>";
    }
    ?>
    <!-- Le javascript
    ================================================== -->
    <!-- Placed at the end of the document so the pages load faster -->
    <!--
    <script src="js/bootstrap-transition.js"></script>
    <script src="js/bootstrap-alert.js"></script>
    <script src="js/bootstrap-modal.js"></script>
    <script src="js/bootstrap-dropdown.js"></script>
    <script src="js/bootstrap-scrollspy.js"></script>
    <script src="js/bootstrap-tab.js"></script>
    <script src="js/bootstrap-tooltip.js"></script>
    <script src="js/bootstrap-popover.js"></script>
    <script src="js/bootstrap-button.js"></script>
    <script src="js/bootstrap-collapse.js"></script>
    <script src="js/bootstrap-carousel.js"></script>
    <script src="js/bootstrap-typeahead.js"></script>-->


    </body>
    </html>
<?php
}
function truncate($string, $length, $stopanywhere = false)
{
    // Truncate $string to at most $length characters, appending "..." when cut.
    // Stops on the last word boundary unless $stopanywhere is true.
    if (strlen($string) > $length) {
        // leave room for the three-dot ellipsis
        $string = substr($string, 0, ($length - 3));
        if ($stopanywhere) {
            //stop anywhere
            $string .= '...';
        } else {
            //stop on a word.
            $lastSpace = strrpos($string, ' ');
            if ($lastSpace === false) {
                // BUGFIX: with no space in the prefix, strrpos returns false
                // and substr(..., 0, false) produced an empty string, so the
                // whole result collapsed to "...". Fall back to a hard cut.
                $string .= '...';
            } else {
                $string = substr($string, 0, $lastSpace) . '...';
            }
        }
    }
    return $string;
}
   
function displayLogEntry($row, $idtoname)
{
    // Render one disclosure-log entry as schema.org/Article HTML.
    // $row: object whose ->value holds the log-entry document (CouchDB view row shape).
    // $idtoname: map of agency document id => human-readable agency name.
    // Returns the HTML string (does not echo).
    $result = "";
    $result .= '<div itemscope itemtype="http://schema.org/Article">';
    // NOTE(review): _id, date, title and url are emitted unescaped (only the
    // per-document links below go through htmlspecialchars) — confirm these
    // fields are trusted or escape them too.
    $result .= '<h2><a href="http://disclosurelo.gs/view.php?id=' . $row->value->_id . '"> <span itemprop="datePublished">' . $row->value->date . "</span>: <span itemprop='name headline'>" . truncate($row->value->title, 120) . "</span>";
    $result .= ' (<span itemprop="author publisher creator">' . $idtoname[$row->value->agencyID] . '</span>)</a></h2>';
    $result .= "<p itemprop='description articleBody text'> Title: " . $row->value->title . "<br/>";
    if (isset($row->value->description)) {
        // collapse blank lines, then turn remaining newlines into <br>
        $result .= str_replace("\n", "<br>", preg_replace("/(^[\r\n]*|[\r\n]+)[\s\t]*[\r\n]+/", "", trim($row->value->description)));
    }
    if (isset($row->value->notes)) {
        $result .= " <br>Note: " . $row->value->notes;
    }
    $result .= "</p>";

    if (isset($row->value->links)) {
        $result .= '<h3>Links/Documents</h3><ul itemprop="associatedMedia">';
        foreach ($row->value->links as $link) {
            $result .= '<li itemscope itemtype="http://schema.org/MediaObject"><a href="' . htmlspecialchars($link) . '" itemprop="url contentURL">' . htmlspecialchars($link) . "</a></li>";
        }

        $result .= "</ul>";
    }
    $result .= "<small><A itemprop='url' href='" . $row->value->url . "'>View original source...</a> ID: " . strip_tags($row->value->docID) . "</small>";
    $result .= "</div>\n";
    return $result;
}
   
<?php
include('template.inc.php');
include_once('../include/common.inc.php');
?>
<?php


// Single-entry view page: look up one FOI document by id and render it.
$agenciesdb = $server->get_db('disclosr-agencies');

// Build a lookup of agency document id => agency display name.
$idtoname = Array();
foreach ($agenciesdb->get_view("app", "byCanonicalName")->rows as $row) {
    $idtoname[$row->id] = trim($row->value->name);
}
$foidocsdb = $server->get_db('disclosr-foidocuments');
try {
    // Wrap the raw document so it matches the view-row shape (->value)
    // that displayLogEntry() expects.
    $obj = new stdClass();
    // NOTE(review): $_REQUEST['id'] is passed unvalidated as a document id — confirm upstream sanitisation.
    $obj->value = $foidocsdb->get($_REQUEST['id']);
    include_header_documents($obj->value->title);

    echo displayLogEntry($obj, $idtoname);

} catch (SetteeRestClientException $e) {
    setteErrorHandler($e);
}
// NOTE(review): if get() throws, the footer is emitted without a matching header.
include_footer_documents();
?>
   
<?php

// Attachment proxy: stream the first attachment of a CouchDB document
// (identified by its hash id) back to the client.
include_once('../include/common.inc.php');
// NOTE(review): request input is interpolated into the CouchDB URL below
// without validation — confirm hashes are constrained upstream.
$hash = $_REQUEST['hash'];
$docsdb = $server->get_db('disclosr-documents');
try {
    $doc = object_to_array($docsdb->get($hash));

} catch (SetteeRestClientException $e) {
    setteErrorHandler($e);
}


if (!isset($doc['_attachments']) || count($doc['_attachments']) == 0) die ("no attachments");
$attachments = $doc['_attachments'];
$attachment_filenames = array_keys($attachments);
//print_r($attachments);
// Fetch the first-listed attachment directly from CouchDB and echo its body.
$url = $serverAddr . 'disclosr-documents/' . $hash . '/' . urlencode($attachment_filenames[0]);
//echo $url;
$request = Requests::get($url);
// NOTE(review): no Content-Type header is forwarded, so the browser must guess the MIME type.
echo ($request->body);