Merge branch 'master' of ssh://apples.lambdacomplex.org/git/disclosr

[submodule "couchdb/couchdb-lucene"] [submodule "couchdb/couchdb-lucene"]
path = couchdb/couchdb-lucene path = couchdb/couchdb-lucene
url = https://github.com/rnewson/couchdb-lucene.git url = https://github.com/rnewson/couchdb-lucene.git
[submodule "couchdb/settee"]  
path = couchdb/settee  
url = https://github.com/inadarei/settee.git  
[submodule "lib/php-diff"] [submodule "lib/php-diff"]
path = lib/php-diff path = lib/php-diff
url = https://github.com/chrisboulton/php-diff.git url = https://github.com/chrisboulton/php-diff.git
[submodule "lib/Requests"] [submodule "lib/Requests"]
path = lib/Requests path = lib/Requests
url = https://github.com/rmccue/Requests.git url = https://github.com/rmccue/Requests.git
[submodule "js/flotr2"] [submodule "js/flotr2"]
path = js/flotr2 path = js/flotr2
url = https://github.com/HumbleSoftware/Flotr2.git url = https://github.com/HumbleSoftware/Flotr2.git
[submodule "lib/phpquery"] [submodule "lib/phpquery"]
path = lib/phpquery path = lib/phpquery
url = https://github.com/TobiaszCudnik/phpquery.git url = https://github.com/TobiaszCudnik/phpquery.git
[submodule "js/sigma"] [submodule "js/sigma"]
path = js/sigma path = js/sigma
url = https://github.com/jacomyal/sigma.js.git url = https://github.com/jacomyal/sigma.js.git
[submodule "js/bubbletree"] [submodule "js/bubbletree"]
path = js/bubbletree path = js/bubbletree
url = https://github.com/okfn/bubbletree.git url = https://github.com/okfn/bubbletree.git
[submodule "lib/querypath"] [submodule "lib/querypath"]
path = lib/querypath path = lib/querypath
url = https://github.com/technosophos/querypath.git url = https://github.com/technosophos/querypath.git
[submodule "lib/amon-php"] [submodule "lib/amon-php"]
path = lib/amon-php path = lib/amon-php
url = https://github.com/martinrusev/amon-php.git url = https://github.com/martinrusev/amon-php.git
[submodule "documents/lib/parsedatetime"] [submodule "documents/lib/parsedatetime"]
path = documents/lib/parsedatetime path = documents/lib/parsedatetime
url = git://github.com/bear/parsedatetime.git url = git://github.com/bear/parsedatetime.git
  [submodule "lib/FeedWriter"]
  path = lib/FeedWriter
  url = https://github.com/mibe/FeedWriter
   
<?php

include_once("../include/common.inc.php");

$format = "csv";
//$format = "json";
if (isset($_REQUEST['format']))
    $format = $_REQUEST['format'];
setlocale(LC_CTYPE, 'C');
if ($format == "csv") {
    $headers = Array("name");
} else {
    $headers = Array();
}

$db = $server->get_db('disclosr-agencies');
try {
    $rows = $db->get_view("app", "all", null, true)->rows;

    $dataValues = Array();
    foreach ($rows as $row) {
        if (isset($row->value->statistics->employees)) {
            $headers = array_unique(array_merge($headers, array_keys(object_to_array($row->value->statistics->employees))));
        }
    }
} catch (SetteeRestClientException $e) {
    setteErrorHandler($e);
}

$fp = fopen('php://output', 'w');
if ($fp && $db) {
    if ($format == "csv") {
        header('Content-Type: text/csv; charset=utf-8');
        header('Content-Disposition: attachment; filename="export.employeestats.' . date("c") . '.csv"');
    }
    header('Pragma: no-cache');
    header('Expires: 0');
    if ($format == "csv") {
        fputcsv($fp, $headers);
    } else if ($format == "json") {
        echo '{
"labels" : ["' . implode('","', $headers) . '"],' . PHP_EOL;
    }
    try {
        $agencies = $db->get_view("app", "all", null, true)->rows;
        //print_r($agencies);
        $first = true;
        if ($format == "json") {
            echo '"data" : [' . PHP_EOL;
        }
        foreach ($agencies as $agency) {
            if (isset($agency->value->statistics->employees)) {
                $row = Array();
                $agencyEmployeesArray = object_to_array($agency->value->statistics->employees);
                foreach ($headers as $i => $fieldName) {
                    if ($format == "csv") {
                        if (isset($agencyEmployeesArray[$fieldName])) {
                            $row[] = $agencyEmployeesArray[$fieldName]["value"];
                        } else if ($i == 0) {
                            $row[] = $agency->value->name;
                        } else {
                            $row[] = 0;
                        }
                    } else if ($format == "json") {
                        if (isset($agencyEmployeesArray[$fieldName])) {
                            $row[] = '[' . $i . ',' . $agencyEmployeesArray[$fieldName]["value"] . ']';
                        } else {
                            $row[] = '[' . $i . ',0]';
                        }
                    }
                }
                if ($format == "csv") {
                    fputcsv($fp, array_values($row));
                } else if ($format == "json") {
                    if (!$first)
                        echo ",";
                    echo '{"data" : [' . implode(",", array_values($row)) . '], "label": "' . $agency->value->name . '", "lines" : { "show" : true }, "points" : { "show" : true }}' . PHP_EOL;
                    $first = false;
                }
            }
        }
        if ($format == "json") {
            echo ']
}' . PHP_EOL;
        }
    } catch (SetteeRestClientException $e) {
        setteErrorHandler($e);
    }

    die;
}
?>
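For reference, the JSON branch above emits a structure shaped for the bundled Flotr2 charting library, roughly like the following (an illustrative sketch only; agency names and values are made up):

{
"labels" : ["name","2002-2003","2003-2004"],
"data" : [
{"data" : [[0,0],[1,123],[2,130]], "label": "Example Agency", "lines" : { "show" : true }, "points" : { "show" : true }}
]
}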
   
<?php

require_once '../include/common.inc.php';

$db = $server->get_db('disclosr-agencies');
$rows = $db->get_view("app", "byName")->rows;
$nametoid = Array();
$sums = Array();
foreach ($rows as $row) {
    $nametoid[trim($row->key)] = $row->value;
}
$employeeCSVs = Array("2002-2003" => "0203apsemployees.csv",
    "2003-2004" => "0304apsemployees.csv",
    "2004-2005" => "0405apsemployees.csv",
    "2005-2006" => "0506apsemployees.csv",
    "2006-2007" => "0607apsemployees.csv",
    "2007-2008" => "0708apsemployees.csv",
    "2008-2009" => "0809apsemployees.csv",
    "2009-2010" => "0910apsemployees.csv",
    "2010-2011" => "1011apsemployees.csv"
);
foreach ($employeeCSVs as $timePeriod => $employeeCSV) {
    echo $employeeCSV . "<br>" . PHP_EOL;
    $row = 1;
    if (($handle = fopen($employeeCSV, "r")) !== FALSE) {
        while (($data = fgetcsv($handle, 1000, ",")) !== FALSE) {
            //print_r($data);
            $name = trim($data[0]);
            if (isset($nametoid[$name])) {
                $id = $nametoid[$name];
                //echo $id . "<br>" . PHP_EOL;
                @$sums[$id][$timePeriod] += $data[1];
            } else {
                echo "<br>ERROR NAME MISSING FROM ID LIST<br><br>" . PHP_EOL;
                die();
            }
        }
        fclose($handle);
    }
}
foreach ($sums as $id => $sum) {
    echo $id . "<br>" . PHP_EOL;
    $doc = $db->get($id);
    echo $doc->name . "<br>" . PHP_EOL;
    // print_r($doc);
    $changed = false;
    if (!isset($doc->statistics)) {
        $changed = true;
        $doc->statistics = new stdClass();
    }
    if (!isset($doc->statistics->employees)) {
        $changed = true;
        $doc->statistics->employees = new stdClass();
    }
    foreach ($sum as $timePeriod => $value) {
        if (!isset($doc->statistics->employees->$timePeriod->value)
                || $doc->statistics->employees->$timePeriod->value != $value) {
            $changed = true;
            $doc->statistics->employees->$timePeriod = Array("value" => $value, "source" => "http://apsc.gov.au/stateoftheservice/");
        }
    }
    if ($changed) {
        $db->save($doc);
    } else {
        echo "not changed" . "<br>" . PHP_EOL;
    }
}
// employees: timeperiod, source = apsc state of service, value
?>
   
  <?php
 
  require_once '../include/common.inc.php';
  require($basePath . 'lib/phpquery/phpQuery/phpQuery.php');
  $db = $server->get_db('disclosr-agencies');
  $rows = $db->get_view("app", "byName")->rows;
  $nametoid = Array();
  $sums = Array();
  $functions = Array();
  foreach ($rows as $row) {
  $nametoid[trim($row->key)] = $row->value;
  }
 
 
  $request = Requests::get("http://www.apsc.gov.au/publications-and-media/parliamentary/state-of-the-service/new-sosr/appendix-2-aps-agencies");
  $doc = phpQuery::newDocumentHTML($request->body);
  phpQuery::selectDocument($doc);
  foreach (pq('tr')->elements as $tr) {
  //echo $tr->nodeValue.PHP_EOL;
  $agency = "";
  $employees = "";
  $function = "";
  $i = 0;
  foreach ($tr->childNodes as $td) {
  //echo $td->nodeValue." $i <br>";
  if ($i == 0)
  $agency = $td->nodeValue;
  if ($i == 2) {
  $employees = trim(str_replace(",", "", $td->nodeValue));
  }
  if ($i == 4) {
  $function = $td->nodeValue;
  }
  $i++;
  }
  if ($agency != "" && $employees != "" && $function != "") {
  $name = trim(str_replace('2','',$agency));
  //echo "$name<br><bR>" . PHP_EOL;
  if (isset($nametoid[$name])) {
  $id = $nametoid[$name];
  //echo $id . "<br>" . PHP_EOL;
  @$sums[$id]["2011-2012"] += $employees;
  $functions[$id] = $function;
} else if ($agency != "Agency") {
echo "<br>ERROR NAME '$agency' MISSING FROM ID LIST<br><br>" . PHP_EOL;
 
  die();
  }
  } else {
  echo "skipped $agency";
  }
  }
  //print_r($sums);
  foreach ($sums as $id => $sum) {
  echo $id . "<br>" . PHP_EOL;
  $doc = $db->get($id);
  echo $doc->name . "<br>" . PHP_EOL;
  // print_r($doc);
  $changed = false;
  if (!isset($doc->statistics)) {
  $changed = true;
  $doc->statistics = new stdClass();
  }
  if (!isset($doc->statistics->employees)) {
  $changed = true;
  $doc->statistics->employees = new stdClass();
  }
  foreach ($sum as $timePeriod => $value) {
  if (!isset($doc->statistics->employees->$timePeriod->value)
  || $doc->statistics->employees->$timePeriod->value != $value) {
  $changed = true;
  $doc->statistics->employees->$timePeriod = Array("value" => $value, "source" => "http://apsc.gov.au/stateoftheservice/");
  $doc->employees = $value;
  $doc->functionClassification = $functions[$id];
  }
  }
 
  if ($changed) {
  $db->save($doc);
  } else {
  echo "not changed" . "<br>" . PHP_EOL;
  }
  }
  // employees: timeperiod, source = apsc state of service, value
  ?>
 
<?php

require_once '../include/common.inc.php';
//function createFOIDocumentsDesignDoc() {

$foidb = $server->get_db('disclosr-foidocuments');
$obj = new stdClass();
$obj->_id = "_design/" . urlencode("app");
$obj->language = "javascript";
$obj->views->all->map = "function(doc) { emit(doc._id, doc); };";
$obj->views->byDate->map = "function(doc) { emit(doc.date, doc); };";
$obj->views->byDate->reduce = "_count";
$obj->views->byDateMonthYear->map = "function(doc) { emit(doc.date, doc); };";
$obj->views->byDateMonthYear->reduce = "_count";
$obj->views->byAgencyID->map = "function(doc) { emit(doc.agencyID, doc); };";
$obj->views->byAgencyID->reduce = "_count";

// allow safe updates (even if slightly slower due to extra: rev-detection check).
$foidb->save($obj, true);


//function createDocumentsDesignDoc() {
$docdb = $server->get_db('disclosr-documents');

$obj = new stdClass();
$obj->_id = "_design/" . urlencode("app");
$obj->language = "javascript";
$obj->views->web_server->map = "function(doc) {\n emit(doc.web_server, 1);\n}";
$obj->views->web_server->reduce = "function (key, values, rereduce) {\n return sum(values);\n}";
$obj->views->byAgency->map = "function(doc) {\n emit(doc.agencyID, 1);\n}";
$obj->views->byAgency->reduce = "function (key, values, rereduce) {\n return sum(values);\n}";
$obj->views->byURL->map = "function(doc) {\n emit(doc.url, doc);\n}";
$obj->views->agency->map = "function(doc) {\n emit(doc.agencyID, doc);\n}";
$obj->views->byWebServer->map = "function(doc) {\n emit(doc.web_server, doc);\n}";
$obj->views->getValidationRequired->map = "function(doc) {\nif (doc.mime_type == \"text/html\" \n&& typeof(doc.validation) == \"undefined\") {\n emit(doc._id, doc._attachments);\n}\n}";

// allow safe updates (even if slightly slower due to extra: rev-detection check).
$docdb->save($obj, true);


//function createAgencyDesignDoc() {
$db = $server->get_db('disclosr-agencies');
$obj = new stdClass();
$obj->_id = "_design/" . urlencode("app");
$obj->language = "javascript";
$obj->views->all->map = "function(doc) { emit(doc._id, doc); };";
$obj->views->byABN->map = "function(doc) { emit(doc.abn, doc); };";
$obj->views->byCanonicalName->map = "function(doc) {
    if (doc.parentOrg || doc.orgType == 'FMA-DepartmentOfState') {
        emit(doc.name, doc);
    }
};";
$obj->views->byDeptStateName->map = "function(doc) {
    if (doc.orgType == 'FMA-DepartmentOfState') {
        emit(doc.name, doc._id);
    }
};";
$obj->views->parentOrgs->map = "function(doc) {
    if (doc.parentOrg) {
        emit(doc._id, doc.parentOrg);
    }
};";
$obj->views->byName->map = 'function(doc) {
    if (typeof(doc["status"]) == "undefined" || doc["status"] != "suspended") {
        emit(doc.name, doc._id);
        if (typeof(doc.shortName) != "undefined" && doc.shortName != doc.name) {
            emit(doc.shortName, doc._id);
        }
        for (name in doc.otherNames) {
            if (doc.otherNames[name] != "" && doc.otherNames[name] != doc.name) {
                emit(doc.otherNames[name], doc._id);
            }
        }
        for (name in doc.foiBodies) {
            if (doc.foiBodies[name] != "" && doc.foiBodies[name] != doc.name) {
                emit(doc.foiBodies[name], doc._id);
            }
        }
        for (name in doc.positions) {
            if (doc.positions[name] != "" && doc.positions[name] != doc.name) {
                emit(doc.positions[name], doc._id);
            }
        }
    }
};';

$obj->views->foiEmails->map = "function(doc) {
    emit(doc._id, doc.foiEmail);
};";

$obj->views->byLastModified->map = "function(doc) { emit(doc.metadata.lastModified, doc); }";
$obj->views->getActive->map = 'function(doc) { if (doc.status == "active") { emit(doc._id, doc); } };';
$obj->views->getSuspended->map = 'function(doc) { if (doc.status == "suspended") { emit(doc._id, doc); } };';
$obj->views->getScrapeRequired->map = "function(doc) {

    var lastScrape = Date.parse(doc.metadata.lastScraped);

    var today = new Date();

    if (!lastScrape || lastScrape.getTime() + 1000 != today.getTime()) {
        emit(doc._id, doc);
    }

};";
$obj->views->showNamesABNs->map = "function(doc) { emit(doc._id, {name: doc.name, abn: doc.abn}); };";
$obj->views->getConflicts->map = "function(doc) {
    if (doc._conflicts) {
        emit(null, [doc._rev].concat(doc._conflicts));
    }
}";
// http://stackoverflow.com/questions/646628/javascript-startswith
$obj->views->score->map = 'if(!String.prototype.startsWith){
    String.prototype.startsWith = function (str) {
        return !this.indexOf(str);
    }
}

function(doc) {
    count = 0;
    if (doc["status"] != "suspended") {
        for(var propName in doc) {
            if(typeof(doc[propName]) != "undefined" && doc[propName] != "") {
                count++;
            }
        }
        portfolio = doc.parentOrg;
        if (doc.orgType == "FMA-DepartmentOfState") {
            portfolio = doc._id;
        }
        if (doc.orgType == "Court-Commonwealth" || doc.orgType == "FMA-DepartmentOfParliament") {
            portfolio = doc.orgType;
        }
        emit(count+doc._id, {id:doc._id, name: doc.name, score:count, orgType: doc.orgType, portfolio:portfolio});
    }
}';
$obj->views->scoreHas->map = 'if(!String.prototype.startsWith){
    String.prototype.startsWith = function (str) {
        return !this.indexOf(str);
    }
}
if(!String.prototype.endsWith){
    String.prototype.endsWith = function(suffix) {
        return this.indexOf(suffix, this.length - suffix.length) !== -1;
    };
}
function(doc) {
    if (typeof(doc["status"]) == "undefined" || doc["status"] != "suspended") {
        for(var propName in doc) {
            if(typeof(doc[propName]) != "undefined" && (propName.startsWith("has") || propName.endsWith("URL"))) {
                emit(propName, 1);
            }
        }
        emit("total", 1);
    }
}';
$obj->views->scoreHas->reduce = 'function (key, values, rereduce) {
    return sum(values);
}';
$obj->views->fieldNames->map = '
function(doc) {
    for(var propName in doc) {
        emit(propName, doc._id);
    }
}';
$obj->views->fieldNames->reduce = 'function (key, values, rereduce) {
    return values.length;
}';
// allow safe updates (even if slightly slower due to extra: rev-detection check).
$db->save($obj, true);


?>
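
Queried back through Settee, the design documents saved above can be exercised like this (a sketch for illustration, not one of the original scripts; it assumes common.inc.php provides $server as elsewhere in this repo):

<?php

require_once '../include/common.inc.php';

$agencies = $server->get_db('disclosr-agencies');
// map view: agency names (including shortName and otherNames) keyed to the agency _id
$byName = $agencies->get_view("app", "byName")->rows;
// filtered map view: only documents whose status is "active"
$active = $agencies->get_view("app", "getActive")->rows;

$foidb = $server->get_db('disclosr-foidocuments');
// byDate carries a "_count" reduce, so this returns row counts rather than documents
$counts = $foidb->get_view("app", "byDate");
?>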
   
directory: a/couchdb/settee (deleted)
 
  language: php
  phps:
  - 5.3
  - 5.4
  before_script: cd tests/
 
  (The MIT License)
 
  Copyright (c) 2011 Irakli Nadareishvili
 
  Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the 'Software'), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
 
  The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
 
  THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
  Inspired by: "CouchRest library for Ruby":http://jchrisa.net/drl/_design/sofa/_list/post/post-page?startkey=%5B%22couchrest__restful_ruby_client_%22%5D and the "couchdb-python":http://packages.python.org/CouchDB/client.html#document library.
 
  h3. Server Functions
 
  # Specify a server:
  @$server = new SetteeServer('http://127.0.0.1:5984');@
  # Database API
  ## Create a database:
  @$ret = $server->create_db('irakli_test');@
  ## Drop a database:
  @$ret = $server->drop_db('irakli_test');@
  ## List all databases:
  @$ret = $server->list_dbs();@
  ## Get a database object
  @$db = $server->get_db('irakli_test');@
  # Document API
  ## Create/Update a document:
  @$ret = $db->save($doc);@
  ## Retrieve a document:
  @$db_doc = $db->get($id);@
  ## Determine the latest revision_id for a document:
  @$rev = $db->get_rev($id);@
  ## Delete a document:
  @$db_doc = $db->delete($doc);@
  # Attachments API
  ## Add content as attachment:
  @$db->add_attachment($doc, "foo.txt", "Some text that will be base64 encoded", "text/plain");@
  ## Add a file path to be attached:
  @$db->add_attachment_file($doc, "foo.pdf", $file_path, "application/pdf");@
  ## Add a file path to be attached (mime-type is auto-detected):
  @$db->add_attachment_file($doc, "foo.pdf", $file_path);@
  ## Full attachment saving example:
  $doc = new stdClass();
  $doc->_id = "attachment_doc";
  $file_path = dirname(__FILE__) . "/resources/couch-logo.pdf";
  $this->db->add_attachment_file($doc, "foo.pdf", $file_path, "application/pdf");
  $db_doc = $this->db->save($doc);
## ATTENTION: there is no "load_attachments" method, because when you load a document, all its attachments get loaded with it as well.
  # Views API
  ## Create a new view or save a view:
  @$view = $db->save_view("some_design_document_id", "a_view_name", $map_src);@
  @$view = $db->save_view("some_design_document_id", "a_view_name", $map_src, $reduce_src);@
  ## Get a view (run query and get results):
  @$view = $db->get_view("some_design_document_id", "a_view_name");@
  ## Parametrized view:
  @$view = $db->get_view("some_design_document_id", "a_view_name", "2009/02/17 21:13:39");@
  ## Parametrized view with key range:
  @$view = $db->get_view("some_design_document_id", "a_view_name", array("2009/01/30 18:04:11", "2009/02/17 21:13:39"));@
  ## Parametrized view with key range, ordered descending:
  @$view = $db->get_view("some_design_document_id", "a_view_name", array("2009/01/30 18:04:11", "2009/02/17 21:13:39"), true);@
 
 
  h3. Requirements
  # PHP 5.2 or newer
 
  h3. Recommended
# PHP 5.3 or newer. With PHP 5.2, the following functionality will not work:
  ## Some unit-tests
  ## Mime type auto-detection.
  # pecl_http
  #!/usr/bin/env php
 
  <?php
 
  require (realpath(dirname(__FILE__) . '/../src/settee.php'));
 
  $server = new SetteeServer('http://127.0.0.1:5984');
 
 
  $dbs = array (
  1 => "settee_test_perf_01",
  2 => "settee_test_perf_02",
  3 => "settee_test_perf_03",
  );
 
  print ("creating databases: \n");
 
  foreach ($dbs as $db) {
  $start = microtime(true);
  try {
  $ret = $server->create_db($db);
  } catch (Exception $e) {
  //-- re-throw. this is just for demo
  throw $e;
  }
  $elapsed = microtime(true) - $start;
  print("Time elapsed: $elapsed \n");
  }
 
  $ret = $server->list_dbs();
  print_r($ret);
  print ("\n");
 
  print ("dropping databases: \n");
 
  foreach ($dbs as $db) {
  $start = microtime(true);
  try {
  $ret = $server->drop_db($db);
  } catch (Exception $e) {
  //-- re-throw. this is just for demo
  throw $e;
  }
  $elapsed = microtime(true) - $start;
  print("Time elapsed: $elapsed \n");
  }
 
  $ret = $server->list_dbs();
  print_r($ret);
 
  #!/usr/bin/env php
 
  <?php
 
  require (realpath(dirname(__FILE__) . '/../src/settee.php'));
 
  $server = new SetteeServer('http://127.0.0.1:5984');
  $dname = 'irakli';
  $db = $server->get_db('irakli');
 
  try {
  $server->create_db($db);
  } catch (Exception $e) {
  print_r("database irakli already exists! \n");
  }
 
  $doc = new StdClass();
  $doc->firstName = "Irakli";
  $doc->lastName = "Nadareishvili";
  $doc->IQ = 200;
  $doc->hobbies = array("skiing", "swimming");
  $doc->pets = array ("whitey" => "labrador", "mikey" => "pug");
 
  // Should work with json string as well:
  //$doc = '{"firstName":"irakli","lastName":"Nadareishvili","IQ":200,"hobbies":["skiing","swimming"],"pets":{"whitey":"labrador","mikey":"pug"}}';
 
  $doc = $db->save($doc);
  print_r($doc);
 
  $doc = $db->get($doc->_id);
  print_r($doc);
 
  $doc->firstName = "Ika";
  $doc = $db->save($doc);
  print_r($doc);
 
  $db->delete($doc);
 
 
 
  <?php
 
  /**
* Database class.
  */
  class SetteeDatabase {
 
  /**
  * Base URL of the CouchDB REST API
  */
  private $conn_url;
 
  /**
  * HTTP REST Client instance
  */
  protected $rest_client;
 
  /**
  * Name of the database
  */
  private $dbname;
 
  /**
  * Default constructor
  */
  function __construct($conn_url, $dbname) {
  $this->conn_url = $conn_url;
  $this->dbname = $dbname;
  $this->rest_client = SetteeRestClient::get_instance($this->conn_url);
  }
 
 
  /**
  * Get UUID from CouchDB
  *
  * @return
  * CouchDB-generated UUID string
  *
  */
  function gen_uuid() {
  $ret = $this->rest_client->http_get('_uuids');
  return $ret['decoded']->uuids[0]; // should never be empty at this point, so no checking
  }
 
  /**
* Create or update a document in the database
  *
  * @param $document
  * PHP object, a PHP associative array, or a JSON String representing the document to be saved. PHP Objects and arrays are JSON-encoded automatically.
  *
* <p>If $document has an "_id" property set, it will be used as the document's unique id (even for a "create" operation).
* If "_id" is missing, CouchDB will generate a UUID for it.
*
* <p>If $document has a "_rev" property (revision), the document will be updated rather than created.
* You have to provide "_rev" if you want to update an existing document; otherwise the operation will be assumed to be
* a creation and you will get a duplicate-document exception from CouchDB. Also, you may not provide "_rev" while
* omitting "_id", since that is invalid input.
  *
  * @param $allowRevAutoDetection
  * Default: false. When true and _rev is missing from the document, save() function will auto-detect latest revision
  * for a document and use it. This option is "false" by default because it involves an extra http HEAD request and
  * therefore can make save() operation slightly slower if such auto-detection is not required.
  *
  * @return
  * document object with the database id (uuid) and revision attached;
  *
  * @throws SetteeCreateDatabaseException
  */
  function save($document, $allowRevAutoDetection = false) {
  if (is_string($document)) {
  $document = json_decode($document);
  }
 
  // Allow passing of $document as an array (for syntactic simplicity and also because in JSON world it does not matter)
  if(is_array($document)) {
  $document = (object) $document;
  }
 
  if (empty($document->_id) && empty($document->_rev)) {
  $id = $this->gen_uuid();
  }
  elseif (empty($document->_id) && !empty($document->_rev)) {
  throw new SetteeWrongInputException("Error: You can not save a document with a revision provided, but missing id");
  }
  else {
  $id = $document->_id;
 
  if ($allowRevAutoDetection) {
  try {
  $rev = $this->get_rev($id);
  } catch (SetteeRestClientException $e) {
  // auto-detection may fail legitimately, if a document has never been saved before (new doc), so skipping error
  }
  if (!empty($rev)) {
  $document->_rev = $rev;
  }
  }
  }
 
  $full_uri = $this->dbname . "/" . $this->safe_urlencode($id);
  $document_json = json_encode($document, JSON_NUMERIC_CHECK);
 
  $ret = $this->rest_client->http_put($full_uri, $document_json);
 
  $document->_id = $ret['decoded']->id;
  $document->_rev = $ret['decoded']->rev;
 
  return $document;
  }
 
  /**
  * @param $doc
  * @param $name
  * @param $content
  * Content of the attachment in a string-buffer format. This function will automatically base64-encode content for
  * you, so you don't have to do it.
  * @param $mime_type
  * Optional. Will be auto-detected if not provided
  * @return void
  */
  public function add_attachment($doc, $name, $content, $mime_type = null) {
  if (empty($doc->_attachments) || !is_object($doc->_attachments)) {
  $doc->_attachments = new stdClass();
  }
 
  if (empty($mime_type)) {
  $mime_type = $this->rest_client->content_mime_type($content);
  }
 
  $doc->_attachments->$name = new stdClass();
  $doc->_attachments->$name->content_type = $mime_type;
  $doc->_attachments->$name->data = base64_encode($content);
  }
 
  /**
  * @param $doc
  * @param $name
  * @param $file
  * Full path to a file (e.g. as returned by PHP's realpath function).
  * @param $mime_type
  * Optional. Will be auto-detected if not provided
  * @return void
  */
  public function add_attachment_file($doc, $name, $file, $mime_type = null) {
  $content = file_get_contents($file);
  $this->add_attachment($doc, $name, $content, $mime_type);
  }
 
  /**
  *
  * Retrieve a document from CouchDB
  *
  * @throws SetteeWrongInputException
  *
  * @param $id
  * Unique ID (usually: UUID) of the document to be retrieved.
  * @return
  * database document in PHP object format.
  */
  function get($id) {
  if (empty($id)) {
  throw new SetteeWrongInputException("Error: Can't retrieve a document without a uuid.");
  }
 
  $full_uri = $this->dbname . "/" . $this->safe_urlencode($id);
  $full_uri = str_replace("%3Frev%3D","?rev=",$full_uri);
  $ret = $this->rest_client->http_get($full_uri);
  return $ret['decoded'];
  }
 
  /**
  *
  * Get the latest revision of a document with document id: $id in CouchDB.
  *
  * @throws SetteeWrongInputException
  *
  * @param $id
  * Unique ID (usually: UUID) of the document to be retrieved.
  * @return
  * database document in PHP object format.
  */
  function get_rev($id) {
  if (empty($id)) {
  throw new SetteeWrongInputException("Error: Can't query a document without a uuid.");
  }
 
  $full_uri = $this->dbname . "/" . $this->safe_urlencode($id);
  $headers = $this->rest_client->http_head($full_uri);
  if (empty($headers['Etag'])) {
  throw new SetteeRestClientException("Error: could not retrieve revision. Server unexpectedly returned empty Etag");
  }
  $etag = str_replace('"', '', $headers['Etag']);
  return $etag;
  }
 
  /**
  * Delete a document
  *
  * @param $document
  * a PHP object or JSON representation of the document that has _id and _rev fields.
  *
  * @return void
  */
  function delete($document) {
  if (!is_object($document)) {
  $document = json_decode($document);
  }
 
  $full_uri = $this->dbname . "/" . $this->safe_urlencode($document->_id) . "?rev=" . $document->_rev;
  $this->rest_client->http_delete($full_uri);
  }
 
 
  /*----------------- View-related functions --------------*/
 
  /**
  * Create a new view or update an existing one.
  *
  * @param $design_doc
  * @param $view_name
  * @param $map_src
  * Source code of the map function in Javascript
  * @param $reduce_src
  * Source code of the reduce function in Javascript (optional)
* @return
* the saved design document (with _id and _rev attached).
  */
  function save_view($design_doc, $view_name, $map_src, $reduce_src = null) {
  $obj = new stdClass();
  $obj->_id = "_design/" . urlencode($design_doc);
  $view_name = urlencode($view_name);
  $obj->views->$view_name->map = $map_src;
  if (!empty($reduce_src)) {
  $obj->views->$view_name->reduce = $reduce_src;
  }
 
  // allow safe updates (even if slightly slower due to extra: rev-detection check).
  return $this->save($obj, true);
  }
 
/**
* Query a view and return its results.
*
* @param $design_doc
* @param $view_name
* @param $key
* key parameter to a view. Can be a single value or an array (for a range). If passed an array, the function assumes
* the first element is startkey and the second is endkey.
* @param $descending
* return results in descending order. Please don't forget that if you are using a startkey/endkey, when you change
* order you also need to swap startkey and endkey values!
* @param $limit
* maximum number of rows to return (optional).
*
* @return
* decoded view result as a PHP object.
*/
  function get_view($design_doc, $view_name, $key = null, $descending = false, $limit = false) {
  $id = "_design/" . urlencode($design_doc);
  $view_name = urlencode($view_name);
  $id .= "/_view/$view_name";
 
  $data = array();
  if (!empty($key)) {
  if (is_string($key)) {
  $data = "key=" . '"' . $key . '"';
  }
  elseif (is_array($key)) {
  list($startkey, $endkey) = $key;
  $data = "startkey=" . '"' . $startkey . '"&' . "endkey=" . '"' . $endkey . '"';
  }
 
  if ($descending) {
  $data .= "&descending=true";
  }
  if ($limit) {
  $data .= "&limit=".$limit;
  }
  }
 
 
 
  if (empty($id)) {
  throw new SetteeWrongInputException("Error: Can't retrieve a document without a uuid.");
  }
 
  $full_uri = $this->dbname . "/" . $this->safe_urlencode($id);
  $full_uri = str_replace("%253Fgroup%253D","?group=",$full_uri);
  $full_uri = str_replace("%253Flimit%253D","?limit=",$full_uri);
  $ret = $this->rest_client->http_get($full_uri, $data);
  return $ret['decoded'];
 
  }
 
  /**
  * @param $id
  * @return
  * return a properly url-encoded id.
  */
  private function safe_urlencode($id) {
  //-- System views like _design can have "/" in their URLs.
  $id = rawurlencode($id);
  if (substr($id, 0, 1) == '_') {
  $id = str_replace('%2F', '/', $id);
  }
  return $id;
  }
 
  /** Getter for a database name */
  function get_name() {
  return $this->dbname;
  }
 
  }
  <?php
 
  /**
  * HTTP REST Client for CouchDB API
  */
  class SetteeRestClient {
 
  /**
  * HTTP Timeout in Milliseconds
  */
  const HTTP_TIMEOUT = 2000;
 
  private $base_url;
  private $curl;
 
  private static $curl_workers = array();
 
  /**
  * Singleton factory method
  */
  static function get_instance($base_url) {
 
  if (empty(self::$curl_workers[$base_url])) {
  self::$curl_workers[$base_url] = new SetteeRestClient($base_url);
  }
 
  return self::$curl_workers[$base_url];
  }
 
  /**
  * Class constructor
  */
  private function __construct($base_url) {
  $this->base_url = $base_url;
 
  $curl = curl_init();
  curl_setopt($curl, CURLOPT_USERAGENT, "Settee CouchDB Client/1.0");
  curl_setopt($curl, CURLOPT_HTTPHEADER, array('Content-Type: application/json'));
  curl_setopt($curl, CURLOPT_RETURNTRANSFER, 1);
  curl_setopt($curl, CURLOPT_HEADER, 0);
  curl_setopt($curl, CURLOPT_FOLLOWLOCATION, 1);
  curl_setopt($curl, CURLOPT_TIMEOUT_MS, self::HTTP_TIMEOUT);
  curl_setopt($curl, CURLOPT_FORBID_REUSE, false); // Connection-pool for CURL
 
  $this->curl = $curl;
 
  }
 
  /**
  * Class destructor cleans up any resources
  */
  function __destruct() {
  curl_close($this->curl);
  }
 
  /**
  * HTTP HEAD
  *
  * @return
  * Raw HTTP Headers of the response.
  *
  * @see: http://www.php.net/manual/en/context.params.php
  *
  */
  function http_head($uri) {
  curl_setopt($this->curl, CURLOPT_HEADER, 1);
 
  $full_url = $this->get_full_url($uri);
  curl_setopt($this->curl, CURLOPT_URL, $full_url);
  curl_setopt($this->curl, CURLOPT_CUSTOMREQUEST, 'HEAD');
  curl_setopt($this->curl, CURLOPT_NOBODY, true);
 
 
  $response = curl_exec($this->curl);
  // Restore default values
  curl_setopt($this->curl, CURLOPT_NOBODY, false);
  curl_setopt($this->curl, CURLOPT_HEADER, false);
 
  $resp_code = curl_getinfo($this->curl, CURLINFO_HTTP_CODE);
  if ($resp_code == 404 ) {
  throw new SetteeRestClientException("Couch document not found at: '$full_url'");
  }
 
  if (function_exists('http_parse_headers')) {
  $headers = http_parse_headers($response);
  }
  else {
  $headers = $this->_http_parse_headers($response);
  }
 
  return $headers;
  }
 
  /**
  * Backup PHP impl. for when PECL http_parse_headers() function is not available
  *
  * @param $header
  * @return array
  * @source http://www.php.net/manual/en/function.http-parse-headers.php#77241
  */
  private function _http_parse_headers( $header ) {
  $retVal = array();
  $fields = explode("\r\n", preg_replace('/\x0D\x0A[\x09\x20]+/', ' ', $header));
  foreach( $fields as $field ) {
  if( preg_match('/([^:]+): (.+)/m', $field, $match) ) {
  $match[1] = preg_replace('/(?<=^|[\x09\x20\x2D])./e', 'strtoupper("\0")', strtolower(trim($match[1])));
  if( isset($retVal[$match[1]]) ) {
  $retVal[$match[1]] = array($retVal[$match[1]], $match[2]);
  } else {
  $retVal[$match[1]] = trim($match[2]);
  }
  }
  }
  return $retVal;
  }
 
  /**
  * HTTP GET
  */
  function http_get($uri, $data = array()) {
  $data = (is_array($data)) ? http_build_query($data) : $data;
  if (!empty($data)) {
  $uri .= "?$data";
  }
  return $this->http_request('GET', $uri);
  }
 
  /**
  * HTTP PUT
  */
  function http_put($uri, $data = array()) {
  return $this->http_request('PUT', $uri, $data);
  }
 
  /**
  * HTTP DELETE
  */
  function http_delete($uri, $data = array()) {
  return $this->http_request('DELETE', $uri, $data);
  }
 
  /**
  * Generic implementation of a HTTP Request.
  *
  * @param $http_method
  * @param $uri
  * @param array $data
  * @return
  * an array containing json and decoded versions of the response.
  */
  private function http_request($http_method, $uri, $data = array()) {
  $data = (is_array($data)) ? http_build_query($data) : $data;
 
  if (!empty($data)) {
  curl_setopt($this->curl, CURLOPT_HTTPHEADER, array('Content-Length: ' . strlen($data)));
  curl_setopt($this->curl, CURLOPT_POSTFIELDS, $data);
  }
 
  curl_setopt($this->curl, CURLOPT_URL, $this->get_full_url($uri));
  curl_setopt($this->curl, CURLOPT_CUSTOMREQUEST, $http_method);
 
  $response = curl_exec($this->curl);
  $response_decoded = $this->decode_response($response);
  $response = array('json' => $response, 'decoded'=>$response_decoded);
 
  $this->check_status($response,$uri);
 
  return $response;
  }
 
  /**
  * Check http status for safe return codes
  *
  * @throws SetteeRestClientException
  */
  private function check_status($response,$uri) {
  $resp_code = curl_getinfo($this->curl, CURLINFO_HTTP_CODE);
 
  if ($resp_code < 199 || $resp_code > 399 || !empty($response['decoded']->error)) {
  $msg = "CouchDB returned: \"HTTP 1.1. $resp_code\". ERROR: " . $response['json'] . $uri;
  throw new SetteeRestClientException($msg);
  }
  }
 
  /**
  * @param $path
  * Full path to a file (e.g. as returned by PHP's realpath function).
* @return
* detected mime type string (defaults to "application/octet-stream").
  */
  public function file_mime_type ($path) {
  $ftype = 'application/octet-stream';
 
  if (function_exists("finfo_file")) {
  $finfo = new finfo(FILEINFO_MIME_TYPE | FILEINFO_SYMLINK);
  $fres = $finfo->file($path);
  if (is_string($fres) && !empty($fres)) {
  $ftype = $fres;
  }
  }
 
  return $ftype;
  }
 
  /**
  * @param $content
  * content of a file in a string buffer format.
* @return
* detected mime type string (defaults to "application/octet-stream").
  */
  public function content_mime_type ($content) {
  $ftype = 'application/octet-stream';
 
  if (function_exists("finfo_file")) {
  $finfo = new finfo(FILEINFO_MIME_TYPE | FILEINFO_SYMLINK);
  $fres = $finfo->buffer($content);
  if (is_string($fres) && !empty($fres)) {
  $ftype = $fres;
  }
  }
 
  return $ftype;
  }
 
 
  /**
  *
  * @param $json
  * json-encoded response from CouchDB
  *
  * @return
  * decoded PHP object
  */
  private function decode_response($json) {
  return json_decode($json);
  }
 
  /**
  * Get full URL from a partial one
  */
  private function get_full_url($uri) {
  // We do not want "/", "?", "&" and "=" separators to be encoded!!!
  $uri = str_replace(array('%2F', '%3F', '%3D', '%26'), array('/', '?', '=', '&'), urlencode($uri));
  return $this->base_url . '/' . $uri;
  }
  }
 
  class SetteeRestClientException extends Exception {}
  <?php
 
  /**
  * CouchDB Server Manager
  */
  class SetteeServer {
 
  /**
  * Base URL of the CouchDB REST API
  */
  private $conn_url;
 
  /**
  * HTTP REST Client instance
  */
  protected $rest_client;
 
 
  /**
  * Class constructor
  *
  * @param $conn_url
  * (optional) URL of the CouchDB server to connect to. Default value: http://127.0.0.1:5984
  */
  function __construct($conn_url = "http://127.0.0.1:5984") {
  $this->conn_url = rtrim($conn_url, ' /');
  $this->rest_client = SetteeRestClient::get_instance($this->conn_url);
  }
 
  /**
  * Create database
  *
  * @param $db
  * Either a database object or a String name of the database.
  *
  * @return
  * json string from the server.
  *
  * @throws SetteeCreateDatabaseException
  */
  function create_db($db) {
  if ($db instanceof SetteeDatabase) {
  $db = $db->get_name();
  }
  $ret = $this->rest_client->http_put($db);
  if (!empty($ret['decoded']->error)) {
  throw new SetteeDatabaseException("Could not create database: " . $ret["json"]);
  }
  return $ret['decoded'];
  }
 
  /**
  * Drop database
  *
  * @param $db
  * Either a database object or a String name of the database.
  *
  * @return
  * json string from the server.
  *
  * @throws SetteeDropDatabaseException
  */
  function drop_db($db) {
  if ($db instanceof SetteeDatabase) {
  $db = $db->get_name();
  }
  $ret = $this->rest_client->http_delete($db);
  if (!empty($ret['decoded']->error)) {
  throw new SetteeDatabaseException("Could not create database: " . $ret["json"]);
  }
  return $ret['decoded'];
  }
 
  /**
  * Instantiate a database object
  *
  * @param $dbname
* name of the database
  *
  * @return SetteeDatabase
  * new SetteeDatabase instance.
  */
  function get_db($dbname) {
  return new SetteeDatabase($this->conn_url, $dbname);
  }
 
 
  /**
  * Return an array containing all databases
  *
  * @return Array
  * an array of database names in the CouchDB instance
  */
  function list_dbs() {
  $ret = $this->rest_client->http_get('_all_dbs');
  if (!empty($ret['decoded']["error"])) {
  throw new SetteeDatabaseException("Could not get list of databases: " . $ret["json"]);
  }
  return $ret['decoded'];
  }
 
  }
 
  class SetteeServerErrorException extends Exception {}
  class SetteeDatabaseException extends Exception {}
  class SetteeWrongInputException extends Exception {}
  <?php
 
  require(dirname(__FILE__) . '/classes/SetteeRestClient.class.php');
 
  require(dirname(__FILE__) . '/classes/SetteeServer.class.php');
  require(dirname(__FILE__) . '/classes/SetteeDatabase.class.php');
  1. Make sure you have latest PEAR PHPUnit installed:
> sudo pear upgrade PEAR
  > sudo pear channel-discover pear.phpunit.de
  > sudo pear install phpunit/PHPUnit
 
  2. You need PHP 5.3.2 or later to run some tests that deal with private or protected methods. If you use an earlier
  version of PHP, these tests will be skipped.
 
  3. Run all tests with:
  > phpunit .
  <?php
 
  require_once (realpath(dirname(__FILE__) . '/../src/settee.php'));
  require_once (dirname(__FILE__) . '/SetteeTestCase.class.php');
 
  class SetteeDatabaseTest extends SetteeTestCase {
 
  private $db;
 
  public function setUp() {
  parent::setUp();
  $dbname = "settee_tests_" . md5(microtime(true));
  $this->db = $this->server->get_db($dbname);
  $this->server->create_db($this->db);
  }
 
  public function test_document_lifecycle_objectbased() {
  $doc = new StdClass();
  $doc->firstName = "Irakli";
  $doc->lastName = "Nadareishvili";
  $doc->IQ = 200;
  $doc->hobbies = array("skiing", "swimming");
  $doc->pets = array ("whitey" => "labrador", "mikey" => "pug");
 
  $doc = $this->db->save($doc);
  $this->assertTrue(!empty($doc->_id) && !empty($doc->_rev), "Document creation success [object-based]");
 
  $_rev = $doc->_rev;
  $doc = $this->db->get($doc->_id);
  $this->assertEquals($_rev, $doc->_rev, "Document retrieval success [object-based] test");
 
  $doc->firstName = "Ika";
  $db_doc = $this->db->save($doc);
  $this->assertEquals($doc->firstName, $db_doc->firstName, "Document update success [object-based]");
 
  $this->db->delete($doc);
 
 
  try {
  $doc = $this->db->get($doc->_id);
  } catch (SetteeRestClientException $e) {
  // we expect exception to fire, so this is good.
  return;
  }
 
  $this->fail('Document still available for retrieval after being deleted. [object-based]');
  }
 
   // The document lifecycle should work with a JSON string as well:
 
 
  public function test_document_lifecycle_jsonbased() {
  $doc = '{"firstName":"Irakli","lastName":"Nadareishvili","IQ":200,"hobbies":["skiing","swimming"],"pets":{"whitey":"labrador","mikey":"pug"}}';
 
  $doc = $this->db->save($doc);
  $this->assertTrue(!empty($doc->_id) && !empty($doc->_rev), "Document creation success [json-based]");
 
  $_rev = $doc->_rev;
 
  $db_doc = $this->db->get($doc->_id);
  $this->assertEquals($_rev, $db_doc->_rev, "Document retrieval success [json-based] test");
 
  $doc = '{';
  $doc .= '"_id":"' . $db_doc->_id . '",';
  $doc .= '"_rev":"' . $db_doc->_rev . '",';
  $doc .= '"firstName":"Ika","lastName":"Nadareishvili","IQ":200,"hobbies":["skiing","swimming"],"pets":{"whitey":"labrador","mikey":"pug"}}';
 
  $orig_doc = json_decode($doc);
  $db_doc = $this->db->save($doc);
  $this->assertEquals($orig_doc->firstName, $db_doc->firstName, "Document update success [json-based]");
 
  $doc = '{';
  $doc .= '"_id":"' . $db_doc->_id . '",';
  $doc .= '"_rev":"' . $db_doc->_rev . '",';
  $doc .= '"firstName":"Ika","lastName":"Nadareishvili","IQ":200,"hobbies":["skiing","swimming"],"pets":{"whitey":"labrador","mikey":"pug"}}';
 
  $this->db->delete($doc);
 
  try {
  $doc = $this->db->get($db_doc->_id);
  } catch (SetteeRestClientException $e) {
  // we expect exception to fire, so this is good.
  return;
  }
 
   $this->fail('Document still available for retrieval after being deleted. [json-based]');
  }
 
  public function test_invalid_document() {
  $doc = 12345;
  try {
  $doc = $this->db->save($doc);
  } catch (SetteeRestClientException $e) {
  // we expect exception to fire, so this is good.
  return;
  }
 
  $this->fail('Document saved with invalid format');
  }
 
  public function test_get_rev() {
  $doc = new stdClass();
  $doc->_id = "some_fixed_id";
  $doc = $this->db->save($doc);
 
  $_rev = $doc->_rev;
 
  $db_rev = $this->db->get_rev($doc->_id);
  $this->assertEquals($_rev, $db_rev, "Document Revision retrieval success");
 
  // _rev is now attached to this object due to last ->save() call
  $doc->_id = "some_fixed_id";
  $doc->title = "Some Fixed ID";
  $doc = $this->db->save($doc);
 
  $_rev = $doc->_rev;
 
  $db_rev = $this->db->get_rev($doc->_id);
  $this->assertEquals($_rev, $db_rev, "Document Revision retrieval success after re-save");
 
  }
 
  public function test_save_auto_revision_detection() {
  $doc = new stdClass();
  $doc->_id = "some_fixed_id";
  $this->db->save($doc);
 
  $doc = new stdClass();
  $doc->_id = "some_fixed_id";
  $doc->extra_field = "some other value";
 
  $new_doc = $this->db->save($doc, true);
  $this->assertEquals ($new_doc->extra_field, "some other value", "Testing auto-rev detection by save method");
  }
 
  public function test_inline_attachment_json() {
  $doc = '{
  "_id":"attachment_doc",
  "_attachments":
  {
  "foo.txt":
  {
  "content_type":"text\/plain",
  "data": "VGhpcyBpcyBhIGJhc2U2NCBlbmNvZGVkIHRleHQ="
  }
  }
  }';
  $db_doc = $this->db->save($doc);
  $this->assertTrue(is_object($db_doc->_attachments), "Inline attachment save successful [json-based]");
  }
 
  public function test_inline_attachment_obj_content() {
  $doc = new stdClass();
  $doc->_id = "attachment_doc";
  $this->db->add_attachment($doc, "foo.txt", "This is some text to be encoded", "text/plain");
  $db_doc = $this->db->save($doc);
  $this->assertTrue(is_object($db_doc->_attachments), "Inline attachment save successful [object-based]");
 
  $doc = new stdClass();
  $doc->_id = "attachment_doc_autodetect";
  $this->db->add_attachment($doc, "foo.txt", "This is some other text to be encoded");
  $db_doc = $this->db->save($doc);
  $this->assertTrue(is_object($db_doc->_attachments), "Inline attachment save successful [object-based, mime auto-detection]");
  }
 
  public function test_inline_attachment_obj_file() {
  $doc = new stdClass();
  $doc->_id = "attachment_doc";
  $file_path = dirname(__FILE__) . "/resources/couch-logo.pdf";
  $this->db->add_attachment_file($doc, "foo.pdf", $file_path, "application/pdf");
  $db_doc = $this->db->save($doc);
  $this->assertTrue(is_object($db_doc->_attachments), "Inline attachment of file successful");
 
  $doc = new stdClass();
  $doc->_id = "attachment_doc_autodetect";
  $file_path = dirname(__FILE__) . "/resources/couch-logo.pdf";
  $this->db->add_attachment_file($doc, "foo.pdf", $file_path);
  $db_doc = $this->db->save($doc);
  $this->assertTrue(is_object($db_doc->_attachments), "Inline attachment of file successful w/ mime type auto-detection");
  }
 
  public function test_view_lifecycle() {
  $this->_create_some_sample_docs();
 
  $map_src = <<<VIEW
  function(doc) {
  if(doc.date && doc.title) {
  emit(doc.date, doc.title);
  }
  }
  VIEW;
 
  $view = $this->db->save_view("foo_views", "bar_view", $map_src);
  $this->assertEquals("_design/foo_views", $view->_id, "View Creation Success");
 
  $view = $this->db->get_view("foo_views", "bar_view");
  $this->assertEquals(3, $view->total_rows, "Running a View Success");
 
  $map_src = <<<VIEW
  function(doc) {
  if(doc.date) {
  emit(doc.date, doc);
  }
  }
  VIEW;
 
  $view = $this->db->save_view("foo_views", "bar_view", $map_src);
  $this->assertEquals("_design/foo_views", $view->_id, "View Update Success");
 
  $view = $this->db->get_view("foo_views", "bar_view");
  $this->assertEquals("Well hello and welcome to my new blog...", $view->rows[0]->value->body, "Running a View Success (after update)");
 
  $view = $this->db->get_view("foo_views", "bar_view", "2009/02/17 21:13:39");
  $this->assertEquals("Bought a Cat", $view->rows[0]->value->title, "Running a Parametrized View");
 
  $view = $this->db->get_view("foo_views", "bar_view", array("2009/01/30 18:04:11", "2009/02/17 21:13:39"));
  $this->assertEquals("Biking", $view->rows[0]->value->title, "Running a Parametrized View with range");
 
  $view = $this->db->get_view("foo_views", "bar_view", array("2009/02/17 21:13:39", "2009/01/30 18:04:11"), true);
  $this->assertEquals("Bought a Cat", $view->rows[0]->value->title, "Running a Parametrized View with range, descending");
  $this->assertEquals(2, count($view->rows), "Running a Parametrized View with range, descending [count]");
 
  }
 
  function test_two_views_in_a_design_doc() {
 
  $map_src = <<<VIEW
  function(doc) {
  if(doc.date && doc.title) {
  emit(doc.date, doc.title);
  }
  }
  VIEW;
 
  $view = $this->db->save_view("a_settee_design_doc", "foo_view", $map_src);
  $this->assertTrue(isset($view->views->foo_view), "View1 Creation Success");
 
  $view = $this->db->save_view("a_settee_design_doc", "bar_view", $map_src);
  $this->assertTrue(isset($view->views->bar_view), "View2 Creation Success");
  }
 
  /**
  * Create some sample docs for running tests on them.
  *
  * <p>This sample was taken from a wonderful book:
  * CouchDB: The Definitive Guide (Animal Guide) by J. Chris Anderson, Jan Lehnardt and Noah Slater
  * http://www.amazon.com/CouchDB-Definitive-Guide-Relax-Animal/dp/0596155891/ref=sr_1_1?ie=UTF8&qid=1311533443&sr=8-1
  *
  * @return void
  */
  private function _create_some_sample_docs() {
  $doc = new stdClass();
  $doc->_id = "biking";
  $doc->title = "Biking";
  $doc->body = "My biggest hobby is mountainbiking";
  $doc->date = "2009/01/30 18:04:11";
  $this->db->save($doc);
 
  $doc = new stdClass();
  $doc->_id = "bought-a-cat";
  $doc->title = "Bought a Cat";
  $doc->body = "I went to the the pet store earlier and brought home a little kitty...";
  $doc->date = "2009/02/17 21:13:39";
  $this->db->save($doc);
 
  $doc = new stdClass();
  $doc->_id = "hello-world";
  $doc->title = "Hello World";
  $doc->body = "Well hello and welcome to my new blog...";
  $doc->date = "2009/01/15 15:52:20";
  $this->db->save($doc);
  }
 
  public function tearDown() {
  $ret = $this->server->drop_db($this->db);
  }
 
  }
 
 
  <?php
 
  require_once (realpath(dirname(__FILE__) . '/../src/settee.php'));
  require_once (dirname(__FILE__) . '/SetteeTestCase.class.php');
 
  class SetteeRestClientTest extends SetteeTestCase {
 
  private $rest_client;
 
  public function setUp() {
  parent::setUp();
  $this->rest_client = SetteeRestClient::get_instance($this->db_url);
  }
 
  public function test_get_full_url() {
 
   //-- Can't run this test in PHP versions earlier than 5.3.2, which do not support ReflectionMethod::setAccessible().
  if (!class_exists('ReflectionMethod')) {
  return;
  }
 
   //-- Prepare for testing the private get_full_url method.
  $get_full_url_method = new ReflectionMethod('SetteeRestClient', 'get_full_url');
  $get_full_url_method->setAccessible(TRUE);
 
  $uri = 'irakli/26cede9ab9cd8fcd67895eb05200d1ea';
  //-- Equivalent to: $calc = $this->rest_client->get_full_url($uri); but for a private method.
  $calc = $get_full_url_method->invokeArgs($this->rest_client, array($uri));
  //--
  $expected = $this->db_url . '/irakli/26cede9ab9cd8fcd67895eb05200d1ea';
  $this->assertEquals($expected, $calc, "Full URL Generation with DB and ID");
 
  $uri = 'irakli/26cede9ab9cd8fcd67895eb05200d1ea?rev=2-21587f7dffc43b4100f40168f309a267';
  $calc = $get_full_url_method->invokeArgs($this->rest_client, array($uri));
  $expected = $this->db_url . '/irakli/26cede9ab9cd8fcd67895eb05200d1ea?rev=2-21587f7dffc43b4100f40168f309a267';
  $this->assertEquals($expected, $calc, "Full URL Generation with DB, ID and Single Query Parameter");
 
  $uri = 'irakli/26cede9ab9cd8fcd67895eb05200d1ea?rev=2-21587f7dffc43b4100f40168f309a267&second=foo';
  $calc = $get_full_url_method->invokeArgs($this->rest_client, array($uri));
  $expected = $this->db_url . '/irakli/26cede9ab9cd8fcd67895eb05200d1ea?rev=2-21587f7dffc43b4100f40168f309a267&second=foo';
  $this->assertEquals($expected, $calc, "Full URL Generation with DB, ID and Two Query Parameters");
 
  }
 
  public function test_file_mime_type() {
 
  $type = $this->rest_client->file_mime_type(dirname(__FILE__) . "/resources/couch-logo.jpg");
  $this->assertEquals("image/jpeg", $type, "Jpeg Mime Type Detection");
 
  $type = $this->rest_client->file_mime_type(dirname(__FILE__) . "/resources/couch-logo.pdf");
  $this->assertEquals("application/pdf", $type, "PDF Mime Type Detection");
 
 
  $type = $this->rest_client->file_mime_type(dirname(__FILE__) . "/resources/couch-logo.png");
  $this->assertEquals("image/png", $type, "PNG Mime Type Detection");
 
  $type = $this->rest_client->file_mime_type(dirname(__FILE__) . "/resources/couch-tag.ini");
  $this->assertEquals("text/plain", $type, "Text Mime Type Detection");
 
  $type = $this->rest_client->file_mime_type(dirname(__FILE__) . "/resources/couch-tag.xml");
  $this->assertEquals("application/xml", $type, "XML Mime Type Detection");
  }
 
  public function test_content_mime_type() {
  $content = file_get_contents(dirname(__FILE__) . "/resources/couch-logo.jpg");
  $type = $this->rest_client->content_mime_type($content);
  $this->assertEquals("image/jpeg", $type, "Jpeg Mime Type Detection");
 
  $content = file_get_contents(dirname(__FILE__) . "/resources/couch-logo.pdf");
  $type = $this->rest_client->content_mime_type($content);
  $this->assertEquals("application/pdf", $type, "PDF Mime Type Detection");
 
  $content = file_get_contents(dirname(__FILE__) . "/resources/couch-logo.png");
  $type = $this->rest_client->content_mime_type($content);
  $this->assertEquals("image/png", $type, "PNG Mime Type Detection");
 
  $content = file_get_contents(dirname(__FILE__) . "/resources/couch-tag.ini");
  $type = $this->rest_client->content_mime_type($content);
  $this->assertEquals("text/plain", $type, "Text Mime Type Detection");
 
  $content = file_get_contents(dirname(__FILE__) . "/resources/couch-tag.xml");
  $type = $this->rest_client->content_mime_type($content);
  $this->assertEquals("application/xml", $type, "XML Mime Type Detection");
  }
 
 
 
  }
 
 
  <?php
 
  require_once (realpath(dirname(__FILE__) . '/../src/settee.php'));
  require_once (dirname(__FILE__) . '/SetteeTestCase.class.php');
 
  class SetteeServerTest extends SetteeTestCase {
 
  private $dbname;
 
  public function setUp() {
  parent::setUp();
  $this->dbname = "settee_tests_" . md5(microtime(true));
  }
 
  public function test_database_lifecycle_namebased() {
  $db = $this->server->get_db($this->dbname);
  $ret = $this->server->create_db($this->dbname);
  $this->assertTrue($ret->ok, "Database Creation Success Response [name-based]");
 
  $database_list = $this->server->list_dbs();
  $this->assertTrue(is_array($database_list) && in_array($this->dbname, $database_list),
  "Verifying Database in the List on the Server [name-based]");
 
  $ret = $this->server->drop_db($this->dbname);
  $this->assertTrue($ret->ok, "Database Deletion Success Response [name-based]");
  }
 
  public function test_database_lifecycle_objectbased() {
  $db = $this->server->get_db($this->dbname);
  $ret = $this->server->create_db($db);
  $this->assertTrue($ret->ok, "Database Creation Success Response [object-based]");
 
  $database_list = $this->server->list_dbs();
  $this->assertTrue(is_array($database_list) && in_array($this->dbname, $database_list),
  "Verifying Database in the List on the Server [object-based]");
 
  $ret = $this->server->drop_db($db);
  $this->assertTrue($ret->ok, "Database Deletion Success Response [object-based]");
  }
 
  }
 
 
  <?php
 
  /**
  * Abstract parent for Settee test classes.
  */
  abstract class SetteeTestCase extends PHPUnit_Framework_TestCase {
 
  protected $server;
  protected $db_url;
  protected $db_user;
  protected $db_pass;
 
  public function setUp() {
  $this->db_url = isset($GLOBALS['db_url']) ? $GLOBALS['db_url'] : 'http://127.0.0.1:5984';
  $this->db_user = isset($GLOBALS['db_user']) ? $GLOBALS['db_user'] : 'admin';
  $this->db_pass = isset($GLOBALS['db_pass']) ? $GLOBALS['db_pass'] : 'admin';
  $this->server = new SetteeServer($this->db_url);
  }
 
  }
  <phpunit>
  <php>
  <var name="db_url" value="http://127.0.0.1:5984"/>
  <var name="db_user" value="admin"/>
  <var name="db_pass" value="passwd"/>
  </php>
  </phpunit>
 
 Binary files /dev/null and b/couchdb/settee/tests/resources/couch-logo.jpg differ
 Binary files /dev/null and b/couchdb/settee/tests/resources/couch-logo.pdf differ
 Binary files /dev/null and b/couchdb/settee/tests/resources/couch-logo.png differ
  Couchdb=relax
 
  <?xml version="1.0" encoding="UTF-8" ?>
  <tagline>
  <main>CouchDB - Relax</main>
  </tagline>
 
 
  *.pyc
<?php <?php
   
include('template.inc.php'); include('template.inc.php');
include_header_documents(""); include_header_documents("");
include_once('../include/common.inc.php'); include_once('../include/common.inc.php');
   
echo "<table> echo "<table>
<tr><th>Agency Name</th><th>Disclosure Log URL recorded?</th><th>Do we monitor this URL?</th></tr>"; <tr><th>Agency Name</th><th>Disclosure Log URL recorded?</th><th>Do we monitor this URL?</th></tr>";
$agenciesdb = $server->get_db('disclosr-agencies'); $agenciesdb = $server->get_db('disclosr-agencies');
$docsdb = $server->get_db('disclosr-documents'); $docsdb = $server->get_db('disclosr-documents');
  $agencies = 0;
  $disclogs = 0;
  $red = 0;
  $green = 0;
  $yellow = 0;
  $orange = 0;
try { try {
$rows = $agenciesdb->get_view("app", "byCanonicalName", null, true)->rows; $rows = $agenciesdb->get_view("app", "byCanonicalName", null, true)->rows;
   
   
if ($rows) { if ($rows) {
foreach ($rows as $row) { foreach ($rows as $row) {
  if ((!isset($row->value->status) || $row->value->status != "suspended") && isset($row->value->foiEmail)) {
  echo "<tr><td>";
  if (isset($row->value->website)) echo "<a href='" . $row->value->website . "'>";
  echo "<b>" . $row->value->name . "</b>";
  if (isset($row->value->website)) echo "</a>";
  if ($ENV == "DEV")
  echo "<br>(" . $row->id . ")";
  echo "</td>\n";
  $agencies++;
   
echo "<tr><td><b>" . $row->value->name . "</b>"; echo "<td>";
if ($ENV == "DEV") if (isset($row->value->FOIDocumentsURL)) {
echo "<br>(" . $row->id . ")"; $disclogs++;
echo "</td>\n"; echo '<a href="' . $row->value->FOIDocumentsURL . '">'
  . $row->value->FOIDocumentsURL . '</a>';
  if ($ENV == "DEV")
echo "<td>"; echo '<br><small>(<a href="viewDocument.php?hash=' . md5($row->value->FOIDocumentsURL) . '">'
if (isset($row->value->FOIDocumentsURL)) { . 'view local copy</a>)</small>';
echo '<a href="' . $row->value->FOIDocumentsURL . '">' } else {
. $row->value->FOIDocumentsURL . '</a>'; echo "<font color='red'><abbr title='No'>✘</abbr></font>";
if ($ENV == "DEV") }
echo '<br><small>(<a href="viewDocument.php?hash=' . md5($row->value->FOIDocumentsURL) . '">' echo "</td>\n<td>";
. 'view local copy</a>)</small>'; if (isset($row->value->FOIDocumentsURL)) {
} else { if (file_exists("./scrapers/" . $row->id . '.py')) {
echo "<font color='red'>✘</font>"; echo "<font color='green'><abbr title='Yes'>✔</abbr></font>";
  $green++;
  } else if (file_exists("./scrapers/" . $row->id . '.txt')) {
  if (trim(file_get_contents("./scrapers/" . $row->id . '.txt')) == "no disclog") {
  echo "<font color='yellow'><abbr title='No log table exists at URL to scrape'><b>◎</b></abbr></font>";
  $yellow++;
  } else {
  echo file_get_contents("./scrapers/" . $row->id . '.txt');
  echo "<font color='orange'><abbr title='Work in progress'><b>▬</b></abbr></font>";
  $orange++;
  }
  } else {
  echo "<font color='red'><abbr title='No'>✘</abbr></font>";
  $red++;
  }
  }
  echo "</td></tr>\n";
} }
echo "</td>\n<td>";  
if (isset($row->value->FOIDocumentsURL)) {  
if (file_exists("./scrapers/" . $row->id . '.py')) {  
echo "<font color='green'>✔</font>";  
} else if (file_exists("./scrapers/" . $row->id . '.txt')) {  
echo "<font color='orange'><b>▬</b></font>";  
} else {  
echo "<font color='red'>✘</font>";  
}  
}  
echo "</td></tr>\n";  
} }
} }
} catch (SetteeRestClientException $e) { } catch (SetteeRestClientException $e) {
setteErrorHandler($e); setteErrorHandler($e);
} }
echo "</table>"; echo "</table>";
  echo $agencies . " agencies, " . round(($disclogs / $agencies) * 100) . "% with disclosure logs; "
  . round(($green / $disclogs) * 100) . "% of logs with scrapers; " . round(($red / $disclogs) * 100) . "% of logs without scrapers; " . round(($orange / $disclogs) * 100) . "% of logs with work-in-progress scrapers";
   
include_footer_documents(); include_footer_documents();
?> ?>
   
import sys,os import sys,os
sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../')) sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
import scrape import scrape
from bs4 import BeautifulSoup from bs4 import BeautifulSoup
from time import mktime from time import mktime
import feedparser import feedparser
import abc import abc
import unicodedata, re import unicodedata, re
import dateutil import dateutil
from dateutil.parser import * from dateutil.parser import *
from datetime import * from datetime import *
  import codecs
   
class GenericDisclogScraper(object): class GenericDisclogScraper(object):
__metaclass__ = abc.ABCMeta __metaclass__ = abc.ABCMeta
agencyID = None agencyID = None
disclogURL = None disclogURL = None
def remove_control_chars(self, input): def remove_control_chars(self, input):
return "".join([i for i in input if ord(i) in range(32, 127)]) return "".join([i for i in input if ord(i) in range(32, 127)])
def getAgencyID(self): def getAgencyID(self):
""" disclosr agency id """ """ disclosr agency id """
if self.agencyID == None: if self.agencyID == None:
self.agencyID = os.path.basename(sys.argv[0]).replace(".py","") self.agencyID = os.path.basename(sys.argv[0]).replace(".py","")
return self.agencyID return self.agencyID
   
def getURL(self): def getURL(self):
""" disclog URL""" """ disclog URL"""
if self.disclogURL == None: if self.disclogURL == None:
agency = scrape.agencydb.get(self.getAgencyID()) agency = scrape.agencydb.get(self.getAgencyID())
self.disclogURL = agency['FOIDocumentsURL'] self.disclogURL = agency['FOIDocumentsURL']
return self.disclogURL return self.disclogURL
   
@abc.abstractmethod @abc.abstractmethod
def doScrape(self): def doScrape(self):
""" do the scraping """ """ do the scraping """
return return
   
@abc.abstractmethod @abc.abstractmethod
def getDescription(self, content, entry, doc): def getDescription(self, content, entry, doc):
""" get description""" """ get description"""
return return
   
   
   
class GenericRSSDisclogScraper(GenericDisclogScraper): class GenericRSSDisclogScraper(GenericDisclogScraper):
   
def doScrape(self): def doScrape(self):
foidocsdb = scrape.couch['disclosr-foidocuments'] foidocsdb = scrape.couch['disclosr-foidocuments']
(url,mime_type,content) = scrape.fetchURL(scrape.docsdb, self.getURL(), "foidocuments", self.getAgencyID()) (url,mime_type,content) = scrape.fetchURL(scrape.docsdb, self.getURL(), "foidocuments", self.getAgencyID())
feed = feedparser.parse(content) feed = feedparser.parse(content)
for entry in feed.entries: for entry in feed.entries:
#print entry #print entry
print entry.id print entry.id
hash = scrape.mkhash(entry.id) hash = scrape.mkhash(entry.id)
#print hash #print hash
doc = foidocsdb.get(hash) doc = foidocsdb.get(hash)
#print doc #print doc
if doc == None: if doc == None:
print "saving "+ hash print "saving "+ hash
edate = datetime.fromtimestamp(mktime( entry.published_parsed)).strftime("%Y-%m-%d") edate = datetime.fromtimestamp(mktime( entry.published_parsed)).strftime("%Y-%m-%d")
doc = {'_id': hash, 'agencyID': self.getAgencyID(), 'url': entry.link, 'docID': entry.id, doc = {'_id': hash, 'agencyID': self.getAgencyID(), 'url': entry.link, 'docID': entry.id,
"date": edate,"title": entry.title} "date": edate,"title": entry.title}
self.getDescription(entry,entry, doc) self.getDescription(entry,entry, doc)
foidocsdb.save(doc) foidocsdb.save(doc)
else: else:
print "already saved" print "already saved"
def getDescription(self, content, entry, doc): def getDescription(self, content, entry, doc):
""" get description from rss entry""" """ get description from rss entry"""
doc.update({'description': content.summary}) doc.update({'description': content.summary})
return return
   
class GenericOAICDisclogScraper(GenericDisclogScraper): class GenericOAICDisclogScraper(GenericDisclogScraper):
__metaclass__ = abc.ABCMeta __metaclass__ = abc.ABCMeta
@abc.abstractmethod @abc.abstractmethod
def getColumns(self,columns): def getColumns(self,columns):
""" rearranges columns if required """ """ rearranges columns if required """
return return
def getColumnCount(self): def getColumnCount(self):
return 5 return 5
def getDescription(self, content, entry, doc): def getDescription(self, content, entry, doc):
""" get description from rss entry""" """ get description from rss entry"""
descriptiontxt = "" descriptiontxt = ""
for string in content.stripped_strings: for string in content.stripped_strings:
descriptiontxt = descriptiontxt + " \n" + string descriptiontxt = descriptiontxt + " \n" + string
doc.update({'description': descriptiontxt}) doc.update({'description': descriptiontxt})
return return
def getTitle(self, content, entry, doc): def getTitle(self, content, entry, doc):
doc.update({'title': content.string}) doc.update({'title': (''.join(content.stripped_strings))})
return return
def getTable(self, soup): def getTable(self, soup):
return soup.table return soup.table
  def getRows(self, table):
  return table.find_all('tr')
def getDate(self, content, entry, doc): def getDate(self, content, entry, doc):
edate = parse(''.join(content.stripped_strings).strip(), dayfirst=True, fuzzy=True).strftime("%Y-%m-%d") date = ''.join(content.stripped_strings).strip()
  (a,b,c) = date.partition("(")
  date = self.remove_control_chars(a.replace("Octber","October"))
  print date
  edate = parse(date, dayfirst=True, fuzzy=True).strftime("%Y-%m-%d")
print edate print edate
doc.update({'date': edate}) doc.update({'date': edate})
return return
def getLinks(self, content, entry, doc): def getLinks(self, content, entry, doc):
links = [] links = []
for atag in entry.find_all("a"): for atag in entry.find_all("a"):
if atag.has_key('href'): if atag.has_key('href'):
links.append(scrape.fullurl(content,atag['href'])) links.append(scrape.fullurl(content,atag['href']))
if links != []: if links != []:
doc.update({'links': links}) doc.update({'links': links})
return return
   
def doScrape(self): def doScrape(self):
foidocsdb = scrape.couch['disclosr-foidocuments'] foidocsdb = scrape.couch['disclosr-foidocuments']
(url,mime_type,content) = scrape.fetchURL(scrape.docsdb, self.getURL(), "foidocuments", self.getAgencyID()) (url,mime_type,content) = scrape.fetchURL(scrape.docsdb, self.getURL(), "foidocuments", self.getAgencyID())
if content != None: if content != None:
if mime_type == "text/html" or mime_type == "application/xhtml+xml" or mime_type =="application/xml": if mime_type == "text/html" or mime_type == "application/xhtml+xml" or mime_type =="application/xml":
# http://www.crummy.com/software/BeautifulSoup/documentation.html # http://www.crummy.com/software/BeautifulSoup/documentation.html
soup = BeautifulSoup(content) soup = BeautifulSoup(content)
table = self.getTable(soup) table = self.getTable(soup)
for row in table.find_all('tr'): for row in self.getRows(table):
columns = row.find_all('td') columns = row.find_all('td')
if len(columns) == self.getColumnCount(): if len(columns) == self.getColumnCount():
(id, date, description, title, notes) = self.getColumns(columns) (id, date, title, description, notes) = self.getColumns(columns)
print ''.join(id.stripped_strings) print self.remove_control_chars(''.join(id.stripped_strings))
if id.string == None: if id.string == None:
hash = scrape.mkhash(self.remove_control_chars(url+(''.join(date.stripped_strings)))) hash = scrape.mkhash(self.remove_control_chars(url+(''.join(date.stripped_strings))))
else: else:
hash = scrape.mkhash(self.remove_control_chars(url+(''.join(id.stripped_strings)))) hash = scrape.mkhash(self.remove_control_chars(url+(''.join(id.stripped_strings))))
doc = foidocsdb.get(hash) doc = foidocsdb.get(hash)
if doc == None: if doc == None:
print "saving " +hash print "saving " +hash
doc = {'_id': hash, 'agencyID': self.getAgencyID(), 'url': self.getURL(), 'docID': id.string} doc = {'_id': hash, 'agencyID': self.getAgencyID(), 'url': self.getURL(), 'docID': (''.join(id.stripped_strings))}
self.getLinks(self.getURL(),row,doc) self.getLinks(self.getURL(),row,doc)
self.getTitle(title,row, doc) self.getTitle(title,row, doc)
self.getDate(date,row, doc) self.getDate(date,row, doc)
self.getDescription(description,row, doc) self.getDescription(description,row, doc)
if notes != None: if notes != None:
doc.update({ 'notes': notes.string}) doc.update({ 'notes': (''.join(notes.stripped_strings))})
foidocsdb.save(doc) foidocsdb.save(doc)
else: else:
print "already saved "+hash print "already saved "+hash
elif len(row.find_all('th')) == self.getColumnCount(): elif len(row.find_all('th')) == self.getColumnCount():
print "header row" print "header row"
else: else:
print "ERROR number of columns incorrect" print "ERROR number of columns incorrect"
print row print row
   
<?php <?php
   
include('template.inc.php'); include('template.inc.php');
include_header_documents(""); include_header_documents("");
include_once('../include/common.inc.php'); include_once('../include/common.inc.php');
  $startkey = (isset($_REQUEST['start_key']) ? $_REQUEST['start_key'] : '9999-99-99');
?> ?>
<?php <?php
   
   
   
$agenciesdb = $server->get_db('disclosr-agencies'); $agenciesdb = $server->get_db('disclosr-agencies');
   
$idtoname = Array(); $idtoname = Array();
foreach ($agenciesdb->get_view("app", "byCanonicalName")->rows as $row) { foreach ($agenciesdb->get_view("app", "byCanonicalName")->rows as $row) {
$idtoname[$row->id] = trim($row->value->name); $idtoname[$row->id] = trim($row->value->name);
} }
$foidocsdb = $server->get_db('disclosr-foidocuments'); $foidocsdb = $server->get_db('disclosr-foidocuments');
try { try {
$rows = $foidocsdb->get_view("app", "byDate", Array('9999-99-99','0000-00-00'), true)->rows; $rows = $foidocsdb->get_view("app", "byDate", Array($startkey, '0000-00-00'), true, 20)->rows;
   
   
if ($rows) { if ($rows) {
foreach ($rows as $row) { foreach ($rows as $key => $row) {
displayLogEntry($row,$idtoname); echo displayLogEntry($row, $idtoname);
  $endkey = $row->key;
} }
} }
} catch (SetteeRestClientException $e) { } catch (SetteeRestClientException $e) {
setteErrorHandler($e); setteErrorHandler($e);
} }
  echo "<a href='?start_key=$endkey'>next page</a>";
include_footer_documents(); include_footer_documents();
?> ?>
   
# www.robotstxt.org/ # www.robotstxt.org/
# http://code.google.com/web/controlcrawlindex/ # http://code.google.com/web/controlcrawlindex/
   
User-agent: * User-agent: *
  Disallow: /admin/
  Sitemap: http://disclosurelo.gs/sitemap.xml.php
<?php <?php
   
// Agency X updated Y, new files, diff of plain text/link text, // Agency X updated Y, new files, diff of plain text/link text,
// feed for just one agency or all // feed for just one agency or all
// This is a minimal example of using the Universal Feed Generator Class // This is a minimal example of using the Universal Feed Generator Class
include("lib/FeedWriter.php"); include("../lib/FeedWriter/FeedTypes.php");
  include_once('../include/common.inc.php');
//Creating an instance of FeedWriter class. //Creating an instance of FeedWriter class.
$TestFeed = new FeedWriter(RSS2); $TestFeed = new RSS2FeedWriter();
//Setting the channel elements //Setting the channel elements
//Use wrapper functions for common channel elements //Use wrapper functions for common channel elements
$TestFeed->setTitle('Last Modified - All'); $TestFeed->setTitle('Last Modified - All');
$TestFeed->setLink('http://disclosr.lambdacomplex.org/rss.xml.php'); $TestFeed->setLink('http://disclosurelo.gs/rss.xml.php');
$TestFeed->setDescription('This is test of creating a RSS 2.0 feed Universal Feed Writer'); $TestFeed->setDescription('Latest entries');
  $TestFeed->setChannelElement('language', 'en-us');
  $TestFeed->setChannelElement('pubDate', date(DATE_RSS, time()));
//Retrieving information from the database //Retrieving information from the database
$rows = $db->get_view("app", "byLastModified")->rows; $idtoname = Array();
  $agenciesdb = $server->get_db('disclosr-agencies');
  foreach ($agenciesdb->get_view("app", "byCanonicalName")->rows as $row) {
  $idtoname[$row->id] = trim($row->value->name);
  }
  $foidocsdb = $server->get_db('disclosr-foidocuments');
  $rows = $foidocsdb->get_view("app", "byDate", Array('9999-99-99', '0000-00-00'), true, 50)->rows;
//print_r($rows); //print_r($rows);
foreach ($rows as $row) { foreach ($rows as $row) {
//Create an empty FeedItem //Create an empty FeedItem
$newItem = $TestFeed->createNewItem(); $newItem = $TestFeed->createNewItem();
//Add elements to the feed item //Add elements to the feed item
$newItem->setTitle($row['name']); $newItem->setTitle($row->value->title);
$newItem->setLink($row['id']); $newItem->setLink("view.php?id=".$row->value->_id);
$newItem->setDate(date("c", $row['metadata']['lastModified'])); $newItem->setDate(date("c", strtotime($row->value->date)));
$newItem->setDescription($row['name']); $newItem->setDescription(displayLogEntry($row,$idtoname));
  $newItem->addElement('guid', $row->value->_id,array('isPermaLink'=>'true'));
//Now add the feed item //Now add the feed item
$TestFeed->addItem($newItem); $TestFeed->addItem($newItem);
} }
//OK. Everything is done. Now generate the feed. //OK. Everything is done. Now generate the feed.
$TestFeed->genarateFeed(); $TestFeed->generateFeed();
?> ?>
   
  for f in scrapers/*.py; do echo "Processing $f ..."; python "$f"; done
 
 
#http://packages.python.org/CouchDB/client.html #http://packages.python.org/CouchDB/client.html
import couchdb import couchdb
import urllib2 import urllib2
from BeautifulSoup import BeautifulSoup from BeautifulSoup import BeautifulSoup
import re import re
import hashlib import hashlib
from urlparse import urljoin from urlparse import urljoin
import time import time
import os import os
import mimetypes import mimetypes
import re import re
import urllib import urllib
import urlparse import urlparse
   
def mkhash(input): def mkhash(input):
return hashlib.md5(input).hexdigest().encode("utf-8") return hashlib.md5(input).hexdigest().encode("utf-8")
   
def canonurl(url): def canonurl(url):
r"""Return the canonical, ASCII-encoded form of a UTF-8 encoded URL, or '' r"""Return the canonical, ASCII-encoded form of a UTF-8 encoded URL, or ''
if the URL looks invalid. if the URL looks invalid.
>>> canonurl('\xe2\x9e\xa1.ws') # tinyarro.ws >>> canonurl('\xe2\x9e\xa1.ws') # tinyarro.ws
'http://xn--hgi.ws/' 'http://xn--hgi.ws/'
""" """
# strip spaces at the ends and ensure it's prefixed with 'scheme://' # strip spaces at the ends and ensure it's prefixed with 'scheme://'
url = url.strip() url = url.strip()
if not url: if not url:
return '' return ''
if not urlparse.urlsplit(url).scheme: if not urlparse.urlsplit(url).scheme:
url = 'http://' + url url = 'http://' + url
   
# turn it into Unicode # turn it into Unicode
#try: #try:
# url = unicode(url, 'utf-8') # url = unicode(url, 'utf-8')
#except UnicodeDecodeError: #except UnicodeDecodeError:
# return '' # bad UTF-8 chars in URL # return '' # bad UTF-8 chars in URL
   
# parse the URL into its components # parse the URL into its components
parsed = urlparse.urlsplit(url) parsed = urlparse.urlsplit(url)
scheme, netloc, path, query, fragment = parsed scheme, netloc, path, query, fragment = parsed
   
# ensure scheme is a letter followed by letters, digits, and '+-.' chars # ensure scheme is a letter followed by letters, digits, and '+-.' chars
if not re.match(r'[a-z][-+.a-z0-9]*$', scheme, flags=re.I): if not re.match(r'[a-z][-+.a-z0-9]*$', scheme, flags=re.I):
return '' return ''
scheme = str(scheme) scheme = str(scheme)
   
# ensure domain and port are valid, eg: sub.domain.<1-to-6-TLD-chars>[:port] # ensure domain and port are valid, eg: sub.domain.<1-to-6-TLD-chars>[:port]
match = re.match(r'(.+\.[a-z0-9]{1,6})(:\d{1,5})?$', netloc, flags=re.I) match = re.match(r'(.+\.[a-z0-9]{1,6})(:\d{1,5})?$', netloc, flags=re.I)
if not match: if not match:
return '' return ''
domain, port = match.groups() domain, port = match.groups()
netloc = domain + (port if port else '') netloc = domain + (port if port else '')
netloc = netloc.encode('idna') netloc = netloc.encode('idna')
   
# ensure path is valid and convert Unicode chars to %-encoded # ensure path is valid and convert Unicode chars to %-encoded
if not path: if not path:
path = '/' # eg: 'http://google.com' -> 'http://google.com/' path = '/' # eg: 'http://google.com' -> 'http://google.com/'
path = urllib.quote(urllib.unquote(path.encode('utf-8')), safe='/;') path = urllib.quote(urllib.unquote(path.encode('utf-8')), safe='/;')
   
# ensure query is valid # ensure query is valid
query = urllib.quote(urllib.unquote(query.encode('utf-8')), safe='=&?/') query = urllib.quote(urllib.unquote(query.encode('utf-8')), safe='=&?/')
   
# ensure fragment is valid # ensure fragment is valid
fragment = urllib.quote(urllib.unquote(fragment.encode('utf-8'))) fragment = urllib.quote(urllib.unquote(fragment.encode('utf-8')))
   
# piece it all back together, truncating it to a maximum of 4KB # piece it all back together, truncating it to a maximum of 4KB
url = urlparse.urlunsplit((scheme, netloc, path, query, fragment)) url = urlparse.urlunsplit((scheme, netloc, path, query, fragment))
return url[:4096] return url[:4096]
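Worked examples of the normalisation above, in the same doctest style as the docstring (the inputs are hypothetical):
>>> canonurl('example.com/foo bar')   # scheme added, space %-encoded
'http://example.com/foo%20bar'
>>> canonurl('not a url')             # no valid domain, so rejected
''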
   
def fullurl(url,href): def fullurl(url,href):
href = href.replace(" ","%20") href = href.replace(" ","%20")
href = re.sub('#.*$','',href) href = re.sub('#.*$','',href)
return urljoin(url,href) return urljoin(url,href)
   
#http://diveintopython.org/http_web_services/etags.html #http://diveintopython.org/http_web_services/etags.html
class NotModifiedHandler(urllib2.BaseHandler): class NotModifiedHandler(urllib2.BaseHandler):
def http_error_304(self, req, fp, code, message, headers): def http_error_304(self, req, fp, code, message, headers):
addinfourl = urllib2.addinfourl(fp, headers, req.get_full_url()) addinfourl = urllib2.addinfourl(fp, headers, req.get_full_url())
addinfourl.code = code addinfourl.code = code
return addinfourl return addinfourl
   
def fetchURL(docsdb, url, fieldName, agencyID, scrape_again=True): def fetchURL(docsdb, url, fieldName, agencyID, scrape_again=True):
url = canonurl(url) url = canonurl(url)
hash = mkhash(url) hash = mkhash(url)
req = urllib2.Request(url) req = urllib2.Request(url)
print "Fetching %s (%s)" % (url,hash) print "Fetching %s (%s)" % (url,hash)
if url.startswith("mailto") or url.startswith("javascript") or url.startswith("#") or url == None or url == "": if url.startswith("mailto") or url.startswith("javascript") or url.startswith("#") or url == None or url == "":
print "Not a valid HTTP url" print "Not a valid HTTP url"
return (None,None,None) return (None,None,None)
doc = docsdb.get(hash) doc = docsdb.get(hash)
if doc == None: if doc == None:
doc = {'_id': hash, 'agencyID': agencyID, 'url': url, 'fieldName':fieldName} doc = {'_id': hash, 'agencyID': agencyID, 'url': url, 'fieldName':fieldName}
else: else:
if (('page_scraped' in doc) and (time.time() - doc['page_scraped']) < 60*60*24*14): if (('page_scraped' in doc) and (time.time() - doc['page_scraped']) < 60*60*24*14):
print "Uh oh, trying to scrape URL again too soon!" print "Uh oh, trying to scrape URL again too soon!"
last_attachment_fname = doc["_attachments"].keys()[-1] last_attachment_fname = doc["_attachments"].keys()[-1]
last_attachment = docsdb.get_attachment(doc,last_attachment_fname) last_attachment = docsdb.get_attachment(doc,last_attachment_fname)
content = last_attachment content = last_attachment
return (doc['url'],doc['mime_type'],content) return (doc['url'],doc['mime_type'],content)
if scrape_again == False: if scrape_again == False:
print "Not scraping this URL again as requested" print "Not scraping this URL again as requested"
return (None,None,None) return (None,None,None)
   
time.sleep(3) # wait 3 seconds to give webserver time to recover time.sleep(3) # wait 3 seconds to give webserver time to recover
req.add_header("User-Agent", "Mozilla/4.0 (compatible; Prometheus webspider; owner maxious@lambdacomplex.org)") req.add_header("User-Agent", "Mozilla/4.0 (compatible; Prometheus webspider; owner maxious@lambdacomplex.org)")
#if there is a previous version stored in couchdb, load caching helper tags #if there is a previous version stored in couchdb, load caching helper tags
if doc.has_key('etag'): if doc.has_key('etag'):
req.add_header("If-None-Match", doc['etag']) req.add_header("If-None-Match", doc['etag'])
if doc.has_key('last_modified'): if doc.has_key('last_modified'):
req.add_header("If-Modified-Since", doc['last_modified']) req.add_header("If-Modified-Since", doc['last_modified'])
opener = urllib2.build_opener(NotModifiedHandler()) opener = urllib2.build_opener(NotModifiedHandler())
try: try:
url_handle = opener.open(req) url_handle = opener.open(req)
doc['url'] = url_handle.geturl() # may have followed a redirect to a new url doc['url'] = url_handle.geturl() # may have followed a redirect to a new url
headers = url_handle.info() # the addinfourls have the .info() too headers = url_handle.info() # the addinfourls have the .info() too
doc['etag'] = headers.getheader("ETag") doc['etag'] = headers.getheader("ETag")
doc['last_modified'] = headers.getheader("Last-Modified") doc['last_modified'] = headers.getheader("Last-Modified")
doc['date'] = headers.getheader("Date") doc['date'] = headers.getheader("Date")
doc['page_scraped'] = time.time() doc['page_scraped'] = time.time()
doc['web_server'] = headers.getheader("Server") doc['web_server'] = headers.getheader("Server")
doc['via'] = headers.getheader("Via") doc['via'] = headers.getheader("Via")
doc['powered_by'] = headers.getheader("X-Powered-By") doc['powered_by'] = headers.getheader("X-Powered-By")
doc['file_size'] = headers.getheader("Content-Length") doc['file_size'] = headers.getheader("Content-Length")
content_type = headers.getheader("Content-Type") content_type = headers.getheader("Content-Type")
if content_type != None: if content_type != None:
doc['mime_type'] = content_type.split(";")[0] doc['mime_type'] = content_type.split(";")[0]
else: else:
(type,encoding) = mimetypes.guess_type(url) (type,encoding) = mimetypes.guess_type(url)
doc['mime_type'] = type doc['mime_type'] = type
if hasattr(url_handle, 'code'): if hasattr(url_handle, 'code'):
if url_handle.code == 304: if url_handle.code == 304:
print "the web page has not been modified" print "the web page has not been modified"
return (None,None,None) return (None,None,None)
else: else:
content = url_handle.read() content = url_handle.read()
docsdb.save(doc) docsdb.save(doc)
doc = docsdb.get(hash) # need to get a _rev doc = docsdb.get(hash) # need to get a _rev
docsdb.put_attachment(doc, content, str(time.time())+"-"+os.path.basename(url), doc['mime_type']) docsdb.put_attachment(doc, content, str(time.time())+"-"+os.path.basename(url), doc['mime_type'])
return (doc['url'], doc['mime_type'], content) return (doc['url'], doc['mime_type'], content)
#store as attachment epoch-filename #store as attachment epoch-filename
except urllib2.URLError as e: except urllib2.URLError as e:
error = "" error = ""
if hasattr(e, 'reason'): if hasattr(e, 'reason'):
error = "error %s in downloading %s" % (str(e.reason), url) error = "error %s in downloading %s" % (str(e.reason), url)
elif hasattr(e, 'code'): elif hasattr(e, 'code'):
error = "error %s in downloading %s" % (e.code, url) error = "error %s in downloading %s" % (e.code, url)
print error print error
doc['error'] = error doc['error'] = error
docsdb.save(doc) docsdb.save(doc)
return (None,None,None) return (None,None,None)
   
   
   
def scrapeAndStore(docsdb, url, depth, fieldName, agencyID): def scrapeAndStore(docsdb, url, depth, fieldName, agencyID):
(url,mime_type,content) = fetchURL(docsdb, url, fieldName, agencyID) (url,mime_type,content) = fetchURL(docsdb, url, fieldName, agencyID)
badURLs = ["http://www.ausport.gov.au/supporting/funding/grants_and_scholarships/grant_funding_report"] badURLs = ["http://www.ausport.gov.au/supporting/funding/grants_and_scholarships/grant_funding_report"]
if content != None and depth > 0 and url not in badURLs: if content != None and depth > 0 and url not in badURLs:
if mime_type == "text/html" or mime_type == "application/xhtml+xml" or mime_type =="application/xml": if mime_type == "text/html" or mime_type == "application/xhtml+xml" or mime_type =="application/xml":
# http://www.crummy.com/software/BeautifulSoup/documentation.html # http://www.crummy.com/software/BeautifulSoup/documentation.html
soup = BeautifulSoup(content) soup = BeautifulSoup(content)
navIDs = soup.findAll(id=re.compile('nav|Nav|menu|bar|left|right|sidebar|more-links|breadcrumb|footer|header')) navIDs = soup.findAll(id=re.compile('nav|Nav|menu|bar|left|right|sidebar|more-links|breadcrumb|footer|header'))
for nav in navIDs: for nav in navIDs:
print "Removing element", nav['id'] print "Removing element", nav['id']
nav.extract() nav.extract()
navClasses = soup.findAll(attrs={'class' : re.compile('nav|menu|bar|left|right|sidebar|more-links|breadcrumb|footer|header')}) navClasses = soup.findAll(attrs={'class' : re.compile('nav|menu|bar|left|right|sidebar|more-links|breadcrumb|footer|header')})
for nav in navClasses: for nav in navClasses:
print "Removing element", nav['class'] print "Removing element", nav['class']
nav.extract() nav.extract()
links = soup.findAll('a') # soup.findAll('a', id=re.compile("^p-")) links = soup.findAll('a') # soup.findAll('a', id=re.compile("^p-"))
linkurls = set([]) linkurls = set([])
for link in links: for link in links:
if link.has_key("href"): if link.has_key("href"):
if link['href'].startswith("http"): if link['href'].startswith("http"):
# lets not do external links for now # lets not do external links for now
# linkurls.add(link['href']) # linkurls.add(link['href'])
None None
if link['href'].startswith("mailto"): if link['href'].startswith("mailto"):
# not http # not http
None None
if link['href'].startswith("javascript"): if link['href'].startswith("javascript"):
# not http # not http
None None
else: else:
# remove anchors and spaces in urls # remove anchors and spaces in urls
linkurls.add(fullurl(url,link['href'])) linkurls.add(fullurl(url,link['href']))
for linkurl in linkurls: for linkurl in linkurls:
#print linkurl #print linkurl
scrapeAndStore(docsdb, linkurl, depth-1, fieldName, agencyID) scrapeAndStore(docsdb, linkurl, depth-1, fieldName, agencyID)
   
#couch = couchdb.Server('http://192.168.1.148:5984/') #couch = couchdb.Server('http://192.168.1.148:5984/')
couch = couchdb.Server('http://127.0.0.1:5984/') couch = couchdb.Server('http://127.0.0.1:5984/')
# select database # select database
agencydb = couch['disclosr-agencies'] agencydb = couch['disclosr-agencies']
docsdb = couch['disclosr-documents'] docsdb = couch['disclosr-documents']
   
if __name__ == "__main__": if __name__ == "__main__":
for row in agencydb.view('app/getScrapeRequired'): #not recently scraped agencies view? for row in agencydb.view('app/getScrapeRequired'): #not recently scraped agencies view?
agency = agencydb.get(row.id) agency = agencydb.get(row.id)
print agency['name'] print agency['name']
for key in agency.keys(): for key in agency.keys():
if key == "FOIDocumentsURL" and "status" not in agency.keys: if key == "FOIDocumentsURL" and "status" not in agency.keys:
scrapeAndStore(docsdb, agency[key],0,key,agency['_id']) scrapeAndStore(docsdb, agency[key],0,key,agency['_id'])
if key == 'website' and False: if key == 'website' and False:
scrapeAndStore(docsdb, agency[key],0,key,agency['_id']) scrapeAndStore(docsdb, agency[key],0,key,agency['_id'])
  agency['metadata']['lastScraped'] = time.time()
if key.endswith('URL') and False: if key.endswith('URL') and False:
print key print key
depth = 1 depth = 1
if 'scrapeDepth' in agency.keys(): if 'scrapeDepth' in agency.keys():
depth = agency['scrapeDepth'] depth = agency['scrapeDepth']
scrapeAndStore(docsdb, agency[key],depth,key,agency['_id']) scrapeAndStore(docsdb, agency[key],depth,key,agency['_id'])
agency['metadata']['lastScraped'] = time.time()  
agencydb.save(agency) agencydb.save(agency)
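fetchURL can also be driven directly for a one-off fetch outside the agency loop; a minimal sketch (the URL and agency id below are hypothetical):

import scrape

(url, mime_type, content) = scrape.fetchURL(scrape.docsdb, "http://www.example.gov.au/foi/disclosure-log", "foidocuments", "exampleAgencyID")
if content != None:
    print "fetched %s as %s" % (url, mime_type)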
   
  import sys,os
  sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
  import genericScrapers
  import scrape
  from bs4 import BeautifulSoup
 
  #http://www.doughellmann.com/PyMOTW/abc/
  class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
  def getColumnCount(self):
  return 6
  def getColumns(self,columns):
  (id, date, title, description, notes,link) = columns
  return (id, date, title, description, notes)
 
  if __name__ == '__main__':
  print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
  print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
  ScraperImplementation().doScrape()
 
  import sys,os
  sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
  import genericScrapers
  import scrape
  from bs4 import BeautifulSoup
 
  #http://www.doughellmann.com/PyMOTW/abc/
  class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
  #def getTable(self,soup):
  # return soup.find(id = "ctl00_PlaceHolderMain_intro2__ControlWrapper_CerRichHtmlField").table
  def getColumnCount(self):
  return 5
  def getColumns(self,columns):
  (id, date, title, description,notes) = columns
  return (id, date, title, description, notes)
 
  if __name__ == '__main__':
  print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
  print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
  ScraperImplementation().doScrape()
 
import sys,os import sys,os
sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../')) sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
import genericScrapers import genericScrapers
import scrape import scrape
from bs4 import BeautifulSoup from bs4 import BeautifulSoup
   
#http://www.doughellmann.com/PyMOTW/abc/ #http://www.doughellmann.com/PyMOTW/abc/
class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper): class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
def getColumns(self,columns): def getColumns(self,columns):
(id, date, title, description, notes) = columns (id, date, title, description, notes) = columns
return (id, date, description, title, notes) return (id, date, title, description, notes)
   
if __name__ == '__main__': if __name__ == '__main__':
print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper) print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper) print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
ScraperImplementation().doScrape() ScraperImplementation().doScrape()
   
import sys,os import sys,os
sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../')) sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
import genericScrapers import genericScrapers
import scrape import scrape
from bs4 import BeautifulSoup from bs4 import BeautifulSoup
   
#http://www.doughellmann.com/PyMOTW/abc/ #http://www.doughellmann.com/PyMOTW/abc/
class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper): class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
def getColumnCount(self): def getColumnCount(self):
return 5 return 5
def getColumns(self,columns): def getColumns(self,columns):
(id, date, title, description, notes) = columns (id, date, title, description, notes) = columns
return (id, date, description, title, notes) return (id, date, title, description, notes)
def getTable(self,soup): def getTable(self,soup):
return soup.find_all('table')[4] return soup.find_all('table')[4]
   
if __name__ == '__main__': if __name__ == '__main__':
print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper) print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper) print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
ScraperImplementation().doScrape() ScraperImplementation().doScrape()
   
  import sys,os
  sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
  import genericScrapers
  import scrape
  from bs4 import BeautifulSoup
 
  #http://www.doughellmann.com/PyMOTW/abc/
  class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
  #def getTable(self,soup):
  # return soup.find(id = "cphMain_C001_Col01").table
  def getColumnCount(self):
  return 5
  def getColumns(self,columns):
  (id, date, title, description,notes) = columns
  return (id, date, title, description, notes)
 
  if __name__ == '__main__':
  print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
  print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
  ScraperImplementation().doScrape()
 
  import sys,os
  sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
  import genericScrapers
  import scrape
  from bs4 import BeautifulSoup
 
  #http://www.doughellmann.com/PyMOTW/abc/
  class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
  def getTable(self,soup):
  return soup.find(id = "inner_content")
  def getColumnCount(self):
  return 2
  def getColumns(self,columns):
  (date, title) = columns
  return (date, date, title, title, None)
 
  if __name__ == '__main__':
  print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
  print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
  ScraperImplementation().doScrape()
 
  import sys,os
  sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
  import genericScrapers
  import scrape
  from bs4 import BeautifulSoup
 
  #http://www.doughellmann.com/PyMOTW/abc/
  class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
  #def getTable(self,soup):
  # return soup.find(id = "ctl00_PlaceHolderMain_intro2__ControlWrapper_CerRichHtmlField").table
  def getColumnCount(self):
  return 3
  def getColumns(self,columns):
  (id, title, date) = columns
  return (id, date, title, title, None)
 
  if __name__ == '__main__':
  print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
  print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
  ScraperImplementation().doScrape()
 
  import sys,os
  sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
  import genericScrapers
  import scrape
  from bs4 import BeautifulSoup
  import dateutil
  from dateutil.parser import *
  from datetime import *
 
  #http://www.doughellmann.com/PyMOTW/abc/
  class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
  def getColumnCount(self):
  return 3
  def getColumns(self,columns):
  (date, title, description) = columns
  return (date, date, title, description, None)
  def getTitle(self, content, entry, doc):
  i = 0
  title = ""
  for string in content.stripped_strings:
  if i < 2:
  title = title + string
  i = i+1
  doc.update({'title': title})
  print title
  return
 
  if __name__ == '__main__':
  print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
  print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
  ScraperImplementation().doScrape()
 
  import sys,os
  sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
  import genericScrapers
  import scrape
  from bs4 import BeautifulSoup
 
  #http://www.doughellmann.com/PyMOTW/abc/
  class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
  def getColumnCount(self):
  return 7
  def getColumns(self,columns):
  (id, date, title, description, link, deldate,notes) = columns
  return (id, date, title, description, notes)
 
  if __name__ == '__main__':
  print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
  print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
  ScraperImplementation().doScrape()
 
import sys,os
sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
import genericScrapers
#RSS feed not detailed

#http://www.doughellmann.com/PyMOTW/abc/
class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
    def getColumns(self,columns):
        (id, date, description, title, notes) = columns
        return (id, date, title, description, notes)

if __name__ == '__main__':
    print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
    print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
    ScraperImplementation().doScrape()
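
The PyMOTW link repeated above each scraper points at the abstract-base-class pattern these files rely on: genericScrapers supplies the doScrape() template method and leaves hooks such as getColumns() for each agency's subclass. A minimal sketch of that relationship, with the base-class body assumed rather than copied from genericScrapers:

import abc

class GenericDisclogScraperSketch(object):
    __metaclass__ = abc.ABCMeta

    @abc.abstractmethod
    def getColumns(self, columns):
        """Map one row's cells onto (id, date, title, description, notes)."""
        return

    def doScrape(self):
        # template method (body assumed): fetch the disclog page, walk the
        # table rows and store each one via the getColumns() hook
        pass

class MinimalScraper(GenericDisclogScraperSketch):
    def getColumns(self, columns):
        (id, date, title, description, notes) = columns
        return (id, date, title, description, notes)

# A subclass that forgets getColumns() cannot even be instantiated (TypeError),
# which is what the issubclass/isinstance prints in each scraper's __main__
# are sanity-checking before doScrape() runs.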
   
  import sys,os
  sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
  import genericScrapers
  import scrape
  from bs4 import BeautifulSoup
  import dateutil
  from dateutil.parser import *
  from datetime import *
 
  #http://www.doughellmann.com/PyMOTW/abc/
  class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
  def getTable(self,soup):
  return soup.find(class_ = "inner-column").table
  def getRows(self,table):
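# recursive=False takes only the direct child rows of tbody, so rows of any nested tables are skipped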
  return table.tbody.find_all('tr',recursive=False)
  def getColumnCount(self):
  return 3
  def getColumns(self,columns):
  (date, title, description) = columns
  return (date, date, title, description, None)
  def getDate(self, content, entry, doc):
  i = 0
  date = ""
  for string in content.stripped_strings:
  if i ==1:
  date = string
  i = i+1
  edate = parse(date, dayfirst=True, fuzzy=True).strftime("%Y-%m-%d")
  print edate
  doc.update({'date': edate})
  return
  def getTitle(self, content, entry, doc):
  i = 0
  title = ""
  for string in content.stripped_strings:
  if i < 2:
  title = title + string
  i = i+1
  doc.update({'title': title})
  #print title
  return
 
  if __name__ == '__main__':
  print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
  print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
  ScraperImplementation().doScrape()
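
getDate() above leans on dateutil's tolerant parser: dayfirst=True reads Australian day-first dates and fuzzy=True skips the words around the date. A quick illustration (the input string is invented):

from dateutil.parser import parse

# "3/2/2012" is taken day-first, so this is 3 February 2012; "Released" and
# "under FOI" are ignored thanks to fuzzy=True
print parse("Released 3/2/2012 under FOI", dayfirst=True, fuzzy=True).strftime("%Y-%m-%d")
# 2012-02-03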
 
  import sys,os
  sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
  import genericScrapers
  import scrape
  from bs4 import BeautifulSoup
  import dateutil
  from dateutil.parser import *
  from datetime import *
 
  #http://www.doughellmann.com/PyMOTW/abc/
  class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
  def getColumnCount(self):
  return 5
  def getColumns(self,columns):
  (id, date, title, description, notes) = columns
  return (id, date, title, description, notes)
 
  if __name__ == '__main__':
  print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
  print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
  si = ScraperImplementation()
  si.doScrape()
 
  import sys,os
  sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
  import genericScrapers
  import scrape
  from bs4 import BeautifulSoup
 
  #http://www.doughellmann.com/PyMOTW/abc/
  class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
  #def getTable(self,soup):
  # return soup.find(id = "ctl00_PlaceHolderMain_intro2__ControlWrapper_CerRichHtmlField").table
  def getColumnCount(self):
  return 5
  def getColumns(self,columns):
  (id, date, title, description,notes) = columns
  return (id, date, title, description, notes)
 
  if __name__ == '__main__':
  print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
  print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
  ScraperImplementation().doScrape()
 
import sys,os
sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
import genericScrapers
import scrape
from bs4 import BeautifulSoup

#http://www.doughellmann.com/PyMOTW/abc/
class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
    def getTable(self,soup):
        return soup.find(class_ = "ms-rtestate-field").table
    def getColumns(self,columns):
        (id, date, title, description, notes) = columns
        return (id, date, title, description, notes)

    def getLinks(self, content, entry, doc):
        link = None
        links = []
        for atag in entry.find_all('a'):
            if atag.has_key('href'):
                link = scrape.fullurl(self.getURL(),atag['href'])
                (url,mime_type,htcontent) = scrape.fetchURL(scrape.docsdb, link, "foidocuments", self.getAgencyID(), False)
                if htcontent != None:
                    if mime_type == "text/html" or mime_type == "application/xhtml+xml" or mime_type == "application/xml":
                        # http://www.crummy.com/software/BeautifulSoup/documentation.html
                        soup = BeautifulSoup(htcontent)
                        for atag in soup.find(class_ = "article-content").find_all('a'):
                            if atag.has_key('href'):
                                links.append(scrape.fullurl(link,atag['href']))

        if links != []:
            doc.update({'links': links})
        doc.update({'url': link})
        return

if __name__ == '__main__':
    print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
    print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
    ScraperImplementation().doScrape()
   
  import sys,os
  sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
  import genericScrapers
  import scrape
  from bs4 import BeautifulSoup
 
  #http://www.doughellmann.com/PyMOTW/abc/
  class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
  #def getTable(self,soup):
  # return soup.find(id = "ctl00_PlaceHolderMain_intro2__ControlWrapper_CerRichHtmlField").table
  def getColumnCount(self):
  return 4
  def getColumns(self,columns):
  (id, date, title, description) = columns
  return (id, date, title, description, None)
 
  if __name__ == '__main__':
  print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
  print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
  ScraperImplementation().doScrape()
 
  import sys,os
  sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
  import genericScrapers
  import scrape
  from bs4 import BeautifulSoup
 
  #http://www.doughellmann.com/PyMOTW/abc/
  class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
  #def getTable(self,soup):
  # return soup.find(id = "ctl00_PlaceHolderMain_intro2__ControlWrapper_CerRichHtmlField").table
  def getColumnCount(self):
  return 4
  def getColumns(self,columns):
  (id, date, title, description) = columns
  return (id, date, title, description, None)
 
  if __name__ == '__main__':
  print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
  print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
  ScraperImplementation().doScrape()
 
  import sys,os
  sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
  import genericScrapers
  import scrape
  from bs4 import BeautifulSoup
 
  #http://www.doughellmann.com/PyMOTW/abc/
  class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
  def getColumnCount(self):
  return 4
  def getColumns(self,columns):
  (date, id, title, description) = columns
  return (id, date, title, description, None)
 
  if __name__ == '__main__':
  print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
  print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
  ScraperImplementation().doScrape()
 
  import sys,os
  sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
  import genericScrapers
  import scrape
  from bs4 import BeautifulSoup
 
  #http://www.doughellmann.com/PyMOTW/abc/
  class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
  def getTable(self,soup):
  return soup.find(id = "ctl00_PlaceHolderMain_intro2__ControlWrapper_CerRichHtmlField").table
  def getColumnCount(self):
  return 5
  def getColumns(self,columns):
  (id, date, title, description,notes) = columns
  return (id, date, title, description, notes)
 
  if __name__ == '__main__':
  print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
  print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
  ScraperImplementation().doScrape()
 
  import sys,os
  sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
  import genericScrapers
  import scrape
  from bs4 import BeautifulSoup
 
  #http://www.doughellmann.com/PyMOTW/abc/
  class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
  #def getTable(self,soup):
  # return soup.find(id = "ctl00_PlaceHolderMain_intro2__ControlWrapper_CerRichHtmlField").table
  def getColumnCount(self):
  return 4
  def getColumns(self,columns):
  (date, title, description,notes) = columns
  return (title, date, title, description, notes)
 
  if __name__ == '__main__':
  print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
  print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
  ScraperImplementation().doScrape()
 
  import sys,os
  sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
  import genericScrapers
  import scrape
  from bs4 import BeautifulSoup
  import dateutil
  from dateutil.parser import *
  from datetime import *
 
  #http://www.doughellmann.com/PyMOTW/abc/
  class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
  def getColumnCount(self):
  return 5
  def getColumns(self,columns):
  (id, date, title, description, notes) = columns
  return (id, date, title, description, notes)
 
  if __name__ == '__main__':
  print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
  print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
  si = ScraperImplementation()
  si.doScrape()
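# earlier financial years are published at separate URLs; re-point the scraper and run it again for each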
  si.disclogURL = "http://www.fahcsia.gov.au/disclosure-log-2011-12-financial-year"
  si.doScrape()
  si.disclogURL = "http://www.fahcsia.gov.au/disclosure-log-2010-11-financial-year"
  si.doScrape()
 
 
  import sys,os
  sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
  import genericScrapers
  import scrape
  from bs4 import BeautifulSoup
 
  #http://www.doughellmann.com/PyMOTW/abc/
  class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
  def getTable(self,soup):
  return soup.find(id="node-30609")
  def getColumnCount(self):
  return 5
  def getColumns(self,columns):
  (id, date, title, description,notes) = columns
  return (id, date, title, description, notes)
 
  if __name__ == '__main__':
  print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
  print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
  ScraperImplementation().doScrape()
 
  import sys,os
  sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
  import genericScrapers
  import scrape
  from bs4 import BeautifulSoup
 
  #http://www.doughellmann.com/PyMOTW/abc/
  class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
  #def getTable(self,soup):
  # return soup.find(id = "ctl00_PlaceHolderMain_intro2__ControlWrapper_CerRichHtmlField").table
  def getColumnCount(self):
  return 3
  def getColumns(self,columns):
  (id, date, description) = columns
  return (id, date, description, description, None)
 
  if __name__ == '__main__':
  print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
  print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
  ScraperImplementation().doScrape()
 
  import sys,os
  sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
  import genericScrapers
  import scrape
  from bs4 import BeautifulSoup
 
  #http://www.doughellmann.com/PyMOTW/abc/
  class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
  def getTable(self,soup):
  return soup.find(id = "centercontent").table
  def getColumnCount(self):
  return 5
  def getColumns(self,columns):
  (id, date, title, description,notes) = columns
  return (id, date, title, description, notes)
 
  if __name__ == '__main__':
  print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
  print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
  ScraperImplementation().doScrape()
 
  import sys,os
  sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
  import genericScrapers
  import scrape
  from bs4 import BeautifulSoup
 
  #http://www.doughellmann.com/PyMOTW/abc/
  class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
  #def getTable(self,soup):
  # return soup.find(id = "ctl00_PlaceHolderMain_intro2__ControlWrapper_CerRichHtmlField").table
  def getColumnCount(self):
  return 5
  def getColumns(self,columns):
  (id, date, title, description,notes) = columns
  return (id, date, title, description, notes)
 
  if __name__ == '__main__':
  print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
  print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
  ScraperImplementation().doScrape()
 
  import sys,os
  sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
  import genericScrapers
  import scrape
  from bs4 import BeautifulSoup
  import dateutil
  from dateutil.parser import *
  from datetime import *
 
  #http://www.doughellmann.com/PyMOTW/abc/
  class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
  def getColumnCount(self):
  return 5
  def getColumns(self,columns):
  (id, date, title, description, notes) = columns
  return (id, date, title, description, notes)
 
  if __name__ == '__main__':
  print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
  print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
  si = ScraperImplementation()
  si.doScrape()
 
  import sys,os
  sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
  import genericScrapers
  import scrape
  from bs4 import BeautifulSoup
 
  #http://www.doughellmann.com/PyMOTW/abc/
  class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
  def getTable(self,soup):
  return soup.find(id = "content_div_50269").table
  def getColumns(self,columns):
  (id, date, title, description, notes) = columns
  return (id, date, title, description, notes)
 
  if __name__ == '__main__':
  print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
  print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
  ScraperImplementation().doScrape()
 
import sys,os
sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
import genericScrapers
import scrape
from bs4 import BeautifulSoup

#http://www.doughellmann.com/PyMOTW/abc/
class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
    def getColumns(self,columns):
        (id, date, title, description, notes) = columns
        return (id, date, title, description, notes)

if __name__ == '__main__':
    print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
    print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
    ScraperImplementation().doScrape()
   
  weird div based log with tables of links
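
There is no scraper for this one yet. A rough sketch of how the link tables could be pulled out of the entry divs, assuming the usual fetch path; the URL, agency id and class name below are placeholders, not taken from the site:

import sys,os
sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
import scrape
from bs4 import BeautifulSoup

(url, mime_type, htcontent) = scrape.fetchURL(scrape.docsdb,
    "http://example.gov.au/disclosure-log", "foidocuments", "example-agency", False)
if htcontent != None:
    soup = BeautifulSoup(htcontent)
    # treat each entry div as one disclog row; its table holds the document links
    for entry in soup.find_all("div", class_="entry"):
        links = [atag['href'] for atag in entry.find_all('a') if atag.has_key('href')]
        print entry.get_text(" ", strip=True), links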
 
  import sys,os
  sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
  import genericScrapers
  import scrape
  from bs4 import BeautifulSoup
 
  #http://www.doughellmann.com/PyMOTW/abc/
  class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
  def getTable(self,soup):
  return soup.find(id = "content-middle").table
  def getColumnCount(self):
  return 5
  def getColumns(self,columns):
  (id, date, title, description,notes) = columns
  return (id, date, title, description, notes)
 
  if __name__ == '__main__':
  print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
  print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
  ScraperImplementation().doScrape()
 
  import sys,os
  sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
  import genericScrapers
  import scrape
  from bs4 import BeautifulSoup
 
  #http://www.doughellmann.com/PyMOTW/abc/
  class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
  #def getTable(self,soup):
  # return soup.find(id = "ctl00_PlaceHolderMain_intro2__ControlWrapper_CerRichHtmlField").table
  def getColumnCount(self):
  return 5
  def getColumns(self,columns):
  (id, date, title, description,notes) = columns
  return (id, date, title, description, notes)
 
  if __name__ == '__main__':
  print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
  print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
  ScraperImplementation().doScrape()
 
  import sys,os
  sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
  import genericScrapers
  import scrape
  from bs4 import BeautifulSoup
 
  #http://www.doughellmann.com/PyMOTW/abc/
  class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
  def getTable(self,soup):
  return soup.find("table",width="571")
  #findAll("table")[3]
  def getColumnCount(self):
  return 7
  def getColumns(self,columns):
  (id, date, title, description,link,deldate,notes) = columns
  return (id, date, title, description, notes)
 
  if __name__ == '__main__':
  print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
  print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
  ScraperImplementation().doScrape()
 
  import sys,os
  sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
  import genericScrapers
  import scrape
  from bs4 import BeautifulSoup
 
  #http://www.doughellmann.com/PyMOTW/abc/
  class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
  def getTable(self,soup):
  return soup.find(id = "cphMain_C001_Col01").table
  def getColumnCount(self):
  return 5
  def getColumns(self,columns):
  (id, date, title, description,notes) = columns
  return (id, date, title, description, notes)
 
  if __name__ == '__main__':
  print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
  print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
  ScraperImplementation().doScrape()
 
import sys,os
sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
import genericScrapers
import scrape
from bs4 import BeautifulSoup

#http://www.doughellmann.com/PyMOTW/abc/
class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
    def getColumnCount(self):
        return 7
    def getColumns(self,columns):
        (id, date, title, description, notes, deletedate, otherinfo) = columns
        return (id, date, title, description, notes)
    #def getTable(self,soup):
    #    return soup.find(class_ = "box").table

if __name__ == '__main__':
    print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
    print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
    ScraperImplementation().doScrape()
   
  import sys,os
  sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
  import genericScrapers
  import scrape
  from bs4 import BeautifulSoup
 
  #http://www.doughellmann.com/PyMOTW/abc/
  class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
  #def getTable(self,soup):
  # return soup.find(id = "ctl00_PlaceHolderMain_intro2__ControlWrapper_CerRichHtmlField").table
  def getColumnCount(self):
  return 3
  def getColumns(self,columns):
  (date, title, description) = columns
  return (date, date, title, description, None)
 
  if __name__ == '__main__':
  print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
  print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
  ScraperImplementation().doScrape()
 
  import sys,os
  sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
  import genericScrapers
  import scrape
  from bs4 import BeautifulSoup
 
  #http://www.doughellmann.com/PyMOTW/abc/
  class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
  #def getTable(self,soup):
  # return soup.find(id = "ctl00_PlaceHolderMain_intro2__ControlWrapper_CerRichHtmlField").table
  def getTitle(self, content, entry, doc):
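# take the first stripped string in the cell as the title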
  doc.update({'title': content.stripped_strings.next()})
  return
  def getColumnCount(self):
  return 3
  def getColumns(self,columns):
  (date, id, description) = columns
  return (id, date, description, description, None)
 
  if __name__ == '__main__':
  print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
  print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
  ScraperImplementation().doScrape()
 
  import sys,os
  sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
  import genericScrapers
  import scrape
  from bs4 import BeautifulSoup
 
  #http://www.doughellmann.com/PyMOTW/abc/
  class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
  #def getTable(self,soup):
  # return soup.find(id = "ctl00_PlaceHolderMain_intro2__ControlWrapper_CerRichHtmlField").table
  def getColumnCount(self):
  return 5
  def getColumns(self,columns):
  (id, date, title, description,notes) = columns
  return (id, date, title, description, notes)
 
  if __name__ == '__main__':
  print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
  print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
  ScraperImplementation().doScrape()
 
import sys,os
sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
import genericScrapers
#RSS feed not detailed

#http://www.doughellmann.com/PyMOTW/abc/
class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
    def getColumns(self,columns):
        (id, date, title, description, notes) = columns
        return (id, date, title, description, notes)

if __name__ == '__main__':
    print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
    print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
    ScraperImplementation().doScrape()
   
   
  import sys,os
  sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
  import genericScrapers
  import scrape
  from bs4 import BeautifulSoup
  import codecs
  #http://www.doughellmann.com/PyMOTW/abc/
  class NewScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
  def getDescription(self,content, entry,doc):
  link = None
  links = []
  description = ""
  for atag in entry.find_all('a'):
  if atag.has_key('href'):
  link = scrape.fullurl(self.getURL(),atag['href'])
  (url,mime_type,htcontent) = scrape.fetchURL(scrape.docsdb, link, "foidocuments", self.getAgencyID(), False)
  if htcontent != None:
  if mime_type == "text/html" or mime_type == "application/xhtml+xml" or mime_type =="application/xml":
  # http://www.crummy.com/software/BeautifulSoup/documentation.html
  soup = BeautifulSoup(htcontent)
  for text in soup.find(id="divFullWidthColumn").stripped_strings:
  description = description + text.encode('ascii', 'ignore')
 
  for atag in soup.find(id="divFullWidthColumn").find_all("a"):
  if atag.has_key('href'):
  links.append(scrape.fullurl(link,atag['href']))
 
  if links != []:
  doc.update({'links': links})
  if description != "":
  doc.update({ 'description': description})
 
  def getColumnCount(self):
  return 2
  def getTable(self,soup):
  return soup.find(id = "TwoColumnSorting")
  def getColumns(self,columns):
  ( title, date) = columns
  return (title, date, title, title, None)
  class OldScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
  def getDescription(self,content, entry,doc):
  link = None
  links = []
  description = ""
  for atag in entry.find_all('a'):
  if atag.has_key('href'):
  link = scrape.fullurl(self.getURL(),atag['href'])
  (url,mime_type,htcontent) = scrape.fetchURL(scrape.docsdb, link, "foidocuments", self.getAgencyID(), False)
  if htcontent != None:
  if mime_type == "text/html" or mime_type == "application/xhtml+xml" or mime_type =="application/xml":
  # http://www.crummy.com/software/BeautifulSoup/documentation.html
  soup = BeautifulSoup(htcontent)
  for text in soup.find(id="content-item").stripped_strings:
  description = description + text + " \n"
  for atag in soup.find(id="content-item").find_all("a"):
  if atag.has_key('href'):
  links.append(scrape.fullurl(link,atag['href']))
  if links != []:
  doc.update({'links': links})
  if description != "":
  doc.update({ 'description': description})
 
 
  def getColumnCount(self):
  return 2
  def getTable(self,soup):
  return soup.find(class_ = "doc-list")
  def getColumns(self,columns):
  (date, title) = columns
  return (title, date, title, title, None)
 
  if __name__ == '__main__':
  print 'Subclass:', issubclass(NewScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
  print 'Instance:', isinstance(NewScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
  #NewScraperImplementation().doScrape()
  print 'Subclass:', issubclass(OldScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
  print 'Instance:', isinstance(OldScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
  osi = OldScraperImplementation()
  osi.disclogURL = "http://archive.treasury.gov.au/content/foi_publications.asp?year=-1&abstract=0&classification=&=&titl=Disclosure+Log+-+Documents+Released+Under+FOI"
  osi.doScrape()
  # old site too
 
import sys,os
sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
import genericScrapers
import scrape
from bs4 import BeautifulSoup

#http://www.doughellmann.com/PyMOTW/abc/
class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
    def getColumnCount(self):
        return 4
    def getTable(self,soup):
        return soup.find(class_ = "content").table
    def getColumns(self,columns):
        (id, date, title, description) = columns
        return (id, date, title, description, None)

if __name__ == '__main__':
    print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
    print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
    ScraperImplementation().doScrape()
   
  import sys,os
  sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
  import genericScrapers
  import scrape
  from bs4 import BeautifulSoup
  import dateutil
  from dateutil.parser import *
  from datetime import *
 
  #http://www.doughellmann.com/PyMOTW/abc/
  class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
  def getColumnCount(self):
  return 5
  def getColumns(self,columns):
  (id, date, title, description, notes) = columns
  return (id, date, title, description, notes)
 
  if __name__ == '__main__':
  print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
  print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
  si = ScraperImplementation()
  si.doScrape()
 
  import sys,os
  sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
  import genericScrapers
  import scrape
  from bs4 import BeautifulSoup
 
  #http://www.doughellmann.com/PyMOTW/abc/
  class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
  #def getTable(self,soup):
  # return soup.find(id = "ctl00_PlaceHolderMain_intro2__ControlWrapper_CerRichHtmlField").table
  def getColumnCount(self):
  return 4
  def getColumns(self,columns):
  (id, date, title, description) = columns
  return (id, date, title, description, None)
 
  if __name__ == '__main__':
  print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
  print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
  ScraperImplementation().doScrape()
 
  import sys,os
  sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
  import genericScrapers
  import scrape
  from bs4 import BeautifulSoup
 
  #http://www.doughellmann.com/PyMOTW/abc/
  class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
  #def getTable(self,soup):
  # return soup.find(id = "ctl00_PlaceHolderMain_intro2__ControlWrapper_CerRichHtmlField").table
  def getColumnCount(self):
  return 4
  def getColumns(self,columns):
  (id, date,logdate, description) = columns
  return (id, date, description, description, None)
 
  if __name__ == '__main__':
  print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
  print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
  ScraperImplementation().doScrape()
 
  import sys,os
  sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
  import genericScrapers
  import scrape
  from bs4 import BeautifulSoup
 
  #http://www.doughellmann.com/PyMOTW/abc/
  class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
  #def getTable(self,soup):
  # return soup.find(id = "ctl00_PlaceHolderMain_intro2__ControlWrapper_CerRichHtmlField").table
  def getColumnCount(self):
  return 5
  def getColumns(self,columns):
  (id, date, title, description,notes) = columns
  return (id, date, title, description, notes)
 
  if __name__ == '__main__':
  print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
  print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
  ScraperImplementation().doScrape()
 
  import sys,os
  sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
  import genericScrapers
  import scrape
  from bs4 import BeautifulSoup
 
  #http://www.doughellmann.com/PyMOTW/abc/
  class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
  #def getTable(self,soup):
  # return soup.find(id = "ctl00_PlaceHolderMain_intro2__ControlWrapper_CerRichHtmlField").table
  def getColumnCount(self):
  return 5
  def getColumns(self,columns):
  (id, date, title, description,notes) = columns
  return (id, date, title, description, notes)
 
  if __name__ == '__main__':
  print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
  print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
  ScraperImplementation().doScrape()
 
  import sys,os
  sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
  import genericScrapers
  import scrape
  from bs4 import BeautifulSoup
 
  #http://www.doughellmann.com/PyMOTW/abc/
  class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
  #def getTable(self,soup):
  # return soup.find(id = "ctl00_PlaceHolderMain_intro2__ControlWrapper_CerRichHtmlField").table
  def getColumnCount(self):
  return 3
  def getColumns(self,columns):
  (date, title, description) = columns
  return (date, date, title, description, None)
 
  if __name__ == '__main__':
  print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
  print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
  ScraperImplementation().doScrape()
 
  import sys,os
  sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
  import genericScrapers
  import scrape
  from bs4 import BeautifulSoup
 
  #http://www.doughellmann.com/PyMOTW/abc/
  class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
  def getTable(self,soup):
  return soup.find(id="ctl00_ContentPlaceHolderMainNoAjax_EdtrTD1494_2").table
  def getColumnCount(self):
  return 4
  def getColumns(self,columns):
  (blank,id, title,date) = columns
  return (id, date, title, title, None)
 
  if __name__ == '__main__':
  print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
  print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
  ScraperImplementation().doScrape()
 
  import sys,os
  sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
  import genericScrapers
  import scrape
  from bs4 import BeautifulSoup
 
  #http://www.doughellmann.com/PyMOTW/abc/
  class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
  def getTable(self,soup):
  return soup.find(id = "_ctl0__ctl0_MainContentPlaceHolder_MainContentPlaceHolder_ContentSpan").findAll("table")[3]
  def getColumnCount(self):
  return 5
  def getColumns(self,columns):
  (id, date, title, description,notes) = columns
  return (id, date, title, description, notes)
 
  if __name__ == '__main__':
  print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
  print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
  ScraperImplementation().doScrape()
 
  import sys,os
  sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
  import genericScrapers
  import scrape
  from bs4 import BeautifulSoup
 
  #http://www.doughellmann.com/PyMOTW/abc/
  class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
  #def getTable(self,soup):
  # return soup.find(id = "ctl00_PlaceHolderMain_intro2__ControlWrapper_CerRichHtmlField").table
  def getColumnCount(self):
  return 5
  def getColumns(self,columns):
  (id, date, title, description,notes) = columns
  return (id, date, title, description, notes)
 
  if __name__ == '__main__':
  print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
  print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
  ScraperImplementation().doScrape()
 
  import sys,os
  sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
  import genericScrapers
  import scrape
  from bs4 import BeautifulSoup
 
  #http://www.doughellmann.com/PyMOTW/abc/
  class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
  def getColumnCount(self):
  return 6
  def getColumns(self,columns):
  (id, date, title, description,link,notes) = columns
  return (id, date, title, description, notes)
 
  if __name__ == '__main__':
  print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
  print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
  ScraperImplementation().doScrape()
 
  import sys,os
  sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
  import genericScrapers
  import scrape
  from bs4 import BeautifulSoup
 
  #http://www.doughellmann.com/PyMOTW/abc/
  class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
  def getTable(self,soup):
  return soup.find(id = "ctl00_PlaceHolderMain_Content__ControlWrapper_RichHtmlField").table
  def getColumnCount(self):
  return 5
  def getColumns(self,columns):
  (id, date, title, description,notes) = columns
  return (id, date, title, description, notes)
 
  if __name__ == '__main__':
  print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
  print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
  ScraperImplementation().doScrape()
 
  import sys,os
  sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
  import genericScrapers
  import scrape
  from bs4 import BeautifulSoup
 
  #http://www.doughellmann.com/PyMOTW/abc/
  class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
  #def getTable(self,soup):
  # return soup.find(id = "ctl00_PlaceHolderMain_intro2__ControlWrapper_CerRichHtmlField").table
  def getColumnCount(self):
  return 5
  def getColumns(self,columns):
  (id, date, title, description,notes) = columns
  return (id, date, title, description, notes)
 
  if __name__ == '__main__':
  print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
  print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
  ScraperImplementation().doScrape()
 
no disclog
   
  import sys,os
  sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
  import genericScrapers
  import scrape
  from bs4 import BeautifulSoup
 
  #http://www.doughellmann.com/PyMOTW/abc/
  class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
  #def getTable(self,soup):
  # return soup.find(id = "ctl00_PlaceHolderMain_intro2__ControlWrapper_CerRichHtmlField").table
  def getColumnCount(self):
  return 4
  def getColumns(self,columns):
  (id, date, title, description) = columns
  return (id, date, title, description, None)
 
  if __name__ == '__main__':
  print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
  print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
  ScraperImplementation().doScrape()
 
  import sys,os
  sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
  import genericScrapers
  import scrape
  from bs4 import BeautifulSoup
 
  #http://www.doughellmann.com/PyMOTW/abc/
  class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
  def getTable(self,soup):
  return soup.find(summary="This table shows every FOI request to date.")
  def getColumnCount(self):
  return 5
  def getColumns(self,columns):
  (id, date, title, description,notes) = columns
  return (id, date, title, description, notes)
 
  if __name__ == '__main__':
  print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
  print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
  ScraperImplementation().doScrape()
 
  import sys,os
  sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
  import genericScrapers
  import scrape
  from bs4 import BeautifulSoup
 
  #http://www.doughellmann.com/PyMOTW/abc/
  class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
  def getTable(self,soup):
  return soup.find(class_ = "ms-rtestate-field").table
  def getColumnCount(self):
  return 4
  def getColumns(self,columns):
  (id, date, title, description) = columns
  return (id, date, title, description, None)
 
  if __name__ == '__main__':
  print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
  print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
  ScraperImplementation().doScrape()
 
import sys,os  
sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))  
import genericScrapers  
import scrape  
from bs4 import BeautifulSoup  
 
#http://www.doughellmann.com/PyMOTW/abc/  
class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):  
 
def getColumnCount(self):  
return 4  
def getColumns(self,columns):  
(id, date, title, description) = columns  
return (id, date, title, description, None)  
 
if __name__ == '__main__':  
print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)  
print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)  
ScraperImplementation().doScrape()  
 
import sys,os
sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
import genericScrapers
#RSS feed not detailed

#http://www.doughellmann.com/PyMOTW/abc/
class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
    def getColumns(self,columns):
        (id, date, title, description, notes) = columns
        return (id, date, title, description, notes)

if __name__ == '__main__':
    print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
    print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
    ScraperImplementation().doScrape()
   
  import sys,os
  sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
  import genericScrapers
  import scrape
  from bs4 import BeautifulSoup
 
  #http://www.doughellmann.com/PyMOTW/abc/
  class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
  def getTable(self,soup):
  return soup.find(id = "ctl00_PlaceHolderMain_ctl01__ControlWrapper_RichHtmlField").table
  def getColumnCount(self):
  return 5
  def getColumns(self,columns):
  (id, date, title, description,notes) = columns
  return (id, date, title, description, notes)
 
  if __name__ == '__main__':
  print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
  print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
  ScraperImplementation().doScrape()
 
  import sys,os
  sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
  import genericScrapers
  import scrape
  from bs4 import BeautifulSoup
 
  #http://www.doughellmann.com/PyMOTW/abc/
  class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
  def getTable(self,soup):
  return soup.find(summary="This table lists the schedule of upcoming courses.")
  def getColumnCount(self):
  return 7
  def getColumns(self,columns):
  (id, date, title, description,link,deldate,notes) = columns
  return (id, date, title, description, notes)
 
  if __name__ == '__main__':
  print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
  print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
  ScraperImplementation().doScrape()
 
  import sys,os
  sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
  import genericScrapers
  import scrape
  from bs4 import BeautifulSoup
 
  #http://www.doughellmann.com/PyMOTW/abc/
  class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
  #def getTable(self,soup):
  # return soup.find(id = "ctl00_PlaceHolderMain_intro2__ControlWrapper_CerRichHtmlField").table
  def getColumnCount(self):
  return 5
  def getColumns(self,columns):
  (id, date, title, description,notes) = columns
  return (id, date, title, description, notes)
 
  if __name__ == '__main__':
  print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
  print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
  ScraperImplementation().doScrape()
 
  import sys,os
  sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
  import genericScrapers
  import scrape
  from bs4 import BeautifulSoup
 
  #http://www.doughellmann.com/PyMOTW/abc/
  class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
  def getTable(self,soup):
  return soup.find(id="main").table
  def getColumnCount(self):
  return 7
  def getColumns(self,columns):
  (id, date, title, description,link,deldate,notes) = columns
  return (id, date, title, description, notes)
 
  if __name__ == '__main__':
  print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
  print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
  ScraperImplementation().doScrape()
 
  import sys,os
  sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
  import genericScrapers
  import scrape
  from bs4 import BeautifulSoup
 
  #http://www.doughellmann.com/PyMOTW/abc/
  class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
  #def getTable(self,soup):
  # return soup.find(id = "ctl00_PlaceHolderMain_intro2__ControlWrapper_CerRichHtmlField").table
  def getColumnCount(self):
  return 5
  def getColumns(self,columns):
  (id, date, title, description,notes) = columns
  return (id, date, title, description, notes)
 
  if __name__ == '__main__':
  print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
  print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
  ScraperImplementation().doScrape()
 
import sys,os
sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
import genericScrapers
import scrape
from bs4 import BeautifulSoup

#http://www.doughellmann.com/PyMOTW/abc/
class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
    def getColumns(self,columns):
        (id, date, title, description, notes) = columns
        return (id, date, title, description, notes)
    def getTable(self,soup):
        return soup.find(class_ = "content")

if __name__ == '__main__':
    print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
    print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
    ScraperImplementation().doScrape()
   
  import sys,os
  sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
  import genericScrapers
  import scrape
  from bs4 import BeautifulSoup
 
  #http://www.doughellmann.com/PyMOTW/abc/
  class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
  #def getTable(self,soup):
  # return soup.find(id = "ctl00_PlaceHolderMain_intro2__ControlWrapper_CerRichHtmlField").table
  def getColumnCount(self):
  return 6
  def getColumns(self,columns):
  (id, date, title, description,deldate, notes) = columns
  return (id, date, title, description, notes)
 
  if __name__ == '__main__':
  print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
  print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
  ScraperImplementation().doScrape()
 
import sys,os
sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
import genericScrapers
#RSS feed not detailed

#http://www.doughellmann.com/PyMOTW/abc/
class ScraperImplementation(genericScrapers.GenericRSSDisclogScraper):
    def getColumns(self,columns):
        (id, date, title, description, notes) = columns
        return (id, date, title, description, notes)

if __name__ == '__main__':
    print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericRSSDisclogScraper)
    print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericRSSDisclogScraper)
    ScraperImplementation().doScrape()
   
www.finance.gov.au/foi/disclosure-log/foi-rss.xml
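
That feed is the input for the GenericRSSDisclogScraper above. Independently of that class, a minimal look at the same feed with feedparser, just to show the fields available (an illustration, not the project's code path):

import feedparser

feed = feedparser.parse("http://www.finance.gov.au/foi/disclosure-log/foi-rss.xml")
for entry in feed.entries:
    # link/published/title line up roughly with the (id, date, title, ...)
    # tuples the table scrapers return
    print entry.get("published", ""), entry.get("title", ""), entry.get("link", "")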
   
  import sys,os
  sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
  import genericScrapers
  import scrape
  from bs4 import BeautifulSoup
 
  #http://www.doughellmann.com/PyMOTW/abc/
  class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
  #def getTable(self,soup):
  # return soup.find(id = "ctl00_PlaceHolderMain_intro2__ControlWrapper_CerRichHtmlField").table
  def getColumnCount(self):
  return 5
  def getColumns(self,columns):
  (id, date, title, description,notes) = columns
  return (id, date, title, description, notes)
 
  if __name__ == '__main__':
  print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
  print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
  ScraperImplementation().doScrape()
 
  import sys,os
  sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
  import genericScrapers
  import scrape
  from bs4 import BeautifulSoup
 
  #http://www.doughellmann.com/PyMOTW/abc/
  class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
  def getTable(self,soup):
  return soup.find(id = "inner_content")
  def getColumnCount(self):
  return 2
  def getColumns(self,columns):
  (date, title) = columns
  return (date, date, title, title, None)
 
  if __name__ == '__main__':
  print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
  print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
  ScraperImplementation().doScrape()
 
import sys,os
sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
import genericScrapers
import scrape
from bs4 import BeautifulSoup

#http://www.doughellmann.com/PyMOTW/abc/
class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
    #def getTable(self, soup):
    #    return soup.find(id="ctl00_PlaceHolderMain_intro2__ControlWrapper_CerRichHtmlField").table
    def getColumnCount(self):
        return 4
    def getColumns(self, columns):
        (id, title, date, description) = columns
        return (id, date, title, description, None)

if __name__ == '__main__':
    print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
    print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
    ScraperImplementation().doScrape()
 
import sys,os
sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
import genericScrapers
import scrape
from bs4 import BeautifulSoup

#http://www.doughellmann.com/PyMOTW/abc/
class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
    def getTable(self, soup):
        return soup.find(id="genericContent").table.tbody
    def getColumnCount(self):
        return 3
    def getColumns(self, columns):
        # NOTE: getColumnCount() above says rows have 3 cells, yet five values
        # are unpacked here; with 3-cell rows this raises ValueError. One of
        # the two numbers is wrong, but the intended mapping is not
        # recoverable from this file alone.
        (id, date, title, description, notes) = columns
        return (id, date, title, description, notes)

if __name__ == '__main__':
    print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
    print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
    ScraperImplementation().doScrape()
 
import sys,os
sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
import genericScrapers
import scrape
from bs4 import BeautifulSoup

#http://www.doughellmann.com/PyMOTW/abc/
class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
    def getTable(self, soup):
        return soup.find(id="genericContent").table.tbody
    def getColumnCount(self):
        return 5
    def getColumns(self, columns):
        (id, date, title, description, notes) = columns
        return (id, date, title, description, notes)

if __name__ == '__main__':
    print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
    print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
    ScraperImplementation().doScrape()
 
import sys,os
sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
import genericScrapers
import scrape
from bs4 import BeautifulSoup

#http://www.doughellmann.com/PyMOTW/abc/
class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
    def getColumns(self, columns):
        (id, date, title, description, notes) = columns
        return (id, date, title, description, notes)
    def getTable(self, soup):
        return soup.find(id="content").table

if __name__ == '__main__':
    print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
    print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
    ScraperImplementation().doScrape()
   
import sys,os
sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
import genericScrapers
import scrape
from bs4 import BeautifulSoup

#http://www.doughellmann.com/PyMOTW/abc/
class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
    def getTable(self, soup):
        return soup.find(id="ctl00_PlaceHolderMain_PublishingPageContent__ControlWrapper_RichHtmlField").table
    def getColumnCount(self):
        return 7
    def getColumns(self, columns):
        # link and deldate are scraped but not stored
        (id, date, title, description, link, deldate, notes) = columns
        return (id, date, title, description, notes)

if __name__ == '__main__':
    print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
    print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
    ScraperImplementation().doScrape()
 
import sys,os
sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
import genericScrapers
#RSS feed not detailed

#http://www.doughellmann.com/PyMOTW/abc/
class ScraperImplementation(genericScrapers.GenericRSSDisclogScraper):
    def getColumns(self, columns):
        (id, date, title, description, notes) = columns
        return (id, date, title, description, notes)

if __name__ == '__main__':
    print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericRSSDisclogScraper)
    print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericRSSDisclogScraper)
    ScraperImplementation().doScrape()

http://www.righttoknow.org.au/feed/search/%20(latest_status:successful%20OR%20latest_status:partially_successful)
   
<?php

include ('../include/common.inc.php');
$last_updated = date('Y-m-d', @filemtime('cbrfeed.zip')); // note: currently unused below
header("Content-Type: text/xml");
echo "<?xml version='1.0' encoding='UTF-8'?>";
echo '<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">' . "\n";
echo " <url><loc>" . local_url() . "index.php</loc><priority>1.0</priority></url>\n";
foreach (scandir("./") as $file) {
    if (strpos($file, ".php") !== false && $file != "index.php" && $file != "sitemap.xml.php")
        echo " <url><loc>" . local_url() . "$file</loc><priority>0.6</priority></url>\n";
}

$db = $server->get_db('disclosr-foidocuments');
try {
    $rows = $db->get_view("app", "all")->rows;
    foreach ($rows as $row) {
        echo '<url><loc>' . local_url() . 'view.php?id=' . $row->value->_id . "</loc><priority>0.3</priority></url>\n";
    }
} catch (SetteeRestClientException $e) {
    setteErrorHandler($e);
}
echo '</urlset>';
?>
 
<?php

function include_header_documents($title) {
?>
<!doctype html>
<!-- paulirish.com/2008/conditional-stylesheets-vs-css-hacks-answer-neither/ -->
<!--[if lt IE 7]> <html class="no-js lt-ie9 lt-ie8 lt-ie7" lang="en"> <![endif]-->
<!--[if IE 7]>    <html class="no-js lt-ie9 lt-ie8" lang="en"> <![endif]-->
<!--[if IE 8]>    <html class="no-js lt-ie9" lang="en"> <![endif]-->
<!-- Consider adding a manifest.appcache: h5bp.com/d/Offline -->
<!--[if gt IE 8]><!--> <html class="no-js" lang="en"> <!--<![endif]-->
<head>
    <meta charset="utf-8">

    <!-- Use the .htaccess and remove these lines to avoid edge case issues.
         More info: h5bp.com/i/378 -->
    <meta http-equiv="X-UA-Compatible" content="IE=edge,chrome=1">

    <title>Australian Disclosure Logs<?php if ($title != "") echo " - $title"; ?></title>
    <meta name="description" content="">

    <!-- Mobile viewport optimized: h5bp.com/viewport -->
    <meta name="viewport" content="width=device-width">
    <link rel="alternate" type="application/rss+xml" title="Latest Disclosure Log Entries" href="rss.xml.php" />
    <!-- Place favicon.ico and apple-touch-icon.png in the root directory: mathiasbynens.be/notes/touch-icons -->
    <meta name="google-site-verification" content="jkknX5g2FCpQvrW030b1Nq2hyoa6mb3EDiA7kCoHNj8" />

    <!-- Le styles -->
    <link href="css/bootstrap.min.css" rel="stylesheet">
    <style type="text/css">
        body {
            padding-top: 60px;
            padding-bottom: 40px;
        }
        .sidebar-nav {
            padding: 9px 0;
        }
    </style>
    <link href="css/bootstrap-responsive.min.css" rel="stylesheet">

    <!-- HTML5 shim, for IE6-8 support of HTML5 elements -->
    <!--[if lt IE 9]>
        <script src="http://html5shim.googlecode.com/svn/trunk/html5.js"></script>
    <![endif]-->
    <!-- More ideas for your <head> here: h5bp.com/d/head-Tips -->

    <!-- All JavaScript at the bottom, except this Modernizr build.
         Modernizr enables HTML5 elements & feature detects for optimal performance.
         Create your own custom Modernizr build: www.modernizr.com/download/
    <script src="js/libs/modernizr-2.5.3.min.js"></script>-->
    <script src="js/jquery.js"></script>
    <script type="text/javascript" src="js/flotr2.min.js"></script>
</head>
<body>
    <div class="navbar navbar-inverse navbar-fixed-top">
        <div class="navbar-inner">
            <div class="container-fluid">
                <a class="btn btn-navbar" data-toggle="collapse" data-target=".nav-collapse">
                    <span class="icon-bar"></span>
                    <span class="icon-bar"></span>
                    <span class="icon-bar"></span>
                </a>
                <a class="brand" href="#">Australian Disclosure Logs</a>
                <div class="nav-collapse collapse">
                    <p class="navbar-text pull-right">
                        Check out our subsites on:
                        <a href="http://orgs.disclosurelo.gs">Government Agencies</a>
                        • <a href="http://lobbyists.disclosurelo.gs">Political Lobbyists</a>
                        • <a href="http://contracts.disclosurelo.gs">Government Contracts and Spending</a>
                    </p>
                    <ul class="nav">
                        <li><a href="index.php">Home</a></li>
                        <li><a href="disclogsList.php">List of Disclosure Logs</a></li>
                        <li><a href="about.php">About</a></li>
                    </ul>
                </div><!--/.nav-collapse -->
            </div>
        </div>
    </div>
    <div class="container">
<?php
}
function include_footer_documents() {
?>
    </div> <!-- /container -->
    <hr>

    <footer>
        <p>&copy; Company 2012</p>
    </footer>
    <script type="text/javascript">

        var _gaq = _gaq || [];
        _gaq.push(['_setAccount', 'UA-12341040-4']);
        _gaq.push(['_setDomainName', 'disclosurelo.gs']);
        _gaq.push(['_setAllowLinker', true]);
        _gaq.push(['_trackPageview']);

        (function() {
            var ga = document.createElement('script'); ga.type = 'text/javascript'; ga.async = true;
            ga.src = ('https:' == document.location.protocol ? 'https://ssl' : 'http://www') + '.google-analytics.com/ga.js';
            var s = document.getElementsByTagName('script')[0]; s.parentNode.insertBefore(ga, s);
        })();

    </script>
    <!-- Le javascript
    ================================================== -->
    <!-- Placed at the end of the document so the pages load faster -->
    <!--
    <script src="js/bootstrap-transition.js"></script>
    <script src="js/bootstrap-alert.js"></script>
    <script src="js/bootstrap-modal.js"></script>
    <script src="js/bootstrap-dropdown.js"></script>
    <script src="js/bootstrap-scrollspy.js"></script>
    <script src="js/bootstrap-tab.js"></script>
    <script src="js/bootstrap-tooltip.js"></script>
    <script src="js/bootstrap-popover.js"></script>
    <script src="js/bootstrap-button.js"></script>
    <script src="js/bootstrap-collapse.js"></script>
    <script src="js/bootstrap-carousel.js"></script>
    <script src="js/bootstrap-typeahead.js"></script>-->

</body>
</html>
<?php
}
   
function displayLogEntry($row, $idtoname) {
    $result = "";
    $result .= "<div><h2>" . $row->value->date . ": " . $row->value->title . " (" . $idtoname[$row->value->agencyID] . ")</h2> <p>" . str_replace("\n", "<br>", $row->value->description);
    if (isset($row->value->notes)) {
        $result .= " <br>Note: " . $row->value->notes;
    }
    $result .= "</p>";

    if (isset($row->value->links)) {
        $result .= "<h3>Links/Documents</h3><ul>";
        foreach ($row->value->links as $link) {
            $result .= "<li><a href='$link'>" . $link . "</a></li>";
        }

        $result .= "</ul>";
    }
    $result .= "<small><a href='" . $row->value->url . "'>View original source...</a> ID: " . strip_tags($row->value->docID) . "</small>";
    $result .= "</div>";
    return $result;
}
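
// Usage note: displayLogEntry() builds and *returns* its HTML as a string
// rather than echoing it directly, so callers must print the result
// themselves, as view.php does below:
//
//   $obj = new stdClass();
//   $obj->value = $foidocsdb->get($_REQUEST['id']);
//   echo displayLogEntry($obj, $idtoname);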
   
<?php

include('template.inc.php');
include_header_documents("");
include_once('../include/common.inc.php');

$agenciesdb = $server->get_db('disclosr-agencies');

$idtoname = Array();
foreach ($agenciesdb->get_view("app", "byCanonicalName")->rows as $row) {
    $idtoname[$row->id] = trim($row->value->name);
}
$foidocsdb = $server->get_db('disclosr-foidocuments');
try {
    $obj = new stdClass();
    $obj->value = $foidocsdb->get($_REQUEST['id']);
    echo displayLogEntry($obj, $idtoname);
} catch (SetteeRestClientException $e) {
    setteErrorHandler($e);
}
include_footer_documents();
?>
 
<?php

include_once('../include/common.inc.php');
$hash = $_REQUEST['hash'];
$docsdb = $server->get_db('disclosr-documents');
try {
    $doc = object_to_array($docsdb->get($hash));
} catch (SetteeRestClientException $e) {
    setteErrorHandler($e);
}

if (!isset($doc['_attachments']) || count($doc['_attachments']) == 0) die("no attachments");
$attachments = $doc['_attachments'];
$attachment_filenames = array_keys($attachments);
//print_r($attachments);
$url = $serverAddr . 'disclosr-documents/' . $hash . '/' . urlencode($attachment_filenames[0]);
//echo $url;
$request = Requests::get($url);
echo ($request->body);
   
<?php

include $basePath . "schemas/schemas.inc.php";

require ($basePath . 'couchdb/settee/src/settee.php');

if (php_uname('n') == "vanille") {
    $serverAddr = 'http://192.168.178.21:5984/';
} else if (php_uname('n') == "KYUUBEY") {
    $serverAddr = 'http://192.168.1.148:5984/';
    $serverAddr = 'http://127.0.0.1:5984/'; // overrides the LAN address above
} else {
    $serverAddr = 'http://127.0.0.1:5984/';
}
$server = new SetteeServer($serverAddr);

function setteErrorHandler($e) {
    if (class_exists('Amon')) {
        Amon::log($e->getMessage() . " " . print_r($_SERVER, true), array('error'));
    }
    echo $e->getMessage() . "<br>" . PHP_EOL;
}
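
// Typical calling pattern for the helpers above, following the scripts in
// this repository (sketch only; the database and view names are those used
// by the sitemap script):
//
//   $foidocsdb = $server->get_db('disclosr-foidocuments');
//   try {
//       $rows = $foidocsdb->get_view("app", "all")->rows;
//       foreach ($rows as $row) {
//           echo $row->value->_id . PHP_EOL;
//       }
//   } catch (SetteeRestClientException $e) {
//       setteErrorHandler($e);
//   }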
   
file:a/lib/FeedItem.php (deleted)
<?php
/**
 * Universal Feed Writer
 *
 * FeedItem class - Used as feed element in FeedWriter class
 *
 * @package UniversalFeedWriter
 * @author Anis uddin Ahmad <anisniit@gmail.com>
 * @link http://www.ajaxray.com/projects/rss
 */
class FeedItem
{
    private $elements = array();    //Collection of feed elements
    private $version;

    /**
     * Constructor
     *
     * @param constant (RSS1/RSS2/ATOM) RSS2 is default.
     */
    function __construct($version = RSS2)
    {
        $this->version = $version;
    }

    /**
     * Add an element to the elements array
     *
     * @access public
     * @param string The tag name of an element
     * @param string The content of the tag
     * @param array Attributes (if any) in 'attrName' => 'attrValue' format
     * @return void
     */
    public function addElement($elementName, $content, $attributes = null)
    {
        $this->elements[$elementName]['name'] = $elementName;
        $this->elements[$elementName]['content'] = $content;
        $this->elements[$elementName]['attributes'] = $attributes;
    }

    /**
     * Set multiple feed elements from an array.
     * Elements which have attributes cannot be added by this method
     *
     * @access public
     * @param array array of elements in 'tagName' => 'tagContent' format.
     * @return void
     */
    public function addElementArray($elementArray)
    {
        if (!is_array($elementArray)) return;
        foreach ($elementArray as $elementName => $content)
        {
            $this->addElement($elementName, $content);
        }
    }

    /**
     * Return the collection of elements in this feed item
     *
     * @access public
     * @return array
     */
    public function getElements()
    {
        return $this->elements;
    }

    // Wrapper functions ------------------------------------------------------

    /**
     * Set the 'description' element of the feed item
     *
     * @access public
     * @param string The content of the 'description' element
     * @return void
     */
    public function setDescription($description)
    {
        $tag = ($this->version == ATOM) ? 'summary' : 'description';
        $this->addElement($tag, $description);
    }

    /**
     * @desc Set the 'title' element of the feed item
     * @access public
     * @param string The content of the 'title' element
     * @return void
     */
    public function setTitle($title)
    {
        $this->addElement('title', $title);
    }

    /**
     * Set the 'date' element of the feed item
     *
     * @access public
     * @param string The content of the 'date' element
     * @return void
     */
    public function setDate($date)
    {
        if (!is_numeric($date))
        {
            $date = strtotime($date);
        }

        if ($this->version == ATOM)
        {
            $tag = 'updated';
            $value = date(DATE_ATOM, $date);
        }
        elseif ($this->version == RSS2)
        {
            $tag = 'pubDate';
            $value = date(DATE_RSS, $date);
        }
        else
        {
            $tag = 'dc:date';
            $value = date("Y-m-d", $date);
        }

        $this->addElement($tag, $value);
    }

    /**
     * Set the 'link' element of the feed item
     *
     * @access public
     * @param string The content of the 'link' element
     * @return void
     */
    public function setLink($link)
    {
        if ($this->version == RSS2 || $this->version == RSS1)
        {
            $this->addElement('link', $link);
        }
        else
        {
            $this->addElement('link', '', array('href' => $link));
            $this->addElement('id', FeedWriter::uuid($link, 'urn:uuid:'));
        }
    }

    /**
     * Set the 'enclosure' element of the feed item
     * For RSS 2.0 only
     *
     * @access public
     * @param string The url attribute of the enclosure tag
     * @param string The length attribute of the enclosure tag
     * @param string The type attribute of the enclosure tag
     * @return void
     */
    public function setEncloser($url, $length, $type)
    {
        $attributes = array('url' => $url, 'length' => $length, 'type' => $type);
        $this->addElement('enclosure', '', $attributes);
    }

} // end of class FeedItem
?>
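
A minimal sketch of building one feed item with the wrapper methods above. It assumes lib/FeedWriter.php below is on the include path (it defines the RSS1/RSS2/ATOM constants and an autoloader for FeedItem); all titles, URLs, and dates are placeholders:

<?php
include 'FeedWriter.php'; // defines RSS1/RSS2/ATOM and autoloads FeedItem

$item = new FeedItem(RSS2);
$item->setTitle('Example entry');
$item->setLink('http://example.com/entry');
$item->setDate('2012-11-01'); // any strtotime()-parsable string, or a timestamp
$item->setDescription('Placeholder description.');
print_r($item->getElements()); // inspect the accumulated elements
?>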
 
file:a/lib/FeedWriter.php (deleted)
<?php
// RSS 0.90  Officially obsoleted by 1.0
// RSS 0.91, 0.92, 0.93 and 0.94  Officially obsoleted by 2.0
// So, define constants for RSS 1.0, RSS 2.0 and ATOM

define('RSS1', 'RSS 1.0', true);
define('RSS2', 'RSS 2.0', true);
define('ATOM', 'ATOM', true);

/**
 * Universal Feed Writer class
 *
 * Generates RSS 1.0, RSS 2.0 and ATOM feeds
 *
 * @package UniversalFeedWriter
 * @author Anis uddin Ahmad <anisniit@gmail.com>
 * @link http://www.ajaxray.com/projects/rss
 */
class FeedWriter
{
    private $channels = array();      // Collection of channel elements
    private $items = array();         // Collection of items as object of FeedItem class.
    private $data = array();          // Store some other version wise data
    private $CDATAEncoding = array(); // The tag names which have to be encoded as CDATA

    private $version = null;

    /**
     * Constructor
     *
     * @param constant the version constant (RSS1/RSS2/ATOM).
     */
    function __construct($version = RSS2)
    {
        $this->version = $version;

        // Setting default values for essential channel elements
        $this->channels['title'] = $version . ' Feed';
        $this->channels['link'] = 'http://www.ajaxray.com/blog';

        //Tag names to encode in CDATA
        $this->CDATAEncoding = array('description', 'content:encoded', 'summary');
    }

    // Start # public functions ---------------------------------------------

    /**
     * Set a channel element
     * @access public
     * @param string name of the channel tag
     * @param string content of the channel tag
     * @return void
     */
    public function setChannelElement($elementName, $content)
    {
        $this->channels[$elementName] = $content;
    }

    /**
     * Set multiple channel elements from an array. Array elements
     * should be in 'channelName' => 'channelContent' format.
     *
     * @access public
     * @param array array of channels
     * @return void
     */
    public function setChannelElementsFromArray($elementArray)
    {
        if (!is_array($elementArray)) return;
        foreach ($elementArray as $elementName => $content)
        {
            $this->setChannelElement($elementName, $content);
        }
    }

    /**
     * Generate the actual RSS/ATOM file
     *
     * @access public
     * @return void
     */
    public function genarateFeed()
    {
        header("Content-type: text/xml");

        $this->printHead();
        $this->printChannels();
        $this->printItems();
        $this->printTale();
    }

    /**
     * Create a new FeedItem.
     *
     * @access public
     * @return object instance of FeedItem class
     */
    public function createNewItem()
    {
        $Item = new FeedItem($this->version);
        return $Item;
    }

    /**
     * Add a FeedItem to the main class
     *
     * @access public
     * @param object instance of FeedItem class
     * @return void
     */
    public function addItem($feedItem)
    {
        $this->items[] = $feedItem;
    }


    // Wrapper functions -------------------------------------------------------------------

    /**
     * Set the 'title' channel element
     *
     * @access public
     * @param string value of the 'title' channel tag
     * @return void
     */
    public function setTitle($title)
    {
        $this->setChannelElement('title', $title);
    }

    /**
     * Set the 'description' channel element
     *
     * @access public
     * @param string value of the 'description' channel tag
     * @return void
     */
    public function setDescription($description)
    {
        $this->setChannelElement('description', $description);
    }

    /**
     * Set the 'link' channel element
     *
     * @access public
     * @param string value of the 'link' channel tag
     * @return void
     */
    public function setLink($link)
    {
        $this->setChannelElement('link', $link);
    }

    /**
     * Set the 'image' channel element
     *
     * @access public
     * @param string title of the image
     * @param string link url of the image
     * @param string path url of the image
     * @return void
     */
    public function setImage($title, $link, $url)
    {
        $this->setChannelElement('image', array('title' => $title, 'link' => $link, 'url' => $url));
    }

    /**
     * Set the 'about' channel element. Only for RSS 1.0
     *
     * @access public
     * @param string value of the 'about' channel tag
     * @return void
     */
    public function setChannelAbout($url)
    {
        $this->data['ChannelAbout'] = $url;
    }

    /**
     * Generates a UUID
     * @author Anis uddin Ahmad <admin@ajaxray.com>
     * @param string an optional prefix
     * @return string the formatted uuid
     */
    public function uuid($key = null, $prefix = '')
    {
        $key = ($key == null) ? uniqid(rand()) : $key;
        $chars = md5($key);
        $uuid  = substr($chars, 0, 8) . '-';
        $uuid .= substr($chars, 8, 4) . '-';
        $uuid .= substr($chars, 12, 4) . '-';
        $uuid .= substr($chars, 16, 4) . '-';
        $uuid .= substr($chars, 20, 12);

        return $prefix . $uuid;
    }
    // End # public functions ----------------------------------------------

    // Start # private functions ----------------------------------------------

    /**
     * Prints the xml and rss namespace
     *
     * @access private
     * @return void
     */
    private function printHead()
    {
        $out = '<?xml version="1.0" encoding="utf-8"?>' . "\n";

        if ($this->version == RSS2)
        {
            $out .= '<rss version="2.0"
                xmlns:content="http://purl.org/rss/1.0/modules/content/"
                xmlns:wfw="http://wellformedweb.org/CommentAPI/"
                >' . PHP_EOL;
        }
        elseif ($this->version == RSS1)
        {
            $out .= '<rdf:RDF
                xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
                xmlns="http://purl.org/rss/1.0/"
                xmlns:dc="http://purl.org/dc/elements/1.1/"
                >' . PHP_EOL;
        }
        else if ($this->version == ATOM)
        {
            $out .= '<feed xmlns="http://www.w3.org/2005/Atom">' . PHP_EOL;
        }
        echo $out;
    }

    /**
     * Closes the open tags at the end of file
     *
     * @access private
     * @return void
     */
    private function printTale()
    {
        if ($this->version == RSS2)
        {
            echo '</channel>' . PHP_EOL . '</rss>';
        }
        elseif ($this->version == RSS1)
        {
            echo '</rdf:RDF>';
        }
        else if ($this->version == ATOM)
        {
            echo '</feed>';
        }
    }

    /**
     * Creates a single node in xml format
     *
     * @access private
     * @param string name of the tag
     * @param mixed tag value as string or array of nested tags in 'tagName' => 'tagValue' format
     * @param array Attributes (if any) in 'attrName' => 'attrValue' format
     * @return string formatted xml tag
     */
    private function makeNode($tagName, $tagContent, $attributes = null)
    {
        $nodeText = '';
        $attrText = '';

        if (is_array($attributes))
        {
            foreach ($attributes as $key => $value)
            {
                $attrText .= " $key=\"$value\" ";
            }
        }

        if (is_array($tagContent) && $this->version == RSS1)
        {
            $attrText = ' rdf:parseType="Resource"';
        }

        $attrText .= (in_array($tagName, $this->CDATAEncoding) && $this->version == ATOM) ? ' type="html" ' : '';
        $nodeText .= (in_array($tagName, $this->CDATAEncoding)) ? "<{$tagName}{$attrText}><![CDATA[" : "<{$tagName}{$attrText}>";

        if (is_array($tagContent))
        {
            foreach ($tagContent as $key => $value)
            {
                $nodeText .= $this->makeNode($key, $value);
            }
        }
        else
        {
            $nodeText .= (in_array($tagName, $this->CDATAEncoding)) ? $tagContent : htmlentities($tagContent);
        }

        $nodeText .= (in_array($tagName, $this->CDATAEncoding)) ? "]]></$tagName>" : "</$tagName>";

        return $nodeText . PHP_EOL;
    }

    /**
     * @desc Print channels
     * @access private
     * @return void
     */
    private function printChannels()
    {
        //Start channel tag
        switch ($this->version)
        {
            case RSS2:
                echo '<channel>' . PHP_EOL;
                break;
            case RSS1:
                echo (isset($this->data['ChannelAbout'])) ? "<channel rdf:about=\"{$this->data['ChannelAbout']}\">" : "<channel rdf:about=\"{$this->channels['link']}\">";
                break;
        }

        //Print Items of channel
        foreach ($this->channels as $key => $value)
        {
            if ($this->version == ATOM && $key == 'link')
            {
                // ATOM prints link element as href attribute
                echo $this->makeNode($key, '', array('href' => $value));
                //Add the id for ATOM
                echo $this->makeNode('id', $this->uuid($value, 'urn:uuid:'));
            }
            else
            {
                echo $this->makeNode($key, $value);
            }
        }

        //RSS 1.0 has a special <rdf:Seq> tag within channel
        if ($this->version == RSS1)
        {
            echo "<items>" . PHP_EOL . "<rdf:Seq>" . PHP_EOL;
            foreach ($this->items as $item)
            {
                $thisItems = $item->getElements();
                echo "<rdf:li resource=\"{$thisItems['link']['content']}\"/>" . PHP_EOL;
            }
            echo "</rdf:Seq>" . PHP_EOL . "</items>" . PHP_EOL . "</channel>" . PHP_EOL;
        }
    }

    /**
     * Prints formatted feed items
     *
     * @access private
     * @return void
     */
    private function printItems()
    {
        foreach ($this->items as $item)
        {
            $thisItems = $item->getElements();

            //the argument is printed as the rdf:about attribute of item in RSS 1.0
            echo $this->startItem($thisItems['link']['content']);

            foreach ($thisItems as $feedItem)
            {
                echo $this->makeNode($feedItem['name'], $feedItem['content'], $feedItem['attributes']);
            }
            echo $this->endItem();
        }
    }

    /**
     * Make the starting tag of an item
     *
     * @access private
     * @param string The value of the about tag, used only for RSS 1.0
     * @return void
     */
    private function startItem($about = false)
    {
        if ($this->version == RSS2)
        {
            echo '<item>' . PHP_EOL;
        }
        elseif ($this->version == RSS1)
        {
            if ($about)
            {
                echo "<item rdf:about=\"$about\">" . PHP_EOL;
            }
            else
            {
                die('link element is not set .\n It\'s required for RSS 1.0 to be used as about attribute of item');
            }
        }
        else if ($this->version == ATOM)
        {
            echo "<entry>" . PHP_EOL;
        }
    }

    /**
     * Closes the feed item tag
     *
     * @access private
     * @return void
     */
    private function endItem()
    {
        if ($this->version == RSS2 || $this->version == RSS1)
        {
            echo '</item>' . PHP_EOL;
        }
        else if ($this->version == ATOM)
        {
            echo "</entry>" . PHP_EOL;
        }
    }

    // End # private functions ----------------------------------------------

} // end of class FeedWriter

// autoload classes
function __autoload($class_name)
{
    require_once $class_name . '.php';
}
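
For reference, a minimal end-to-end sketch of emitting an RSS 2.0 feed with the public API above. Titles and URLs are placeholders; the rss.xml.php linked from the page header template presumably does something similar:

<?php
include 'FeedWriter.php'; // pulls in FeedItem via __autoload()

$feed = new FeedWriter(RSS2);
$feed->setTitle('Australian Disclosure Logs');
$feed->setLink('http://disclosurelo.gs');
$feed->setDescription('Latest disclosure log entries');

$item = $feed->createNewItem();
$item->setTitle('Example FOI release');
$item->setLink('http://disclosurelo.gs/view.php?id=example'); // placeholder id
$item->setDate(time());
$item->setDescription('Documents released under FOI.');
$feed->addItem($item);

$feed->genarateFeed(); // sends the text/xml header and prints the feed
?>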