fix reduce
fix reduce


Former-commit-id: 9bf62392d9d505afcc2ed31f23d49c84a98a2091

<?php <?php
   
/** /**
* Databaase class. * Databaase class.
*/ */
class SetteeDatabase { class SetteeDatabase {
   
/** /**
* Base URL of the CouchDB REST API * Base URL of the CouchDB REST API
*/ */
private $conn_url; private $conn_url;
   
/** /**
* HTTP REST Client instance * HTTP REST Client instance
*/ */
protected $rest_client; protected $rest_client;
   
/** /**
* Name of the database * Name of the database
*/ */
private $dbname; private $dbname;
   
/** /**
* Default constructor * Default constructor
*/ */
function __construct($conn_url, $dbname) { function __construct($conn_url, $dbname) {
$this->conn_url = $conn_url; $this->conn_url = $conn_url;
$this->dbname = $dbname; $this->dbname = $dbname;
$this->rest_client = SetteeRestClient::get_instance($this->conn_url); $this->rest_client = SetteeRestClient::get_instance($this->conn_url);
} }
   
  /**
/** * Get UUID from CouchDB
* Get UUID from CouchDB *
* * @return
* @return * CouchDB-generated UUID string
* CouchDB-generated UUID string *
* */
*/ function gen_uuid() {
function gen_uuid() { $ret = $this->rest_client->http_get('_uuids');
$ret = $this->rest_client->http_get('_uuids'); return $ret['decoded']->uuids[0]; // should never be empty at this point, so no checking
return $ret['decoded']->uuids[0]; // should never be empty at this point, so no checking }
}  
  /**
/** * Create or update a document database
* Create or update a document database *
* * @param $document
* @param $document * PHP object, a PHP associative array, or a JSON String representing the document to be saved. PHP Objects and arrays are JSON-encoded automatically.
* PHP object, a PHP associative array, or a JSON String representing the document to be saved. PHP Objects and arrays are JSON-encoded automatically. *
* * <p>If $document has a an "_id" property set, it will be used as document's unique id (even for "create" operation).
* <p>If $document has a an "_id" property set, it will be used as document's unique id (even for "create" operation). * If "_id" is missing, CouchDB will be used to generate a UUID.
* If "_id" is missing, CouchDB will be used to generate a UUID. *
* * <p>If $document has a "_rev" property (revision), document will be updated, rather than creating a new document.
* <p>If $document has a "_rev" property (revision), document will be updated, rather than creating a new document. * You have to provide "_rev" if you want to update an existing document, otherwise operation will be assumed to be
* You have to provide "_rev" if you want to update an existing document, otherwise operation will be assumed to be * one of creation and you will get a duplicate document exception from CouchDB. Also, you may not provide "_rev" but
* one of creation and you will get a duplicate document exception from CouchDB. Also, you may not provide "_rev" but * not provide "_id" since that is an invalid input.
* not provide "_id" since that is an invalid input. *
* * @param $allowRevAutoDetection
* @param $allowRevAutoDetection * Default: false. When true and _rev is missing from the document, save() function will auto-detect latest revision
* Default: false. When true and _rev is missing from the document, save() function will auto-detect latest revision * for a document and use it. This option is "false" by default because it involves an extra http HEAD request and
* for a document and use it. This option is "false" by default because it involves an extra http HEAD request and * therefore can make save() operation slightly slower if such auto-detection is not required.
* therefore can make save() operation slightly slower if such auto-detection is not required. *
* * @return
* @return * document object with the database id (uuid) and revision attached;
* document object with the database id (uuid) and revision attached; *
* * @throws SetteeCreateDatabaseException
* @throws SetteeCreateDatabaseException */
*/ function save($document, $allowRevAutoDetection = false) {
function save($document, $allowRevAutoDetection = false) { if (is_string($document)) {
if (is_string($document)) { $document = json_decode($document);
$document = json_decode($document); }
}  
  // Allow passing of $document as an array (for syntactic simplicity and also because in JSON world it does not matter)
// Allow passing of $document as an array (for syntactic simplicity and also because in JSON world it does not matter) if (is_array($document)) {
if(is_array($document)) { $document = (object) $document;
$document = (object) $document; }
}  
  if (empty($document->_id) && empty($document->_rev)) {
if (empty($document->_id) && empty($document->_rev)) { $id = $this->gen_uuid();
$id = $this->gen_uuid(); } elseif (empty($document->_id) && !empty($document->_rev)) {
} throw new SetteeWrongInputException("Error: You can not save a document with a revision provided, but missing id");
elseif (empty($document->_id) && !empty($document->_rev)) { } else {
throw new SetteeWrongInputException("Error: You can not save a document with a revision provided, but missing id"); $id = $document->_id;
}  
else { if ($allowRevAutoDetection) {
$id = $document->_id; try {
  $rev = $this->get_rev($id);
if ($allowRevAutoDetection) { } catch (SetteeRestClientException $e) {
try { // auto-detection may fail legitimately, if a document has never been saved before (new doc), so skipping error
$rev = $this->get_rev($id); }
} catch (SetteeRestClientException $e) { if (!empty($rev)) {
// auto-detection may fail legitimately, if a document has never been saved before (new doc), so skipping error $document->_rev = $rev;
} }
if (!empty($rev)) { }
$document->_rev = $rev; }
}  
} $full_uri = $this->dbname . "/" . $this->safe_urlencode($id);
} $document_json = json_encode($document, JSON_NUMERIC_CHECK);
   
$full_uri = $this->dbname . "/" . $this->safe_urlencode($id); $ret = $this->rest_client->http_put($full_uri, $document_json);
$document_json = json_encode($document, JSON_NUMERIC_CHECK);  
  $document->_id = $ret['decoded']->id;
$ret = $this->rest_client->http_put($full_uri, $document_json); $document->_rev = $ret['decoded']->rev;
   
$document->_id = $ret['decoded']->id; return $document;
$document->_rev = $ret['decoded']->rev; }
   
return $document; /**
} * @param $doc
  * @param $name
/** * @param $content
* @param $doc * Content of the attachment in a string-buffer format. This function will automatically base64-encode content for
* @param $name * you, so you don't have to do it.
* @param $content * @param $mime_type
* Content of the attachment in a string-buffer format. This function will automatically base64-encode content for * Optional. Will be auto-detected if not provided
* you, so you don't have to do it. * @return void
* @param $mime_type */
* Optional. Will be auto-detected if not provided public function add_attachment($doc, $name, $content, $mime_type = null) {
* @return void if (empty($doc->_attachments) || !is_object($doc->_attachments)) {
*/ $doc->_attachments = new stdClass();
public function add_attachment($doc, $name, $content, $mime_type = null) { }
if (empty($doc->_attachments) || !is_object($doc->_attachments)) {  
$doc->_attachments = new stdClass(); if (empty($mime_type)) {
} $mime_type = $this->rest_client->content_mime_type($content);
  }
if (empty($mime_type)) {  
$mime_type = $this->rest_client->content_mime_type($content); $doc->_attachments->$name = new stdClass();
} $doc->_attachments->$name->content_type = $mime_type;
  $doc->_attachments->$name->data = base64_encode($content);
$doc->_attachments->$name = new stdClass(); }
$doc->_attachments->$name->content_type = $mime_type;  
$doc->_attachments->$name->data = base64_encode($content); /**
} * @param $doc
  * @param $name
/** * @param $file
* @param $doc * Full path to a file (e.g. as returned by PHP's realpath function).
* @param $name * @param $mime_type
* @param $file * Optional. Will be auto-detected if not provided
* Full path to a file (e.g. as returned by PHP's realpath function). * @return void
* @param $mime_type */
* Optional. Will be auto-detected if not provided public function add_attachment_file($doc, $name, $file, $mime_type = null) {
* @return void $content = file_get_contents($file);
*/ $this->add_attachment($doc, $name, $content, $mime_type);
public function add_attachment_file($doc, $name, $file, $mime_type = null) { }
$content = file_get_contents($file);  
$this->add_attachment($doc, $name, $content, $mime_type); /**
} *
  * Retrieve a document from CouchDB
/** *
* * @throws SetteeWrongInputException
* Retrieve a document from CouchDB *
* * @param $id
* @throws SetteeWrongInputException * Unique ID (usually: UUID) of the document to be retrieved.
* * @return
* @param $id * database document in PHP object format.
* Unique ID (usually: UUID) of the document to be retrieved. */
* @return function get($id) {
* database document in PHP object format. if (empty($id)) {
*/ throw new SetteeWrongInputException("Error: Can't retrieve a document without a uuid.");
function get($id) { }
if (empty($id)) {  
throw new SetteeWrongInputException("Error: Can't retrieve a document without a uuid."); $full_uri = $this->dbname . "/" . $this->safe_urlencode($id);
} $full_uri = str_replace("%3Frev%3D", "?rev=", $full_uri);
  $ret = $this->rest_client->http_get($full_uri);
$full_uri = $this->dbname . "/" . $this->safe_urlencode($id); return $ret['decoded'];
$full_uri = str_replace("%3Frev%3D","?rev=",$full_uri); }
$ret = $this->rest_client->http_get($full_uri);  
return $ret['decoded']; /**
} *
  * Get the latest revision of a document with document id: $id in CouchDB.
/** *
* * @throws SetteeWrongInputException
* Get the latest revision of a document with document id: $id in CouchDB. *
* * @param $id
* @throws SetteeWrongInputException * Unique ID (usually: UUID) of the document to be retrieved.
* * @return
* @param $id * database document in PHP object format.
* Unique ID (usually: UUID) of the document to be retrieved. */
* @return function get_rev($id) {
* database document in PHP object format. if (empty($id)) {
*/ throw new SetteeWrongInputException("Error: Can't query a document without a uuid.");
function get_rev($id) { }
if (empty($id)) {  
throw new SetteeWrongInputException("Error: Can't query a document without a uuid."); $full_uri = $this->dbname . "/" . $this->safe_urlencode($id);
} $headers = $this->rest_client->http_head($full_uri);
  if (empty($headers['Etag'])) {
$full_uri = $this->dbname . "/" . $this->safe_urlencode($id); throw new SetteeRestClientException("Error: could not retrieve revision. Server unexpectedly returned empty Etag");
$headers = $this->rest_client->http_head($full_uri); }
if (empty($headers['Etag'])) { $etag = str_replace('"', '', $headers['Etag']);
throw new SetteeRestClientException("Error: could not retrieve revision. Server unexpectedly returned empty Etag"); return $etag;
} }
$etag = str_replace('"', '', $headers['Etag']);  
return $etag; /**
} * Delete a document
  *
/** * @param $document
* Delete a document * a PHP object or JSON representation of the document that has _id and _rev fields.
* *
* @param $document * @return void
* a PHP object or JSON representation of the document that has _id and _rev fields. */
* function delete($document) {
* @return void if (!is_object($document)) {
*/ $document = json_decode($document);
function delete($document) { }
if (!is_object($document)) {  
$document = json_decode($document); $full_uri = $this->dbname . "/" . $this->safe_urlencode($document->_id) . "?rev=" . $document->_rev;
} $this->rest_client->http_delete($full_uri);
  }
$full_uri = $this->dbname . "/" . $this->safe_urlencode($document->_id) . "?rev=" . $document->_rev;  
$this->rest_client->http_delete($full_uri); /* ----------------- View-related functions -------------- */
}  
  /**
  * Create a new view or update an existing one.
/*----------------- View-related functions --------------*/ *
  * @param $design_doc
/** * @param $view_name
* Create a new view or update an existing one. * @param $map_src
* * Source code of the map function in Javascript
* @param $design_doc * @param $reduce_src
* @param $view_name * Source code of the reduce function in Javascript (optional)
* @param $map_src * @return void
* Source code of the map function in Javascript */
* @param $reduce_src function save_view($design_doc, $view_name, $map_src, $reduce_src = null) {
* Source code of the reduce function in Javascript (optional) $obj = new stdClass();
* @return void $obj->_id = "_design/" . urlencode($design_doc);
*/ $view_name = urlencode($view_name);
function save_view($design_doc, $view_name, $map_src, $reduce_src = null) { $obj->views->$view_name->map = $map_src;
$obj = new stdClass(); if (!empty($reduce_src)) {
$obj->_id = "_design/" . urlencode($design_doc); $obj->views->$view_name->reduce = $reduce_src;
$view_name = urlencode($view_name); }
$obj->views->$view_name->map = $map_src;  
if (!empty($reduce_src)) { // allow safe updates (even if slightly slower due to extra: rev-detection check).
$obj->views->$view_name->reduce = $reduce_src; return $this->save($obj, true);
} }
   
// allow safe updates (even if slightly slower due to extra: rev-detection check). /**
return $this->save($obj, true); * Create a new view or update an existing one.
} *
  * @param $design_doc
/** * @param $view_name
* Create a new view or update an existing one. * @param $key
* * key parameter to a view. Can be a single value or an array (for a range). If passed an array, function assumes
* @param $design_doc * that first element is startkey, second: endkey.
* @param $view_name * @param $descending
* @param $key * return results in descending order. Please don't forget that if you are using a startkey/endkey, when you change
* key parameter to a view. Can be a single value or an array (for a range). If passed an array, function assumes * order you also need to swap startkey and endkey values!
* that first element is startkey, second: endkey. *
* @param $descending * @return void
* return results in descending order. Please don't forget that if you are using a startkey/endkey, when you change */
* order you also need to swap startkey and endkey values! function get_view($design_doc, $view_name, $key = null, $descending = false, $limit = false, $reduce = null, $startdocid = null) {
* $id = "_design/" . urlencode($design_doc);
* @return void $view_name = urlencode($view_name);
*/ $id .= "/_view/$view_name";
function get_view($design_doc, $view_name, $key = null, $descending = false, $limit = false) {  
$id = "_design/" . urlencode($design_doc); $data = array();
$view_name = urlencode($view_name); if (!empty($key)) {
$id .= "/_view/$view_name"; if (is_string($key)) {
  $data = "key=" . '"' . $key . '"';
$data = array(); } elseif (is_array($key)) {
if (!empty($key)) { list($startkey, $endkey) = $key;
if (is_string($key)) { $data = "startkey=" . '"' . $startkey . '"&' . "endkey=" . '"' . $endkey . '"';
$data = "key=" . '"' . $key . '"'; }
}  
elseif (is_array($key)) { if ($descending) {
list($startkey, $endkey) = $key; $data .= "&descending=true";
$data = "startkey=" . '"' . $startkey . '"&' . "endkey=" . '"' . $endkey . '"'; }
} if ($startdocid != null) {
  $data .= "&startkey_docid='$startdocid'";
if ($descending) { }
$data .= "&descending=true"; if ($reduce === true) {
} $data .= "&reduce=true";
if ($limit) { } else if ($reduce === false){
$data .= "&limit=".$limit;  
} $data .= "&reduce=false";
} }
  if ($limit) {
  $data .= "&limit=" . $limit;
  }
if (empty($id)) { }
throw new SetteeWrongInputException("Error: Can't retrieve a document without a uuid.");  
}  
   
$full_uri = $this->dbname . "/" . $this->safe_urlencode($id); if (empty($id)) {
$full_uri = str_replace("%253Fgroup%253D","?group=",$full_uri); throw new SetteeWrongInputException("Error: Can't retrieve a document without a uuid.");
$full_uri = str_replace("%253Flimit%253D","?limit=",$full_uri); }
$ret = $this->rest_client->http_get($full_uri, $data);  
return $ret['decoded']; $full_uri = $this->dbname . "/" . $this->safe_urlencode($id);
   
} $full_uri = str_replace("%253Fgroup%253D", "?group=", $full_uri);
  $full_uri = str_replace("%253Flimit%253D", "?limit=", $full_uri);
/** $ret = $this->rest_client->http_get($full_uri, $data);
* @param $id //$ret['decoded'] = str_replace("?k","&k",$ret['decoded']);
* @return return $ret['decoded'];
* return a properly url-encoded id. }
*/  
private function safe_urlencode($id) { /**
//-- System views like _design can have "/" in their URLs. * @param $id
$id = rawurlencode($id); * @return
if (substr($id, 0, 1) == '_') { * return a properly url-encoded id.
$id = str_replace('%2F', '/', $id); */
} private function safe_urlencode($id) {
return $id; //-- System views like _design can have "/" in their URLs.
} $id = rawurlencode($id);
  if (substr($id, 0, 1) == '_') {
/** Getter for a database name */ $id = str_replace('%2F', '/', $id);
function get_name() { }
return $this->dbname; return $id;
} }
   
  /** Getter for a database name */
  function get_name() {
  return $this->dbname;
  }
   
} }
   
<?php <?php
   
/** /**
* HTTP REST Client for CouchDB API * HTTP REST Client for CouchDB API
*/ */
class SetteeRestClient { class SetteeRestClient {
/** /**
* HTTP Timeout in Milliseconds * HTTP Timeout in Milliseconds
*/ */
const HTTP_TIMEOUT = 2000; const HTTP_TIMEOUT = 2000;
private $base_url; private $base_url;
private $curl; private $curl;
private static $curl_workers = array(); private static $curl_workers = array();
   
/** /**
* Singleton factory method * Singleton factory method
*/ */
static function get_instance($base_url) { static function get_instance($base_url) {
   
if (empty(self::$curl_workers[$base_url])) { if (empty(self::$curl_workers[$base_url])) {
self::$curl_workers[$base_url] = new SetteeRestClient($base_url); self::$curl_workers[$base_url] = new SetteeRestClient($base_url);
} }
return self::$curl_workers[$base_url]; return self::$curl_workers[$base_url];
} }
/** /**
* Class constructor * Class constructor
*/ */
private function __construct($base_url) { private function __construct($base_url) {
$this->base_url = $base_url; $this->base_url = $base_url;
   
$curl = curl_init(); $curl = curl_init();
curl_setopt($curl, CURLOPT_USERAGENT, "Settee CouchDB Client/1.0"); curl_setopt($curl, CURLOPT_USERAGENT, "Settee CouchDB Client/1.0");
curl_setopt($curl, CURLOPT_HTTPHEADER, array('Content-Type: application/json')); curl_setopt($curl, CURLOPT_HTTPHEADER, array('Content-Type: application/json'));
curl_setopt($curl, CURLOPT_RETURNTRANSFER, 1); curl_setopt($curl, CURLOPT_RETURNTRANSFER, 1);
curl_setopt($curl, CURLOPT_HEADER, 0); curl_setopt($curl, CURLOPT_HEADER, 0);
curl_setopt($curl, CURLOPT_FOLLOWLOCATION, 1); curl_setopt($curl, CURLOPT_FOLLOWLOCATION, 1);
curl_setopt($curl, CURLOPT_TIMEOUT_MS, self::HTTP_TIMEOUT); curl_setopt($curl, CURLOPT_TIMEOUT_MS, self::HTTP_TIMEOUT);
curl_setopt($curl, CURLOPT_FORBID_REUSE, false); // Connection-pool for CURL curl_setopt($curl, CURLOPT_FORBID_REUSE, false); // Connection-pool for CURL
   
$this->curl = $curl; $this->curl = $curl;
} }
   
/** /**
* Class destructor cleans up any resources * Class destructor cleans up any resources
*/ */
function __destruct() { function __destruct() {
curl_close($this->curl); curl_close($this->curl);
} }
   
/** /**
* HTTP HEAD * HTTP HEAD
* *
* @return * @return
* Raw HTTP Headers of the response. * Raw HTTP Headers of the response.
* *
* @see: http://www.php.net/manual/en/context.params.php * @see: http://www.php.net/manual/en/context.params.php
* *
*/ */
function http_head($uri) { function http_head($uri) {
curl_setopt($this->curl, CURLOPT_HEADER, 1); curl_setopt($this->curl, CURLOPT_HEADER, 1);
   
$full_url = $this->get_full_url($uri); $full_url = $this->get_full_url($uri);
curl_setopt($this->curl, CURLOPT_URL, $full_url); curl_setopt($this->curl, CURLOPT_URL, $full_url);
curl_setopt($this->curl, CURLOPT_CUSTOMREQUEST, 'HEAD'); curl_setopt($this->curl, CURLOPT_CUSTOMREQUEST, 'HEAD');
curl_setopt($this->curl, CURLOPT_NOBODY, true); curl_setopt($this->curl, CURLOPT_NOBODY, true);
   
   
$response = curl_exec($this->curl); $response = curl_exec($this->curl);
// Restore default values // Restore default values
curl_setopt($this->curl, CURLOPT_NOBODY, false); curl_setopt($this->curl, CURLOPT_NOBODY, false);
curl_setopt($this->curl, CURLOPT_HEADER, false); curl_setopt($this->curl, CURLOPT_HEADER, false);
$resp_code = curl_getinfo($this->curl, CURLINFO_HTTP_CODE); $resp_code = curl_getinfo($this->curl, CURLINFO_HTTP_CODE);
if ($resp_code == 404 ) { if ($resp_code == 404 ) {
throw new SetteeRestClientException("Couch document not found at: '$full_url'"); throw new SetteeRestClientException("Couch document not found at: '$full_url'");
} }
   
if (function_exists('http_parse_headers')) { if (function_exists('http_parse_headers')) {
$headers = http_parse_headers($response); $headers = http_parse_headers($response);
} }
else { else {
$headers = $this->_http_parse_headers($response); $headers = $this->_http_parse_headers($response);
} }
return $headers; return $headers;
} }
   
/** /**
* Backup PHP impl. for when PECL http_parse_headers() function is not available * Backup PHP impl. for when PECL http_parse_headers() function is not available
* *
* @param $header * @param $header
* @return array * @return array
* @source http://www.php.net/manual/en/function.http-parse-headers.php#77241 * @source http://www.php.net/manual/en/function.http-parse-headers.php#77241
*/ */
private function _http_parse_headers( $header ) { private function _http_parse_headers( $header ) {
$retVal = array(); $retVal = array();
$fields = explode("\r\n", preg_replace('/\x0D\x0A[\x09\x20]+/', ' ', $header)); $fields = explode("\r\n", preg_replace('/\x0D\x0A[\x09\x20]+/', ' ', $header));
foreach( $fields as $field ) { foreach( $fields as $field ) {
if( preg_match('/([^:]+): (.+)/m', $field, $match) ) { if( preg_match('/([^:]+): (.+)/m', $field, $match) ) {
$match[1] = preg_replace('/(?<=^|[\x09\x20\x2D])./e', 'strtoupper("\0")', strtolower(trim($match[1]))); $match[1] = preg_replace('/(?<=^|[\x09\x20\x2D])./e', 'strtoupper("\0")', strtolower(trim($match[1])));
if( isset($retVal[$match[1]]) ) { if( isset($retVal[$match[1]]) ) {
$retVal[$match[1]] = array($retVal[$match[1]], $match[2]); $retVal[$match[1]] = array($retVal[$match[1]], $match[2]);
} else { } else {
$retVal[$match[1]] = trim($match[2]); $retVal[$match[1]] = trim($match[2]);
} }
} }
} }
return $retVal; return $retVal;
} }
   
/** /**
* HTTP GET * HTTP GET
*/ */
function http_get($uri, $data = array()) { function http_get($uri, $data = array()) {
$data = (is_array($data)) ? http_build_query($data) : $data; $data = (is_array($data)) ? http_build_query($data) : $data;
if (!empty($data)) { if (!empty($data)) {
$uri .= "?$data"; $uri .= "?$data";
} }
return $this->http_request('GET', $uri); return $this->http_request('GET', $uri);
} }
/** /**
* HTTP PUT * HTTP PUT
*/ */
function http_put($uri, $data = array()) { function http_put($uri, $data = array()) {
return $this->http_request('PUT', $uri, $data); return $this->http_request('PUT', $uri, $data);
} }
   
/** /**
* HTTP DELETE * HTTP DELETE
*/ */
function http_delete($uri, $data = array()) { function http_delete($uri, $data = array()) {
return $this->http_request('DELETE', $uri, $data); return $this->http_request('DELETE', $uri, $data);
} }
   
/** /**
* Generic implementation of a HTTP Request. * Generic implementation of a HTTP Request.
* *
* @param $http_method * @param $http_method
* @param $uri * @param $uri
* @param array $data * @param array $data
* @return * @return
* an array containing json and decoded versions of the response. * an array containing json and decoded versions of the response.
*/ */
private function http_request($http_method, $uri, $data = array()) { private function http_request($http_method, $uri, $data = array()) {
$data = (is_array($data)) ? http_build_query($data) : $data; $data = (is_array($data)) ? http_build_query($data) : $data;
   
if (!empty($data)) { if (!empty($data)) {
curl_setopt($this->curl, CURLOPT_HTTPHEADER, array('Content-Length: ' . strlen($data))); curl_setopt($this->curl, CURLOPT_HTTPHEADER, array('Content-Length: ' . strlen($data)));
curl_setopt($this->curl, CURLOPT_POSTFIELDS, $data); curl_setopt($this->curl, CURLOPT_POSTFIELDS, $data);
} }
   
curl_setopt($this->curl, CURLOPT_URL, $this->get_full_url($uri)); curl_setopt($this->curl, CURLOPT_URL, $this->get_full_url($uri));
curl_setopt($this->curl, CURLOPT_CUSTOMREQUEST, $http_method); curl_setopt($this->curl, CURLOPT_CUSTOMREQUEST, $http_method);
   
$response = curl_exec($this->curl); $response = curl_exec($this->curl);
$response_decoded = $this->decode_response($response); $response_decoded = $this->decode_response($response);
$response = array('json' => $response, 'decoded'=>$response_decoded); $response = array('json' => $response, 'decoded'=>$response_decoded);
   
$this->check_status($response,$uri); $this->check_status($response,$uri);
   
return $response; return $response;
} }
/** /**
* Check http status for safe return codes * Check http status for safe return codes
* *
* @throws SetteeRestClientException * @throws SetteeRestClientException
*/ */
private function check_status($response,$uri) { private function check_status($response,$uri) {
$resp_code = curl_getinfo($this->curl, CURLINFO_HTTP_CODE); $resp_code = curl_getinfo($this->curl, CURLINFO_HTTP_CODE);
   
if ($resp_code < 199 || $resp_code > 399 || !empty($response['decoded']->error)) { if ($resp_code < 199 || $resp_code > 399 || !empty($response['decoded']->error)) {
$msg = "CouchDB returned: \"HTTP 1.1. $resp_code\". ERROR: " . $response['json'] . $uri; $msg = "CouchDB returned: \"HTTP 1.1. $resp_code\". ERROR: " . $response['json'] . $uri;
throw new SetteeRestClientException($msg); throw new SetteeRestClientException($msg);
} }
} }
   
/** /**
* @param $path * @param $path
* Full path to a file (e.g. as returned by PHP's realpath function). * Full path to a file (e.g. as returned by PHP's realpath function).
* @return void * @return void
*/ */
public function file_mime_type ($path) { public function file_mime_type ($path) {
$ftype = 'application/octet-stream'; $ftype = 'application/octet-stream';
if (function_exists("finfo_file")) { if (function_exists("finfo_file")) {
$finfo = new finfo(FILEINFO_MIME_TYPE | FILEINFO_SYMLINK); $finfo = new finfo(FILEINFO_MIME_TYPE | FILEINFO_SYMLINK);
$fres = $finfo->file($path); $fres = $finfo->file($path);
if (is_string($fres) && !empty($fres)) { if (is_string($fres) && !empty($fres)) {
$ftype = $fres; $ftype = $fres;
} }
} }
   
return $ftype; return $ftype;
} }
   
/** /**
* @param $content * @param $content
* content of a file in a string buffer format. * content of a file in a string buffer format.
* @return void * @return void
*/ */
public function content_mime_type ($content) { public function content_mime_type ($content) {
$ftype = 'application/octet-stream'; $ftype = 'application/octet-stream';
   
if (function_exists("finfo_file")) { if (function_exists("finfo_file")) {
$finfo = new finfo(FILEINFO_MIME_TYPE | FILEINFO_SYMLINK); $finfo = new finfo(FILEINFO_MIME_TYPE | FILEINFO_SYMLINK);
$fres = $finfo->buffer($content); $fres = $finfo->buffer($content);
if (is_string($fres) && !empty($fres)) { if (is_string($fres) && !empty($fres)) {
$ftype = $fres; $ftype = $fres;
} }
} }
   
return $ftype; return $ftype;
} }
   
/** /**
* *
* @param $json * @param $json
* json-encoded response from CouchDB * json-encoded response from CouchDB
* *
* @return * @return
* decoded PHP object * decoded PHP object
*/ */
private function decode_response($json) { private function decode_response($json) {
return json_decode($json); return json_decode($json);
} }
   
/** /**
* Get full URL from a partial one * Get full URL from a partial one
*/ */
private function get_full_url($uri) { private function get_full_url($uri) {
// We do not want "/", "?", "&" and "=" separators to be encoded!!! // We do not want "/", "?", "&" and "=" separators to be encoded!!!
$uri = str_replace(array('%2F', '%3F', '%3D', '%26'), array('/', '?', '=', '&'), urlencode($uri)); $uri = str_replace(array('%2F', '%3F', '%3D', '%26'), array('/', '?', '=', '&'), urlencode($uri));
return $this->base_url . '/' . $uri; return $this->base_url . '/' . $uri;
} }
} }
   
class SetteeRestClientException extends Exception {} class SetteeRestClientException extends Exception {}
   
<?php <?php
   
include('template.inc.php'); include('template.inc.php');
include_header_documents(""); include_header_documents("About");
include_once('../include/common.inc.php'); include_once('../include/common.inc.php');
?> ?>
<h1>About</h1> <h1>About</h1>
<?php <?php
include_footer_documents(); include_footer_documents();
?> ?>
   
  <?php
  include('template.inc.php');
  include_once('../include/common.inc.php');
  $agenciesdb = $server->get_db('disclosr-agencies');
 
  $idtoname = Array();
  foreach ($agenciesdb->get_view("app", "byCanonicalName")->rows as $row) {
  $idtoname[$row->id] = trim($row->value->name);
  }
  $foidocsdb = $server->get_db('disclosr-foidocuments');
 
  include_header_documents((isset($_REQUEST['id']) ? $idtoname[$_REQUEST['id']] : 'Entries by Agency'));
  $endkey = (isset($_REQUEST['end_key']) ? $_REQUEST['end_key'] : '9999-99-99');
  ?>
  <div class="headline">Read all the information released by Australian Federal Government agencies under the FOI Act in one place!</div>
  <a style='float:right' href="rss.xml.php"><img src="img/feed-icon-14x14.png" alt="RSS Icon"/> All Agencies RSS Feed</a><br>
  <?php
  try {
  if ($_REQUEST['id']) {
  $rows = $foidocsdb->get_view("app", "byAgencyID", $_REQUEST['id'], false, false, false)->rows;
  foreach ($rows as $row) {
  //print_r($rows);
  echo displayLogEntry($row, $idtoname);
  if (!isset($startkey))
  $startkey = $row->key;
  $endkey = $row->key;
  }
  } else {
  $rows = $foidocsdb->get_view("app", "byAgencyID?group=true", null, false, false, true)->rows;
  if ($rows) {
  foreach ($rows as $row) {
  echo '<a href="agency.php?id=' . $row->key . '">' . $idtoname[$row->key] . " (" . $row->value . " records)</a> <br>\n";
  }
  }
  }
  } catch (SetteeRestClientException $e) {
  setteErrorHandler($e);
  }
  echo "<a class='btn btn-large btn-primary' href='?end_key=$endkey' style='float:right;'>next page <i class='icon-circle-arrow-right icon-white'></i></a>";
  include_footer_documents();
  ?>
<?php <?php
include('template.inc.php'); include('template.inc.php');
include_header_documents(""); include_header_documents("Charts");
include_once('../include/common.inc.php'); include_once('../include/common.inc.php');
$agenciesdb = $server->get_db('disclosr-agencies'); $agenciesdb = $server->get_db('disclosr-agencies');
   
$idtoname = Array(); $idtoname = Array();
foreach ($agenciesdb->get_view("app", "byCanonicalName")->rows as $row) { foreach ($agenciesdb->get_view("app", "byCanonicalName")->rows as $row) {
$idtoname[$row->id] = trim($row->value->name); $idtoname[$row->id] = trim($row->value->name);
} }
$foidocsdb = $server->get_db('disclosr-foidocuments'); $foidocsdb = $server->get_db('disclosr-foidocuments');
   
?> ?>
<div class="foundation-header"> <div class="foundation-header">
<h1><a href="about.php">Charts</a></h1> <h1><a href="about.php">Charts</a></h1>
<h4 class="subheader">Lorem ipsum.</h4> <h4 class="subheader">Lorem ipsum.</h4>
</div> </div>
<div id="employees" style="width:1000px;height:900px;"></div> <div id="bydate" style="width:1000px;height:300px;"></div>
  <div id="byagency" style="width:1200px;height:300px;"></div>
<script id="source"> <script id="source">
window.onload = function() { window.onload = function() {
$(document).ready(function() { $(document).ready(function() {
var var
d1 = [], d1 = [],
start = new Date("2009/01/01 01:00").getTime(), options1,
options, o1;
graph,  
i, x, o;  
   
<?php <?php
try { try {
$rows = $foidocsdb->get_view("app", "byDate?group=true", null, true)->rows; $rows = $foidocsdb->get_view("app", "byDateMonthYear?group=true",null, false,false,true)->rows;
   
   
$dataValues = Array(); $dataValues = Array();
foreach ($rows as $row) { foreach ($rows as $row) {
$dataValues[$row->value] = $row->key; $dataValues[$row->key] = $row->value;
} }
$i = 0; $i = 0;
ksort($dataValues); ksort($dataValues);
foreach ($dataValues as $value => $key) { foreach ($dataValues as $key => $value) {
$date = date_create_from_format('Y-m-d', $key); $date = date_create_from_format('Y-m-d', $key);
if (date_format($date, 'U') != "") { if (date_format($date, 'U') != "") {
echo " d1.push([".date_format($date, 'U')."000, $value]);" . PHP_EOL; echo " d1.push([".date_format($date, 'U')."000, $value]);" . PHP_EOL;
// echo " emplabels.push('$key');" . PHP_EOL; // echo " emplabels.push('$key');" . PHP_EOL;
$i++; $i++;
} }
} }
} catch (SetteeRestClientException $e) { } catch (SetteeRestClientException $e) {
setteErrorHandler($e); setteErrorHandler($e);
} }
?> ?>
   
   
options = { options1 = {
xaxis : { xaxis : {
mode : 'time', mode : 'time',
labelsAngle : 45 labelsAngle : 45
}, },
selection : { selection : {
mode : 'x' mode : 'x'
}, },
HtmlText : false, HtmlText : false,
title : 'Time' title : 'Time'
}; };
// Draw graph with default options, overwriting with passed options // Draw graph with default options, overwriting with passed options
function drawGraph (opts) { function drawGraph (opts) {
   
// Clone the options, so the 'options' variable always keeps intact. // Clone the options, so the 'options' variable always keeps intact.
o = Flotr._.extend(Flotr._.clone(options), opts || {}); o1 = Flotr._.extend(Flotr._.clone(options1), opts || {});
   
// Return a new graph. // Return a new graph.
return Flotr.draw( return Flotr.draw(
document.getElementById("employees"), document.getElementById("bydate"),
[ d1 ], [ d1 ],
o o1
); );
} }
   
graph = drawGraph(); graph = drawGraph();
Flotr.EventAdapter.observe(container, 'flotr:select', function(area){ Flotr.EventAdapter.observe(document.getElementById("bydate"), 'flotr:select', function(area){
// Draw selected area // Draw selected area
graph = drawGraph({ graph = drawGraph({
xaxis : { min : area.x1, max : area.x2, mode : 'time', labelsAngle : 45 }, xaxis : { min : area.x1, max : area.x2, mode : 'time', labelsAngle : 45 },
yaxis : { min : area.y1, max : area.y2 } yaxis : { min : area.y1, max : area.y2 }
}); });
}); });
// When graph is clicked, draw the graph with default area. // When graph is clicked, draw the graph with default area.
Flotr.EventAdapter.observe(container, 'flotr:click', function () { graph = drawGraph(); }); Flotr.EventAdapter.observe(document.getElementById("bydate"), 'flotr:click', function () { graph = drawGraph(); });
   
}); });
}; };
   
  var d2 = [];
  var agencylabels = [];
  function agencytrackformatter(obj) {
   
  return agencylabels[Math.floor(obj.x)] +" = "+obj.y;
   
  }
  function agencytickformatter(val, axis) {
  if (agencylabels[Math.floor(val)]) {
  return '<p style="margin-top:8em;-webkit-transform:rotate(-90deg);">'+(agencylabels[Math.floor(val)])+"</b>";
   
  } else {
  return "";
  }
  }
  <?php
  try {
  $rows = $foidocsdb->get_view("app", "byAgencyID?group=true",null, false,false,true)->rows;
   
   
  $dataValues = Array();
  $i = 0;
  foreach ($rows as $row) {
  echo " d2.push([".$i.", $row->value]);" . PHP_EOL;
  echo " agencylabels.push(['".str_replace("'","",$idtoname[$row->key])."']);" . PHP_EOL;
   
  $i++;
  }
  } catch (SetteeRestClientException $e) {
  setteErrorHandler($e);
  }
  ?>
  // Draw the graph
  Flotr.draw(
  document.getElementById("byagency"),
  [d2],
  {
  bars : {
  show : true,
  horizontal : false,
  shadowSize : 0,
  barWidth : 0.5
  },
  mouse : {
  track : true,
  relative : true,
  trackFormatter: agencytrackformatter
  },
  yaxis : {
  min : 0,
  autoscaleMargin : 1
  },
  xaxis: {
  minorTickFreq: 1,
  noTicks: agencylabels.length,
  showMinorLabels: true,
  tickFormatter: agencytickformatter
  },
  legend: {
  show: false
  }
  }
  );
</script> </script>
   
<?php <?php
include_footer_documents(); include_footer_documents();
?> ?>
   
   
  <?php
 
  include('template.inc.php');
  include_header_documents("Entries by Date");
  include_once('../include/common.inc.php');
  $endkey = (isset($_REQUEST['end_key']) ? $_REQUEST['end_key'] : '9999-99-99');
  ?>
  <div class="headline">Read all the information released by Australian Federal Government agencies under the FOI Act in one place!</div>
  <a style='float:right' href="rss.xml.php"><img src="img/feed-icon-14x14.png" alt="RSS Icon"/> All Agencies RSS Feed</a><br>
  <?php
  /*$agenciesdb = $server->get_db('disclosr-agencies');
 
  $idtoname = Array();
  foreach ($agenciesdb->get_view("app", "byCanonicalName")->rows as $row) {
  $idtoname[$row->id] = trim($row->value->name);
  }
  $foidocsdb = $server->get_db('disclosr-foidocuments');
  try {
  $rows = $foidocsdb->get_view("app", "byDate", Array($endkey, '0000-00-00'), true, 20)->rows;
  if ($rows) {
  foreach ($rows as $key => $row) {
  echo displayLogEntry($row, $idtoname);
  if (!isset($startkey)) $startkey = $row->key;
  $endkey = $row->key;
  }
  }
  } catch (SetteeRestClientException $e) {
  setteErrorHandler($e);
  }
  echo "<a class='btn btn-large btn-primary' href='?end_key=$endkey' style='float:right;'>next page <i class='icon-circle-arrow-right icon-white'></i></a>";
  */
  include_footer_documents();
  ?>
 
<?php <?php
   
include('template.inc.php'); include('template.inc.php');
include_header_documents(""); include_header_documents("List of Disclosure Logs");
include_once('../include/common.inc.php'); include_once('../include/common.inc.php');
   
echo "<table> echo "<table>
<tr><th>Agency Name</th><th>Disclosure Log URL recorded?</th><th>Do we monitor this URL?</th></tr>"; <tr><th>Agency Name</th><th>Disclosure Log URL recorded?</th><th>Do we monitor this URL?</th></tr>";
$agenciesdb = $server->get_db('disclosr-agencies'); $agenciesdb = $server->get_db('disclosr-agencies');
$docsdb = $server->get_db('disclosr-documents'); $docsdb = $server->get_db('disclosr-documents');
$agencies = 0; $agencies = 0;
$disclogs = 0; $disclogs = 0;
$red = 0; $red = 0;
$green = 0; $green = 0;
$yellow = 0; $yellow = 0;
$orange = 0; $orange = 0;
try { try {
$rows = $agenciesdb->get_view("app", "byCanonicalName", null, true)->rows; $rows = $agenciesdb->get_view("app", "byCanonicalName", null, true)->rows;
   
   
if ($rows) { if ($rows) {
foreach ($rows as $row) { foreach ($rows as $row) {
if ((!isset($row->value->status) || $row->value->status != "suspended") && isset($row->value->foiEmail)) { if ((!isset($row->value->status) || $row->value->status != "suspended") && isset($row->value->foiEmail)) {
echo "<tr><td>"; echo "<tr><td>";
if (isset($row->value->website)) echo "<a href='" . $row->value->website . "'>"; if (isset($row->value->website)) echo "<a href='" . $row->value->website . "'>";
echo "<b>" . $row->value->name . "</b>"; echo "<b>" . $row->value->name . "</b>";
if (isset($row->value->website)) echo "</a>"; if (isset($row->value->website)) echo "</a>";
if ($ENV == "DEV") if ($ENV == "DEV")
echo "<br>(" . $row->id . ")"; echo "<br>(" . $row->id . ")";
echo "</td>\n"; echo "</td>\n";
$agencies++; $agencies++;
   
echo "<td>"; echo "<td>";
if (isset($row->value->FOIDocumentsURL)) { if (isset($row->value->FOIDocumentsURL)) {
$disclogs++; $disclogs++;
echo '<a href="' . $row->value->FOIDocumentsURL . '">' echo '<a href="' . $row->value->FOIDocumentsURL . '">'
. $row->value->FOIDocumentsURL . '</a>'; . $row->value->FOIDocumentsURL . '</a>';
if ($ENV == "DEV") if ($ENV == "DEV")
echo '<br><small>(<a href="viewDocument.php?hash=' . md5($row->value->FOIDocumentsURL) . '">' echo '<br><small>(<a href="viewDocument.php?hash=' . md5($row->value->FOIDocumentsURL) . '">'
. 'view local copy</a>)</small>'; . 'view local copy</a>)</small>';
} else { } else {
echo "<font color='red'><abbr title='No'>✘</abbr></font>"; echo "<font color='red'><abbr title='No'>✘</abbr></font>";
} }
echo "</td>\n<td>"; echo "</td>\n<td>";
if (isset($row->value->FOIDocumentsURL)) { if (isset($row->value->FOIDocumentsURL)) {
if (file_exists("./scrapers/" . $row->id . '.py')) { if (file_exists("./scrapers/" . $row->id . '.py')) {
echo "<font color='green'><abbr title='Yes'>✔</abbr></font>"; echo "<font color='green'><abbr title='Yes'>✔</abbr></font>";
$green++; $green++;
} else if (file_exists("./scrapers/" . $row->id . '.txt')) { } else if (file_exists("./scrapers/" . $row->id . '.txt')) {
if (trim(file_get_contents("./scrapers/" . $row->id . '.txt')) == "no disclog") { if (trim(file_get_contents("./scrapers/" . $row->id . '.txt')) == "no disclog") {
echo "<font color='yellow'><abbr title='No log table exists at URL to scrape'><b>◎</b></abbr></font>"; echo "<font color='yellow'><abbr title='No log table exists at URL to scrape'><b>◎</b></abbr></font>";
$yellow++; $yellow++;
} else { } else {
echo file_get_contents("./scrapers/" . $row->id . '.txt'); echo file_get_contents("./scrapers/" . $row->id . '.txt');
echo "<font color='orange'><abbr title='Work in progress'><b>▬</b></abbr></font>"; echo "<font color='orange'><abbr title='Work in progress'><b>▬</b></abbr></font>";
$orange++; $orange++;
} }
} else { } else {
echo "<font color='red'><abbr title='No'>✘</abbr></font>"; echo "<font color='red'><abbr title='No'>✘</abbr></font>";
$red++; $red++;
} }
} }
echo "</td></tr>\n"; echo "</td></tr>\n";
} }
} }
} }
} catch (SetteeRestClientException $e) { } catch (SetteeRestClientException $e) {
setteErrorHandler($e); setteErrorHandler($e);
} }
echo "</table>"; echo "</table>";
echo $agencies . " agencies, " . round(($disclogs / $agencies) * 100) . "% with disclosure logs; " echo $agencies . " agencies, " . round(($disclogs / $agencies) * 100) . "% with disclosure logs; "
. round(($green / $disclogs) * 100) . "% logs with scrapers " . round(($red / $disclogs) * 100) . "% logs without scrapers " . round(($orange / $disclogs) * 100) . "% logs Work-In-Progress scrapers "; . round(($green / $disclogs) * 100) . "% logs with scrapers " . round(($red / $disclogs) * 100) . "% logs without scrapers " . round(($orange / $disclogs) * 100) . "% logs Work-In-Progress scrapers ";
   
include_footer_documents(); include_footer_documents();
?> ?>
   
import sys import sys
import os import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../')) sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
import scrape import scrape
from bs4 import BeautifulSoup from bs4 import BeautifulSoup
from time import mktime from time import mktime
import feedparser import feedparser
import abc import abc
import unicodedata import unicodedata
import re import re
import dateutil import dateutil
from dateutil.parser import * from dateutil.parser import *
from datetime import * from datetime import *
import codecs import codecs
   
  import difflib
   
from StringIO import StringIO from StringIO import StringIO
   
from pdfminer.pdfparser import PDFDocument, PDFParser from pdfminer.pdfparser import PDFDocument, PDFParser
from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter, process_pdf from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter, process_pdf
from pdfminer.pdfdevice import PDFDevice, TagExtractor from pdfminer.pdfdevice import PDFDevice, TagExtractor
from pdfminer.converter import TextConverter from pdfminer.converter import TextConverter
from pdfminer.cmapdb import CMapDB from pdfminer.cmapdb import CMapDB
from pdfminer.layout import LAParams from pdfminer.layout import LAParams
   
   
class GenericDisclogScraper(object): class GenericDisclogScraper(object):
__metaclass__ = abc.ABCMeta __metaclass__ = abc.ABCMeta
agencyID = None agencyID = None
disclogURL = None disclogURL = None
   
def remove_control_chars(self, input): def remove_control_chars(self, input):
return "".join([i for i in input if ord(i) in range(32, 127)]) return "".join([i for i in input if ord(i) in range(32, 127)])
   
def getAgencyID(self): def getAgencyID(self):
""" disclosr agency id """ """ disclosr agency id """
if self.agencyID is None: if self.agencyID is None:
self.agencyID = os.path.basename(sys.argv[0]).replace(".py", "") self.agencyID = os.path.basename(sys.argv[0]).replace(".py", "")
return self.agencyID return self.agencyID
   
def getURL(self): def getURL(self):
""" disclog URL""" """ disclog URL"""
if self.disclogURL is None: if self.disclogURL is None:
agency = scrape.agencydb.get(self.getAgencyID()) agency = scrape.agencydb.get(self.getAgencyID())
self.disclogURL = agency['FOIDocumentsURL'] self.disclogURL = agency['FOIDocumentsURL']
return self.disclogURL return self.disclogURL
   
@abc.abstractmethod @abc.abstractmethod
def doScrape(self): def doScrape(self):
""" do the scraping """ """ do the scraping """
return return
   
  class GenericHTMLDisclogScraper(GenericDisclogScraper):
   
  def doScrape(self):
  foidocsdb = scrape.couch['disclosr-foidocuments']
  (url, mime_type, rcontent) = scrape.fetchURL(scrape.docsdb,
  self.getURL(), "foidocuments", self.getAgencyID())
  content = rcontent
  dochash = scrape.mkhash(content)
  doc = foidocsdb.get(dochash)
  if doc is None:
  print "saving " + dochash
  description = "This log may have updated but as it was not in a table last time we viewed it, we cannot extract what has changed. Please refer to the agency's website Disclosure Log to see the most recent entries"
  last_attach = scrape.getLastAttachment(scrape.docsdb, self.getURL())
  if last_attach != None:
  html_diff = difflib.HtmlDiff()
  description = description + "\nChanges: "
  description = description + html_diff.make_table(last_attach.read().split('\n'),
  content.split('\n'))
  edate = date.today().strftime("%Y-%m-%d")
  doc = {'_id': dochash, 'agencyID': self.getAgencyID()
  , 'url': self.getURL(), 'docID': dochash,
  "date": edate, "title": "Disclosure Log Updated", "description": description}
  foidocsdb.save(doc)
  else:
  print "already saved"
   
class GenericPDFDisclogScraper(GenericDisclogScraper): class GenericPDFDisclogScraper(GenericDisclogScraper):
   
def doScrape(self): def doScrape(self):
foidocsdb = scrape.couch['disclosr-foidocuments'] foidocsdb = scrape.couch['disclosr-foidocuments']
(url, mime_type, content) = scrape.fetchURL(scrape.docsdb, (url, mime_type, content) = scrape.fetchURL(scrape.docsdb,
self.getURL(), "foidocuments", self.getAgencyID()) self.getURL(), "foidocuments", self.getAgencyID())
laparams = LAParams() laparams = LAParams()
rsrcmgr = PDFResourceManager(caching=True) rsrcmgr = PDFResourceManager(caching=True)
outfp = StringIO() outfp = StringIO()
device = TextConverter(rsrcmgr, outfp, codec='utf-8', device = TextConverter(rsrcmgr, outfp, codec='utf-8',
laparams=laparams) laparams=laparams)
fp = StringIO() fp = StringIO()
fp.write(content.read()) fp.write(content)
   
process_pdf(rsrcmgr, device, fp, set(), caching=True, process_pdf(rsrcmgr, device, fp, set(), caching=True,
check_extractable=True) check_extractable=True)
description = outfp.getvalue() description = outfp.getvalue()
fp.close() fp.close()
device.close() device.close()
outfp.close() outfp.close()
dochash = scrape.mkhash(description) dochash = scrape.mkhash(description)
doc = foidocsdb.get(dochash) doc = foidocsdb.get(dochash)
if doc is None: if doc is None:
print "saving " + dochash print "saving " + dochash
edate = date.today().strftime("%Y-%m-%d") edate = date.today().strftime("%Y-%m-%d")
doc = {'_id': dochash, 'agencyID': self.getAgencyID() doc = {'_id': dochash, 'agencyID': self.getAgencyID()
, 'url': self.getURL(), 'docID': dochash, , 'url': self.getURL(), 'docID': dochash,
"date": edate, "title": "Disclosure Log Updated", "description": description} "date": edate, "title": "Disclosure Log Updated", "description": description}
foidocsdb.save(doc) foidocsdb.save(doc)
else: else:
print "already saved" print "already saved"
   
   
class GenericDOCXDisclogScraper(GenericDisclogScraper): class GenericDOCXDisclogScraper(GenericDisclogScraper):
   
def doScrape(self): def doScrape(self):
foidocsdb = scrape.couch['disclosr-foidocuments'] foidocsdb = scrape.couch['disclosr-foidocuments']
(url, mime_type, content) = scrape.fetchURL(scrape.docsdb (url, mime_type, content) = scrape.fetchURL(scrape.docsdb
, self.getURL(), "foidocuments", self.getAgencyID()) , self.getURL(), "foidocuments", self.getAgencyID())
mydoc = zipfile.ZipFile(file) mydoc = zipfile.ZipFile(file)
xmlcontent = mydoc.read('word/document.xml') xmlcontent = mydoc.read('word/document.xml')
document = etree.fromstring(xmlcontent) document = etree.fromstring(xmlcontent)
## Fetch all the text out of the document we just created ## Fetch all the text out of the document we just created
paratextlist = getdocumenttext(document) paratextlist = getdocumenttext(document)
# Make explicit unicode version # Make explicit unicode version
newparatextlist = [] newparatextlist = []
for paratext in paratextlist: for paratext in paratextlist:
newparatextlist.append(paratext.encode("utf-8")) newparatextlist.append(paratext.encode("utf-8"))
## Print our documnts test with two newlines under each paragraph ## Print our documnts test with two newlines under each paragraph
description = '\n\n'.join(newparatextlist).strip(' \t\n\r') description = '\n\n'.join(newparatextlist).strip(' \t\n\r')
dochash = scrape.mkhash(description) dochash = scrape.mkhash(description)
doc = foidocsdb.get(dochash) doc = foidocsdb.get(dochash)
   
if doc is None: if doc is None:
print "saving " + dochash print "saving " + dochash
edate = time().strftime("%Y-%m-%d") edate = time().strftime("%Y-%m-%d")
doc = {'_id': dochash, 'agencyID': self.getAgencyID() doc = {'_id': dochash, 'agencyID': self.getAgencyID()
, 'url': self.getURL(), 'docID': dochash, , 'url': self.getURL(), 'docID': dochash,
"date": edate, "title": "Disclosure Log Updated", "description": description} "date": edate, "title": "Disclosure Log Updated", "description": description}
foidocsdb.save(doc) foidocsdb.save(doc)
else: else:
print "already saved" print "already saved"
   
   
class GenericRSSDisclogScraper(GenericDisclogScraper): class GenericRSSDisclogScraper(GenericDisclogScraper):
   
def doScrape(self): def doScrape(self):
foidocsdb = scrape.couch['disclosr-foidocuments'] foidocsdb = scrape.couch['disclosr-foidocuments']
(url, mime_type, content) = scrape.fetchURL(scrape.docsdb, (url, mime_type, content) = scrape.fetchURL(scrape.docsdb,
self.getURL(), "foidocuments", self.getAgencyID()) self.getURL(), "foidocuments", self.getAgencyID())
feed = feedparser.parse(content) feed = feedparser.parse(content)
for entry in feed.entries: for entry in feed.entries:
#print entry #print entry
print entry.id print entry.id
dochash = scrape.mkhash(entry.id) dochash = scrape.mkhash(entry.id)
doc = foidocsdb.get(dochash) doc = foidocsdb.get(dochash)
#print doc #print doc
if doc is None: if doc is None:
print "saving " + dochash print "saving " + dochash
edate = datetime.fromtimestamp( edate = datetime.fromtimestamp(
mktime(entry.published_parsed)).strftime("%Y-%m-%d") mktime(entry.published_parsed)).strftime("%Y-%m-%d")
doc = {'_id': dochash, 'agencyID': self.getAgencyID(), doc = {'_id': dochash, 'agencyID': self.getAgencyID(),
'url': entry.link, 'docID': entry.id, 'url': entry.link, 'docID': entry.id,
"date": edate, "title": entry.title} "date": edate, "title": entry.title}
self.getDescription(entry, entry, doc) self.getDescription(entry, entry, doc)
foidocsdb.save(doc) foidocsdb.save(doc)
else: else:
print "already saved" print "already saved"
   
def getDescription(self, content, entry, doc): def getDescription(self, content, entry, doc):
""" get description from rss entry""" """ get description from rss entry"""
doc.update({'description': content.summary}) doc.update({'description': content.summary})
return return
   
   
class GenericOAICDisclogScraper(GenericDisclogScraper): class GenericOAICDisclogScraper(GenericDisclogScraper):
__metaclass__ = abc.ABCMeta __metaclass__ = abc.ABCMeta
   
@abc.abstractmethod @abc.abstractmethod
def getColumns(self, columns): def getColumns(self, columns):
""" rearranges columns if required """ """ rearranges columns if required """
return return
   
def getColumnCount(self): def getColumnCount(self):
return 5 return 5
   
def getDescription(self, content, entry, doc): def getDescription(self, content, entry, doc):
""" get description from rss entry""" """ get description from rss entry"""
descriptiontxt = "" descriptiontxt = ""
for string in content.stripped_strings: for string in content.stripped_strings:
descriptiontxt = descriptiontxt + " \n" + string descriptiontxt = descriptiontxt + " \n" + string
doc.update({'description': descriptiontxt}) doc.update({'description': descriptiontxt})
   
def getTitle(self, content, entry, doc): def getTitle(self, content, entry, doc):
doc.update({'title': (''.join(content.stripped_strings))}) doc.update({'title': (''.join(content.stripped_strings))})
   
def getTable(self, soup): def getTable(self, soup):
return soup.table return soup.table
   
def getRows(self, table): def getRows(self, table):
return table.find_all('tr') return table.find_all('tr')
   
def getDate(self, content, entry, doc): def getDate(self, content, entry, doc):
date = ''.join(content.stripped_strings).strip() date = ''.join(content.stripped_strings).strip()
(a, b, c) = date.partition("(") (a, b, c) = date.partition("(")
date = self.remove_control_chars(a.replace("Octber", "October")) date = self.remove_control_chars(a.replace("Octber", "October"))
print date print date
edate = parse(date, dayfirst=True, fuzzy=True).strftime("%Y-%m-%d") edate = parse(date, dayfirst=True, fuzzy=True).strftime("%Y-%m-%d")
print edate print edate
doc.update({'date': edate}) doc.update({'date': edate})
return return
   
def getLinks(self, content, entry, doc): def getLinks(self, content, entry, doc):
links = [] links = []
for atag in entry.find_all("a"): for atag in entry.find_all("a"):
if atag.has_key('href'): if atag.has_key('href'):
links.append(scrape.fullurl(content, atag['href'])) links.append(scrape.fullurl(content, atag['href']))
if links != []: if links != []:
doc.update({'links': links}) doc.update({'links': links})
return return
   
def doScrape(self): def doScrape(self):
foidocsdb = scrape.couch['disclosr-foidocuments'] foidocsdb = scrape.couch['disclosr-foidocuments']
(url, mime_type, content) = scrape.fetchURL(scrape.docsdb, (url, mime_type, content) = scrape.fetchURL(scrape.docsdb,
self.getURL(), "foidocuments", self.getAgencyID()) self.getURL(), "foidocuments", self.getAgencyID())
if content is not None: if content is not None:
if mime_type is "text/html"\ if mime_type == "text/html" or mime_type == "application/xhtml+xml" or mime_type == "application/xml":
or mime_type is "application/xhtml+xml"\  
or mime_type is"application/xml":  
# http://www.crummy.com/software/BeautifulSoup/documentation.html # http://www.crummy.com/software/BeautifulSoup/documentation.html
  print "parsing"
soup = BeautifulSoup(content) soup = BeautifulSoup(content)
table = self.getTable(soup) table = self.getTable(soup)
for row in self.getRows(table): for row in self.getRows(table):
columns = row.find_all('td') columns = row.find_all('td')
if len(columns) is self.getColumnCount(): if len(columns) is self.getColumnCount():
(id, date, title, (id, date, title,
description, notes) = self.getColumns(columns) description, notes) = self.getColumns(columns)
print self.remove_control_chars( print self.remove_control_chars(
''.join(id.stripped_strings)) ''.join(id.stripped_strings))
if id.string is None: if id.string is None:
dochash = scrape.mkhash( dochash = scrape.mkhash(
self.remove_control_chars( self.remove_control_chars(
url + (''.join(date.stripped_strings)))) url + (''.join(date.stripped_strings))))
else: else:
dochash = scrape.mkhash( dochash = scrape.mkhash(
self.remove_control_chars( self.remove_control_chars(
url + (''.join(id.stripped_strings)))) url + (''.join(id.stripped_strings))))
doc = foidocsdb.get(hash) doc = foidocsdb.get(dochash)
   
if doc is None: if doc is None:
print "saving " + hash print "saving " + dochash
doc = {'_id': hash, doc = {'_id': dochash,
'agencyID': self.getAgencyID(), 'agencyID': self.getAgencyID(),
'url': self.getURL(), 'url': self.getURL(),
'docID': (''.join(id.stripped_strings))} 'docID': (''.join(id.stripped_strings))}
self.getLinks(self.getURL(), row, doc) self.getLinks(self.getURL(), row, doc)
self.getTitle(title, row, doc) self.getTitle(title, row, doc)
self.getDate(date, row, doc) self.getDate(date, row, doc)
self.getDescription(description, row, doc) self.getDescription(description, row, doc)
if notes is not None: if notes is not None:
doc.update({ 'notes': ( doc.update({ 'notes': (
''.join(notes.stripped_strings))}) ''.join(notes.stripped_strings))})
badtitles = ['-','Summary of FOI Request' badtitles = ['-','Summary of FOI Request'
, 'FOI request(in summary form)' , 'FOI request(in summary form)'
, 'Summary of FOI request received by the ASC', , 'Summary of FOI request received by the ASC',
'Summary of FOI request received by agency/minister', 'Summary of FOI request received by agency/minister',
'Description of Documents Requested','FOI request', 'Description of Documents Requested','FOI request',
'Description of FOI Request','Summary of request','Description','Summary', 'Description of FOI Request','Summary of request','Description','Summary',
'Summary of FOIrequest received by agency/minister','Summary of FOI request received','Description of FOI Request',"FOI request",'Results 1 to 67 of 67'] 'Summary of FOIrequest received by agency/minister','Summary of FOI request received','Description of FOI Request',"FOI request",'Results 1 to 67 of 67']
if doc['title'] not in badtitles\ if doc['title'] not in badtitles\
and doc['description'] != '': and doc['description'] != '':
print "saving" print "saving"
foidocsdb.save(doc) foidocsdb.save(doc)
else: else:
print "already saved " + dochash print "already saved " + dochash
   
elif len(row.find_all('th')) is self.getColumnCount(): elif len(row.find_all('th')) is self.getColumnCount():
print "header row" print "header row"
   
else: else:
print "ERROR number of columns incorrect" print "ERROR number of columns incorrect"
print row print row
   
 Binary files /dev/null and b/documents/img/feed-icon-14x14.png differ
<?php <?php
   
include('template.inc.php'); include('template.inc.php');
include_header_documents(""); include_header_documents("");
include_once('../include/common.inc.php'); include_once('../include/common.inc.php');
$startkey = (isset($_REQUEST['start_key']) ? $_REQUEST['start_key'] : '9999-99-99'); $endkey = (isset($_REQUEST['end_key']) ? $_REQUEST['end_key'] : '9999-99-99');
  $enddocid = (isset($_REQUEST['end_docid']) ? $_REQUEST['end_docid'] : null);
?> ?>
  <div class="headline">Read all the information released by Australian Federal Government agencies under the FOI Act in one place!</div>
  <a style='float:right' href="rss.xml.php"><img src="img/feed-icon-14x14.png" alt="RSS Icon"/> All Agencies RSS Feed</a><br>
<?php <?php
   
$agenciesdb = $server->get_db('disclosr-agencies'); $agenciesdb = $server->get_db('disclosr-agencies');
   
$idtoname = Array(); $idtoname = Array();
foreach ($agenciesdb->get_view("app", "byCanonicalName")->rows as $row) { foreach ($agenciesdb->get_view("app", "byCanonicalName")->rows as $row) {
$idtoname[$row->id] = trim($row->value->name); $idtoname[$row->id] = trim($row->value->name);
} }
$foidocsdb = $server->get_db('disclosr-foidocuments'); $foidocsdb = $server->get_db('disclosr-foidocuments');
try { try {
$rows = $foidocsdb->get_view("app", "byDate", Array($startkey, '0000-00-00'), true, 20)->rows; $rows = $foidocsdb->get_view("app", "byDate", Array($endkey, '0000-00-00'), true, 20,null, $enddocid)->rows;
if ($rows) { if ($rows) {
foreach ($rows as $key => $row) { foreach ($rows as $key => $row) {
echo displayLogEntry($row, $idtoname); echo displayLogEntry($row, $idtoname);
  if (!isset($startkey))
  $startkey = $row->key;
$endkey = $row->key; $endkey = $row->key;
  $enddocid = $row->value->_id;
} }
} }
} catch (SetteeRestClientException $e) { } catch (SetteeRestClientException $e) {
setteErrorHandler($e); setteErrorHandler($e);
} }
echo "<a href='?start_key=$endkey'>next page</a>"; echo "<a class='btn btn-large btn-primary' href='?end_key=$endkey&amp;end_docid=$enddocid' style='float:right;'>next page <i class='icon-circle-arrow-right icon-white'></i></a>";
include_footer_documents(); include_footer_documents();
?> ?>
   
<?php <?php
   
// Agency X updated Y, new files, diff of plain text/link text, // Agency X updated Y, new files, diff of plain text/link text,
// feed for just one agency or all // feed for just one agency or all
// This is a minimum example of using the Universal Feed Generator Class // This is a minimum example of using the Universal Feed Generator Class
include("../lib/FeedWriter/FeedTypes.php"); include("../lib/FeedWriter/FeedTypes.php");
include_once('../include/common.inc.php'); include_once('../include/common.inc.php');
//Creating an instance of FeedWriter class. //Creating an instance of FeedWriter class.
$TestFeed = new RSS2FeedWriter(); $TestFeed = new RSS2FeedWriter();
//Setting the channel elements //Setting the channel elements
//Use wrapper functions for common channelelements ////Retriving informations from database
$TestFeed->setTitle('disclosurelo.gs Newest Entries - All');  
$TestFeed->setLink('http://disclosurelo.gs/rss.xml.php');  
$TestFeed->setDescription('disclosurelo.gs Newest Entries - All Agencies');  
$TestFeed->setChannelElement('language', 'en-us');  
$TestFeed->setChannelElement('pubDate', date(DATE_RSS, time()));  
   
//Retriving informations from database  
$idtoname = Array(); $idtoname = Array();
$agenciesdb = $server->get_db('disclosr-agencies'); $agenciesdb = $server->get_db('disclosr-agencies');
foreach ($agenciesdb->get_view("app", "byCanonicalName")->rows as $row) { foreach ($agenciesdb->get_view("app", "byCanonicalName")->rows as $row) {
$idtoname[$row->id] = trim($row->value->name); $idtoname[$row->id] = trim($row->value->name);
} }
$foidocsdb = $server->get_db('disclosr-foidocuments');
// Per-agency feed when ?id= is given, otherwise the newest entries site-wide.
if (isset($_REQUEST['id'])) {
    $rows = $foidocsdb->get_view("app", "byAgencyID", $_REQUEST['id'], false, false, false)->rows;
    $title = $idtoname[$_REQUEST['id']];
} else {
    // byDate descending: startkey high sentinel, endkey low sentinel, limit 50
    $rows = $foidocsdb->get_view("app", "byDate", Array('9999-99-99', '0000-00-00', 50), true)->rows;
    $title = 'All Agencies';
}
//Use wrapper functions for common channel elements
$TestFeed->setTitle('disclosurelo.gs Newest Entries - ' . $title);
$TestFeed->setLink('http://disclosurelo.gs/rss.xml.php' . (isset($_REQUEST['id']) ? '?id=' . $_REQUEST['id'] : ''));
$TestFeed->setDescription('disclosurelo.gs Newest Entries - ' . $title);
$TestFeed->setChannelElement('language', 'en-us');
$TestFeed->setChannelElement('pubDate', date(DATE_RSS, time()));

//print_r($rows);
foreach ($rows as $row) {
    // One feed item per disclosure-log entry
    $newItem = $TestFeed->createNewItem();
    $newItem->setTitle($row->value->title);
    $newItem->setLink("http://disclosurelo.gs/view.php?id=" . $row->value->_id);
    $newItem->setDate(strtotime($row->value->date));
    $newItem->setDescription(displayLogEntry($row, $idtoname));
    $newItem->setAuthor($idtoname[$row->value->agencyID]);
    $newItem->addElement('guid', "http://disclosurelo.gs/view.php?id=" . $row->value->_id, array('isPermaLink' => 'true'));
    //Now add the feed item
    $TestFeed->addItem($newItem);
}
//OK. Everything is done. Now generate the feed.
$TestFeed->generateFeed();
?>
   
# Run every scraper; report and pause briefly on any non-zero exit so
# failures are visible in the console scroll.
for f in scrapers/*.py;
do echo "Processing $f file..";
    python "$f";
    if [ "$?" -ne "0" ]; then
        echo "error";
        sleep 2;
    fi
done
   
   
#http://packages.python.org/CouchDB/client.html #http://packages.python.org/CouchDB/client.html
import couchdb import couchdb
import urllib2 import urllib2
from BeautifulSoup import BeautifulSoup from BeautifulSoup import BeautifulSoup
import re import re
import hashlib import hashlib
from urlparse import urljoin from urlparse import urljoin
import time import time
import os import os
import mimetypes import mimetypes
import urllib import urllib
import urlparse import urlparse
   
def mkhash(input):
    """Return the hex MD5 digest of *input* as UTF-8-encoded bytes.

    Used throughout the scrapers as a stable CouchDB document id for a URL
    or title.  Note this shadows the builtin name ``input`` (kept for
    interface compatibility).
    """
    return hashlib.md5(input).hexdigest().encode("utf-8")
   
def canonurl(url):
    r"""Return the canonical, ASCII-encoded form of a UTF-8 encoded URL, or ''
    if the URL looks invalid.
    >>> canonurl('\xe2\x9e\xa1.ws') # tinyarro.ws
    'http://xn--hgi.ws/'
    """
    # strip spaces at the ends and ensure it's prefixed with 'scheme://'
    url = url.strip()
    if not url:
        return ''
    if not urlparse.urlsplit(url).scheme:
        url = 'http://' + url

    # parse the URL into its components
    parsed = urlparse.urlsplit(url)
    scheme, netloc, path, query, fragment = parsed

    # ensure scheme is a letter followed by letters, digits, and '+-.' chars
    if not re.match(r'[a-z][-+.a-z0-9]*$', scheme, flags=re.I):
        return ''
    scheme = str(scheme)

    # ensure domain and port are valid, eg: sub.domain.<1-to-6-TLD-chars>[:port]
    match = re.match(r'(.+\.[a-z0-9]{1,6})(:\d{1,5})?$', netloc, flags=re.I)
    if not match:
        return ''
    domain, port = match.groups()
    netloc = domain + (port if port else '')
    # IDNA-encode the host so non-ASCII domains become xn-- punycode
    netloc = netloc.encode('idna')

    # ensure path is valid and convert Unicode chars to %-encoded
    if not path:
        path = '/'  # eg: 'http://google.com' -> 'http://google.com/'
    path = urllib.quote(urllib.unquote(path.encode('utf-8')), safe='/;')

    # ensure query is valid
    query = urllib.quote(urllib.unquote(query.encode('utf-8')), safe='=&?/')

    # ensure fragment is valid
    fragment = urllib.quote(urllib.unquote(fragment.encode('utf-8')))

    # piece it all back together, truncating it to a maximum of 4KB
    url = urlparse.urlunsplit((scheme, netloc, path, query, fragment))
    return url[:4096]
   
def fullurl(url, href):
    """Resolve *href* against base *url*.

    Spaces are %-escaped and any fragment (#...) is dropped before joining,
    so the same page is not crawled once per anchor.
    """
    href = href.replace(" ", "%20")
    href = re.sub('#.*$', '', href)
    return urljoin(url, href)
   
#http://diveintopython.org/http_web_services/etags.html #http://diveintopython.org/http_web_services/etags.html
#http://diveintopython.org/http_web_services/etags.html
class NotModifiedHandler(urllib2.BaseHandler):
    """urllib2 handler that converts an HTTP 304 Not Modified response into a
    normal return value (with ``.code == 304``) instead of raising, so callers
    can detect it and serve a cached copy."""

    def http_error_304(self, req, fp, code, message, headers):
        addinfourl = urllib2.addinfourl(fp, headers, req.get_full_url())
        addinfourl.code = code
        return addinfourl
   
def getLastAttachment(docsdb, url):
    # Return the most recently stored attachment body for the document keyed
    # by this URL's hash, or None if the URL has never been fetched/stored.
    hash = mkhash(url)
    doc = docsdb.get(hash)
    if doc != None:
        # Attachment names begin with an epoch timestamp (see fetchURL), so
        # the last key is presumably the newest fetch — assumes the
        # _attachments keys come back in insertion order; TODO confirm
        last_attachment_fname = doc["_attachments"].keys()[-1]
        last_attachment = docsdb.get_attachment(doc, last_attachment_fname)
        return last_attachment
    else:
        return None
   
def fetchURL(docsdb, url, fieldName, agencyID, scrape_again=True): def fetchURL(docsdb, url, fieldName, agencyID, scrape_again=True):
url = canonurl(url) url = canonurl(url)
hash = mkhash(url) hash = mkhash(url)
req = urllib2.Request(url) req = urllib2.Request(url)
print "Fetching %s (%s)" % (url,hash) print "Fetching %s (%s)" % (url,hash)
if url.startswith("mailto") or url.startswith("javascript") or url.startswith("#") or url == None or url == "": if url.startswith("mailto") or url.startswith("javascript") or url.startswith("#") or url == None or url == "":
print "Not a valid HTTP url" print "Not a valid HTTP url"
return (None,None,None) return (None,None,None)
doc = docsdb.get(hash) doc = docsdb.get(hash)
if doc == None: if doc == None:
doc = {'_id': hash, 'agencyID': agencyID, 'url': url, 'fieldName':fieldName} doc = {'_id': hash, 'agencyID': agencyID, 'url': url, 'fieldName':fieldName}
else: else:
if (('page_scraped' in doc) and (time.time() - doc['page_scraped']) < 60*24*14*1000): if (('page_scraped' in doc) and (time.time() - doc['page_scraped']) < 60*24*14*1000):
print "Uh oh, trying to scrape URL again too soon!"+hash print "Uh oh, trying to scrape URL again too soon!"+hash
last_attachment_fname = doc["_attachments"].keys()[-1] last_attachment_fname = doc["_attachments"].keys()[-1]
last_attachment = docsdb.get_attachment(doc,last_attachment_fname) last_attachment = docsdb.get_attachment(doc,last_attachment_fname)
content = last_attachment content = last_attachment
return (doc['url'],doc['mime_type'],content) return (doc['url'],doc['mime_type'],content.read())
if scrape_again == False: if scrape_again == False:
print "Not scraping this URL again as requested" print "Not scraping this URL again as requested"
return (None,None,None) return (doc['url'],doc['mime_type'],content.read())
   
req.add_header("User-Agent", "Mozilla/4.0 (compatible; Prometheus webspider; owner maxious@lambdacomplex.org)") req.add_header("User-Agent", "Mozilla/4.0 (compatible; Prometheus webspider; owner maxious@lambdacomplex.org)")
#if there is a previous version stored in couchdb, load caching helper tags #if there is a previous version stored in couchdb, load caching helper tags
if doc.has_key('etag'): if doc.has_key('etag'):
req.add_header("If-None-Match", doc['etag']) req.add_header("If-None-Match", doc['etag'])
if doc.has_key('last_modified'): if doc.has_key('last_modified'):
req.add_header("If-Modified-Since", doc['last_modified']) req.add_header("If-Modified-Since", doc['last_modified'])
   
opener = urllib2.build_opener(NotModifiedHandler()) opener = urllib2.build_opener(NotModifiedHandler())
try: try:
url_handle = opener.open(req) url_handle = opener.open(req)
doc['url'] = url_handle.geturl() # may have followed a redirect to a new url doc['url'] = url_handle.geturl() # may have followed a redirect to a new url
headers = url_handle.info() # the addinfourls have the .info() too headers = url_handle.info() # the addinfourls have the .info() too
doc['etag'] = headers.getheader("ETag") doc['etag'] = headers.getheader("ETag")
doc['last_modified'] = headers.getheader("Last-Modified") doc['last_modified'] = headers.getheader("Last-Modified")
doc['date'] = headers.getheader("Date") doc['date'] = headers.getheader("Date")
doc['page_scraped'] = time.time() doc['page_scraped'] = time.time()
doc['web_server'] = headers.getheader("Server") doc['web_server'] = headers.getheader("Server")
doc['via'] = headers.getheader("Via") doc['via'] = headers.getheader("Via")
doc['powered_by'] = headers.getheader("X-Powered-By") doc['powered_by'] = headers.getheader("X-Powered-By")
doc['file_size'] = headers.getheader("Content-Length") doc['file_size'] = headers.getheader("Content-Length")
content_type = headers.getheader("Content-Type") content_type = headers.getheader("Content-Type")
if content_type != None: if content_type != None:
doc['mime_type'] = content_type.split(";")[0] doc['mime_type'] = content_type.split(";")[0]
else: else:
(type,encoding) = mimetypes.guess_type(url) (type,encoding) = mimetypes.guess_type(url)
doc['mime_type'] = type doc['mime_type'] = type
if hasattr(url_handle, 'code'): if hasattr(url_handle, 'code'):
if url_handle.code == 304: if url_handle.code == 304:
print "the web page has not been modified"+hash print "the web page has not been modified"+hash
last_attachment_fname = doc["_attachments"].keys()[-1] last_attachment_fname = doc["_attachments"].keys()[-1]
last_attachment = docsdb.get_attachment(doc,last_attachment_fname) last_attachment = docsdb.get_attachment(doc,last_attachment_fname)
content = last_attachment content = last_attachment
return (doc['url'],doc['mime_type'],content) return (doc['url'],doc['mime_type'],content.read())
else: else:
print "new webpage loaded" print "new webpage loaded"
content = url_handle.read() content = url_handle.read()
docsdb.save(doc) docsdb.save(doc)
doc = docsdb.get(hash) # need to get a _rev doc = docsdb.get(hash) # need to get a _rev
docsdb.put_attachment(doc, content, str(time.time())+"-"+os.path.basename(url), doc['mime_type']) docsdb.put_attachment(doc, content, str(time.time())+"-"+os.path.basename(url), doc['mime_type'])
return (doc['url'], doc['mime_type'], content) return (doc['url'], doc['mime_type'], content)
#store as attachment epoch-filename #store as attachment epoch-filename
   
except urllib2.URLError as e: except urllib2.URLError as e:
print "error!" print "error!"
error = "" error = ""
if hasattr(e, 'reason'): if hasattr(e, 'reason'):
error = "error %s in downloading %s" % (str(e.reason), url) error = "error %s in downloading %s" % (str(e.reason), url)
elif hasattr(e, 'code'): elif hasattr(e, 'code'):
error = "error %s in downloading %s" % (e.code, url) error = "error %s in downloading %s" % (e.code, url)
print error print error
doc['error'] = error doc['error'] = error
docsdb.save(doc) docsdb.save(doc)
return (None,None,None) return (None,None,None)
   
   
   
def scrapeAndStore(docsdb, url, depth, fieldName, agencyID): def scrapeAndStore(docsdb, url, depth, fieldName, agencyID):
(url,mime_type,content) = fetchURL(docsdb, url, fieldName, agencyID) (url,mime_type,content) = fetchURL(docsdb, url, fieldName, agencyID)
badURLs = ["http://www.ausport.gov.au/supporting/funding/grants_and_scholarships/grant_funding_report"] badURLs = ["http://www.ausport.gov.au/supporting/funding/grants_and_scholarships/grant_funding_report"]
if content != None and depth > 0 and url != "http://www.ausport.gov.au/supporting/funding/grants_and_scholarships/grant_funding_report": if content != None and depth > 0 and url != "http://www.ausport.gov.au/supporting/funding/grants_and_scholarships/grant_funding_report":
if mime_type == "text/html" or mime_type == "application/xhtml+xml" or mime_type =="application/xml": if mime_type == "text/html" or mime_type == "application/xhtml+xml" or mime_type =="application/xml":
# http://www.crummy.com/software/BeautifulSoup/documentation.html # http://www.crummy.com/software/BeautifulSoup/documentation.html
soup = BeautifulSoup(content) soup = BeautifulSoup(content)
navIDs = soup.findAll(id=re.compile('nav|Nav|menu|bar|left|right|sidebar|more-links|breadcrumb|footer|header')) navIDs = soup.findAll(id=re.compile('nav|Nav|menu|bar|left|right|sidebar|more-links|breadcrumb|footer|header'))
for nav in navIDs: for nav in navIDs:
print "Removing element", nav['id'] print "Removing element", nav['id']
nav.extract() nav.extract()
navClasses = soup.findAll(attrs={'class' : re.compile('nav|menu|bar|left|right|sidebar|more-links|breadcrumb|footer|header')}) navClasses = soup.findAll(attrs={'class' : re.compile('nav|menu|bar|left|right|sidebar|more-links|breadcrumb|footer|header')})
for nav in navClasses: for nav in navClasses:
print "Removing element", nav['class'] print "Removing element", nav['class']
nav.extract() nav.extract()
links = soup.findAll('a') # soup.findAll('a', id=re.compile("^p-")) links = soup.findAll('a') # soup.findAll('a', id=re.compile("^p-"))
linkurls = set([]) linkurls = set([])
for link in links: for link in links:
if link.has_key("href"): if link.has_key("href"):
if link['href'].startswith("http"): if link['href'].startswith("http"):
# lets not do external links for now # lets not do external links for now
# linkurls.add(link['href']) # linkurls.add(link['href'])
None None
if link['href'].startswith("mailto"): if link['href'].startswith("mailto"):
# not http # not http
None None
if link['href'].startswith("javascript"): if link['href'].startswith("javascript"):
# not http # not http
None None
else: else:
# remove anchors and spaces in urls # remove anchors and spaces in urls
linkurls.add(fullurl(url,link['href'])) linkurls.add(fullurl(url,link['href']))
for linkurl in linkurls: for linkurl in linkurls:
#print linkurl #print linkurl
scrapeAndStore(docsdb, linkurl, depth-1, fieldName, agencyID) scrapeAndStore(docsdb, linkurl, depth-1, fieldName, agencyID)
   
#couch = couchdb.Server('http://192.168.1.148:5984/') #couch = couchdb.Server('http://192.168.1.148:5984/')
couch = couchdb.Server('http://127.0.0.1:5984/') couch = couchdb.Server('http://127.0.0.1:5984/')
# select database # select database
agencydb = couch['disclosr-agencies'] agencydb = couch['disclosr-agencies']
docsdb = couch['disclosr-documents'] docsdb = couch['disclosr-documents']
   
if __name__ == "__main__": if __name__ == "__main__":
for row in agencydb.view('app/getScrapeRequired'): #not recently scraped agencies view? for row in agencydb.view('app/getScrapeRequired'): #not recently scraped agencies view?
agency = agencydb.get(row.id) agency = agencydb.get(row.id)
print agency['name'] print agency['name']
for key in agency.keys(): for key in agency.keys():
if key == "FOIDocumentsURL" and "status" not in agency.keys: if key == "FOIDocumentsURL" and "status" not in agency.keys:
scrapeAndStore(docsdb, agency[key],0,key,agency['_id']) scrapeAndStore(docsdb, agency[key],0,key,agency['_id'])
if key == 'website' and False: if key == 'website' and False:
scrapeAndStore(docsdb, agency[key],0,key,agency['_id']) scrapeAndStore(docsdb, agency[key],0,key,agency['_id'])
agency['metadata']['lastScraped'] = time.time() agency['metadata']['lastScraped'] = time.time()
if key.endswith('URL') and False: if key.endswith('URL') and False:
print key print key
depth = 1 depth = 1
if 'scrapeDepth' in agency.keys(): if 'scrapeDepth' in agency.keys():
depth = agency['scrapeDepth'] depth = agency['scrapeDepth']
scrapeAndStore(docsdb, agency[key],depth,key,agency['_id']) scrapeAndStore(docsdb, agency[key],depth,key,agency['_id'])
agencydb.save(agency) agencydb.save(agency)
   
import sys import sys
import os import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../')) sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
import genericScrapers import genericScrapers
   
   
class ScraperImplementation(genericScrapers.GenericPDFDisclogScraper): class ScraperImplementation(genericScrapers.GenericPDFDisclogScraper):
   
def __init__(self): def __init__(self):
super(ScraperImplementation, self).__init__() super(ScraperImplementation, self).__init__()
   
   
if __name__ == '__main__': if __name__ == '__main__':
print 'Subclass:', issubclass(ScraperImplementation, print 'Subclass:', issubclass(ScraperImplementation,
genericScrapers.GenericOAICDisclogScraper) genericScrapers.GenericPDFDisclogScraper)
print 'Instance:', isinstance(ScraperImplementation(), print 'Instance:', isinstance(ScraperImplementation(),
genericScrapers.GenericOAICDisclogScraper) genericScrapers.GenericPDFDisclogScraper)
ScraperImplementation().doScrape() ScraperImplementation().doScrape()
   
  import sys
  import os
  sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
  import genericScrapers
 
 
class ScraperImplementation(genericScrapers.GenericHTMLDisclogScraper):
    # Disclosure log published as a plain HTML page; all scraping logic lives
    # in the GenericHTMLDisclogScraper base class.

    def __init__(self):
        super(ScraperImplementation, self).__init__()


if __name__ == '__main__':
    # Sanity-check the class hierarchy, then run the scrape.
    print 'Subclass:', issubclass(ScraperImplementation,
        genericScrapers.GenericHTMLDisclogScraper)
    print 'Instance:', isinstance(ScraperImplementation(),
        genericScrapers.GenericHTMLDisclogScraper)
    ScraperImplementation().doScrape()
 
  import sys
  import os
  sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
  import genericScrapers
 
 
class ScraperImplementation(genericScrapers.GenericPDFDisclogScraper):
    # Disclosure log published as a PDF; all scraping logic lives in the
    # GenericPDFDisclogScraper base class.

    def __init__(self):
        super(ScraperImplementation, self).__init__()


if __name__ == '__main__':
    # Sanity-check the class hierarchy, then run the scrape.
    print 'Subclass:', issubclass(ScraperImplementation,
        genericScrapers.GenericPDFDisclogScraper)
    print 'Instance:', isinstance(ScraperImplementation(),
        genericScrapers.GenericPDFDisclogScraper)
    ScraperImplementation().doScrape()
 
  import sys
  import os
  sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
  import genericScrapers
 
 
class ScraperImplementation(genericScrapers.GenericHTMLDisclogScraper):
    # Disclosure log published as a plain HTML page; all scraping logic lives
    # in the GenericHTMLDisclogScraper base class.

    def __init__(self):
        super(ScraperImplementation, self).__init__()


if __name__ == '__main__':
    # Sanity-check the class hierarchy, then run the scrape.
    print 'Subclass:', issubclass(ScraperImplementation,
        genericScrapers.GenericHTMLDisclogScraper)
    print 'Instance:', isinstance(ScraperImplementation(),
        genericScrapers.GenericHTMLDisclogScraper)
    ScraperImplementation().doScrape()
 
  import sys,os
  sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
  import genericScrapers
  import scrape
  from bs4 import BeautifulSoup
  from datetime import date
 
  #http://www.doughellmann.com/PyMOTW/abc/
class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
    # Table-based (OAIC-style) disclosure log scraper.

    def getTable(self, soup):
        # The log is the table inside the SharePoint content div — assumes the
        # element with this id exists; TODO confirm against the live page.
        return soup.find(id="ctl00_MSO_ContentDiv").table

    def getColumns(self, columns):
        # Map the four source columns to the generic five-tuple; the source
        # has no separate link/title column, so title is reused twice.
        (id, title, description, notes) = columns
        return (id, title, title, description, notes)

    def getDate(self, content, entry, doc):
        # The table has no usable date column: stamp entries with today.
        edate = date.today().strftime("%Y-%m-%d")
        doc.update({'date': edate})
        return

    def getColumnCount(self):
        # Four columns per row, matching the unpack in getColumns.
        return 4

if __name__ == '__main__':
    # Sanity-check the class hierarchy, then run the scrape.
    print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
    print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
    ScraperImplementation().doScrape()
 
  import sys
  import os
  sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
  import genericScrapers
 
 
class ScraperImplementation(genericScrapers.GenericHTMLDisclogScraper):
    # Disclosure log published as a plain HTML page; all scraping logic lives
    # in the GenericHTMLDisclogScraper base class.

    def __init__(self):
        super(ScraperImplementation, self).__init__()


if __name__ == '__main__':
    # Sanity-check the class hierarchy, then run the scrape.
    print 'Subclass:', issubclass(ScraperImplementation,
        genericScrapers.GenericHTMLDisclogScraper)
    print 'Instance:', isinstance(ScraperImplementation(),
        genericScrapers.GenericHTMLDisclogScraper)
    ScraperImplementation().doScrape()
 
  import sys
  import os
  sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
  import genericScrapers
 
 
class ScraperImplementation(genericScrapers.GenericHTMLDisclogScraper):
    # Disclosure log published as a plain HTML page; all scraping logic lives
    # in the GenericHTMLDisclogScraper base class.

    def __init__(self):
        super(ScraperImplementation, self).__init__()


if __name__ == '__main__':
    # Sanity-check the class hierarchy, then run the scrape.
    print 'Subclass:', issubclass(ScraperImplementation,
        genericScrapers.GenericHTMLDisclogScraper)
    print 'Instance:', isinstance(ScraperImplementation(),
        genericScrapers.GenericHTMLDisclogScraper)
    ScraperImplementation().doScrape()
 
  import sys
  import os
  sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
  import genericScrapers
 
 
class ScraperImplementation(genericScrapers.GenericHTMLDisclogScraper):
    # Disclosure log published as a plain HTML page; all scraping logic lives
    # in the GenericHTMLDisclogScraper base class.

    def __init__(self):
        super(ScraperImplementation, self).__init__()


if __name__ == '__main__':
    # Sanity-check the class hierarchy, then run the scrape.
    print 'Subclass:', issubclass(ScraperImplementation,
        genericScrapers.GenericHTMLDisclogScraper)
    print 'Instance:', isinstance(ScraperImplementation(),
        genericScrapers.GenericHTMLDisclogScraper)
    ScraperImplementation().doScrape()
 
  import sys
  import os
  sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
  import genericScrapers
 
 
class ScraperImplementation(genericScrapers.GenericHTMLDisclogScraper):
    # Disclosure log published as a plain HTML page; all scraping logic lives
    # in the GenericHTMLDisclogScraper base class.

    def __init__(self):
        super(ScraperImplementation, self).__init__()


if __name__ == '__main__':
    # Sanity-check the class hierarchy, then run the scrape.
    print 'Subclass:', issubclass(ScraperImplementation,
        genericScrapers.GenericHTMLDisclogScraper)
    print 'Instance:', isinstance(ScraperImplementation(),
        genericScrapers.GenericHTMLDisclogScraper)
    ScraperImplementation().doScrape()
 
  import sys
  import os
  sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
  import genericScrapers
 
 
class ScraperImplementation(genericScrapers.GenericHTMLDisclogScraper):
    # Disclosure log published as a plain HTML page; all scraping logic lives
    # in the GenericHTMLDisclogScraper base class.

    def __init__(self):
        super(ScraperImplementation, self).__init__()


if __name__ == '__main__':
    # Sanity-check the class hierarchy, then run the scrape.
    print 'Subclass:', issubclass(ScraperImplementation,
        genericScrapers.GenericHTMLDisclogScraper)
    print 'Instance:', isinstance(ScraperImplementation(),
        genericScrapers.GenericHTMLDisclogScraper)
    ScraperImplementation().doScrape()
 
  import sys
  import os
  sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
  import genericScrapers
  import scrape
  from datetime import date
  from pyquery import PyQuery as pq
  from lxml import etree
  import urllib
  import dateutil
  from dateutil.parser import *
 
class ACMADisclogScraper(genericScrapers.GenericDisclogScraper):
    # Custom scraper: the disclosure log is a set of HTML tables, parsed with
    # PyQuery; one CouchDB document is saved per table.

    def doScrape(self):
        foidocsdb = scrape.couch['disclosr-foidocuments']
        (url, mime_type, content) = scrape.fetchURL(scrape.docsdb,
            self.getURL(), "foidocuments", self.getAgencyID())

        d = pq(content)
        d.make_links_absolute(base_url=self.getURL())
        for table in d('table').items():
            title = table('thead').text()
            print title
            # Takes the second cell of each body row; assumes every table has
            # exactly six rows in this order — TODO confirm against the page.
            (idate, descA, descB, link, deldate, notes) = table('tbody tr').map(lambda i, e: pq(e).children().eq(1).text())
            links = table('a').map(lambda i, e: pq(e).attr('href'))
            description = descA + " " + descB
            # Parse the leading date fragment, day-first (Australian format)
            edate = parse(idate[:12], dayfirst=True, fuzzy=True).strftime("%Y-%m-%d")
            print edate
            # Dedup key: hash of the (control-char-stripped) table heading
            dochash = scrape.mkhash(self.remove_control_chars(title))
            doc = foidocsdb.get(dochash)
            if doc is None:
                print "saving " + dochash
                # NOTE(review): this overwrites the edate parsed above with
                # today's date — confirm which one is intended.
                edate = date.today().strftime("%Y-%m-%d")
                doc = {'_id': dochash, 'agencyID': self.getAgencyID()
                    , 'url': self.getURL(), 'docID': dochash,
                    "links": links,
                    "date": edate, "notes": notes, "title": title, "description": description}
                #print doc
                foidocsdb.save(doc)
            else:
                print "already saved"


if __name__ == '__main__':
    # Sanity-check the class hierarchy, then run the scrape.
    print 'Subclass:', issubclass(ACMADisclogScraper,
        genericScrapers.GenericDisclogScraper)
    print 'Instance:', isinstance(ACMADisclogScraper(),
        genericScrapers.GenericDisclogScraper)
    ACMADisclogScraper().doScrape()
 
  import sys
  import os
  sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
  import genericScrapers
 
 
class ScraperImplementation(genericScrapers.GenericHTMLDisclogScraper):
    # Disclosure log published as a plain HTML page; all scraping logic lives
    # in the GenericHTMLDisclogScraper base class.

    def __init__(self):
        super(ScraperImplementation, self).__init__()


if __name__ == '__main__':
    # Sanity-check the class hierarchy, then run the scrape.
    print 'Subclass:', issubclass(ScraperImplementation,
        genericScrapers.GenericHTMLDisclogScraper)
    print 'Instance:', isinstance(ScraperImplementation(),
        genericScrapers.GenericHTMLDisclogScraper)
    ScraperImplementation().doScrape()
 
  import sys
  import os
  sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
  import genericScrapers
 
 
class ScraperImplementation(genericScrapers.GenericHTMLDisclogScraper):
    # Disclosure log published as a plain HTML page; all scraping logic lives
    # in the GenericHTMLDisclogScraper base class.

    def __init__(self):
        super(ScraperImplementation, self).__init__()


if __name__ == '__main__':
    # Sanity-check the class hierarchy, then run the scrape.
    print 'Subclass:', issubclass(ScraperImplementation,
        genericScrapers.GenericHTMLDisclogScraper)
    print 'Instance:', isinstance(ScraperImplementation(),
        genericScrapers.GenericHTMLDisclogScraper)
    ScraperImplementation().doScrape()
 
  import sys
  import os
  sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
  import genericScrapers
 
 
class ScraperImplementation(genericScrapers.GenericHTMLDisclogScraper):
    # Disclosure log published as a plain HTML page; all scraping logic lives
    # in the GenericHTMLDisclogScraper base class.

    def __init__(self):
        super(ScraperImplementation, self).__init__()


if __name__ == '__main__':
    # Sanity-check the class hierarchy, then run the scrape.
    print 'Subclass:', issubclass(ScraperImplementation,
        genericScrapers.GenericHTMLDisclogScraper)
    print 'Instance:', isinstance(ScraperImplementation(),
        genericScrapers.GenericHTMLDisclogScraper)
    ScraperImplementation().doScrape()
 
  import sys
  import os
  sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
  import genericScrapers
 
 
class ScraperImplementation(genericScrapers.GenericHTMLDisclogScraper):
    # Disclosure log published as a plain HTML page; all scraping logic lives
    # in the GenericHTMLDisclogScraper base class.

    def __init__(self):
        super(ScraperImplementation, self).__init__()


if __name__ == '__main__':
    # Sanity-check the class hierarchy, then run the scrape.
    print 'Subclass:', issubclass(ScraperImplementation,
        genericScrapers.GenericHTMLDisclogScraper)
    print 'Instance:', isinstance(ScraperImplementation(),
        genericScrapers.GenericHTMLDisclogScraper)
    ScraperImplementation().doScrape()
 
  import sys
  import os
  sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
  import genericScrapers
  import scrape
  from datetime import date
  from pyquery import PyQuery as pq
  from lxml import etree
  import urllib
  import dateutil
  from dateutil.parser import *
 
class ACMADisclogScraper(genericScrapers.GenericDisclogScraper):
    # Custom scraper: the disclosure log is a list of ".item-list" blocks,
    # parsed with PyQuery; one CouchDB document is saved per block.

    def doScrape(self):
        foidocsdb = scrape.couch['disclosr-foidocuments']
        (url, mime_type, content) = scrape.fetchURL(scrape.docsdb,
            self.getURL(), "foidocuments", self.getAgencyID())

        d = pq(content)
        d.make_links_absolute(base_url=self.getURL())
        for item in d('.item-list').items():
            title = item('h3').text()
            print title
            links = item('a').map(lambda i, e: pq(e).attr('href'))
            # NOTE(review): this chained assignment overwrites 'title' with the
            # <ul> text, so the dochash below is derived from the list body,
            # not the heading printed above — confirm this is intentional
            # (changing it would change existing document ids).
            description = title = item('ul').text()
            # No date on the page: stamp entries with today's date.
            edate = date.today().strftime("%Y-%m-%d")
            print edate
            dochash = scrape.mkhash(self.remove_control_chars(title))
            doc = foidocsdb.get(dochash)
            if doc is None:
                print "saving " + dochash
                doc = {'_id': dochash, 'agencyID': self.getAgencyID()
                    , 'url': self.getURL(), 'docID': dochash,
                    "links": links,
                    "date": edate, "title": title, "description": description}
                #print doc
                foidocsdb.save(doc)
            else:
                print "already saved"


if __name__ == '__main__':
    # Sanity-check the class hierarchy, then run the scrape.
    print 'Subclass:', issubclass(ACMADisclogScraper,
        genericScrapers.GenericDisclogScraper)
    print 'Instance:', isinstance(ACMADisclogScraper(),
        genericScrapers.GenericDisclogScraper)
    ACMADisclogScraper().doScrape()
 
  import sys
  import os
  sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
  import genericScrapers
 
 
class ScraperImplementation(genericScrapers.GenericHTMLDisclogScraper):
    # Disclosure log published as a plain HTML page; all scraping logic lives
    # in the GenericHTMLDisclogScraper base class.

    def __init__(self):
        super(ScraperImplementation, self).__init__()


if __name__ == '__main__':
    # Sanity-check the class hierarchy, then run the scrape.
    print 'Subclass:', issubclass(ScraperImplementation,
        genericScrapers.GenericHTMLDisclogScraper)
    print 'Instance:', isinstance(ScraperImplementation(),
        genericScrapers.GenericHTMLDisclogScraper)
    ScraperImplementation().doScrape()
 
  import sys
  import os
  sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
  import genericScrapers
 
 
class ScraperImplementation(genericScrapers.GenericHTMLDisclogScraper):
    # Disclosure log published as a plain HTML page; all scraping logic lives
    # in the GenericHTMLDisclogScraper base class.

    def __init__(self):
        super(ScraperImplementation, self).__init__()


if __name__ == '__main__':
    # Sanity-check the class hierarchy, then run the scrape.
    print 'Subclass:', issubclass(ScraperImplementation,
        genericScrapers.GenericHTMLDisclogScraper)
    print 'Instance:', isinstance(ScraperImplementation(),
        genericScrapers.GenericHTMLDisclogScraper)
    ScraperImplementation().doScrape()
 
<?php

// sitemap.xml generator: lists the static pages, every agency page, and
// every disclosure-log entry page.
include ('../include/common.inc.php');
$last_updated = date('Y-m-d', @filemtime('cbrfeed.zip'));
header("Content-Type: text/xml");
echo "<?xml version='1.0' encoding='UTF-8'?>";
echo '<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">' . "\n";
echo " <url><loc>" . local_url() . "index.php</loc><priority>1.0</priority></url>\n";
// Every top-level PHP page except this one and the index
foreach (scandir("./") as $file) {
    if (strpos($file, ".php") !== false && $file != "index.php" && $file != "sitemap.xml.php")
        echo " <url><loc>" . local_url() . "$file</loc><priority>0.6</priority></url>\n";
}
$agenciesdb = $server->get_db('disclosr-agencies');
try {
    $rows = $agenciesdb->get_view("app", "byCanonicalName")->rows;
    foreach ($rows as $row) {
        echo '<url><loc>' . local_url() . 'agency.php?id=' . $row->value->_id . "</loc><priority>0.3</priority></url>\n";
    }
} catch (SetteeRestClientException $e) {
    setteErrorHandler($e);
}
$foidocsdb = $server->get_db('disclosr-foidocuments');
try {
    $rows = $foidocsdb->get_view("app", "all")->rows;
    foreach ($rows as $row) {
        echo '<url><loc>' . local_url() . 'view.php?id=' . $row->value->_id . "</loc><priority>0.3</priority></url>\n";
    }
} catch (SetteeRestClientException $e) {
    setteErrorHandler($e);
}
echo '</urlset>';
?>
   
<?php

/**
 * Emit the shared HTML head and navigation bar for the documents pages and
 * open the main Bootstrap container. Pair with include_footer_documents().
 *
 * @param $title Page title suffix; appended to the site name when non-empty.
 */
function include_header_documents($title) {
    header('X-UA-Compatible: IE=edge,chrome=1');
    ?>
    <!doctype html>
    <!-- paulirish.com/2008/conditional-stylesheets-vs-css-hacks-answer-neither/ -->
    <!--[if lt IE 7]> <html class="no-js lt-ie9 lt-ie8 lt-ie7" lang="en"> <![endif]-->
    <!--[if IE 7]>    <html class="no-js lt-ie9 lt-ie8" lang="en"> <![endif]-->
    <!--[if IE 8]>    <html class="no-js lt-ie9" lang="en"> <![endif]-->
    <!-- Consider adding a manifest.appcache: h5bp.com/d/Offline -->
    <!--[if gt IE 8]><!--> <html class="no-js" lang="en"> <!--<![endif]-->
    <head>
        <meta charset="utf-8">

        <!-- BUGFIX: escape the title; it can carry user/DB supplied text. -->
        <title>Australian Disclosure Logs<?php if ($title != "") echo " - " . htmlspecialchars($title); ?></title>
        <meta name="description" content="">

        <!-- Mobile viewport optimized: h5bp.com/viewport -->
        <meta name="viewport" content="width=device-width">
        <link rel="alternate" type="application/rss+xml" title="Latest Disclosure Log Entries" href="rss.xml.php" />
        <!-- Place favicon.ico and apple-touch-icon.png in the root directory: mathiasbynens.be/notes/touch-icons -->
        <meta name="google-site-verification" content="jkknX5g2FCpQvrW030b1Nq2hyoa6mb3EDiA7kCoHNj8" />

        <!-- Le styles -->
        <link href="css/bootstrap.min.css" rel="stylesheet">
        <style type="text/css">
            body {
                padding-top: 60px;
                padding-bottom: 40px;
            }
            .sidebar-nav {
                padding: 9px 0;
            }
        </style>
        <link href="css/bootstrap-responsive.min.css" rel="stylesheet">

        <!-- HTML5 shim, for IE6-8 support of HTML5 elements -->
        <!--[if lt IE 9]>
        <script src="http://html5shim.googlecode.com/svn/trunk/html5.js"></script>
        <![endif]-->
        <!-- More ideas for your <head> here: h5bp.com/d/head-Tips -->

        <!-- All JavaScript at the bottom, except this Modernizr build.
             Modernizr enables HTML5 elements & feature detects for optimal performance.
             Create your own custom Modernizr build: www.modernizr.com/download/
        <script src="js/libs/modernizr-2.5.3.min.js"></script>-->
        <script src="js/jquery.js"></script>
        <script type="text/javascript" src="js/flotr2.min.js"></script>

    </head>
    <body>
        <div class="navbar navbar-inverse navbar-fixed-top">
            <div class="navbar-inner">
                <div class="container-fluid">
                    <a class="btn btn-navbar" data-toggle="collapse" data-target=".nav-collapse">
                        <span class="icon-bar"></span>
                        <span class="icon-bar"></span>
                        <span class="icon-bar"></span>
                    </a>
                    <a class="brand" href="#">Australian Disclosure Logs</a>
                    <div class="nav-collapse collapse">
                        <p class="navbar-text pull-right">
                            <small>
                                Subsites on:
                            </small>
                            <a href="http://orgs.disclosurelo.gs">Government Agencies</a>
                            • <a href="http://lobbyists.disclosurelo.gs">Political Lobbyists</a>
                            • <a href="http://contracts.disclosurelo.gs">Government Contracts and Spending</a>
                        </p>
                        <ul class="nav">
                            <li><a href="agency.php">By Agency</a></li>
                            <li><a href="date.php">By Date</a></li>
                            <li><a href="disclogsList.php">List of Disclosure Logs</a></li>
                            <li><a href="about.php">About</a></li>
                        </ul>
                    </div><!--/.nav-collapse -->
                </div>
            </div>
        </div>
        <div class="container">
            <?php
        }
   
function include_footer_documents() {
    // Close the page opened by include_header_documents(): container div,
    // footer, and (in production only) the Google Analytics snippet.
    global $ENV;
    ?>
    </div> <!-- /container -->
    <hr>

    <footer>
        <p>Not affiliated with or endorsed by any government agency.</p>
    </footer>
    <?php
    // $ENV is set to "PROD" by common.inc.php when not running on localhost.
    // BUGFIX: guard against $ENV being unset (undefined-variable notice, and
    // analytics would have been emitted when the config was never loaded).
    if (isset($ENV) && $ENV != "DEV") {
        echo "<script type='text/javascript'>

        var _gaq = _gaq || [];
        _gaq.push(['_setAccount', 'UA-12341040-4']);
        _gaq.push(['_setDomainName', 'disclosurelo.gs']);
        _gaq.push(['_setAllowLinker', true]);
        _gaq.push(['_trackPageview']);

        (function() {
            var ga = document.createElement('script');
            ga.type = 'text/javascript';
            ga.async = true;
            ga.src = ('https:' == document.location.protocol ? 'https://ssl' : 'http://www') + '.google-analytics.com/ga.js';
            var s = document.getElementsByTagName('script')[0];
            s.parentNode.insertBefore(ga, s);
        })();

        </script>";
    }
    ?>
    <!-- Le javascript
    ================================================== -->
    <!-- Placed at the end of the document so the pages load faster -->
    <!--
    <script src="js/bootstrap-transition.js"></script>
    <script src="js/bootstrap-alert.js"></script>
    <script src="js/bootstrap-modal.js"></script>
    <script src="js/bootstrap-dropdown.js"></script>
    <script src="js/bootstrap-scrollspy.js"></script>
    <script src="js/bootstrap-tab.js"></script>
    <script src="js/bootstrap-tooltip.js"></script>
    <script src="js/bootstrap-popover.js"></script>
    <script src="js/bootstrap-button.js"></script>
    <script src="js/bootstrap-collapse.js"></script>
    <script src="js/bootstrap-carousel.js"></script>
    <script src="js/bootstrap-typeahead.js"></script>-->

    </body>
</html>
<?php
}
   
function truncate($string, $length, $stopanywhere = false) {
    // Truncate $string to roughly $length characters (ellipsis included),
    // stopping on a word boundary unless $stopanywhere is true.
    if (strlen($string) > $length) {
        // Limit hit — reserve 3 characters for the "..." suffix.
        $string = substr($string, 0, ($length - 3));
        if ($stopanywhere) {
            // Stop anywhere: hard cut.
            $string .= '...';
        } else {
            // Stop on a word: cut back to the last space in the fragment.
            // BUGFIX: when the fragment contains no space, strrpos() returns
            // false and substr(..., 0, false) collapsed the result to just
            // "..." — fall back to the hard cut instead.
            $lastSpace = strrpos($string, ' ');
            if ($lastSpace !== false) {
                $string = substr($string, 0, $lastSpace);
            }
            $string .= '...';
        }
    }
    return $string;
}
   
function displayLogEntry($row, $idtoname) {
    // Render one FOI disclosure-log entry as a schema.org-annotated HTML
    // fragment.
    //
    // @param $row      stdClass whose ->value document carries date, title,
    //                  _id, agencyID, url, docID and optionally
    //                  description/notes/links.
    // @param $idtoname Array mapping agency ids to display names.
    // @return string   HTML for the entry.
    $result = "";
    $result .= '<div itemscope itemtype="http://schema.org/Article">';
    $result .= '<h2><a href="http://disclosurelo.gs/view.php?id=' . $row->value->_id . '"> <span itemprop="datePublished">' . $row->value->date . "</span>: <span itemprop='name headline'>" . truncate($row->value->title, 120) . "</span>";
    $result .= ' (<span itemprop="author publisher creator">' . $idtoname[$row->value->agencyID] . '</span>)</a></h2>';
    $result .= "<p itemprop='description articleBody text'> Title: " . $row->value->title . "<br/>";
    if (isset($row->value->description)) {
        // Collapse runs of blank lines, then turn remaining newlines into <br>.
        $result .= str_replace("\n", "<br>", preg_replace("/(^[\r\n]*|[\r\n]+)[\s\t]*[\r\n]+/", "", trim($row->value->description)));
    }
    if (isset($row->value->notes)) {
        $result .= " <br>Note: " . $row->value->notes;
    }
    $result .= "</p>";

    if (isset($row->value->links)) {
        $result .= '<h3>Links/Documents</h3><ul itemprop="associatedMedia">';
        foreach ($row->value->links as $link) {
            // Escape each link for both the href attribute and the visible text.
            $result .= '<li itemscope itemtype="http://schema.org/MediaObject"><a href="' . htmlspecialchars($link) . '" itemprop="url contentURL">' . htmlspecialchars($link) . "</a></li>";
        }

        $result .= "</ul>";
    }
    // BUGFIX: escape the scraped source URL before embedding it in an href
    // attribute — it is untrusted input (XSS).
    $result .= "<small><A itemprop='url' href='" . htmlspecialchars($row->value->url) . "'>View original source...</a> ID: " . strip_tags($row->value->docID) . "</small>";
    $result .= "</div>\n";
    return $result;
}
   
<?php
// view.php — render a single disclosure-log entry identified by ?id=...
include('template.inc.php');
include_once('../include/common.inc.php');

// Build the agency-id -> display-name map used by displayLogEntry().
$agenciesdb = $server->get_db('disclosr-agencies');

$idtoname = Array();
foreach ($agenciesdb->get_view("app", "byCanonicalName")->rows as $row) {
    $idtoname[$row->id] = trim($row->value->name);
}
$foidocsdb = $server->get_db('disclosr-foidocuments');
try {
    $obj = new stdClass();
    // BUGFIX: avoid an undefined-index notice when no id was supplied; the
    // empty-id lookup fails inside the try like any other missing document.
    $docid = isset($_REQUEST['id']) ? $_REQUEST['id'] : '';
    $obj->value = $foidocsdb->get($docid);
    // Header is emitted only once the document is known, so the page title
    // can include the document's own title.
    include_header_documents($obj->value->title);

    echo displayLogEntry($obj, $idtoname);

} catch (SetteeRestClientException $e) {
    setteErrorHandler($e);
}
include_footer_documents();
?>
   
<?php

date_default_timezone_set("Australia/Sydney");

// Scripts served from a subdirectory (or from the production vhost) need to
// reach the shared includes one level up.
$basePath = "";
if (strpos($_SERVER['PHP_SELF'], "alaveteli/") !== false
        || strpos($_SERVER['PHP_SELF'], "admin/") !== false
        || strpos($_SERVER['PHP_SELF'], "lib/") !== false
        || strpos($_SERVER['PHP_SELF'], "include/") !== false
        || strpos($_SERVER['PHP_SELF'], "documents/") !== false
        // BUGFIX: guard SERVER_NAME with isset() — it is absent under CLI and
        // the bare comparison raised an undefined-index notice. strstr() was
        // also replaced by strpos() !== false for the pure existence checks.
        || (isset($_SERVER['SERVER_NAME']) && $_SERVER['SERVER_NAME'] == "disclosurelo.gs")
        || (isset($_SERVER['SERVER_NAME']) && $_SERVER['SERVER_NAME'] == "www.disclosurelo.gs")
)
    $basePath = "../";

include_once ('couchdb.inc.php');
include_once ('template.inc.php');
require_once $basePath . 'lib/Requests/library/Requests.php';

Requests::register_autoloader();
// $ENV toggles production-only behaviour (analytics, Amon error reporting).
$ENV = "DEV";
if (isset($_SERVER['SERVER_NAME']) && $_SERVER['SERVER_NAME'] != 'localhost') {

    // NOTE(review): the Amon secret key is committed in source — it should be
    // moved to environment/deployment configuration.
    require $basePath . "lib/amon-php/amon.php";
    Amon::config(array('address' => 'http://127.0.0.1:2464',
        'protocol' => 'http',
        'secret_key' => "I2LJ6dOMmlnXgVAkTPFXd5M3ejkga8Gd2FbBt6iqZdw"));
    Amon::setup_exception_handler();
    $ENV = "PROD";
}
   
# Convert a stdClass to an Array. http://www.php.net/manual/en/language.types.object.php#102735 # Convert a stdClass to an Array. http://www.php.net/manual/en/language.types.object.php#102735
   
function object_to_array(stdClass $Class) {
    # The (array) cast turns the object's properties into array entries.
    # http://www.php.net/manual/en/language.types.object.php#102735
    $result = (array) $Class;

    # Walk the entries and recurse into any nested stdClass values so the
    # whole structure ends up as plain arrays.
    foreach ($result as $property => $entry) {
        if (is_object($entry) && get_class($entry) === 'stdClass') {
            $result[$property] = object_to_array($entry);
        }
    }
    return $result;
}
   
# Convert an Array to stdClass. http://www.php.net/manual/en/language.types.object.php#102735 # Convert an Array to stdClass. http://www.php.net/manual/en/language.types.object.php#102735
   
function array_to_object(array $array) {
    # Recursively convert nested array values first, so the final cast
    # yields stdClass instances all the way down.
    # http://www.php.net/manual/en/language.types.object.php#102735
    foreach ($array as $idx => $entry) {
        if (is_array($entry)) {
            $array[$idx] = array_to_object($entry);
        }
    }

    # The (object) cast converts the top-level array to stdClass.
    return (object) $array;
}
   
function dept_to_portfolio($deptName) {
    // Strip a leading "Department of" / "Department of the" to obtain the
    // bare portfolio name.
    $normalised = str_replace("Department of the", "Department of", $deptName);
    $portfolio = str_replace("Department of", "", $normalised);
    return trim($portfolio);
}
function phrase_to_tag ($phrase) {
    // Build a tag slug: lower-case, drop commas and apostrophes, then join
    // words with underscores.
    $tag = strtolower($phrase);
    $tag = str_replace(",", "", $tag);
    $tag = str_replace("'", "", $tag);
    return str_replace(" ", "_", $tag);
}
function local_url() {
    // Absolute base URL of the directory containing the current script,
    // always ending in a slash.
    $dir = rtrim(dirname($_SERVER['PHP_SELF']), '/\\');
    return "http://" . $_SERVER['HTTP_HOST'] . $dir . "/";
}
function GetDomain($url)
{
    // Return the host part of $url with any leading "www." stripped; when the
    // URL has no scheme (so parse_url() yields no host), return the path
    // component instead.
    //
    // BUGFIX: ereg_replace() was removed in PHP 7 — calling it is a fatal
    // "undefined function" error. preg_replace() with the equivalent pattern
    // preserves the original replace-all-occurrences behaviour.
    $nowww = preg_replace('/www\./', '', $url);
    $domain = parse_url($nowww);
    if (!empty($domain["host"]))
    {
        return $domain["host"];
    } else
    {
        return $domain["path"];
    }
}