Merge branch 'master' of ssh://apples.lambdacomplex.org/git/disclosr

Conflicts:
admin/logo.svg

Former-commit-id: 850f69e8af3bbfb3467f5708676d7bf99fce2571

<?php

require_once '../include/common.inc.php';

$db = $server->get_db('disclosr-agencies');
$rows = $db->get_view("app", "byName")->rows;
$nametoid = Array();
$stats = Array();
foreach ($rows as $row) {
    $nametoid[trim($row->key)] = $row->value;
}
$row = 0;
$headers = Array();
// source: http://data.gov.au/dataset/freedom-of-information-quarterly-request-and-review-statistical-data-2011-12/
if (($handle = fopen("FOI-quarterly-requests-and-reviews-2011-12.csv", "r")) !== FALSE) {
    while (($data = fgetcsv($handle, 10000, ",")) !== FALSE) {
        if ($row >= 1) {
            // print_r($data);
            $name = trim($data[2]);
            // echo "$name <br>";
            if ($data[0] != "TOTALS" && $data[0] != "") {
                if (isset($nametoid[$name])) {
                    $id = $nametoid[$name];
                    $timePeriod = $data[0] . "-Q" . $data[1];

                    // echo "$timePeriod <br>";
                    // drop the leading period/agency columns (0-8), leaving only
                    // the statistic counts keyed by CSV column number
                    unset($data[0]);
                    unset($data[1]);
                    unset($data[2]);
                    unset($data[3]);
                    unset($data[4]);
                    unset($data[5]);
                    unset($data[6]);
                    unset($data[7]);
                    unset($data[8]);

                    //echo $id . "<br>" . PHP_EOL;
                    $result = Array("source" => "http://data.gov.au/dataset/freedom-of-information-quarterly-request-and-review-statistical-data-2011-12/");
                    foreach ($data as $key => $datum) {
                        if ($datum != 0) {
                            // TODO prefix header with "FOI"
                            if (isset($stats[$id][$timePeriod][$key])) $datum += $stats[$id][$timePeriod][$key];
                            $result[trim($headers[$key])] = $datum;
                        }
                    }
                    $stats[$id][$timePeriod] = $result;
                    // TODO merge if already exists
                    //print_r($stats);
                } else {
                    echo "<br>ERROR NAME MISSING FROM ID LIST<br><br> $row" . PHP_EOL;
                    print_r($data);
                    die();
                }
            }
        } else {
            $headers = $data;
            //print_r($headers);
        }
        $row++;
    }
    fclose($handle);
}
echo "all stats loaded successfully";
foreach ($stats as $id => $stat) {
    echo $id . "<br>" . PHP_EOL;
    $doc = $db->get($id);
    echo $doc->name . "<br>" . PHP_EOL;
    // print_r($stat);
    // print_r($doc);
    $changed = false;
    if (!isset($doc->statistics)) {
        $changed = true;
        $doc->statistics = Array();
    } else {
        $doc->statistics = object_to_array($doc->statistics);
    }
    foreach ($stat as $timePeriod => $value) {
        if (!isset($doc->statistics["foiRequests"][$timePeriod])
            || $doc->statistics["foiRequests"][$timePeriod] != $value
        ) {
            $changed = true;
            $doc->statistics["foiRequests"][$timePeriod] = $value;
        }
    }
    if ($changed) {
        $db->save($doc);
    } else {
        echo "not changed" . "<br>" . PHP_EOL;
    }
    //print_r($doc);die();
}
?>
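The script above leans on an object_to_array() helper from include/common.inc.php, which this dump does not show. A minimal sketch of the assumed behaviour, under a hypothetical name: recursively convert the stdClass values CouchDB returns into plain arrays so they can be indexed as $doc->statistics["foiRequests"][$timePeriod].

<?php
// Hypothetical sketch only; the real object_to_array() lives in
// include/common.inc.php and may differ.
function object_to_array_sketch($obj)
{
    if (is_object($obj)) {
        $obj = get_object_vars($obj);
    }
    if (is_array($obj)) {
        return array_map('object_to_array_sketch', $obj);
    }
    return $obj;
}
?>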
   
file:a/admin/logo.svg (deleted)
<?xml version="1.0" encoding="utf-8"?>  
<!-- Generator: Adobe Illustrator 15.0.0, SVG Export Plug-In . SVG Version: 6.00 Build 0) -->  
<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">  
<svg version="1.1" id="Layer_1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" x="0px" y="0px"  
width="100px" height="100px" viewBox="0 0 100 100" enable-background="new 0 0 100 100" xml:space="preserve">  
<g id="docs">  
<path d="M86.107,11.001l5.229,9.952c0,0-1.832,0.083-5.297,1.95c-2.312,1.249-6.468,6.246-6.468,6.246l-7.745-11.148  
c0,0,4.2-3.996,6.513-5.242C81.805,10.889,86.107,11.001,86.107,11.001"/>  
<path d="M65.604,41.642l-3.151-9.868c0,0-5.44,3.56-8.017,4.074c-1.009,0.202-1.931,0.335-2.75,0.425L65.604,41.642z"/>  
<path d="M72.326,44.231c0.268-0.226,0.537-0.44,0.804-0.616c3.104-2.054,6.14-3.685,6.269-3.755l1.881-1.005l1.369,1.634  
l2.865,3.417l3.197-4.334L76.68,30.694l-8.74,11.847L72.326,44.231z"/>  
<path d="M39.918,31.734l4.825,1.86l3.33,0.212c0.04,0.001,0.269,0.015,0.652,0.015c0.91,0,2.798-0.072,5.196-0.551  
c1.427-0.284,5.007-2.332,7.093-3.695l2.889-1.888l1.05,3.285l2.496,7.812l5.889-7.985l-4.625,0.163l1.349-6.225l-14.928-3.233  
l-2.095,9.667c-0.531-2.599-1.841-5.727-1.841-5.727l-13.488,1.522c0,0,0.885,2.206,1.586,4.529L39.918,31.734z"/>  
<path d="M91.232,66.473c-1.102-0.691-2.322-1.143-3.414-1.434l-3.779,9.805c1.932,1.246,5.197,5.738,5.197,5.738l7.336-9.207  
C96.572,71.377,93.162,67.682,91.232,66.473z"/>  
<path d="M93.191,53.076l-3.655,1.225c-0.019,0.007-1.779,0.613-4.117,2.068l2.817,4.869l0.625,1.08  
c3.307-0.562,7.728-1.923,7.728-1.923l-2.332-15.261c0,0-2.934,1.277-5.853,2.221l2.318,2.766L93.191,53.076z"/>  
<path d="M79.271,46.91l0.865,0.334l0.459,0.801l3.504,6.05c2.646-1.636,4.61-2.287,4.61-2.287l-8.075-9.632  
c0,0-2.584,1.391-5.376,3.188L79.271,46.91z"/>  
</g>  
<g id="trunk">  
 
<radialGradient id="SVGID_1_" cx="66.0195" cy="72.8555" r="7.877" gradientTransform="matrix(1 0 0 2 0 -72.8555)" gradientUnits="userSpaceOnUse">  
<stop offset="0.6503" style="stop-color:#E61E24"/>  
<stop offset="1" style="stop-color:#9D1F1F"/>  
</radialGradient>  
<circle fill="url(#SVGID_1_)" cx="66.02" cy="72.855" r="7.877"/>  
 
<radialGradient id="SVGID_2_" cx="22.6929" cy="72.8555" r="7.877" gradientTransform="matrix(1 0 0 2 0 -72.8555)" gradientUnits="userSpaceOnUse">  
<stop offset="0.6503" style="stop-color:#E61E24"/>  
<stop offset="1" style="stop-color:#9D1F1F"/>  
</radialGradient>  
<circle fill="url(#SVGID_2_)" cx="22.693" cy="72.855" r="7.877"/>  
 
<radialGradient id="SVGID_3_" cx="15.4717" cy="59.7266" r="12.1478" gradientTransform="matrix(1 0 0 2 0 -59.7266)" gradientUnits="userSpaceOnUse">  
<stop offset="0.6503" style="stop-color:#E61E24"/>  
<stop offset="1" style="stop-color:#9D1F1F"/>  
</radialGradient>  
<path fill="url(#SVGID_3_)" d="M22.693,62.352c1.915,0,3.705,0.521,5.251,1.42V47.912H12.191L3,57.103v14.438h9.281  
C12.931,66.369,17.347,62.352,22.693,62.352z"/>  
 
<radialGradient id="SVGID_4_" cx="43.4023" cy="68.917" r="9.0473" gradientTransform="matrix(1 0 0 2 0 -68.917)" gradientUnits="userSpaceOnUse">  
<stop offset="0.6503" style="stop-color:#E61E24"/>  
<stop offset="1" style="stop-color:#9D1F1F"/>  
</radialGradient>  
<path fill="url(#SVGID_4_)" d="M55.925,69.986l-9.583-3.695H30.88c1.186,1.475,1.978,3.279,2.225,5.252h22.502  
C55.674,71.01,55.779,70.49,55.925,69.986z"/>  
 
<radialGradient id="SVGID_5_" cx="58.7637" cy="54.9512" r="25.2357" gradientTransform="matrix(1 0 0 2 0 -54.9512)" gradientUnits="userSpaceOnUse">  
<stop offset="0.6503" style="stop-color:#E61E24"/>  
<stop offset="1" style="stop-color:#9D1F1F"/>  
</radialGradient>  
<path fill="url(#SVGID_5_)" d="M79.312,48.238L39.961,33.06l-9.384,24.335l26.381,10.174c1.824-3.115,5.198-5.218,9.062-5.218  
c5.791,0,10.504,4.712,10.504,10.501c0,0.744-0.082,1.473-0.229,2.174l4.713,1.816l5.943-15.411L79.312,48.238z"/>  
</g>  
</svg>  
 
for line in `curl "http://localhost:5984/disclosr-foidocuments/_design/app/_view/byAgencyID?reduce=false&keys=%5B\"5716ce0aacfe98f7d638b7a66b7f1040\"%5D&limit=600" | xargs -L1`; do
  # echo $line
  id=`echo $line | grep -Po '_id:.*?[^\\\],' | perl -pe 's/_id://; s/^//; s/,$//'`
  rev=`echo $line | grep -Po 'rev:.*?[^\\\],'| perl -pe 's/rev://; s/^//; s/,$//'`
  if [ -n "$id" ]; then
    echo "curl -X DELETE http://localhost:5984/disclosr-foidocuments/$id?rev=$rev"
    curl -X DELETE http://localhost:5984/disclosr-foidocuments/$id?rev=$rev
  fi
done
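The loop above scrapes _id/rev pairs out of the raw view output with grep and perl. A rough PHP equivalent, a sketch assuming the same local CouchDB and byAgencyID view, that decodes the view rows as JSON instead:

<?php
// Sketch only: delete every disclosr-foidocuments row for one agency,
// assuming CouchDB on localhost:5984.
$agency = "5716ce0aacfe98f7d638b7a66b7f1040";
$url = "http://localhost:5984/disclosr-foidocuments/_design/app/_view/byAgencyID"
    . "?reduce=false&key=" . urlencode('"' . $agency . '"') . "&limit=600";
$view = json_decode(file_get_contents($url));
foreach ($view->rows as $row) {
    $deleteURL = "http://localhost:5984/disclosr-foidocuments/"
        . $row->value->_id . "?rev=" . $row->value->_rev;
    echo "curl -X DELETE $deleteURL\n";
    $context = stream_context_create(Array("http" => Array("method" => "DELETE")));
    file_get_contents($deleteURL, false, $context);
}
?>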
 
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>
    <groupId>org.lambdacomplex.contractdashboard</groupId>
    <artifactId>neo4jimporter</artifactId>
    <version>0.0.1-SNAPSHOT</version>
    <repositories>
        <repository>
            <id>http://repository.codehaus.org/</id>
            <url>http://repository.codehaus.org/</url>
        </repository>
    </repositories>
    <dependencies>
        <dependency>
            <groupId>org.neo4j</groupId>
            <artifactId>neo4j</artifactId>
            <version>2.0.0-M03</version>
        </dependency>
        <dependency>
            <groupId>postgresql</groupId>
            <artifactId>postgresql</artifactId>
            <version>9.1-901.jdbc4</version>
        </dependency>
        <dependency>
            <groupId>org.apache.commons</groupId>
            <artifactId>commons-lang3</artifactId>
            <version>3.0.1</version>
        </dependency>
        <dependency>
            <groupId>org.codehaus.woodstox</groupId>
            <artifactId>woodstox-core-asl</artifactId>
            <version>4.2.0</version>
        </dependency>
        <dependency>
            <groupId>org.codehaus.woodstox</groupId>
            <artifactId>stax2-api</artifactId>
            <version>3.1.2</version>
        </dependency>
    </dependencies>
    <build>
        <plugins>
            <plugin>
                <groupId>org.codehaus.mojo</groupId>
                <artifactId>exec-maven-plugin</artifactId>
                <version>1.2.1</version>
                <executions>
                    <execution>
                        <goals>
                            <goal>java</goal>
                        </goals>
                    </execution>
                </executions>
                <configuration>
                    <mainClass>StAXSample</mainClass>
                </configuration>
            </plugin>
            <plugin>
                <groupId>org.apache.maven.plugins</groupId>
                <artifactId>maven-compiler-plugin</artifactId>
                <configuration>
                    <source>1.6</source>
                    <target>1.6</target>
                </configuration>
            </plugin>
        </plugins>
    </build>
</project>
   
import org.neo4j.graphdb.DynamicLabel;
import org.neo4j.graphdb.DynamicRelationshipType;
import org.neo4j.graphdb.Label;
import org.neo4j.unsafe.batchinsert.BatchInserter;
import org.neo4j.unsafe.batchinsert.BatchInserters;

import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.util.HashMap;
import java.util.Map;

import javax.xml.stream.XMLInputFactory;
import javax.xml.stream.XMLStreamException;
import javax.xml.stream.XMLEventReader;
import javax.xml.stream.events.XMLEvent;

public class StAXSample {

    HashMap<String, Long> agencyIDs = new HashMap<String, Long>();
    HashMap<String, Boolean> agencyFullVersion = new HashMap<String, Boolean>();
    Label agencyLabel = DynamicLabel.label("Agency");
    HashMap<String, Long> locationIDs = new HashMap<String, Long>();
    Label locationLabel = DynamicLabel.label("Location");
    HashMap<String, Long> functionIDs = new HashMap<String, Long>();
    Label functionLabel = DynamicLabel.label("Function");
    HashMap<String, Long> statusIDs = new HashMap<String, Long>();
    Label statusLabel = DynamicLabel.label("Status");
    BatchInserter inserter;

    private String filename;

    public StAXSample() {
    }

    public static void main(String[] args) {
        /*if (args.length != 1) {
            System.out.println("Usage: StAXSample file.xml");
            System.exit(-1);
        } */

        StAXSample ss = new StAXSample();
        //ss.setFilename(args[0]);
        ss.setFilename("agency-sample.xml");
        ss.run();
    }

    public void run() {

        Map<String, String> config = new HashMap<String, String>();
        config.put("neostore.nodestore.db.mapped_memory", "90M");
        inserter = BatchInserters.inserter("target/batchinserter-example-config", config);
        inserter.createDeferredSchemaIndex(agencyLabel).on("agency_no");
        inserter.createDeferredSchemaIndex(locationLabel).on("location_name");
        inserter.createDeferredSchemaIndex(functionLabel).on("thesaurus_term");
        inserter.createDeferredSchemaIndex(statusLabel).on("status_name");

        try {
            XMLInputFactory xmlif = XMLInputFactory.newInstance();
            xmlif.setProperty(
                    XMLInputFactory.IS_REPLACING_ENTITY_REFERENCES,
                    Boolean.TRUE);
            xmlif.setProperty(
                    XMLInputFactory.IS_SUPPORTING_EXTERNAL_ENTITIES,
                    Boolean.FALSE);
            //set the IS_COALESCING property to true
            //to get whole text data as one event.
            xmlif.setProperty(XMLInputFactory.IS_COALESCING, Boolean.TRUE);

            try {
                XMLEventReader r = null;
                r = xmlif.createXMLEventReader(
                        filename,
                        //new FileInputStream(new File(xmlFileURL.toURI())));
                        new FileInputStream(new File(filename)));

                // iterate as long as there are more events on the input stream.
                // previousAgency accumulates an agency's fields across its child
                // elements, so it is declared outside the event loop
                Map<String, Object> previousAgency = new HashMap<String, Object>();
                while (r.hasNext()) {
                    XMLEvent e = r.nextEvent();
                    if (e.isStartElement()) {
                        if (hasStartTagName(e, "AGENCIES")) {
                            System.out.println("Agencies file loaded... ");
                        } else if (hasStartTagName(e, "TITLE")) {
System.out.println("TITLE is: " + getCharacters(r)); System.out.println("TITLE is: " + getCharacters(r));
previousAgency.put("title", getCharacters(r)); previousAgency.put("title", getCharacters(r));
} else if (hasStartTagName(e, "END_DATE_QUAL")) { } else if (hasStartTagName(e, "END_DATE_QUAL")) {
System.out.println("END_DATE_QUAL is: " + getCharacters(r)); System.out.println("END_DATE_QUAL is: " + getCharacters(r));
previousAgency.put("end_date_qual", getCharacters(r)); previousAgency.put("end_date_qual", getCharacters(r));
// save agency // save agency
getAgency(previousAgency); getAgency(previousAgency);
previousAgency = new HashMap<String, Object>(); previousAgency = new HashMap<String, Object>();
} else if (hasStartTagName(e, "AGENCY_LINK")) { } else if (hasStartTagName(e, "AGENCY_LINK")) {
processAgencyLink(r); processAgencyLink(r);
} else if (hasStartTagName(e, "AGENCY_LOCATION")) { } else if (hasStartTagName(e, "AGENCY_LOCATION")) {
processAgencyLocation(r); processAgencyLocation(r);
} else if (hasStartTagName(e, "AGENCY_FUNCTION")) { } else if (hasStartTagName(e, "AGENCY_FUNCTION")) {
processAgencyFunction(r); processAgencyFunction(r);
} else if (hasStartTagName(e, "AGENCY_STATUS")) { } else if (hasStartTagName(e, "AGENCY_STATUS")) {
processAgencyStatus(r); processAgencyStatus(r);
} else { } else {
System.out.println("Unhandled tag: " + getStartTagName(e) + " content:" + getCharacters(r)); System.out.println("Unhandled tag: " + getStartTagName(e) + " content:" + getCharacters(r));
} }
} }
} }
} catch (XMLStreamException ex) { } catch (XMLStreamException ex) {
System.out.println(ex.getMessage()); System.out.println(ex.getMessage());
   
if (ex.getNestedException() != null) { if (ex.getNestedException() != null) {
ex.getNestedException().printStackTrace(); ex.getNestedException().printStackTrace();
} }
} }
   
} catch (FileNotFoundException ex) { } catch (FileNotFoundException ex) {
System.err.println("Error. Cannot find \"" + filename + "\" in classpath."); System.err.println("Error. Cannot find \"" + filename + "\" in classpath.");
ex.printStackTrace(); ex.printStackTrace();
} catch (Exception ex) { } catch (Exception ex) {
ex.printStackTrace(); ex.printStackTrace();
} }
   
inserter.shutdown(); inserter.shutdown();
} }
   
private long getAgency(Map<String, Object> properties) { private long getAgency(Map<String, Object> properties) {
if (agencyIDs.get(properties.get("agency_no").toString()) == null) { if (agencyIDs.get(properties.get("agency_no").toString()) == null) {
long agencyID = inserter.createNode(properties, agencyLabel); long agencyID = inserter.createNode(properties, agencyLabel);
if (properties.values().size() > 2) { if (properties.values().size() > 2) {
agencyFullVersion.put(properties.get("agency_no").toString(), true); agencyFullVersion.put(properties.get("agency_no").toString(), true);
} }
agencyIDs.put(properties.get("agency_no").toString(), agencyID); agencyIDs.put(properties.get("agency_no").toString(), agencyID);
return agencyID; return agencyID;
} else { } else {
long agencyID = agencyIDs.get(properties.get("agency_no").toString()); long agencyID = agencyIDs.get(properties.get("agency_no").toString());
if (properties.values().size() > 2 && agencyFullVersion.get(properties.get("agency_no")) == null) { if (properties.values().size() > 2 && agencyFullVersion.get(properties.get("agency_no")) == null) {
inserter.setNodeProperties(agencyID, properties); inserter.setNodeProperties(agencyID, properties);
agencyFullVersion.put(properties.get("agency_no").toString(), true); agencyFullVersion.put(properties.get("agency_no").toString(), true);
} }
return agencyID; return agencyID;
} }
} }
   
private long getLocation(String locationName) { private long getLocation(String locationName) {
if (locationIDs.get(locationName) == null) { if (locationIDs.get(locationName) == null) {
HashMap properties = new HashMap< String,Object > (); HashMap properties = new HashMap< String,Object > ();
properties.put("location_name", locationName); properties.put("location_name", locationName);
long locationID = inserter.createNode(properties, locationLabel); long locationID = inserter.createNode(properties, locationLabel);
locationIDs.put(locationName, locationID); locationIDs.put(locationName, locationID);
return locationID; return locationID;
} else { } else {
return locationIDs.get(locationName); return locationIDs.get(locationName);
} }
} }
private long getFunction(String functionName) { private long getFunction(String functionName) {
if (functionIDs.get(functionName) == null) { if (functionIDs.get(functionName) == null) {
HashMap properties = new HashMap< String,Object > (); HashMap properties = new HashMap< String,Object > ();
properties.put("function_name", functionName); properties.put("function_name", functionName);
long functionID = inserter.createNode(properties, functionLabel); long functionID = inserter.createNode(properties, functionLabel);
functionIDs.put(functionName, functionID); functionIDs.put(functionName, functionID);
return functionID; return functionID;
} else { } else {
return functionIDs.get(functionName); return functionIDs.get(functionName);
} }
} }
private long getStatus(String statusName) { private long getStatus(String statusName) {
if (statusIDs.get(statusName) == null) { if (statusIDs.get(statusName) == null) {
HashMap properties = new HashMap< String,Object > (); HashMap properties = new HashMap< String,Object > ();
properties.put("status_name", statusName); properties.put("status_name", statusName);
long statusID = inserter.createNode(properties, statusLabel); long statusID = inserter.createNode(properties, statusLabel);
statusIDs.put(statusName, statusID); statusIDs.put(statusName, statusID);
return statusID; return statusID;
} else { } else {
return statusIDs.get(statusName); return statusIDs.get(statusName);
} }
} }
   
    private void processAgencyLink(XMLEventReader rdr) throws Exception {
        String agency_from_no = null;
        String agency_to_no = null;
        String link_type = null;
        String start_date = null;
        String start_date_qual = null;
        String end_date = null;
        String end_date_qual = null;

        while (rdr.hasNext()) {
            XMLEvent e = rdr.nextEvent();
            if (e.isStartElement()) {
                if (hasStartTagName(e, "LINK_AGENCY_NO")) {
                    agency_from_no = getCharacters(rdr);
                } else if (hasStartTagName(e, "LINK_TO_AGENCY_NO")) {
                    agency_to_no = getCharacters(rdr);
                } else if (hasStartTagName(e, "LINK_TYPE")) {
                    link_type = getCharacters(rdr);
                } else if (hasStartTagName(e, "START_DATE")) {
                    start_date = getCharacters(rdr);
                } else if (hasStartTagName(e, "START_DATE_QUAL")) {
                    start_date_qual = getCharacters(rdr);
                } else if (hasStartTagName(e, "END_DATE")) {
                    end_date = getCharacters(rdr);
                } else if (hasStartTagName(e, "END_DATE_QUAL")) {
                    end_date_qual = getCharacters(rdr);
                }
            }
            if (e.isEndElement()) {
                if (hasEndTagName(e, "AGENCY_LINK")) {

                    //System.out.println("Finished processing link: Name = " + name + "; of = " + of + "; date = " + date);
                    long agencyFromID, agencyToID;
                    Map<String, Object> agencyFromProperties = new HashMap<String, Object>();
                    agencyFromProperties.put("agency_no", agency_from_no);
                    agencyFromID = getAgency(agencyFromProperties);
                    Map<String, Object> agencyToProperties = new HashMap<String, Object>();
                    agencyToProperties.put("agency_no", agency_to_no);
                    agencyToID = getAgency(agencyToProperties);
                    Map<String, Object> relProperties = new HashMap<String, Object>();
                    relProperties.put("link_type", link_type);
                    relProperties.put("start_date", start_date);
                    relProperties.put("start_date_qual", start_date_qual);
                    relProperties.put("end_date", end_date);
                    relProperties.put("end_date_qual", end_date_qual);
                    inserter.createRelationship(agencyFromID, agencyToID,
                            DynamicRelationshipType.withName("IS_LINKED_TO"), relProperties);

                    break;
                }
            }
        }
    }

    private void processAgencyLocation(XMLEventReader rdr) throws Exception {
        String of = null;
        String name = null;
        String date = null;

        while (rdr.hasNext()) {
            XMLEvent e = rdr.nextEvent();
            if (e.isStartElement()) {
                if (hasStartTagName(e, "LOCATION_AGENCY_NO")) {
                    of = getCharacters(rdr);
                } else if (hasStartTagName(e, "LOCATION_TEXT")) {
                    name = getCharacters(rdr);
                } else if (hasStartTagName(e, "LOCATION_DATE")) {
                    date = getCharacters(rdr);
                }
            }
            if (e.isEndElement()) {
                if (hasEndTagName(e, "AGENCY_LOCATION")) {
                    System.out.println("Finished processing location: Name = " + name + "; of = " + of + "; date = " + date);
                    long locationID, agencyID;
                    locationID = getLocation(name);
                    Map<String, Object> agencyProperties = new HashMap<String, Object>();
                    agencyProperties.put("agency_no", of);
                    agencyID = getAgency(agencyProperties);
                    Map<String, Object> relProperties = new HashMap<String, Object>();
                    relProperties.put("date", date);
                    inserter.createRelationship(agencyID, locationID,
                            DynamicRelationshipType.withName("HAS_LOCATION"), relProperties);

                    break;
                }
            }
        }
    }

    private void processAgencyStatus(XMLEventReader rdr) throws Exception {
        String of = null;
        String status = null;
        String date = null;

        while (rdr.hasNext()) {
            XMLEvent e = rdr.nextEvent();
            if (e.isStartElement()) {
                if (hasStartTagName(e, "STATUS_AGENCY_NO")) {
                    of = getCharacters(rdr);
                } else if (hasStartTagName(e, "STATUS")) {
                    status = getCharacters(rdr);
                } else if (hasStartTagName(e, "STATUS_DATE")) {
                    date = getCharacters(rdr);
                }
            }
            if (e.isEndElement()) {
                if (hasEndTagName(e, "AGENCY_STATUS")) {
                    System.out.println("Finished processing status: Status = " + status + "; of = " + of + "; date = " + date);
                    long statusID, agencyID;
                    statusID = getStatus(status);
                    Map<String, Object> agencyProperties = new HashMap<String, Object>();
                    agencyProperties.put("agency_no", of);
                    agencyID = getAgency(agencyProperties);
                    Map<String, Object> relProperties = new HashMap<String, Object>();
                    relProperties.put("date", date);
                    inserter.createRelationship(agencyID, statusID,
                            DynamicRelationshipType.withName("HAS_STATUS"), relProperties);

                    break;
                }
            }
        }
    }

    private void processAgencyFunction(XMLEventReader rdr) throws Exception {
        String agency = null;
        String thesaurus_term = null;
        String start_date = null;
        String start_date_qual = null;
        String end_date = null;
        String end_date_qual = null;

        while (rdr.hasNext()) {
            XMLEvent e = rdr.nextEvent();
            if (e.isStartElement()) {
                if (hasStartTagName(e, "FUNCTION_AGENCY_NO")) {
                    agency = getCharacters(rdr);
                } else if (hasStartTagName(e, "THESAURUS_TERM")) {
                    thesaurus_term = getCharacters(rdr);
                } else if (hasStartTagName(e, "START_DATE")) {
                    start_date = getCharacters(rdr);
                } else if (hasStartTagName(e, "START_DATE_QUAL")) {
                    start_date_qual = getCharacters(rdr);
                } else if (hasStartTagName(e, "END_DATE")) {
                    end_date = getCharacters(rdr);
                } else if (hasStartTagName(e, "END_DATE_QUAL")) {
                    end_date_qual = getCharacters(rdr);
                }
            }
            if (e.isEndElement()) {
                if (hasEndTagName(e, "AGENCY_FUNCTION")) {
                    //System.out.println("Finished processing function: Name = " + name + "; of = " + of + "; date = " + date);
                    long functionID, agencyID;
                    functionID = getFunction(thesaurus_term);
                    Map<String, Object> agencyProperties = new HashMap<String, Object>();
                    agencyProperties.put("agency_no", agency);
                    agencyID = getAgency(agencyProperties);
                    Map<String, Object> relProperties = new HashMap<String, Object>();
                    relProperties.put("start_date", start_date);
                    relProperties.put("start_date_qual", start_date_qual);
                    relProperties.put("end_date", end_date);
                    relProperties.put("end_date_qual", end_date_qual);
                    inserter.createRelationship(agencyID, functionID,
                            DynamicRelationshipType.withName("HAS_FUNCTION"), relProperties);

                    break;
                }
            }
        }
    }

    private String getCharacters(XMLEventReader rdr) throws XMLStreamException {
        XMLEvent e = rdr.nextEvent();
        if (e.isCharacters()) {
            return e.asCharacters().getData();
        } else {
            return null;
        }
    }

    private boolean hasStartTagName(XMLEvent e, String name) {
        return e.asStartElement().getName().getLocalPart().equals(name);
    }

    private String getStartTagName(XMLEvent e) {
        return e.asStartElement().getName().getLocalPart();
    }

    private boolean hasEndTagName(XMLEvent e, String name) {
        return e.asEndElement().getName().getLocalPart().equals(name);
    }

    public void setFilename(String filename) {
        this.filename = filename;
    }

}
   
<?php

require_once '../include/common.inc.php';
//function createFOIDocumentsDesignDoc() {

$foidb = $server->get_db('disclosr-foidocuments');
$obj = new stdClass();
$obj->_id = "_design/" . urlencode("app");
$obj->language = "javascript";
$obj->views->all->map = "function(doc) { emit(doc._id, doc); };";
$obj->views->byDate->map = "function(doc) { if (doc.title != \"Disclosure Log Updated\") { emit(doc.date, doc); } };";
$obj->views->byDateMonthYear->map = "function(doc) { emit(doc.date, doc); };";
$obj->views->byDateMonthYear->reduce = "_count";
$obj->views->byAgencyID->map = "function(doc) { emit(doc.agencyID, doc); };";
$obj->views->byAgencyID->reduce = "_count";
$obj->views->fieldNames->map = 'function(doc) { for(var propName in doc) { emit(propName, doc._id); }}';
$obj->views->fieldNames->reduce = 'function (key, values, rereduce) { return values.length; }';
// allow safe updates (even if slightly slower due to extra: rev-detection check).
$foidb->save($obj, true);


//function createDocumentsDesignDoc() {
$docdb = $server->get_db('disclosr-documents');

$obj = new stdClass();
$obj->_id = "_design/" . urlencode("app");
$obj->language = "javascript";
$obj->views->web_server->map = "function(doc) {\n emit(doc.web_server, 1);\n}";
$obj->views->web_server->reduce = "_sum";
$obj->views->byAgency->map = "function(doc) {\n emit(doc.agencyID, 1);\n}";
$obj->views->byAgency->reduce = "_sum";
$obj->views->byURL->map = "function(doc) {\n emit(doc.url, doc);\n}";
$obj->views->agency->map = "function(doc) {\n emit(doc.agencyID, doc);\n}";
$obj->views->byWebServer->map = "function(doc) {\n emit(doc.web_server, doc);\n}";

$obj->views->datasets->map = "function(doc) {\nif (doc.fieldName == \"data\") {\n emit(doc._id, doc);\n}\n}";
$obj->views->datasetGroups->map = "function(doc) {\nif (doc.fieldName == \"data\") {\n doc.metadata[\"data.gov.au Category\"] && doc.metadata[\"data.gov.au Category\"].forEach(function(tag) {\n emit(tag, doc.url); \n });\n}\n}";
$obj->views->getValidationRequired->map = "function(doc) {\nif (doc.mime_type == \"text/html\" \n&& typeof(doc.validation) == \"undefined\") {\n emit(doc._id, doc._attachments);\n}\n}";
$docdb->save($obj, true);


//function createAgencyDesignDoc() {
$db = $server->get_db('disclosr-agencies');
$obj = new stdClass();
$obj->_id = "_design/" . urlencode("app");
$obj->language = "javascript";
$obj->views->all->map = "function(doc) { emit(doc._id, doc); };";
$obj->views->byABN->map = "function(doc) { emit(doc.abn, doc); };";
$obj->views->byCanonicalName->map = "function(doc) {
    if (doc.parentOrg || doc.orgType == 'FMA-DepartmentOfState') {
        emit(doc.name, doc);
    }
};";
$obj->views->byDeptStateName->map = "function(doc) {
    if (doc.orgType == 'FMA-DepartmentOfState') {
        emit(doc.name, doc._id);
    }
};";
$obj->views->parentOrgs->map = "function(doc) {
    if (doc.parentOrg) {
        emit(doc._id, doc.parentOrg);
    }
};";
$obj->views->byName->map = 'function(doc) {
    if (typeof(doc["status"]) == "undefined" || doc["status"] != "suspended") {
        emit(doc.name, doc._id);
        if (typeof(doc.shortName) != "undefined" && doc.shortName != doc.name) {
            emit(doc.shortName, doc._id);
        }
        for (name in doc.otherNames) {
            if (doc.otherNames[name] != "" && doc.otherNames[name] != doc.name) {
                emit(doc.otherNames[name], doc._id);
            }
        }
        for (name in doc.foiBodies) {
            if (doc.foiBodies[name] != "" && doc.foiBodies[name] != doc.name) {
                emit(doc.foiBodies[name], doc._id);
            }
        }
        for (name in doc.positions) {
            if (doc.positions[name] != "" && doc.positions[name] != doc.name) {
                emit(doc.positions[name], doc._id);
            }
        }
    }
};';

$obj->views->foiEmails->map = "function(doc) {
    emit(doc._id, doc.foiEmail);
};";

$obj->views->byLastModified->map = "function(doc) { emit(doc.metadata.lastModified, doc); }";
$obj->views->getActive->map = 'function(doc) { if (doc.status == "active") { emit(doc._id, doc); } };';
$obj->views->getSuspended->map = 'function(doc) { if (doc.status == "suspended") { emit(doc._id, doc); } };';
$obj->views->getScrapeRequired->map = "function(doc) {

    var lastScrape = Date.parse(doc.metadata.lastScraped);

    var today = new Date();

    if (!lastScrape || lastScrape.getTime() + 1000 != today.getTime()) {
        emit(doc._id, doc);
    }

};";
$obj->views->showNamesABNs->map = "function(doc) { emit(doc._id, {name: doc.name, abn: doc.abn}); };";
$obj->views->getConflicts->map = "function(doc) {
    if (doc._conflicts) {
        emit(null, [doc._rev].concat(doc._conflicts));
    }
}";
$obj->views->getStatistics->map =
    "
function (doc) {
    if (doc.statistics) {
        for (var statisticSet in doc.statistics) {
            for (var statisticPeriod in doc.statistics[statisticSet]) {
                if (doc.statistics[statisticSet][statisticPeriod]['value']) {
                    emit([statisticSet, statisticPeriod], doc.statistics[statisticSet][statisticPeriod]['value']);
                } else {
                    for (var statisticSubSet in doc.statistics[statisticSet][statisticPeriod]) {
                        if (statisticSubSet != 'source' && statisticSubSet != 'value') {
                            emit([statisticSubSet, statisticPeriod], doc.statistics[statisticSet][statisticPeriod][statisticSubSet]);
                        }
                    }
                }
            }
        }
    }
}";
$obj->views->getStatistics->reduce = '_sum';
// http://stackoverflow.com/questions/646628/javascript-startswith
$obj->views->score->map = 'if(!String.prototype.startsWith){
    String.prototype.startsWith = function (str) {
        return !this.indexOf(str);
    }
}

function(doc) {
    count = 0;
    if (doc["status"] != "suspended") {
        for(var propName in doc) {
            if(typeof(doc[propName]) != "undefined" && doc[propName] != "") {
                count++;
            }
        }
        portfolio = doc.parentOrg;
        if (doc.orgType == "FMA-DepartmentOfState") {
            portfolio = doc._id;
        }
        if (doc.orgType == "Court-Commonwealth" || doc.orgType == "FMA-DepartmentOfParliament") {
            portfolio = doc.orgType;
        }
        emit(count+doc._id, {id:doc._id, name: doc.name, score:count, orgType: doc.orgType, portfolio:portfolio});
    }
}';
$obj->views->scoreHas->map = 'if(!String.prototype.startsWith){
    String.prototype.startsWith = function (str) {
        return !this.indexOf(str);
    }
}
if(!String.prototype.endsWith){
    String.prototype.endsWith = function(suffix) {
        return this.indexOf(suffix, this.length - suffix.length) !== -1;
    };
}
function(doc) {
    if (typeof(doc["status"]) == "undefined" || doc["status"] != "suspended") {
        for(var propName in doc) {
            if(typeof(doc[propName]) != "undefined" && (propName.startsWith("has") || propName.endsWith("URL"))) {
                emit(propName, 1);
            }
        }
        emit("total", 1);
    }
}';
$obj->views->scoreHas->reduce = '_sum';
$obj->views->fieldNames->map = '
function(doc) {
    for(var propName in doc) {
        emit(propName, doc._id);
    }
}';
$obj->views->fieldNames->reduce = '_count';
// allow safe updates (even if slightly slower due to extra: rev-detection check).
$db->save($obj, true);
?>
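Once saved, the views can be sanity-checked straight over HTTP. A sketch, assuming the same local CouchDB, that reads back the getStatistics view with its _sum reduce grouped by [statisticSet, statisticPeriod]:

<?php
// Sketch only, assuming CouchDB on localhost:5984.
$url = "http://localhost:5984/disclosr-agencies/_design/app/_view/getStatistics?group=true";
$result = json_decode(file_get_contents($url));
foreach ($result->rows as $row) {
    // key is [statisticSet, statisticPeriod]; value is the summed statistic
    echo $row->key[0] . " " . $row->key[1] . ": " . $row->value . PHP_EOL;
}
?>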
   
<?php

include('template.inc.php');
include_header_documents("About");
include_once('../include/common.inc.php');
?>
<h1>About</h1>
Written and managed by Alex Sadleir (maxious [at] lambdacomplex.org)
<?php
include_footer_documents();
?>
   
<?php
include('template.inc.php');
include_once('../include/common.inc.php');
$agenciesdb = $server->get_db('disclosr-agencies');

$idtoname = Array();
foreach ($agenciesdb->get_view("app", "byCanonicalName")->rows as $row) {
    $idtoname[$row->id] = trim($row->value->name);
}
$foidocsdb = $server->get_db('disclosr-foidocuments');

include_header_documents((isset($_REQUEST['id']) ? $idtoname[$_REQUEST['id']] : 'Entries by Agency'));
$endkey = (isset($_REQUEST['end_key']) ? $_REQUEST['end_key'] : '9999-99-99');
?>
<div class="headline">Read all the information released by Australian Federal Government agencies under the FOI Act
    in one place!
</div>
<a style='float:right' href="rss.xml.php"><img src="img/feed-icon-14x14.png" alt="RSS Icon"/> All Agencies RSS Feed</a>
<br>
<?php
try {
    if (isset($_REQUEST['id'])) {
        $rows = $foidocsdb->get_view("app", "byAgencyID", $_REQUEST['id'], false, false, false)->rows;
        foreach ($rows as $row) {
            //print_r($rows);
            echo displayLogEntry($row, $idtoname);
            if (!isset($startkey))
                $startkey = $row->key;
            $endkey = $row->key;
        }
    } else {
        $rows = $foidocsdb->get_view("app", "byAgencyID?group=true", null, false, false, true)->rows;
        if ($rows) {
            function cmp($a, $b)
            {
                global $idtoname;
                return strcmp($idtoname[$a->key], $idtoname[$b->key]);
            }

            usort($rows, "cmp");
            foreach ($rows as $row) {
                echo '<a href="agency.php?id=' . $row->key . '">' . $idtoname[$row->key] . " (" . $row->value . " records)</a> <br>\n";
            }
        }
    }
} catch (SetteeRestClientException $e) {
    setteErrorHandler($e);
}
echo "<a class='btn btn-large btn-primary' href='?end_key=$endkey' style='float:right;'>next page <i class='icon-circle-arrow-right icon-white'></i></a>";
include_footer_documents();
?>
   
<?php <?php
include('template.inc.php'); include('template.inc.php');
include_header_documents("Charts"); include_header_documents("Charts");
include_once('../include/common.inc.php'); include_once('../include/common.inc.php');
$agenciesdb = $server->get_db('disclosr-agencies'); $agenciesdb = $server->get_db('disclosr-agencies');
   
$idtoname = Array(); $idtoname = Array();
  $idtofoirequestssuccessful = Array();
foreach ($agenciesdb->get_view("app", "byCanonicalName")->rows as $row) { foreach ($agenciesdb->get_view("app", "byCanonicalName")->rows as $row) {
$idtoname[$row->id] = trim($row->value->name); $idtoname[$row->id] = trim($row->value->name);
  $foirequestssuccessful = 0;
  if(isset($row->value->statistics->foiRequests)) {
  foreach ($row->value->statistics->foiRequests as $statperiod) {
  $statperiod=object_to_array($statperiod);
  if (isset($statperiod["Requests for other information granted in full"])) $foirequestssuccessful += $statperiod["Requests for other information granted in full"];
  if (isset($statperiod["Requests for other information granted in part"])) $foirequestssuccessful += $statperiod["Requests for other information granted in part"];
  }
  }
  $idtofoirequestssuccessful[$row->id] =$foirequestssuccessful;
} }
$foidocsdb = $server->get_db('disclosr-foidocuments'); $foidocsdb = $server->get_db('disclosr-foidocuments');
   
?> ?>
<div class="foundation-header"> <div class="foundation-header">
<h1><a href="about.php">Charts</a></h1> <h1><a href="about.php">Charts</a></h1>
<h4 class="subheader"></h4> <h4 class="subheader"></h4>
</div> </div>
<div id="bydate" style="width:1000px;height:300px;"></div> <div id="bydate" style="width:1000px;height:300px;"></div>
<div id="byagency" style="width:1200px;height:800px;"></div> <div id="byagency" style="width:1000px;height:1400px;"></div>
<script id="source"> <script id="source">
window.onload = function () { window.onload = function () {
$(document).ready(function () { $(document).ready(function () {
var var
d1 = [], d1 = [],
options1, options1,
o1; o1;
   
<?php <?php
try { try {
$rows = $foidocsdb->get_view("app", "byDateMonthYear?group=true",null, false,false,true)->rows; $rows = $foidocsdb->get_view("app", "byDateMonthYear?group=true",null, false,false,true)->rows;
   
   
$dataValues = Array(); $dataValues = Array();
foreach ($rows as $row) { foreach ($rows as $row) {
$dataValues[$row->key] = $row->value; $dataValues[$row->key] = $row->value;
} }
$i = 0; $i = 0;
ksort($dataValues); ksort($dataValues);
foreach ($dataValues as $key => $value) { foreach ($dataValues as $key => $value) {
$date = date_create_from_format('Y-m-d', $key); $date = date_create_from_format('Y-m-d', $key);
if (date_format($date, 'U') != "") { if (date_format($date, 'U') != "") {
echo " d1.push([".date_format($date, 'U')."000, $value]);" . PHP_EOL; echo " d1.push([".date_format($date, 'U')."000, $value]);" . PHP_EOL;
// echo " emplabels.push('$key');" . PHP_EOL; // echo " emplabels.push('$key');" . PHP_EOL;
$i++; $i++;
} }
} }
} catch (SetteeRestClientException $e) { } catch (SetteeRestClientException $e) {
setteErrorHandler($e); setteErrorHandler($e);
} }
?> ?>
   
   
        options1 = {
            xaxis: {
                mode: 'time',
                labelsAngle: 45
            },
            selection: {
                mode: 'x'
            },
            HtmlText: false,
            title: 'Disclosure Log entries added by Date'
        };

        // Draw graph with default options, overwriting with passed options
        function drawGraph(opts) {

            // Clone the options, so the 'options' variable always keeps intact.
            o1 = Flotr._.extend(Flotr._.clone(options1), opts || {});

            // Return a new graph.
            return Flotr.draw(
                document.getElementById("bydate"),
                [ d1 ],
                o1
            );
        }

        graph = drawGraph();

        Flotr.EventAdapter.observe(document.getElementById("bydate"), 'flotr:select', function (area) {
            // Draw selected area
            graph = drawGraph({
                xaxis: { min: area.x1, max: area.x2, mode: 'time', labelsAngle: 45 },
                yaxis: { min: area.y1, max: area.y2 }
            });
        });

        // When graph is clicked, draw the graph with default area.
        Flotr.EventAdapter.observe(document.getElementById("bydate"), 'flotr:click', function () {
            graph = drawGraph();
        });

    });
};
   
var d2 = [];
var d3 = [];
var agencylabels = [];
function agencytrackformatter(obj) {
    return agencylabels[Math.floor(obj.y)] + " = " + obj.x;
}
function agencytickformatter(val, axis) {
    if (agencylabels[Math.floor(val)]) {
        return (agencylabels[Math.floor(val)]);
    } else {
        return "";
    }
}
<?php
try {
    $rows = $foidocsdb->get_view("app", "byAgencyID?group=true", null, false, false, true)->rows;
    // usort comparators must return a negative, zero or positive integer;
    // returning the boolean $a->value > $b->value gives an unreliable ordering.
    function cmp($a, $b)
    {
        if ($a->value == $b->value) return 0;
        return ($a->value < $b->value) ? -1 : 1;
    }
    usort($rows, "cmp");

    $dataValues = Array();
    $i = 0;
    foreach ($rows as $row) {
        echo " d2.push([ $row->value,$i]);" . PHP_EOL;
        // Guard against agencies without FOI statistics so we never emit invalid JS.
        $successful = isset($idtofoirequestssuccessful[$row->key]) ? $idtofoirequestssuccessful[$row->key] : 0;
        echo " d3.push([ " . $successful . ",$i]);" . PHP_EOL;
        echo " agencylabels.push(['" . str_replace("'", "", $idtoname[$row->key]) . "']);" . PHP_EOL;

        $i++;
    }
} catch (SetteeRestClientException $e) {
    setteErrorHandler($e);
}
?>
// Draw the graph
Flotr.draw(
    document.getElementById("byagency"),
    [d2], // d3 (successful FOI requests per agency) is built above but not yet plotted
    {
        title: "Disclosure Log entries by Agency",
        bars: {
            show: true,
            horizontal: true,
            shadowSize: 0,
            barWidth: 0.5
        },
        mouse: {
            track: true,
            relative: true,
            trackFormatter: agencytrackformatter
        },
        yaxis: {
            minorTickFreq: 1,
            noTicks: agencylabels.length,
            showMinorLabels: true,
            tickFormatter: agencytickformatter
        },
        xaxis: {
            min: 0,
            autoscaleMargin: 1
        },
        legend: {
            show: true
        }
    }
);
</script>
   
<?php
include_footer_documents();
?>
   
   
import ckanclient
import couchdb
from ckanclient import CkanApiError
import re


class LoaderError(Exception):
    pass

# Instantiate the CKAN client.
#ckan = ckanclient.CkanClient(base_location='http://localhost:5000/api', api_key='b47b24cd-591d-40c1-8677-d73101d56d1b')
api_key = 'ff34526e-f794-4068-8235-fcbba38cd8bc'
server = 'data.disclosurelo.gs'
# The later assignments win, pointing the loader at ckan.data.gov.au.
api_key = 'c30eb6f5-0f90-47e0-bf05-9b1b4e3a461a'
server = 'ckan.data.gov.au'

ckan = ckanclient.CkanClient(base_location='http://' + server + '/api',
                             api_key=api_key)
couch = couchdb.Server('http://127.0.0.1:5984/')
#couch = couchdb.Server('http://192.168.1.113:5984/')

# https://github.com/okfn/ckanext-importlib
def munge(name):
    # convert spaces to underscores
    name = re.sub(' ', '_', name).lower()
    # convert symbols to dashes
    name = re.sub('[:]', '_-', name).lower()
    name = re.sub('[/]', '-', name).lower()
    # take out not-allowed characters
    name = re.sub('[^a-zA-Z0-9-_]', '', name).lower()
    # remove double underscores
    name = re.sub('__', '_', name).lower()
    return name


def name_munge(input_name):
    return munge(input_name.replace(' ', '').replace('.', '_').replace('&', 'and'))
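
# For reference, a couple of hedged examples of what the munging helpers above
# produce; the sample names are hypothetical and not part of the original script:
#     >>> munge('Hello: World/2012')
#     'hello_-_world-2012'
#     >>> name_munge('Dept. of Finance & Deregulation')
#     'dept_offinanceandderegulation'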
   
   
docsdb = couch['disclosr-documents']

if __name__ == "__main__":
    groups = {}
    for doc in docsdb.view('app/datasetGroups'):
        group_name = doc.key
        if group_name != "Not specified":
            pkg_name = filter(lambda x: x in '0123456789abcdefghijklmnopqrstuvwxyz-_',
                              doc.value.replace("http://data.gov.au/dataset/", '').replace('/', '')[:100]);
            if group_name in groups.keys():
                groups[group_name] = list(set(groups[group_name] + [pkg_name]))
            else:
                groups[group_name] = [pkg_name]
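
    # At this point `groups` maps each group name to a de-duplicated list of
    # munged package names, e.g. (hypothetical):
    #     {'Community': ['community-halls-brisbane-city-council', ...]}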
   
    # add dataset to group(s)
    for group_name in groups.keys():
        if group_name != "Not specified":
            group_url = name_munge(group_name[:100])
            print group_name
            print groups[group_name]
            try:
                # Update the group details
                group_entity = ckan.group_entity_get(group_url)
                print "group " + group_name + " exists"
                if 'packages' in group_entity.keys():
                    group_entity['packages'] = list(set(group_entity['packages'] + groups[group_name]))
                else:
                    group_entity['packages'] = groups[group_name]
                ckan.group_entity_put(group_entity)
            except CkanApiError, e:
                if ckan.last_status == 404:
                    print "group " + group_name + " does not exist, creating"
                    group_entity = {
                        'name': group_url,
                        'title': group_name,
                        'description': group_name,
                        'packages': groups[group_name]
                    }
                    #print group_entity
                    ckan.group_register_post(group_entity)
                elif ckan.last_status == 409:
                    print "group already exists"
                else:
                    raise LoaderError('Unexpected status %s adding to group under \'%s\': %r' % (
                        ckan.last_status, pkg_name, e.args))
   
# coding=utf-8
import ckanclient
import couchdb
from ckanclient import CkanApiError
import re
import html2text # aaronsw :(
import ckanapi # https://github.com/open-data/ckanapi
import scrape
import datetime, os, hashlib
import urllib2

class LoaderError(Exception):
    pass

import tempfile
def add_package_resource_cachedurl(ckan, package_name, url, name, format, license_id, size, **kwargs):
  excluded_apis = "recent-earthquakes,sentinel-hotspots,abc-local-stations,action-bus-service-gtfs-feed-act,current-and-future-road-reports-traffic-restrictions-in-south-australia,current-and-future-road-reports-traffic-restrictions-in-south-australia,current-and-future-road-reports-traffic-restrictions-in-south-australia,current-and-future-road-reports-traffic-restrictions-in-south-australia,current-and-future-road-reports-traffic-restrictions-in-south-australia,current-and-future-road-reports-traffic-restrictions-in-south-australia,journey-planner-data-act,live-traffic-cameras-nsw,precis-forecast-national,precis-forecast-national,precis-forecast-new-south-wales,precis-forecast-new-south-wales,precis-forecast-northern-territory,precis-forecast-northern-territory,precis-forecast-queensland,precis-forecast-queensland,precis-forecast-south-australia,precis-forecast-south-australia,precis-forecast-tasmania,precis-forecast-tasmania,precis-forecast-victoria,precis-forecast-victoria,precis-forecast-western-australia,precis-forecast-western-australia,register-of-penalty-notices-nsw,sentinel-hotspots,trove-people-and-organisations-data,weather-data-services-radar,abc-local-stations,act-emergency-services-agency-esa-28093-current-incidents,act-emergency-services-agency-esa-news-alerts,act-government-news-and-events,act-government-summaries-of-cabinet-outcomes,act-magistrates-court-judgements,act-supreme-court-judgements,act-supreme-court-sentences,actpla-latest-news,all-vacant-act-government-jobs,community-engagement-current-engagements,community-engagement-news,edd-media-releases,edd-news-and-events,freedom-of-information-foi-summaries,libraries-act-announcements,nsw-rural-fire-service-current-incidents,nsw-rural-fire-service-major-updates,precis-forecast-new-south-wales,precis-forecast-south-australia,precis-forecast-tasmania,precis-forecast-victoria,sentinel-hotspots,south-australian-road-crash-statistics,trove-people-and-organisations-data,weather-warnings-for-new-south-wales-australian-capital-territory,weather-warnings-for-northern-territory,weather-warnings-for-queensland,weather-warnings-for-south-australia,weather-warnings-for-tasmania,weather-warnings-for-victoria,weather-warnings-for-western-australia".split(",")
if "xls" in url: if "xls" in url:
format = "xls" format = "xls"
if "pdf" in url: if "pdf" in url:
format = "pdf" format = "pdf"
if "xlsx" in url: if "xlsx" in url:
format = "xlsx" format = "xlsx"
(returned_url, mime_type, content) = scrape.fetchURL(scrape.docsdb, (returned_url, mime_type, content) = scrape.fetchURL(scrape.docsdb,
url, "dataset_resource", "AGIMO", False) url, "dataset_resource", "AGIMO", False)
if mime_type in ["application/vnd.ms-excel","application/msexcel","application/x-msexcel","application/x-ms-excel","application/x-excel","application/x-dos_ms_excel","application/xls","application/x-xls"]: if mime_type in ["application/vnd.ms-excel","application/msexcel","application/x-msexcel","application/x-ms-excel","application/x-excel","application/x-dos_ms_excel","application/xls","application/x-xls"]:
format = "xls" format = "xls"
if mime_type in ["application/xlsx","application/x-xlsx","application/vnd.openxmlformats-officedocument.spreadsheetml.sheet"]: if mime_type in ["application/xlsx","application/x-xlsx","application/vnd.openxmlformats-officedocument.spreadsheetml.sheet"]:
format = "xlsx" format = "xlsx"
   
if content != None: #put file extensions on for windows users downloading files
tf = tempfile.NamedTemporaryFile(delete=False) suffix = name.encode("ascii","ignore").replace("/","")
  if len(suffix) < 5 or (suffix[-4] != "." and suffix[-5] != "."):
  suffix = suffix + "." + format
  if content != None and package_name not in excluded_apis:
  tf = tempfile.NamedTemporaryFile(suffix=suffix)
tfName = os.path.abspath(tf.name) tfName = os.path.abspath(tf.name)
print tfName print tfName
tf.seek(0) tf.seek(0)
tf.write(content) tf.write(content)
tf.flush() tf.flush()
ckan.add_package_resource (package_name, tfName, name=name, format=format, license_id=license_id) ckan.add_package_resource (package_name, tfName, name=name, format=format, license_id=license_id)
else: else:
print "fetch error" print "fetch error"
return ckan.add_package_resource(package_name, url, name=name, resource_type='data', return ckan.add_package_resource(package_name, url, name=name, resource_type='data',
format=format, format=format,
size=size, mimetype=mime_type, license_id=license_id) size=size, mimetype=mime_type, license_id=license_id)
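
# Hedged usage sketch for the helper above; the dataset name, URL and licence
# are hypothetical, not taken from this repo:
#     add_package_resource_cachedurl(ckan, 'example-dataset',
#                                    'http://data.gov.au/example.xls',
#                                    'Example spreadsheet', 'xls', 'cc-by', 12345)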
   
# Instantiate the CKAN client.
api_key = 'ff34526e-f794-4068-8235-fcbba38cd8bc'
server = 'data.disclosurelo.gs'

ckan = ckanclient.CkanClient(base_location='http://' + server + '/api',
                             api_key=api_key)
ckandirect = ckanapi.RemoteCKAN('http://' + server, api_key=api_key)
couch = couchdb.Server('http://127.0.0.1:5984/')
#couch = couchdb.Server('http://192.168.1.113:5984/')

import urllib
import urlparse


def url_fix(s, charset='utf-8'):
    """Sometimes you get an URL by a user that just isn't a real
    URL because it contains unsafe characters like ' ' and so on. This
    function can fix some of the problems in a similar way browsers
    handle data entered by the user:

    >>> url_fix(u'http://de.wikipedia.org/wiki/Elf (Begriffsklärung)')
    'http://de.wikipedia.org/wiki/Elf%20%28Begriffskl%C3%A4rung%29'

    :param charset: The target charset for the URL if the url was
    given as unicode string.
    """
    if isinstance(s, unicode):
        s = s.encode(charset, 'ignore')
    if not urlparse.urlparse(s).scheme:
        s = "http://" + s
    scheme, netloc, path, qs, anchor = urlparse.urlsplit(s)
    path = urllib.quote(path, '/%')
    qs = urllib.quote_plus(qs, ':&=')
    return urlparse.urlunsplit((scheme, netloc, path, qs, anchor))
   
# http://code.activestate.com/recipes/578019-bytes-to-human-human-to-bytes-converter/
# NOTE: 'customary' deviates from the original recipe ('K' became 'KB', etc.),
# so the "1 K" and "1 k" doctests carried over from the recipe no longer pass as written.
SYMBOLS = {
    'customary': ('B', 'KB', 'MB', 'GB', 'T', 'P', 'E', 'Z', 'Y'),
    'customary_ext': ('byte', 'kilo', 'mega', 'giga', 'tera', 'peta', 'exa',
                      'zetta', 'iotta'),
    'iec': ('Bi', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi', 'Yi'),
    'iec_ext': ('byte', 'kibi', 'mebi', 'gibi', 'tebi', 'pebi', 'exbi',
                'zebi', 'yobi'),
}


def human2bytes(s):
    """
    Attempts to guess the string format based on default symbols
    set and return the corresponding bytes as an integer.
    When unable to recognize the format ValueError is raised.

    >>> human2bytes('0 B')
    0
    >>> human2bytes('1 K')
    1024
    >>> human2bytes('1 M')
    1048576
    >>> human2bytes('1 Gi')
    1073741824
    >>> human2bytes('1 tera')
    1099511627776

    >>> human2bytes('0.5kilo')
    512
    >>> human2bytes('0.1 byte')
    0
    >>> human2bytes('1 k')  # k is an alias for K
    1024
    >>> human2bytes('12 foo')
    Traceback (most recent call last):
        ...
    ValueError: can't interpret '12 foo'
    """
    if s == None:
        return 0
    s = s.replace(',', '')
    init = s
    num = ""
    while s and s[0:1].isdigit() or s[0:1] == '.':
        num += s[0]
        s = s[1:]
    num = float(num)
    letter = s.strip()
    for name, sset in SYMBOLS.items():
        if letter in sset:
            break
    else:
        if letter == 'k':
            # treat 'k' as an alias for 'K' as per: http://goo.gl/kTQMs
            sset = SYMBOLS['customary']
            letter = letter.upper()
        else:
            raise ValueError("can't interpret %r" % init)
    prefix = {sset[0]: 1}
    for i, s in enumerate(sset[1:]):
        prefix[s] = 1 << (i + 1) * 10
    return int(num * prefix[letter])
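
# A quick hedged check of the converter against the modified symbol table above
# (value verified by hand; '10 MB' = 10 * 2**20 bytes):
#     >>> human2bytes('10 MB')
#     10485760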
   
# https://github.com/okfn/ckanext-importlib
def munge(name):
    # convert spaces to underscores
    name = re.sub(' ', '_', name).lower()
    # convert symbols to dashes
    name = re.sub('[:]', '_-', name).lower()
    name = re.sub('[/]', '-', name).lower()
    # take out not-allowed characters
    name = re.sub('[^a-zA-Z0-9-_]', '', name).lower()
    # remove double underscores
    name = re.sub('__', '_', name).lower()
    return name


def name_munge(input_name):
    return munge(input_name.replace(' ', '').replace('.', '_').replace('&', 'and'))


def get_license_id(licencename):
    map = {
        "Creative Commons - Attribution-Share Alike 2.0 Australia (CC-SA)\nThe downloadable version of the database is licensed under CC-BY-SA Creative Commons Attribution Share Alike and contains only the database fields that are released under that license. These fields are object title, object number, object description as well as temporal, spatial and dimension details. It also contains a persistent URL for each record.": 'cc-by-sa',
        "CreativeCommonsAttributionNonCommercial30AustraliaCCBYNC30": 'cc-nc',
        'Otherpleasespecify': 'notspecified',
        '': 'notspecified',
        "Publicly available data": 'notspecified',
        "CreativeCommonsAttributionNoDerivativeWorks30AustraliaCCBYND30": "cc-by-nd",
        "CreativeCommonsAttributionNonCommercialNoDerivs30AustraliaCCBYNCND30": "cc-nc-nd",
        'CreativeCommonsAttribution30AustraliaCCBY30': 'cc-by',
        "Creative Commons - Attribution 2.5 Australia (CC-BY)": 'cc-by',
        'CreativeCommonsAttributionCCBY25': 'cc-by',
        "PublicDomain": 'other-pd',
    }
    if licencename not in map.keys():
        raise Exception(licencename + " not found");
    return map[licencename];
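
# Hedged example of the licence lookup above (the key is one of the exact
# strings in the map):
#     >>> get_license_id('CreativeCommonsAttribution30AustraliaCCBY30')
#     'cc-by'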
   
goodcsvdata = "afl-in-victoria,annual-budget-initiatives-by-suburb-brisbane-city-council,athletics-in-victoria-gfyl,bicycle-racks-mosman-municipal-council,boat-ramps-brisbane-city-council,brisbane-access-ratings-database,bus-stops-brisbane-city-council,cemeteries-brisbane-city-council,cfa-locations,citycycle-stations-brisbane-city-council,community-gardens-brisbane-city-council,community-halls-brisbane-city-council,cooking-classes-gfyl,court-locations-victoria,customer-service-centres-brisbane-city-council,dance-in-victoria-gfyl,disability-activity-gfyl,dog-parks-brisbane-city-council,ferry-terminals-brisbane-city-council,fishing-club-in-victoria-gfyl,fitness-centres-in-victoria-gfyl,gardens-reserves-gfyl,golf-courses-brisbane-city-council,gymnastics-in-victoria-gfyl,historic-cemeteries-brisbane-city-council,ice-skating-centres-gfyl,immunisation-clinics-brisbane-city-council,libraries-brisbane-city-council,licenced-venues-victoria,lifesaving-locations-victoria,loading-zones-brisbane-city-council,major-projects-victoria,markets-in-victoria,martial-arts-in-victoria-gfyl,melbourne-water-use-by-postcode,members-of-parliament-both-houses-nsw,members-of-the-legislative-assembly-nsw,members-of-the-legislative-council-nsw,mfb-locations-vic,ministers-of-the-nsw-parliament,mosman-local-government-area,mosman-rider-route,mosman-wwii-honour-roll,neighbourhood-houses-gfyl,news-feeds-mosman-municipal-council,off-street-car-parks-mosman-municipal-council,orienteering-clubs-gfyl,parking-meter-areas-brisbane-city-council,parks-and-reserves-mosman-municipal-council,parks-brisbane-city-council,personal-training-gfyl,picnic-areas-brisbane-city-council,playgrounds-brisbane-city-council,playgrounds-mosman-municipal-council,police-region-crime-statistics-victoria,police-service-area-crime-statistics-victoria,pony-clubs-in-victoria-gfyl,prison-locations-victoria,public-amenities-maintained-by-mosman-council,public-art-brisbane-city-council,public-internet-locations-vic,public-toilets-brisbane-city-council,racecourse-locations-victoria,recent-development-applications-mosman-municipal-council,recreation-groups-gfyl,recreational-fishing-spots,regional-business-centres-brisbane-city-council,reports-of-swooping-birds-mosman-municipal-council,restricted-parking-areas-brisbane-city-council,rollerskating-centres-in-victoria-gfyl,sailing-clubs-gfyl,school-locations-victoria,shadow-ministers-of-the-nsw-parliament,skate-parks-gfyl,sporting-clubs-and-organisations-gfyl,stakeboard-parks-brisbane-city-council,state-bodies-gfyl,street-names-brisbane-city-council,suburbs-and-adjoining-suburbs-brisbane-city-council,swimming-pools-brisbane-city-council,swimming-pools-gfyl,tennis-courts-brisbane-city-council,top-40-book-club-reads-brisbane-city-council,tracks-and-trails-gfyl,triathlon-clubs-gfyl,urban-water-restrictions-victoria,veterinary-services-in-mosman,victorian-microbreweries,volunteering-centres-services-and-groups-victoria,walking-groups-gfyl,ward-offices-brisbane-city-council,waste-collection-days-brisbane-city-council,waste-transfer-stations-brisbane-city-council,water-consumption-in-melbourne,water-sports-in-victoria-gfyl,wifi-hot-spots-brisbane-city-council,yoga-pilates-and-tai-chi-in-victoria-gfyl,2809cycling-in-new-south-wales-what-the-data-tells-us2809-and-related-data,act-barbecue-bbq-locations,act-tafe-locations,ausindustry-locations,austender-contract-notice-export,austender-contract-notice-export,austender-contract-notice-export,austender-contract-notice-export,austender-contract-notice-export,austender-contract-
notice-export,austender-contract-notice-export,austender-contract-notice-export,australian-gas-light-company-maps,australian-gas-light-company-maps,australian-ports,australian-public-service-statistical-bulletin-2011-12,australian-public-service-statistical-bulletin-snapshot-at-december-31-2011,australian-public-service-statistical-bulletin-tables-0910,austrics-timetable-set,capital-works-call-tender-schedule,collection-item-usage-state-library-of-victoria,country-and-commodity-trade-data-spreadsheet,country-and-commodity-trade-data-spreadsheet-2,country-by-level-of-processing-trade-data-spreadsheet,crime-incident-type-and-frequency-by-capital-city-and-nationally,csiro-locations,data-from-the-oaic-public-sector-information-survey-2012,data-from-the-oaic-public-sector-information-survey-2012,data-from-the-oaic-public-sector-information-survey-2012,department-of-finance-and-deregulation-office-locations,digitised-maps,diisr-division-locations-excluding-ausindustry-enterprise-connect-and-nmi,diisr-locations,diisr-portfolio-agency-locations-excluding-csiro,distance-to-legal-service-providers-from-disadvantaged-suburbs,enterprise-connect-locations,fire-insurance-maps-sydney-block-plans-1919-1940,fire-insurance-maps-sydney-block-plans-1919-1940,first-fleet-collection,first-fleet-collection,first-fleet-maps,first-fleet-maps,freedom-of-information-annual-estimated-costs-and-staff-time-statistical-data-2011-12,freedom-of-information-quarterly-request-and-review-statistical-data-2011-12,freedom-of-information-requests-estimated-costs-and-charges-collected-1982-83-to-2011-12,higher-education-course-completions,higher-education-enrolments,historical-australian-government-contract-data,historical-australian-government-contract-data,historical-australian-government-contract-data,historical-australian-government-contract-data,historical-australian-government-contract-data,historical-australian-government-contract-data,historical-australian-government-contract-data,historical-australian-government-contract-data,historical-australian-government-contract-data,historical-australian-government-contract-data,historical-australian-government-contract-data,historical-australian-government-contract-data,historical-australian-government-contract-data,historical-australian-government-contract-data,historical-australian-government-contract-data,journey-planner-data-nt,library-catalogue-search-terms-state-library-of-victoria,location-of-act-schools,location-of-centrelink-offices,location-of-european-wasps-nests,location-of-lawyers-and-legal-service-providers-by-town,location-of-legal-assistance-service-providers,location-of-medicare-offices,location-of-medicare-offices,maps-of-the-southern-hemisphere-16th-18th-centuries,maps-of-the-southern-hemisphere-16th-18th-centuries,music-queensland,national-measurement-institute-locations,new-south-wales-officers-and-men-of-the-australian-imperial-force-a-i-f-and-the-australian-naval-for,new-south-wales-officers-and-men-of-the-australian-imperial-force-a-i-f-and-the-australian-naval-for,photographs-of-nsw-life-pre-1955,photographs-of-nsw-life-pre-1955,photographs-of-sydney-before-1885,photographs-of-sydney-before-1885,picture-queensland,plgr-28093-playgrounds-act,police-station-locations,queensland-public-libraries,rare-printed-books,rare-printed-books,real-estate-maps,regional-australia-funding-projects,sa-memory-state-library-of-south-australia,search-engine-terms-state-library-of-victoria,south-australian-photographs-state-library-of-south-australia,south-australian-sheet-mus
ic-state-library-of-south-australia,sydney-bond-store-maps-1894,sydney-bond-store-maps-1894,sydney-maps-1917,sydney-maps-1917,tafe-institute-locations-victoria,tafe-sa-campus-locations,tolt-public-toilets-act,victorian-public-library-branches-state-library-of-victoria,western-australia-public-library-network,world-war-one-photographs-by-frank-hurley,world-war-one-photographs-by-frank-hurley,citycat-timetables-brisbane-city-council,cityferry-timetables-brisbane-city-council,cost-of-salinity-to-local-infrastructure-1996-97-summary-of-component-costs-of-salinity-by-reporting,cost-of-salinity-to-local-infrastructure-1996-97-summary-of-component-costs-of-salinity-by-reporting,downstream-cost-calculator-model-and-data-for-199697-or-2001-prices,economics-of-australian-soil-conditions-199697-limiting-factor-or-relative-yield-min-of-ry_salt2000-,geographical-names-register-gnr-of-nsw,victorian-dryland-salinity-assessment-2000-d01cac_ramsar_final-xls,victorian-dryland-salinity-assessment-2000-d02cac_fauna_final-xls,victorian-dryland-salinity-assessment-2000-d03cac_fauna_dist_final-xls,victorian-dryland-salinity-assessment-2000-dc04cac_hydrol_final-xls,victorian-dryland-salinity-assessment-2000-dc05cac_wetland_final-xls,victorian-dryland-salinity-assessment-2000-dc06cac_util_final-xls,victorian-dryland-salinity-assessment-2000-dc07cac_road_final-xls,victorian-dryland-salinity-assessment-2000-dc08cac_towns_final-xls,victorian-dryland-salinity-assessment-2000-dc09cac_flora_final-xls,victorian-dryland-salinity-assessment-2000-dc10cac_flora_dist_final-xls,victorian-dryland-salinity-assessment-2000-dc12cac_infrastructure-xls,victorian-dryland-salinity-assessment-2000-dc13cac_natural_envt-xls,victorian-dryland-salinity-assessment-2000-dc14cac_agriculture-xls,victorian-dryland-salinity-assessment-2000-dc16cac_agric_cost-xls,victorian-dryland-salinity-assessment-2000-dc17cac_shallow_wt-xls,victorian-dryland-salinity-assessment-2000-dc18cac_agric_cost_time-xls,victorian-dryland-salinity-assessment-2000-dc21cac_water_resources_new-xls,victorian-dryland-salinity-assessment-2000-dc22cac_risk-xls,licensed-broadcasting-transmitter-data,nsw-crime-data,recorded-crime-dataset-nsw,crime-statistics-in-nsw-by-month,2001-02-to-2007-08-local-government-survey-victoria,2009-green-light-report,annual-statistical-reports-fire-brigades-nsw-200304,annual-statistical-reports-fire-brigades-nsw-200405,annual-statistical-reports-fire-brigades-nsw-200506,annual-statistical-reports-fire-brigades-nsw-200607,arts-on-the-map,assets-and-liabilities-of-australian-located-operations,assets-of-australian-located-operations,assets-of-australian-located-operations-by-country,assets-of-financial-institutions,back-issues-of-monthly-banking-statistics,banks-assets,banks-consolidated-group-capital,banks-consolidated-group-impaired-assets,banks-consolidated-group-off-balance-sheet-business,banks-liabilities,building-societies-selected-assets-and-liabilities,byteback2842-locations-vic,cash-management-trusts,city-of-melbourne-street-furniture-database,community-services-nsw,consolidated-exposures-immediate-and-ultimate-risk-basis,consolidated-exposures-immediate-risk-basis-foreign-claims-by-country,consolidated-exposures-immediate-risk-basis-international-claims-by-country,consolidated-exposures-ultimate-risk-basis,consolidated-exposures-ultimate-risk-basis-foreign-claims-by-country,cosolidated-exposures-immediate-risk-basis,credit-unions-selected-assets-and-liabilities,daily-net-foreign-exchange-transactions,detox-your-home,education-national-asses
sment-program-literacy-and-numeracy-nsw,employment-data-by-nsw-regions,excise-beer-clearance-data-updated-each-month-beer-clearance-summary-data,finance-companies-and-general-financiers-selected-assets-and-liabilities,foreign-exchange-transactions-and-holdings-of-official-reserve-assets,half-yearly-life-insurance-bulletin-december-2010,health-behaviours-in-nsw,international-liabilities-by-country-of-the-australian-located-operations-of-banks-and-rfcs,liabilities-and-assets-monthly,liabilities-and-assets-weekly,liabilities-of-australian-located-operations,life-insurance-offices-statutory-funds,managed-funds,monetary-policy-changes,money-market-corporations-selected-assets-and-liabilities,monthly-airport-traffic-data-for-top-ten-airports-january-1985-to-december-2008,monthly-banking-statistics-april-2011,monthly-banking-statistics-june-2011,monthly-banking-statistics-may-2011,open-market-operations-2009-to-current,projected-households-vic-rvic-msd-2006-2056,projected-population-by-age-and-sex-vic-rvic-msd-2006-2056,public-unit-trust,quarterly-bank-performance-statistics,quarterly-general-insurance-performance-statistics-march-2011,quarterly-superannuation-performance-march-2011,recorded-crime-dataset-nsw,residential-land-bulletin,resourcesmart-retailers,resourcesmart-retailers-vic,road-fatalities-nsw,securitisation-vehicles,selected-asset-and-liabilities-of-the-private-non-financial-sectors,seperannuation-funds-outside-life-offices,solar-report-vic,towns-in-time-victoria,vif2008-projected-population-by-5-year-age-groups-and-sex-sla-lga-ssd-sd-2006-2026,vif2008-projected-population-totals-and-components-vic-rvic-msd-2006-2056,vif2008-projected-population-totals-sla-lga-ssd-sd-2006-2026,arts-festivals-victoria,arts-organisations-victoria,arts-spaces-and-places-victoria,ausgrid-average-electricity-use,collecting-institutions-victoria,indigenous-arts-organisations-victoria,latest-coastal-weather-observations-for-coolangatta-qld,top-10-fiction-books-brisbane-city-council".split(",")
goodotherdata = "abc-local-stations,abc-local-stations,abc-local-stations,act-emergency-services-agency-esa-28093-current-incidents,act-government-news-and-events,act-government-summaries-of-cabinet-outcomes,act-magistrates-court-judgements,act-supreme-court-judgements,act-supreme-court-sentences,action-bus-service-gtfs-feed-act,actpla-latest-news,agricultural-commodities-for-199697-linked-to-profit-function-surfaces,agricultural-structure-classification,agricultural-structure-classification,all-vacant-act-government-jobs,annual-family-income-1996-1997-to-1998-1999-three-year-average,apvma-pubcris-dataset-for-registered-agricultural-and-veterinary-chemical-products-and-approved-acti,argus-newspaper-collection-of-photographs-state-library-of-victoria,assessment-of-terrestrial-biodiversity-2002-biodiversity-audit-data-entry-system-bades,assessment-of-terrestrial-biodiversity-2002-database,assisted-immigration-1848-1912-index,ausgrid-average-electricity-use,ausgrid-average-electricity-use-2011,ausindustry-locations,ausindustry-locations,austender-contract-notice-export,australian-broadband-guarantee,australian-broadband-guarantee,australian-data-access,australian-dryland-salinity-assessment-spatial-data-12500000-nlwra-2001,australian-dryland-salinity-assessment-spatial-data-12500000-nlwra-2001,australian-groundwater-flow-systems-national-land-and-water-resources-audit-january-2000,australian-groundwater-flow-systems-national-land-and-water-resources-audit-january-2000,australian-irrigation-areas-raster-version-1a-national-land-and-water-resources-audit,australian-irrigation-areas-raster-version-1a-national-land-and-water-resources-audit,australian-irrigation-areas-vector-version-1a-national-land-and-water-resources-audit,australian-irrigation-areas-vector-version-1a-national-land-and-water-resources-audit,australian-public-service-statistical-bulletin-2010-11,australian-water-resources-assessment-2000-database,australiana-index-state-library-of-victoria,available-water-capacity-for-australian-areas-of-intensive-agriculture-of-layer-1-a-horizon-top-soil,bicycle-racks-mosman-municipal-council,bikeways-briisbane-city-council,bikeways-briisbane-city-council,boreholes-in-the-murray-basin-southeastern-australia,boreholes-in-the-murray-basin-southeastern-australia,british-convict-transportation-registers,calculated-annual-and-monthly-potential-evaporation-mm,calculated-annual-and-monthly-potential-evaporation-mm,canberra-suburb-boundaries,catchment-and-subcatchments-grid,cemeteries-brisbane-city-council,cemeteries-brisbane-city-council,coal-fields-in-the-murray-basin-southeastern-australia,coal-fields-in-the-murray-basin-southeastern-australia,commonwealth-agencies,commonwealth-electoral-boundaries-archive-2009,commonwealth-electoral-boundaries-archive-2009,cost-of-salinity-to-local-infrastructure-1996-97-total-cost-all-infrastructure-buildings-road-rail-a,cost-of-salinity-to-local-infrastructure-1996-97-total-cost-all-infrastructure-buildings-road-rail-a,cost-of-salinity-to-local-infrastructure-1996-97-total-cost-increase-to-local-infrastructure-based-o,cost-of-salinity-to-local-infrastructure-1996-97-total-cost-of-all-infrastructure-buildings-road-rai,cost-of-salinity-to-local-infrastructure-1996-97-total-cost-of-all-infrastructure-buildings-road-rai,cost-of-salinity-to-local-infrastructure-1996-97-total-cost-of-all-infrastructure-buildings-road-rai,cost-of-salinity-to-local-infrastructure-1996-97-total-cost-of-all-infrastructure-buildings-road-rai,cost-of-salinity-to-local-infrastructure-1996-97-t
otal-cost-of-the-general-infrastructure-component-,cost-of-salinity-to-local-infrastructure-1996-97-total-cost-of-the-rail-component-of-infrastructure-,cost-of-salinity-to-local-infrastructure-1996-97-total-cost-the-general-infrastructure-component-bui,cost-of-salinity-to-local-infrastructure-1996-97-total-cost-the-road-component-of-infrastructure-bas,cost-of-salinity-to-local-infrastructure-1996-97-total-cost-the-road-component-of-infrastructure-bas,cost-of-salinity-to-local-infrastructure-1996-97-total-cost-to-the-bridge-component-of-infrastructur,cost-of-salinity-to-local-infrastructure-1996-97-total-cost-to-the-bridge-component-of-infrastructur,country-by-level-of-processing-trade-data-spreadsheet-2,country-by-level-of-processing-trade-data-spreadsheet-2011-12,crime-incidents-data-2004-international-crime-victimisation-survey-icvs-australian-component,cropping-management-practices-1998-1999,csiro-locations,csiro-locations,current-and-future-road-reports-traffic-restrictions-in-south-australia,current-and-future-road-reports-traffic-restrictions-in-south-australia,current-and-future-road-reports-traffic-restrictions-in-south-australia,current-and-future-road-reports-traffic-restrictions-in-south-australia,current-and-future-road-reports-traffic-restrictions-in-south-australia,current-and-future-road-reports-traffic-restrictions-in-south-australia,cybersafety-outreach-program,cybersafety-outreach-program,data-source-for-polygonal-data-used-by-the-asris-project-in-generation-of-modelled-surfaces,department-of-finance-and-deregulation-office-locations,department-of-finance-and-deregulation-office-locations,depositional-path-length,digital-enterprise,digital-hubs,digitised-maps,diisr-division-locations-excluding-ausindustry-enterprise-connect-and-nmi,diisr-division-locations-excluding-ausindustry-enterprise-connect-and-nmi,diisr-locations,diisr-portfolio-agency-locations-excluding-csiro,diisr-portfolio-agency-locations-excluding-csiro,directory-gov-au-full-data-export,distance-to-ridges,economics-of-australian-soil-conditions-199697-factor-most-limiting-yield-aciditysodicitysalinity,economics-of-australian-soil-conditions-199697-gross-benefit-acidity-hayr,economics-of-australian-soil-conditions-199697-gross-benefit-of-the-limiting-factor-hayr,economics-of-australian-soil-conditions-199697-gross-benefit-salinity-hayr,economics-of-australian-soil-conditions-199697-gross-benefit-sodicity-hayr,economics-of-australian-soil-conditions-199697-impact-cost-of-salinity-2000-2020-hayr,economics-of-australian-soil-conditions-199697-relative-yield-from-acidity,economics-of-australian-soil-conditions-199697-relative-yield-from-salinity-in-2000,economics-of-australian-soil-conditions-199697-relative-yield-from-salinity-in-2020,economics-of-australian-soil-conditions-199697-relative-yield-from-sodicity,edd-media-releases,edd-news-and-events,egovernment-resource-centre-website-analytics,elevation-of-the-pre-tertiary-basement-in-the-murray-basin,elevation-of-the-pre-tertiary-basement-in-the-murray-basin,enterprise-connect-locations,enterprise-connect-locations,equivalent-fresh-water-head-difference-between-the-shallowest-and-deepest-aquifers,equivalent-fresh-water-head-difference-between-the-shallowest-and-deepest-aquifers,erosion-gully-density,erosion-path-length,estimated-proportion-of-farms-carrying-out-landcare-related-work-1998-1999,estimated-value-of-agricultural-operations-evao-1996-1997,farm-equity-ratio-1996-1997-to-1998-1999-three-year-average,farm-family-cash-income-1196-1997-to-1998-1999-three-yea
r-average,farmer-population-1996,farms-with-significant-degradation-problems-irrigation-salinity-1998-1999,farms-with-significant-degradation-problems-irrigation-salinity-1998-1999-2,farms-with-significant-degradation-problems-soil-acidity-1998-1999,forests-of-australia-2003,freedom-of-information-foi-summaries,geology-lithology-12-500-000-scale,glenorchy-city-council-building-footprints,glenorchy-city-council-building-footprints,glenorchy-city-council-building-footprints,glenorchy-city-council-kerbs,glenorchy-city-council-kerbs,glenorchy-city-council-kerbs,glenorchy-city-council-stormwater-pipes,glenorchy-city-council-stormwater-pipes,glenorchy-city-council-stormwater-pipes,glenorchy-city-council-stormwater-pits,glenorchy-city-council-stormwater-pits,glenorchy-city-council-stormwater-pits,groundwater-sdl-resource-units,groundwater-sdl-resource-units,groundwater-sdl-resource-units,higher-qualifications-of-farmers-and-farm-managers-1996,historical-australian-government-contract-data,historical-australian-government-contract-data,hydrologic-indicator-sites,hydrologic-indicator-sites,immigration-land-orders-1861-1874,indicators-of-catchment-condition-in-the-intensive-land-use-zone-of-australia-biota-condition-sub-in,indicators-of-catchment-condition-in-the-intensive-land-use-zone-of-australia-catchment-condition-in,indicators-of-catchment-condition-in-the-intensive-land-use-zone-of-australia-feral-animal-density,indicators-of-catchment-condition-in-the-intensive-land-use-zone-of-australia-human-population-densi,indicators-of-catchment-condition-in-the-intensive-land-use-zone-of-australia-impoundment-density,indicators-of-catchment-condition-in-the-intensive-land-use-zone-of-australia-industrial-point-sourc,indicators-of-catchment-condition-in-the-intensive-land-use-zone-of-australia-intensive-agricultural,indicators-of-catchment-condition-in-the-intensive-land-use-zone-of-australia-land-condition-sub-ind,indicators-of-catchment-condition-in-the-intensive-land-use-zone-of-australia-native-vegetation-frag,indicators-of-catchment-condition-in-the-intensive-land-use-zone-of-australia-nutrient-point-source-,indicators-of-catchment-condition-in-the-intensive-land-use-zone-of-australia-pesticide-hazard,indicators-of-catchment-condition-in-the-intensive-land-use-zone-of-australia-predicted-2050-salinit,indicators-of-catchment-condition-in-the-intensive-land-use-zone-of-australia-protected-areas,indicators-of-catchment-condition-in-the-intensive-land-use-zone-of-australia-rivers-in-acidificatio,indicators-of-catchment-condition-in-the-intensive-land-use-zone-of-australia-rivers-in-salt-hazard,indicators-of-catchment-condition-in-the-intensive-land-use-zone-of-australia-rivers-through-forests,indicators-of-catchment-condition-in-the-intensive-land-use-zone-of-australia-soil-acidification-haz,indicators-of-catchment-condition-in-the-intensive-land-use-zone-of-australia-soil-degradation-hazar,indicators-of-catchment-condition-in-the-intensive-land-use-zone-of-australia-suspended-sediment-loa,indicators-of-catchment-condition-in-the-intensive-land-use-zone-of-australia-weed-density,integrated-vegetation-cover-2003-version-1,john-t-collins-collection-state-library-of-victoria,journal-of-the-h-m-s-endeavour-1768-1771,journey-planner-data-act,krantz-sheldon-architectural-images,land-use-of-australia-version-3-28093-20012002,lands-surveys-historic-map-series-western-australia,latest-coastal-weather-observations-for-coolangatta-qld,launceston-city-council-addresses,launceston-city-council-building-footprints,la
unceston-city-council-contours,launceston-city-council-detail-survey-drawing-file,launceston-city-council-drainage,launceston-city-council-fences,launceston-city-council-pavement,launceston-city-council-railway,launceston-city-council-roads,libraries-act-announcements,licensed-broadcasting-transmitter-data,linc-tasmania,look-up-table-of-auslig-river-basins-of-australia-1997,major-water-resources-infrastructure-part-of-the-australian-water-resources-assessment-2000-database,mean-annual-concentration-of-mineral-nitrogen-in-soil-water-mgn-kgh20-in-the-pre-1788-scenario,mean-annual-concentration-of-mineral-nitrogen-in-soil-water-mgn-kgh20-in-the-pre-1788-scenario,mean-annual-concentration-of-mineral-nitrogen-in-soil-water-mgn-kgh20-in-the-present-day-scenario,mean-annual-concentration-of-mineral-nitrogen-in-soil-water-mgn-kgh20-in-the-present-day-scenario,mean-annual-deep-drainage-mm-y-in-the-pre-1788-scenario,mean-annual-deep-drainage-mm-y-in-the-pre-1788-scenario,mean-annual-deep-drainage-mm-y-in-the-present-day-scenario,mean-annual-deep-drainage-mm-y-in-the-present-day-scenario,mean-annual-transpiration-from-the-plant-canopy-for-the-pre-1788-scenario,mean-annual-transpiration-from-the-plant-canopy-for-the-pre-1788-scenario,mean-annual-transpiration-from-the-plant-canopy-for-the-present-day-scenario,mean-annual-transpiration-from-the-plant-canopy-for-the-present-day-scenario,mean-transpiration-in-april-from-the-plant-canopy-for-the-pre-1788-scenario,mean-transpiration-in-april-from-the-plant-canopy-for-the-pre-1788-scenario,mean-transpiration-in-august-from-the-plant-canopy-for-the-pre-1788-scenario,mean-transpiration-in-august-from-the-plant-canopy-for-the-pre-1788-scenario,mean-transpiration-in-august-from-the-plant-canopy-for-the-present-day-scenario,mean-transpiration-in-august-from-the-plant-canopy-for-the-present-day-scenario,mean-transpiration-in-december-from-the-plant-canopy-for-the-pre-1788-scenario,mean-transpiration-in-december-from-the-plant-canopy-for-the-pre-1788-scenario,mean-transpiration-in-december-from-the-plant-canopy-for-the-present-day-scenario,mean-transpiration-in-december-from-the-plant-canopy-for-the-present-day-scenario,mean-transpiration-in-february-from-the-plant-canopy-for-the-pre-1788-scenario,mean-transpiration-in-february-from-the-plant-canopy-for-the-pre-1788-scenario,mean-transpiration-in-january-from-the-plant-canopy-for-the-pre-1788-scenario,mean-transpiration-in-january-from-the-plant-canopy-for-the-pre-1788-scenario,mean-transpiration-in-january-from-the-plant-canopy-for-the-present-day-scenario,mean-transpiration-in-january-from-the-plant-canopy-for-the-present-day-scenario,mean-transpiration-in-july-from-the-plant-canopy-for-the-pre-1788-scenario,mean-transpiration-in-july-from-the-plant-canopy-for-the-pre-1788-scenario,mean-transpiration-in-july-from-the-plant-canopy-for-the-present-day-scenario,mean-transpiration-in-july-from-the-plant-canopy-for-the-present-day-scenario,mean-transpiration-in-june-from-the-plant-canopy-for-the-pre-1788-scenario,mean-transpiration-in-june-from-the-plant-canopy-for-the-pre-1788-scenario,mean-transpiration-in-june-from-the-plant-canopy-for-the-present-day-scenario,mean-transpiration-in-june-from-the-plant-canopy-for-the-present-day-scenario,mean-transpiration-in-march-from-the-plant-canopy-for-the-pre-1788-scenario,mean-transpiration-in-march-from-the-plant-canopy-for-the-pre-1788-scenario,mean-transpiration-in-march-from-the-plant-canopy-for-the-present-day-scenario,mean-transpiration-in-march-from-the-plant-canopy-fo
r-the-present-day-scenario,mean-transpiration-in-march-from-the-plant-canopy-for-the-present-day-scenario-2,mean-transpiration-in-march-from-the-plant-canopy-for-the-present-day-scenario-2,mean-transpiration-in-may-from-the-plant-canopy-for-the-pre-1788-scenario,mean-transpiration-in-may-from-the-plant-canopy-for-the-pre-1788-scenario,mean-transpiration-in-may-from-the-plant-canopy-for-the-present-day-scenario,mean-transpiration-in-may-from-the-plant-canopy-for-the-present-day-scenario,mean-transpiration-in-november-from-the-plant-canopy-for-the-pre-1788-scenario,mean-transpiration-in-november-from-the-plant-canopy-for-the-pre-1788-scenario,mean-transpiration-in-november-from-the-plant-canopy-for-the-present-day-scenario,mean-transpiration-in-november-from-the-plant-canopy-for-the-present-day-scenario,mean-transpiration-in-october-from-the-plant-canopy-for-the-pre-1788-scenario,mean-transpiration-in-october-from-the-plant-canopy-for-the-pre-1788-scenario,mean-transpiration-in-october-from-the-plant-canopy-for-the-present-day-scenario,mean-transpiration-in-october-from-the-plant-canopy-for-the-present-day-scenario,mean-transpiration-in-september-from-the-plant-canopy-for-the-pre-1788-scenario,mean-transpiration-in-september-from-the-plant-canopy-for-the-pre-1788-scenario,mean-transpiration-in-september-from-the-plant-canopy-for-the-present-day-scenario,mean-transpiration-in-september-from-the-plant-canopy-for-the-present-day-scenario,mildenhall-photographs-of-early-canberra,mobility-map-brisbane-city,mobility-map-mt-coot-tha,mosman-local-government-area,mosman-rider-route,mosman-wwii-honour-roll,mosman-wwii-honour-roll,murray-darling-basin-water-resource-plan-areas-groundwater,murray-darling-basin-water-resource-plan-areas-groundwater,murray-darling-basin-water-resource-plan-areas-surface-water,murray-darling-basin-water-resource-plan-areas-surface-water,music-queensland,national-broadband-network,national-broadband-network,national-broadband-network-2011-10,national-broadband-network-2011-10,national-broadband-network-2011-12,national-broadband-network-2011-12,national-broadband-network-2012,national-broadband-network-28093-august-2011,national-broadband-network-28093-august-2011,national-broadband-network-28093-july-2011,national-broadband-network-28093-july-2011,national-broadband-network-february-2012,national-broadband-network-february-2012,national-broadband-network-september-2011,national-broadband-network-september-2011,national-library-of-australia-sheet-music-collection,national-measurement-institute-locations,national-parks-and-asset-locations-south-australia,national-public-toilet-map,new-south-wales-dryland-salinity-assessment-2000-assessmet-of-dryland-salinity-extent-2000,new-south-wales-dryland-salinity-assessment-2000-assessmet-of-dryland-salinity-extent-2000,new-south-wales-dryland-salinity-assessment-2000-assessmet-of-dryland-salinity-extent-2000,new-south-wales-dryland-salinity-assessment-2000-assessmet-of-dryland-salinity-extent-2000,new-south-wales-dryland-salinity-assessment-2000-assessmet-of-dryland-salinity-extent-2000,new-south-wales-dryland-salinity-assessment-2000-assessmet-of-dryland-salinity-extent-2000,new-south-wales-dryland-salinity-assessment-2000-assessmet-of-dryland-salinity-extent-2000,new-south-wales-dryland-salinity-assessment-2000-assessmet-of-dryland-salinity-extent-2020,new-south-wales-dryland-salinity-assessment-2000-assessmet-of-dryland-salinity-extent-2020,new-south-wales-dryland-salinity-assessment-2000-assessmet-of-dryland-salinity-extent-2020,ne
w-south-wales-dryland-salinity-assessment-2000-assessmet-of-dryland-salinity-extent-2020,new-south-wales-dryland-salinity-assessment-2000-assessmet-of-dryland-salinity-extent-2020,new-south-wales-dryland-salinity-assessment-2000-assessmet-of-dryland-salinity-extent-2020,new-south-wales-dryland-salinity-assessment-2000-assessmet-of-dryland-salinity-extent-2020,new-south-wales-dryland-salinity-assessment-2000-assessmet-of-dryland-salinity-extent-2050,new-south-wales-dryland-salinity-assessment-2000-assessmet-of-dryland-salinity-extent-2050,new-south-wales-dryland-salinity-assessment-2000-assessmet-of-dryland-salinity-extent-2050,new-south-wales-dryland-salinity-assessment-2000-assessmet-of-dryland-salinity-extent-2050,new-south-wales-dryland-salinity-assessment-2000-assessmet-of-dryland-salinity-extent-2050,new-south-wales-dryland-salinity-assessment-2000-assessmet-of-dryland-salinity-extent-2050,new-south-wales-dryland-salinity-assessment-2000-assessmet-of-dryland-salinity-extent-2050,nsw-newspapers-catalogue-data,nsw-rural-fire-service-current-incidents,nsw-rural-fire-service-major-updates,off-street-car-parks-mosman-municipal-council,open-database-brisbane-city-council,ost-of-salinity-to-local-infrastructure-1996-97-total-cost-of-the-rail-component-of-infrastructure-b,parking-areas-brisbane-city-council,parking-areas-brisbane-city-council,parks-and-reserves-mosman-municipal-council,parks-brisbane-city-council,parks-brisbane-city-council,picture-australia-metadata,picture-queensland,picture-queensland,playgrounds-mosman-municipal-council,police-station-locations,police-station-locations,port-phillip-papers-state-library-of-victoria,precis-forecast-national,precis-forecast-national,precis-forecast-new-south-wales,precis-forecast-new-south-wales,precis-forecast-new-south-wales,precis-forecast-northern-territory,precis-forecast-northern-territory,precis-forecast-queensland,precis-forecast-queensland,precis-forecast-south-australia,precis-forecast-south-australia,precis-forecast-south-australia,precis-forecast-tasmania,precis-forecast-tasmania,precis-forecast-tasmania,precis-forecast-victoria,precis-forecast-victoria,precis-forecast-victoria,precis-forecast-western-australia,precis-forecast-western-australia,public-amenities-maintained-by-mosman-council,radio-and-television-broadcasting-stations-book-internet-edition,real-estate-maps,recent-earthquakes,regional-development-australia,regional-development-australia-2011-september-2011,regional-development-australia-may-2012,reports-of-swooping-birds-mosman-municipal-council,sentinel-hotspots,sentinel-hotspots,slq-catalogue-searches,slq-catalogue-searches,slv-rural-water,slv-shipping,slwa-digital-photographic-collection,south-australian-boat-ramp-locator,south-australian-road-crash-statistics,state-library-of-victoria-online-image-collection,state-library-of-victoria-online-image-collection-inc-high-res,state-of-the-service-report-2010-11-australian-public-service-employee-survey-results,state-of-the-service-report-2010-11-australian-public-service-employee-survey-results,statistical-local-areas-1996-for-agricultural-structure-classification,surface-water-gauging-stations-part-of-the-australian-water-resources-assessment-2000-database,surface-water-gauging-stations-part-of-the-australian-water-resources-assessment-2000-database,surface-water-sdl-resource-units,surface-water-sdl-resource-units,tasmanian-herbarium,tasmanian-museum-and-art-gallery-faunal-collection".split(",")
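
# These slug whitelists mark which data.gov.au datasets may be published
# openly; any package whose name is missing from goodcsvdata and goodotherdata
# is created as private (see the 'private' flag in package_entity below).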
   
   
docsdb = couch['disclosr-documents']

if __name__ == "__main__":
    orgs_list = []
    orgs_ids = {}
    for doc in docsdb.view('app/datasets'):
        print " --- "
        print doc.id

        if doc.value['url'] != "http://data.gov.au/data/" and doc.value['agencyID'] != "qld":

            # Collect the package metadata.
            pkg_name = filter(lambda x: x in '0123456789abcdefghijklmnopqrstuvwxyz-_',
                              doc.value['url'].replace("http://data.gov.au/dataset/", '').replace('/', '')[:100])
            print pkg_name
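            # Worked example (hypothetical input): for a dataset URL of
            # "http://data.gov.au/dataset/mosman-rider-route/", the prefix and
            # slashes are stripped and filter() keeps only CKAN-safe slug
            # characters, giving pkg_name = "mosman-rider-route".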
            if pkg_name != "":

                # add to or create organization using direct API
                agency = doc.value['metadata']["Agency"]
                if agency == "APS":
                    agency = "Australian Public Service Commission"
                if agency == "Department of Broadband, Communications and the Digital Ecomomy":
                    agency = "Department of Broadband, Communications and the Digital Economy"
                if agency == "Shared Services, Treasury Directorate":
                    agency = "Shared Services Procurement, Treasury Directorate"
                if agency == "Treasury - Shared Services":
                    agency = "Shared Services Procurement, Treasury Directorate"
                if agency == "Territory and Municipal Services (TAMS)":
                    agency = "Territory and Municipal Services Directorate"
                if agency == "State Library of NSW":
                    agency = "State Library of New South Wales"
                org_name = name_munge(agency[:100])
                if org_name not in orgs_list:
                    orgs_list = ckandirect.action.organization_list()['result']
                    #print orgs_list
                    if org_name not in orgs_list:
                        try:
                            print "org not found, creating " + org_name
                            ckandirect.action.organization_create(name=org_name, title=agency,
                                                                  description=agency)
                            orgs_list.append(org_name)
                        except ckanapi.ValidationError, e:
                            print e
                            raise LoaderError('Unexpected status')
                    else:
                        print "org found, adding dataset to " + org_name

                # cache org names -> id mapping
                if org_name not in orgs_ids:
                    org = ckandirect.action.organization_show(id=org_name)
                    orgs_ids[org_name] = org["result"]["id"]
                org_id = orgs_ids[org_name]
                print "org id is " + org_id
                tags = []
                creator = doc.value['metadata']["DCTERMS.Creator"]
                if doc.value['agencyID'] == "AGIMO":
                    if len(doc.value['metadata']["Keywords / Tags"]) > 0:
                        if hasattr(doc.value['metadata']["Keywords / Tags"], '__iter__'):
                            tags = tags + doc.value['metadata']["Keywords / Tags"]
                        else:
                            tags = tags + [doc.value['metadata']["Keywords / Tags"]]

                tags = [re.sub('[^a-zA-Z0-9-_.]', '', tag.replace('&', 'and')).lower() for tag in tags if tag]
                #print tags
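                # Worked example (hypothetical tag): "Health & Safety" becomes
                # "Health and Safety", then the re.sub strips the spaces to give
                # "HealthandSafety", and .lower() yields "healthandsafety".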
                extras = []

                for extra_key in doc.value['metadata'].keys():
                    if extra_key not in ["Description", "Content-Language", "DCTERMS.Description",
                                         "Keywords / Tags",
                                         "data.gov.au Category", "Download", "Permalink", "DCTERMS.Identifier"]:
                        if doc.value['metadata'][extra_key] != None and doc.value['metadata'][extra_key] != "":
                            extras.append([extra_key, doc.value['metadata'][extra_key]])

                package_entity = {
                    'name': pkg_name,
                    'title': doc.value['metadata']['DCTERMS.Title'],
                    'url': doc.value['metadata']['DCTERMS.Source.URI'],
                    'tags': tags,  # tags are mandatory?
                    'author': creator,
                    'maintainer': creator,
                    'license_id': get_license_id(doc.value['metadata']['DCTERMS.License']),
                    'notes': html2text.html2text(doc.value['metadata']['Description']).replace('AC/a!a', '-').replace('AC/a!aC/', "'").replace("AC/a!E", ":").replace("A ", " "),
                    'owner_org': org_id,
                    'extras': extras,
                    'private': (pkg_name not in goodcsvdata and pkg_name not in goodotherdata)
                }
   
                try:
                    #print package_entity
                    ckan.package_register_post(package_entity)
                except CkanApiError, e:
                    if ckan.last_message == "{\"name\": [\"That URL is already in use.\"]}":
                        print "package already exists"
                    else:
                        print ckan.last_message
                        raise LoaderError('Unexpected status %s checking for package under \'%s\': %r' % (
                            ckan.last_status, pkg_name, e.args))
                pkg = ckan.package_entity_get(pkg_name)

                # add resources (downloadable data files)
                if 'Download' in doc.value['metadata'].keys():
                    try:
                        resources = pkg.get('resources', [])
                        if len(resources) < len(doc.value['metadata']['Download']):
                            for resource in doc.value['metadata']['Download']:

                                # http://docs.ckan.org/en/ckan-1.7/domain-model-resource.html
                                # (KML/KMZ) / (Shapefile) / (Other)
                                format = "plain"
                                if resource['format'] == '(XML)':
                                    format = 'xml'
                                if resource['format'] == '(CSV/XLS)':
                                    format = 'csv'
                                if resource['format'] == '(Shapefile)':
                                    format = 'shp'
                                if resource['format'] == '(KML/KMZ)':
                                    format = 'kml'
                                name = resource['href']
                                if 'name' in resource.keys():
                                    name = resource['name']
                                print resource
                                add_package_resource_cachedurl(ckan, pkg_name, url_fix(resource['href']), name,
                                                               format,
                                                               get_license_id(doc.value['metadata']['DCTERMS.License']),
                                                               human2bytes(resource.get('size', '0B')))
                        else:
                            print "resources already exist"
                    except CkanApiError, e:
                        if ckan.last_status == 404:
                            print "parent dataset does not exist"
                        else:
                            raise LoaderError('Unexpected status %s checking for package under \'%s\': %r' % (
                                ckan.last_status, pkg_name, e.args))
   
import sys
import os

sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
import scrape
from bs4 import BeautifulSoup
from time import mktime
import feedparser
import abc
import unicodedata
import re
import dateutil
from dateutil.parser import *
from datetime import *
import codecs
import zipfile  # needed by GenericDOCXDisclogScraper below

import difflib

from StringIO import StringIO

# Assumed imports: etree.fromstring and getdocumenttext are used by the DOCX
# scraper below but were never imported in this file; lxml and the legacy
# docx.py module are the most likely sources.
from lxml import etree
from docx import getdocumenttext

from pdfminer.pdfparser import PDFDocument, PDFParser
from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter, process_pdf
from pdfminer.pdfdevice import PDFDevice, TagExtractor
from pdfminer.converter import TextConverter
from pdfminer.cmapdb import CMapDB
from pdfminer.layout import LAParams


class GenericDisclogScraper(object):
    __metaclass__ = abc.ABCMeta
    agencyID = None
    disclogURL = None

    def remove_control_chars(self, input):
        return "".join([i for i in input if ord(i) in range(32, 127)])
   
    def getAgencyID(self):
        """ disclosr agency id """
        if self.agencyID is None:
            self.agencyID = os.path.basename(sys.argv[0]).replace(".py", "")
        return self.agencyID

    def getURL(self):
        """ disclog URL """
        if self.disclogURL is None:
            agency = scrape.agencydb.get(self.getAgencyID())
            self.disclogURL = agency['FOIDocumentsURL']
        return self.disclogURL

    @abc.abstractmethod
    def doScrape(self):
        """ do the scraping """
        return
   
   
class GenericHTMLDisclogScraper(GenericDisclogScraper):
    def doScrape(self):
        foidocsdb = scrape.couch['disclosr-foidocuments']
        (url, mime_type, rcontent) = scrape.fetchURL(scrape.docsdb,
                                                     self.getURL(), "foidocuments", self.getAgencyID())
        content = rcontent
        dochash = scrape.mkhash(content)
        doc = foidocsdb.get(dochash)
        if doc is None:
            print "saving " + dochash
            description = "This log may have updated but as it was not in a table last time we viewed it, we cannot extract what has changed. Please refer to the agency's website Disclosure Log to see the most recent entries"
            diff = ""  # default so the doc below is valid when there is no previous attachment to diff against
            last_attach = scrape.getLastAttachment(scrape.docsdb, self.getURL())
            if last_attach != None:
                html_diff = difflib.HtmlDiff()
                diff = html_diff.make_table(last_attach.read().split('\n'),
                                            content.split('\n'))
            edate = date.today().strftime("%Y-%m-%d")
            doc = {'_id': dochash, 'agencyID': self.getAgencyID(),
                   'url': self.getURL(), 'docID': dochash,
                   "date": edate, "title": "Disclosure Log Updated",
                   "description": self.remove_control_chars(description), "diff": self.remove_control_chars(diff)}
            foidocsdb.save(doc)
        else:
            print "already saved"
   
   
class GenericPDFDisclogScraper(GenericDisclogScraper):
    def doScrape(self):
        foidocsdb = scrape.couch['disclosr-foidocuments']
        (url, mime_type, content) = scrape.fetchURL(scrape.docsdb,
                                                    self.getURL(), "foidocuments", self.getAgencyID())
        laparams = LAParams()
        rsrcmgr = PDFResourceManager(caching=True)
        outfp = StringIO()
        device = TextConverter(rsrcmgr, outfp, codec='utf-8',
                               laparams=laparams)
        fp = StringIO()
        fp.write(content)

        process_pdf(rsrcmgr, device, fp, set(), caching=True,
                    check_extractable=True)
        description = outfp.getvalue()
        fp.close()
        device.close()
        outfp.close()
        dochash = scrape.mkhash(description)
        doc = foidocsdb.get(dochash)
        if doc is None:
            print "saving " + dochash
            edate = date.today().strftime("%Y-%m-%d")
            doc = {'_id': dochash, 'agencyID': self.getAgencyID(),
                   'url': self.getURL(), 'docID': dochash,
                   "date": edate, "title": "Disclosure Log Updated", "description": self.remove_control_chars(description)}
            foidocsdb.save(doc)
        else:
            print "already saved"
   
   
class GenericDOCXDisclogScraper(GenericDisclogScraper):
    def doScrape(self):
        foidocsdb = scrape.couch['disclosr-foidocuments']
        (url, mime_type, content) = scrape.fetchURL(scrape.docsdb,
                                                    self.getURL(), "foidocuments", self.getAgencyID())
        mydoc = zipfile.ZipFile(StringIO(content))  # was ZipFile(file): 'file' was never defined here
        xmlcontent = mydoc.read('word/document.xml')
        document = etree.fromstring(xmlcontent)
        ## Fetch all the text out of the document we just created
        paratextlist = getdocumenttext(document)
        # Make explicit unicode version
        newparatextlist = []
        for paratext in paratextlist:
            newparatextlist.append(paratext.encode("utf-8"))
        ## Join our documents text with two newlines under each paragraph
        description = '\n\n'.join(newparatextlist).strip(' \t\n\r')
        dochash = scrape.mkhash(description)
        doc = foidocsdb.get(dochash)

        if doc is None:
            print "saving " + dochash
            edate = date.today().strftime("%Y-%m-%d")  # was time(): no such callable is imported in this file
            doc = {'_id': dochash, 'agencyID': self.getAgencyID(),
                   'url': self.getURL(), 'docID': dochash,
                   "date": edate, "title": "Disclosure Log Updated", "description": description}
            foidocsdb.save(doc)
        else:
            print "already saved"
   
   
class GenericRSSDisclogScraper(GenericDisclogScraper):
    def doScrape(self):
        foidocsdb = scrape.couch['disclosr-foidocuments']
        (url, mime_type, content) = scrape.fetchURL(scrape.docsdb,
                                                    self.getURL(), "foidocuments", self.getAgencyID())
        feed = feedparser.parse(content)
        for entry in feed.entries:
            #print entry
            print entry.id
            dochash = scrape.mkhash(entry.id)
            doc = foidocsdb.get(dochash)
            #print doc
            if doc is None:
                print "saving " + dochash
                edate = datetime.fromtimestamp(
                    mktime(entry.published_parsed)).strftime("%Y-%m-%d")
                doc = {'_id': dochash, 'agencyID': self.getAgencyID(),
                       'url': entry.link, 'docID': entry.id,
                       "date": edate, "title": entry.title}
                self.getDescription(entry, entry, doc)
                foidocsdb.save(doc)
            else:
                print "already saved"

    def getDescription(self, content, entry, doc):
        """ get description from rss entry """
        doc.update({'description': content.summary})
        return
   
   
class GenericOAICDisclogScraper(GenericDisclogScraper):
    __metaclass__ = abc.ABCMeta

    @abc.abstractmethod
    def getColumns(self, columns):
        """ rearranges columns if required """
        return

    def getColumnCount(self):
        return 5

    def getDescription(self, content, entry, doc):
        """ get description from table cell """
        descriptiontxt = ""
        for string in content.stripped_strings:
            descriptiontxt = descriptiontxt + " \n" + string
        doc.update({'description': descriptiontxt})

    def getTitle(self, content, entry, doc):
        doc.update({'title': (''.join(content.stripped_strings))})

    def getTable(self, soup):
        return soup.table

    def getRows(self, table):
        return table.find_all('tr')

    def findColumns(self, row):
        return row.find_all('td')

    def getDocHash(self, id, date, url):
        if id.string is None:
            print "no id, using date as hash"
            return scrape.mkhash(
                self.remove_control_chars(
                    url + (''.join(date.stripped_strings))))
        else:
            return scrape.mkhash(
                self.remove_control_chars(
                    url + (''.join(id.stripped_strings))))
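
    # When a row has no usable id cell, the document hash falls back to
    # URL + date text, so the same row still maps to a stable CouchDB _id
    # across scrapes.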
   
    def getDate(self, content, entry, doc):
        strdate = ''.join(content.stripped_strings).strip()
        (a, b, c) = strdate.partition("(")
        strdate = self.remove_control_chars(a.replace("Octber", "October").replace("1012", "2012").replace("Janrurary", "January"))
        print strdate
        try:
            edate = parse(strdate, dayfirst=True, fuzzy=True).strftime("%Y-%m-%d")
        except ValueError:
            print >> sys.stderr, "ERROR date invalid %s " % strdate
            print >> sys.stderr, "ERROR date originally %s " % ''.join(content.stripped_strings).strip()
            edate = date.today().strftime("%Y-%m-%d")
        print edate
        doc.update({'date': edate})
        return
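
    # Worked example (hypothetical cell text): "3 Octber 1012 (received)"
    # partitions at "(", is cleaned to "3 October 2012", and the fuzzy
    # dateutil parse yields edate = "2012-10-03".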
   
    def getLinks(self, content, entry, doc):
        links = []
        for atag in entry.find_all("a"):
            if atag.has_key('href'):
                links.append(scrape.fullurl(content, atag['href']))
        if links != []:
            doc.update({'links': links})
        return
   
def doScrape(self): def doScrape(self):
foidocsdb = scrape.couch['disclosr-foidocuments'] foidocsdb = scrape.couch['disclosr-foidocuments']
(url, mime_type, content) = scrape.fetchURL(scrape.docsdb, (url, mime_type, content) = scrape.fetchURL(scrape.docsdb,
self.getURL(), "foidocuments", self.getAgencyID()) self.getURL(), "foidocuments", self.getAgencyID())
if content is not None: if content is not None:
if mime_type == "text/html" or mime_type == "application/xhtml+xml" or mime_type == "application/xml": if mime_type == "text/html" or mime_type == "application/xhtml+xml" or mime_type == "application/xml":
# http://www.crummy.com/software/BeautifulSoup/documentation.html # http://www.crummy.com/software/BeautifulSoup/documentation.html
print "parsing" print "parsing"
soup = BeautifulSoup(content) soup = BeautifulSoup(content)
table = self.getTable(soup) table = self.getTable(soup)
for row in self.getRows(table): for row in self.getRows(table):
columns = row.find_all('td') columns = self.findColumns(row)
if len(columns) is self.getColumnCount(): if len(columns) is self.getColumnCount():
(id, date, title, (id, date, title,
description, notes) = self.getColumns(columns) description, notes) = self.getColumns(columns)
print self.remove_control_chars( print self.remove_control_chars(
''.join(id.stripped_strings)) ''.join(id.stripped_strings))
if id.string is None: dochash = self.getDocHash(id,date,url)
dochash = scrape.mkhash(  
self.remove_control_chars(  
url + (''.join(date.stripped_strings))))  
else:  
dochash = scrape.mkhash(  
self.remove_control_chars(  
url + (''.join(id.stripped_strings))))  
doc = foidocsdb.get(dochash) doc = foidocsdb.get(dochash)
   
if doc is None: if doc is None:
print "saving " + dochash print "saving " + dochash
doc = {'_id': dochash, doc = {'_id': dochash,
'agencyID': self.getAgencyID(), 'agencyID': self.getAgencyID(),
'url': self.getURL(), 'url': self.getURL(),
'docID': (''.join(id.stripped_strings))} 'docID': (''.join(id.stripped_strings))}
self.getLinks(self.getURL(), row, doc) self.getLinks(self.getURL(), row, doc)
self.getTitle(title, row, doc) self.getTitle(title, row, doc)
self.getDate(date, row, doc) self.getDate(date, row, doc)
self.getDescription(description, row, doc) self.getDescription(description, row, doc)
if notes is not None: if notes is not None:
doc.update({'notes': ( doc.update({'notes': (
''.join(notes.stripped_strings))}) ''.join(notes.stripped_strings))})
badtitles = ['-', 'Summary of FOI Request' badtitles = ['-', 'Summary of FOI Request'
, 'FOI request(in summary form)' , 'FOI request(in summary form)'
, 'Summary of FOI request received by the ASC', , 'Summary of FOI request received by the ASC',
'Summary of FOI request received by agency/minister', 'Summary of FOI request received by agency/minister',
'Description of Documents Requested', 'FOI request', 'Description of Documents Requested', 'FOI request',
'Description of FOI Request', 'Summary of request', 'Description', 'Summary', 'Description of FOI Request', 'Summary of request', 'Description', 'Summary',
'Summary of FOIrequest received by agency/minister', 'Summary of FOIrequest received by agency/minister',
'Summary of FOI request received', 'Description of FOI Request', 'Summary of FOI request received', 'Description of FOI Request',
"FOI request", 'Results 1 to 67 of 67'] "FOI request", 'Results 1 to 67 of 67']
if doc['title'] not in badtitles and 'description' in doc.keys() and doc['description'] != '': if doc['title'] not in badtitles and 'description' in doc.keys() and doc['description'] != '':
print "saving" print "saving"
foidocsdb.save(doc) foidocsdb.save(doc)
else: else:
print "already saved " + dochash print "already saved " + dochash
   
elif len(row.find_all('th')) == self.getColumnCount(): elif len(row.find_all('th')) == self.getColumnCount():
print "header row" print "header row"
   
else: else:
print >> sys.stderr, "ERROR number of columns incorrect" print >> sys.stderr, "ERROR number of columns incorrect"
print row print row
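# A minimal sketch (not a file from this repo) of the subclass contract doScrape() expects:
# override getTable/getColumnCount/getColumns and the generic base class does the rest.
# The element id "content" and the 5-column layout are illustrative assumptions; the real
# scrapers below resolve their URL/agency through the base class in the same way.
import sys, os
sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
import genericScrapers

class ExampleScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
    def getTable(self, soup):
        return soup.find(id="content").table
    def getColumnCount(self):
        return 5
    def getColumns(self, columns):
        (id, date, title, description, notes) = columns
        return (id, date, title, description, notes)

if __name__ == '__main__':
    ExampleScraperImplementation().doScrape()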
   
<?php <?php
include('template.inc.php'); include('template.inc.php');
include_header_documents(""); include_header_documents("");
include_once('../include/common.inc.php'); include_once('../include/common.inc.php');
$endkey = (isset($_REQUEST['end_key']) ? $_REQUEST['end_key'] : '9999-99-99'); $endkey = (isset($_REQUEST['end_key']) ? $_REQUEST['end_key'] : '9999-99-99');
$enddocid = (isset($_REQUEST['end_docid']) ? $_REQUEST['end_docid'] : null); $enddocid = (isset($_REQUEST['end_docid']) ? $_REQUEST['end_docid'] : null);
?> ?>
<div class="headline">Read all the information released by Australian Federal Government agencies under the FOI Act in <div class="headline">Read all the information released by Australian Federal Government agencies under the FOI Act in
one place! one place!
</div> </div>
<a style='float:right' href="rss.xml.php"><img src="img/feed-icon-14x14.png" alt="RSS Icon"/> All Agencies RSS Feed</a> <a style='float:right' href="rss.xml.php"><img src="img/feed-icon-14x14.png" alt="RSS Icon"/> All Agencies RSS Feed</a>
<br> <br>
<?php <?php
$agenciesdb = $server->get_db('disclosr-agencies'); $agenciesdb = $server->get_db('disclosr-agencies');
   
$idtoname = Array(); $idtoname = Array();
foreach ($agenciesdb->get_view("app", "byCanonicalName")->rows as $row) { foreach ($agenciesdb->get_view("app", "byCanonicalName")->rows as $row) {
$idtoname[$row->id] = trim($row->value->name); $idtoname[$row->id] = trim($row->value->name);
} }
$foidocsdb = $server->get_db('disclosr-foidocuments'); $foidocsdb = $server->get_db('disclosr-foidocuments');
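// pagination: pull 20 rows of the byDate view descending from $endkey/$enddocid; the last row seen seeds the "next page" link below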
  //print_r($foidocsdb);
try { try {
$rows = $foidocsdb->get_view("app", "byDate", Array($endkey, '0000-00-00'), true, 20, null, $enddocid)->rows; $rows = $foidocsdb->get_view("app", "byDate", Array($endkey, '0000-00-00'), true, 20, null, $enddocid)->rows;
if ($rows) { if ($rows) {
foreach ($rows as $key => $row) { foreach ($rows as $key => $row) {
echo displayLogEntry($row, $idtoname); echo displayLogEntry($row, $idtoname);
if (!isset($startkey)) if (!isset($startkey))
$startkey = $row->key; $startkey = $row->key;
$endkey = $row->key; $endkey = $row->key;
$enddocid = $row->value->_id; $enddocid = $row->value->_id;
} }
} }
} catch (SetteeRestClientException $e) { } catch (SetteeRestClientException $e) {
setteErrorHandler($e); setteErrorHandler($e);
} }
echo "<a class='btn btn-large btn-primary' href='?end_key=$endkey&amp;end_docid=$enddocid' style='float:right;'>next page <i class='icon-circle-arrow-right icon-white'></i></a>"; echo "<a class='btn btn-large btn-primary' href='?end_key=$endkey&amp;end_docid=$enddocid' style='float:right;'>next page <i class='icon-circle-arrow-right icon-white'></i></a>";
include_footer_documents(); include_footer_documents();
?> ?>
   
# www.robotstxt.org/ # www.robotstxt.org/
# http://code.google.com/web/controlcrawlindex/ # http://code.google.com/web/controlcrawlindex/
   
User-agent: * User-agent: *
Disallow: /admin/ Disallow: /admin/
  Disallow: /viewDocument.php
Sitemap: http://disclosurelo.gs/sitemap.xml.php Sitemap: http://disclosurelo.gs/sitemap.xml.php
<?php <?php
   
// Agency X updated Y, new files, diff of plain text/link text, // Agency X updated Y, new files, diff of plain text/link text,
// feed for just one agency or all // feed for just one agency or all
// This is a minimum example of using the Universal Feed Generator Class // This is a minimum example of using the Universal Feed Generator Class
include("../lib/FeedWriter/FeedTypes.php"); include("../lib/FeedWriter/FeedTypes.php");
include_once('../include/common.inc.php'); include_once('../include/common.inc.php');
//Creating an instance of FeedWriter class. //Creating an instance of FeedWriter class.
$TestFeed = new RSS2FeedWriter(); $TestFeed = new RSS2FeedWriter();
//Setting the channel elements //Setting the channel elements
//Retrieving information from the database //Retrieving information from the database
$idtoname = Array(); $idtoname = Array();
$agenciesdb = $server->get_db('disclosr-agencies'); $agenciesdb = $server->get_db('disclosr-agencies');
foreach ($agenciesdb->get_view("app", "byCanonicalName")->rows as $row) { foreach ($agenciesdb->get_view("app", "byCanonicalName")->rows as $row) {
$idtoname[$row->id] = trim($row->value->name); $idtoname[$row->id] = trim($row->value->name);
} }
$foidocsdb = $server->get_db('disclosr-foidocuments'); $foidocsdb = $server->get_db('disclosr-foidocuments');
if (isset($_REQUEST['id'])) { if (isset($_REQUEST['id'])) {
$rows = $foidocsdb->get_view("app", "byAgencyID", $_REQUEST['id'], false, false, false)->rows; $rows = $foidocsdb->get_view("app", "byAgencyID", $_REQUEST['id'], false, false, false)->rows;
$title = $idtoname[$_REQUEST['id']]; $title = $idtoname[$_REQUEST['id']];
} else { } else {
$rows = $foidocsdb->get_view("app", "byDate", Array('9999-99-99', '0000-00-00', 50), true)->rows; $rows = $foidocsdb->get_view("app", "byDate", Array('9999-99-99', '0000-00-00', 50), true)->rows;
$title = 'All Agencies'; $title = 'All Agencies';
} }
//Use wrapper functions for common channel elements //Use wrapper functions for common channel elements
$TestFeed->setTitle('disclosurelo.gs Newest Entries - ' . $title); $TestFeed->setTitle('disclosurelo.gs Newest Entries - ' . $title);
$TestFeed->setLink('http://disclosurelo.gs/rss.xml.php' . (isset($_REQUEST['id']) ? '?id=' . $_REQUEST['id'] : '')); $TestFeed->setLink('http://disclosurelo.gs/rss.xml.php' . (isset($_REQUEST['id']) ? '?id=' . $_REQUEST['id'] : ''));
$TestFeed->setDescription('disclosurelo.gs Newest Entries - ' . $title); $TestFeed->setDescription('disclosurelo.gs Newest Entries - ' . $title);
$TestFeed->setChannelElement('language', 'en-us'); $TestFeed->setChannelElement('language', 'en-us');
$TestFeed->setChannelElement('pubDate', date(DATE_RSS, time())); $TestFeed->setChannelElement('pubDate', date(DATE_RSS, time()));
   
   
//print_r($rows); //print_r($rows);
$i = 0;
foreach ($rows as $row) { foreach ($rows as $row) {
//Create an empty FeedItem //Create an empty FeedItem
$newItem = $TestFeed->createNewItem(); $newItem = $TestFeed->createNewItem();
//Add elements to the feed item //Add elements to the feed item
$newItem->setTitle($row->value->title); $newItem->setTitle(preg_replace('/[\x00-\x1F\x80-\xFF]/', '', $row->value->title));
$newItem->setLink("http://disclosurelo.gs/view.php?id=" . $row->value->_id); $newItem->setLink("http://disclosurelo.gs/view.php?id=" . $row->value->_id);
$newItem->setDate(strtotime($row->value->date)); $newItem->setDate(strtotime($row->value->date));
$newItem->setDescription(displayLogEntry($row, $idtoname)); $newItem->setDescription(displayLogEntry($row, $idtoname));
$newItem->setAuthor($idtoname[$row->value->agencyID]); $newItem->setAuthor($idtoname[$row->value->agencyID]);
$newItem->addElement('guid', "http://disclosurelo.gs/view.php?id=" . $row->value->_id, array('isPermaLink' => 'true')); $newItem->addElement('guid', "http://disclosurelo.gs/view.php?id=" . $row->value->_id, array('isPermaLink' => 'true'));
//Now add the feed item //Now add the feed item
$TestFeed->addItem($newItem); $TestFeed->addItem($newItem);
  $i++;
  if ($i > 50) break;
} }
//OK. Everything is done. Now generate the feed. //OK. Everything is done. Now generate the feed.
$TestFeed->generateFeed(); $TestFeed->generateFeed();
?> ?>
   
  #!/bin/bash
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
  echo $DIR
cd "$DIR" cd "$DIR"
echo "" > /tmp/disclosr-error echo "" > /tmp/disclosr-error
for f in scrapers/*.py; do for f in $DIR/scrapers/*.py; do
echo "Processing $f file.."; echo "Processing $f file..";
md5=`md5sum /tmp/disclosr-error` md5=`md5sum /tmp/disclosr-error`
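# 3>&1 1>&2 2>&3 swaps stdout and stderr, so tee appends only the scraper's stderr to the error log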
python $f 3>&1 1>&2 2>&3 | tee --append /tmp/disclosr-error; python $f 3>&1 1>&2 2>&3 | tee --append /tmp/disclosr-error;
md52=`md5sum /tmp/disclosr-error` md52=`md5sum /tmp/disclosr-error`
if [ "$md5" != "$md52" ]; then if [ "$md5" != "$md52" ]; then
echo "^^^^^^^^^^^^^^ $f" >> /tmp/disclosr-error; echo "^^^^^^^^^^^^^^ $f" >> /tmp/disclosr-error;
fi fi
if [ "$?" -ne "0" ]; then if [ "$?" -ne "0" ]; then
echo "error"; echo "error";
sleep 1; sleep 1;
fi fi
done done
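# query the byDate view once after scraping, presumably to trigger CouchDB view (re)indexing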
  curl "localhost:5984/disclosr-foidocuments/_design/app/_view/byDate?startkey=\"9999-99-99\"&endkey=\"0000-00-00\"&descending=true&limit=20"
if [ -s /tmp/disclosr-error ] ; then if [ -s /tmp/disclosr-error ] ; then
echo "emailling logs.."; echo "emailling logs..";
mail -E -s "Disclosr errors" maxious@lambdacomplex.org < /tmp/disclosr-error ; mail -E -s "Disclosr errors" maxious@lambdacomplex.org < /tmp/disclosr-error ;
fi fi
   
   
#http://packages.python.org/CouchDB/client.html #http://packages.python.org/CouchDB/client.html
import couchdb import couchdb
import urllib2 import urllib2
from BeautifulSoup import BeautifulSoup from bs4 import BeautifulSoup
import re import re
import hashlib import hashlib
from urlparse import urljoin from urlparse import urljoin
import time import time
import os import os
import sys import sys
import mimetypes import mimetypes
import urllib import urllib
import urlparse import urlparse
import socket import socket
   
#couch = couchdb.Server('http://192.168.1.148:5984/') #couch = couchdb.Server('http://192.168.1.148:5984/')
#couch = couchdb.Server('http://192.168.1.113:5984/') #couch = couchdb.Server('http://192.168.1.113:5984/')
couch = couchdb.Server('http://127.0.0.1:5984/') couch = couchdb.Server('http://127.0.0.1:5984/')
   
   
def mkhash(input): def mkhash(input):
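# md5 hex digest of a URL/id string; fetchURL uses this as the CouchDB document _id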
return hashlib.md5(input).hexdigest().encode("utf-8") return hashlib.md5(input).hexdigest().encode("utf-8")
   
   
def canonurl(url): def canonurl(url):
r"""Return the canonical, ASCII-encoded form of a UTF-8 encoded URL, or '' r"""Return the canonical, ASCII-encoded form of a UTF-8 encoded URL, or ''
if the URL looks invalid. if the URL looks invalid.
>>> canonurl('\xe2\x9e\xa1.ws') # tinyarro.ws >>> canonurl('\xe2\x9e\xa1.ws') # tinyarro.ws
'http://xn--hgi.ws/' 'http://xn--hgi.ws/'
""" """
# strip spaces at the ends and ensure it's prefixed with 'scheme://' # strip spaces at the ends and ensure it's prefixed with 'scheme://'
url = url.strip() url = url.strip()
if not url: if not url:
return '' return ''
if not urlparse.urlsplit(url).scheme: if not urlparse.urlsplit(url).scheme:
url = 'http://' + url url = 'http://' + url
   
# turn it into Unicode # turn it into Unicode
#try: #try:
# url = unicode(url, 'utf-8') # url = unicode(url, 'utf-8')
#except UnicodeDecodeError: #except UnicodeDecodeError:
# return '' # bad UTF-8 chars in URL # return '' # bad UTF-8 chars in URL
   
# parse the URL into its components # parse the URL into its components
parsed = urlparse.urlsplit(url) parsed = urlparse.urlsplit(url)
scheme, netloc, path, query, fragment = parsed scheme, netloc, path, query, fragment = parsed
   
# ensure scheme is a letter followed by letters, digits, and '+-.' chars # ensure scheme is a letter followed by letters, digits, and '+-.' chars
if not re.match(r'[a-z][-+.a-z0-9]*$', scheme, flags=re.I): if not re.match(r'[a-z][-+.a-z0-9]*$', scheme, flags=re.I):
return '' return ''
scheme = str(scheme) scheme = str(scheme)
   
# ensure domain and port are valid, eg: sub.domain.<1-to-6-TLD-chars>[:port] # ensure domain and port are valid, eg: sub.domain.<1-to-6-TLD-chars>[:port]
match = re.match(r'(.+\.[a-z0-9]{1,6})(:\d{1,5})?$', netloc, flags=re.I) match = re.match(r'(.+\.[a-z0-9]{1,6})(:\d{1,5})?$', netloc, flags=re.I)
if not match: if not match:
return '' return ''
domain, port = match.groups() domain, port = match.groups()
netloc = domain + (port if port else '') netloc = domain + (port if port else '')
netloc = netloc.encode('idna') netloc = netloc.encode('idna')
   
# ensure path is valid and convert Unicode chars to %-encoded # ensure path is valid and convert Unicode chars to %-encoded
if not path: if not path:
path = '/' # eg: 'http://google.com' -> 'http://google.com/' path = '/' # eg: 'http://google.com' -> 'http://google.com/'
path = urllib.quote(urllib.unquote(path.encode('utf-8')), safe='/;') path = urllib.quote(urllib.unquote(path.encode('utf-8')), safe='/;')
   
# ensure query is valid # ensure query is valid
query = urllib.quote(urllib.unquote(query.encode('utf-8')), safe='=&?/') query = urllib.quote(urllib.unquote(query.encode('utf-8')), safe='=&?/')
   
# ensure fragment is valid # ensure fragment is valid
fragment = urllib.quote(urllib.unquote(fragment.encode('utf-8'))) fragment = urllib.quote(urllib.unquote(fragment.encode('utf-8')))
   
# piece it all back together, truncating it to a maximum of 4KB # piece it all back together, truncating it to a maximum of 4KB
url = urlparse.urlunsplit((scheme, netloc, path, query, fragment)) url = urlparse.urlunsplit((scheme, netloc, path, query, fragment))
return url[:4096] return url[:4096]
   
   
def fullurl(url, href): def fullurl(url, href):
href = href.replace(" ", "%20") href = href.replace(" ", "%20")
href = re.sub('#.*$', '', href) href = re.sub('#.*$', '', href)
return urljoin(url, href) return urljoin(url, href)
   
#http://diveintopython.org/http_web_services/etags.html #http://diveintopython.org/http_web_services/etags.html
class NotModifiedHandler(urllib2.BaseHandler): class NotModifiedHandler(urllib2.BaseHandler):
def http_error_304(self, req, fp, code, message, headers): def http_error_304(self, req, fp, code, message, headers):
addinfourl = urllib2.addinfourl(fp, headers, req.get_full_url()) addinfourl = urllib2.addinfourl(fp, headers, req.get_full_url())
addinfourl.code = code addinfourl.code = code
return addinfourl return addinfourl
   
   
def getLastAttachment(docsdb, url): def getLastAttachment(docsdb, url):
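# assumes the last key in doc["_attachments"] is the newest "<epoch>-<filename>" attachment written by fetchURL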
hash = mkhash(url) hash = mkhash(url)
doc = docsdb.get(hash) doc = docsdb.get(hash)
if doc != None and "_attachments" in doc.keys(): if doc != None and "_attachments" in doc.keys():
last_attachment_fname = doc["_attachments"].keys()[-1] last_attachment_fname = doc["_attachments"].keys()[-1]
last_attachment = docsdb.get_attachment(doc, last_attachment_fname) last_attachment = docsdb.get_attachment(doc, last_attachment_fname)
return last_attachment return last_attachment
else: else:
return None return None
   
   
def fetchURL(docsdb, url, fieldName, agencyID, scrape_again=True): def fetchURL(docsdb, url, fieldName, agencyID, scrape_again=True):
url = canonurl(url) url = canonurl(url)
hash = mkhash(url) hash = mkhash(url)
req = urllib2.Request(url) req = urllib2.Request(url)
print "Fetching %s (%s)" % (url, hash) print "Fetching %s (%s)" % (url, hash)
if url.startswith("mailto") or url.startswith("javascript") or url.startswith("#") or url == None or url == "": if url.startswith("mailto") or url.startswith("javascript") or url.startswith("#") or url == None or url == "":
print >> sys.stderr, "Not a valid HTTP url" print >> sys.stderr, "Not a valid HTTP url"
return (None, None, None) return (None, None, None)
doc = docsdb.get(hash) doc = docsdb.get(hash)
if doc == None: if doc == None:
doc = {'_id': hash, 'agencyID': agencyID, 'url': url, 'fieldName': fieldName, 'type': 'website'} doc = {'_id': hash, 'agencyID': agencyID, 'url': url, 'fieldName': fieldName, 'type': 'website'}
else: else:
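# throttle: skip the fetch if scraped within the last 60*24*14 seconds (~5.6 hours; possibly meant to be 14 days)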
if (('page_scraped' in doc) and ((time.time() - doc['page_scraped']) < 60 * 24 * 14) or (scrape_again == False)): if (('page_scraped' in doc) and ((time.time() - doc['page_scraped']) < 60 * 24 * 14) or (scrape_again == False)):
print "Uh oh, trying to scrape URL again too soon!" + hash print "Uh oh, trying to scrape URL again too soon!" + hash
if (not doc.has_key('file_size') or doc["file_size"] != "0") and "_attachments" in doc.keys(): if (not doc.has_key('file_size') or doc["file_size"] != "0") and "_attachments" in doc.keys():
last_attachment_fname = doc["_attachments"].keys()[-1] last_attachment_fname = doc["_attachments"].keys()[-1]
last_attachment = docsdb.get_attachment(doc, last_attachment_fname) last_attachment = docsdb.get_attachment(doc, last_attachment_fname)
content = last_attachment.read() content = last_attachment.read()
mime_type = doc['mime_type'] mime_type = doc['mime_type']
else: else:
content = None content = None
mime_type = None mime_type = None
return (doc['url'], mime_type, content) return (doc['url'], mime_type, content)
   
req.add_header("User-Agent", "Mozilla/4.0 (compatible; Prometheus webspider; owner maxious@lambdacomplex.org)") req.add_header("User-Agent", "Mozilla/4.0 (compatible; Prometheus webspider; owner maxious@lambdacomplex.org)")
#if there is a previous version stored in couchdb, load caching helper tags #if there is a previous version stored in couchdb, load caching helper tags
if doc.has_key('etag'): if doc.has_key('etag'):
req.add_header("If-None-Match", doc['etag']) req.add_header("If-None-Match", doc['etag'])
if doc.has_key('last_modified'): if doc.has_key('last_modified'):
req.add_header("If-Modified-Since", doc['last_modified']) req.add_header("If-Modified-Since", doc['last_modified'])
   
opener = urllib2.build_opener(NotModifiedHandler()) opener = urllib2.build_opener(NotModifiedHandler())
try: try:
url_handle = opener.open(req, None, 20) url_handle = opener.open(req, None, 20)
doc['url'] = url_handle.geturl() # may have followed a redirect to a new url doc['url'] = url_handle.geturl() # may have followed a redirect to a new url
headers = url_handle.info() # the addinfourls have the .info() too headers = url_handle.info() # the addinfourls have the .info() too
doc['etag'] = headers.getheader("ETag") doc['etag'] = headers.getheader("ETag")
doc['last_modified'] = headers.getheader("Last-Modified") doc['last_modified'] = headers.getheader("Last-Modified")
doc['date'] = headers.getheader("Date") doc['date'] = headers.getheader("Date")
doc['page_scraped'] = time.time() doc['page_scraped'] = time.time()
doc['web_server'] = headers.getheader("Server") doc['web_server'] = headers.getheader("Server")
doc['via'] = headers.getheader("Via") doc['via'] = headers.getheader("Via")
doc['powered_by'] = headers.getheader("X-Powered-By") doc['powered_by'] = headers.getheader("X-Powered-By")
doc['file_size'] = headers.getheader("Content-Length") doc['file_size'] = headers.getheader("Content-Length")
content_type = headers.getheader("Content-Type") content_type = headers.getheader("Content-Type")
if content_type != None: if content_type != None:
doc['mime_type'] = content_type.split(";")[0] doc['mime_type'] = content_type.split(";")[0]
else: else:
(type, encoding) = mimetypes.guess_type(url) (type, encoding) = mimetypes.guess_type(url)
doc['mime_type'] = type doc['mime_type'] = type
if hasattr(url_handle, 'code'): if hasattr(url_handle, 'code'):
if url_handle.code == 304: if url_handle.code == 304:
print "the web page has not been modified" + hash print "the web page has not been modified" + hash
last_attachment_fname = doc["_attachments"].keys()[-1] last_attachment_fname = doc["_attachments"].keys()[-1]
last_attachment = docsdb.get_attachment(doc, last_attachment_fname) last_attachment = docsdb.get_attachment(doc, last_attachment_fname)
content = last_attachment content = last_attachment
return (doc['url'], doc['mime_type'], content.read()) return (doc['url'], doc['mime_type'], content.read())
else: else:
print "new webpage loaded" print "new webpage loaded"
content = url_handle.read() content = url_handle.read()
docsdb.save(doc) docsdb.save(doc)
doc = docsdb.get(hash) # need to get a _rev doc = docsdb.get(hash) # need to get a _rev
docsdb.put_attachment(doc, content, str(time.time()) + "-" + os.path.basename(url), doc['mime_type']) docsdb.put_attachment(doc, content, str(time.time()) + "-" + os.path.basename(url), doc['mime_type'])
return (doc['url'], doc['mime_type'], content) return (doc['url'], doc['mime_type'], content)
#store as attachment epoch-filename #store as attachment epoch-filename
   
except (urllib2.URLError, socket.timeout) as e: except (urllib2.URLError, socket.timeout) as e:
print >> sys.stderr,"error!" print >> sys.stderr,"error!"
error = "" error = ""
if hasattr(e, 'reason'): if hasattr(e, 'reason'):
error = "error %s in downloading %s" % (str(e.reason), url) error = "error %s in downloading %s" % (str(e.reason), url)
elif hasattr(e, 'code'): elif hasattr(e, 'code'):
error = "error %s in downloading %s" % (e.code, url) error = "error %s in downloading %s" % (e.code, url)
print >> sys.stderr, error print >> sys.stderr, error
doc['error'] = error doc['error'] = error
docsdb.save(doc) docsdb.save(doc)
return (None, None, None) return (None, None, None)
   
   
def scrapeAndStore(docsdb, url, depth, fieldName, agencyID): def scrapeAndStore(docsdb, url, depth, fieldName, agencyID):
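# fetch the page, then recursively follow its same-site links, decrementing depth each level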
(url, mime_type, content) = fetchURL(docsdb, url, fieldName, agencyID) (url, mime_type, content) = fetchURL(docsdb, url, fieldName, agencyID)
badURLs = ["http://www.ausport.gov.au/supporting/funding/grants_and_scholarships/grant_funding_report"] badURLs = ["http://www.ausport.gov.au/supporting/funding/grants_and_scholarships/grant_funding_report"]
if content != None and depth > 0 and url not in badURLs: if content != None and depth > 0 and url not in badURLs:
if mime_type == "text/html" or mime_type == "application/xhtml+xml" or mime_type == "application/xml": if mime_type == "text/html" or mime_type == "application/xhtml+xml" or mime_type == "application/xml":
# http://www.crummy.com/software/BeautifulSoup/documentation.html # http://www.crummy.com/software/BeautifulSoup/documentation.html
soup = BeautifulSoup(content) soup = BeautifulSoup(content)
navIDs = soup.findAll( navIDs = soup.findAll(
id=re.compile('nav|Nav|menu|bar|left|right|sidebar|more-links|breadcrumb|footer|header')) id=re.compile('nav|Nav|menu|bar|left|right|sidebar|more-links|breadcrumb|footer|header'))
for nav in navIDs: for nav in navIDs:
print "Removing element", nav['id'] print "Removing element", nav['id']
nav.extract() nav.extract()
navClasses = soup.findAll( navClasses = soup.findAll(
attrs={'class': re.compile('nav|menu|bar|left|right|sidebar|more-links|breadcrumb|footer|header')}) attrs={'class': re.compile('nav|menu|bar|left|right|sidebar|more-links|breadcrumb|footer|header')})
for nav in navClasses: for nav in navClasses:
print "Removing element", nav['class'] print "Removing element", nav['class']
nav.extract() nav.extract()
links = soup.findAll('a') # soup.findAll('a', id=re.compile("^p-")) links = soup.findAll('a') # soup.findAll('a', id=re.compile("^p-"))
linkurls = set([]) linkurls = set([])
for link in links: for link in links:
if link.has_key("href"): if link.has_attr("href"):
if link['href'].startswith("http"): if link['href'].startswith("http"):
# lets not do external links for now # lets not do external links for now
# linkurls.add(link['href']) # linkurls.add(link['href'])
None None
if link['href'].startswith("mailto"): if link['href'].startswith("mailto"):
# not http # not http
None None
if link['href'].startswith("javascript"): if link['href'].startswith("javascript"):
# not http # not http
None None
else: else:
# remove anchors and spaces in urls # remove anchors and spaces in urls
linkurls.add(fullurl(url, link['href'])) linkurls.add(fullurl(url, link['href']))
for linkurl in linkurls: for linkurl in linkurls:
#print linkurl #print linkurl
scrapeAndStore(docsdb, linkurl, depth - 1, fieldName, agencyID) scrapeAndStore(docsdb, linkurl, depth - 1, fieldName, agencyID)
   
# select database # select database
agencydb = couch['disclosr-agencies'] agencydb = couch['disclosr-agencies']
docsdb = couch['disclosr-documents'] docsdb = couch['disclosr-documents']
   
if __name__ == "__main__": if __name__ == "__main__":
for row in agencydb.view('app/all'): #not recently scraped agencies view? for row in agencydb.view('app/all'): #not recently scraped agencies view?
agency = agencydb.get(row.id) agency = agencydb.get(row.id)
print agency['name'] print agency['name']
for key in agency.keys(): for key in agency.keys():
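# the trailing "and False" / "and True" are hard-coded toggles for which URL fields get crawled this run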
if key == "FOIDocumentsURL" and "status" not in agency.keys() and False: if key == "FOIDocumentsURL" and "status" not in agency.keys() and False:
scrapeAndStore(docsdb, agency[key], 0, key, agency['_id']) scrapeAndStore(docsdb, agency[key], 0, key, agency['_id'])
if key == 'website' and True: if key == 'website' and True:
scrapeAndStore(docsdb, agency[key], 0, key, agency['_id']) scrapeAndStore(docsdb, agency[key], 0, key, agency['_id'])
if "metadata" not in agency.keys(): if "metadata" not in agency.keys():
agency['metadata'] = {} agency['metadata'] = {}
agency['metadata']['lastScraped'] = time.time() agency['metadata']['lastScraped'] = time.time()
if key.endswith('URL') and False: if key.endswith('URL') and False:
print key print key
depth = 1 depth = 1
if 'scrapeDepth' in agency.keys(): if 'scrapeDepth' in agency.keys():
depth = agency['scrapeDepth'] depth = agency['scrapeDepth']
scrapeAndStore(docsdb, agency[key], depth, key, agency['_id']) scrapeAndStore(docsdb, agency[key], depth, key, agency['_id'])
agencydb.save(agency) agencydb.save(agency)
   
import sys,os import sys,os
sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../')) sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
import genericScrapers import genericScrapers
import scrape import scrape
from bs4 import BeautifulSoup from bs4 import BeautifulSoup
   
#http://www.doughellmann.com/PyMOTW/abc/ #http://www.doughellmann.com/PyMOTW/abc/
class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper): class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
def getTable(self,soup): def getTable(self,soup):
return soup.find(id = "maincontentcontainer").table return soup.find(class_ = "contentcontainer").table
def getColumnCount(self): def getColumnCount(self):
return 5 return 5
def getColumns(self,columns): def getColumns(self,columns):
(date, disclogdate, title, description, notes) = columns (date, disclogdate, title, description, notes) = columns
return (date, date, title, description, notes) return (date, date, title, description, notes)
   
if __name__ == '__main__': if __name__ == '__main__':
print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper) print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper) print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
ScraperImplementation().doScrape() ScraperImplementation().doScrape()
   
import sys,os import sys,os
sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../')) sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
import genericScrapers import genericScrapers
import dateutil import dateutil
from dateutil.parser import * from dateutil.parser import *
from datetime import * from datetime import *
import scrape import scrape
from bs4 import BeautifulSoup from bs4 import BeautifulSoup
class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper): class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
   
def __init__(self): def __init__(self):
super(ScraperImplementation, self).__init__() super(ScraperImplementation, self).__init__()
   
def getDescription(self,content, entry,doc): def getDescription(self,content, entry,doc):
link = None link = None
links = [] links = []
description = "" description = ""
for atag in entry.find_all('a'): for atag in entry.find_all('a'):
if atag.has_key('href'): if atag.has_attr('href'):
link = scrape.fullurl(self.getURL(), atag['href']) link = scrape.fullurl(self.getURL(), atag['href'])
(url, mime_type, htcontent) = scrape.fetchURL(scrape.docsdb, link, "foidocuments", self.getAgencyID(), False) (url, mime_type, htcontent) = scrape.fetchURL(scrape.docsdb, link, "foidocuments", self.getAgencyID(), False)
if htcontent != None: if htcontent != None:
if mime_type == "text/html" or mime_type == "application/xhtml+xml" or mime_type =="application/xml": if mime_type == "text/html" or mime_type == "application/xhtml+xml" or mime_type =="application/xml":
soup = BeautifulSoup(htcontent) soup = BeautifulSoup(htcontent)
row = soup.find(id="content_div_148050") row = soup.find(id="content_div_148050")
description = ''.join(row.stripped_strings) description = ''.join(row.stripped_strings)
for atag in row.find_all("a"): for atag in row.find_all("a"):
if atag.has_key('href'): if atag.has_attr('href'):
links.append(scrape.fullurl(link, atag['href'])) links.append(scrape.fullurl(link, atag['href']))
   
if links != []: if links != []:
doc.update({'links': links}) doc.update({'links': links})
if description != "": if description != "":
doc.update({ 'description': description}) doc.update({ 'description': description})
def getColumnCount(self): def getColumnCount(self):
return 4 return 4
   
def getColumns(self, columns): def getColumns(self, columns):
(id, date, datepub, title) = columns (id, date, datepub, title) = columns
return (id, date, title, title, None) return (id, date, title, title, None)
   
   
if __name__ == '__main__': if __name__ == '__main__':
print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper) print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper) print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
   
nsi = ScraperImplementation() nsi = ScraperImplementation()
nsi.disclogURL = "http://www.dbcde.gov.au/about_us/freedom_of_information_disclosure_log/foi_list?result_146858_result_page=1"  
nsi.doScrape()  
nsi.disclogURL = "http://www.dbcde.gov.au/about_us/freedom_of_information_disclosure_log/foi_list?result_146858_result_page=2"  
nsi.doScrape()  
nsi.disclogURL = "http://www.dbcde.gov.au/about_us/freedom_of_information_disclosure_log/foi_list?result_146858_result_page=3"  
nsi.doScrape()  
nsi.disclogURL = "http://www.dbcde.gov.au/about_us/freedom_of_information_disclosure_log/foi_list?result_146858_result_page=4"  
nsi.doScrape()  
nsi.disclogURL = "http://www.dbcde.gov.au/about_us/freedom_of_information_disclosure_log/foi_list?result_146858_result_page=5"  
nsi.doScrape() nsi.doScrape()
   
import sys,os import sys,os
sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../')) sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
import genericScrapers import genericScrapers
import scrape import scrape
from bs4 import BeautifulSoup from bs4 import BeautifulSoup
   
#http://www.doughellmann.com/PyMOTW/abc/ #http://www.doughellmann.com/PyMOTW/abc/
class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper): class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
#def getTable(self,soup): def getTable(self,soup):
# return soup.find(id = "cphMain_C001_Col01").table return soup.findAll('table')[1]
def getColumnCount(self): def getColumnCount(self):
return 5 return 5
def getColumns(self,columns): def getColumns(self,columns):
(id, date, title, description,notes) = columns (id, date, title, description,notes) = columns
return (id, date, title, description, notes) return (id, date, title, description, notes)
   
if __name__ == '__main__': if __name__ == '__main__':
print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper) print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper) print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
ScraperImplementation().doScrape() ScraperImplementation().doScrape()
   
import sys,os import sys,os
sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../')) sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
import genericScrapers import genericScrapers
import scrape import scrape
from bs4 import BeautifulSoup from bs4 import BeautifulSoup
   
#http://www.doughellmann.com/PyMOTW/abc/ #http://www.doughellmann.com/PyMOTW/abc/
class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper): class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
def getDescription(self,content, entry,doc): def getDescription(self,content, entry,doc):
link = None link = None
links = [] links = []
description = "" description = ""
for atag in entry.find_all('a'): for atag in entry.find_all('a'):
if atag.has_key('href'): if atag.has_attr('href'):
link = scrape.fullurl(self.getURL(),atag['href']) link = scrape.fullurl(self.getURL(),atag['href'])
(url,mime_type,htcontent) = scrape.fetchURL(scrape.docsdb, link, "foidocuments", self.getAgencyID(), False) (url,mime_type,htcontent) = scrape.fetchURL(scrape.docsdb, link, "foidocuments", self.getAgencyID(), False)
if htcontent != None: if htcontent != None:
if mime_type == "text/html" or mime_type == "application/xhtml+xml" or mime_type =="application/xml": if mime_type == "text/html" or mime_type == "application/xhtml+xml" or mime_type =="application/xml":
# http://www.crummy.com/software/BeautifulSoup/documentation.html # http://www.crummy.com/software/BeautifulSoup/documentation.html
soup = BeautifulSoup(htcontent) soup = BeautifulSoup(htcontent)
rowtitle = soup.find(class_ = "wc-title").find("h1").string rowtitle = soup.find(class_ = "wc-title").find("h1").string
if rowtitle != None: if rowtitle != None:
description = rowtitle + ": " description = rowtitle + ": "
for row in soup.find(class_ ="wc-content").find_all('td'): for row in soup.find(class_ ="wc-content").find_all('td'):
if row != None: if row != None:
for text in row.stripped_strings: for text in row.stripped_strings:
description = description + text + "\n" description = description + text + "\n"
for atag in row.find_all("a"): for atag in row.find_all("a"):
if atag.has_key('href'): if atag.has_attr('href'):
links.append(scrape.fullurl(link,atag['href'])) links.append(scrape.fullurl(link,atag['href']))
   
if links != []: if links != []:
doc.update({'links': links}) doc.update({'links': links})
if description != "": if description != "":
doc.update({ 'description': description}) doc.update({ 'description': description})
def getRows(self, table):
return table.find_all(class_ = "dl-row")
def findColumns(self, row):
return row.find_all('div')
def getColumnCount(self): def getColumnCount(self):
return 2 return 2
def getTable(self,soup): def getTable(self,soup):
return soup.find(class_ = "ms-rteTable-default") return soup.find(class_ = "foi-dl-list")
def getColumns(self,columns): def getColumns(self,columns):
(date, title) = columns (title,date) = columns
return (title, date, title, title, None) return (title, date, title, title, None)
   
if __name__ == '__main__': if __name__ == '__main__':
print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper) print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper) print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
ScraperImplementation().doScrape() ScraperImplementation().doScrape()
   
import sys,os import sys,os
sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../')) sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
import genericScrapers import genericScrapers
import scrape import scrape
from bs4 import BeautifulSoup from bs4 import BeautifulSoup
from datetime import date from datetime import date
   
#http://www.doughellmann.com/PyMOTW/abc/ #http://www.doughellmann.com/PyMOTW/abc/
class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper): class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
def getTable(self,soup): def getTable(self,soup):
return soup.find(id= "ctl00_MSO_ContentDiv").table return soup.find(class_ = "rgMasterTable")
   
def getColumns(self,columns): def getColumns(self,columns):
(id, title, description, notes) = columns (id, title, description, notes) = columns
return (id, title, title, description, notes) return (id, title, title, description, notes)
def getDate(self, content, entry, doc): def getDate(self, content, entry, doc):
edate = date.today().strftime("%Y-%m-%d") edate = date.today().strftime("%Y-%m-%d")
doc.update({'date': edate}) doc.update({'date': edate})
return return
def getColumnCount(self): def getColumnCount(self):
return 4 return 4
   
if __name__ == '__main__': if __name__ == '__main__':
print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper) print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper) print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
ScraperImplementation().doScrape() ScraperImplementation().doScrape()
   
import sys,os import sys,os
sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../')) sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
import genericScrapers import genericScrapers
import scrape import scrape
from bs4 import BeautifulSoup from bs4 import BeautifulSoup
   
#http://www.doughellmann.com/PyMOTW/abc/ #http://www.doughellmann.com/PyMOTW/abc/
class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper): class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
def getTable(self,soup): def getTable(self,soup):
return soup.find(class_ = "ms-rtestate-field").table return soup.find(class_ = "ms-rtestate-field").table
def getColumns(self,columns): def getColumns(self,columns):
(id, date, title, description, notes) = columns (id, date, title, description, notes) = columns
return (id, date, title, description, notes) return (id, date, title, description, notes)
   
def getLinks(self, content, entry, doc): def getLinks(self, content, entry, doc):
link = None link = None
links = [] links = []
for atag in entry.find_all('a'): for atag in entry.find_all('a'):
if atag.has_key('href'): if atag.has_attr('href'):
link = scrape.fullurl(self.getURL(),atag['href']) link = scrape.fullurl(self.getURL(),atag['href'])
(url,mime_type,htcontent) = scrape.fetchURL(scrape.docsdb, link, "foidocuments", self.getAgencyID(), False) (url,mime_type,htcontent) = scrape.fetchURL(scrape.docsdb, link, "foidocuments", self.getAgencyID(), False)
if htcontent != None: if htcontent != None:
if mime_type == "text/html" or mime_type == "application/xhtml+xml" or mime_type =="application/xml": if mime_type == "text/html" or mime_type == "application/xhtml+xml" or mime_type =="application/xml":
# http://www.crummy.com/software/BeautifulSoup/documentation.html # http://www.crummy.com/software/BeautifulSoup/documentation.html
soup = BeautifulSoup(htcontent) soup = BeautifulSoup(htcontent)
for atag in soup.find(class_ = "article-content").find_all('a'): for atag in soup.find(class_ = "article-content").find_all('a'):
if atag.has_key('href'): if atag.has_attr('href'):
links.append(scrape.fullurl(link,atag['href'])) links.append(scrape.fullurl(link,atag['href']))
   
if links != []: if links != []:
doc.update({'links': links}) doc.update({'links': links})
doc.update({'url': link}) doc.update({'url': link})
return return
   
if __name__ == '__main__': if __name__ == '__main__':
print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper) print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper) print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
ScraperImplementation().doScrape() ScraperImplementation().doScrape()
   
import sys,os import sys,os
sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../')) sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
import genericScrapers import genericScrapers
import scrape import scrape
from bs4 import BeautifulSoup from bs4 import BeautifulSoup
   
#http://www.doughellmann.com/PyMOTW/abc/ #http://www.doughellmann.com/PyMOTW/abc/
class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper): class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
  def getDocHash(self, id,date, url):
''' url changes on every request, so ignore it for the hash '''
  return scrape.mkhash(
  self.remove_control_chars(
  ''.join(id.stripped_strings)))
def getColumnCount(self): def getColumnCount(self):
return 4 return 4
def getColumns(self,columns): def getColumns(self,columns):
(date, id, title, description) = columns (date, id, title, description) = columns
return (id, date, title, description, None) return (id, date, title, description, None)
   
if __name__ == '__main__': if __name__ == '__main__':
print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper) print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper) print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
ScraperImplementation().doScrape() ScraperImplementation().doScrape()
   
import sys,os import sys,os
sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../')) sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
import genericScrapers import genericScrapers
import scrape import scrape
from bs4 import BeautifulSoup from bs4 import BeautifulSoup
   
#http://www.doughellmann.com/PyMOTW/abc/ #http://www.doughellmann.com/PyMOTW/abc/
class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper): class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
#def getTable(self,soup): def getTable(self,soup):
# return soup.find(id = "ctl00_PlaceHolderMain_intro2__ControlWrapper_CerRichHtmlField").table return soup.find(id = "main").table
def getColumnCount(self): def getColumnCount(self):
return 4 return 4
def getColumns(self,columns): def getColumns(self,columns):
(date, title, description,notes) = columns (date, title, description,notes) = columns
return (title, date, title, description, notes) return (title, date, title, description, notes)
   
if __name__ == '__main__': if __name__ == '__main__':
print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper) print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper) print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
ScraperImplementation().doScrape() ScraperImplementation().doScrape()
   
import sys,os import sys,os
sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../')) sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
import genericScrapers import genericScrapers
import scrape import scrape
from bs4 import BeautifulSoup from bs4 import BeautifulSoup
   
#http://www.doughellmann.com/PyMOTW/abc/ #http://www.doughellmann.com/PyMOTW/abc/
class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper): class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
def getTable(self,soup):  
return soup.find(id = "centercontent").table  
def getColumnCount(self): def getColumnCount(self):
return 5 return 5
def getColumns(self,columns): def getColumns(self,columns):
(id, date, title, description,notes) = columns (id, date, title, description,notes) = columns
return (id, date, title, description, notes) return (id, date, title, description, notes)
   
if __name__ == '__main__': if __name__ == '__main__':
print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper) print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper) print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
ScraperImplementation().doScrape() ScraperImplementation().doScrape()
   
import sys,os import sys,os
sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../')) sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
import genericScrapers import genericScrapers
#RSS feed not detailed #RSS feed not detailed
   
#http://www.doughellmann.com/PyMOTW/abc/ #http://www.doughellmann.com/PyMOTW/abc/
class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper): class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
  def getTable(self,soup):
  return soup.find(id = "page_content").table
def getColumns(self,columns): def getColumns(self,columns):
(id, date, title, description, notes) = columns (id, date, title, description, notes) = columns
return (id, date, title, description, notes) return (id, date, title, description, notes)
   
if __name__ == '__main__': if __name__ == '__main__':
print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper) print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper) print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
ScraperImplementation().doScrape() ScraperImplementation().doScrape()
   
   
import sys,os import sys,os
sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../')) sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
import genericScrapers import genericScrapers
import scrape import scrape
from bs4 import BeautifulSoup from bs4 import BeautifulSoup
import codecs import codecs
#http://www.doughellmann.com/PyMOTW/abc/ #http://www.doughellmann.com/PyMOTW/abc/
class NewScraperImplementation(genericScrapers.GenericOAICDisclogScraper): class NewScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
def getDescription(self,content, entry,doc): def getDescription(self,content, entry,doc):
link = None link = None
links = [] links = []
description = "" description = ""
for atag in entry.find_all('a'): for atag in entry.find_all('a'):
if atag.has_key('href'): if atag.has_attr('href'):
link = scrape.fullurl(self.getURL(),atag['href']) link = scrape.fullurl(self.getURL(),atag['href'])
(url,mime_type,htcontent) = scrape.fetchURL(scrape.docsdb, link, "foidocuments", self.getAgencyID(), False) (url,mime_type,htcontent) = scrape.fetchURL(scrape.docsdb, link, "foidocuments", self.getAgencyID(), False)
if htcontent != None: if htcontent != None:
if mime_type == "text/html" or mime_type == "application/xhtml+xml" or mime_type =="application/xml": if mime_type == "text/html" or mime_type == "application/xhtml+xml" or mime_type =="application/xml":
# http://www.crummy.com/software/BeautifulSoup/documentation.html # http://www.crummy.com/software/BeautifulSoup/documentation.html
soup = BeautifulSoup(htcontent) soup = BeautifulSoup(htcontent)
for text in soup.find(class_ = "mainContent").stripped_strings: for text in soup.find(class_ = "mainContent").stripped_strings:
description = description + text.encode('ascii', 'ignore') description = description + text.encode('ascii', 'ignore')
   
for atag in soup.find(id="SortingTable").find_all("a"): for atag in soup.find(id="SortingTable").find_all("a"):
if atag.has_key('href'): if atag.has_attr('href'):
links.append(scrape.fullurl(link,atag['href'])) links.append(scrape.fullurl(link,atag['href']))
   
if links != []: if links != []:
doc.update({'links': links}) doc.update({'links': links})
if description != "": if description != "":
doc.update({ 'description': description}) doc.update({ 'description': description})
   
def getColumnCount(self): def getColumnCount(self):
return 2 return 2
def getTable(self,soup): def getTable(self,soup):
return soup.find(id = "TwoColumnSorting") return soup.find(id = "TwoColumnSorting")
def getColumns(self,columns): def getColumns(self,columns):
( title, date) = columns ( title, date) = columns
return (title, date, title, title, None) return (title, date, title, title, None)
class OldScraperImplementation(genericScrapers.GenericOAICDisclogScraper): class OldScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
def getDescription(self,content, entry,doc): def getDescription(self,content, entry,doc):
link = None link = None
links = [] links = []
description = "" description = ""
for atag in entry.find_all('a'): for atag in entry.find_all('a'):
if atag.has_key('href'): if atag.has_attr('href'):
link = scrape.fullurl(self.getURL(),atag['href']) link = scrape.fullurl(self.getURL(),atag['href'])
(url,mime_type,htcontent) = scrape.fetchURL(scrape.docsdb, link, "foidocuments", self.getAgencyID(), False) (url,mime_type,htcontent) = scrape.fetchURL(scrape.docsdb, link, "foidocuments", self.getAgencyID(), False)
if htcontent != None: if htcontent != None:
if mime_type == "text/html" or mime_type == "application/xhtml+xml" or mime_type =="application/xml": if mime_type == "text/html" or mime_type == "application/xhtml+xml" or mime_type =="application/xml":
# http://www.crummy.com/software/BeautifulSoup/documentation.html # http://www.crummy.com/software/BeautifulSoup/documentation.html
soup = BeautifulSoup(htcontent) soup = BeautifulSoup(htcontent)
for text in soup.find(id="content-item").stripped_strings: for text in soup.find(id="content-item").stripped_strings:
description = description + text + " \n" description = description + text + " \n"
for atag in soup.find(id="content-item").find_all("a"): for atag in soup.find(id="content-item").find_all("a"):
if atag.has_key('href'): if atag.has_attr('href'):
links.append(scrape.fullurl(link,atag['href'])) links.append(scrape.fullurl(link,atag['href']))
if links != []: if links != []:
doc.update({'links': links}) doc.update({'links': links})
if description != "": if description != "":
doc.update({ 'description': description}) doc.update({ 'description': description})
   
   
def getColumnCount(self): def getColumnCount(self):
return 2 return 2
def getTable(self,soup): def getTable(self,soup):
return soup.find(class_ = "doc-list") return soup.find(class_ = "doc-list")
def getColumns(self,columns): def getColumns(self,columns):
(date, title) = columns (date, title) = columns
return (title, date, title, title, None) return (title, date, title, title, None)
   
if __name__ == '__main__': if __name__ == '__main__':
print 'Subclass:', issubclass(NewScraperImplementation, genericScrapers.GenericOAICDisclogScraper) print 'Subclass:', issubclass(NewScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
print 'Instance:', isinstance(NewScraperImplementation(), genericScrapers.GenericOAICDisclogScraper) print 'Instance:', isinstance(NewScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
NewScraperImplementation().doScrape() NewScraperImplementation().doScrape()
print 'Subclass:', issubclass(OldScraperImplementation, genericScrapers.GenericOAICDisclogScraper) print 'Subclass:', issubclass(OldScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
print 'Instance:', isinstance(OldScraperImplementation(), genericScrapers.GenericOAICDisclogScraper) print 'Instance:', isinstance(OldScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
osi = OldScraperImplementation() osi = OldScraperImplementation()
osi.disclogURL = "http://archive.treasury.gov.au/content/foi_publications.asp?year=-1&abstract=0&classification=&=&titl=Disclosure+Log+-+Documents+Released+Under+FOI" osi.disclogURL = "http://archive.treasury.gov.au/content/foi_publications.asp?year=-1&abstract=0&classification=&=&titl=Disclosure+Log+-+Documents+Released+Under+FOI"
osi.doScrape() osi.doScrape()
   
import sys,os import sys,os
sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../')) sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
import genericScrapers import genericScrapers
import scrape import scrape
from bs4 import BeautifulSoup from bs4 import BeautifulSoup
   
#http://www.doughellmann.com/PyMOTW/abc/ #http://www.doughellmann.com/PyMOTW/abc/
class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper): class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
def getTable(self,soup): # def getTable(self,soup):
return soup.find(id = "_ctl0__ctl0_MainContentPlaceHolder_MainContentPlaceHolder_ContentSpan").findAll("table")[3] # return soup.find(_class = "content").table
def getColumnCount(self): def getColumnCount(self):
return 5 return 5
def getColumns(self,columns): def getColumns(self,columns):
(id, date, title, description,notes) = columns (id, date, title, description,notes) = columns
return (id, date, title, description, notes) return (id, date, title, description, notes)
   
if __name__ == '__main__': if __name__ == '__main__':
print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper) print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper) print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
ScraperImplementation().doScrape() ScraperImplementation().doScrape()
   
import sys,os import sys,os
sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../')) sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
import genericScrapers import genericScrapers
#RSS feed not detailed #RSS feed not detailed
import scrape import scrape
from bs4 import BeautifulSoup from bs4 import BeautifulSoup
#http://www.doughellmann.com/PyMOTW/abc/ #http://www.doughellmann.com/PyMOTW/abc/
class ScraperImplementation(genericScrapers.GenericRSSDisclogScraper): class ScraperImplementation(genericScrapers.GenericRSSDisclogScraper):
def getDescription(self,content, entry,doc): def getDescription(self,content, entry,doc):
(url,mime_type,htcontent) = scrape.fetchURL(scrape.docsdb, entry.link, "foidocuments", self.getAgencyID(), False) (url,mime_type,htcontent) = scrape.fetchURL(scrape.docsdb, entry.link, "foidocuments", self.getAgencyID(), False)
if htcontent != None: if htcontent != None:
if mime_type == "text/html" or mime_type == "application/xhtml+xml" or mime_type =="application/xml": if mime_type == "text/html" or mime_type == "application/xhtml+xml" or mime_type =="application/xml":
# http://www.crummy.com/software/BeautifulSoup/documentation.html # http://www.crummy.com/software/BeautifulSoup/documentation.html
soup = BeautifulSoup(content) soup = BeautifulSoup(content)
links = [] links = []
description = "" description = ""
dldivs = soup.find('div',class_="download") dldivs = soup.find('div',class_="download")
if dldivs != None: if dldivs != None:
for atag in dldivs.find_all("a"): for atag in dldivs.find_all("a"):
if atag.has_key('href'): if atag.has_attr('href'):
links.append(scrape.fullurl(url,atag['href'])) links.append(scrape.fullurl(url,atag['href']))
nodldivs = soup.find('div',class_="incompleteNotification") nodldivs = soup.find('div',class_="incompleteNotification")
if nodldivs != None:
for text in nodldivs.stripped_strings: for text in nodldivs.stripped_strings:
description = description + text description = description + text
for row in soup.table.find_all('tr'):
if row.find('th') != None and row.find('th').string != None:
description = description + "\n" + row.find('th').string + ": "
if row.find('div') != None:
for text in row.find('div').stripped_strings:
description = description + text
if links != []: if links != []:
doc.update({'links': links}) doc.update({'links': links})
if description != "": if description != "":
doc.update({ 'description': description}) doc.update({ 'description': description})
   
   
if __name__ == '__main__': if __name__ == '__main__':
print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericRSSDisclogScraper) print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericRSSDisclogScraper)
print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericRSSDisclogScraper) print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericRSSDisclogScraper)
ScraperImplementation().doScrape() ScraperImplementation().doScrape()
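
# The getDescription above follows a fetch-then-enrich flow: pull the page an
# RSS entry links to, harvest attachment links, and flatten the visible text
# into the description. A standalone sketch of that flow follows; harvest()
# is a hypothetical helper name, while scrape.fetchURL, scrape.fullurl and
# their argument order are as used in the code above.
import scrape
from bs4 import BeautifulSoup

def harvest(entry_link, agency_id, docsdb):
    (url, mime_type, htcontent) = scrape.fetchURL(docsdb, entry_link, "foidocuments", agency_id, False)
    links, description = [], ""
    if htcontent != None and mime_type in ("text/html", "application/xhtml+xml", "application/xml"):
        soup = BeautifulSoup(htcontent)
        # collect every resolvable link; callers can filter to documents
        for atag in soup.find_all('a'):
            if atag.has_attr('href'):
                links.append(scrape.fullurl(url, atag['href']))
        description = ' '.join(soup.stripped_strings)
    return (links, description)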
   
   
import sys,os import sys,os
sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../')) sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
import genericScrapers import genericScrapers
import scrape import scrape
from bs4 import BeautifulSoup from bs4 import BeautifulSoup
   
#http://www.doughellmann.com/PyMOTW/abc/ #http://www.doughellmann.com/PyMOTW/abc/
class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper): class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
def getTable(self,soup): def getTable(self,soup):
return soup.find(summary="This table shows every FOI request to date.") return soup
def getColumnCount(self): def getColumnCount(self):
return 5 return 5
def getColumns(self,columns): def getColumns(self,columns):
(id, date, title, description,notes) = columns (id, date, title, description,notes) = columns
return (id, date, title, description, notes) return (id, date, title, description, notes)
   
if __name__ == '__main__': if __name__ == '__main__':
print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper) print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper) print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
ScraperImplementation().doScrape() ScraperImplementation().doScrape()
   
import sys,os import sys,os
sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../')) sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
import genericScrapers import genericScrapers
import scrape import scrape
from bs4 import BeautifulSoup from bs4 import BeautifulSoup
   
#http://www.doughellmann.com/PyMOTW/abc/ #http://www.doughellmann.com/PyMOTW/abc/
class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper): class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
def getTable(self,soup):  
return soup.find(id="main").table  
def getColumnCount(self): def getColumnCount(self):
return 7 return 7
def getColumns(self,columns): def getColumns(self,columns):
(id, date, title, description,link,deldate,notes) = columns (id, date, title, description,link,deldate,notes) = columns
return (id, date, title, description, notes) return (id, date, title, description, notes)
   
if __name__ == '__main__': if __name__ == '__main__':
print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper) print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper) print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
ScraperImplementation().doScrape() ScraperImplementation().doScrape()
   
import sys,os import sys,os
sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../')) sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
import genericScrapers import genericScrapers
import dateutil
from dateutil.parser import *
from datetime import *
import scrape
from bs4 import BeautifulSoup

class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):

def __init__(self):
super(ScraperImplementation, self).__init__()
def getTable(self, soup):
return soup.find(id='zone-content')
  def getDescription(self,content, entry,doc):
  link = None
  links = []
  description = ""
  for atag in entry.find_all('a'):
  if atag.has_attr('href'):
  link = scrape.fullurl(self.getURL(), atag['href'])
  (url, mime_type, htcontent) = scrape.fetchURL(scrape.docsdb, link, "foidocuments", self.getAgencyID(), False)
  if htcontent != None:
  if mime_type == "text/html" or mime_type == "application/xhtml+xml" or mime_type =="application/xml":
  soup = BeautifulSoup(htcontent)
  row = soup.find(id="foidetails")
  if row == None:
  row = soup.find(id="content").table
  if row == None:
  row = soup.find(id="content")
  description = ''.join(row.stripped_strings)
  for atag in row.find_all("a"):
  if atag.has_attr('href'):
  links.append(scrape.fullurl(link, atag['href']))
   
  if links != []:
  doc.update({'links': links})
  if description != "":
  doc.update({ 'description': description})
   
  def getColumnCount(self):
  return 3
   
  def getColumns(self, columns):
  (id, title, date) = columns
  return (id, date, title, title, None)
   
   
if __name__ == '__main__': if __name__ == '__main__':
print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericRSSDisclogScraper) print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericRSSDisclogScraper) print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
ScraperImplementation().doScrape() ScraperImplementation().doScrape()
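
# This scraper imports dateutil but the shown code never calls it; presumably
# it is there to normalise the messy date strings agencies publish ("3rd July
# 2012", "03/07/2012", "July 2012"). A sketch of that normalisation follows;
# normalise_date is a hypothetical helper, and dayfirst=True matches the
# Australian day/month convention these disclogs use.
from dateutil.parser import parse

def normalise_date(text):
    try:
        # fuzzy=True skips surrounding words like "released on"
        return parse(text, dayfirst=True, fuzzy=True).date().isoformat()
    except ValueError:
        return None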
   
import sys,os import sys,os
sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../')) sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
import genericScrapers import genericScrapers
import scrape import scrape
from bs4 import BeautifulSoup from bs4 import BeautifulSoup
   
#http://www.doughellmann.com/PyMOTW/abc/ #http://www.doughellmann.com/PyMOTW/abc/
class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper): class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
def getTable(self,soup): def getTable(self,soup):
return soup.find(id = "ctl00_PlaceHolderMain_PublishingPageContent__ControlWrapper_RichHtmlField").table return soup.find(id = "block-system-main").table
def getColumnCount(self): def getColumnCount(self):
return 7 return 2
def getColumns(self,columns): def getColumns(self,columns):
(id, date, title, description,link,deldate, notes) = columns (date, title) = columns
return (id, date, title, description, notes) return (date, date, title, title, None)
   
if __name__ == '__main__': if __name__ == '__main__':
print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper) print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper) print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
ScraperImplementation().doScrape() ScraperImplementation().doScrape()
   
<?php <?php
   
include ('../include/common.inc.php'); include ('../include/common.inc.php');
$last_updated = date('Y-m-d', @filemtime('cbrfeed.zip'));  
header("Content-Type: text/xml"); header("Content-Type: text/xml");
echo "<?xml version='1.0' encoding='UTF-8'?>"; echo "<?xml version='1.0' encoding='UTF-8'?>";
echo '<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">' . "\n"; echo '<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">' . "\n";
echo " <url><loc>" . local_url() . "index.php</loc><priority>1.0</priority></url>\n"; echo " <url><loc>" . local_url() . "index.php</loc><priority>1.0</priority></url>\n";
foreach (scandir("./") as $file) { foreach (scandir("./") as $file) {
if (strpos($file, ".php") !== false && $file != "index.php" && $file != "sitemap.xml.php") if (strpos($file, ".php") !== false && ($file != "index.php" && $file != "sitemap.xml.php"&& $file != "viewDocument.php")) {
echo " <url><loc>" . local_url() . "$file</loc><priority>0.6</priority></url>\n"; echo " <url><loc>" . local_url() . "$file</loc><priority>0.6</priority></url>\n";
  }
} }
$agenciesdb = $server->get_db('disclosr-agencies'); $agenciesdb = $server->get_db('disclosr-agencies');
  $foidocsdb = $server->get_db('disclosr-foidocuments');
try { try {
$rows = $agenciesdb->get_view("app", "byCanonicalName")->rows; $rows = $agenciesdb->get_view("app", "byCanonicalName")->rows;
foreach ($rows as $row) { foreach ($rows as $row) {
echo '<url><loc>' . local_url() . 'agency.php?id=' . $row->value->_id . "</loc><priority>0.3</priority></url>\n"; echo '<url><loc>' . local_url() . 'agency.php?id=' . $row->value->_id . "</loc><priority>0.3</priority></url>\n";
} }
  unset($rows);
  $rows = null;
} catch (SetteeRestClientException $e) { } catch (SetteeRestClientException $e) {
setteErrorHandler($e); setteErrorHandler($e);
} }
$foidocsdb = $server->get_db('disclosr-foidocuments');  
  foreach (range(0, 8) as $number) {
try { try {
$rows = $foidocsdb->get_view("app", "all")->rows; $rows = $foidocsdb->get_view("app", "all", Array($number,$number+1))->rows;
foreach ($rows as $row) { foreach ($rows as $row) {
echo '<url><loc>' . local_url() . 'view.php?id=' . $row->value->_id . "</loc><priority>0.3</priority></url>\n"; echo '<url><loc>' . local_url() . 'view.php?id=' . $row->value->_id . "</loc><priority>0.3</priority></url>\n";
} }
  unset($rows);
  $rows = null;
  } catch (SetteeRestClientException $e) {
  setteErrorHandler($e);
  }
  }
   
  try {
  $rows = $foidocsdb->get_view("app", "all", Array('9','fffffffff'))->rows;
  foreach ($rows as $row) {
  echo '<url><loc>' . local_url() . 'view.php?id=' . $row->value->_id . "</loc><priority>0.3</priority></url>\n";
  }
  unset($rows);
  $rows = null;
} catch (SetteeRestClientException $e) { } catch (SetteeRestClientException $e) {
setteErrorHandler($e); setteErrorHandler($e);
} }
echo '</urlset>'; echo '</urlset>';
?> ?>
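
# The two try blocks above shard the foidocuments view by the leading hex
# digit of the document id (digits 0 to 8 one window at a time, then one
# window from "9" to the end of the id space) so no single view request has
# to return the whole database. A sketch of the same windowing against
# CouchDB's standard HTTP view API follows; the localhost URL is an
# assumption, requests is just one convenient client, and the sketch passes
# the digits as strings where the PHP mixes numbers and strings.
import json
import requests

COUCH = "http://localhost:5984/disclosr-foidocuments/_design/app/_view/all"

def iter_ids():
    # one-digit windows 0..8, then everything from "9" onward
    windows = [(str(n), str(n + 1)) for n in range(0, 9)] + [('9', 'fffffffff')]
    for startkey, endkey in windows:
        # CouchDB expects view keys JSON-encoded in the query string
        params = {'startkey': json.dumps(startkey), 'endkey': json.dumps(endkey)}
        for row in requests.get(COUCH, params=params).json()['rows']:
            yield row['id']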
   
<?php <?php
   
function include_header_documents($title) function include_header_documents($title)
{ {
header('X-UA-Compatible: IE=edge,chrome=1'); header('X-UA-Compatible: IE=edge,chrome=1');
?> ?>
<!doctype html> <!doctype html>
<!-- paulirish.com/2008/conditional-stylesheets-vs-css-hacks-answer-neither/ --> <!-- paulirish.com/2008/conditional-stylesheets-vs-css-hacks-answer-neither/ -->
<!--[if lt IE 7]> <!--[if lt IE 7]>
<html class="no-js lt-ie9 lt-ie8 lt-ie7" lang="en"> <![endif]--> <html class="no-js lt-ie9 lt-ie8 lt-ie7" lang="en"> <![endif]-->
<!--[if IE 7]> <!--[if IE 7]>
<html class="no-js lt-ie9 lt-ie8" lang="en"> <![endif]--> <html class="no-js lt-ie9 lt-ie8" lang="en"> <![endif]-->
<!--[if IE 8]> <!--[if IE 8]>
<html class="no-js lt-ie9" lang="en"> <![endif]--> <html class="no-js lt-ie9" lang="en"> <![endif]-->
<!-- Consider adding a manifest.appcache: h5bp.com/d/Offline --> <!-- Consider adding a manifest.appcache: h5bp.com/d/Offline -->
<!--[if gt IE 8]><!--> <html class="no-js" lang="en"> <!--<![endif]--> <!--[if gt IE 8]><!--> <html class="no-js" lang="en"> <!--<![endif]-->
<head> <head>
<meta charset="utf-8"> <meta charset="utf-8">
   
<title>Australian Disclosure Logs<?php if ($title != "") echo " - $title"; ?></title> <title>Australian Disclosure Logs<?php if ($title != "") echo " - $title"; ?></title>
<meta name="description" content=""> <meta name="description" content="">
   
<!-- Mobile viewport optimized: h5bp.com/viewport --> <!-- Mobile viewport optimized: h5bp.com/viewport -->
<meta name="viewport" content="width=device-width"> <meta name="viewport" content="width=device-width">
<link rel="alternate" type="application/rss+xml" title="Latest Disclosure Log Entries" href="rss.xml.php"/> <link rel="alternate" type="application/rss+xml" title="Latest Disclosure Log Entries" href="rss.xml.php"/>
<!-- Place favicon.ico and apple-touch-icon.png in the root directory: mathiasbynens.be/notes/touch-icons --> <!-- Place favicon.ico and apple-touch-icon.png in the root directory: mathiasbynens.be/notes/touch-icons -->
<meta name="google-site-verification" content="jkknX5g2FCpQvrW030b1Nq2hyoa6mb3EDiA7kCoHNj8"/> <meta name="google-site-verification" content="jkknX5g2FCpQvrW030b1Nq2hyoa6mb3EDiA7kCoHNj8"/>
   
<!-- Le styles --> <!-- Le styles -->
<link href="css/bootstrap.min.css" rel="stylesheet"> <link href="css/bootstrap.min.css" rel="stylesheet">
<style type="text/css"> <style type="text/css">
body { body {
padding-top: 60px; padding-top: 60px;
padding-bottom: 40px; padding-bottom: 40px;
} }
   
.sidebar-nav { .sidebar-nav {
padding: 9px 0; padding: 9px 0;
} }
</style> </style>
<link href="css/bootstrap-responsive.min.css" rel="stylesheet"> <link href="css/bootstrap-responsive.min.css" rel="stylesheet">
   
<!-- HTML5 shim, for IE6-8 support of HTML5 elements --> <!-- HTML5 shim, for IE6-8 support of HTML5 elements -->
<!--[if lt IE 9]> <!--[if lt IE 9]>
<script src="http://html5shim.googlecode.com/svn/trunk/html5.js"></script> <script src="http://html5shim.googlecode.com/svn/trunk/html5.js"></script>
<![endif]--> <![endif]-->
<!-- More ideas for your <head> here: h5bp.com/d/head-Tips --> <!-- More ideas for your <head> here: h5bp.com/d/head-Tips -->
   
<!-- All JavaScript at the bottom, except this Modernizr build. <!-- All JavaScript at the bottom, except this Modernizr build.
Modernizr enables HTML5 elements & feature detects for optimal performance. Modernizr enables HTML5 elements & feature detects for optimal performance.
Create your own custom Modernizr build: www.modernizr.com/download/ Create your own custom Modernizr build: www.modernizr.com/download/
<script src="js/libs/modernizr-2.5.3.min.js"></script>--> <script src="js/libs/modernizr-2.5.3.min.js"></script>-->
<script src="js/jquery.js"></script> <script src="js/jquery.js"></script>
<script type="text/javascript" src="js/flotr2.min.js"></script> <script type="text/javascript" src="js/flotr2.min.js"></script>
   
</head> </head>
<body> <body>
<div class="navbar navbar-inverse navbar-fixed-top"> <div class="navbar navbar-inverse navbar-fixed-top">
<div class="navbar-inner"> <div class="navbar-inner">
<div class="container-fluid"> <div class="container-fluid">
<!--<a class="btn btn-navbar" data-toggle="collapse" data-target=".nav-collapse"> <!--<a class="btn btn-navbar" data-toggle="collapse" data-target=".nav-collapse">
<span class="icon-bar"></span> <span class="icon-bar"></span>
<span class="icon-bar"></span> <span class="icon-bar"></span>
<span class="icon-bar"></span> <span class="icon-bar"></span>
</a> --> </a> -->
<a class="brand" href="#">Australian Disclosure Logs</a> <a class="brand" href="#">Australian Disclosure Logs</a>
   
<div class="nav-collapse collapse"> <div class="nav-collapse collapse">
<p class="navbar-text pull-right"> <p class="navbar-text pull-right">
<small> <small>
Subsites on: Subsites on:
</small> </small>
<a href="http://orgs.disclosurelo.gs">Government Agencies</a> <a href="http://orgs.disclosurelo.gs">Government Agencies</a>
• <a href="http://lobbyists.disclosurelo.gs">Political Lobbyists</a> • <a href="http://lobbyists.disclosurelo.gs">Political Lobbyists</a>
• <a href="http://contracts.disclosurelo.gs">Government Contracts and Spending</a> • <a href="http://contracts.disclosurelo.gs">Government Contracts and Spending</a>
   
</p> </p>
<ul class="nav"> <ul class="nav">
<li><a href="agency.php">By Agency</a></li> <li><a href="agency.php">By Agency</a></li>
<li><a href="date.php">By Date</a></li> <!-- <li><a href="date.php">By Date</a></li> -->
<li><a href="disclogsList.php">List of Disclosure Logs</a></li> <li><a href="disclogsList.php">List of Disclosure Logs</a></li>
<li><a href="charts.php">Charts</a></li> <li><a href="charts.php">Charts</a></li>
<li><a href="about.php">About</a></li> <li><a href="about.php">About</a></li>
   
</ul> </ul>
</div> </div>
<!--/.nav-collapse --> <!--/.nav-collapse -->
</div> </div>
</div> </div>
</div> </div>
<div class="container"> <div class="container">
<?php <?php
} }
   
function include_footer_documents() function include_footer_documents()
{ {
global $ENV; global $ENV;
?> ?>
</div> <!-- /container --> </div> <!-- /container -->
<hr> <hr>
   
<footer> <footer>
<p>Not affiliated with or endorsed by any government agency.</p> <p>Not affiliated with or endorsed by any government agency.</p>
</footer> </footer>
<?php <?php
if ($ENV != "DEV") { if ($ENV != "DEV") {
echo "<script type='text/javascript'> echo "<script type='text/javascript'>
   
var _gaq = _gaq || []; var _gaq = _gaq || [];
_gaq.push(['_setAccount', 'UA-12341040-4']); _gaq.push(['_setAccount', 'UA-12341040-4']);
_gaq.push(['_setDomainName', 'disclosurelo.gs']); _gaq.push(['_setDomainName', 'disclosurelo.gs']);
_gaq.push(['_setAllowLinker', true]); _gaq.push(['_setAllowLinker', true]);
_gaq.push(['_trackPageview']); _gaq.push(['_trackPageview']);
   
(function() { (function() {
var ga = document.createElement('script'); var ga = document.createElement('script');
ga.type = 'text/javascript'; ga.type = 'text/javascript';
ga.async = true; ga.async = true;
ga.src = ('https:' == document.location.protocol ? 'https://ssl' : 'http://www') + '.google-analytics.com/ga.js'; ga.src = ('https:' == document.location.protocol ? 'https://ssl' : 'http://www') + '.google-analytics.com/ga.js';
var s = document.getElementsByTagName('script')[0]; var s = document.getElementsByTagName('script')[0];
s.parentNode.insertBefore(ga, s); s.parentNode.insertBefore(ga, s);
})(); })();
   
</script>"; </script>";
} }
?> ?>
<!-- Le javascript <!-- Le javascript
================================================== --> ================================================== -->
<!-- Placed at the end of the document so the pages load faster --> <!-- Placed at the end of the document so the pages load faster -->
<!-- <!--
<script src="js/bootstrap-transition.js"></script> <script src="js/bootstrap-transition.js"></script>
<script src="js/bootstrap-alert.js"></script> <script src="js/bootstrap-alert.js"></script>
<script src="js/bootstrap-modal.js"></script> <script src="js/bootstrap-modal.js"></script>
<script src="js/bootstrap-dropdown.js"></script> <script src="js/bootstrap-dropdown.js"></script>
<script src="js/bootstrap-scrollspy.js"></script> <script src="js/bootstrap-scrollspy.js"></script>
<script src="js/bootstrap-tab.js"></script> <script src="js/bootstrap-tab.js"></script>
<script src="js/bootstrap-tooltip.js"></script> <script src="js/bootstrap-tooltip.js"></script>
<script src="js/bootstrap-popover.js"></script> <script src="js/bootstrap-popover.js"></script>
<script src="js/bootstrap-button.js"></script> <script src="js/bootstrap-button.js"></script>
<script src="js/bootstrap-collapse.js"></script> <script src="js/bootstrap-collapse.js"></script>
<script src="js/bootstrap-carousel.js"></script> <script src="js/bootstrap-carousel.js"></script>
<script src="js/bootstrap-typeahead.js"></script>--> <script src="js/bootstrap-typeahead.js"></script>-->
   
   
</body> </body>
</html> </html>
<?php <?php
} }
   
function truncate($string, $length, $stopanywhere = false) function truncate($string, $length, $stopanywhere = false)
{ {
//truncates a string to a certain char length, stopping on a word if not specified otherwise. //truncates a string to a certain char length, stopping on a word if not specified otherwise.
if (strlen($string) > $length) { if (strlen($string) > $length) {
//limit hit! //limit hit!
$string = substr($string, 0, ($length - 3)); $string = substr($string, 0, ($length - 3));
if ($stopanywhere) { if ($stopanywhere) {
//stop anywhere //stop anywhere
$string .= '...'; $string .= '...';
} else { } else {
//stop on a word. //stop on a word.
$string = substr($string, 0, strrpos($string, ' ')) . '...'; $string = substr($string, 0, strrpos($string, ' ')) . '...';
} }
} }
return $string; return $string;
} }
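
// truncate() trims to the length limit, then backs up to the previous word
// boundary unless $stopanywhere is set. The same logic as a Python sketch,
// for comparison only (not part of this codebase):
//
//   def truncate(string, length, stopanywhere=False):
//       if len(string) <= length:
//           return string
//       string = string[:length - 3]  # leave room for the ellipsis
//       if not stopanywhere:
//           cut = string.rfind(' ')
//           if cut > 0:
//               string = string[:cut]  # stop on a word
//       return string + '...'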
   
function displayLogEntry($row, $idtoname) function displayLogEntry($row, $idtoname)
{ {
$result = ""; $result = "";
$result .= '<div itemscope itemtype="http://schema.org/Article">'; $result .= '<div itemscope itemtype="http://schema.org/Article">';
$result .= '<h2><a href="http://disclosurelo.gs/view.php?id=' . $row->value->_id . '"> <span itemprop="datePublished">' . $row->value->date . "</span>: <span itemprop='name headline'>" . truncate($row->value->title, 120) . "</span>"; $result .= '<h2><a href="http://disclosurelo.gs/view.php?id=' . $row->value->_id . '"> <span itemprop="datePublished">' . $row->value->date . "</span>: <span itemprop='name headline'>" . truncate($row->value->title, 120) . "</span>";
$result .= ' (<span itemprop="author publisher creator">' . $idtoname[$row->value->agencyID] . '</span>)</a></h2>'; $result .= ' (<span itemprop="author publisher creator">' . $idtoname[$row->value->agencyID] . '</span>)</a></h2>';
$result .= "<p itemprop='description articleBody text'> Title: " . $row->value->title . "<br/>"; $result .= "<p itemprop='description articleBody text'> Title: " . $row->value->title . "<br/>";
if (isset($row->value->description)) { if (isset($row->value->description)) {
$result .= str_replace("\n", "<br>", preg_replace("/(^[\r\n]*|[\r\n]+)[\s\t]*[\r\n]+/", "", trim($row->value->description))); $result .= str_replace("\n", "<br>", preg_replace("/(^[\r\n]*|[\r\n]+)[\s\t]*[\r\n]+/", "", trim($row->value->description)));
} }
if (isset($row->value->notes)) { if (isset($row->value->notes)) {
$result .= " <br>Note: " . $row->value->notes; $result .= " <br>Note: " . $row->value->notes;
} }
$result .= "</p>"; $result .= "</p>";
   
if (isset($row->value->links)) { if (isset($row->value->links)) {
$result .= '<h3>Links/Documents</h3><ul itemprop="associatedMedia">'; $result .= '<h3>Links/Documents</h3><ul itemprop="associatedMedia">';
foreach ($row->value->links as $link) { foreach ($row->value->links as $link) {
$result .= '<li itemscope itemtype="http://schema.org/MediaObject"><a href="' . htmlspecialchars($link) . '" itemprop="url contentURL">' . htmlspecialchars($link) . "</a></li>"; $result .= '<li itemscope itemtype="http://schema.org/MediaObject"><a href="' . htmlspecialchars($link) . '" itemprop="url contentURL">' . htmlspecialchars($link) . "</a></li>";
} }
   
$result .= "</ul>"; $result .= "</ul>";
} }
$result .= "<small><A itemprop='url' href='" . $row->value->url . "'>View original source...</a> ID: " . strip_tags($row->value->docID) . "</small>"; $result .= "<small><A itemprop='url' href='" . $row->value->url . "'>View original source...</a> ID: " . strip_tags($row->value->docID) . "</small>";
$result .= "</div>\n"; $result .= "</div>\n";
return $result; return $result;
} }
   
  <?php
  // use https://github.com/okfn/publicbodies/blob/master/data/nz.csv format
  include_once("include/common.inc.php");
 
  setlocale(LC_CTYPE, 'C');
 
  $headers = Array("title","abbr","key","category","parent","parent_key","description","url","jurisdiction","jurisdiction_code","source","source_url","address","contact","email","tags","created_at","updated_at");
 
  $db = $server->get_db('disclosr-agencies');
 
 
  $foiEmail = Array();
  try {
  $rows = $db->get_view("app", "foiEmails", null, true)->rows;
  //print_r($rows);
  foreach ($rows as $row) {
  $foiEmail[$row->key] = $row->value;
  }
  } catch (SetteeRestClientException $e) {
  setteErrorHandler($e);
  die();
  }
 
  $fp = fopen('php://output', 'w');
  if ($fp && $db) {
  header('Content-Type: text/csv; charset=utf-8');
  header('Content-Disposition: attachment; filename="export.' . date("c") . '.csv"');
  header('Pragma: no-cache');
  header('Expires: 0');
  fputcsv($fp, $headers);
  try {
  $agencies = $db->get_view("app", "byCanonicalName", null, true)->rows;
  //print_r($rows);
  foreach ($agencies as $agency) {
  // print_r($agency);
 
  if (isset($agency->value->foiEmail) && $agency->value->foiEmail != "null" && !isset($agency->value->status)) {
  $row = Array();
  $row["title"] = trim($agency->value->name);
  $row["abbr"] = (isset($agency->value->shortName) ? $agency->value->shortName : "");
  $row["key"] = (isset($agency->value->shortName) ? "au/".strtolower($agency->value->shortName) : "");
  $row["category"] ="";
  $row["parent"] ="";
  $row["parentkey"] ="";
  $row["description"] = (isset($agency->value->description) ? $agency->value->description : "");
  $row["url"] = (isset($agency->value->website) ? $agency->value->website : "");
  $row["jurisdiction"] = "Australia";
  $row["jurisdiction_code"] = "au";
 
  $row["source"] ="";
  $row["source_url"] ="";
  $row["address"] ="";
  $row["contact"] ="";
 
  $row["email"] = (isset($agency->value->foiEmail) ? $agency->value->foiEmail : "");
  $row["tags"] ="";
  $row["created_at"] ="";
  $row["updated_at"] ="";
 
 
  $otherBodies = Array();
  if (isset($agency->value->foiBodies)) {
  $otherBodies = array_merge($otherBodies, $agency->value->foiBodies);
  }
  if (isset($agency->value->positions)) {
  $positions = Array();
  foreach ($agency->value->positions as $position) {
  $positions[] = "Office of the ".$position;
  }
  $otherBodies = array_merge($otherBodies, $positions);
  }
  sort($otherBodies);
  if (count($otherBodies) > 0) {
  $row["description"] .= "<br/> This department also responds to requests for information held by " . implode(", ", $otherBodies);
  }
 
 
  fputcsv($fp, array_values($row));
  }
  }
  } catch (SetteeRestClientException $e) {
  setteErrorHandler($e);
  }
 
  die;
  }
  ?>
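
# The export above writes one row per agency in the okfn/publicbodies column
# order defined in $headers, with the header row first, so it can be read
# back with any CSV library. A sketch follows; 'export.csv' stands in for
# whatever filename the Content-Disposition header produced.
import csv

with open('export.csv') as f:
    for body in csv.DictReader(f):
        # each dict key matches one of the $headers columns emitted above
        if body['email']:
            print body['title'], '->', body['email']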
 
<?php <?php
   
include_once('include/common.inc.php'); include_once('include/common.inc.php');
   
function displayValue($key, $value, $mode) function displayValue($key, $value, $mode)
{ {
global $db, $schemas; global $db, $schemas;
$ignoreKeys = Array("metadata", "metaTags", "statistics", "rtkURLs", "rtkDescriptions"); $ignoreKeys = Array("metadata", "metaTags", "statistics", "rtkURLs", "rtkDescriptions");
if ($mode == "view") { if ($mode == "view") {
if (strpos($key, "_") === 0 || in_array($key, $ignoreKeys)) if (strpos($key, "_") === 0 || in_array($key, $ignoreKeys))
return; return;
echo "<tr>"; echo "<tr>";
   
echo "<td class='$key'>"; echo "<td class='$key'>";
if (isset($schemas['agency']["properties"][$key])) { if (isset($schemas['agency']["properties"][$key])) {
echo $schemas['agency']["properties"][$key]['x-title'] . "<br><small>" . $schemas['agency']["properties"][$key]['description'] . "</small>"; echo $schemas['agency']["properties"][$key]['x-title'] . "<br><small>" . $schemas['agency']["properties"][$key]['description'] . "</small>";
} }
echo "</td><td>"; echo "</td><td>";
if (is_array($value)) { if (is_array($value)) {
echo "<ol>"; echo "<ol>";
foreach ($value as $subkey => $subvalue) { foreach ($value as $subkey => $subvalue) {
   
echo "<li "; echo "<li ";
if (isset($schemas['agency']["properties"][$key]['x-property'])) { if (isset($schemas['agency']["properties"][$key]['x-property'])) {
echo ' property="' . $schemas['agency']["properties"][$key]['x-property'] . '" '; echo ' property="' . $schemas['agency']["properties"][$key]['x-property'] . '" ';
} }
if (isset($schemas['agency']["properties"][$key]['x-itemprop'])) { if (isset($schemas['agency']["properties"][$key]['x-itemprop'])) {
echo ' itemprop="' . $schemas['agency']["properties"][$key]['x-itemprop'] . '" '; echo ' itemprop="' . $schemas['agency']["properties"][$key]['x-itemprop'] . '" ';
} }
echo " >"; echo " >";
   
echo "$subvalue</li>"; echo "$subvalue</li>";
} }
echo "</ol></td></tr>"; echo "</ol></td></tr>";
} else { } else {
if (isset($schemas['agency']["properties"][$key]['x-property'])) { if (isset($schemas['agency']["properties"][$key]['x-property'])) {
echo '<span property="' . $schemas['agency']["properties"][$key]['x-property'] . '">'; echo '<span property="' . $schemas['agency']["properties"][$key]['x-property'] . '">';
} else { } else {
echo "<span>"; echo "<span>";
} }
   
if ((strpos($key, "URL") > 0 || $key == 'website') && $value != "") { if ((strpos($key, "URL") > 0 || $key == 'website') && $value != "") {
echo "<a " . ($key == 'website' ? 'itemprop="url"' : '') . " href='$value'>$value</a>"; echo "<a " . ($key == 'website' ? 'itemprop="url"' : '') . " href='$value'>$value</a>";
} else if ($key == 'abn') { } else if ($key == 'abn') {
echo "<a href='http://www.abr.business.gov.au/SearchByAbn.aspx?SearchText=$value'>$value</a>"; echo "<a href='http://www.abr.business.gov.au/SearchByAbn.aspx?SearchText=$value'>$value</a>";
} else { } else {
echo "$value"; echo "$value";
} }
echo "</span>"; echo "</span>";
} }
echo "</td></tr>"; echo "</td></tr>";
} }
if ($mode == "edit") { if ($mode == "edit") {
if (is_array($value)) { if (is_array($value)) {
echo '<div class="row"> echo '<div class="row">
<div class="seven columns"> <div class="seven columns">
<fieldset> <fieldset>
<h5>' . $key . '</h5>'; <h5>' . $key . '</h5>';
foreach ($value as $subkey => $subvalue) { foreach ($value as $subkey => $subvalue) {
echo "<label>$subkey</label><input class='input-text' type='text' id='$key$subkey' name='$key" . '[' . $subkey . "]' value='$subvalue'/></tr>"; echo "<label>$subkey</label><input class='input-text' type='text' id='$key$subkey' name='$key" . '[' . $subkey . "]' value='$subvalue'/></tr>";
} }
echo "</fieldset> echo "</fieldset>
</div> </div>
</div>"; </div>";
} else { } else {
if (strpos($key, "_") === 0) { if (strpos($key, "_") === 0) {
echo "<input type='hidden' id='$key' name='$key' value='$value'/>"; echo "<input type='hidden' id='$key' name='$key' value='$value'/>";
} else if ($key == "parentOrg") { } else if ($key == "parentOrg") {
echo "<label for='$key'>$key</label><select id='$key' name='$key'><option value=''> Select... </option>"; echo "<label for='$key'>$key</label><select id='$key' name='$key'><option value=''> Select... </option>";
$rows = $db->get_view("app", "byDeptStateName")->rows; $rows = $db->get_view("app", "byDeptStateName")->rows;
//print_r($rows); //print_r($rows);
foreach ($rows as $row) { foreach ($rows as $row) {
echo "<option value='{$row->value}'" . (($row->value == $value) ? "SELECTED" : "") . " >" . str_replace("Department of ", "", $row->key) . "</option>"; echo "<option value='{$row->value}'" . (($row->value == $value) ? "SELECTED" : "") . " >" . str_replace("Department of ", "", $row->key) . "</option>";
} }
echo " </select>"; echo " </select>";
} else { } else {
echo "<label>$key</label><input class='input-text' type='text' id='$key' name='$key' value='$value'/>"; echo "<label>$key</label><input class='input-text' type='text' id='$key' name='$key' value='$value'/>";
if ((strpos($key, "URL") > 0 || $key == 'website') && $value != "") { if ((strpos($key, "URL") > 0 || $key == 'website') && $value != "") {
echo "<a " . ($key == 'website' ? 'itemprop="url"' : '') . " href='$value'>view</a>"; echo "<a " . ($key == 'website' ? 'itemprop="url"' : '') . " href='$value'>view</a>";
} }
if ($key == 'abn') { if ($key == 'abn') {
echo "<a href='http://www.abr.business.gov.au/SearchByAbn.aspx?SearchText=$value'>view abn</a>"; echo "<a href='http://www.abr.business.gov.au/SearchByAbn.aspx?SearchText=$value'>view abn</a>";
} }
} }
} }
} }
// //
} }
   
function addDefaultFields($row) function addDefaultFields($row)
{ {
global $schemas; global $schemas;
$defaultFields = array_keys($schemas['agency']['properties']); $defaultFields = array_keys($schemas['agency']['properties']);
foreach ($defaultFields as $defaultField) { foreach ($defaultFields as $defaultField) {
if (!isset($row[$defaultField])) { if (!isset($row[$defaultField])) {
if ($schemas['agency']['properties'][$defaultField]['type'] == "string") { if ($schemas['agency']['properties'][$defaultField]['type'] == "string") {
$row[$defaultField] = ""; $row[$defaultField] = "";
} }
if ($schemas['agency']['properties'][$defaultField]['type'] == "array") { if ($schemas['agency']['properties'][$defaultField]['type'] == "array") {
$row[$defaultField] = Array(""); $row[$defaultField] = Array("");
} }
} else if ($schemas['agency']['properties'][$defaultField]['type'] == "array") { } else if ($schemas['agency']['properties'][$defaultField]['type'] == "array") {
if (is_array($row[$defaultField])) { if (is_array($row[$defaultField])) {
$row[$defaultField][] = ""; $row[$defaultField][] = "";
$row[$defaultField][] = ""; $row[$defaultField][] = "";
$row[$defaultField][] = ""; $row[$defaultField][] = "";
} else { } else {
$value = $row[$defaultField]; $value = $row[$defaultField];
$row[$defaultField] = Array($value); $row[$defaultField] = Array($value);
$row[$defaultField][] = ""; $row[$defaultField][] = "";
$row[$defaultField][] = ""; $row[$defaultField][] = "";
} }
} }
} }
return $row; return $row;
} }
   
$db = $server->get_db('disclosr-agencies'); $db = $server->get_db('disclosr-agencies');
   
if (isset($_REQUEST['id'])) { if (isset($_REQUEST['id'])) {
//get an agency record as json/html, search by name/abn/id //get an agency record as json/html, search by name/abn/id
// by name = startkey="Ham"&endkey="Ham\ufff0" // by name = startkey="Ham"&endkey="Ham\ufff0"
// edit? // edit?
   
$obj = $db->get($_REQUEST['id']); $obj = $db->get($_REQUEST['id']);
include_header(isset($obj->name) ? $obj->name : ""); include_header(isset($obj->name) ? $obj->name : "");
//print_r($row); //print_r($row);
if (sizeof($_POST) > 0) { if (sizeof($_POST) > 0) {
//print_r($_POST); //print_r($_POST);
foreach ($_POST as $postkey => $postvalue) { foreach ($_POST as $postkey => $postvalue) {
if ($postvalue == "") { if ($postvalue == "") {
unset($_POST[$postkey]); unset($_POST[$postkey]);
} }
if (is_array($postvalue)) { if (is_array($postvalue)) {
if (count($postvalue) == 1 && $postvalue[0] == "") { if (count($postvalue) == 1 && $postvalue[0] == "") {
unset($_POST[$postkey]); unset($_POST[$postkey]);
} else { } else {
foreach ($_POST[$postkey] as $key => &$value) { foreach ($_POST[$postkey] as $key => &$value) {
if ($value == "") { if ($value == "") {
unset($_POST[$postkey][$key]); unset($_POST[$postkey][$key]);
} }
} }
} }
} }
} }
if (isset($_POST['_id']) && $db->get_rev($_POST['_id']) == $_POST['_rev']) { if (isset($_POST['_id']) && $db->get_rev($_POST['_id']) == $_POST['_rev']) {
echo "Edited version was latest version, continue saving"; echo "Edited version was latest version, continue saving";
$newdoc = $_POST; $newdoc = $_POST;
$newdoc['metadata']['lastModified'] = time(); $newdoc['metadata']['lastModified'] = time();
$obj = $db->save($newdoc); $obj = $db->save($newdoc);
} else { } else {
echo "ALERT doc revised by someone else while editing. Document not saved."; echo "ALERT doc revised by someone else while editing. Document not saved.";
} }
} }
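
# The _rev comparison above is CouchDB's standard optimistic-concurrency
# check: only save if the revision the form was loaded from is still the
# current one. The same guard as a Python sketch against the raw CouchDB
# HTTP API; the localhost URL and database location are assumptions.
import requests

def save_if_unchanged(doc):
    base = "http://localhost:5984/disclosr-agencies/"  # assumed location
    # fetch the live revision and refuse to save over someone else's edit
    live = requests.get(base + doc['_id']).json()
    if live['_rev'] != doc['_rev']:
        return False  # revised by someone else while editing
    return requests.put(base + doc['_id'], json=doc).ok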
   
$mode = "view"; $mode = "view";
$rowArray = object_to_array($obj); $rowArray = object_to_array($obj);
ksort($rowArray); ksort($rowArray);
if ($mode == "edit") { if ($mode == "edit") {
$row = addDefaultFields($rowArray); $row = addDefaultFields($rowArray);
} else { } else {
$row = $rowArray; $row = $rowArray;
} }
   
if ($mode == "view") { if ($mode == "view") {
echo ' <div class="container-fluid"> echo ' <div class="container-fluid">
<div class="row-fluid"> <div class="row-fluid">
<div class="span3"> <div class="span3">
<div class="well sidebar-nav"> <div class="well sidebar-nav">
<ul class="nav nav-list"> <ul class="nav nav-list">
<li class="nav-header">Statistics</li>'; <li class="nav-header">Statistics</li>';
   
if (isset($row['statistics']['employees'])) { if (isset($row['statistics']['employees'])) {
echo '<div><i class="icon-user" style="float:left"></i><p style="margin-left:16px;">'; echo '<div><i class="icon-user" style="float:left"></i><p style="margin-left:16px;">';
$keys = array_keys($row['statistics']['employees']); $keys = array_keys($row['statistics']['employees']);
$lastkey = $keys[count($keys) - 1]; $lastkey = $keys[count($keys) - 1];
echo $row['statistics']['employees'][$lastkey]['value'] . ' employees <small>(' . $lastkey . ')</small>'; echo $row['statistics']['employees'][$lastkey]['value'] . ' employees <small>(' . $lastkey . ')</small>';
echo '</div>'; echo '</div>';
} }
if (isset($row['statistics']['budget'])) { if (isset($row['statistics']['budget'])) {
echo '<div><i class="icon-shopping-cart" style="float:left"></i><p style="margin-left:16px;">'; echo '<div><i class="icon-shopping-cart" style="float:left"></i><p style="margin-left:16px;">';
$keys = array_keys($row['statistics']['budget']); $keys = array_keys($row['statistics']['budget']);
$lastkey = $keys[count($keys) - 1]; $lastkey = $keys[count($keys) - 1];
echo "$" . number_format(floatval($row['statistics']['budget'][$lastkey]['value'])) . ' <small>(' . $lastkey . ' budget)</small>'; echo "$" . number_format(floatval($row['statistics']['budget'][$lastkey]['value'])) . ' <small>(' . $lastkey . ' budget)</small>';
echo '</div>'; echo '</div>';
} }
echo ' </ul> echo ' </ul>
</div><!--/.well --> </div><!--/.well -->
</div><!--/span--> </div><!--/span-->
<div class="span9">'; <div class="span9">';
echo '<div itemscope itemtype="http://schema.org/GovernmentOrganization" typeof="schema:GovernmentOrganization" about="#' . $row['_id'] . '">'; echo '<div itemscope itemtype="http://schema.org/GovernmentOrganization" typeof="schema:GovernmentOrganization org:Organization" about="#' . $row['_id'] . '">';
echo '<div class="hero-unit"> echo '<div class="hero-unit">
<h1 itemprop="name">' . $row['name'] . '</h1>'; <h1 itemprop="name">' . $row['name'] . '</h1>';
if (isset($row['description'])) { if (isset($row['description'])) {
echo '<p>' . $row['description'] . '</p>'; echo '<p>' . $row['description'] . '</p>';
} }
echo '</div><table width="100%">'; echo '</div><table width="100%">';
echo "<tr><th>Field Name</th><th>Field Value</th></tr>"; echo "<tr><th>Field Name</th><th>Field Value</th></tr>";
} }
if ($mode == "edit") { if ($mode == "edit") {
?> ?>
<input id="addfield" type="button" value="Add Field"/> <input id="addfield" type="button" value="Add Field"/>
<script> <script>
window.onload = function () { window.onload = function () {
$(document).ready(function () { $(document).ready(function () {
// put all your jQuery goodness in here. // put all your jQuery goodness in here.
// http://charlie.griefer.com/blog/2009/09/17/jquery-dynamically-adding-form-elements/ // http://charlie.griefer.com/blog/2009/09/17/jquery-dynamically-adding-form-elements/
$('#addfield').click(function () { $('#addfield').click(function () {
var field_name = window.prompt("fieldname?", ""); var field_name = window.prompt("fieldname?", "");
if (field_name != "") { if (field_name != "") {
$('#submitbutton').before($('<span></span>') $('#submitbutton').before($('<span></span>')
.append("<label>" + field_name + "</label>") .append("<label>" + field_name + "</label>")
.append("<input class='input-text' type='text' id='" + field_name + "' name='" + field_name + "'/>") .append("<input class='input-text' type='text' id='" + field_name + "' name='" + field_name + "'/>")
); );
} }
}); });
}); });
}; };
</script> </script>
<form id="editform" class="nice" method="post"> <form id="editform" class="nice" method="post">
<?php <?php
   
} }
foreach ($row as $key => $value) { foreach ($row as $key => $value) {
echo displayValue($key, $value, $mode); echo displayValue($key, $value, $mode);
} }
if ($mode == "view") { if ($mode == "view") {
echo "</table></div>"; echo "</table></div>";
echo ' </div><!--/span--> echo ' </div><!--/span-->
</div><!--/row--> </div><!--/row-->
</div><!--/span--> </div><!--/span-->
</div><!--/row-->'; </div><!--/row-->';
} }
if ($mode == "edit") { if ($mode == "edit") {
echo '<input id="submitbutton" type="submit"/></form>'; echo '<input id="submitbutton" type="submit"/></form>';
} }
} else { } else {
// show all list // show all list
include_header('Agencies'); include_header('Agencies');
echo ' <div class="container-fluid"> echo ' <div class="container-fluid">
<div class="row-fluid"> <div class="row-fluid">
<div class="span3"> <div class="span3">
<div class="well sidebar-nav"> <div class="well sidebar-nav">
<ul class="nav nav-list"> <ul class="nav nav-list">
<li class="nav-header">Sidebar</li>'; <li class="nav-header">Sidebar</li>';
echo ' </ul> echo ' </ul>
</div><!--/.well --> </div><!--/.well -->
</div><!--/span--> </div><!--/span-->
<div class="span9"> <div class="span9">
<div class="hero-unit"> <div class="hero-unit">
<h1>Australian Government Agencies</h1> <h1>Australian Government Agencies</h1>
<p>Explore collected information about Australian Government Agencies below.</p> <p>Explore collected information about Australian Government Agencies below.</p>
   
</div> </div>
<div class="row-fluid"> <div class="row-fluid">
<div class="span4">'; <div class="span4">';
try { try {
$rows = $db->get_view("app", "byCanonicalName")->rows; $rows = $db->get_view("app", "byCanonicalName")->rows;
//print_r($rows); //print_r($rows);
$rowCount = count($rows); $rowCount = count($rows);
foreach ($rows as $i => $row) { foreach ($rows as $i => $row) {
if ($i % ($rowCount / 3) == 0 && $i != 0 && $i != $rowCount - 2) echo '</div><div class="span4">'; if ($i % ($rowCount / 3) == 0 && $i != 0 && $i != $rowCount - 2) echo '</div><div class="span4">';
// print_r($row); // print_r($row);
echo '<span itemscope itemtype="http://schema.org/GovernmentOrganization" typeof="schema:GovernmentOrganization foaf:Organization" about="getAgency.php?id=' . $row->value->_id . '"> echo '<span itemscope itemtype="http://schema.org/GovernmentOrganization" typeof="schema:GovernmentOrganization foaf:Organization" about="getAgency.php?id=' . $row->value->_id . '">
<a href="getAgency.php?id=' . $row->value->_id . '" rel="schema:url foaf:page" property="schema:name foaf:name" itemprop="url"><span itemprop="name">' . <a href="getAgency.php?id=' . $row->value->_id . '" rel="schema:url foaf:page" property="schema:name foaf:name" itemprop="url"><span itemprop="name">' .
(isset($row->value->name) ? $row->value->name : "ERROR NAME MISSING") (isset($row->value->name) ? $row->value->name : "ERROR NAME MISSING")
. '</span></a></span><br><br>'; . '</span></a></span><br><br>';
} }
   
} catch (SetteeRestClientException $e) { } catch (SetteeRestClientException $e) {
setteErrorHandler($e); setteErrorHandler($e);
} }
echo ' </div><!--/span--> echo ' </div><!--/span-->
</div><!--/row--> </div><!--/row-->
</div><!--/span--> </div><!--/span-->
</div><!--/row-->'; </div><!--/row-->';
} }
   
include_footer(); include_footer();
?> ?>
   
file:a/graph.php -> file:b/graph.php
<?php <?php
include_once('include/common.inc.php'); include_once('include/common.inc.php');
//include_header(); //include_header();
$format = "html"; $format = "html";
if (isset($_REQUEST['format'])) { if (isset($_REQUEST['format'])) {
$format = $_REQUEST['format']; $format = $_REQUEST['format'];
} }
   
function add_node($id, $label, $parent="") { function add_node($id, $label, $parent="") {
global $format; global $format;
if ($format == "html") { if ($format == "html") {
// echo "nodes[\"$id\"] = graph.newNode({label: \"$label\"});" . PHP_EOL; // echo "nodes[\"$id\"] = graph.newNode({label: \"$label\"});" . PHP_EOL;
} }
if ($format == "dot" && $label != "") { if ($format == "dot" && $label != "") {
echo "$id [label=\"$label\"];". PHP_EOL; echo "\"$id\" [label=\"$label\", shape=plaintext];". PHP_EOL;
} }
if ($format == "gexf") { if ($format == "gexf") {
echo "<node id='$id' label=\"".htmlentities($label,ENT_XML1)."\" ".($parent != ""? "pid='$parent'><viz:size value='1'/>":"><viz:size value='2'/>") echo "<node id='$id' label=\"".htmlentities($label)."\" ".($parent != ""? "pid='$parent'><viz:size value='1'/>":"><viz:size value='2'/>")
."<viz:color b='".rand(0,255)."' g='".rand(0,255)."' r='".rand(0,255)."'/>" ."<viz:color b='".rand(0,255)."' g='".rand(0,255)."' r='".rand(0,255)."'/>"
."</node>". PHP_EOL; ."</node>". PHP_EOL;
} }
} }
   
function add_edge($from, $to, $color) { function add_edge($from, $to, $color) {
global $format; global $format;
if ($format == "html") { if ($format == "html") {
// echo "graph.newEdge(nodes[\"$from\"], nodes['$to'], {color: '$color'});" . PHP_EOL; // echo "graph.newEdge(nodes[\"$from\"], nodes['$to'], {color: '$color'});" . PHP_EOL;
} }
if ($format == "dot") { if ($format == "dot") {
echo "$from -> $to ".($color != ""? "[color=$color]":"").";". PHP_EOL; echo "\"$from\" -> \"$to\" ".($color != ""? "[color=$color]":"").";". PHP_EOL;
} }
if ($format == "gexf") { if ($format == "gexf") {
echo "<edge id='$from$to' source='$from' target='$to' />". PHP_EOL; echo "<edge id='$from$to' source='$from' target='$to' />". PHP_EOL;
} }
} }
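
// add_node/add_edge render the same agency graph in three formats selected
// by ?format=; the dot branch quotes ids and labels so Graphviz accepts
// arbitrary agency names. A Python sketch of the equivalent DOT emission
// follows; to_dot and its node/edge shapes are hypothetical, and labels
// containing double quotes would still need escaping.
//
//   def to_dot(nodes, edges):
//       # nodes: {id: label}, edges: [(from_id, to_id, color)]
//       out = ['digraph g {']
//       for id, label in nodes.items():
//           out.append('"%s" [label="%s", shape=plaintext];' % (id, label))
//       for src, dst, color in edges:
//           out.append('"%s" -> "%s" [color=%s];' % (src, dst, color))
//       out.append('}')
//       return '\n'.join(out)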
if ($format == "gexf") { if ($format == "gexf") {
//header('Content-Type: text/xml'); //header('Content-Type: text/xml');
header('Content-Type: application/gexf+xml'); header('Content-Type: application/gexf+xml');
echo '<?xml version="1.0" encoding="UTF-8"?> echo '<?xml version="1.0" encoding="UTF-8"?>
<gexf xmlns="http://www.gexf.net/1.2draft" xmlns:viz="http://www.gexf.net/1.2draft/viz" version="1.2"> <gexf xmlns="http://www.gexf.net/1.2draft" xmlns:viz="http://www.gexf.net/1.2draft/viz" version="1.2">
<meta lastmodifieddate="2009-03-20"> <meta lastmodifieddate="2009-03-20">
<creator>Gexf.net</creator> <creator>Gexf.net</creator>
<description>A hello world! file</description> <description>A hello world! file</description>
</meta> </meta>
<graph mode="static" defaultedgetype="directed"> <graph mode="static" defaultedgetype="directed">
<nodes>'. PHP_EOL; <nodes>'. PHP_EOL;
} }
   
if ($format == "dot") { if ($format == "dot") {
echo 'digraph g {'. PHP_EOL; echo 'digraph g {'. PHP_EOL;
} }
$db = $server->get_db('disclosr-agencies'); $db = $server->get_db('disclosr-agencies');
add_node("fedg","Federal Government - Commonwealth of Australia"); add_node("fedg","Federal Government - Commonwealth of Australia");
try { try {
$rows = $db->get_view("app", "byCanonicalName", null, true)->rows; $rows = $db->get_view("app", "byCanonicalName", null, true)->rows;
//print_r($rows); //print_r($rows);
foreach ($rows as $row) { foreach ($rows as $row) {
add_node($row->id, $row->key); add_node($row->id, $row->value->name);
} }
} catch (SetteeRestClientException $e) { } catch (SetteeRestClientException $e) {
setteErrorHandler($e); setteErrorHandler($e);
} }
if ($format == "gexf") { if ($format == "gexf") {
echo '</nodes> echo '</nodes>
<edges>'. PHP_EOL; <edges>'. PHP_EOL;
} }
try { try {
$rows = $db->get_view("app", "byDeptStateName", null, true)->rows; $rows = $db->get_view("app", "byDeptStateName", null, true)->rows;
//print_r($rows); //print_r($rows);
foreach ($rows as $row) { foreach ($rows as $row) {
add_edge("fedg", $row->value, 'yellow'); add_edge("fedg", $row->value, 'yellow');
} }
} catch (SetteeRestClientException $e) { } catch (SetteeRestClientException $e) {
setteErrorHandler($e); setteErrorHandler($e);
} }
   
try { try {
$rows = $db->get_view("app", "parentOrgs", null, true)->rows; $rows = $db->get_view("app", "parentOrgs", null, true)->rows;
// print_r($rows); // print_r($rows);
foreach ($rows as $row) { foreach ($rows as $row) {
add_edge($row->key, $row->value, 'blue'); add_edge($row->key, $row->value, 'blue');
} }
} catch (SetteeRestClientException $e) { } catch (SetteeRestClientException $e) {
setteErrorHandler($e); setteErrorHandler($e);
} }
if ($format == "html") { if ($format == "html") {
?> ?>
<div id="sigma-example" width="960" style="min-height:800px;background-color: #333;"></div> <div id="sigma-example" width="960" style="min-height:800px;background-color: #333;"></div>
<script src="js/sigma.min.js"></script> <script src="js/sigma.min.js"></script>
<script src="js/sigma/plugins/sigma.parseGexf.js"></script> <script src="js/sigma/plugins/sigma.parseGexf.js"></script>
<script src="js/sigma/plugins/sigma.forceatlas2.js"></script> <script src="js/sigma/plugins/sigma.forceatlas2.js"></script>
<script type="text/javascript">function init() { <script type="text/javascript">function init() {
// Instanciate sigma.js and customize rendering : // Instanciate sigma.js and customize rendering :
var sigInst = sigma.init(document.getElementById('sigma-example')).drawingProperties({ var sigInst = sigma.init(document.getElementById('sigma-example')).drawingProperties({
defaultLabelColor: '#fff', defaultLabelColor: '#fff',
defaultLabelSize: 14, defaultLabelSize: 14,
defaultLabelBGColor: '#fff', defaultLabelBGColor: '#fff',
defaultLabelHoverColor: '#000', defaultLabelHoverColor: '#000',
labelThreshold: 6, labelThreshold: 6,
defaultEdgeType: 'curve' defaultEdgeType: 'curve'
}).graphProperties({ }).graphProperties({
minNodeSize: 0.5, minNodeSize: 0.5,
maxNodeSize: 5, maxNodeSize: 5,
minEdgeSize: 5, minEdgeSize: 5,
maxEdgeSize: 5 maxEdgeSize: 5
}).mouseProperties({ }).mouseProperties({
maxRatio: 32 maxRatio: 32
}); });
   
// Parse a GEXF encoded file to fill the graph // Parse a GEXF encoded file to fill the graph
// (requires "sigma.parseGexf.js" to be included) // (requires "sigma.parseGexf.js" to be included)
sigInst.parseGexf('graph.php?format=gexf'); sigInst.parseGexf('graph.php?format=gexf');
sigInst.bind('downnodes',function(event){ sigInst.bind('downnodes',function(event){
var nodes = event.content; var nodes = event.content;
}); });
// Start the ForceAtlas2 algorithm // Start the ForceAtlas2 algorithm
// (requires "sigma.forceatlas2.js" to be included) // (requires "sigma.forceatlas2.js" to be included)
sigInst.startForceAtlas2(); sigInst.startForceAtlas2();
// Draw the graph : // Draw the graph :
sigInst.draw(); sigInst.draw();
} }
   
if (document.addEventListener) { if (document.addEventListener) {
document.addEventListener("DOMContentLoaded", init, false); document.addEventListener("DOMContentLoaded", init, false);
} else { } else {
window.onload = init; window.onload = init;
} }
</script> </script>
   
<?php <?php
} }
if ($format == "dot") { if ($format == "dot") {
echo "}"; echo "}";
} }
if ($format == "gexf") { if ($format == "gexf") {
echo ' </edges> echo ' </edges>
</graph> </graph>
</gexf>'. PHP_EOL; </gexf>'. PHP_EOL;
} }
//include_footer(); //include_footer();
?> ?>
   
   
<?php <?php
   
date_default_timezone_set("Australia/Sydney"); date_default_timezone_set("Australia/Sydney");
   
$basePath = ""; $basePath = "";
if (strstr($_SERVER['PHP_SELF'], "alaveteli/") if (strstr($_SERVER['PHP_SELF'], "alaveteli/")
|| strstr($_SERVER['PHP_SELF'], "admin/") || strstr($_SERVER['PHP_SELF'], "admin/")
|| strstr($_SERVER['PHP_SELF'], "lib/") || strstr($_SERVER['PHP_SELF'], "lib/")
|| strstr($_SERVER['PHP_SELF'], "include/") || strstr($_SERVER['PHP_SELF'], "include/")
|| strstr($_SERVER['PHP_SELF'], "documents/") || strstr($_SERVER['PHP_SELF'], "documents/")
|| $_SERVER['SERVER_NAME'] == "disclosurelo.gs" || $_SERVER['SERVER_NAME'] == "disclosurelo.gs"
|| $_SERVER['SERVER_NAME'] == "www.disclosurelo.gs" || $_SERVER['SERVER_NAME'] == "www.disclosurelo.gs"
  || $_SERVER['SERVER_NAME'] == "direct.disclosurelo.gs"
) )
$basePath = "../"; $basePath = "../";
   
include_once ('couchdb.inc.php'); include_once ('couchdb.inc.php');
include_once ('template.inc.php'); include_once ('template.inc.php');
require_once $basePath.'lib/Requests/library/Requests.php'; require_once $basePath.'lib/Requests/library/Requests.php';
   
Requests::register_autoloader(); Requests::register_autoloader();
$ENV = "DEV"; $ENV = "DEV";
if (false && isset($_SERVER['SERVER_NAME']) && $_SERVER['SERVER_NAME'] != 'localhost') { if (false && isset($_SERVER['SERVER_NAME']) && $_SERVER['SERVER_NAME'] != 'localhost') {
   
require $basePath."lib/amon-php/amon.php"; require $basePath."lib/amon-php/amon.php";
Amon::config(array('address'=> 'http://127.0.0.1:2464', Amon::config(array('address'=> 'http://127.0.0.1:2464',
'protocol' => 'http', 'protocol' => 'http',
'secret_key' => "I2LJ6dOMmlnXgVAkTPFXd5M3ejkga8Gd2FbBt6iqZdw")); 'secret_key' => "I2LJ6dOMmlnXgVAkTPFXd5M3ejkga8Gd2FbBt6iqZdw"));
Amon::setup_exception_handler(); Amon::setup_exception_handler();
$ENV = "PROD"; $ENV = "PROD";
} }
   
# Convert a stdClass to an Array. http://www.php.net/manual/en/language.types.object.php#102735 # Convert a stdClass to an Array. http://www.php.net/manual/en/language.types.object.php#102735
   
function object_to_array(stdClass $Class) { function object_to_array(stdClass $Class) {
# Typecast to (array) automatically converts stdClass -> array. # Typecast to (array) automatically converts stdClass -> array.
$Class = (array) $Class; $Class = (array) $Class;
   
# Iterate through the former properties looking for any stdClass properties. # Iterate through the former properties looking for any stdClass properties.
# Recursively apply (array). # Recursively apply (array).
foreach ($Class as $key => $value) { foreach ($Class as $key => $value) {
if (is_object($value) && get_class($value) === 'stdClass') { if (is_object($value) && get_class($value) === 'stdClass') {
$Class[$key] = object_to_array($value); $Class[$key] = object_to_array($value);
} }
} }
return $Class; return $Class;
} }
   
# Convert an Array to stdClass. http://www.php.net/manual/en/language.types.object.php#102735 # Convert an Array to stdClass. http://www.php.net/manual/en/language.types.object.php#102735
   
function array_to_object(array $array) { function array_to_object(array $array) {
# Iterate through our array looking for array values. # Iterate through our array looking for array values.
# If found, recursively call itself.
foreach ($array as $key => $value) { foreach ($array as $key => $value) {
if (is_array($value)) { if (is_array($value)) {
$array[$key] = array_to_object($value); $array[$key] = array_to_object($value);
} }
} }
   
# Typecast to (object) will automatically convert array -> stdClass # Typecast to (object) will automatically convert array -> stdClass
return (object) $array; return (object) $array;
} }
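
# object_to_array and array_to_object recursively swap stdClass objects and
# arrays so CouchDB documents can be edited as plain arrays. The same idea as
# a Python sketch, for comparison only; obj_to_dict is a hypothetical name.
#
#   def obj_to_dict(o):
#       # recursively turn attribute-style objects into plain dicts
#       if hasattr(o, '__dict__'):
#           o = vars(o)
#       if isinstance(o, dict):
#           return dict((k, obj_to_dict(v)) for k, v in o.items())
#       return o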
   
function dept_to_portfolio($deptName) {
    return trim(str_replace("Department of", "", str_replace("Department of the", "Department of", $deptName)));
}
function phrase_to_tag($phrase) {
    return str_replace(" ", "_", str_replace("'", "", str_replace(",", "", strtolower($phrase))));
}
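
/*
 * What the two string helpers above produce, with illustrative inputs:
 *
 *   dept_to_portfolio("Department of the Treasury")
 *       => "Treasury"
 *   phrase_to_tag("Attorney-General's Department, ACT")
 *       => "attorney-generals_department_act"
 */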
function local_url() {
    return "http://" . $_SERVER['HTTP_HOST'] . rtrim(dirname($_SERVER['PHP_SELF']), '/\\') . "/";
}
function GetDomain($url)
{
    // ereg_replace() was removed in PHP 7; preg_replace() keeps the same behaviour.
    $nowww = preg_replace('/www\./', '', $url);
    $domain = parse_url($nowww);
    if (!empty($domain["host"])) {
        return $domain["host"];
    } else {
        return $domain["path"];
    }
}
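
/*
 * GetDomain() usage sketch (illustrative URLs): with a scheme present,
 * parse_url() yields a "host" component; without one, the whole string
 * falls through to "path".
 *
 *   GetDomain("http://www.finance.gov.au/publications/")  => "finance.gov.au"
 *   GetDomain("finance.gov.au/publications/")             => "finance.gov.au/publications/"
 */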
   
<?php

function include_header($title) {
    global $basePath;
    ?>
    <!DOCTYPE html>

    <!-- paulirish.com/2008/conditional-stylesheets-vs-css-hacks-answer-neither/ -->
    <!--[if lt IE 7]> <html class="no-js lt-ie9 lt-ie8 lt-ie7" lang="en"> <![endif]-->
    <!--[if IE 7]> <html class="no-js lt-ie9 lt-ie8" lang="en"> <![endif]-->
    <!--[if IE 8]> <html class="no-js lt-ie9" lang="en"> <![endif]-->
    <!--[if gt IE 8]><!--> <html lang="en"> <!--<![endif]-->
    <head>
        <meta charset="utf-8" />

        <!-- Set the viewport width to device width for mobile -->
        <meta name="viewport" content="width=device-width" />

        <title><?php echo $title; ?> - Disclosr</title>

        <!-- Included CSS Files -->
        <link href="<?php echo $basePath ?>css/bootstrap.min.css" rel="stylesheet">
        <style type="text/css">
            body {
                padding-top: 60px;
                padding-bottom: 40px;
            }
            .sidebar-nav {
                padding: 9px 0;
            }
            .flotr-dummy-div {
                margin-left: -999px;
            }
        </style>
        <link href="<?php echo $basePath ?>css/bootstrap-responsive.min.css" rel="stylesheet">
        <!--[if lt IE 9]>
        <link rel="stylesheet" href="<?php echo $basePath ?>stylesheets/ie.css">
        <![endif]-->

        <!-- IE Fix for HTML5 Tags -->
        <!--[if lt IE 9]>
        <script src="http://html5shiv.googlecode.com/svn/trunk/html5.js"></script>
        <![endif]-->

    </head>
    <body xmlns:schema="http://schema.org/" xmlns:foaf="http://xmlns.com/foaf/0.1/" xmlns:org="http://www.w3.org/ns/org#" xmlns:skos="http://www.w3.org/2004/02/skos/core#">
        <div class="navbar navbar-inverse navbar-fixed-top">
            <div class="navbar-inner">
                <div class="container-fluid">
                    <a class="btn btn-navbar" data-toggle="collapse" data-target=".nav-collapse">
                        <span class="icon-bar"></span>
                        <span class="icon-bar"></span>
                        <span class="icon-bar"></span>
                    </a>
                    <a class="brand" href="#">Disclosr</a>
                    <div class="nav-collapse collapse">
                        <ul class="nav">
                            <li><a href="getAgency.php">Agencies</a></li>
                            <li><a href="ranking.php">Open Gov Ranking</a></li>
                            <li><a href="headcount.php">Employee Headcount Graph</a></li>
                            <li><a href="budget.php">Budget Graph</a></li>
                            <li><a href="about.php">About/FAQ</a></li>
                        </ul>
                    </div><!--/.nav-collapse -->
                </div>
            </div>
        </div>

        <div class="container-fluid">
<?php }
   
function include_footer() {
    global $basePath;
    ?>
    </div> <!-- /container -->
    <hr>

    <footer>
        <p>Not affiliated with or endorsed by any government agency.</p>
    </footer>

    <!-- Included JS Files -->
    <script src="http://code.jquery.com/jquery-1.7.1.min.js"></script>
    <script type="text/javascript" src="<?php echo $basePath ?>js/flotr2/flotr2.js"></script>
    <?php
    // Only emit Google Analytics on the public *.gs domains; strpos() needs an
    // explicit !== false check since a match at offset 0 would be falsy. The
    // analytics guard closes before </body> so the page is always well formed.
    if (strpos($_SERVER['SERVER_NAME'], ".gs") !== false) {
        ?>
        <script type="text/javascript">

            var _gaq = _gaq || [];
            _gaq.push(['_setAccount', 'UA-12341040-2']);
            _gaq.push(['_trackPageview']);

            (function() {
                var ga = document.createElement('script'); ga.type = 'text/javascript'; ga.async = true;
                ga.src = ('https:' == document.location.protocol ? 'https://ssl' : 'http://www') + '.google-analytics.com/ga.js';
                var s = document.getElementsByTagName('script')[0]; s.parentNode.insertBefore(ga, s);
            })();

        </script>
    <?php } ?>

    </body>
    </html>

<?php
}
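
/*
 * Usage sketch for the two template helpers above (hypothetical page): after
 * including common.inc.php, a page wraps its content between them.
 *
 *   include_header('Example Page');   // doctype, <head>, navbar, opens container div
 *   echo "<h1>Hello</h1>";            // page body goes inside the container
 *   include_footer();                 // closes container, JS includes, </body></html>
 */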
   
<?php
include_once('include/common.inc.php');
include_header('Open Gov Rankings');
$db = $server->get_db('disclosr-agencies');
?>
<div class="foundation-header">
    <h1><a href="about.php">Open Government Rankings</a></h1>
    <h4 class="subheader"></h4>
</div>
<table>
<?php
$agenciesdb = $server->get_db('disclosr-agencies');
//$docsdb = $server->get_db('disclosr-documents');
$scoredagencies = Array();
$scores = Array();
$columnKeys = Array();

try {
    $rows = $agenciesdb->get_view("app", "all", null, true)->rows;

    if ($rows) {
        foreach ($rows as $row) {
            $columns = Array();
            foreach ($row->value as $key => $value) {
                if ((strstr($key, "has") || strstr($key, "URL")) && $key != "rtkURLs") {
                    //echo "$key<br>";
                    $columns[$key] = $value;
                }
            }
            //print_r(array_keys($columns));
            $columnKeys = array_unique(array_merge($columnKeys, array_keys($columns)));
            //print_r($columnKeys);
            $score = count($columns);
            if (isset($scores[$score])) {
                $scores[$score]++;
            } else {
                $scores[$score] = 1;
            }
            $scoredagencies[] = Array("id" => $row->key, "website" => (isset($row->value->website) ? $row->value->website : ""), "name" => $row->value->name, "columns" => $columns, "score" => $score);
        }
    }

} catch (SetteeRestClientException $e) {
    setteErrorHandler($e);
}
function cmp($a, $b)
{
    if ($a['score'] == $b['score']) {
        return strcmp($a['name'], $b['name']);
    }
    return ($a['score'] > $b['score']) ? -1 : 1;
}

usort($scoredagencies, "cmp");
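/*
 * Ordering sketch for cmp(): highest score first, ties broken alphabetically
 * by name. With illustrative rows A(score 5), B(score 5), C(score 9), the
 * usort() above yields C, A, B.
 */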
echo "<tr>"; echo "<tr>";
echo "<th>Agency Name</th>"; echo "<th>Agency Name</th>";
echo "<th>Score</th>"; echo "<th>Score</th>";
foreach ($columnKeys as $columnID) { foreach ($columnKeys as $columnID) {
echo "<th>" . (isset($schemas['agency']["properties"][$columnID]['x-title']) ? $schemas['agency']["properties"][$columnID]['x-title'] : "<i>$columnID</i>") . "</th>"; echo "<th>" . (isset($schemas['agency']["properties"][$columnID]['x-title']) ? $schemas['agency']["properties"][$columnID]['x-title'] : "<i>$columnID</i>") . "</th>";
} }
echo "</tr>"; echo "</tr>";
foreach ($scoredagencies as $scoredagency) { foreach ($scoredagencies as $scoredagency) {
echo "<tr>"; echo "<tr>";
echo "<td><b><a href='getAgency.php?id=" . $scoredagency['id'] . "'>". $scoredagency['name'] . "</a></b></td>"; echo "<td><b><a href='getAgency.php?id=" . $scoredagency['id'] . "'>". $scoredagency['name'] . "</a></b></td>";
echo "<td><b>" . $scoredagency['score'] . "</b></td>"; echo "<td><b>" . $scoredagency['score'] . "</b></td>";
foreach ($columnKeys as $key) { foreach ($columnKeys as $key) {
echo "<td style='text-align: center;'>"; echo "<td style='text-align: center;'>";
if (isset($scoredagency['columns'][$key])) { if (isset($scoredagency['columns'][$key])) {
$value = $scoredagency['columns'][$key]; $value = $scoredagency['columns'][$key];
            // Multi-valued criteria are arrays; link the first URL either way.
            if (is_array($value)) {
                $href = $value[0];
            } else {
                $href = $value;
            }
            // Twitter handles are stored as "@name"; turn them into profile links.
            if (isset($href[0]) && $href[0] == "@") {
                $href = str_replace("@", "https://twitter.com/", $href);
            }
            //$href = urlencode($href);

            echo "<font color='lightgreen'>";

            if (strstr($href, "http")) {
                echo "<a title='Yes' href='$href' style='color:lightgreen;'>&check;</a>";
            } else {
                echo "&check;";
            }

            echo "</font>";
        } else {
            echo "<font color='orange'><abbr title='No'>✘</abbr></font>";
        }
        echo "</td>";
    }
    echo "</tr>\n";
}
?>
</table><br>
<div id="criteria" style="width:500px;height:900px;"></div>
<div id="scores" style="width:900px;height:500px;"></div>
<script id="source">
    window.onload = function () {
        $(document).ready(function () {
            var d1 = [];
            var scorelabels = [];
<?php
try {
    $rows = $db->get_view("app", "scoreHas?group=true", null, true)->rows;

    $dataValues = Array();
    foreach ($rows as $row) {
        $dataValues[$row->value] = $row->key;
    }
    $i = 0;
    ksort($dataValues);
    foreach ($dataValues as $value => $key) {
        echo "            d1.push([$value, $i]);" . PHP_EOL;
        echo "            scorelabels.push('$key');" . PHP_EOL;
        $i++;
    }
} catch (SetteeRestClientException $e) {
    setteErrorHandler($e);
}
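/*
 * The loop above writes one bar and one label per criterion into the page's
 * JavaScript; with illustrative counts it emits lines like:
 *   d1.push([12, 0]); scorelabels.push('hasTwitter');
 *   d1.push([45, 1]); scorelabels.push('hasRSS');
 */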
?>
            function scoretrackformatter(obj) {
                if (scorelabels[Math.floor(obj.y)]) {
                    return (scorelabels[Math.floor(obj.y)]) + "=" + obj.x;
                } else {
                    return "";
                }
            }

            function scoretickformatter(val, axis) {
                if (scorelabels[Math.floor(val)]) {
                    return (scorelabels[Math.floor(val)]);
                } else {
                    return "";
                }
            }

            Flotr.draw(document.getElementById("criteria"), [
                {data: d1}
            ], {
                title: 'Total count of agencies with criteria',
                HtmlText: true,
                bars: {
                    show: true,
                    horizontal: true
                },
                mouse: {
                    track: true,
                    relative: true,
                    trackFormatter: scoretrackformatter
                },
                yaxis: {
                    autoscaling: true,
                    minorTickFreq: 0.6,
                    noTicks: scorelabels.length,
                    tickFormatter: scoretickformatter
                },
                xaxis: {
                    autoscaling: true
                }
            });

            var d2 = [];
<?php
try {
    ksort($scores);
    foreach ($scores as $key => $value) {
        echo "            d2.push([$key,$value]);" . PHP_EOL;
    }
} catch (SetteeRestClientException $e) {
    setteErrorHandler($e);
}
?>

            Flotr.draw(document.getElementById("scores"), [
                {data: d2}
            ], {
                title: 'Frequency distribution of Scores',
                HtmlText: true,
                bars: {
                    show: true
                },
                mouse: {
                    track: true,
                    relative: true
                },
                yaxis: {
                    autoscaling: true
                },
                xaxis: {
                    autoscaling: true
                }
            });

        });
    };
</script>
<?php
include_footer();
?>
file:a/robots.txt -> file:b/robots.txt
# www.robotstxt.org/
# www.google.com/support/webmasters/bin/answer.py?hl=en&answer=156449

User-agent: *
Disallow: /admin/
Disallow: /viewDocument.php
Sitemap: http://orgs.disclosurelo.gs/sitemap.xml.php
<?php

$schemas['agency'] = Array(
    "description" => "Representation of government agency and online transparency measures",
    "type" => "object",
    "properties" => Array(
        "name" => Array("type" => "string", "required" => true, "x-itemprop" => "name", "x-property" => "schema:name foaf:name skos:prefLabel", "x-title" => "Name", "description" => "Name, most recent and broadest"),
        "shortName" => Array("type" => "string", "required" => false, "x-title" => "Short Name", "description" => "Name shortened, usually to an acronym"),
        "description" => Array("type" => "string", "required" => false, "x-title" => "Description", "description" => "Description of roles and responsibilities of organisation"),
        "foiEmail" => Array("type" => "string", "required" => false, "x-title" => "FOI Contact Email", "x-itemprop" => "email", "description" => "FOI contact email if not foi@"),
        "sameAs" => Array("type" => "array", "required" => false, "x-property" => "owl:sameAs", "x-title" => "Same As", "description" => "Same as other URLs/URIs for this entity",
            "items" => Array("type" => "string")),
        "otherNames" => Array("type" => "array", "required" => true, "x-title" => "Past/Other Names", "description" => "Other names for organisation",
            "items" => Array("type" => "string")),
        "positions" => Array("type" => "array", "required" => true, "x-title" => "Political Positions", "description" => "Ministers and Parliamentary Secretaries",
            "items" => Array("type" => "string")),
        "foiBodies" => Array("type" => "array", "required" => true, "x-title" => "FOI Bodies", "x-property" => "schema:members foaf:knows org:hasSubOrganization", "description" => "Organisational units within this agency that are subject to FOI Act but are not autonomous",
            "items" => Array("type" => "string")),
        "legislation" => Array("type" => "array", "required" => true, "x-title" => "Legislation", "description" => "Legislation administered by or created for the establishment of this organisation",
            "items" => Array("type" => "string")),
        "orgType" => Array("type" => "string", "required" => true, "x-title" => "Organisation Type", "x-property" => "org:classification", "description" => "Org type based on legal formation via FMA/CAC legislation etc."),
        "parentOrg" => Array("type" => "string", "required" => true, "x-title" => "Parent Organisation", "x-property" => "org:subOrganizationOf", "description" => "Parent organisation, usually a department of state"),
        "website" => Array("type" => "string", "required" => true, "x-title" => "Website", "x-itemprop" => "url", "x-property" => "schema:url foaf:homepage", "description" => "Website URL"),
        "abn" => Array("type" => "string", "required" => true, "x-title" => "Australian Business Number", "x-property" => "org:identifier", "description" => "ABN from business register"),
        "established" => Array("type" => "string", "required" => true, "x-title" => "Date established", "x-property" => "schema:foundingDate", "description" => "Date established"),
        "employees" => Array("type" => "string", "required" => true, "x-title" => "2010-2011 employees", "description" => "2010-2011 employees"),
        "contractListURL" => Array("type" => "string", "required" => true, "x-title" => "Contract Listing", "description" => "Departmental and agency contracts, <a href='http://www.aph.gov.au/senate/pubs/standing_orders/d05.htm'>mandated by the Senate</a>"),
        "budgetURL" => Array("type" => "string", "required" => true, "x-title" => "Budget", "description" => "Portfolio Budget Statements and Portfolio Additional Estimates Statements"),
        "grantsReportingURL" => Array("type" => "string", "required" => true, "x-title" => "Grants Awarded",
            "description" => "Departmental and agency grants <a href='http://www.aph.gov.au/senate/pubs/standing_orders/d05.htm'>mandated by the Senate</a> and <a href='http://www.finance.gov.au/publications/fmg-series/23-commonwealth-grant-guidelines.html'>Commonwealth grants guidelines</a>"),
        "annualReportURL" => Array("type" => "string", "required" => true, "x-title" => "Annual Report(s)", "description" => ""),
        "consultanciesURL" => Array("type" => "string", "required" => true, "x-title" => "Consultants Hired", "description" => ""),
        "legalExpenditureURL" => Array("type" => "string", "required" => true, "x-title" => "Legal Services Expenditure", "description" => "Legal Services Expenditure mandated by Legal Services Directions 2005"),
        "recordsListURL" => Array("type" => "string", "required" => true, "x-title" => "Files/Records Held", "description" => "Indexed lists of departmental and agency files, <a href='http://www.aph.gov.au/senate/pubs/standing_orders/d05.htm'>mandated by the Senate</a>"),
        "FOIDocumentsURL" => Array("type" => "string", "required" => true, "x-title" => "FOI Documents Released", "description" => "FOI Disclosure Log URL"),
        "FOIDocumentsRSSURL" => Array("type" => "string", "required" => false, "x-title" => "RSS Feed of FOI Documents Released", "description" => "FOI Disclosure Log in RSS format"),
        "hasFOIPDF" => Array("type" => "array", "required" => false, "x-title" => "Has FOI Documents Released in PDF", "description" => "FOI Disclosure Log contains any PDFs",
            "items" => Array("type" => "string")),
        "infoPublicationSchemeURL" => Array("type" => "string", "required" => true, "x-title" => "Information Publication Scheme", "description" => ""),
        "appointmentsURL" => Array("type" => "string", "required" => true, "x-title" => "Agency Appointments/Boards", "description" => "Departmental and agency appointments and vacancies, <a href='http://www.aph.gov.au/senate/pubs/standing_orders/d05.htm'>mandated by the Senate</a>"),
        "advertisingURL" => Array("type" => "string", "required" => true, "x-title" => "Approved Advertising Campaigns", "description" => "Agency advertising and public information projects, <a href='http://www.aph.gov.au/senate/pubs/standing_orders/d05.htm'>mandated by the Senate</a>"),
        "hasRSS" => Array("type" => "array", "required" => true, "x-title" => "Has RSS", "description" => ""),
        "hasBlog" => Array("type" => "array", "required" => true, "x-title" => "Has Blog", "description" => ""),
        "hasMobileApp" => Array("type" => "array", "required" => true, "x-title" => "Has Mobile App", "description" => ""),
        "hasMailingList" => Array("type" => "array", "required" => true, "x-title" => "Has Mailing List", "description" => "",
            "items" => Array("type" => "string")),
        "hasTwitter" => Array("type" => "array", "required" => true, "x-title" => "Has Twitter", "description" => "",
            "items" => Array("type" => "string")),
        "hasFacebook" => Array("type" => "array", "required" => true, "x-title" => "Has Facebook", "description" => "",
            "items" => Array("type" => "string")),
        "hasYouTube" => Array("type" => "array", "required" => true, "x-title" => "Has YouTube", "description" => "",
            "items" => Array("type" => "string")),
        "hasFlickr" => Array("type" => "array", "required" => true, "x-title" => "Has Flickr", "description" => "",
            "items" => Array("type" => "string")),
        "hasCCBY" => Array("type" => "array", "required" => true, "x-title" => "Has CC-BY", "description" => "Has any page licensed Creative Commons - Attribution",
            "items" => Array("type" => "string")),
        "hasRestrictiveLicence" => Array("type" => "array", "required" => true, "x-title" => "Has Restrictive Licence", "description" => "Has any page licensed under terms more restrictive than Crown Copyright",
            "items" => Array("type" => "string")),
        "hasPermissiveLicence" => Array("type" => "array", "required" => true, "x-title" => "Has Permissive Licence", "description" => "Has any page licensed under terms more permissive than Crown Copyright but not clear CC-BY",
            "items" => Array("type" => "string")),
        "hasCrownCopyright" => Array("type" => "array", "required" => true, "x-title" => "Has Standard Crown Copyright licence", "description" => "Has any page still licensed under the former Commonwealth Copyright Administration",
            "items" => Array("type" => "string")),
    ),
);
?>
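<?php
/*
 * A small sketch (hypothetical snippet, not part of the codebase) of how the
 * schema above is typically consumed: list each required property with its
 * human-readable x-title, the same lookup ranking.php uses for its table
 * headers. Left commented out so including this file produces no output.
 *
 *   foreach ($schemas['agency']['properties'] as $key => $prop) {
 *       if (!empty($prop['required'])) {
 *           echo $key . " => " . (isset($prop['x-title']) ? $prop['x-title'] : $key) . PHP_EOL;
 *       }
 *   }
 */
?>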
   
<?php

include('include/common.inc.php');
$last_updated = date('Y-m-d', @filemtime('cbrfeed.zip'));
header("Content-Type: text/xml");
echo "<?xml version='1.0' encoding='UTF-8'?>";
echo '<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">' . "\n";
echo " <url><loc>" . local_url() . "index.php</loc><priority>1.0</priority></url>\n";
foreach (scandir("./") as $file) {
    if (strpos($file, ".php") !== false && $file != "index.php" && $file != "sitemap.xml.php" && $file != "viewDocument.php")
        echo " <url><loc>" . local_url() . "$file</loc><priority>0.3</priority></url>\n";
}

$db = $server->get_db('disclosr-agencies');
try {
    $rows = $db->get_view("app", "byCanonicalName")->rows;
    foreach ($rows as $row) {
        echo '<url><loc>' . local_url() . 'getAgency.php?id=' . $row->value->_id . "</loc><priority>0.6</priority></url>\n";
    }
} catch (SetteeRestClientException $e) {
    setteErrorHandler($e);
}
echo '</urlset>';
?>