prod fixes: add a reduce flag to SetteeDatabase::get_view, per-agency pages and RSS feeds, by-date and by-agency charts, a generic HTML disclog scraper, and assorted scraper cleanups


Former-commit-id: 130b8c05fff32afd5b4e3f8a9faadac5381bd456

--- a/couchdb/settee/src/classes/SetteeDatabase.class.php
+++ b/couchdb/settee/src/classes/SetteeDatabase.class.php
@@ -251,7 +251,7 @@
    * 
    * @return void
    */
-  function get_view($design_doc, $view_name, $key = null, $descending = false, $limit = false) {
+  function get_view($design_doc, $view_name, $key = null, $descending = false, $limit = false, $reduce = null) {
     $id = "_design/" . urlencode($design_doc);
     $view_name = urlencode($view_name);
     $id .= "/_view/$view_name";
@@ -269,6 +269,13 @@
       if ($descending) {
         $data .= "&descending=true";
       }
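+      // CouchDB only honours the reduce flag when it is sent explicitly; the
+      // strict null check below lets callers pass false to disable a view's reduce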
+      if ($reduce !== null) {
+          $data .= $reduce ? "&reduce=true" : "&reduce=false";
+      }
       if ($limit) {
           $data .= "&limit=".$limit;
       }
@@ -281,9 +288,11 @@
     }
 
     $full_uri = $this->dbname . "/" . $this->safe_urlencode($id);
+
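+    // work around settee double-URL-encoding "?group=" / "?limit=" when they are
+    // smuggled in via the view name (see agency.php and charts.php)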
 $full_uri = str_replace("%253Fgroup%253D","?group=",$full_uri);
 $full_uri = str_replace("%253Flimit%253D","?limit=",$full_uri);
     $ret = $this->rest_client->http_get($full_uri, $data);
     return $ret['decoded'];
     
   }
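
A quick usage sketch of the new $reduce argument, mirroring the calls introduced
in agency.php and charts.php below ($foidocsdb and $agencyID stand in for the
variables those pages use):

    // grouped counts per agency: force reduce=true on the byAgencyID view
    $rows = $foidocsdb->get_view("app", "byAgencyID?group=true", null, false, false, true)->rows;
    // individual documents for one agency: force reduce=false
    $rows = $foidocsdb->get_view("app", "byAgencyID", $agencyID, false, false, false)->rows;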

--- a/documents/about.php
+++ b/documents/about.php
@@ -1,7 +1,7 @@
 <?php
 
 include('template.inc.php');
-include_header_documents("");
+include_header_documents("About");
 include_once('../include/common.inc.php');
 ?>
 <h1>About</h1>

--- /dev/null
+++ b/documents/agency.php
@@ -0,0 +1,41 @@
+<?php
+include('template.inc.php');
+include_once('../include/common.inc.php');
+$agenciesdb = $server->get_db('disclosr-agencies');
 
+$idtoname = Array();
+foreach ($agenciesdb->get_view("app", "byCanonicalName")->rows as $row) {
+    $idtoname[$row->id] = trim($row->value->name);
+}
+$foidocsdb = $server->get_db('disclosr-foidocuments');
+
+include_header_documents((isset($_REQUEST['id']) ? $idtoname[$_REQUEST['id']] : 'Entries by Agency'));
+$endkey = (isset($_REQUEST['end_key']) ? $_REQUEST['end_key'] : '9999-99-99');
+?>
+<div class="headline">Read all the information released by Australian Federal Government agencies under the FOI Act in one place!</div>
+<a style='float:right' href="rss.xml.php"><img src="img/feed-icon-14x14.png" alt="RSS Icon"/> All Agencies RSS Feed</a><br>
+<?php
+try {
+    if (isset($_REQUEST['id'])) {
+        $rows = $foidocsdb->get_view("app", "byAgencyID", $_REQUEST['id'], false, false, false)->rows;
+        foreach ($rows as $row) {
+            echo displayLogEntry($row, $idtoname);
+            if (!isset($startkey))
+                $startkey = $row->key;
+            $endkey = $row->key;
+        }
+    } else {
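+        // "?group=true" is deliberately smuggled through the view name;
+        // SetteeDatabase::get_view un-escapes it back into the query string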
+        $rows = $foidocsdb->get_view("app", "byAgencyID?group=true", null, false, false, true)->rows;
+        if ($rows) {
+            foreach ($rows as $row) {
+                echo '<a href="agency.php?id=' . $row->key . '">' . $idtoname[$row->key] . " (" . $row->value . " records)</a> <br>\n";
+            }
+        }
+    }
+} catch (SetteeRestClientException $e) {
+    setteErrorHandler($e);
+}
+echo "<a class='btn btn-large btn-primary' href='?end_key=$endkey' style='float:right;'>next page <i class='icon-circle-arrow-right icon-white'></i></a>";
+include_footer_documents();
+?>

--- a/documents/charts.php
+++ b/documents/charts.php
@@ -1,6 +1,6 @@
 <?php
 include('template.inc.php');
-include_header_documents("");
+include_header_documents("Charts");
 include_once('../include/common.inc.php');
 $agenciesdb = $server->get_db('disclosr-agencies');
 
@@ -15,29 +15,28 @@
     <h1><a href="about.php">Charts</a></h1>
     <h4 class="subheader">Lorem ipsum.</h4>
 </div>
-<div id="employees" style="width:1000px;height:900px;"></div>
+<div id="bydate" style="width:1000px;height:300px;"></div>
+<div id="byagency" style="width:1200px;height:300px;"></div>
 <script id="source">
     window.onload = function() {
         $(document).ready(function() {
   var
     d1    = [],
-    start = new Date("2009/01/01 01:00").getTime(),
-    options,
-    graph,
-    i, x, o;
+    options1,
+    o1;
 
 <?php
     try {
-        $rows = $foidocsdb->get_view("app", "byDate?group=true", null, true)->rows;
+        $rows = $foidocsdb->get_view("app", "byDateMonthYear?group=true",null, false,false,true)->rows;
 
 
         $dataValues = Array();
         foreach ($rows as $row) {
-            $dataValues[$row->value] = $row->key;
+            $dataValues[$row->key] = $row->value;
         }
         $i = 0;
         ksort($dataValues);
-        foreach ($dataValues as $value => $key) {
+        foreach ($dataValues as $key => $value) {
 $date = date_create_from_format('Y-m-d', $key);
 if (date_format($date, 'U') != "") {
             echo "       d1.push([".date_format($date, 'U')."000, $value]);" . PHP_EOL;
@@ -52,7 +51,7 @@
 
 
         
-  options = {
+  options1 = {
     xaxis : {
       mode : 'time', 
       labelsAngle : 45
@@ -68,19 +67,19 @@
   function drawGraph (opts) {
 
     // Clone the options, so the 'options' variable always keeps intact.
-    o = Flotr._.extend(Flotr._.clone(options), opts || {});
+    o1 = Flotr._.extend(Flotr._.clone(options1), opts || {});
 
     // Return a new graph.
     return Flotr.draw(
-      document.getElementById("employees"),
+      document.getElementById("bydate"),
       [ d1 ],
-      o
+      o1
     );
   }
 
   graph = drawGraph();      
         
-  Flotr.EventAdapter.observe(container, 'flotr:select', function(area){
+  Flotr.EventAdapter.observe(document.getElementById("bydate"), 'flotr:select', function(area){
     // Draw selected area
     graph = drawGraph({
       xaxis : { min : area.x1, max : area.x2, mode : 'time', labelsAngle : 45 },
@@ -89,10 +88,74 @@
   });
         
   // When graph is clicked, draw the graph with default area.
-  Flotr.EventAdapter.observe(container, 'flotr:click', function () { graph = drawGraph(); });
+  Flotr.EventAdapter.observe(document.getElementById("bydate"), 'flotr:click', function () { graph = drawGraph(); });
 
         });
 }; 
+
+var d2 = [];
+var agencylabels = [];
+function agencytrackformatter(obj) {
+    return agencylabels[Math.floor(obj.x)] + " = " + obj.y;
+}
+function agencytickformatter(val, axis) {
+    if (agencylabels[Math.floor(val)]) {
+        return '<p style="margin-top:8em;-webkit-transform:rotate(-90deg);">' + agencylabels[Math.floor(val)] + '</p>';
+    } else {
+        return "";
+    }
+}
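+// build one bar per agency; agencylabels maps each bar index back to the agency name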
+<?php
+    try {
+        $rows = $foidocsdb->get_view("app", "byAgencyID?group=true",null, false,false,true)->rows;
+
+        $i = 0;
+        foreach ($rows as $row) {
+            echo "       d2.push([" . $i . ", $row->value]);" . PHP_EOL;
+            echo "       agencylabels.push(['" . str_replace("'", "", $idtoname[$row->key]) . "']);" . PHP_EOL;
+            $i++;
+        }
+    } catch (SetteeRestClientException $e) {
+        setteErrorHandler($e);
+    }
+    ?>
+  // Draw the by-agency bar chart
+  Flotr.draw(
+    document.getElementById("byagency"),
+    [d2],
+    {
+      bars : {
+        show : true,
+        horizontal : false,
+        shadowSize : 0,
+        barWidth : 0.5
+      },
+      mouse : {
+        track : true,
+        relative : true,
+        trackFormatter : agencytrackformatter
+      },
+      yaxis : {
+        min : 0,
+        autoscaleMargin : 1
+      },
+      xaxis : {
+        minorTickFreq : 1,
+        noTicks : agencylabels.length,
+        showMinorLabels : true,
+        tickFormatter : agencytickformatter
+      },
+      legend : {
+        show : false
+      }
+    }
+  );
 </script>
 
 <?php

--- /dev/null
+++ b/documents/date.php
@@ -0,0 +1,34 @@
+<?php
 
+include('template.inc.php');
+include_header_documents("Entries by Date");
+include_once('../include/common.inc.php');
+$endkey = (isset($_REQUEST['end_key']) ? $_REQUEST['end_key'] : '9999-99-99');
+?>
+<div class="headline">Read all the information released by Australian Federal Government agencies under the FOI Act in one place!</div>
+<a style='float:right' href="rss.xml.php"><img src="img/feed-icon-14x14.png" alt="RSS Icon"/> All Agencies RSS Feed</a><br>
+<?php
+/*$agenciesdb = $server->get_db('disclosr-agencies');
+
+$idtoname = Array();
+foreach ($agenciesdb->get_view("app", "byCanonicalName")->rows as $row) {
+    $idtoname[$row->id] = trim($row->value->name);
+}
+$foidocsdb = $server->get_db('disclosr-foidocuments');
+try {
+    $rows = $foidocsdb->get_view("app", "byDate", Array($endkey, '0000-00-00'), true, 20)->rows;
+    if ($rows) {
+        foreach ($rows as $key => $row) {
+            echo displayLogEntry($row, $idtoname);
+            if (!isset($startkey)) $startkey = $row->key;
+            $endkey = $row->key;
+        }
+    }
+} catch (SetteeRestClientException $e) {
+    setteErrorHandler($e);
+}
+echo "<a class='btn btn-large btn-primary' href='?end_key=$endkey' style='float:right;'>next page <i class='icon-circle-arrow-right icon-white'></i></a>";
+*/
+include_footer_documents();
+?>
+

--- a/documents/disclogsList.php
+++ b/documents/disclogsList.php
@@ -1,7 +1,7 @@
 <?php
 
 include('template.inc.php');
-include_header_documents("");
+include_header_documents("List of Disclosure Logs");
 include_once('../include/common.inc.php');
 
 echo "<table>

--- /dev/null
+++ b/documents/disclosr-documents.nja
@@ -1,1 +1,7 @@
-
+{
+  "venv": "", 
+  "project-type": "Import from sources", 
+  "name": "disclosr-documents", 
+  "license": "GNU General Public License v3", 
+  "description": ""
+}

--- a/documents/genericScrapers.py
+++ b/documents/genericScrapers.py
@@ -13,11 +13,9 @@
 from datetime import *
 import codecs
 
+import difflib
+
 from StringIO import StringIO
 
 from docx import *
 from lxml import etree
 import zipfile
 
 from pdfminer.pdfparser import PDFDocument, PDFParser
 from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter, process_pdf
@@ -39,20 +37,45 @@
         """ disclosr agency id """
         if self.agencyID is None:
             self.agencyID = os.path.basename(sys.argv[0]).replace(".py", "")
-            return self.agencyID
+        return self.agencyID
 
     def getURL(self):
         """ disclog URL"""
         if self.disclogURL is None:
             agency = scrape.agencydb.get(self.getAgencyID())
             self.disclogURL = agency['FOIDocumentsURL']
-            return self.disclogURL
+        return self.disclogURL
 
     @abc.abstractmethod
     def doScrape(self):
         """ do the scraping """
         return
 
+class GenericHTMLDisclogScraper(GenericDisclogScraper):
+
+    def doScrape(self):
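+        """ Save a generic 'Disclosure Log Updated' entry for logs that are not
+        in table form, attaching an HTML diff against the last fetched copy. """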
+        foidocsdb = scrape.couch['disclosr-foidocuments']
+        (url, mime_type, content) = scrape.fetchURL(scrape.docsdb,
+             self.getURL(), "foidocuments", self.getAgencyID())
+        dochash = scrape.mkhash(content)
+        doc = foidocsdb.get(dochash)
+        if doc is None:
+            print "saving " + dochash
+            description = "This log may have updated but as it was not in a table last time we viewed it, we cannot extract what has changed. Please refer to the agency's website Disclosure Log to see the most recent entries"
+            last_attach = scrape.getLastAttachment(scrape.docsdb, self.getURL())
+            if last_attach is not None:
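+                # difflib.HtmlDiff renders a side-by-side HTML table of the line-level changes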
+                html_diff = difflib.HtmlDiff()
+                description = description + "\nChanges: "
+                description = description + html_diff.make_table(last_attach.read().split('\n'),
+                           content.split('\n'))
+            edate = date.today().strftime("%Y-%m-%d")
+            doc = {'_id': dochash, 'agencyID': self.getAgencyID()
+            , 'url': self.getURL(), 'docID': dochash,
+            "date": edate, "title": "Disclosure Log Updated", "description": description}
+            foidocsdb.save(doc)
+        else:
+            print "already saved"
 
 class GenericPDFDisclogScraper(GenericDisclogScraper):
 
@@ -62,14 +85,15 @@
              self.getURL(), "foidocuments", self.getAgencyID())
         laparams = LAParams()
         rsrcmgr = PDFResourceManager(caching=True)
-        outfp = StringIO.StringIO()
+        outfp = StringIO()
         device = TextConverter(rsrcmgr, outfp, codec='utf-8',
              laparams=laparams)
-        fp = StringIO.StringIO()
+        fp = StringIO()
         fp.write(content)
-        description = output.getvalue()
+
         process_pdf(rsrcmgr, device, fp, set(), caching=True,
              check_extractable=True)
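+        # process_pdf pushes each page through the TextConverter device, which
+        # accumulates the extracted text in outfp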
+        description = outfp.getvalue()
         fp.close()
         device.close()
         outfp.close()
@@ -77,11 +101,10 @@
         doc = foidocsdb.get(dochash)
         if doc is None:
             print "saving " + dochash
-            edate = datetime.fromtimestamp(mktime()).strftime("%Y-%m-%d")
+            edate = date.today().strftime("%Y-%m-%d")
             doc = {'_id': dochash, 'agencyID': self.getAgencyID()
             , 'url': self.getURL(), 'docID': dochash,
-            "date": edate, "title": "Disclosure Log Updated"}
-            self.getDescription(entry, entry, doc)
+            "date": edate, "title": "Disclosure Log Updated", "description": description}
             foidocsdb.save(doc)
         else:
             print "already saved"
@@ -103,17 +126,16 @@
         for paratext in paratextlist:
             newparatextlist.append(paratext.encode("utf-8"))
         ## Print our documnts test with two newlines under each paragraph
-        description = '\n\n'.join(newparatextlist)
+        description = '\n\n'.join(newparatextlist).strip(' \t\n\r')
         dochash = scrape.mkhash(description)
         doc = foidocsdb.get(dochash)
 
         if doc is None:
             print "saving " + dochash
-            edate = datetime.fromtimestamp(mktime()).strftime("%Y-%m-%d")
+            edate = date.today().strftime("%Y-%m-%d")
             doc = {'_id': dochash, 'agencyID': self.getAgencyID()
             , 'url': self.getURL(), 'docID': dochash,
-            "date": edate, "title": "Disclosure Log Updated"}
-            self.getDescription(entry, entry, doc)
+            "date": edate, "title": "Disclosure Log Updated", "description": description}
             foidocsdb.save(doc)
         else:
             print "already saved"
@@ -201,10 +223,9 @@
         (url, mime_type, content) = scrape.fetchURL(scrape.docsdb,
             self.getURL(), "foidocuments", self.getAgencyID())
         if content is not None:
-            if mime_type is "text/html"\
-            or mime_type is "application/xhtml+xml"\
-            or mime_type is"application/xml":
+            if mime_type == "text/html" or mime_type == "application/xhtml+xml" or mime_type == "application/xml":
             # http://www.crummy.com/software/BeautifulSoup/documentation.html
+                print "parsing"
                 soup = BeautifulSoup(content)
                 table = self.getTable(soup)
                 for row in self.getRows(table):
@@ -222,11 +243,11 @@
                             dochash = scrape.mkhash(
                                 self.remove_control_chars(
                                     url + (''.join(id.stripped_strings))))
-                        doc = foidocsdb.get(hash)
+                        doc = foidocsdb.get(dochash)
 
                         if doc is None:
-                            print "saving " + hash
-                            doc = {'_id': hash,
+                            print "saving " + dochash
+                            doc = {'_id': dochash,
                             'agencyID': self.getAgencyID(),
                             'url': self.getURL(),
                             'docID': (''.join(id.stripped_strings))}

 Binary files /dev/null and b/documents/img/feed-icon-14x14.png differ
--- a/documents/index.php
+++ b/documents/index.php
@@ -3,10 +3,11 @@
 include('template.inc.php');
 include_header_documents("");
 include_once('../include/common.inc.php');
-$startkey = (isset($_REQUEST['start_key']) ? $_REQUEST['start_key'] : '9999-99-99');
+$endkey = (isset($_REQUEST['end_key']) ? $_REQUEST['end_key'] : '9999-99-99');
 ?>
+<div class="headline">Read all the information released by Australian Federal Government agencies under the FOI Act in one place!</div>
+<a style='float:right' href="rss.xml.php"><img src="img/feed-icon-14x14.png" alt="RSS Icon"/> All Agencies RSS Feed</a><br>
 <?php
-
 $agenciesdb = $server->get_db('disclosr-agencies');
 
 $idtoname = Array();
@@ -15,17 +16,18 @@
 }
 $foidocsdb = $server->get_db('disclosr-foidocuments');
 try {
-    $rows = $foidocsdb->get_view("app", "byDate", Array($startkey, '0000-00-00'), true, 20)->rows;
+    $rows = $foidocsdb->get_view("app", "byDate", Array($endkey, '0000-00-00'), true, 20)->rows;
     if ($rows) {
         foreach ($rows as $key => $row) {
             echo displayLogEntry($row, $idtoname);
+            if (!isset($startkey)) $startkey = $row->key;
             $endkey = $row->key;
         }
     }
 } catch (SetteeRestClientException $e) {
     setteErrorHandler($e);
 }
-echo "<a href='?start_key=$endkey'>next page</a>";
+echo "<a class='btn btn-large btn-primary' href='?end_key=$endkey' style='float:right;'>next page <i class='icon-circle-arrow-right icon-white'></i></a>";
 include_footer_documents();
 ?>
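
(The next page button passes the last displayed row's key as end_key, so the
following request resumes the descending byDate view from where this page stopped.)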
 

--- a/documents/rss.xml.php
+++ b/documents/rss.xml.php
@@ -8,21 +8,28 @@
 //Creating an instance of FeedWriter class.
 $TestFeed = new RSS2FeedWriter();
 //Setting the channel elements
-//Use wrapper functions for common channelelements
-$TestFeed->setTitle('disclosurelo.gs Newest Entries - All');
-$TestFeed->setLink('http://disclosurelo.gs/rss.xml.php');
-$TestFeed->setDescription('disclosurelo.gs Newest Entries - All Agencies');
-$TestFeed->setChannelElement('language', 'en-us');
-$TestFeed->setChannelElement('pubDate', date(DATE_RSS, time()));
-
-//Retriving informations from database
+//Retrieving information from the database
 $idtoname = Array();
 $agenciesdb = $server->get_db('disclosr-agencies');
 foreach ($agenciesdb->get_view("app", "byCanonicalName")->rows as $row) {
     $idtoname[$row->id] = trim($row->value->name);
 }
 $foidocsdb = $server->get_db('disclosr-foidocuments');
-$rows = $foidocsdb->get_view("app", "byDate", Array('9999-99-99', '0000-00-00', 50), true)->rows;
+if (isset($_REQUEST['id'])) {
+    $rows = $foidocsdb->get_view("app", "byAgencyID", $_REQUEST['id'], false, false, false)->rows;
+    $title = $idtoname[$_REQUEST['id']];
+} else {
+    $rows = $foidocsdb->get_view("app", "byDate", Array('9999-99-99', '0000-00-00', 50), true)->rows;
+    $title = 'All Agencies';
+}
+//Use wrapper functions for common channelelements
+$TestFeed->setTitle('disclosurelo.gs Newest Entries - '.$title);
+$TestFeed->setLink('http://disclosurelo.gs/rss.xml.php'.(isset($_REQUEST['id'])? '?id='.$_REQUEST['id'] : ''));
+$TestFeed->setDescription('disclosurelo.gs Newest Entries - '.$title);
+$TestFeed->setChannelElement('language', 'en-us');
+$TestFeed->setChannelElement('pubDate', date(DATE_RSS, time()));
+
+
 //print_r($rows);
 foreach ($rows as $row) {
     //Create an empty FeedItem
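
(With an id parameter, rss.xml.php now serves a per-agency feed, e.g.
rss.xml.php?id=<agencyID>; without one it falls back to the all-agencies feed.)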

--- a/documents/runScrapers.sh
+++ b/documents/runScrapers.sh
@@ -1,3 +1,10 @@
-for f in scrapers/*.py; do echo "Processing $f file.."; python $f; done
+for f in scrapers/*.py; do
+	echo "Processing $f file..";
+	python "$f";
+	if [ "$?" -ne "0" ]; then
+		echo "error";
+		sleep 2;
+	fi
+done
 
 

--- a/documents/scrape.py
+++ b/documents/scrape.py
@@ -8,186 +8,198 @@
 import time
 import os
 import mimetypes
 import re
 import urllib
 import urlparse
 
 def mkhash(input):
-	return hashlib.md5(input).hexdigest().encode("utf-8")
+    return hashlib.md5(input).hexdigest().encode("utf-8")
 
 def canonurl(url):
-	r"""Return the canonical, ASCII-encoded form of a UTF-8 encoded URL, or ''
-	if the URL looks invalid.
-	>>> canonurl('\xe2\x9e\xa1.ws')  # tinyarro.ws
-	'http://xn--hgi.ws/'
-	"""
-	# strip spaces at the ends and ensure it's prefixed with 'scheme://'
-	url = url.strip()
-	if not url:
-		return ''
-	if not urlparse.urlsplit(url).scheme:
-		url = 'http://' + url
-
-	# turn it into Unicode
-	#try:
-	#    url = unicode(url, 'utf-8')
-	#except UnicodeDecodeError:
-	#    return ''  # bad UTF-8 chars in URL
-
-	# parse the URL into its components
-	parsed = urlparse.urlsplit(url)
-	scheme, netloc, path, query, fragment = parsed
-
-	# ensure scheme is a letter followed by letters, digits, and '+-.' chars
-	if not re.match(r'[a-z][-+.a-z0-9]*$', scheme, flags=re.I):
-		return ''
-	scheme = str(scheme)
-
-	# ensure domain and port are valid, eg: sub.domain.<1-to-6-TLD-chars>[:port]
-	match = re.match(r'(.+\.[a-z0-9]{1,6})(:\d{1,5})?$', netloc, flags=re.I)
-	if not match:
-		return ''
-	domain, port = match.groups()
-	netloc = domain + (port if port else '')
-	netloc = netloc.encode('idna')
-
-	# ensure path is valid and convert Unicode chars to %-encoded
-	if not path:
-		path = '/'  # eg: 'http://google.com' -> 'http://google.com/'
-	path = urllib.quote(urllib.unquote(path.encode('utf-8')), safe='/;')
-
-	# ensure query is valid
-	query = urllib.quote(urllib.unquote(query.encode('utf-8')), safe='=&?/')
-
-	# ensure fragment is valid
-	fragment = urllib.quote(urllib.unquote(fragment.encode('utf-8')))
-
-	# piece it all back together, truncating it to a maximum of 4KB
-	url = urlparse.urlunsplit((scheme, netloc, path, query, fragment))
-	return url[:4096]
+    r"""Return the canonical, ASCII-encoded form of a UTF-8 encoded URL, or ''
+    if the URL looks invalid.
+    >>> canonurl('\xe2\x9e\xa1.ws')  # tinyarro.ws
+    'http://xn--hgi.ws/'
+    """
+    # strip spaces at the ends and ensure it's prefixed with 'scheme://'
+    url = url.strip()
+    if not url:
+        return ''
+    if not urlparse.urlsplit(url).scheme:
+        url = 'http://' + url
+
+    # turn it into Unicode
+    #try:
+    #    url = unicode(url, 'utf-8')
+    #except UnicodeDecodeError:
+    #    return ''  # bad UTF-8 chars in URL
+
+    # parse the URL into its components
+    parsed = urlparse.urlsplit(url)
+    scheme, netloc, path, query, fragment = parsed
+
+    # ensure scheme is a letter followed by letters, digits, and '+-.' chars
+    if not re.match(r'[a-z][-+.a-z0-9]*$', scheme, flags=re.I):
+        return ''
+    scheme = str(scheme)
+
+    # ensure domain and port are valid, eg: sub.domain.<1-to-6-TLD-chars>[:port]
+    match = re.match(r'(.+\.[a-z0-9]{1,6})(:\d{1,5})?$', netloc, flags=re.I)
+    if not match:
+        return ''
+    domain, port = match.groups()
+    netloc = domain + (port if port else '')
+    netloc = netloc.encode('idna')
+
+    # ensure path is valid and convert Unicode chars to %-encoded
+    if not path:
+        path = '/'  # eg: 'http://google.com' -> 'http://google.com/'
+    path = urllib.quote(urllib.unquote(path.encode('utf-8')), safe='/;')
+
+    # ensure query is valid
+    query = urllib.quote(urllib.unquote(query.encode('utf-8')), safe='=&?/')
+
+    # ensure fragment is valid
+    fragment = urllib.quote(urllib.unquote(fragment.encode('utf-8')))
+
+    # piece it all back together, truncating it to a maximum of 4KB
+    url = urlparse.urlunsplit((scheme, netloc, path, query, fragment))
+    return url[:4096]
 
 def fullurl(url,href):
-	href = href.replace(" ","%20")
-	href = re.sub('#.*$','',href)
-	return urljoin(url,href)
+    href = href.replace(" ","%20")
+    href = re.sub('#.*$','',href)
+    return urljoin(url,href)
 
 #http://diveintopython.org/http_web_services/etags.html
-class NotModifiedHandler(urllib2.BaseHandler):  
-	def http_error_304(self, req, fp, code, message, headers):
-		addinfourl = urllib2.addinfourl(fp, headers, req.get_full_url())
-		addinfourl.code = code
-		return addinfourl
+class NotModifiedHandler(urllib2.BaseHandler):
+    def http_error_304(self, req, fp, code, message, headers):
+        addinfourl = urllib2.addinfourl(fp, headers, req.get_full_url())
+        addinfourl.code = code
+        return addinfourl
+
+def getLastAttachment(docsdb,url):
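+    """ Return the most recently saved attachment for a URL, or None if the URL
+    has never been fetched (assumes fetched docs always carry attachments). """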
+    hash = mkhash(url)
+    doc = docsdb.get(hash)
+    if doc is not None:
+        last_attachment_fname = doc["_attachments"].keys()[-1]
+        last_attachment = docsdb.get_attachment(doc,last_attachment_fname)
+        return last_attachment
+    else:
+        return None
 
 def fetchURL(docsdb, url, fieldName, agencyID, scrape_again=True):
-	url = canonurl(url)
-	hash = mkhash(url)
-	req = urllib2.Request(url)
-	print "Fetching %s (%s)" % (url,hash)
-	if url.startswith("mailto") or url.startswith("javascript") or url.startswith("#") or url == None or url == "":
-		print "Not a valid HTTP url"
-		return (None,None,None)
-	doc = docsdb.get(hash) 
-	if doc == None:
-		doc = {'_id': hash, 'agencyID': agencyID, 'url': url, 'fieldName':fieldName}
-	else:
-		if (('page_scraped' in doc) and (time.time() - doc['page_scraped']) < 60*24*14*1000):
-			print "Uh oh, trying to scrape URL again too soon!"
-			last_attachment_fname = doc["_attachments"].keys()[-1]
-			last_attachment = docsdb.get_attachment(doc,last_attachment_fname)
-			content = last_attachment
-			return (doc['url'],doc['mime_type'],content)
-		if scrape_again == False:
-			print "Not scraping this URL again as requested"
-			return (None,None,None)
-
-	time.sleep(3) # wait 3 seconds to give webserver time to recover
-	
-	req.add_header("User-Agent", "Mozilla/4.0 (compatible; Prometheus webspider; owner maxious@lambdacomplex.org)")
-	#if there is a previous version stored in couchdb, load caching helper tags
-	if doc.has_key('etag'):
-		req.add_header("If-None-Match", doc['etag'])
-	if doc.has_key('last_modified'):
-		req.add_header("If-Modified-Since", doc['last_modified'])
-	 
-	opener = urllib2.build_opener(NotModifiedHandler())
-	try:
-		url_handle = opener.open(req)
-		doc['url'] = url_handle.geturl() # may have followed a redirect to a new url
-		headers = url_handle.info() # the addinfourls have the .info() too
-		doc['etag'] = headers.getheader("ETag")
-		doc['last_modified'] = headers.getheader("Last-Modified") 
-		doc['date'] = headers.getheader("Date") 
-		doc['page_scraped'] = time.time() 
-		doc['web_server'] = headers.getheader("Server") 
-		doc['via'] = headers.getheader("Via") 
-		doc['powered_by'] = headers.getheader("X-Powered-By") 
-		doc['file_size'] = headers.getheader("Content-Length") 
-		content_type = headers.getheader("Content-Type")
-		if content_type != None:
-			 doc['mime_type'] = content_type.split(";")[0]
-		else:
-			 (type,encoding) = mimetypes.guess_type(url)
-			 doc['mime_type'] = type
-		if hasattr(url_handle, 'code'):
-			if url_handle.code == 304:
-				print "the web page has not been modified"
-				return (None,None,None)
-			else: 
-				content = url_handle.read()
-				docsdb.save(doc)
-				doc = docsdb.get(hash) # need to get a _rev
-				docsdb.put_attachment(doc, content, str(time.time())+"-"+os.path.basename(url), doc['mime_type']) 
-				return (doc['url'], doc['mime_type'], content)
-				#store as attachment epoch-filename
-				
-	except urllib2.URLError as e:
-			error = ""
-			if hasattr(e, 'reason'):
-				error = "error %s in downloading %s" % (str(e.reason), url)
-			elif hasattr(e, 'code'):
-				error = "error %s in downloading %s" % (e.code, url)