fix charts
fix charts


Former-commit-id: d6e49522e61927665c8ba633dad5a13344f34841

--- a/documents/about.php
+++ b/documents/about.php
@@ -1,7 +1,7 @@
 <?php
 
 include('template.inc.php');
-include_header_documents("");
+include_header_documents("About");
 include_once('../include/common.inc.php');
 ?>
 <h1>About</h1>

--- a/documents/charts.php
+++ b/documents/charts.php
@@ -1,6 +1,6 @@
 <?php
 include('template.inc.php');
-include_header_documents("");
+include_header_documents("Charts");
 include_once('../include/common.inc.php');
 $agenciesdb = $server->get_db('disclosr-agencies');
 
@@ -15,29 +15,28 @@
     <h1><a href="about.php">Charts</a></h1>
     <h4 class="subheader">Lorem ipsum.</h4>
 </div>
-<div id="employees" style="width:1000px;height:900px;"></div>
+<div id="bydate" style="width:1000px;height:300px;"></div>
+<div id="byagency" style="width:1200px;height:300px;"></div>
 <script id="source">
     window.onload = function() {
         $(document).ready(function() {
   var
     d1    = [],
-    start = new Date("2009/01/01 01:00").getTime(),
-    options,
-    graph,
-    i, x, o;
+    options1,
+     o1;
 
 <?php
     try {
-        $rows = $foidocsdb->get_view("app", "byDate?group=true", null, true)->rows;
+        $rows = $foidocsdb->get_view("app", "byDateMonthYear?group=true")->rows;
 
 
         $dataValues = Array();
         foreach ($rows as $row) {
-            $dataValues[$row->value] = $row->key;
+            $dataValues[$row->key] = $row->value;
         }
         $i = 0;
         ksort($dataValues);
-        foreach ($dataValues as $value => $key) {
+        foreach ($dataValues as $key => $value) {
 $date = date_create_from_format('Y-m-d', $key);
 if (date_format($date, 'U') != "") {
             echo "       d1.push([".date_format($date, 'U')."000, $value]);" . PHP_EOL;
@@ -52,7 +51,7 @@
 
 
         
-  options = {
+  options1 = {
     xaxis : {
       mode : 'time', 
       labelsAngle : 45
@@ -68,19 +67,19 @@
   function drawGraph (opts) {
 
     // Clone the options, so the 'options' variable always keeps intact.
-    o = Flotr._.extend(Flotr._.clone(options), opts || {});
+    o1 = Flotr._.extend(Flotr._.clone(options1), opts || {});
 
     // Return a new graph.
     return Flotr.draw(
-      document.getElementById("employees"),
+      document.getElementById("bydate"),
       [ d1 ],
-      o
+      o1
     );
   }
 
   graph = drawGraph();      
         
-  Flotr.EventAdapter.observe(container, 'flotr:select', function(area){
+  Flotr.EventAdapter.observe(document.getElementById("bydate"), 'flotr:select', function(area){
     // Draw selected area
     graph = drawGraph({
       xaxis : { min : area.x1, max : area.x2, mode : 'time', labelsAngle : 45 },
@@ -89,10 +88,74 @@
   });
         
   // When graph is clicked, draw the graph with default area.
-  Flotr.EventAdapter.observe(container, 'flotr:click', function () { graph = drawGraph(); });
+  Flotr.EventAdapter.observe(document.getElementById("bydate"), 'flotr:click', function () { graph = drawGraph(); });
 
         });
 }; 
+
+var d2 = [];
+var agencylabels = [];
+function agencytrackformatter(obj) {
+                   
+                        return agencylabels[Math.floor(obj.x)] +" = "+obj.y;
+                     
+                }
+                function agencytickformatter(val, axis) {
+                    if (agencylabels[Math.floor(val)]) {
+                        return '<p style="margin-top:8em;-webkit-transform:rotate(-90deg);">'+(agencylabels[Math.floor(val)])+"</b>";
+                     
+                    } else {
+                        return "";
+                    }
+                }
+<?php
+    try {
+        $rows = $foidocsdb->get_view("app", "byAgencyID?group=true")->rows;
+
+
+        $dataValues = Array();
+        $i = 0;
+        foreach ($rows as $row) {
+            echo "       d2.push([".$i.", $row->value]);" . PHP_EOL;
+            echo "       agencylabels.push(['".str_replace("'","",$idtoname[$row->key])."']);" . PHP_EOL;
+            
+            $i++;
+        }
+    } catch (SetteeRestClientException $e) {
+        setteErrorHandler($e);
+    }
+    ?>
+  // Draw the graph
+  Flotr.draw(
+   document.getElementById("byagency"),
+    [d2],
+    {
+      bars : {
+        show : true,
+        horizontal : false,
+        shadowSize : 0,
+        barWidth : 0.5
+      },
+mouse : {
+                        track : true,
+                        relative : true,
+                    trackFormatter: agencytrackformatter
+                    },
+      yaxis : {
+        min : 0,
+        autoscaleMargin : 1
+      },
+      xaxis: {
+                    minorTickFreq: 1,
+                    noTicks: agencylabels.length,
+                    showMinorLabels: true,
+                        tickFormatter: agencytickformatter
+                    },
+                    legend: {
+                        show: false
+                    }
+    }
+  );
 </script>
 
 <?php

--- a/documents/disclogsList.php
+++ b/documents/disclogsList.php
@@ -1,7 +1,7 @@
 <?php
 
 include('template.inc.php');
-include_header_documents("");
+include_header_documents("List of Disclosure Logs");
 include_once('../include/common.inc.php');
 
 echo "<table>

--- /dev/null
+++ b/documents/disclosr-documents.nja
@@ -1,1 +1,7 @@
-
+{
+  "venv": "", 
+  "project-type": "Import from sources", 
+  "name": "disclosr-documents", 
+  "license": "GNU General Public License v3", 
+  "description": ""
+}

--- a/documents/genericScrapers.py
+++ b/documents/genericScrapers.py
@@ -1,155 +1,254 @@
-import sys,os
+import sys
+import os
 sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
 import scrape
 from bs4 import BeautifulSoup
 from time import mktime
 import feedparser
 import abc
-import unicodedata, re
+import unicodedata
+import re
 import dateutil
 from dateutil.parser import *
 from datetime import *
 import codecs
 
+from StringIO import StringIO
+
+from pdfminer.pdfparser import PDFDocument, PDFParser
+from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter, process_pdf
+from pdfminer.pdfdevice import PDFDevice, TagExtractor
+from pdfminer.converter import TextConverter
+from pdfminer.cmapdb import CMapDB
+from pdfminer.layout import LAParams
+
+
 class GenericDisclogScraper(object):
-        __metaclass__ = abc.ABCMeta
-	agencyID = None
-	disclogURL = None
-	def remove_control_chars(self, input):
-		return "".join([i for i in input if ord(i) in range(32, 127)])
-        def getAgencyID(self):
-                """ disclosr agency id """
-		if self.agencyID == None:
-			self.agencyID = os.path.basename(sys.argv[0]).replace(".py","")
-                return self.agencyID
-
-        def getURL(self):
-                """ disclog URL"""
-		if self.disclogURL == None:
-			agency = scrape.agencydb.get(self.getAgencyID())
-			self.disclogURL = agency['FOIDocumentsURL']
-                return self.disclogURL
-
-	@abc.abstractmethod
-	def doScrape(self):
-		""" do the scraping """
-		return
-
-	@abc.abstractmethod
-        def getDescription(self, content, entry, doc):
-                """ get description"""
-		return
-
+    __metaclass__ = abc.ABCMeta
+    agencyID = None
+    disclogURL = None
+
+    def remove_control_chars(self, input):
+        return "".join([i for i in input if ord(i) in range(32, 127)])
+
+    def getAgencyID(self):
+        """ disclosr agency id """
+        if self.agencyID is None:
+            self.agencyID = os.path.basename(sys.argv[0]).replace(".py", "")
+        return self.agencyID
+
+    def getURL(self):
+        """ disclog URL"""
+        if self.disclogURL is None:
+            agency = scrape.agencydb.get(self.getAgencyID())
+            self.disclogURL = agency['FOIDocumentsURL']
+        return self.disclogURL
+
+    @abc.abstractmethod
+    def doScrape(self):
+        """ do the scraping """
+        return
+
+
+class GenericPDFDisclogScraper(GenericDisclogScraper):
+
+    def doScrape(self):
+        foidocsdb = scrape.couch['disclosr-foidocuments']
+        (url, mime_type, content) = scrape.fetchURL(scrape.docsdb,
+             self.getURL(), "foidocuments", self.getAgencyID())
+        laparams = LAParams()
+        rsrcmgr = PDFResourceManager(caching=True)
+        outfp = StringIO()
+        device = TextConverter(rsrcmgr, outfp, codec='utf-8',
+             laparams=laparams)
+        fp = StringIO()
+        fp.write(content.read())
+
+        process_pdf(rsrcmgr, device, fp, set(), caching=True,
+             check_extractable=True)
+        description = outfp.getvalue()
+        fp.close()
+        device.close()
+        outfp.close()
+        dochash = scrape.mkhash(description)
+        doc = foidocsdb.get(dochash)
+        if doc is None:
+            print "saving " + dochash
+            edate = date.today().strftime("%Y-%m-%d")
+            doc = {'_id': dochash, 'agencyID': self.getAgencyID()
+            , 'url': self.getURL(), 'docID': dochash,
+            "date": edate, "title": "Disclosure Log Updated", "description": description}
+            foidocsdb.save(doc)
+        else:
+            print "already saved"
+
+
+class GenericDOCXDisclogScraper(GenericDisclogScraper):
+
+    def doScrape(self):
+        foidocsdb = scrape.couch['disclosr-foidocuments']
+        (url, mime_type, content) = scrape.fetchURL(scrape.docsdb
+        , self.getURL(), "foidocuments", self.getAgencyID())
+        mydoc = zipfile.ZipFile(file)
+        xmlcontent = mydoc.read('word/document.xml')
+        document = etree.fromstring(xmlcontent)
+        ## Fetch all the text out of the document we just created
+        paratextlist = getdocumenttext(document)
+        # Make explicit unicode version
+        newparatextlist = []
+        for paratext in paratextlist:
+            newparatextlist.append(paratext.encode("utf-8"))
+        ## Print our documents text with two newlines under each paragraph
+        description = '\n\n'.join(newparatextlist).strip(' \t\n\r')
+        dochash = scrape.mkhash(description)
+        doc = foidocsdb.get(dochash)
+
+        if doc is None:
+            print "saving " + dochash
+            edate = time().strftime("%Y-%m-%d")
+            doc = {'_id': dochash, 'agencyID': self.getAgencyID()
+            , 'url': self.getURL(), 'docID': dochash,
+            "date": edate, "title": "Disclosure Log Updated", "description": description}
+            foidocsdb.save(doc)
+        else:
+            print "already saved"
 
 
 class GenericRSSDisclogScraper(GenericDisclogScraper):
 
-       	def doScrape(self):
-               	foidocsdb = scrape.couch['disclosr-foidocuments']
-                (url,mime_type,content) = scrape.fetchURL(scrape.docsdb, self.getURL(), "foidocuments", self.getAgencyID())
-		feed = feedparser.parse(content)		
-		for entry in feed.entries:
-			#print entry
-			print entry.id
-			hash = scrape.mkhash(entry.id)
-			#print hash
-		  	doc = foidocsdb.get(hash)
-			#print doc
-			if doc == None:
-                        	print "saving "+ hash
-				edate = datetime.fromtimestamp(mktime( entry.published_parsed)).strftime("%Y-%m-%d")
-                                doc = {'_id': hash, 'agencyID': self.getAgencyID(), 'url': entry.link, 'docID': entry.id,
-                                "date": edate,"title": entry.title}
-				self.getDescription(entry,entry, doc)
-                                foidocsdb.save(doc)
+        def doScrape(self):
+            foidocsdb = scrape.couch['disclosr-foidocuments']
+            (url, mime_type, content) = scrape.fetchURL(scrape.docsdb,
+                 self.getURL(), "foidocuments", self.getAgencyID())
+            feed = feedparser.parse(content)
+            for entry in feed.entries:
+                #print entry
+                print entry.id
+                dochash = scrape.mkhash(entry.id)
+                doc = foidocsdb.get(dochash)
+                #print doc
+                if doc is None:
+                    print "saving " + dochash
+                    edate = datetime.fromtimestamp(
+                        mktime(entry.published_parsed)).strftime("%Y-%m-%d")
+                    doc = {'_id': dochash, 'agencyID': self.getAgencyID(),
+                        'url': entry.link, 'docID': entry.id,
+                        "date": edate, "title": entry.title}
+                    self.getDescription(entry, entry, doc)
+                    foidocsdb.save(doc)
+                else:
+                    print "already saved"
+
+            def getDescription(self, content, entry, doc):
+                    """ get description from rss entry"""
+                    doc.update({'description': content.summary})
+            return
+
+
+class GenericOAICDisclogScraper(GenericDisclogScraper):
+    __metaclass__ = abc.ABCMeta
+
+    @abc.abstractmethod
+    def getColumns(self, columns):
+        """ rearranges columns if required """
+        return
+
+    def getColumnCount(self):
+        return 5
+
+    def getDescription(self, content, entry, doc):
+        """ get description from rss entry"""
+        descriptiontxt = ""
+        for string in content.stripped_strings:
+                    descriptiontxt = descriptiontxt + " \n" + string
+        doc.update({'description': descriptiontxt})
+
+    def getTitle(self, content, entry, doc):
+        doc.update({'title': (''.join(content.stripped_strings))})
+
+    def getTable(self, soup):
+        return soup.table
+
+    def getRows(self, table):
+        return table.find_all('tr')
+
+    def getDate(self, content, entry, doc):
+        date = ''.join(content.stripped_strings).strip()
+        (a, b, c) = date.partition("(")
+        date = self.remove_control_chars(a.replace("Octber", "October"))
+        print date
+        edate = parse(date, dayfirst=True, fuzzy=True).strftime("%Y-%m-%d")
+        print edate
+        doc.update({'date': edate})
+        return
+
+    def getLinks(self, content, entry, doc):
+        links = []
+        for atag in entry.find_all("a"):
+            if atag.has_key('href'):
+                links.append(scrape.fullurl(content, atag['href']))
+        if links != []:
+                    doc.update({'links': links})
+        return
+
+    def doScrape(self):
+        foidocsdb = scrape.couch['disclosr-foidocuments']
+        (url, mime_type, content) = scrape.fetchURL(scrape.docsdb,
+            self.getURL(), "foidocuments", self.getAgencyID())
+        if content is not None:
+            if mime_type == "text/html" or mime_type == "application/xhtml+xml" or mime_type == "application/xml":
+            # http://www.crummy.com/software/BeautifulSoup/documentation.html
+                print "parsing"
+                soup = BeautifulSoup(content)
+                table = self.getTable(soup)
+                for row in self.getRows(table):
+                    columns = row.find_all('td')
+                    if len(columns) is self.getColumnCount():
+                        (id, date, title,
+                        description, notes) = self.getColumns(columns)
+                        print self.remove_control_chars(
+                            ''.join(id.stripped_strings))
+                        if id.string is None:
+                            dochash = scrape.mkhash(
+                                self.remove_control_chars(
+                                    url + (''.join(date.stripped_strings))))
                         else:
-                        	print "already saved"			
-        def getDescription(self, content, entry, doc):
-                """ get description from rss entry"""
-                doc.update({'description': content.summary})
-		return
-
-class GenericOAICDisclogScraper(GenericDisclogScraper):
-        __metaclass__ = abc.ABCMeta
-	@abc.abstractmethod
-	def getColumns(self,columns):
-		""" rearranges columns if required """
-		return
-        def getColumnCount(self):
-                return 5
-        def getDescription(self, content, entry, doc):
-                """ get description from rss entry"""
-		descriptiontxt = ""
-		for string in content.stripped_strings:
-                	descriptiontxt = descriptiontxt + " \n" + string
-                doc.update({'description': descriptiontxt})
-		return
-        def getTitle(self, content, entry, doc):
-                doc.update({'title': (''.join(content.stripped_strings))})
-		return
-	def getTable(self, soup):
-		return soup.table
-	def getRows(self, table):
-		return table.find_all('tr')
-	def getDate(self, content, entry, doc):
-		date = ''.join(content.stripped_strings).strip()
-		(a,b,c) = date.partition("(")
-		date = self.remove_control_chars(a.replace("Octber","October"))
-		print date
-		edate = parse(date, dayfirst=True, fuzzy=True).strftime("%Y-%m-%d")
-		print edate
-		doc.update({'date': edate})
-		return
-	def getLinks(self, content, entry, doc):
-                links = []
-                for atag in entry.find_all("a"):
-                       	if atag.has_key('href'):
-                               	links.append(scrape.fullurl(content,atag['href']))
-                if links != []:
-	                doc.update({'links': links})
-		return
-
-	def doScrape(self):
-		foidocsdb = scrape.couch['disclosr-foidocuments']
-		(url,mime_type,content) = scrape.fetchURL(scrape.docsdb, self.getURL(), "foidocuments", self.getAgencyID())
-		if content != None:
-			if mime_type == "text/html" or mime_type == "application/xhtml+xml" or mime_type =="application/xml":
-			# http://www.crummy.com/software/BeautifulSoup/documentation.html
-				soup = BeautifulSoup(content)
-				table = self.getTable(soup)
-				for row in self.getRows(table):
-					columns = row.find_all('td')
-					if len(columns) == self.getColumnCount():
-						(id, date, title, description, notes) = self.getColumns(columns)
-						print self.remove_control_chars(''.join(id.stripped_strings))
-						if id.string == None:
-							hash = scrape.mkhash(self.remove_control_chars(url+(''.join(date.stripped_strings))))
-						else:
-							hash = scrape.mkhash(self.remove_control_chars(url+(''.join(id.stripped_strings))))
-						doc = foidocsdb.get(hash)
-							
-						if doc == None:
-							print "saving " +hash
-							doc = {'_id': hash, 'agencyID': self.getAgencyID(), 'url': self.getURL(), 'docID': (''.join(id.stripped_strings))}
-							self.getLinks(self.getURL(),row,doc)
-                                			self.getTitle(title,row, doc)
-                                			self.getDate(date,row, doc)
-							self.getDescription(description,row, doc)
-							if notes != None:
-                                        			doc.update({ 'notes': (''.join(notes.stripped_strings))})
-                                                        badtitles = ['-','Summary of FOI Request','FOI request(in summary form)','Summary of FOI request received by the ASC',
-'Summary of FOI request received by agency/minister','Description of Documents Requested','FOI request','Description of FOI Request','Summary of request','Description','Summary',
+                            dochash = scrape.mkhash(
+                                self.remove_control_chars(
+                                    url + (''.join(id.stripped_strings))))
+                        doc = foidocsdb.get(dochash)
+
+                        if doc is None:
+                            print "saving " + dochash
+                            doc = {'_id': dochash,
+                            'agencyID': self.getAgencyID(),
+                            'url': self.getURL(),
+                            'docID': (''.join(id.stripped_strings))}
+                            self.getLinks(self.getURL(), row, doc)
+                            self.getTitle(title, row, doc)
+                            self.getDate(date, row, doc)
+                            self.getDescription(description, row, doc)
+                            if notes is not None:
+                                doc.update({ 'notes': (
+                                    ''.join(notes.stripped_strings))})
+                            badtitles = ['-','Summary of FOI Request'
+                            , 'FOI request(in summary form)'
+                            , 'Summary of FOI request received by the ASC',
+'Summary of FOI request received by agency/minister',
+'Description of Documents Requested','FOI request',
+'Description of FOI Request','Summary of request','Description','Summary',
 'Summary of FOIrequest received by agency/minister','Summary of FOI request received','Description of    FOI Request',"FOI request",'Results 1 to 67 of 67']
-							if doc['title'] not in badtitles and doc['description'] != '':
+                            if doc['title'] not in badtitles\
+                            and doc['description'] != '':
                                                             print "saving"
                                                             foidocsdb.save(doc)
-						else:
-							print "already saved "+hash
-					
-					elif len(row.find_all('th')) == self.getColumnCount():
-						print "header row"
-					
-					else:
-						print "ERROR number of columns incorrect"
-						print row
-
+                        else:
+                            print "already saved " + dochash
+
+                    elif len(row.find_all('th')) is self.getColumnCount():
+                        print "header row"
+
+                    else:
+                        print "ERROR number of columns incorrect"
+                        print row
+

 Binary files /dev/null and b/documents/img/feed-icon-14x14.png differ
--- a/documents/index.php
+++ b/documents/index.php
@@ -3,10 +3,11 @@
 include('template.inc.php');
 include_header_documents("");
 include_once('../include/common.inc.php');
-$startkey = (isset($_REQUEST['start_key']) ? $_REQUEST['start_key'] : '9999-99-99');
+$endkey = (isset($_REQUEST['end_key']) ? $_REQUEST['end_key'] : '9999-99-99');
 ?>
+<div class="headline">Read all the information released by Australian Federal Government agencies under the FOI Act in one place!</div>
+<a style='float:right' href="rss.xml.php"><img src="img/feed-icon-14x14.png" alt="RSS Icon"/> All Agencies RSS Feed</a><br>
 <?php
-
 $agenciesdb = $server->get_db('disclosr-agencies');
 
 $idtoname = Array();
@@ -15,17 +16,18 @@
 }
 $foidocsdb = $server->get_db('disclosr-foidocuments');
 try {
-    $rows = $foidocsdb->get_view("app", "byDate", Array($startkey, '0000-00-00'), true, 20)->rows;
+    $rows = $foidocsdb->get_view("app", "byDate", Array($endkey, '0000-00-00'), true, 20)->rows;
     if ($rows) {
         foreach ($rows as $key => $row) {
             echo displayLogEntry($row, $idtoname);
+		if (!isset($startkey)) $startkey =  $row->key;
             $endkey = $row->key;
         }
     }
 } catch (SetteeRestClientException $e) {
     setteErrorHandler($e);
 }
-echo "<a href='?start_key=$endkey'>next page</a>";
+echo "<a class='btn btn-large btn-primary' href='?end_key=$endkey' style='float:right;'>next page <i class='icon-circle-arrow-right icon-white'></i></a>";
 include_footer_documents();
 ?>
 

--- a/documents/rss.xml.php
+++ b/documents/rss.xml.php
@@ -9,11 +9,12 @@
 $TestFeed = new RSS2FeedWriter();
 //Setting the channel elements
 //Use wrapper functions for common channelelements
-$TestFeed->setTitle('Last Modified - All');
+$TestFeed->setTitle('disclosurelo.gs Newest Entries - All');
 $TestFeed->setLink('http://disclosurelo.gs/rss.xml.php');
-$TestFeed->setDescription('Latest entries');
-  $TestFeed->setChannelElement('language', 'en-us');
-  $TestFeed->setChannelElement('pubDate', date(DATE_RSS, time()));
+$TestFeed->setDescription('disclosurelo.gs Newest Entries - All Agencies');
+$TestFeed->setChannelElement('language', 'en-us');
+$TestFeed->setChannelElement('pubDate', date(DATE_RSS, time()));
+
 //Retriving informations from database
 $idtoname = Array();
 $agenciesdb = $server->get_db('disclosr-agencies');
@@ -21,17 +22,18 @@
     $idtoname[$row->id] = trim($row->value->name);
 }
 $foidocsdb = $server->get_db('disclosr-foidocuments');
-$rows = $foidocsdb->get_view("app", "byDate", Array('9999-99-99','0000-00-00', 50), true)->rows;
+$rows = $foidocsdb->get_view("app", "byDate", Array('9999-99-99', '0000-00-00', 50), true)->rows;
 //print_r($rows);
 foreach ($rows as $row) {
     //Create an empty FeedItem
     $newItem = $TestFeed->createNewItem();
     //Add elements to the feed item
     $newItem->setTitle($row->value->title);
-    $newItem->setLink("view.php?id=".$row->value->_id);
-    $newItem->setDate(date("c", strtotime($row->value->date)));
-    $newItem->setDescription(displayLogEntry($row,$idtoname));
-    $newItem->addElement('guid', $row->value->_id,array('isPermaLink'=>'true'));
+    $newItem->setLink("http://disclosurelo.gs/view.php?id=" . $row->value->_id);
+    $newItem->setDate(strtotime($row->value->date));
+    $newItem->setDescription(displayLogEntry($row, $idtoname));
+    $newItem->setAuthor($idtoname[$row->value->agencyID]);
+    $newItem->addElement('guid', "http://disclosurelo.gs/view.php?id=" . $row->value->_id, array('isPermaLink' => 'true'));
     //Now add the feed item
     $TestFeed->addItem($newItem);
 }

--- a/documents/scrape.py
+++ b/documents/scrape.py
@@ -8,186 +8,188 @@
 import time
 import os
 import mimetypes
-import re
 import urllib
 import urlparse
 
 def mkhash(input):
-	return hashlib.md5(input).hexdigest().encode("utf-8")
+    return hashlib.md5(input).hexdigest().encode("utf-8")
 
 def canonurl(url):
-	r"""Return the canonical, ASCII-encoded form of a UTF-8 encoded URL, or ''
-	if the URL looks invalid.
-	>>> canonurl('\xe2\x9e\xa1.ws')  # tinyarro.ws
-	'http://xn--hgi.ws/'
-	"""
-	# strip spaces at the ends and ensure it's prefixed with 'scheme://'
-	url = url.strip()
-	if not url:
-		return ''
-	if not urlparse.urlsplit(url).scheme:
-		url = 'http://' + url
-
-	# turn it into Unicode
-	#try:
-	#    url = unicode(url, 'utf-8')
-	#except UnicodeDecodeError:
-	#    return ''  # bad UTF-8 chars in URL
-
-	# parse the URL into its components
-	parsed = urlparse.urlsplit(url)
-	scheme, netloc, path, query, fragment = parsed
-
-	# ensure scheme is a letter followed by letters, digits, and '+-.' chars
-	if not re.match(r'[a-z][-+.a-z0-9]*$', scheme, flags=re.I):
-		return ''
-	scheme = str(scheme)
-
-	# ensure domain and port are valid, eg: sub.domain.<1-to-6-TLD-chars>[:port]
-	match = re.match(r'(.+\.[a-z0-9]{1,6})(:\d{1,5})?$', netloc, flags=re.I)
-	if not match:
-		return ''
-	domain, port = match.groups()
-	netloc = domain + (port if port else '')
-	netloc = netloc.encode('idna')
-
-	# ensure path is valid and convert Unicode chars to %-encoded
-	if not path:
-		path = '/'  # eg: 'http://google.com' -> 'http://google.com/'
-	path = urllib.quote(urllib.unquote(path.encode('utf-8')), safe='/;')
-
-	# ensure query is valid
-	query = urllib.quote(urllib.unquote(query.encode('utf-8')), safe='=&?/')
-
-	# ensure fragment is valid
-	fragment = urllib.quote(urllib.unquote(fragment.encode('utf-8')))
-
-	# piece it all back together, truncating it to a maximum of 4KB
-	url = urlparse.urlunsplit((scheme, netloc, path, query, fragment))
-	return url[:4096]
+    r"""Return the canonical, ASCII-encoded form of a UTF-8 encoded URL, or ''
+    if the URL looks invalid.
+    >>> canonurl('\xe2\x9e\xa1.ws')  # tinyarro.ws
+    'http://xn--hgi.ws/'
+    """
+    # strip spaces at the ends and ensure it's prefixed with 'scheme://'
+    url = url.strip()
+    if not url:
+        return ''
+    if not urlparse.urlsplit(url).scheme:
+        url = 'http://' + url
+
+    # turn it into Unicode
+    #try:
+    #    url = unicode(url, 'utf-8')
+    #except UnicodeDecodeError:
+    #    return ''  # bad UTF-8 chars in URL
+
+    # parse the URL into its components
+    parsed = urlparse.urlsplit(url)
+    scheme, netloc, path, query, fragment = parsed
+
+    # ensure scheme is a letter followed by letters, digits, and '+-.' chars
+    if not re.match(r'[a-z][-+.a-z0-9]*$', scheme, flags=re.I):
+        return ''
+    scheme = str(scheme)
+
+    # ensure domain and port are valid, eg: sub.domain.<1-to-6-TLD-chars>[:port]
+    match = re.match(r'(.+\.[a-z0-9]{1,6})(:\d{1,5})?$', netloc, flags=re.I)
+    if not match:
+        return ''
+    domain, port = match.groups()
+    netloc = domain + (port if port else '')
+    netloc = netloc.encode('idna')
+
+    # ensure path is valid and convert Unicode chars to %-encoded
+    if not path:
+        path = '/'  # eg: 'http://google.com' -> 'http://google.com/'
+    path = urllib.quote(urllib.unquote(path.encode('utf-8')), safe='/;')
+
+    # ensure query is valid
+    query = urllib.quote(urllib.unquote(query.encode('utf-8')), safe='=&?/')
+
+    # ensure fragment is valid
+    fragment = urllib.quote(urllib.unquote(fragment.encode