Add generic PDF and DOCX disclosure log scrapers

Adds GenericPDFDisclogScraper (pdfminer text extraction) and
GenericDOCXDisclogScraper, reindents genericScrapers.py and scrape.py to
four-space PEP 8 style, and improves rss.xml.php (absolute links and guids,
per-agency author, clearer feed title).


Former-commit-id: 6a33167b9cf20ed9af0d41f252320532648535db

--- /dev/null
+++ b/documents/disclosr-documents.nja
@@ -1,1 +1,7 @@
-
+{
+  "venv": "", 
+  "project-type": "Import from sources", 
+  "name": "disclosr-documents", 
+  "license": "GNU General Public License v3", 
+  "description": ""
+}

--- a/documents/genericScrapers.py
+++ b/documents/genericScrapers.py
@@ -1,155 +1,255 @@
-import sys,os
+import sys
+import os
 sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
 import scrape
 from bs4 import BeautifulSoup
 from time import mktime
 import feedparser
 import abc
-import unicodedata, re
+import unicodedata
+import re
 import dateutil
 from dateutil.parser import *
 from datetime import *
 import codecs
 
+from StringIO import StringIO
+
+from pdfminer.pdfparser import PDFDocument, PDFParser
+from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter, process_pdf
+from pdfminer.pdfdevice import PDFDevice, TagExtractor
+from pdfminer.converter import TextConverter
+from pdfminer.cmapdb import CMapDB
+from pdfminer.layout import LAParams
+
+# needed by GenericDOCXDisclogScraper below
+import zipfile
+from lxml import etree
+# getdocumenttext() is assumed to come from the legacy python-docx module
+from docx import getdocumenttext
+
+
 class GenericDisclogScraper(object):
-        __metaclass__ = abc.ABCMeta
-	agencyID = None
-	disclogURL = None
-	def remove_control_chars(self, input):
-		return "".join([i for i in input if ord(i) in range(32, 127)])
-        def getAgencyID(self):
-                """ disclosr agency id """
-		if self.agencyID == None:
-			self.agencyID = os.path.basename(sys.argv[0]).replace(".py","")
-                return self.agencyID
-
-        def getURL(self):
-                """ disclog URL"""
-		if self.disclogURL == None:
-			agency = scrape.agencydb.get(self.getAgencyID())
-			self.disclogURL = agency['FOIDocumentsURL']
-                return self.disclogURL
-
-	@abc.abstractmethod
-	def doScrape(self):
-		""" do the scraping """
-		return
-
-	@abc.abstractmethod
-        def getDescription(self, content, entry, doc):
-                """ get description"""
-		return
-
+    __metaclass__ = abc.ABCMeta
+    agencyID = None
+    disclogURL = None
+
+    def remove_control_chars(self, input):
+        return "".join([i for i in input if ord(i) in range(32, 127)])
+
+    def getAgencyID(self):
+        """ disclosr agency id """
+        if self.agencyID is None:
+            self.agencyID = os.path.basename(sys.argv[0]).replace(".py", "")
+        return self.agencyID
+
+    def getURL(self):
+        """ disclog URL"""
+        if self.disclogURL is None:
+            agency = scrape.agencydb.get(self.getAgencyID())
+            self.disclogURL = agency['FOIDocumentsURL']
+        return self.disclogURL
+
+    @abc.abstractmethod
+    def doScrape(self):
+        """ do the scraping """
+        return
+
+
+class GenericPDFDisclogScraper(GenericDisclogScraper):
+
+    def doScrape(self):
+        foidocsdb = scrape.couch['disclosr-foidocuments']
+        (url, mime_type, content) = scrape.fetchURL(scrape.docsdb,
+             self.getURL(), "foidocuments", self.getAgencyID())
+        laparams = LAParams()
+        rsrcmgr = PDFResourceManager(caching=True)
+        outfp = StringIO()
+        device = TextConverter(rsrcmgr, outfp, codec='utf-8',
+             laparams=laparams)
+        fp = StringIO()
+        # fetchURL may return either a plain string or a file-like attachment
+        fp.write(content.read() if hasattr(content, 'read') else content)
+        fp.seek(0)  # rewind so pdfminer reads from the start of the buffer
+
+        process_pdf(rsrcmgr, device, fp, set(), caching=True,
+             check_extractable=True)
+        description = outfp.getvalue()
+        fp.close()
+        device.close()
+        outfp.close()
+        dochash = scrape.mkhash(description)
+        doc = foidocsdb.get(dochash)
+        if doc is None:
+            print "saving " + dochash
+            edate = date.today().strftime("%Y-%m-%d")
+            doc = {'_id': dochash, 'agencyID': self.getAgencyID(),
+                'url': self.getURL(), 'docID': dochash,
+                "date": edate, "title": "Disclosure Log Updated",
+                "description": description}
+            foidocsdb.save(doc)
+        else:
+            print "already saved"
+
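+# A minimal usage sketch (hypothetical agency scraper, not part of this
+# commit): concrete scrapers subclass one of the generic classes above and
+# simply call doScrape(), e.g.
+#
+#   import genericScrapers
+#
+#   class ScraperImplementation(genericScrapers.GenericPDFDisclogScraper):
+#       pass
+#
+#   if __name__ == '__main__':
+#       ScraperImplementation().doScrape()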
+
+class GenericDOCXDisclogScraper(GenericDisclogScraper):
+
+    def doScrape(self):
+        foidocsdb = scrape.couch['disclosr-foidocuments']
+        (url, mime_type, content) = scrape.fetchURL(scrape.docsdb,
+            self.getURL(), "foidocuments", self.getAgencyID())
+        mydoc = zipfile.ZipFile(StringIO(
+            content.read() if hasattr(content, 'read') else content))
+        xmlcontent = mydoc.read('word/document.xml')
+        document = etree.fromstring(xmlcontent)
+        ## Fetch all the text out of the document we just parsed
+        paratextlist = getdocumenttext(document)
+        # Make explicit unicode version
+        newparatextlist = []
+        for paratext in paratextlist:
+            newparatextlist.append(paratext.encode("utf-8"))
+        ## Join the document's text with two newlines between paragraphs
+        description = '\n\n'.join(newparatextlist).strip(' \t\n\r')
+        dochash = scrape.mkhash(description)
+        doc = foidocsdb.get(dochash)
+
+        if doc is None:
+            print "saving " + dochash
+            edate = date.today().strftime("%Y-%m-%d")
+            doc = {'_id': dochash, 'agencyID': self.getAgencyID(),
+                'url': self.getURL(), 'docID': dochash,
+                "date": edate, "title": "Disclosure Log Updated",
+                "description": description}
+            foidocsdb.save(doc)
+        else:
+            print "already saved"
 
 
 class GenericRSSDisclogScraper(GenericDisclogScraper):
 
-       	def doScrape(self):
-               	foidocsdb = scrape.couch['disclosr-foidocuments']
-                (url,mime_type,content) = scrape.fetchURL(scrape.docsdb, self.getURL(), "foidocuments", self.getAgencyID())
-		feed = feedparser.parse(content)		
-		for entry in feed.entries:
-			#print entry
-			print entry.id
-			hash = scrape.mkhash(entry.id)
-			#print hash
-		  	doc = foidocsdb.get(hash)
-			#print doc
-			if doc == None:
-                        	print "saving "+ hash
-				edate = datetime.fromtimestamp(mktime( entry.published_parsed)).strftime("%Y-%m-%d")
-                                doc = {'_id': hash, 'agencyID': self.getAgencyID(), 'url': entry.link, 'docID': entry.id,
-                                "date": edate,"title": entry.title}
-				self.getDescription(entry,entry, doc)
-                                foidocsdb.save(doc)
+    def doScrape(self):
+        foidocsdb = scrape.couch['disclosr-foidocuments']
+        (url, mime_type, content) = scrape.fetchURL(scrape.docsdb,
+            self.getURL(), "foidocuments", self.getAgencyID())
+        feed = feedparser.parse(content)
+        for entry in feed.entries:
+            #print entry
+            print entry.id
+            dochash = scrape.mkhash(entry.id)
+            doc = foidocsdb.get(dochash)
+            #print doc
+            if doc is None:
+                print "saving " + dochash
+                edate = datetime.fromtimestamp(
+                    mktime(entry.published_parsed)).strftime("%Y-%m-%d")
+                doc = {'_id': dochash, 'agencyID': self.getAgencyID(),
+                    'url': entry.link, 'docID': entry.id,
+                    "date": edate, "title": entry.title}
+                self.getDescription(entry, entry, doc)
+                foidocsdb.save(doc)
+            else:
+                print "already saved"
+
+    def getDescription(self, content, entry, doc):
+        """ get description from rss entry"""
+        doc.update({'description': content.summary})
+        return
+
+
+class GenericOAICDisclogScraper(GenericDisclogScraper):
+    __metaclass__ = abc.ABCMeta
+
+    @abc.abstractmethod
+    def getColumns(self, columns):
+        """ rearranges columns if required """
+        return
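+
+    # Illustrative sketch only: a concrete subclass reorders each row's five
+    # <td> cells into (id, date, title, description, notes), e.g.
+    #
+    #   def getColumns(self, columns):
+    #       (id, date, title, description, notes) = columns
+    #       return (id, date, title, description, notes)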
+
+    def getColumnCount(self):
+        return 5
+
+    def getDescription(self, content, entry, doc):
+        """ get description from rss entry"""
+        descriptiontxt = ""
+        for string in content.stripped_strings:
+                    descriptiontxt = descriptiontxt + " \n" + string
+        doc.update({'description': descriptiontxt})
+
+    def getTitle(self, content, entry, doc):
+        doc.update({'title': (''.join(content.stripped_strings))})
+
+    def getTable(self, soup):
+        return soup.table
+
+    def getRows(self, table):
+        return table.find_all('tr')
+
+    def getDate(self, content, entry, doc):
+        date = ''.join(content.stripped_strings).strip()
+        (a, b, c) = date.partition("(")
+        date = self.remove_control_chars(a.replace("Octber", "October"))
+        print date
+        edate = parse(date, dayfirst=True, fuzzy=True).strftime("%Y-%m-%d")
+        print edate
+        doc.update({'date': edate})
+        return
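+    # e.g. the parsing above turns a cell reading "3 Octber 2012 (PDF)" into
+    # "2012-10-03": the text is cut at "(", the "Octber" typo corrected and
+    # the remainder parsed day-first by dateutil.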
+
+    def getLinks(self, content, entry, doc):
+        links = []
+        for atag in entry.find_all("a"):
+            if atag.has_key('href'):
+                links.append(scrape.fullurl(content, atag['href']))
+        if links != []:
+            doc.update({'links': links})
+        return
+
+    def doScrape(self):
+        foidocsdb = scrape.couch['disclosr-foidocuments']
+        (url, mime_type, content) = scrape.fetchURL(scrape.docsdb,
+            self.getURL(), "foidocuments", self.getAgencyID())
+        if content is not None:
+            if mime_type == "text/html" \
+                    or mime_type == "application/xhtml+xml" \
+                    or mime_type == "application/xml":
+            # http://www.crummy.com/software/BeautifulSoup/documentation.html
+                soup = BeautifulSoup(content)
+                table = self.getTable(soup)
+                for row in self.getRows(table):
+                    columns = row.find_all('td')
+                    if len(columns) == self.getColumnCount():
+                        (id, date, title,
+                        description, notes) = self.getColumns(columns)
+                        print self.remove_control_chars(
+                            ''.join(id.stripped_strings))
+                        if id.string is None:
+                            dochash = scrape.mkhash(
+                                self.remove_control_chars(
+                                    url + (''.join(date.stripped_strings))))
                         else:
-                        	print "already saved"			
-        def getDescription(self, content, entry, doc):
-                """ get description from rss entry"""
-                doc.update({'description': content.summary})
-		return
-
-class GenericOAICDisclogScraper(GenericDisclogScraper):
-        __metaclass__ = abc.ABCMeta
-	@abc.abstractmethod
-	def getColumns(self,columns):
-		""" rearranges columns if required """
-		return
-        def getColumnCount(self):
-                return 5
-        def getDescription(self, content, entry, doc):
-                """ get description from rss entry"""
-		descriptiontxt = ""
-		for string in content.stripped_strings:
-                	descriptiontxt = descriptiontxt + " \n" + string
-                doc.update({'description': descriptiontxt})
-		return
-        def getTitle(self, content, entry, doc):
-                doc.update({'title': (''.join(content.stripped_strings))})
-		return
-	def getTable(self, soup):
-		return soup.table
-	def getRows(self, table):
-		return table.find_all('tr')
-	def getDate(self, content, entry, doc):
-		date = ''.join(content.stripped_strings).strip()
-		(a,b,c) = date.partition("(")
-		date = self.remove_control_chars(a.replace("Octber","October"))
-		print date
-		edate = parse(date, dayfirst=True, fuzzy=True).strftime("%Y-%m-%d")
-		print edate
-		doc.update({'date': edate})
-		return
-	def getLinks(self, content, entry, doc):
-                links = []
-                for atag in entry.find_all("a"):
-                       	if atag.has_key('href'):
-                               	links.append(scrape.fullurl(content,atag['href']))
-                if links != []:
-	                doc.update({'links': links})
-		return
-
-	def doScrape(self):
-		foidocsdb = scrape.couch['disclosr-foidocuments']
-		(url,mime_type,content) = scrape.fetchURL(scrape.docsdb, self.getURL(), "foidocuments", self.getAgencyID())
-		if content != None:
-			if mime_type == "text/html" or mime_type == "application/xhtml+xml" or mime_type =="application/xml":
-			# http://www.crummy.com/software/BeautifulSoup/documentation.html
-				soup = BeautifulSoup(content)
-				table = self.getTable(soup)
-				for row in self.getRows(table):
-					columns = row.find_all('td')
-					if len(columns) == self.getColumnCount():
-						(id, date, title, description, notes) = self.getColumns(columns)
-						print self.remove_control_chars(''.join(id.stripped_strings))
-						if id.string == None:
-							hash = scrape.mkhash(self.remove_control_chars(url+(''.join(date.stripped_strings))))
-						else:
-							hash = scrape.mkhash(self.remove_control_chars(url+(''.join(id.stripped_strings))))
-						doc = foidocsdb.get(hash)
-							
-						if doc == None:
-							print "saving " +hash
-							doc = {'_id': hash, 'agencyID': self.getAgencyID(), 'url': self.getURL(), 'docID': (''.join(id.stripped_strings))}
-							self.getLinks(self.getURL(),row,doc)
-                                			self.getTitle(title,row, doc)
-                                			self.getDate(date,row, doc)
-							self.getDescription(description,row, doc)
-							if notes != None:
-                                        			doc.update({ 'notes': (''.join(notes.stripped_strings))})
-                                                        badtitles = ['-','Summary of FOI Request','FOI request(in summary form)','Summary of FOI request received by the ASC',
-'Summary of FOI request received by agency/minister','Description of Documents Requested','FOI request','Description of FOI Request','Summary of request','Description','Summary',
+                            dochash = scrape.mkhash(
+                                self.remove_control_chars(
+                                    url + (''.join(id.stripped_strings))))
+                        doc = foidocsdb.get(dochash)
+
+                        if doc is None:
+                            print "saving " + dochash
+                            doc = {'_id': dochash,
+                                'agencyID': self.getAgencyID(),
+                                'url': self.getURL(),
+                                'docID': (''.join(id.stripped_strings))}
+                            self.getLinks(self.getURL(), row, doc)
+                            self.getTitle(title, row, doc)
+                            self.getDate(date, row, doc)
+                            self.getDescription(description, row, doc)
+                            if notes is not None:
+                                doc.update({ 'notes': (
+                                    ''.join(notes.stripped_strings))})
+                            badtitles = ['-', 'Summary of FOI Request',
+                                'FOI request(in summary form)',
+                                'Summary of FOI request received by the ASC',
+                                'Summary of FOI request received by agency/minister',
+                                'Description of Documents Requested', 'FOI request',
+                                'Description of FOI Request', 'Summary of request', 'Description', 'Summary',
 'Summary of FOIrequest received by agency/minister','Summary of FOI request received','Description of    FOI Request',"FOI request",'Results 1 to 67 of 67']
-							if doc['title'] not in badtitles and doc['description'] != '':
+                            if doc['title'] not in badtitles\
+                            and doc['description'] != '':
                                                             print "saving"
                                                             foidocsdb.save(doc)
-						else:
-							print "already saved "+hash
-					
-					elif len(row.find_all('th')) == self.getColumnCount():
-						print "header row"
-					
-					else:
-						print "ERROR number of columns incorrect"
-						print row
-
+                        else:
+                            print "already saved " + dochash
+
+                    elif len(row.find_all('th')) == self.getColumnCount():
+                        print "header row"
+
+                    else:
+                        print "ERROR number of columns incorrect"
+                        print row
+

--- a/documents/rss.xml.php
+++ b/documents/rss.xml.php
@@ -9,11 +9,12 @@
 $TestFeed = new RSS2FeedWriter();
 //Setting the channel elements
 //Use wrapper functions for common channelelements
-$TestFeed->setTitle('Last Modified - All');
+$TestFeed->setTitle('disclosurelo.gs Newest Entries - All');
 $TestFeed->setLink('http://disclosurelo.gs/rss.xml.php');
-$TestFeed->setDescription('Latest entries');
-  $TestFeed->setChannelElement('language', 'en-us');
-  $TestFeed->setChannelElement('pubDate', date(DATE_RSS, time()));
+$TestFeed->setDescription('disclosurelo.gs Newest Entries - All Agencies');
+$TestFeed->setChannelElement('language', 'en-us');
+$TestFeed->setChannelElement('pubDate', date(DATE_RSS, time()));
+
 //Retriving informations from database
 $idtoname = Array();
 $agenciesdb = $server->get_db('disclosr-agencies');
@@ -21,17 +22,18 @@
     $idtoname[$row->id] = trim($row->value->name);
 }
 $foidocsdb = $server->get_db('disclosr-foidocuments');
-$rows = $foidocsdb->get_view("app", "byDate", Array('9999-99-99','0000-00-00', 50), true)->rows;
+$rows = $foidocsdb->get_view("app", "byDate", Array('9999-99-99', '0000-00-00', 50), true)->rows;
 //print_r($rows);
 foreach ($rows as $row) {
     //Create an empty FeedItem
     $newItem = $TestFeed->createNewItem();
     //Add elements to the feed item
     $newItem->setTitle($row->value->title);
-    $newItem->setLink("view.php?id=".$row->value->_id);
-    $newItem->setDate(date("c", strtotime($row->value->date)));
-    $newItem->setDescription(displayLogEntry($row,$idtoname));
-    $newItem->addElement('guid', $row->value->_id,array('isPermaLink'=>'true'));
+    $newItem->setLink("http://disclosurelo.gs/view.php?id=" . $row->value->_id);
+    $newItem->setDate(strtotime($row->value->date));
+    $newItem->setDescription(displayLogEntry($row, $idtoname));
+    $newItem->setAuthor($idtoname[$row->value->agencyID]);
+    $newItem->addElement('guid', "http://disclosurelo.gs/view.php?id=" . $row->value->_id, array('isPermaLink' => 'true'));
     //Now add the feed item
     $TestFeed->addItem($newItem);
 }

--- a/documents/scrape.py
+++ b/documents/scrape.py
@@ -8,186 +8,188 @@
 import time
 import os
 import mimetypes
-import re
+import re  # still used by canonurl(), fullurl() and scrapeAndStore() below
 import urllib
 import urlparse
 
 def mkhash(input):
-	return hashlib.md5(input).hexdigest().encode("utf-8")
+    return hashlib.md5(input).hexdigest().encode("utf-8")
 
 def canonurl(url):
-	r"""Return the canonical, ASCII-encoded form of a UTF-8 encoded URL, or ''
-	if the URL looks invalid.
-	>>> canonurl('\xe2\x9e\xa1.ws')  # tinyarro.ws
-	'http://xn--hgi.ws/'
-	"""
-	# strip spaces at the ends and ensure it's prefixed with 'scheme://'
-	url = url.strip()
-	if not url:
-		return ''
-	if not urlparse.urlsplit(url).scheme:
-		url = 'http://' + url
-
-	# turn it into Unicode
-	#try:
-	#    url = unicode(url, 'utf-8')
-	#except UnicodeDecodeError:
-	#    return ''  # bad UTF-8 chars in URL
-
-	# parse the URL into its components
-	parsed = urlparse.urlsplit(url)
-	scheme, netloc, path, query, fragment = parsed
-
-	# ensure scheme is a letter followed by letters, digits, and '+-.' chars
-	if not re.match(r'[a-z][-+.a-z0-9]*$', scheme, flags=re.I):
-		return ''
-	scheme = str(scheme)
-
-	# ensure domain and port are valid, eg: sub.domain.<1-to-6-TLD-chars>[:port]
-	match = re.match(r'(.+\.[a-z0-9]{1,6})(:\d{1,5})?$', netloc, flags=re.I)
-	if not match:
-		return ''
-	domain, port = match.groups()
-	netloc = domain + (port if port else '')
-	netloc = netloc.encode('idna')
-
-	# ensure path is valid and convert Unicode chars to %-encoded
-	if not path:
-		path = '/'  # eg: 'http://google.com' -> 'http://google.com/'
-	path = urllib.quote(urllib.unquote(path.encode('utf-8')), safe='/;')
-
-	# ensure query is valid
-	query = urllib.quote(urllib.unquote(query.encode('utf-8')), safe='=&?/')
-
-	# ensure fragment is valid
-	fragment = urllib.quote(urllib.unquote(fragment.encode('utf-8')))
-
-	# piece it all back together, truncating it to a maximum of 4KB
-	url = urlparse.urlunsplit((scheme, netloc, path, query, fragment))
-	return url[:4096]
+    r"""Return the canonical, ASCII-encoded form of a UTF-8 encoded URL, or ''
+    if the URL looks invalid.
+    >>> canonurl('\xe2\x9e\xa1.ws')  # tinyarro.ws
+    'http://xn--hgi.ws/'
+    """
+    # strip spaces at the ends and ensure it's prefixed with 'scheme://'
+    url = url.strip()
+    if not url:
+        return ''
+    if not urlparse.urlsplit(url).scheme:
+        url = 'http://' + url
+
+    # turn it into Unicode
+    #try:
+    #    url = unicode(url, 'utf-8')
+    #except UnicodeDecodeError:
+    #    return ''  # bad UTF-8 chars in URL
+
+    # parse the URL into its components
+    parsed = urlparse.urlsplit(url)
+    scheme, netloc, path, query, fragment = parsed
+
+    # ensure scheme is a letter followed by letters, digits, and '+-.' chars
+    if not re.match(r'[a-z][-+.a-z0-9]*$', scheme, flags=re.I):
+        return ''
+    scheme = str(scheme)
+
+    # ensure domain and port are valid, eg: sub.domain.<1-to-6-TLD-chars>[:port]
+    match = re.match(r'(.+\.[a-z0-9]{1,6})(:\d{1,5})?$', netloc, flags=re.I)
+    if not match:
+        return ''
+    domain, port = match.groups()
+    netloc = domain + (port if port else '')
+    netloc = netloc.encode('idna')
+
+    # ensure path is valid and convert Unicode chars to %-encoded
+    if not path:
+        path = '/'  # eg: 'http://google.com' -> 'http://google.com/'
+    path = urllib.quote(urllib.unquote(path.encode('utf-8')), safe='/;')
+
+    # ensure query is valid
+    query = urllib.quote(urllib.unquote(query.encode('utf-8')), safe='=&?/')
+
+    # ensure fragment is valid
+    fragment = urllib.quote(urllib.unquote(fragment.encode('utf-8')))
+
+    # piece it all back together, truncating it to a maximum of 4KB
+    url = urlparse.urlunsplit((scheme, netloc, path, query, fragment))
+    return url[:4096]
 
 def fullurl(url,href):
-	href = href.replace(" ","%20")
-	href = re.sub('#.*$','',href)
-	return urljoin(url,href)
+    href = href.replace(" ","%20")
+    href = re.sub('#.*$','',href)
+    return urljoin(url,href)
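+# e.g. fullurl('http://www.example.gov.au/foi/', 'docs/log 1.pdf#page=2')
+# returns 'http://www.example.gov.au/foi/docs/log%201.pdf' (illustrative:
+# spaces are %-encoded and any fragment is stripped before joining).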
 
 #http://diveintopython.org/http_web_services/etags.html
-class NotModifiedHandler(urllib2.BaseHandler):  
-	def http_error_304(self, req, fp, code, message, headers):
-		addinfourl = urllib2.addinfourl(fp, headers, req.get_full_url())
-		addinfourl.code = code
-		return addinfourl
+class NotModifiedHandler(urllib2.BaseHandler):
+    def http_error_304(self, req, fp, code, message, headers):
+        addinfourl = urllib2.addinfourl(fp, headers, req.get_full_url())
+        addinfourl.code = code
+        return addinfourl
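+
+# (urllib2 would otherwise raise on a 304; the handler above wraps it in a
+# normal response object so fetchURL below can test url_handle.code == 304
+# and fall back to the cached attachment)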
 
 def fetchURL(docsdb, url, fieldName, agencyID, scrape_again=True):
-	url = canonurl(url)
-	hash = mkhash(url)
-	req = urllib2.Request(url)
-	print "Fetching %s (%s)" % (url,hash)
-	if url.startswith("mailto") or url.startswith("javascript") or url.startswith("#") or url == None or url == "":
-		print "Not a valid HTTP url"
-		return (None,None,None)
-	doc = docsdb.get(hash) 
-	if doc == None:
-		doc = {'_id': hash, 'agencyID': agencyID, 'url': url, 'fieldName':fieldName}
-	else:
-		if (('page_scraped' in doc) and (time.time() - doc['page_scraped']) < 60*24*14*1000):
-			print "Uh oh, trying to scrape URL again too soon!"
-			last_attachment_fname = doc["_attachments"].keys()[-1]
-			last_attachment = docsdb.get_attachment(doc,last_attachment_fname)
-			content = last_attachment
-			return (doc['url'],doc['mime_type'],content)
-		if scrape_again == False:
-			print "Not scraping this URL again as requested"
-			return (None,None,None)
-
-	time.sleep(3) # wait 3 seconds to give webserver time to recover
-	
-	req.add_header("User-Agent", "Mozilla/4.0 (compatible; Prometheus webspider; owner maxious@lambdacomplex.org)")
-	#if there is a previous version stored in couchdb, load caching helper tags
-	if doc.has_key('etag'):
-		req.add_header("If-None-Match", doc['etag'])
-	if doc.has_key('last_modified'):
-		req.add_header("If-Modified-Since", doc['last_modified'])
-	 
-	opener = urllib2.build_opener(NotModifiedHandler())
-	try:
-		url_handle = opener.open(req)
-		doc['url'] = url_handle.geturl() # may have followed a redirect to a new url
-		headers = url_handle.info() # the addinfourls have the .info() too
-		doc['etag'] = headers.getheader("ETag")
-		doc['last_modified'] = headers.getheader("Last-Modified") 
-		doc['date'] = headers.getheader("Date") 
-		doc['page_scraped'] = time.time() 
-		doc['web_server'] = headers.getheader("Server") 
-		doc['via'] = headers.getheader("Via") 
-		doc['powered_by'] = headers.getheader("X-Powered-By") 
-		doc['file_size'] = headers.getheader("Content-Length") 
-		content_type = headers.getheader("Content-Type")
-		if content_type != None:
-			 doc['mime_type'] = content_type.split(";")[0]
-		else:
-			 (type,encoding) = mimetypes.guess_type(url)
-			 doc['mime_type'] = type
-		if hasattr(url_handle, 'code'):
-			if url_handle.code == 304:
-				print "the web page has not been modified"
-				return (None,None,None)
-			else: 
-				content = url_handle.read()
-				docsdb.save(doc)
-				doc = docsdb.get(hash) # need to get a _rev
-				docsdb.put_attachment(doc, content, str(time.time())+"-"+os.path.basename(url), doc['mime_type']) 
-				return (doc['url'], doc['mime_type'], content)
-				#store as attachment epoch-filename
-				
-	except urllib2.URLError as e:
-			error = ""
-			if hasattr(e, 'reason'):
-				error = "error %s in downloading %s" % (str(e.reason), url)
-			elif hasattr(e, 'code'):
-				error = "error %s in downloading %s" % (e.code, url)
-			print error
-			doc['error'] = error
-			docsdb.save(doc)
-			return (None,None,None)
+    url = canonurl(url)
+    hash = mkhash(url)
+    req = urllib2.Request(url)
+    print "Fetching %s (%s)" % (url,hash)
+    if url.startswith("mailto") or url.startswith("javascript") or url.startswith("#") or url == None or url == "":
+        print "Not a valid HTTP url"
+        return (None,None,None)
+    doc = docsdb.get(hash)
+    if doc == None:
+        doc = {'_id': hash, 'agencyID': agencyID, 'url': url, 'fieldName':fieldName}
+    else:
+        if (('page_scraped' in doc) and (time.time() - doc['page_scraped']) < 60 * 60 * 24 * 14):  # scraped within the last 14 days
+            print "Uh oh, trying to scrape URL again too soon!"+hash
+            last_attachment_fname = doc["_attachments"].keys()[-1]
+            last_attachment = docsdb.get_attachment(doc,last_attachment_fname)
+            content = last_attachment
+            return (doc['url'],doc['mime_type'],content)
+        if scrape_again == False:
+            print "Not scraping this URL again as requested"
+            return (None,None,None)
+
+    req.add_header("User-Agent", "Mozilla/4.0 (compatible; Prometheus webspider; owner maxious@lambdacomplex.org)")
+    #if there is a previous version stored in couchdb, load caching helper tags
+    if doc.has_key('etag'):
+        req.add_header("If-None-Match", doc['etag'])
+    if doc.has_key('last_modified'):
+        req.add_header("If-Modified-Since", doc['last_modified'])
+
+    opener = urllib2.build_opener(NotModifiedHandler())
+    try:
+        url_handle = opener.open(req)
+        doc['url'] = url_handle.geturl() # may have followed a redirect to a new url
+        headers = url_handle.info() # the addinfourls have the .info() too
+        doc['etag'] = headers.getheader("ETag")
+        doc['last_modified'] = headers.getheader("Last-Modified")
+        doc['date'] = headers.getheader("Date")
+        doc['page_scraped'] = time.time()
+        doc['web_server'] = headers.getheader("Server")
+        doc['via'] = headers.getheader("Via")
+        doc['powered_by'] = headers.getheader("X-Powered-By")
+        doc['file_size'] = headers.getheader("Content-Length")
+        content_type = headers.getheader("Content-Type")
+        if content_type is not None:
+            doc['mime_type'] = content_type.split(";")[0]
+        else:
+            (type, encoding) = mimetypes.guess_type(url)
+            doc['mime_type'] = type
+        if hasattr(url_handle, 'code'):
+            if url_handle.code == 304:
+                print "the web page has not been modified"+hash
+                last_attachment_fname = doc["_attachments"].keys()[-1]
+                last_attachment = docsdb.get_attachment(doc,last_attachment_fname)
+                content = last_attachment
+                return (doc['url'],doc['mime_type'],content)
+            else:
+                print "new webpage loaded"
+                content = url_handle.read()
+                docsdb.save(doc)
+                doc = docsdb.get(hash) # need to get a _rev
+                docsdb.put_attachment(doc, content, str(time.time())+"-"+os.path.basename(url), doc['mime_type'])
+                return (doc['url'], doc['mime_type'], content)
+                #store as attachment epoch-filename
+
+    except urllib2.URLError as e:
+        print "error!"
+        error = ""
+        if hasattr(e, 'reason'):
+            error = "error %s in downloading %s" % (str(e.reason), url)
+        elif hasattr(e, 'code'):
+            error = "error %s in downloading %s" % (e.code, url)
+        print error
+        doc['error'] = error
+        docsdb.save(doc)
+        return (None,None,None)
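+
+# Example call (illustrative values): callers get back a (url, mime_type,
+# content) tuple, or (None, None, None) when the fetch fails or is skipped.
+#
+#   (url, mime_type, content) = fetchURL(docsdb,
+#       'http://www.example.gov.au/foi/disclosure-log.html',
+#       'foidocuments', 'exampleAgency')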
 
 
 
 def scrapeAndStore(docsdb, url, depth, fieldName, agencyID):
-	(url,mime_type,content) = fetchURL(docsdb, url, fieldName, agencyID)
-	badURLs = ["http://www.ausport.gov.au/supporting/funding/grants_and_scholarships/grant_funding_report"]
-	if content != None and depth > 0 and url != "http://www.ausport.gov.au/supporting/funding/grants_and_scholarships/grant_funding_report":
-		if mime_type == "text/html" or mime_type == "application/xhtml+xml" or mime_type =="application/xml":
-				# http://www.crummy.com/software/BeautifulSoup/documentation.html
-				soup = BeautifulSoup(content)
-				navIDs = soup.findAll(id=re.compile('nav|Nav|menu|bar|left|right|sidebar|more-links|breadcrumb|footer|header'))
-				for nav in navIDs:
-					print "Removing element", nav['id']
-					nav.extract()
-					navClasses = soup.findAll(attrs={'class' : re.compile('nav|menu|bar|left|right|sidebar|more-links|breadcrumb|footer|header')})
-					for nav in navClasses:
-						print "Removing element", nav['class']
-						nav.extract()
-					links = soup.findAll('a') # soup.findAll('a', id=re.compile("^p-"))
-					linkurls = set([])
-					for link in links:
-						if link.has_key("href"):
-							if link['href'].startswith("http"):
-								# lets not do externa