Don't hit the same URL twice in the same day


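In scrape.py terms the rule is: one CouchDB document per URL (keyed by the URL's MD5) records when that URL was last fetched, and a fetch is skipped when the timestamp is under a day old. A minimal sketch of that check, assuming a couchdb-python `docsdb` handle and the `page_scraped` field this patch writes (the `is_fresh` helper itself is illustrative, not part of the patch):

    import hashlib
    import time

    ONE_DAY = 60 * 60 * 24

    def is_fresh(docsdb, url):
        # One CouchDB document per URL, keyed by the MD5 of the URL;
        # 'page_scraped' holds the epoch time of the last fetch.
        doc = docsdb.get(hashlib.md5(url).hexdigest())
        if doc is None or 'page_scraped' not in doc:
            return False  # never fetched before, so go ahead
        return (time.time() - doc['page_scraped']) < ONE_DAY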

--- a/charts.php
+++ b/charts.php
@@ -41,6 +41,7 @@
 ?>
     
         $.plot($("#placeholder"), [ d1], {
+            grid: { hoverable: true },
             series: {
                 bars: { show: true, barWidth: 0.6 }
             },
@@ -58,11 +59,41 @@
             }
         });
     
+var previousPoint = null;
+$("#placeholder").bind("plothover", function (event, pos, item) {
+    if (item) {
+        if (previousPoint != item.dataIndex) { // compare indices; datapoint arrays never compare equal
+            previousPoint = item.dataIndex;
+
+            $("#tooltip").remove();
+            var x = item.datapoint[0],
+                y = item.datapoint[1] - item.datapoint[2];
+
+            showTooltip(item.pageX, item.pageY, y);
+        }
+    }
+    else {
+        $("#tooltip").remove();
+        previousPoint = null;
+    }
+});
 
     
     
 });
 };
+function showTooltip(x, y, contents) {
+    $('<div id="tooltip">' + contents + '</div>').css({
+        position: 'absolute',
+        display: 'none',
+        top: y + 5,
+        left: x + 5,
+        border: '1px solid #fdd',
+        padding: '2px',
+        'background-color': '#fee',
+        opacity: 0.80
+    }).appendTo("body").fadeIn(200);
+}
 </script>
 
 <?php

--- a/getAgency.php
+++ b/getAgency.php
@@ -124,7 +124,7 @@
         }
     }
 
-    $mode = "view";
+    $mode = "edit";
     if ($mode == "edit") {
         $row = addDefaultFields(object_to_array($row));
     } else {

--- a/include/couchdb.inc.php
+++ b/include/couchdb.inc.php
@@ -26,14 +26,16 @@
         emit(doc._id, doc.parentOrg);
         }
 };";
-    $obj->views->byName->map = "function(doc) {  
+    $obj->views->byName->map = 'function(doc) { 
+        if (typeof(doc["status"]) == "undefined" || doc["status"] != "suspended") {
         emit(doc.name, doc._id); 
  for (name in doc.otherNames) {
-if (doc.otherNames[name] != '' && doc.otherNames[name] != doc.name) {
+if (doc.otherNames[name] != "" && doc.otherNames[name] != doc.name) {
        	 emit(doc.otherNames[name], doc._id); 
 }
         }
-};";
+        }
+};';
 
     $obj->views->foiEmails->map = "function(doc) {  
         emit(doc._id, doc.foiEmail);

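The byName map now skips agencies whose status is "suspended" before emitting name -> _id rows. For reference, a sketch of querying the updated view from Python with couchdb-python; it assumes the views live in the same `app` design document that scrape.py queries, and the lookup key is a made-up example:

    import couchdb

    couch = couchdb.Server('http://127.0.0.1:5984/')
    agencydb = couch['disclosr-agencies']

    # byName emits one row per name/otherName; suspended agencies
    # no longer appear in the results.
    for row in agencydb.view('app/byName', key='Example Agency Name'):
        print row.value  # the matching agency document _id
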
--- a/schemas/agency.json.php
+++ b/schemas/agency.json.php
@@ -24,8 +24,10 @@
         "consultanciesURL" => Array("type" => "string", "required" => true, "x-title" => "Consultants Hired", "description" => ""),
         "legalExpenditureURL" => Array("type" => "string", "required" => true, "x-title" => "Legal Services Expenditure", "description" => "Legal Services Expenditure mandated by Legal Services Directions 2005"),
         "recordsListURL" => Array("type" => "string", "required" => true, "x-title" => "Files/Records Held", "description" => "Indexed lists of departmental and agency files, <a href='http://www.aph.gov.au/senate/pubs/standing_orders/d05.htm'>mandated by the Senate</a>"),
-        "FOIDocumentsURL" => Array("type" => "string", "required" => true, "x-title" => "FOI Documents Released", "description" => ""),
-        "infoPublicationSchemeURL" => Array("type" => "string", "required" => true, "x-title" => "Information Publication Scheme", "description" => ""),
+        "FOIDocumentsURL" => Array("type" => "string", "required" => true, "x-title" => "FOI Documents Released", "description" => "FOI Disclosure Log URL"),
+        "FOIDocumentsRSSURL" => Array("type" => "string", "required" => false, "x-title" => "RSS Feed of FOI Documents Released", "description" => "FOI Disclosure Log in RSS format"),
+        "hasFOIPDF" => Array("type" => "string", "required" => false, "x-title" => "Has FOI Documents Released in PDF", "description" => "FOI Disclosure Log contains any PDFs"),
+        "infoPublicationSchemeURL" => Array("type" => "string", "required" => true, "x-title" => "Information Publication Scheme", "description" => ""),
         "appointmentsURL" => Array("type" => "string", "required" => true, "x-title" => "Agency Appointments/Boards", "description" => "Departmental and agency appointments and vacancies , <a href='http://www.aph.gov.au/senate/pubs/standing_orders/d05.htm'>mandated by the Senate</a>"),
         "advertisingURL" => Array("type" => "string", "required" => true, "x-title" => "Approved Advertising Campaigns", "description" => " Agency advertising and public information projects, <a href='http://www.aph.gov.au/senate/pubs/standing_orders/d05.htm'>mandated by the Senate</a> "),
         "hasRSS" => Array("type" => "string", "required" => true, "x-title" => "Has RSS", "description" => ""),

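Consumers of the schema can treat the two new optional fields as progressive enhancements over the required FOIDocumentsURL. A small sketch of that fallback, assuming an agency document already loaded from CouchDB (the helper name is hypothetical):

    def foi_disclosure_log(agency):
        # Prefer the machine-readable RSS feed when an agency has one,
        # otherwise fall back to the HTML disclosure log page.
        if agency.get('FOIDocumentsRSSURL'):
            return ('rss', agency['FOIDocumentsRSSURL'])
        return ('html', agency.get('FOIDocumentsURL'))
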
--- a/scrape.py
+++ b/scrape.py
@@ -3,6 +3,10 @@
 import urllib2
 from BeautifulSoup import BeautifulSoup
 import re
+import hashlib
+from urlparse import urljoin
+import time
+import os
 
 #http://diveintopython.org/http_web_services/etags.html
 class NotModifiedHandler(urllib2.BaseHandler):  
@@ -11,66 +15,102 @@
         addinfourl.code = code
         return addinfourl
 
-def scrapeAndStore(URL, depth, agency):
-    URL = "http://www.google.com"
-    req = urllib2.Request(URL)
-    etag = 'y'
-    last_modified = 'y'
-    #if there is a previous version sotred in couchdb, load caching helper tags
-    if etag:
-        req.add_header("If-None-Match", etag)
-    if last_modified:
-        req.add_header("If-Modified-Since", last_modified)
+def fetchURL(docsdb, url, fieldName, agencyID, scrape_again=True):
+    hash = hashlib.md5(url).hexdigest()
+    req = urllib2.Request(url)
+    print "Fetching %s" % url
+    doc = docsdb.get(hash)
+    if doc is None:
+        doc = {'_id': hash, 'agencyID': agencyID, 'url': url, 'fieldName': fieldName}
+    else:
+        if (time.time() - doc['page_scraped']) < 60 * 60 * 24: # only refetch once a day
+            print "Uh oh, trying to scrape URL again too soon!"
+            last_attachment_fname = doc["_attachments"].keys()[-1]
+            last_attachment = docsdb.get_attachment(doc, last_attachment_fname)
+            return (doc['mime_type'], last_attachment)
+        if not scrape_again:
+            print "Not scraping this URL again as requested"
+            return (None, None)
+
+    time.sleep(3) # wait 3 seconds to give webserver time to recover
+    
+    #if there is a previous version stored in couchdb, load caching helper tags
+    if 'etag' in doc:
+        req.add_header("If-None-Match", doc['etag'])
+    if 'last_modified' in doc:
+        req.add_header("If-Modified-Since", doc['last_modified'])
      
     opener = urllib2.build_opener(NotModifiedHandler())
     url_handle = opener.open(req)
     headers = url_handle.info() # the addinfourls have the .info() too
-    etag = headers.getheader("ETag")
-    last_modified = headers.getheader("Last-Modified") 
-    web_server = headers.getheader("Server") 
-    file_size = headers.getheader("Content-Length") 
-    mime_type = headers.getheader("Content-Type") 
-     
+    doc['etag'] = headers.getheader("ETag")
+    doc['last_modified'] = headers.getheader("Last-Modified")
+    doc['date'] = headers.getheader("Date")
+    doc['page_scraped'] = time.time()
+    doc['web_server'] = headers.getheader("Server")
+    doc['powered_by'] = headers.getheader("X-Powered-By")
+    doc['file_size'] = headers.getheader("Content-Length")
+    doc['mime_type'] = (headers.getheader("Content-Type") or "").split(";")[0] # header may be absent
     if hasattr(url_handle, 'code'): 
         if url_handle.code == 304:
             print "the web page has not been modified"
+            return (None, None)
         else: 
-            #do scraping
-            html = url_handle.read()
-            # http://www.crummy.com/software/BeautifulSoup/documentation.html
-            soup = BeautifulSoup(html)
-        links = soup.findAll('a') # soup.findAll('a', id=re.compile("^p-"))
-        for link in links:
-            print link['href']
-            #for each unique link
-            #if html mimetype
-            # go down X levels,
-            # diff with last stored attachment, store in document
-            #if not
-            #   remember to save parentURL and title (link text that lead to document)
-    
+            content = url_handle.read()
+            docsdb.save(doc)
+            doc = docsdb.get(hash) # need to get a _rev
+            docsdb.put_attachment(doc, content, str(time.time()) + "-" + os.path.basename(url), doc['mime_type'])
+            return (doc['mime_type'], content)
     #store as attachment epoch-filename
     else:
-        print "error %s in downloading %s", url_handle.code, URL
-        #record/alert error to error database
-    
-    
+        print "error %s in downloading %s" % (url_handle.code, url)
+        doc['error'] = "error %s in downloading %s" % (url_handle.code, url)
+        docsdb.save(doc)
+        return (None, None)
 
 
 
+def scrapeAndStore(docsdb, url, depth, fieldName, agencyID):
+    (mime_type, content) = fetchURL(docsdb, url, fieldName, agencyID)
+    if content is not None and depth > 0:
+        if mime_type in ("text/html", "application/xhtml+xml", "application/xml"):
+            # http://www.crummy.com/software/BeautifulSoup/documentation.html
+            soup = BeautifulSoup(content)
+            navIDs = soup.findAll(id=re.compile('nav|Nav|menu|bar'))
+            for nav in navIDs:
+                print "Removing element", nav['id']
+                nav.extract()
+            navClasses = soup.findAll(attrs={'class': re.compile('nav|menu|bar')})
+            for nav in navClasses:
+                print "Removing element", nav['class']
+                nav.extract()
+            links = soup.findAll('a') # soup.findAll('a', id=re.compile("^p-"))
+            linkurls = set()
+            for link in links:
+                if link.has_key("href"):
+                    if link['href'].startswith("http"):
+                        # lets not do external links for now
+                        # linkurls.add(link['href'])
+                        pass
+                    else:
+                        linkurls.add(urljoin(url, link['href'].replace(" ", "%20")))
+            for linkurl in linkurls:
+                #print linkurl
+                scrapeAndStore(docsdb, linkurl, depth - 1, fieldName, agencyID)
 
-
-
-
-
-
-couch = couchdb.Server('http://192.168.1.148:5984/')
+couch = couchdb.Server('http://127.0.0.1:5984/')
 
 # select database
 agencydb = couch['disclosr-agencies']
+docsdb = couch['disclosr-documents']
 
 for row in agencydb.view('app/getScrapeRequired'): #not recently scraped agencies view?
     agency = agencydb.get(row.id)
     print agency['name']
-scrapeAndStore("A",1,1)
+    for key in agency.keys():
+        if key == 'website' or key.endswith('URL'):
+            print key
+            scrapeAndStore(docsdb, agency[key], agency['scrapeDepth'], key, agency['_id'])
+    agency['metadata']['lastscraped'] = time.time()
+    agencydb.save(agency)
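A usage sketch of the new fetchURL contract, under the same setup as the script above (the URL and agency id below are placeholders): the first call goes over HTTP and stores the body as a timestamped attachment; a repeat call within a day returns the stored attachment instead of hitting the server again.

    docsdb = couch['disclosr-documents']
    url = 'http://www.example.gov.au/foi/disclosure-log.html'  # placeholder

    # First call: HTTP fetch, document and attachment saved in CouchDB.
    (mime_type, content) = fetchURL(docsdb, url, 'FOIDocumentsURL', 'example-agency-id')
    # Second call within 24 hours: no network traffic, returns the
    # attachment stored by the first call.
    (mime_type, cached) = fetchURL(docsdb, url, 'FOIDocumentsURL', 'example-agency-id')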