Don't hit the same URL twice in the same day


Former-commit-id: fc7d691cec408a85b38dc74e1fece1e3f10f388e
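
A minimal sketch of the guard this commit adds, for context (it assumes a CouchDB
record per URL carrying a `page_scraped` timestamp, as in scrape.py below; the
helper name `should_fetch` is illustrative and not part of the patch):

    import time

    ONE_DAY = 24 * 60 * 60  # re-fetch window, in seconds

    def should_fetch(doc):
        # doc is the stored CouchDB record for this URL, or None if never fetched
        if doc is None:
            return True
        # skip the fetch when the URL was already scraped within the last day
        return (time.time() - doc.get('page_scraped', 0)) >= ONE_DAY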

file:a/scrape.py -> file:b/scrape.py
--- a/scrape.py
+++ b/scrape.py
@@ -4,6 +4,9 @@
 from BeautifulSoup import BeautifulSoup
 import re
 import hashlib
+from urlparse import urljoin
+import time
+import os
 
 #http://diveintopython.org/http_web_services/etags.html
 class NotModifiedHandler(urllib2.BaseHandler):  
@@ -12,11 +15,25 @@
         addinfourl.code = code
         return addinfourl
 
-def scrapeAndStore(docsdb, url, depth, agencyID):
+def fetchURL(docsdb, url, fieldName, agencyID, scrape_again=True):
     hash = hashlib.md5(url).hexdigest()
     req = urllib2.Request(url)
-    print "Fetching %s", url
-    doc = docsdb['hash']
+    print "Fetching %s" % url
+    doc = docsdb.get(hash)
+    if doc is None:
+        doc = {'_id': hash, 'agencyID': agencyID, 'url': url, 'fieldName': fieldName}
+    else:
+        if (time.time() - doc.get('page_scraped', 0)) < 24 * 60 * 60: # scraped less than a day ago
+            print "Uh oh, trying to scrape URL again too soon!"
+            last_attachment_fname = doc["_attachments"].keys()[-1]
+            last_attachment = docsdb.get_attachment(doc, last_attachment_fname)
+            return (doc['mime_type'], last_attachment)
+        if not scrape_again:
+            print "Not scraping this URL again as requested"
+            return (None, None)
+
+    time.sleep(3) # wait 3 seconds to give webserver time to recover
+    
     #if there is a previous version stored in couchdb, load caching helper tags
     if doc.has_key('etag'):
         req.add_header("If-None-Match", doc['etag'])
@@ -28,45 +45,60 @@
     headers = url_handle.info() # the addinfourls have the .info() too
     doc['etag'] = headers.getheader("ETag")
     doc['last_modified'] = headers.getheader("Last-Modified") 
+    doc['date'] = headers.getheader("Date") 
+    doc['page_scraped'] = time.time() 
     doc['web_server'] = headers.getheader("Server") 
+    doc['powered_by'] = headers.getheader("X-Powered-By") 
     doc['file_size'] = headers.getheader("Content-Length") 
-    doc['mime_type'] = headers.getheader("Content-Type") 
-     
+    doc['mime_type'] = (headers.getheader("Content-Type") or "").split(";")[0] # drop any ";charset=..." suffix
     if hasattr(url_handle, 'code'): 
         if url_handle.code == 304:
             print "the web page has not been modified"
+            return (None, None)
         else: 
-            #do scraping
-            html = url_handle.read()
-            # http://www.crummy.com/software/BeautifulSoup/documentation.html
-            soup = BeautifulSoup(html)
-        links = soup.findAll('a') # soup.findAll('a', id=re.compile("^p-"))
-        for link in links:
-            if link.has_key("href"):
-                print link['href']
-                #for each unique link
-                #if html mimetype
-                # go down X levels,
-                # diff with last stored attachment, store in document
-                #if not
-                #   remember to save parentURL and title (link text that lead to document)
-    
+            content = url_handle.read()
+            docsdb.save(doc)
+            doc = docsdb.get(hash) # re-read the doc so we have a current _rev for put_attachment
+            docsdb.put_attachment(doc, content, str(time.time()) + "-" + os.path.basename(url), doc['mime_type'])
+            return (doc['mime_type'], content)
     #store as attachment epoch-filename
     else:
-        print "error %s in downloading %s", url_handle.code, URL
-        #record/alert error to error database
-    
-    
+        print "error %s in downloading %s" % (url_handle.code, url)
+        doc['error'] = "error %s in downloading %s" % (url_handle.code, url)
+        docsdb.save(doc)
+        return (None,None)
 
 
 
+def scrapeAndStore(docsdb, url, depth, fieldName, agencyID):
+    (mime_type, content) = fetchURL(docsdb, url, fieldName, agencyID)
+    if content is not None and depth > 0:
+        if mime_type in ("text/html", "application/xhtml+xml", "application/xml"):
+            # http://www.crummy.com/software/BeautifulSoup/documentation.html
+            soup = BeautifulSoup(content)
+            # strip navigation chrome so only page content gets followed
+            navIDs = soup.findAll(id=re.compile('nav|Nav|menu|bar'))
+            for nav in navIDs:
+                print "Removing element", nav['id']
+                nav.extract()
+            navClasses = soup.findAll(attrs={'class': re.compile('nav|menu|bar')})
+            for nav in navClasses:
+                print "Removing element", nav['class']
+                nav.extract()
+            links = soup.findAll('a') # soup.findAll('a', id=re.compile("^p-"))
+            linkurls = set([])
+            for link in links:
+                if link.has_key("href"):
+                    if link['href'].startswith("http"):
+                        # let's not do external links for now
+                        # linkurls.add(link['href'])
+                        pass
+                    else:
+                        linkurls.add(urljoin(url, link['href'].replace(" ", "%20")))
+            for linkurl in linkurls:
+                scrapeAndStore(docsdb, linkurl, depth - 1, fieldName, agencyID)
 
-
-
-
-
-
-couch = couchdb.Server('http://192.168.1.148:5984/')
+couch = couchdb.Server('http://127.0.0.1:5984/')
 
 # select database
 agencydb = couch['disclosr-agencies']
@@ -75,5 +107,10 @@
 for row in agencydb.view('app/getScrapeRequired'): #not recently scraped agencies view?
     agency = agencydb.get(row.id)
     print agency['name']
-    scrapeAndStore(docsdb, agency['website'],1,agency['_id'])
+    for key in agency.keys():
+        if key == 'website' or key.endswith('URL'):
+            print key
+            scrapeAndStore(docsdb, agency[key], agency['scrapeDepth'], key, agency['_id'])
+    agency['metadata']['lastscraped'] = time.time()
+    agencydb.save(agency)