more scraper work
[disclosr.git] / documents / scrape.py
blob:a/documents/scrape.py -> blob:b/documents/scrape.py
--- a/documents/scrape.py
+++ b/documents/scrape.py
@@ -8,9 +8,10 @@
 import time
 import os
 import mimetypes
 import re
 import urllib
 import urlparse
+import socket
+
 
 def mkhash(input):
     return hashlib.md5(input).hexdigest().encode("utf-8")
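+# (The md5 hex digest above doubles as the CouchDB document _id for a URL,
+# so repeated fetches of the same page update the same document.)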
@@ -72,82 +73,99 @@
     return urljoin(url,href)
 
 #http://diveintopython.org/http_web_services/etags.html
-class NotModifiedHandler(urllib2.BaseHandler):  
+class NotModifiedHandler(urllib2.BaseHandler):
     def http_error_304(self, req, fp, code, message, headers):
         addinfourl = urllib2.addinfourl(fp, headers, req.get_full_url())
         addinfourl.code = code
         return addinfourl
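+        # returning the response object instead of raising lets fetchURL()
+        # below test url_handle.code == 304 and fall back to the cached attachment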
 
+def getLastAttachment(docsdb,url):
+    hash = mkhash(url)
+    doc = docsdb.get(hash)
+    if doc is not None and "_attachments" in doc:
+        # epoch-prefixed attachment names sort oldest-to-newest
+        last_attachment_fname = sorted(doc["_attachments"].keys())[-1]
+        last_attachment = docsdb.get_attachment(doc,last_attachment_fname)
+        return last_attachment
+    else:
+        return None
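+
+# A minimal usage sketch (the URL here is illustrative only):
+#   last = getLastAttachment(docsdb, "http://www.example.gov.au/foi")
+#   if last is not None:
+#       cached_bytes = last.read()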
+
 def fetchURL(docsdb, url, fieldName, agencyID, scrape_again=True):
     url = canonurl(url)
     hash = mkhash(url)
     req = urllib2.Request(url)
-    print "Fetching %s" % url
+    print "Fetching %s (%s)" % (url,hash)
-    if url.startswith("mailto") or url.startswith("javascript") or url.startswith("#") or url == None or url == "":
-		print "Not a valid HTTP url"
-		return (None,None,None)
-    doc = docsdb.get(hash) 
+    if url is None or url == "" or url.startswith("mailto") or url.startswith("javascript") or url.startswith("#"):
+        print "Not a valid HTTP url"
+        return (None,None,None)
+    doc = docsdb.get(hash)
     if doc == None:
-	doc = {'_id': hash, 'agencyID': agencyID, 'url': url, 'fieldName':fieldName}
+        doc = {'_id': hash, 'agencyID': agencyID, 'url': url, 'fieldName':fieldName, 'type': 'website'}
     else:
-	if (('page_scraped' in doc) and (time.time() - doc['page_scraped']) < 999999):
-		print "Uh oh, trying to scrape URL again too soon!"
-		last_attachment_fname = doc["_attachments"].keys()[-1]
-		last_attachment = docsdb.get_attachment(doc,last_attachment_fname)
-		return (doc['url'],doc['mime_type'],last_attachment.read())
-	if scrape_again == False:
-		print "Not scraping this URL again as requested"
-		return (None,None,None)
-
-    time.sleep(3) # wait 3 seconds to give webserver time to recover
-    
+        if (('page_scraped' in doc) and (time.time() - doc['page_scraped']) < 60*60*24*14): # time.time() is seconds, so this is a 14 day rescrape window
+            print "Uh oh, trying to scrape URL again too soon! " + hash
+            last_attachment_fname = sorted(doc["_attachments"].keys())[-1]
+            last_attachment = docsdb.get_attachment(doc,last_attachment_fname)
+            return (doc['url'], doc['mime_type'], last_attachment.read())
+        if scrape_again == False:
+            print "Not scraping this URL again as requested"
+            last_attachment_fname = sorted(doc["_attachments"].keys())[-1]
+            last_attachment = docsdb.get_attachment(doc,last_attachment_fname)
+            return (doc['url'], doc['mime_type'], last_attachment.read())
+
     req.add_header("User-Agent", "Mozilla/4.0 (compatible; Prometheus webspider; owner maxious@lambdacomplex.org)")
     #if there is a previous version stored in couchdb, load caching helper tags
     if doc.has_key('etag'):
         req.add_header("If-None-Match", doc['etag'])
     if doc.has_key('last_modified'):
         req.add_header("If-Modified-Since", doc['last_modified'])
-     
+
     opener = urllib2.build_opener(NotModifiedHandler())
     try:
-     url_handle = opener.open(req)
-     doc['url'] = url_handle.geturl() # may have followed a redirect to a new url
-     headers = url_handle.info() # the addinfourls have the .info() too
-     doc['etag'] = headers.getheader("ETag")
-     doc['last_modified'] = headers.getheader("Last-Modified") 
-     doc['date'] = headers.getheader("Date") 
-     doc['page_scraped'] = time.time() 
-     doc['web_server'] = headers.getheader("Server") 
-     doc['via'] = headers.getheader("Via") 
-     doc['powered_by'] = headers.getheader("X-Powered-By") 
-     doc['file_size'] = headers.getheader("Content-Length") 
-     content_type = headers.getheader("Content-Type")
-     if content_type != None:
-    	doc['mime_type'] = content_type.split(";")[0]
-     else:
-	(type,encoding) = mimetypes.guess_type(url)
-	doc['mime_type'] = type
-     if hasattr(url_handle, 'code'): 
-        if url_handle.code == 304:
-            print "the web page has not been modified"
-	    return (None,None,None)
-        else: 
-            content = url_handle.read()
-	    docsdb.save(doc)
-	    doc = docsdb.get(hash) # need to get a _rev
-	    docsdb.put_attachment(doc, content, str(time.time())+"-"+os.path.basename(url), doc['mime_type']) 
-	    return (doc['url'], doc['mime_type'], content)
-	    #store as attachment epoch-filename
-    except urllib2.URLError as e:
-    	error = ""
-	if hasattr(e, 'reason'):
-		error = "error %s in downloading %s" % (str(e.reason), url)
-	elif hasattr(e, 'code'):
-		error = "error %s in downloading %s" % (e.code, url)
-        print error
-	doc['error'] = error
-        docsdb.save(doc)
-        return (None,None,None)
+        #default_timeout = 12
+        #socket.setdefaulttimeout(default_timeout)
+        url_handle = opener.open(req, None, 3) # data=None, timeout of 3 seconds
+        doc['url'] = url_handle.geturl() # may have followed a redirect to a new url
+        headers = url_handle.info() # the addinfourls have the .info() too
+        doc['etag'] = headers.getheader("ETag")
+        doc['last_modified'] = headers.getheader("Last-Modified")
+        doc['date'] = headers.getheader("Date")
+        doc['page_scraped'] = time.time()
+        doc['web_server'] = headers.getheader("Server")
+        doc['via'] = headers.getheader("Via")
+        doc['powered_by'] = headers.getheader("X-Powered-By")
+        doc['file_size'] = headers.getheader("Content-Length")
+        content_type = headers.getheader("Content-Type")
+        if content_type != None:
+            doc['mime_type'] = content_type.split(";")[0]
+        else:
+            (type,encoding) = mimetypes.guess_type(url)
+            doc['mime_type'] = type
+        if hasattr(url_handle, 'code'):
+            if url_handle.code == 304:
+                print "the web page has not been modified " + hash
+                last_attachment_fname = sorted(doc["_attachments"].keys())[-1]
+                last_attachment = docsdb.get_attachment(doc,last_attachment_fname)
+                return (doc['url'], doc['mime_type'], last_attachment.read())
+            else:
+                print "new webpage loaded"
+                content = url_handle.read()
+                docsdb.save(doc)
+                doc = docsdb.get(hash) # need to get a _rev
+                # store as attachment with epoch-prefixed filename so versions sort chronologically
+                docsdb.put_attachment(doc, content, str(time.time())+"-"+os.path.basename(url), doc['mime_type'])
+                return (doc['url'], doc['mime_type'], content)
+
+    except (urllib2.URLError, socket.timeout) as e:
+        print "error!"
+        error = ""
+        if hasattr(e, 'reason'):
+            error = "error %s in downloading %s" % (str(e.reason), url)
+        elif hasattr(e, 'code'):
+            error = "error %s in downloading %s" % (e.code, url)
+        print error
+        doc['error'] = error
+        docsdb.save(doc)
+        return (None,None,None)
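+
+# A minimal call sketch (the agency id and URL are illustrative only):
+#   (url, mime_type, content) = fetchURL(docsdb, "http://www.example.gov.au", "website", "some-agency-id")
+# (None, None, None) comes back for invalid URLs and download errors.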
 
 
 
@@ -155,56 +173,62 @@
     (url,mime_type,content) = fetchURL(docsdb, url, fieldName, agencyID)
     badURLs = ["http://www.ausport.gov.au/supporting/funding/grants_and_scholarships/grant_funding_report"]
     if content != None and depth > 0 and url not in badURLs:
-	if mime_type == "text/html" or mime_type == "application/xhtml+xml" or mime_type =="application/xml":
-            # http://www.crummy.com/software/BeautifulSoup/documentation.html
-            soup = BeautifulSoup(content)
-	    navIDs = soup.findAll(id=re.compile('nav|Nav|menu|bar|left|right|sidebar|more-links|breadcrumb|footer|header'))
-	    for nav in navIDs:
-		print "Removing element", nav['id']
-		nav.extract()
-	    navClasses = soup.findAll(attrs={'class' : re.compile('nav|menu|bar|left|right|sidebar|more-links|breadcrumb|footer|header')})
-	    for nav in navClasses:
-		print "Removing element", nav['class']
-		nav.extract()
-            links = soup.findAll('a') # soup.findAll('a', id=re.compile("^p-"))
-	    linkurls = set([])
-            for link in links:
-            	if link.has_key("href"):
-			if link['href'].startswith("http"):
-				# lets not do external links for now
-				# linkurls.add(link['href'])
-				None
-			if link['href'].startswith("mailto"):
-				# not http
-				None
-			if link['href'].startswith("javascript"):
-				# not http
-				None
-			else:
-				# remove anchors and spaces in urls
-                		linkurls.add(fullurl(url,link['href']))
-            for linkurl in linkurls:
-		#print linkurl
-		scrapeAndStore(docsdb, linkurl, depth-1, fieldName, agencyID)    
-
-couch = couchdb.Server('http://127.0.0.1:5984/')
+        if mime_type == "text/html" or mime_type == "application/xhtml+xml" or mime_type == "application/xml":
+            # http://www.crummy.com/software/BeautifulSoup/documentation.html
+            soup = BeautifulSoup(content)
+            navIDs = soup.findAll(id=re.compile('nav|Nav|menu|bar|left|right|sidebar|more-links|breadcrumb|footer|header'))
+            for nav in navIDs:
+                print "Removing element", nav['id']
+                nav.extract()
+            navClasses = soup.findAll(attrs={'class' : re.compile('nav|menu|bar|left|right|sidebar|more-links|breadcrumb|footer|header')})
+            for nav in navClasses:
+                print "Removing element", nav['class']
+                nav.extract()
+            links = soup.findAll('a') # soup.findAll('a', id=re.compile("^p-"))
+            linkurls = set([])
+            for link in links:
+                if link.has_key("href"):
+                    if link['href'].startswith("http"):
+                        # lets not do external links for now
+                        # linkurls.add(link['href'])
+                        pass
+                    elif link['href'].startswith("mailto"):
+                        # not http
+                        pass
+                    elif link['href'].startswith("javascript"):
+                        # not http
+                        pass
+                    else:
+                        # remove anchors and spaces in urls
+                        linkurls.add(fullurl(url,link['href']))
+            for linkurl in linkurls:
+                #print linkurl
+                scrapeAndStore(docsdb, linkurl, depth-1, fieldName, agencyID)
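+
+# Depth-limited crawl sketch (values illustrative): depth=0 stores just the
+# page, depth=1 also follows every relative link found on it:
+#   scrapeAndStore(docsdb, "http://www.example.gov.au", 1, "website", "some-agency-id")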
+
+#couch = couchdb.Server('http://192.168.1.148:5984/')
+couch = couchdb.Server('http://192.168.1.113:5984/')
+#couch = couchdb.Server('http://127.0.0.1:5984/')
 # select database
 agencydb = couch['disclosr-agencies']
 docsdb = couch['disclosr-documents']
 
 if __name__ == "__main__":
-	for row in agencydb.view('app/getScrapeRequired'): #not recently scraped agencies view?
-		agency = agencydb.get(row.id)
-		print agency['name']
-		for key in agency.keys():
-			if key == 'website':
-	    			scrapeAndStore(docsdb, agency[key],0,key,agency['_id'])
-			if key.endswith('URL'):
-				print key 
-				depth = 1
-				if 'scrapeDepth' in agency.keys():
-					depth = agency['scrapeDepth']
-   				scrapeAndStore(docsdb, agency[key],depth,key,agency['_id'])
-	     	agency['metadata']['lastScraped'] = time.time()
-   	     	agencydb.save(agency)
-
+    for row in agencydb.view('app/all'): #not recently scraped agencies view?
+        agency = agencydb.get(row.id)
+        print agency['name']
+        for key in agency.keys():
+            if key == "FOIDocumentsURL" and "status" not in agency.keys() and False: # disabled for now
+                scrapeAndStore(docsdb, agency[key],0,key,agency['_id'])
+            if key == 'website' and True:
+                scrapeAndStore(docsdb, agency[key],0,key,agency['_id'])
+                if "metadata" not in agency.keys():
+                    agency['metadata'] = {}
+                agency['metadata']['lastScraped'] = time.time()
+            if key.endswith('URL') and False: # disabled for now
+                print key
+                depth = 1
+                if 'scrapeDepth' in agency.keys():
+                    depth = agency['scrapeDepth']
+                scrapeAndStore(docsdb, agency[key],depth,key,agency['_id'])
+        agencydb.save(agency)
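+
+# Assumed invocation (a sketch; not documented in this diff): run the module
+# directly and the __main__ block walks every agency in the 'app/all' view:
+#   python documents/scrape.py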
+