prod fixes
[disclosr.git] / documents / scrape.py
diff --git a/documents/scrape.py b/documents/scrape.py
--- a/documents/scrape.py
+++ b/documents/scrape.py
@@ -7,12 +7,20 @@
 from urlparse import urljoin
 import time
 import os
+import sys
 import mimetypes
 import urllib
 import urlparse
+import socket
+
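+# CouchDB connection, now made at module load; the commented-out addresses
+# appear to be earlier LAN hosts kept for reference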
+#couch = couchdb.Server('http://192.168.1.148:5984/')
+#couch = couchdb.Server('http://192.168.1.113:5984/')
+couch = couchdb.Server('http://127.0.0.1:5984/')
+
 
 def mkhash(input):
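+    # md5 hex digest of the URL, used as the CouchDB document _id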
     return hashlib.md5(input).hexdigest().encode("utf-8")
+
 
 def canonurl(url):
     r"""Return the canonical, ASCII-encoded form of a UTF-8 encoded URL, or ''
@@ -65,10 +73,11 @@
     url = urlparse.urlunsplit((scheme, netloc, path, query, fragment))
     return url[:4096]
 
-def fullurl(url,href):
-    href = href.replace(" ","%20")
-    href = re.sub('#.*$','',href)
-    return urljoin(url,href)
+
+def fullurl(url, href):
+    href = href.replace(" ", "%20")
+    href = re.sub('#.*$', '', href)
+    return urljoin(url, href)
 
 #http://diveintopython.org/http_web_services/etags.html
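+#handler that passes HTTP 304 (Not Modified) responses through so the cached copy in CouchDB can be reused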
 class NotModifiedHandler(urllib2.BaseHandler):
@@ -77,37 +86,41 @@
         addinfourl.code = code
         return addinfourl
 
-def getLastAttachment(docsdb,url):
+
+def getLastAttachment(docsdb, url):
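+    # return the last-listed attachment for this URL's document, or None if the doc or its attachments are missing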
     hash = mkhash(url)
     doc = docsdb.get(hash)
-    if doc != None:
+    if doc != None and "_attachments" in doc.keys():
         last_attachment_fname = doc["_attachments"].keys()[-1]
-        last_attachment = docsdb.get_attachment(doc,last_attachment_fname)
+        last_attachment = docsdb.get_attachment(doc, last_attachment_fname)
         return last_attachment
     else:
         return None
+
 
 def fetchURL(docsdb, url, fieldName, agencyID, scrape_again=True):
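+    # fetch url, honouring ETag/Last-Modified caching against the stored doc and a
+    # re-scrape throttle; returns (url, mime_type, content), or (None, None, None) on failure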
     url = canonurl(url)
     hash = mkhash(url)
     req = urllib2.Request(url)
-    print "Fetching %s (%s)" % (url,hash)
+    print "Fetching %s (%s)" % (url, hash)
     if url.startswith("mailto") or url.startswith("javascript") or url.startswith("#") or url == None or url == "":
-        print "Not a valid HTTP url"
-        return (None,None,None)
+        print >> sys.stderr, "Not a valid HTTP url"
+        return (None, None, None)
     doc = docsdb.get(hash)
     if doc == None:
-        doc = {'_id': hash, 'agencyID': agencyID, 'url': url, 'fieldName':fieldName}
+        doc = {'_id': hash, 'agencyID': agencyID, 'url': url, 'fieldName': fieldName, 'type': 'website'}
     else:
-        if (('page_scraped' in doc) and (time.time() - doc['page_scraped']) < 60*24*14*1000):
-            print "Uh oh, trying to scrape URL again too soon!"+hash
-            last_attachment_fname = doc["_attachments"].keys()[-1]
-            last_attachment = docsdb.get_attachment(doc,last_attachment_fname)
-            content = last_attachment
-            return (doc['url'],doc['mime_type'],content.read())
-        if scrape_again == False:
-            print "Not scraping this URL again as requested"
-            return (doc['url'],doc['mime_type'],content.read())
+        if (('page_scraped' in doc) and ((time.time() - doc['page_scraped']) < 60 * 24 * 14) or (scrape_again == False)):
+            print "Uh oh, trying to scrape URL again too soon! " + hash
+            if (not doc.has_key('file_size') or doc["file_size"] != "0") and "_attachments" in doc.keys():
+                last_attachment_fname = doc["_attachments"].keys()[-1]
+                last_attachment = docsdb.get_attachment(doc, last_attachment_fname)
+                content = last_attachment.read()
+                mime_type = doc['mime_type']
+            else:
+                content = None
+                mime_type = None
+            return (doc['url'], mime_type, content)
 
     req.add_header("User-Agent", "Mozilla/4.0 (compatible; Prometheus webspider; owner maxious@lambdacomplex.org)")
     #if there is a previous version stored in couchdb, load caching helper tags
@@ -118,7 +131,7 @@
 
     opener = urllib2.build_opener(NotModifiedHandler())
     try:
-        url_handle = opener.open(req)
+        url_handle = opener.open(req, None, 20)
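+        # 20 second timeout; timeouts are caught below together with urllib2.URLError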
         doc['url'] = url_handle.geturl() # may have followed a redirect to a new url
         headers = url_handle.info() # the addinfourls have the .info() too
         doc['etag'] = headers.getheader("ETag")
@@ -131,97 +144,98 @@
         doc['file_size'] = headers.getheader("Content-Length")
         content_type = headers.getheader("Content-Type")
         if content_type != None:
-             doc['mime_type'] = content_type.split(";")[0]
+            doc['mime_type'] = content_type.split(";")[0]
         else:
-             (type,encoding) = mimetypes.guess_type(url)
-             doc['mime_type'] = type
+            (type, encoding) = mimetypes.guess_type(url)
+            doc['mime_type'] = type
         if hasattr(url_handle, 'code'):
             if url_handle.code == 304:
-                print "the web page has not been modified"+hash
+                print "the web page has not been modified" + hash
                 last_attachment_fname = doc["_attachments"].keys()[-1]
-                last_attachment = docsdb.get_attachment(doc,last_attachment_fname)
+                last_attachment = docsdb.get_attachment(doc, last_attachment_fname)
                 content = last_attachment
-                return (doc['url'],doc['mime_type'],content.read())
+                return (doc['url'], doc['mime_type'], content.read())
             else:
                 print "new webpage loaded"
                 content = url_handle.read()
                 docsdb.save(doc)
                 doc = docsdb.get(hash) # need to get a _rev
-                docsdb.put_attachment(doc, content, str(time.time())+"-"+os.path.basename(url), doc['mime_type'])
+                docsdb.put_attachment(doc, content, str(time.time()) + "-" + os.path.basename(url), doc['mime_type'])
                 return (doc['url'], doc['mime_type'], content)
                 #store as attachment epoch-filename
 
-    except urllib2.URLError as e:
-            print "error!"
-            error = ""
-            if hasattr(e, 'reason'):
-                error = "error %s in downloading %s" % (str(e.reason), url)
-            elif hasattr(e, 'code'):
-                error = "error %s in downloading %s" % (e.code, url)
-            print error
-            doc['error'] = error
-            docsdb.save(doc)
-            return (None,None,None)
-
+    except (urllib2.URLError, socket.timeout) as e:
+        print >> sys.stderr, "error!"
+        error = ""
+        if hasattr(e, 'reason'):
+            error = "error %s in downloading %s" % (str(e.reason), url)
+        elif hasattr(e, 'code'):
+            error = "error %s in downloading %s" % (e.code, url)
+        print >> sys.stderr, error
+        doc['error'] = error
+        docsdb.save(doc)
+        return (None, None, None)
 
 
 def scrapeAndStore(docsdb, url, depth, fieldName, agencyID):
-    (url,mime_type,content) = fetchURL(docsdb, url, fieldName, agencyID)
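+    # fetch the page, strip navigation elements, then follow each discovered link recursively, decrementing depth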
+    (url, mime_type, content) = fetchURL(docsdb, url, fieldName, agencyID)
     badURLs = ["http://www.ausport.gov.au/supporting/funding/grants_and_scholarships/grant_funding_report"]
-    if content != None and depth > 0 and url != "http://www.ausport.gov.au/supporting/funding/grants_and_scholarships/grant_funding_report":
+    if content != None and depth > 0 and url not in badURLs:
-        if mime_type == "text/html" or mime_type == "application/xhtml+xml" or mime_type =="application/xml":
-                # http://www.crummy.com/software/BeautifulSoup/documentation.html
-                soup = BeautifulSoup(content)
-                navIDs = soup.findAll(id=re.compile('nav|Nav|menu|bar|left|right|sidebar|more-links|breadcrumb|footer|header'))
-                for nav in navIDs:
-                    print "Removing element", nav['id']
+        if mime_type == "text/html" or mime_type == "application/xhtml+xml" or mime_type == "application/xml":
+            # http://www.crummy.com/software/BeautifulSoup/documentation.html
+            soup = BeautifulSoup(content)
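+            # strip navigation/menu/header/footer elements so their links aren't followed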
+            navIDs = soup.findAll(
+                id=re.compile('nav|Nav|menu|bar|left|right|sidebar|more-links|breadcrumb|footer|header'))
+            for nav in navIDs:
+                print "Removing element", nav['id']
+                nav.extract()
+                navClasses = soup.findAll(
+                    attrs={'class': re.compile('nav|menu|bar|left|right|sidebar|more-links|breadcrumb|footer|header')})
+                for nav in navClasses:
+                    print "Removing element", nav['class']
                     nav.extract()
-                    navClasses = soup.findAll(attrs={'class' : re.compile('nav|menu|bar|left|right|sidebar|more-links|breadcrumb|footer|header')})
-                    for nav in navClasses:
-                        print "Removing element", nav['class']
-                        nav.extract()
-                    links = soup.findAll('a') # soup.findAll('a', id=re.compile("^p-"))
-                    linkurls = set([])
-                    for link in links:
-                        if link.has_key("href"):
-                            if link['href'].startswith("http"):
-                                # lets not do external links for now
-                                # linkurls.add(link['href'])
-                                None
-                            if link['href'].startswith("mailto"):
-                                # not http
-                                None
-                            if link['href'].startswith("javascript"):
-                                # not http
-                                None
-                            else:
-                                # remove anchors and spaces in urls
-                                linkurls.add(fullurl(url,link['href']))
-                    for linkurl in linkurls:
-                               #print linkurl
-                               scrapeAndStore(docsdb, linkurl, depth-1, fieldName, agencyID)
-
-#couch = couchdb.Server('http://192.168.1.148:5984/')
-couch = couchdb.Server('http://127.0.0.1:5984/')
+                links = soup.findAll('a') # soup.findAll('a', id=re.compile("^p-"))
+                linkurls = set([])
+                for link in links:
+                    if link.has_attr("href"):
+                        if link['href'].startswith("http"):
+                            # let's not follow external links for now
+                            # linkurls.add(link['href'])
+                            pass
+                        elif link['href'].startswith("mailto"):
+                            # not http
+                            pass
+                        elif link['href'].startswith("javascript"):
+                            # not http
+                            pass
+                        else:
+                            # remove anchors and spaces in urls
+                            linkurls.add(fullurl(url, link['href']))
+                for linkurl in linkurls:
+                    #print linkurl
+                    scrapeAndStore(docsdb, linkurl, depth - 1, fieldName, agencyID)
+
 # select database
 agencydb = couch['disclosr-agencies']
 docsdb = couch['disclosr-documents']
 
 if __name__ == "__main__":
-    for row in agencydb.view('app/getScrapeRequired'): #not recently scraped agencies view?
+    for row in agencydb.view('app/all'): #not recently scraped agencies view?
         agency = agencydb.get(row.id)
         print agency['name']
         for key in agency.keys():
-            if key == "FOIDocumentsURL" and "status" not in agency.keys:
-                scrapeAndStore(docsdb, agency[key],0,key,agency['_id'])
-            if key == 'website' and False:
-                scrapeAndStore(docsdb, agency[key],0,key,agency['_id'])
+            if key == "FOIDocumentsURL" and "status" not in agency.keys() and False:
+                scrapeAndStore(docsdb, agency[key], 0, key, agency['_id'])
+            if key == 'website' and True:
+                scrapeAndStore(docsdb, agency[key], 0, key, agency['_id'])
+                if "metadata" not in agency.keys():
+                    agency['metadata'] = {}
                 agency['metadata']['lastScraped'] = time.time()
             if key.endswith('URL') and False:
                 print key
                 depth = 1
                 if 'scrapeDepth' in agency.keys():
                     depth = agency['scrapeDepth']
-                scrapeAndStore(docsdb, agency[key],depth,key,agency['_id'])
+                scrapeAndStore(docsdb, agency[key], depth, key, agency['_id'])
         agencydb.save(agency)