Store scraper results in CouchDB
Former-commit-id: 234bb19e5682c98cb4cbd9c6d6b1bf542ff16d50
--- a/scrape.py
+++ b/scrape.py
@@ -4,6 +4,9 @@
from BeautifulSoup import BeautifulSoup
import re
import hashlib
+from urlparse import urljoin
+import time
+import os
# NotModifiedHandler below is adapted from http://diveintopython.org/http_web_services/etags.html
class NotModifiedHandler(urllib2.BaseHandler):
@@ -12,11 +15,13 @@
addinfourl.code = code
return addinfourl
-def scrapeAndStore(docsdb, url, depth, agencyID):
+def fetchURL(docsdb, url, agencyID):
hash = hashlib.md5(url).hexdigest()
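+ # note: this md5 of the URL doubles as the CouchDB document _id, so
+ # repeated fetches of the same URL update a single document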
req = urllib2.Request(url)
print "Fetching %s", url
- doc = docsdb['hash']
+ doc = docsdb.get(hash)
+ if doc is None:
+ doc = {'_id': hash, 'agencyID': agencyID}
#if there is a previous version stored in couchdb, load caching helper tags
if doc.has_key('etag'):
req.add_header("If-None-Match", doc['etag'])
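+ # a companion sketch (illustrative, not part of this change): doc also
+ # stores last_modified, which could be replayed the same way for servers
+ # that do not emit ETags:
+ # if doc.get('last_modified'):
+ #     req.add_header("If-Modified-Since", doc['last_modified'])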
@@ -29,44 +34,59 @@
doc['etag'] = headers.getheader("ETag")
doc['last_modified'] = headers.getheader("Last-Modified")
doc['web_server'] = headers.getheader("Server")
+ doc['powered_by'] = headers.getheader("X-Powered-By")
doc['file_size'] = headers.getheader("Content-Length")
- doc['mime_type'] = headers.getheader("Content-Type")
-
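+ # Content-Type may carry parameters (e.g. "text/html; charset=utf-8");
+ # keeping only the part before ";" leaves a bare mime type that can be
+ # compared directly below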
+ content_type = headers.getheader("Content-Type")
+ doc['mime_type'] = content_type.split(";")[0] if content_type else None
if hasattr(url_handle, 'code'):
if url_handle.code == 304:
print "the web page has not been modified"
+ return (None, None) # match the (mime_type, content) shape the caller unpacks
else:
- #do scraping
- html = url_handle.read()
+ content = url_handle.read()
+ docsdb.save(doc)
+ doc = docsdb.get(hash) # re-fetch so doc carries the _rev that put_attachment needs
+ #store as attachment epoch-filename
+ docsdb.put_attachment(doc, content, str(time.time())+"-"+os.path.basename(url), doc['mime_type'])
+ return (doc['mime_type'], content)
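+ # note: because the attachment name embeds the fetch epoch, each successful
+ # fetch adds a new attachment rather than overwriting the previous one; that
+ # retained history is what the "diff with last stored attachment" TODO needs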
+ else:
+ # the response has no code attribute here, so there is no status to report
+ print "error in downloading %s" % url
+ #record/alert error to error database
+ return (None, None)
+
+
+def scrapeAndStore(docsdb, url, depth, agencyID):
+ (mime_type, content) = fetchURL(docsdb, url, agencyID)
+ if content is not None:
+ if mime_type in ("text/html", "application/xhtml+xml", "application/xml"):
# http://www.crummy.com/software/BeautifulSoup/documentation.html
- soup = BeautifulSoup(html)
- links = soup.findAll('a') # soup.findAll('a', id=re.compile("^p-"))
- for link in links:
- if link.has_key("href"):
- print link['href']
+ soup = BeautifulSoup(content)
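+ # strip navigation chrome (elements whose id or class looks like a nav bar
+ # or menu) so only the page's substantive links are considered below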
+ navIDs = soup.findAll(id=re.compile('nav|Nav|menu|bar'))
+ for nav in navIDs:
+ print "Removing element", nav['id']
+ nav.extract()
+ navClasses = soup.findAll(attrs={'class' : re.compile('nav|menu|bar')})
+ for nav in navClasses:
+ print "Removing element", nav['class']
+ nav.extract()
+ links = soup.findAll('a') # soup.findAll('a', id=re.compile("^p-"))
+ for link in links:
+ if link.has_key("href"):
+ # urljoin returns absolute hrefs unchanged, so one call handles
+ # both absolute and relative links
+ linkurl = urljoin(url, link['href'])
+ print linkurl
#for each unique link
#if html mimetype
# go down X levels,
# diff with last stored attachment, store in document
#if not
# remember to save parentURL and title (link text that led to the document)
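+ # a sketch of the "go down X levels" TODO above, assuming depth counts the
+ # remaining levels to follow (illustrative only, not implemented here):
+ # if depth > 0:
+ #     scrapeAndStore(docsdb, linkurl, depth - 1, agencyID)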
- #store as attachment epoch-filename
- else:
- print "error %s in downloading %s", url_handle.code, URL
- #record/alert error to error database
-
-
-
-
-
-
-
-
-
-
-couch = couchdb.Server('http://192.168.1.148:5984/')
+couch = couchdb.Server('http://127.0.0.1:5984/')
# select database
agencydb = couch['disclosr-agencies']