|
#http://packages.python.org/CouchDB/client.html |
|
import couchdb |
|
import urllib2 |
|
from BeautifulSoup import BeautifulSoup |
|
import re |
|
import hashlib |
|
|
|
#http://diveintopython.org/http_web_services/etags.html |
|
class NotModifiedHandler(urllib2.BaseHandler):
    """urllib2 handler that converts an HTTP 304 (Not Modified) response
    into an ordinary return value instead of an error, so the caller can
    inspect ``.code`` on the handle it gets back from ``opener.open()``.

    Recipe from http://diveintopython.org/http_web_services/etags.html
    """

    def http_error_304(self, req, fp, code, message, headers):
        # Wrap the raw response so it looks like a normal successful
        # open() result, then tag it with the 304 status for the caller.
        response = urllib2.addinfourl(fp, headers, req.get_full_url())
        response.code = code
        return response
|
|
|
def scrapeAndStore(docsdb, url, depth, agencyID): |
|
hash = hashlib.md5(url).hexdigest() |
|
req = urllib2.Request(url) |
|
print "Fetching %s", url |
|
doc = docsdb['hash'] |
|
#if there is a previous version stored in couchdb, load caching helper tags |
|
if doc.has_key('etag'): |
|
req.add_header("If-None-Match", doc['etag']) |
|
if doc.has_key('last_modified'): |
|
req.add_header("If-Modified-Since", doc['last_modified']) |
|
|
|
opener = urllib2.build_opener(NotModifiedHandler()) |
|
url_handle = opener.open(req) |
|
headers = url_handle.info() # the addinfourls have the .info() too |
|
doc['etag'] = headers.getheader("ETag") |
|
doc['last_modified'] = headers.getheader("Last-Modified") |
|
doc['web_server'] = headers.getheader("Server") |
|
doc['file_size'] = headers.getheader("Content-Length") |
|
doc['mime_type'] = headers.getheader("Content-Type") |
|
|
|
if hasattr(url_handle, 'code'): |
|
if url_handle.code == 304: |
|
print "the web page has not been modified" |
|
else: |
|
#do scraping |
|
html = url_handle.read() |
|
# http://www.crummy.com/software/BeautifulSoup/documentation.html |
|
soup = BeautifulSoup(html) |
|
links = soup.findAll('a') # soup.findAll('a', id=re.compile("^p-")) |
|
for link in links: |
|
if link.has_key("href"): |
|
print link['href'] |
|
#for each unique link |
|
#if html mimetype |
|
# go down X levels, |
|
# diff with last stored attachment, store in document |
|
#if not |
|
# remember to save parentURL and title (link text that lead to document) |
|
|
|
#store as attachment epoch-filename |
|
else: |
|
print "error %s in downloading %s", url_handle.code, URL |
|
#record/alert error to error database |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
couch = couchdb.Server('http://192.168.1.148:5984/') |
|
|
|
# select database |
|
agencydb = couch['disclosr-agencies'] |
|
docsdb = couch['disclosr-documents'] |
|
|
|
for row in agencydb.view('app/getScrapeRequired'): #not recently scraped agencies view? |
|
agency = agencydb.get(row.id) |
|
print agency['name'] |
|
scrapeAndStore(docsdb, agency['website'],1,agency['_id']) |
|
|