#http://packages.python.org/CouchDB/client.html
import couchdb
import urllib2
from BeautifulSoup import BeautifulSoup
import re
import hashlib
from urlparse import urljoin
import time
import os

#http://diveintopython.org/http_web_services/etags.html
class NotModifiedHandler(urllib2.BaseHandler):
    def http_error_304(self, req, fp, code, message, headers):
        # pass the 304 response through instead of raising, so the caller
        # can detect "not modified" via the code attribute
        addinfourl = urllib2.addinfourl(fp, headers, req.get_full_url())
        addinfourl.code = code
        return addinfourl


def fetchURL(docsdb, url, agencyID):
    # the document _id is the md5 of the URL, so refetches update the same doc
    url_hash = hashlib.md5(url).hexdigest()
    req = urllib2.Request(url)
    print "Fetching %s" % url
    doc = docsdb.get(url_hash)
    if doc is None:
        doc = {'_id': url_hash, 'agencyID': agencyID}
    # if there is a previous version stored in couchdb, load caching helper tags
    if 'etag' in doc:
        req.add_header("If-None-Match", doc['etag'])
    if 'last_modified' in doc:
        req.add_header("If-Modified-Since", doc['last_modified'])

    opener = urllib2.build_opener(NotModifiedHandler())
    try:
        url_handle = opener.open(req)
    except urllib2.URLError, e:
        print "error %s in downloading %s" % (e, url)
        #record/alert error to error database
        return (None, None)

    headers = url_handle.info()  # the addinfourls have the .info() too
    doc['etag'] = headers.getheader("ETag")
    doc['last_modified'] = headers.getheader("Last-Modified")
    doc['web_server'] = headers.getheader("Server")
    doc['powered_by'] = headers.getheader("X-Powered-By")
    doc['file_size'] = headers.getheader("Content-Length")
    content_type = headers.getheader("Content-Type")
    doc['mime_type'] = content_type.split(";")[0] if content_type else None

    if hasattr(url_handle, 'code') and url_handle.code == 304:
        print "the web page has not been modified"
        return (None, None)

    content = url_handle.read()
    docsdb.save(doc)
    doc = docsdb.get(url_hash)  # need to get a _rev
    #store as attachment epoch-filename
    docsdb.put_attachment(doc, content,
                          str(time.time()) + "-" + os.path.basename(url),
                          doc['mime_type'])
    return (doc['mime_type'], content)


def scrapeAndStore(docsdb, url, depth, agencyID):
    (mime_type, content) = fetchURL(docsdb, url, agencyID)
    if content is None:
        return
    if mime_type in ("text/html", "application/xhtml+xml", "application/xml"):
        # http://www.crummy.com/software/BeautifulSoup/documentation.html
        soup = BeautifulSoup(content)
        # strip navigation chrome so only document content and links remain
        navIDs = soup.findAll(id=re.compile('nav|Nav|menu|bar'))
        for nav in navIDs:
            print "Removing element", nav['id']
            nav.extract()
        navClasses = soup.findAll(attrs={'class': re.compile('nav|menu|bar')})
        for nav in navClasses:
            print "Removing element", nav['class']
            nav.extract()
        links = soup.findAll('a')  # soup.findAll('a', id=re.compile("^p-"))
        seen = set()
        for link in links:
            href = link.get('href')
            if not href:
                continue
            if href.startswith("http"):
                linkurl = href
            else:
                linkurl = urljoin(url, href)
            # for each unique link, go down `depth` more levels
            # TODO: diff with last stored attachment, store in document
            # TODO: remember to save parentURL and title (link text that lead to document)
            if linkurl not in seen:
                seen.add(linkurl)
                print linkurl
                if depth > 0:
                    scrapeAndStore(docsdb, linkurl, depth - 1, agencyID)


couch = couchdb.Server('http://127.0.0.1:5984/')
# select databases
agencydb = couch['disclosr-agencies']
docsdb = couch['disclosr-documents']

for row in agencydb.view('app/getScrapeRequired'):  # not recently scraped agencies view?
    agency = agencydb.get(row.id)
    print agency['name']
    scrapeAndStore(docsdb, agency['website'], 1, agency['_id'])