Don't hit the same URL twice in the same day


Former-commit-id: fc7d691cec408a85b38dc74e1fece1e3f10f388e

file:a/scrape.py -> file:b/scrape.py
#http://packages.python.org/CouchDB/client.html
import couchdb
import urllib2
from BeautifulSoup import BeautifulSoup
import re
import hashlib
from urlparse import urljoin
import time
import os
   
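# opener handler that keeps urllib2 from treating HTTP 304 (Not Modified) as an
# error: the response object is returned so the cached copy can be used instead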
#http://diveintopython.org/http_web_services/etags.html
class NotModifiedHandler(urllib2.BaseHandler):
    def http_error_304(self, req, fp, code, message, headers):
        addinfourl = urllib2.addinfourl(fp, headers, req.get_full_url())
        addinfourl.code = code
        return addinfourl
   
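# fetchURL: download url and record the response headers and body in couchdb,
# keyed by the md5 of the url. If the same URL was scraped less than an hour
# ago the previously stored attachment is returned instead of re-fetching, and
# ETag/If-Modified-Since headers are sent so unchanged pages come back as 304.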
def fetchURL(docsdb, url, fieldName, agencyID, scrape_again=True):
    hash = hashlib.md5(url).hexdigest()
    req = urllib2.Request(url)
    print "Fetching %s" % url
    doc = docsdb.get(hash)
    if doc == None:
        doc = {'_id': hash, 'agencyID': agencyID, 'url': url, 'fieldName': fieldName}
    else:
        if (time.time() - doc['page_scraped']) < 3600:
            print "Uh oh, trying to scrape URL again too soon!"
            last_attachment_fname = doc["_attachments"].keys()[-1]
            last_attachment = docsdb.get_attachment(doc, last_attachment_fname)
            return (doc['mime_type'], last_attachment)
        if scrape_again == False:
            print "Not scraping this URL again as requested"
            return (None, None)
   
    time.sleep(3) # wait 3 seconds to give webserver time to recover
   
    #if there is a previous version stored in couchdb, load caching helper tags
    if doc.has_key('etag'):
        req.add_header("If-None-Match", doc['etag'])
    if doc.has_key('last_modified'):
        req.add_header("If-Modified-Since", doc['last_modified'])
    opener = urllib2.build_opener(NotModifiedHandler())
    url_handle = opener.open(req)
    headers = url_handle.info() # the addinfourls have the .info() too
    doc['etag'] = headers.getheader("ETag")
    doc['last_modified'] = headers.getheader("Last-Modified")
    doc['date'] = headers.getheader("Date")
    doc['page_scraped'] = time.time()
    doc['web_server'] = headers.getheader("Server")
    doc['powered_by'] = headers.getheader("X-Powered-By")
    doc['file_size'] = headers.getheader("Content-Length")
    doc['mime_type'] = headers.getheader("Content-Type").split(";")[0]
    if hasattr(url_handle, 'code'):
        if url_handle.code == 304:
            print "the web page has not been modified"
            return (None, None)
        else:
            content = url_handle.read()
            docsdb.save(doc)
            doc = docsdb.get(hash) # need to get a _rev
            #store as attachment epoch-filename
            docsdb.put_attachment(doc, content, str(time.time())+"-"+os.path.basename(url), doc['mime_type'])
            return (doc['mime_type'], content)
    else:
        print "error in downloading %s" % url
        doc['error'] = "error in downloading %s" % url
        docsdb.save(doc)
        return (None, None)
   
   
   
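# scrapeAndStore: fetch a page and, while depth is above zero, strip the
# navigation elements from HTML/XML responses, collect the unique relative
# links (external http links are skipped for now) and recurse into each one
# with depth-1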
def scrapeAndStore(docsdb, url, depth, fieldName, agencyID):
    (mime_type, content) = fetchURL(docsdb, url, fieldName, agencyID)
    if content != None and depth > 0:
        if mime_type == "text/html" or mime_type == "application/xhtml+xml" or mime_type == "application/xml":
            # http://www.crummy.com/software/BeautifulSoup/documentation.html
            soup = BeautifulSoup(content)
            navIDs = soup.findAll(id=re.compile('nav|Nav|menu|bar'))
            for nav in navIDs:
                print "Removing element", nav['id']
                nav.extract()
            navClasses = soup.findAll(attrs={'class' : re.compile('nav|menu|bar')})
            for nav in navClasses:
                print "Removing element", nav['class']
                nav.extract()
            links = soup.findAll('a') # soup.findAll('a', id=re.compile("^p-"))
            linkurls = set([])
            for link in links:
                if link.has_key("href"):
                    if link['href'].startswith("http"):
                        # lets not do external links for now
                        # linkurls.add(link['href'])
                        None
                    else:
                        linkurls.add(urljoin(url, link['href'].replace(" ", "%20")))
            for linkurl in linkurls:
                #print linkurl
                scrapeAndStore(docsdb, linkurl, depth-1, fieldName, agencyID)
   
   
couch = couchdb.Server('http://127.0.0.1:5984/')

# select database
agencydb = couch['disclosr-agencies']
docsdb = couch['disclosr-documents']
   
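# for every agency returned by the getScrapeRequired view, scrape its website
# and any field ending in URL down to the agency's scrapeDepth, then record
# when the agency was last scraped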
for row in agencydb.view('app/getScrapeRequired'): #not recently scraped agencies view?
    agency = agencydb.get(row.id)
    print agency['name']
    for key in agency.keys():
        if key == 'website' or key.endswith('URL'):
            print key
            scrapeAndStore(docsdb, agency[key], agency['scrapeDepth'], key, agency['_id'])
    agency['metadata']['lastscraped'] = time.time()
    agencydb.save(agency)