Scrape required and chart of complied features views
[disclosr.git] / scrape.py
blob:a/scrape.py -> blob:b/scrape.py
#http://packages.python.org/CouchDB/client.html
import couchdb
import urllib2
from BeautifulSoup import BeautifulSoup
import re
import hashlib
from urlparse import urljoin
import time
import os
   
#http://diveintopython.org/http_web_services/etags.html
class NotModifiedHandler(urllib2.BaseHandler):
    #pass a 304 Not Modified reply through as a normal response (with .code
    #set) instead of letting urllib2 raise an HTTPError for it
    def http_error_304(self, req, fp, code, message, headers):
        addinfourl = urllib2.addinfourl(fp, headers, req.get_full_url())
        addinfourl.code = code
        return addinfourl
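# A minimal sketch of the conditional GET this handler enables; without it,
# urllib2's default handlers raise HTTPError on a 304 reply. The URL and
# ETag value here are made up for illustration:
#   opener = urllib2.build_opener(NotModifiedHandler())
#   req = urllib2.Request("http://example.com/page")
#   req.add_header("If-None-Match", '"abc123"')
#   response = opener.open(req)
#   if hasattr(response, 'code') and response.code == 304:
#       print "cached copy is still current"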
   
def fetchURL(docsdb, url, agencyID):
    hash = hashlib.md5(url).hexdigest()
    req = urllib2.Request(url)
    print "Fetching %s" % url
    doc = docsdb.get(hash)
    if doc == None:
        doc = {'_id': hash, 'agencyID': agencyID}
    #if there is a previous version stored in couchdb, load caching helper tags
    if doc.has_key('etag'):
        req.add_header("If-None-Match", doc['etag'])
    if doc.has_key('last_modified'):
        req.add_header("If-Modified-Since", doc['last_modified'])

    opener = urllib2.build_opener(NotModifiedHandler())
    url_handle = opener.open(req)
    headers = url_handle.info() # the addinfourls have the .info() too
    doc['etag'] = headers.getheader("ETag")
    doc['last_modified'] = headers.getheader("Last-Modified")
    doc['web_server'] = headers.getheader("Server")
    doc['powered_by'] = headers.getheader("X-Powered-By")
    doc['file_size'] = headers.getheader("Content-Length")
    doc['mime_type'] = headers.getheader("Content-Type").split(";")[0]
    if hasattr(url_handle, 'code'):
        if url_handle.code == 304:
            print "the web page has not been modified"
            #return a pair so callers can always unpack (mime_type, content)
            return (None, None)
        else:
            content = url_handle.read()
            docsdb.save(doc)
            doc = docsdb.get(hash) # need to get a _rev
            #store as attachment epoch-filename
            docsdb.put_attachment(doc, content, str(time.time())+"-"+os.path.basename(url), doc['mime_type'])
            return (doc['mime_type'], content)
    else:
        #no response code at all; record/alert error to error database
        print "error in downloading %s" % url
        return (None, None)
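# A possible shape for the "record/alert error" TODO in fetchURL above; the
# 'disclosr-errors' database name is an assumption, not one this repo is
# known to create:
#   errordb = couch['disclosr-errors']
#   errordb.save({'url': url, 'agencyID': agencyID, 'time': time.time(),
#                 'error': 'no response code from server'})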
   
   
   
   
def scrapeAndStore(docsdb, url, depth, agencyID):
    (mime_type, content) = fetchURL(docsdb, url, agencyID)
    if content != None:
        if mime_type == "text/html" or mime_type == "application/xhtml+xml" or mime_type == "application/xml":
            # http://www.crummy.com/software/BeautifulSoup/documentation.html
            soup = BeautifulSoup(content)
            #strip likely navigation chrome so only page content remains
            navIDs = soup.findAll(id=re.compile('nav|Nav|menu|bar'))
            for nav in navIDs:
                print "Removing element", nav['id']
                nav.extract()
            navClasses = soup.findAll(attrs={'class' : re.compile('nav|menu|bar')})
            for nav in navClasses:
                print "Removing element", nav['class']
                nav.extract()
            links = soup.findAll('a') # soup.findAll('a', id=re.compile("^p-"))
            for link in links:
                if link.has_key("href"):
                    if link['href'].startswith("http"):
                        linkurl = link['href']
                    else:
                        linkurl = urljoin(url, link['href'])
                    print linkurl
                    #for each unique link:
                    # if html mimetype, go down X levels,
                    # diff with last stored attachment, store in document
                    # if not, remember to save parentURL and title
                    # (link text that led to document) - see the sketch below
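                    # One possible recursion for the TODO above (a sketch, not
                    # wired in; the 'visited' set is an assumption this file
                    # does not yet maintain):
                    #   if depth > 0 and linkurl not in visited:
                    #       visited.add(linkurl)
                    #       scrapeAndStore(docsdb, linkurl, depth - 1, agencyID)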
   
   
couch = couchdb.Server('http://127.0.0.1:5984/')

# select database
agencydb = couch['disclosr-agencies']
docsdb = couch['disclosr-documents']
   
for row in agencydb.view('app/getScrapeRequired'): #not recently scraped agencies view?
    agency = agencydb.get(row.id)
    print agency['name']
    scrapeAndStore(docsdb, agency['website'], 1, agency['_id'])