remove sag copy

directory:a/sag (deleted)
 
file:a/scrape.py -> file:b/scrape.py
#http://packages.python.org/CouchDB/client.html
import couchdb
import urllib2
from BeautifulSoup import BeautifulSoup
import re

couch = couchdb.Server() # Assuming localhost:5984
# If your CouchDB server is running elsewhere, set it up like this:
# couch = couchdb.Server('http://example.com:5984/')

# select database
agencydb = couch['disclosr-agencies']

for row in agencydb.view('app/getScrapeRequired'): #not recently scraped agencies view?
    agency = agencydb.get(row.id)
    print agency['agencyName']
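# (assumption) the 'app/getScrapeRequired' design-doc view is not part of
# this commit; presumably its map function emits agencies lacking a recent
# scrape stamp, along the lines of:
#   function(doc) { if (!doc.lastScraped) { emit(doc._id, null); } }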
   
#http://diveintopython.org/http_web_services/etags.html
class NotModifiedHandler(urllib2.BaseHandler):
    def http_error_304(self, req, fp, code, message, headers):
        addinfourl = urllib2.addinfourl(fp, headers, req.get_full_url())
        addinfourl.code = code
        return addinfourl

def scrapeAndStore(URL, depth, agency):
    URL = "http://www.hole.fi/jajvirta/weblog/" # hardcoded test URL for now
    req = urllib2.Request(URL)
    # if there is a previous version stored in couchdb, load caching helper tags
    # (assumption: the cached ETag/Last-Modified live on the agency document)
    etag = agency.get('etag')
    last_modified = agency.get('last_modified')
    if etag:
        req.add_header("If-None-Match", etag)
    if last_modified:
        req.add_header("If-Modified-Since", last_modified)
    opener = urllib2.build_opener(NotModifiedHandler())
    url_handle = opener.open(req)
    headers = url_handle.info() # the addinfourls have the .info() too
    etag = headers.getheader("ETag")
    last_modified = headers.getheader("Last-Modified")
    web_server = headers.getheader("Server")
    file_size = headers.getheader("Content-Length")
    mime_type = headers.getheader("Content-Type")
    if hasattr(url_handle, 'code') and url_handle.code == 304:
        print "the web page has not been modified"
    elif hasattr(url_handle, 'code') and url_handle.code != 200:
        print "error %s in downloading %s" % (url_handle.code, URL)
        #record/alert error to error database
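        # (sketch) one way to record the failure, assuming a hypothetical
        # 'disclosr-errors' database already exists on the same server
        import time
        errordb = couch['disclosr-errors']
        errordb.save({'type': 'scrape-error', 'agency': agency['_id'],
                      'url': URL, 'code': url_handle.code,
                      'date': int(time.time())})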
    else:
        #do scraping
        html = url_handle.read()
        # http://www.crummy.com/software/BeautifulSoup/documentation.html
        soup = BeautifulSoup(html)
        links = soup.findAll('a') # e.g. soup.findAll('a', id=re.compile("^p-"))
        for link in links:
            print link['href']
        #for each unique link:
        #  if html mimetype:
        #    go down X levels,
        #    diff with last stored attachment, store in document
        #  if not:
        #    remember to save parentURL and title (link text that led to document)
        #    store as attachment epoch-filename
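
# A rough sketch of the crawl step described in the comments above. All
# names here (storePage, followLinks, the attachment naming scheme) are
# assumptions for illustration, not something this commit defines.
import time
import urlparse

def storePage(agency, url, mime_type, content, parentURL, title):
    # remember the parentURL and title (link text that led to the document),
    # then store the body as an attachment named epoch-filename; diffing
    # against the last stored attachment is left out of this sketch
    agency['parentURL'] = parentURL
    agency['title'] = title
    agencydb.save(agency) # updates agency['_rev'] in place
    filename = urlparse.urlparse(url).path.split('/')[-1] or 'index'
    agencydb.put_attachment(agency, content,
                            filename="%d-%s" % (int(time.time()), filename),
                            content_type=mime_type)

def followLinks(baseURL, links, depth, agency):
    # visit each unique link once; recurse down 'depth' levels
    seen = set()
    for link in links:
        href = link.get('href')
        if href is None or href in seen:
            continue
        seen.add(href)
        if depth > 0:
            scrapeAndStore(urlparse.urljoin(baseURL, href), depth - 1, agency)

# followLinks(URL, links, depth, agency) could then replace the bare
# print loop inside scrapeAndStore above.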