Redid direct agency info import
[disclosr.git] / scrape.py
#http://packages.python.org/CouchDB/client.html  
import couchdb  
import urllib2  
from BeautifulSoup import BeautifulSoup  
import re  
   
couch = couchdb.Server() # Assuming localhost:5984  
# If your CouchDB server is running elsewhere, set it up like this:  
# couch = couchdb.Server('http://example.com:5984/')  
   
# select database  
agencydb = couch['disclosr-agencies']  
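
# The loop below depends on an 'app/getScrapeRequired' view existing in the
# database. A minimal sketch of such a design document, saved via
# couchdb-python; that it simply emits agencies with no 'lastScraped' field is
# an assumption rather than anything this file defines.
if '_design/app' not in agencydb:
    agencydb.save({
        '_id': '_design/app',
        'views': {
            'getScrapeRequired': {
                'map': "function(doc) { if (!doc.lastScraped) { emit(doc._id, null); } }"
            }
        }
    })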
   
for row in agencydb.view('app/getScrapeRequired'): #not recently scraped agencies view?
    agency = agencydb.get(row.id)
    print agency['agencyName']
   
#http://diveintopython.org/http_web_services/etags.html  
class NotModifiedHandler(urllib2.BaseHandler):
    # turn an HTTP 304 Not Modified into a normal response object
    # instead of an error, so the caller can inspect its code
    def http_error_304(self, req, fp, code, message, headers):
        addinfourl = urllib2.addinfourl(fp, headers, req.get_full_url())
        addinfourl.code = code
        return addinfourl
   
def scrapeAndStore(URL, depth, agency):
    req = urllib2.Request(URL)

    #if there is a previous version stored in couchdb, load caching helper tags
    # ('etag'/'last_modified' as field names on the agency document are an
    # assumed schema)
    etag = agency.get('etag')
    last_modified = agency.get('last_modified')
    if etag:
        req.add_header("If-None-Match", etag)
    if last_modified:
        req.add_header("If-Modified-Since", last_modified)
   
    opener = urllib2.build_opener(NotModifiedHandler())
    try:
        url_handle = opener.open(req)
    except urllib2.HTTPError as e:
        # urllib2 raises HTTPError for non-2xx responses (304 is turned into a
        # normal response by NotModifiedHandler), so download errors land here
        print "error %s in downloading %s" % (e.code, URL)
        #record/alert error to error database
        return
    headers = url_handle.info() # the addinfourls have the .info() too
    etag = headers.getheader("ETag")
    last_modified = headers.getheader("Last-Modified")
    web_server = headers.getheader("Server")
    file_size = headers.getheader("Content-Length")
    mime_type = headers.getheader("Content-Type")
   
    if hasattr(url_handle, 'code') and url_handle.code == 304:
        print "the web page has not been modified"
        return
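
    # Save the fresh caching headers and response metadata back onto the agency
    # document so the next run can make a conditional request. These field
    # names are an assumed schema, mirroring the load step above.
    agency['etag'] = etag
    agency['last_modified'] = last_modified
    agency['web_server'] = web_server
    agency['file_size'] = file_size
    agencydb.save(agency)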
   
    #do scraping
    html = url_handle.read()
    # http://www.crummy.com/software/BeautifulSoup/documentation.html
    soup = BeautifulSoup(html)
    links = soup.findAll('a') # soup.findAll('a', id=re.compile("^p-"))
    for link in links:
        print link['href']
        #for each unique link
        #if html mimetype
        # go down X levels,
        # diff with last stored attachment, store in document
        #if not
        # remember to save parentURL and title (link text that led to the document)

    #store as attachment epoch-filename
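    # A minimal sketch of the TODOs above, using couchdb-python's
    # put_attachment: store the fetched page on the agency document as an
    # "epoch-filename" attachment, then follow each unique absolute link
    # 'depth' more levels down. Diffing against the last stored attachment and
    # recording parentURL/title for non-HTML documents are left as TODOs;
    # everything here beyond those comments is assumption.
    import time
    filename = "%d-%s" % (int(time.time()), re.sub(r'[^\w.-]', '_', URL))
    agencydb.put_attachment(agency, html, filename=filename,
                            content_type=mime_type)
    if depth > 0:
        seen = set()
        for link in links:
            href = link.get('href')
            if href and href.startswith('http') and href not in seen:
                seen.add(href)
                scrapeAndStore(href, depth - 1, agency)


# Drive the scraper from the same view used above; a 'website' field as each
# agency's start URL and a depth of 1 are assumptions about the schema.
for row in agencydb.view('app/getScrapeRequired'):
    agency = agencydb.get(row.id)
    if agency.get('website'):
        scrapeAndStore(agency['website'], 1, agency)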