#http://packages.python.org/CouchDB/client.html |
#http://packages.python.org/CouchDB/client.html |
import couchdb |
import couchdb |
import urllib2 |
import urllib2 |
from BeautifulSoup import BeautifulSoup |
from BeautifulSoup import BeautifulSoup |
import re |
import re |
|
import hashlib |
|
|
#http://diveintopython.org/http_web_services/etags.html |
#http://diveintopython.org/http_web_services/etags.html |
# http://diveintopython.org/http_web_services/etags.html
class NotModifiedHandler(urllib2.BaseHandler):
    """urllib2 handler that turns an HTTP 304 (Not Modified) response into a
    normal response object instead of raising, so callers can inspect .code."""

    def http_error_304(self, req, fp, code, message, headers):
        # Wrap the raw response so it behaves like a regular urlopen result;
        # stash the 304 status on it for the caller to test.
        addinfourl = urllib2.addinfourl(fp, headers, req.get_full_url())
        addinfourl.code = code
        return addinfourl
|
|
def scrapeAndStore(URL, depth, agency): |
def scrapeAndStore(docsdb, url, depth, agencyID): |
URL = "http://www.google.com" |
hash = hashlib.md5(url).hexdigest() |
req = urllib2.Request(URL) |
req = urllib2.Request(url) |
etag = 'y' |
print "Fetching %s", url |
last_modified = 'y' |
doc = docsdb['hash'] |
#if there is a previous version sotred in couchdb, load caching helper tags |
#if there is a previous version stored in couchdb, load caching helper tags |
if etag: |
if doc.has_key('etag'): |
req.add_header("If-None-Match", etag) |
req.add_header("If-None-Match", doc['etag']) |
if last_modified: |
if doc.has_key('last_modified'): |
req.add_header("If-Modified-Since", last_modified) |
req.add_header("If-Modified-Since", doc['last_modified']) |
|
|
opener = urllib2.build_opener(NotModifiedHandler()) |
opener = urllib2.build_opener(NotModifiedHandler()) |
url_handle = opener.open(req) |
url_handle = opener.open(req) |
headers = url_handle.info() # the addinfourls have the .info() too |
headers = url_handle.info() # the addinfourls have the .info() too |
etag = headers.getheader("ETag") |
doc['etag'] = headers.getheader("ETag") |
last_modified = headers.getheader("Last-Modified") |
doc['last_modified'] = headers.getheader("Last-Modified") |
web_server = headers.getheader("Server") |
doc['web_server'] = headers.getheader("Server") |
file_size = headers.getheader("Content-Length") |
doc['file_size'] = headers.getheader("Content-Length") |
mime_type = headers.getheader("Content-Type") |
doc['mime_type'] = headers.getheader("Content-Type") |
|
|
if hasattr(url_handle, 'code'): |
if hasattr(url_handle, 'code'): |
if url_handle.code == 304: |
if url_handle.code == 304: |
print "the web page has not been modified" |
print "the web page has not been modified" |
else: |
else: |
#do scraping |
#do scraping |
html = url_handle.read() |
html = url_handle.read() |
# http://www.crummy.com/software/BeautifulSoup/documentation.html |
# http://www.crummy.com/software/BeautifulSoup/documentation.html |
soup = BeautifulSoup(html) |
soup = BeautifulSoup(html) |
links = soup.findAll('a') # soup.findAll('a', id=re.compile("^p-")) |
links = soup.findAll('a') # soup.findAll('a', id=re.compile("^p-")) |
for link in links: |
for link in links: |
print link['href'] |
if link.has_key("href"): |
#for each unique link |
print link['href'] |
#if html mimetype |
#for each unique link |
# go down X levels, |
#if html mimetype |
# diff with last stored attachment, store in document |
# go down X levels, |
#if not |
# diff with last stored attachment, store in document |
# remember to save parentURL and title (link text that lead to document) |
#if not |
|
# remember to save parentURL and title (link text that lead to document) |
|
|
#store as attachment epoch-filename |
#store as attachment epoch-filename |
else: |
else: |
print "error %s in downloading %s", url_handle.code, URL |
print "error %s in downloading %s", url_handle.code, URL |
#record/alert error to error database |
#record/alert error to error database |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
couch = couchdb.Server('http://192.168.1.148:5984/') |
couch = couchdb.Server('http://192.168.1.148:5984/') |
|
|
# select database |
# select database |
agencydb = couch['disclosr-agencies'] |
agencydb = couch['disclosr-agencies'] |
|
docsdb = couch['disclosr-documents'] |
|
|
for row in agencydb.view('app/getScrapeRequired'): #not recently scraped agencies view? |
for row in agencydb.view('app/getScrapeRequired'): #not recently scraped agencies view? |
agency = agencydb.get(row.id) |
agency = agencydb.get(row.id) |
print agency['name'] |
print agency['name'] |
scrapeAndStore("A",1,1) |
scrapeAndStore(docsdb, agency['website'],1,agency['_id']) |
|
|