#http://packages.python.org/CouchDB/client.html
import couchdb
import urllib2
from BeautifulSoup import BeautifulSoup
import re

couch = couchdb.Server()  # assuming localhost:5984
# If your CouchDB server is running elsewhere, point at it explicitly:
# couch = couchdb.Server('http://192.168.1.148:5984/')

# select database
agencydb = couch['disclosr-agencies']
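
# The 'app/getScrapeRequired' view used below is assumed to already exist in
# the database. As a rough sketch, the design document backing it could look
# something like this; the "needs scraping" criterion (no lastScraped field)
# is a guess:
if '_design/app' not in agencydb:
    agencydb['_design/app'] = {
        'views': {
            'getScrapeRequired': {
                'map': "function(doc) { if (!doc.lastScraped) { emit(doc._id, null); } }"
            }
        }
    }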

# (a sketch at the end of the file wires this loop to scrapeAndStore)
for row in agencydb.view('app/getScrapeRequired'):  # view of agencies that have not been scraped recently
    agency = agencydb.get(row.id)
    print agency['agencyName']

#http://diveintopython.org/http_web_services/etags.html
class NotModifiedHandler(urllib2.BaseHandler):
    def http_error_304(self, req, fp, code, message, headers):
        # pass the 304 back as a normal response (with .code set) instead of
        # letting urllib2 raise, so the caller can detect "not modified"
        addinfourl = urllib2.addinfourl(fp, headers, req.get_full_url())
        addinfourl.code = code
        return addinfourl

def scrapeAndStore(URL, depth, agency):
    URL = "http://www.hole.fi/jajvirta/weblog/"  # hardcoded test URL; overrides the parameter for now
    req = urllib2.Request(URL)

    # if there is a previous version stored in couchdb, load caching helper tags
    # (assumes the agency doc keeps 'etag'/'last_modified' from the last
    # scrape; those field names are illustrative)
    etag = agency.get('etag')
    last_modified = agency.get('last_modified')
    if etag:
        req.add_header("If-None-Match", etag)
    if last_modified:
        req.add_header("If-Modified-Since", last_modified)

    opener = urllib2.build_opener(NotModifiedHandler())
    url_handle = opener.open(req)
    headers = url_handle.info()  # the addinfourls have the .info() too
    etag = headers.getheader("ETag")
    last_modified = headers.getheader("Last-Modified")
    web_server = headers.getheader("Server")
    file_size = headers.getheader("Content-Length")
    mime_type = headers.getheader("Content-Type")

    if hasattr(url_handle, 'code') and url_handle.code == 304:
        print "the web page has not been modified"
    elif hasattr(url_handle, 'code') and url_handle.code != 200:
        print "error %s in downloading %s" % (url_handle.code, URL)
        #record/alert error to error database
    else:
        #do scraping
        html = url_handle.read()
        # http://www.crummy.com/software/BeautifulSoup/documentation.html
        soup = BeautifulSoup(html)
        links = soup.findAll('a')  # soup.findAll('a', id=re.compile("^p-"))
        for link in links:
            print link['href']
            #for each unique link:
            #  if html mimetype: go down X levels,
            #    diff with last stored attachment, store in document
            #  if not:
            #    remember to save parentURL and title (link text that led to the document)
            #  store as attachment epoch-filename (see the storePage sketch below)
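
# A sketch of the "store as attachment epoch-filename" step above, using
# couchdb-python's put_attachment. Writing the caching headers back onto the
# agency doc (so the If-None-Match/If-Modified-Since block above has values
# to load) is an assumption, as are the helper, field, and file names.
import time

def storePage(agency, html, mime_type, etag, last_modified):
    agency['etag'] = etag
    agency['last_modified'] = last_modified
    agencydb.save(agency)  # persist the doc and refresh its _rev
    filename = "%d-index.html" % time.time()  # epoch-prefixed attachment name
    agencydb.put_attachment(agency, html, filename, mime_type)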
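
# Wiring the view loop at the top of the file to the scraper could look like
# this sketch; the 'website' field and the crawl depth of 1 are assumptions:
for row in agencydb.view('app/getScrapeRequired'):
    agency = agencydb.get(row.id)
    scrapeAndStore(agency.get('website'), 1, agency)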