Redid direct agency info import
[disclosr.git] / unimplemented / scrape.py
#http://packages.python.org/CouchDB/client.html
import couchdb
import urllib2
import urlparse
import time
from BeautifulSoup import BeautifulSoup
import re
 
couch = couchdb.Server() # Assuming localhost:5984
# If your CouchDB server is running elsewhere, set it up like this:
# couch = couchdb.Server('http://example.com:5984/')
 
# select database
agencydb = couch['disclosr-agencies']
 
for row in agencydb.view('app/getScrapeRequired'): #not recently scraped agencies view?
    agency = agencydb.get(row.id)
    print agency['agencyName']
 
#http://diveintopython.org/http_web_services/etags.html
class NotModifiedHandler(urllib2.BaseHandler):  
    def http_error_304(self, req, fp, code, message, headers):
        addinfourl = urllib2.addinfourl(fp, headers, req.get_full_url())
        addinfourl.code = code
        return addinfourl
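
# Installing NotModifiedHandler in an opener makes a 304 response come back
# as a normal addinfourl (with .code == 304) instead of raising HTTPError,
# so callers can detect "not modified" by checking url_handle.code.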
 
def scrapeAndStore(URL, depth, agency):
    #URL = "http://www.hole.fi/jajvirta/weblog/" # leftover test URL, disabled so the parameter is honoured
    req = urllib2.Request(URL)

    #if there is a previous version stored in couchdb, load caching helper tags
    # (these field names are assumptions about the agency document schema)
    etag = agency.get('lastScrapeETag')
    last_modified = agency.get('lastScrapeLastModified')
    if etag:
        req.add_header("If-None-Match", etag)
    if last_modified:
        req.add_header("If-Modified-Since", last_modified)
     
    opener = urllib2.build_opener(NotModifiedHandler())
    try:
        url_handle = opener.open(req)
    except urllib2.URLError, e:
        print "error %s in downloading %s" % (e, URL)
        #record/alert error to error database
        return

    headers = url_handle.info() # the addinfourls have the .info() too
    etag = headers.getheader("ETag")
    last_modified = headers.getheader("Last-Modified")
    web_server = headers.getheader("Server")
    file_size = headers.getheader("Content-Length")
    mime_type = headers.getheader("Content-Type")
    # remember etag/last_modified for the next conditional request
    # (persisting them back to couchdb is left to the caller)

    if hasattr(url_handle, 'code') and url_handle.code == 304:
        print "the web page has not been modified"
        return

    #do scraping
    html = url_handle.read()
    # http://www.crummy.com/software/BeautifulSoup/documentation.html
    soup = BeautifulSoup(html)
    links = soup.findAll('a') # soup.findAll('a', id=re.compile("^p-"))
    for link in links:
        if link.get('href'):
            print link['href']
        #for each unique link
        #if html mimetype
        # go down X levels,
        # diff with last stored attachment, store in document
        #if not
        #   remember to save parentURL and title (link text that lead to document)

        #store as attachment epoch-filename
        # (a rough sketch of these steps follows after the loop)
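
    # A rough sketch of the pseudocode above -- every detail here is an
    # assumption, not part of the original design: the uniqueness check,
    # recursing on every link while depth remains (a real version would
    # test the linked document's mimetype before descending), and the
    # attachment name.  urlparse and time are imported at the top.
    seen = set()
    for link in links:
        href = link.get('href')
        if href is None or href in seen:
            continue
        seen.add(href)
        if depth > 0:
            scrapeAndStore(urlparse.urljoin(URL, href), depth - 1, agency)

    #store as attachment epoch-filename (assumes couchdb-python's
    #put_attachment; the filename scheme is a guess)
    filename = "%d-%s" % (int(time.time()), URL.split('/')[-1] or "index.html")
    agencydb.put_attachment(agency, html, filename, mime_type)

# Kick off a scrape for each agency the view flags as stale.  The 'website'
# field and the starting depth of 1 are assumptions about the schema.
for row in agencydb.view('app/getScrapeRequired'):
    agency = agencydb.get(row.id)
    scrapeAndStore(agency['website'], 1, agency)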