add innovation scraper
[disclosr.git] / documents / scrape.py



#http://packages.python.org/CouchDB/client.html
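# scrape.py: fetch the pages listed for each agency in the disclosr-agencies CouchDB
# database and store each response as a timestamped attachment in disclosr-documents,
# following internal links up to the requested depth.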
import couchdb
import urllib2
from BeautifulSoup import BeautifulSoup
import re
import hashlib
from urlparse import urljoin
import time
import os
import mimetypes
import urllib
import urlparse
 
def mkhash(input):
    return hashlib.md5(input).hexdigest().encode("utf-8")
 
def canonurl(url):
    r"""Return the canonical, ASCII-encoded form of a UTF-8 encoded URL, or ''
    if the URL looks invalid.
    >>> canonurl('\xe2\x9e\xa1.ws')  # tinyarro.ws
    'http://xn--hgi.ws/'
    """
    # strip spaces at the ends and ensure it's prefixed with 'scheme://'
    url = url.strip()
    if not url:
        return ''
    if not urlparse.urlsplit(url).scheme:
        url = 'http://' + url
 
    # turn it into Unicode
    #try:
    #    url = unicode(url, 'utf-8')
    #except UnicodeDecodeError:
    #    return ''  # bad UTF-8 chars in URL
 
    # parse the URL into its components
    parsed = urlparse.urlsplit(url)
    scheme, netloc, path, query, fragment = parsed
 
    # ensure scheme is a letter followed by letters, digits, and '+-.' chars
    if not re.match(r'[a-z][-+.a-z0-9]*$', scheme, flags=re.I):
        return ''
    scheme = str(scheme)
 
    # ensure domain and port are valid, eg: sub.domain.<1-to-6-TLD-chars>[:port]
    match = re.match(r'(.+\.[a-z0-9]{1,6})(:\d{1,5})?$', netloc, flags=re.I)
    if not match:
        return ''
    domain, port = match.groups()
    netloc = domain + (port if port else '')
    netloc = netloc.encode('idna')
 
    # ensure path is valid and convert Unicode chars to %-encoded
    if not path:
        path = '/'  # eg: 'http://google.com' -> 'http://google.com/'
    path = urllib.quote(urllib.unquote(path.encode('utf-8')), safe='/;')
 
    # ensure query is valid
    query = urllib.quote(urllib.unquote(query.encode('utf-8')), safe='=&?/')
 
    # ensure fragment is valid
    fragment = urllib.quote(urllib.unquote(fragment.encode('utf-8')))
 
    # piece it all back together, truncating it to a maximum of 4KB
    url = urlparse.urlunsplit((scheme, netloc, path, query, fragment))
    return url[:4096]
 
def fullurl(url,href):
    href = href.replace(" ","%20")
    href = re.sub('#.*$','',href)
    return urljoin(url,href)
 
#http://diveintopython.org/http_web_services/etags.html
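# handler that returns HTTP 304 responses as normal results instead of raising,
# so callers can detect "not modified" and reuse the previously stored attachment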
class NotModifiedHandler(urllib2.BaseHandler):
    def http_error_304(self, req, fp, code, message, headers):
        addinfourl = urllib2.addinfourl(fp, headers, req.get_full_url())
        addinfourl.code = code
        return addinfourl
 
def getLastAttachment(docsdb,url):
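    # return the stored page content (last attachment) for a previously scraped URL, or None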
    hash = mkhash(url)
    doc = docsdb.get(hash)
    if doc != None:
        last_attachment_fname = doc["_attachments"].keys()[-1]
        last_attachment = docsdb.get_attachment(doc,last_attachment_fname)
        return last_attachment
    else:
        return None
 
def fetchURL(docsdb, url, fieldName, agencyID, scrape_again=True):
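    # fetch a URL using ETag/Last-Modified conditional requests, record response
    # metadata on the CouchDB document and store the body as a new attachment;
    # returns (url, mime_type, content), or (None, None, None) on error or skip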
    url = canonurl(url)
    hash = mkhash(url)
    # reject non-HTTP urls before building the request
    if url == None or url == "" or url.startswith("mailto") or url.startswith("javascript") or url.startswith("#"):
        print "Not a valid HTTP url"
        return (None,None,None)
    req = urllib2.Request(url)
    print "Fetching %s (%s)" % (url, hash)
    doc = docsdb.get(hash)
    if doc == None:
        doc = {'_id': hash, 'agencyID': agencyID, 'url': url, 'fieldName':fieldName}
    else:
        # scraped within the last 14 days (time.time() is in seconds): reuse the stored copy
        if ('page_scraped' in doc) and (time.time() - doc['page_scraped']) < 60*60*24*14:
            print "Uh oh, trying to scrape URL again too soon!"+hash
            last_attachment_fname = doc["_attachments"].keys()[-1]
            last_attachment = docsdb.get_attachment(doc,last_attachment_fname)
            content = last_attachment
            return (doc['url'],doc['mime_type'],content)
        if not scrape_again:
            print "Not scraping this URL again as requested"
            return (None,None,None)
 
    req.add_header("User-Agent", "Mozilla/4.0 (compatible; Prometheus webspider; owner maxious@lambdacomplex.org)")
    #if there is a previous version stored in couchdb, load caching helper tags
    if doc.has_key('etag'):
        req.add_header("If-None-Match", doc['etag'])
    if doc.has_key('last_modified'):
        req.add_header("If-Modified-Since", doc['last_modified'])
 
    opener = urllib2.build_opener(NotModifiedHandler())
    try:
        url_handle = opener.open(req)
        doc['url'] = url_handle.geturl() # may have followed a redirect to a new url
        headers = url_handle.info() # the addinfourls have the .info() too
        doc['etag'] = headers.getheader("ETag")
        doc['last_modified'] = headers.getheader("Last-Modified")
        doc['date'] = headers.getheader("Date")
        doc['page_scraped'] = time.time()
        doc['web_server'] = headers.getheader("Server")
        doc['via'] = headers.getheader("Via")
        doc['powered_by'] = headers.getheader("X-Powered-By")
        doc['file_size'] = headers.getheader("Content-Length")
        content_type = headers.getheader("Content-Type")
        if content_type != None:
            doc['mime_type'] = content_type.split(";")[0]
        else:
            (type,encoding) = mimetypes.guess_type(url)
            doc['mime_type'] = type
        if hasattr(url_handle, 'code'):
            if url_handle.code == 304:
                print "the web page has not been modified"+hash
                last_attachment_fname = doc["_attachments"].keys()[-1]
                last_attachment = docsdb.get_attachment(doc,last_attachment_fname)
                content = last_attachment
                return (doc['url'],doc['mime_type'],content)
            else:
                print "new webpage loaded"
                content = url_handle.read()
                docsdb.save(doc)
                doc = docsdb.get(hash) # need to get a _rev
                #store as attachment epoch-filename
                docsdb.put_attachment(doc, content, str(time.time())+"-"+os.path.basename(url), doc['mime_type'])
                return (doc['url'], doc['mime_type'], content)
 
    except urllib2.URLError as e:
        print "error!"
        error = ""
        if hasattr(e, 'reason'):
            error = "error %s in downloading %s" % (str(e.reason), url)
        elif hasattr(e, 'code'):
            error = "error %s in downloading %s" % (e.code, url)
        print error
        doc['error'] = error
        docsdb.save(doc)
        return (None,None,None)
 
 
 
def scrapeAndStore(docsdb, url, depth, fieldName, agencyID):
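    # fetch a page and, if it is HTML or XML, strip navigation elements and
    # recursively scrape the remaining internal links until depth reaches zero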
    (url,mime_type,content) = fetchURL(docsdb, url, fieldName, agencyID)
    badURLs = ["http://www.ausport.gov.au/supporting/funding/grants_and_scholarships/grant_funding_report"]
    if content != None and depth > 0 and url not in badURLs:
        if mime_type == "text/html" or mime_type == "application/xhtml+xml" or mime_type == "application/xml":
            # http://www.crummy.com/software/BeautifulSoup/documentation.html
            soup = BeautifulSoup(content)
            # strip navigation, sidebar, header and footer elements so only content links are followed
            navIDs = soup.findAll(id=re.compile('nav|Nav|menu|bar|left|right|sidebar|more-links|breadcrumb|footer|header'))
            for nav in navIDs:
                print "Removing element", nav['id']
                nav.extract()
            navClasses = soup.findAll(attrs={'class' : re.compile('nav|menu|bar|left|right|sidebar|more-links|breadcrumb|footer|header')})
            for nav in navClasses:
                print "Removing element", nav['class']
                nav.extract()
            links = soup.findAll('a') # soup.findAll('a', id=re.compile("^p-"))
            linkurls = set([])
            for link in links:
                if link.has_key("href"):
                    if link['href'].startswith("http"):
                        # let's not do external links for now
                        # linkurls.add(link['href'])
                        pass
                    elif link['href'].startswith("mailto"):
                        # not http
                        pass
                    elif link['href'].startswith("javascript"):
                        # not http
                        pass
                    else:
                        # remove anchors and spaces in urls
                        linkurls.add(fullurl(url,link['href']))
            for linkurl in linkurls:
                #print linkurl
                scrapeAndStore(docsdb, linkurl, depth-1, fieldName, agencyID)
 
#couch = couchdb.Server('http://192.168.1.148:5984/')
couch = couchdb.Server('http://127.0.0.1:5984/')
# select database
agencydb = couch['disclosr-agencies']
docsdb = couch['disclosr-documents']
 
if __name__ == "__main__":
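    # scrape the FOI documents URL of each agency returned by the getScrapeRequired view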
    for row in agencydb.view('app/getScrapeRequired'): #not recently scraped agencies view?
        agency = agencydb.get(row.id)
        print agency['name']
        for key in agency.keys():
            if key == "FOIDocumentsURL" and "status" not in agency.keys:
                scrapeAndStore(docsdb, agency[key],0,key,agency['_id'])
            if key == 'website' and False:
                scrapeAndStore(docsdb, agency[key],0,key,agency['_id'])
                agency['metadata']['lastScraped'] = time.time()
            if key.endswith('URL') and False:
                print key
                depth = 1
                if 'scrapeDepth' in agency.keys():
                    depth = agency['scrapeDepth']
                scrapeAndStore(docsdb, agency[key],depth,key,agency['_id'])
        agencydb.save(agency)