pdf scrapers
[disclosr.git] / documents / scrape.py
#http://packages.python.org/CouchDB/client.html
import couchdb
import urllib2
from BeautifulSoup import BeautifulSoup
import re
import hashlib
from urlparse import urljoin
import time
import os
import mimetypes
import urllib
import urlparse
   
# the md5 of the canonicalised URL is used as the CouchDB document _id, so
# repeated fetches of the same URL update the same document
def mkhash(input):
    return hashlib.md5(input).hexdigest().encode("utf-8")
   
def canonurl(url):
    r"""Return the canonical, ASCII-encoded form of a UTF-8 encoded URL, or ''
    if the URL looks invalid.

    >>> canonurl('\xe2\x9e\xa1.ws')  # tinyarro.ws
    'http://xn--hgi.ws/'
    """
    # strip spaces at the ends and ensure it's prefixed with 'scheme://'
    url = url.strip()
    if not url:
        return ''
    if not urlparse.urlsplit(url).scheme:
        url = 'http://' + url

    # turn it into Unicode
    #try:
    #    url = unicode(url, 'utf-8')
    #except UnicodeDecodeError:
    #    return ''  # bad UTF-8 chars in URL

    # parse the URL into its components
    parsed = urlparse.urlsplit(url)
    scheme, netloc, path, query, fragment = parsed

    # ensure scheme is a letter followed by letters, digits, and '+-.' chars
    if not re.match(r'[a-z][-+.a-z0-9]*$', scheme, flags=re.I):
        return ''
    scheme = str(scheme)

    # ensure domain and port are valid, eg: sub.domain.<1-to-6-TLD-chars>[:port]
    match = re.match(r'(.+\.[a-z0-9]{1,6})(:\d{1,5})?$', netloc, flags=re.I)
    if not match:
        return ''
    domain, port = match.groups()
    netloc = domain + (port if port else '')
    netloc = netloc.encode('idna')

    # ensure path is valid and convert Unicode chars to %-encoded
    if not path:
        path = '/'  # eg: 'http://google.com' -> 'http://google.com/'
    path = urllib.quote(urllib.unquote(path.encode('utf-8')), safe='/;')

    # ensure query is valid
    query = urllib.quote(urllib.unquote(query.encode('utf-8')), safe='=&?/')

    # ensure fragment is valid
    fragment = urllib.quote(urllib.unquote(fragment.encode('utf-8')))

    # piece it all back together, truncating it to a maximum of 4KB
    url = urlparse.urlunsplit((scheme, netloc, path, query, fragment))
    return url[:4096]
   
def fullurl(url, href):
    href = href.replace(" ", "%20")
    href = re.sub('#.*$', '', href)
    return urljoin(url, href)
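# For illustration only (hypothetical URL): relative links are resolved against the
# page they were found on, spaces are %-escaped and any fragment is dropped, e.g.
#   fullurl("http://example.gov.au/foi/", "docs/disclosure log.pdf#top")
#   -> "http://example.gov.au/foi/docs/disclosure%20log.pdf"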
   
#http://diveintopython.org/http_web_services/etags.html
class NotModifiedHandler(urllib2.BaseHandler):
    def http_error_304(self, req, fp, code, message, headers):
        addinfourl = urllib2.addinfourl(fp, headers, req.get_full_url())
        addinfourl.code = code
        return addinfourl
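# Without this handler urllib2 would treat a 304 Not Modified response as an error;
# returning the addinfourl instead lets fetchURL see code == 304 and reuse the
# attachment already stored in CouchDB rather than downloading the page again.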
   
def fetchURL(docsdb, url, fieldName, agencyID, scrape_again=True):
    # reject non-HTTP links before building a request for them
    if url == None or url == "" or url.startswith("mailto") or url.startswith("javascript") or url.startswith("#"):
        print "Not a valid HTTP url"
        return (None, None, None)
    url = canonurl(url)
    hash = mkhash(url)
    req = urllib2.Request(url)
    print "Fetching %s (%s)" % (url, hash)
    doc = docsdb.get(hash)
    if doc == None:
        doc = {'_id': hash, 'agencyID': agencyID, 'url': url, 'fieldName': fieldName}
    else:
        if ('page_scraped' in doc) and (time.time() - doc['page_scraped']) < 60*60*24*14:  # scraped within the last 14 days
            print "Uh oh, trying to scrape URL again too soon!" + hash
            last_attachment_fname = doc["_attachments"].keys()[-1]
            last_attachment = docsdb.get_attachment(doc, last_attachment_fname)
            content = last_attachment
            return (doc['url'], doc['mime_type'], content)
        if scrape_again == False:
            print "Not scraping this URL again as requested"
            return (None, None, None)
   
    req.add_header("User-Agent", "Mozilla/4.0 (compatible; Prometheus webspider; owner maxious@lambdacomplex.org)")
    # if there is a previous version stored in couchdb, load caching helper tags
    if doc.has_key('etag'):
        req.add_header("If-None-Match", doc['etag'])
    if doc.has_key('last_modified'):
        req.add_header("If-Modified-Since", doc['last_modified'])

    opener = urllib2.build_opener(NotModifiedHandler())
    try:
        url_handle = opener.open(req)
        doc['url'] = url_handle.geturl()  # may have followed a redirect to a new url
        headers = url_handle.info()  # the addinfourls have the .info() too
        doc['etag'] = headers.getheader("ETag")
        doc['last_modified'] = headers.getheader("Last-Modified")
        doc['date'] = headers.getheader("Date")
        doc['page_scraped'] = time.time()
        doc['web_server'] = headers.getheader("Server")
        doc['via'] = headers.getheader("Via")
        doc['powered_by'] = headers.getheader("X-Powered-By")
        doc['file_size'] = headers.getheader("Content-Length")
        content_type = headers.getheader("Content-Type")
        if content_type != None:
            doc['mime_type'] = content_type.split(";")[0]
        else:
            (type, encoding) = mimetypes.guess_type(url)
            doc['mime_type'] = type
        if hasattr(url_handle, 'code') and url_handle.code == 304:
            # not modified since the last fetch; reuse the stored attachment
            print "the web page has not been modified" + hash
            last_attachment_fname = doc["_attachments"].keys()[-1]
            last_attachment = docsdb.get_attachment(doc, last_attachment_fname)
            content = last_attachment
            return (doc['url'], doc['mime_type'], content)
        else:
            print "new webpage loaded"
            content = url_handle.read()
            docsdb.save(doc)
            doc = docsdb.get(hash)  # need to get a _rev
            # store the page content as an attachment named epoch-filename
            docsdb.put_attachment(doc, content, str(time.time()) + "-" + os.path.basename(url), doc['mime_type'])
            return (doc['url'], doc['mime_type'], content)

    except urllib2.URLError as e:
        print "error!"
        error = ""
        if hasattr(e, 'reason'):
            error = "error %s in downloading %s" % (str(e.reason), url)
        elif hasattr(e, 'code'):
            error = "error %s in downloading %s" % (e.code, url)
        print error
        doc['error'] = error
        docsdb.save(doc)
        return (None, None, None)
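# Illustrative call (hypothetical URL), assuming the CouchDB databases set up below:
#   (url, mime_type, content) = fetchURL(docsdb, "http://www.example.gov.au/foi.html",
#                                        "FOIDocumentsURL", agency['_id'])
# returns the final URL after redirects, the MIME type and the page body, or
# (None, None, None) if the URL was invalid or the download failed.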
   
   
   
def scrapeAndStore(docsdb, url, depth, fieldName, agencyID):
    (url, mime_type, content) = fetchURL(docsdb, url, fieldName, agencyID)
    badURLs = ["http://www.ausport.gov.au/supporting/funding/grants_and_scholarships/grant_funding_report"]
    if content != None and depth > 0 and url not in badURLs:
        if mime_type == "text/html" or mime_type == "application/xhtml+xml" or mime_type == "application/xml":
            # http://www.crummy.com/software/BeautifulSoup/documentation.html
            soup = BeautifulSoup(content)
            # strip navigation, sidebar, header and footer elements before collecting links
            navIDs = soup.findAll(id=re.compile('nav|Nav|menu|bar|left|right|sidebar|more-links|breadcrumb|footer|header'))
            for nav in navIDs:
                print "Removing element", nav['id']
                nav.extract()
            navClasses = soup.findAll(attrs={'class': re.compile('nav|menu|bar|left|right|sidebar|more-links|breadcrumb|footer|header')})
            for nav in navClasses:
                print "Removing element", nav['class']
                nav.extract()
            links = soup.findAll('a')  # soup.findAll('a', id=re.compile("^p-"))
            linkurls = set([])
            for link in links:
                if link.has_key("href"):
                    if link['href'].startswith("http"):
                        # lets not do external links for now
                        # linkurls.add(link['href'])
                        pass
                    elif link['href'].startswith("mailto"):
                        # not http
                        pass
                    elif link['href'].startswith("javascript"):
                        # not http
                        pass
                    else:
                        # remove anchors and spaces in urls
                        linkurls.add(fullurl(url, link['href']))
            for linkurl in linkurls:
                #print linkurl
                scrapeAndStore(docsdb, linkurl, depth - 1, fieldName, agencyID)
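# Illustrative call (hypothetical URL): depth=1 fetches the page plus each page it
# links to; absolute http, mailto and javascript hrefs are skipped, so only links
# relative to the starting page are followed.
#   scrapeAndStore(docsdb, "http://www.example.gov.au/foi/disclosure-log.html", 1,
#                  "FOIDocumentsURL", agency['_id'])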
   
#couch = couchdb.Server('http://192.168.1.148:5984/')
couch = couchdb.Server('http://127.0.0.1:5984/')
# select database
agencydb = couch['disclosr-agencies']
docsdb = couch['disclosr-documents']
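# both databases are expected to already exist in the local CouchDB instance;
# couchdb-python raises ResourceNotFound here if either is missing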
   
if __name__ == "__main__":
    for row in agencydb.view('app/getScrapeRequired'):  # not recently scraped agencies view?
        agency = agencydb.get(row.id)
        print agency['name']
        for key in agency.keys():
            if key == "FOIDocumentsURL" and "status" not in agency.keys():
                scrapeAndStore(docsdb, agency[key], 0, key, agency['_id'])
            if key == 'website' and False:
                scrapeAndStore(docsdb, agency[key], 0, key, agency['_id'])
                agency['metadata']['lastScraped'] = time.time()
            if key.endswith('URL') and False:
                print key
                depth = 1
                if 'scrapeDepth' in agency.keys():
                    depth = agency['scrapeDepth']
                scrapeAndStore(docsdb, agency[key], depth, key, agency['_id'])
        agencydb.save(agency)