Handle more http edge cases in scraper


Former-commit-id: 994d782d8883843a55bf2558f8e6a6c9ffbcebde

file:a/scrape.py -> file:b/scrape.py
#http://packages.python.org/CouchDB/client.html
import couchdb
import urllib2
from BeautifulSoup import BeautifulSoup
import re
import hashlib
from urlparse import urljoin
import time
import os
import mimetypes
import urllib
import urlparse
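# NOTE: this script targets Python 2 (urllib2/urlparse, print statements) and
# BeautifulSoup 3 ("from BeautifulSoup import BeautifulSoup"); it will not run
# under Python 3 as written.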
   
def canonurl(url):
    r"""Return the canonical, ASCII-encoded form of a UTF-8 encoded URL, or ''
    if the URL looks invalid.
    >>> canonurl('\xe2\x9e\xa1.ws')  # tinyarro.ws
    'http://xn--hgi.ws/'
    """
    # strip spaces at the ends and ensure it's prefixed with 'scheme://'
    url = url.strip()
    if not url:
        return ''
    if not urlparse.urlsplit(url).scheme:
        url = 'http://' + url

    # turn it into Unicode
    #try:
    #    url = unicode(url, 'utf-8')
    #except UnicodeDecodeError:
    #    return ''  # bad UTF-8 chars in URL

    # parse the URL into its components
    parsed = urlparse.urlsplit(url)
    scheme, netloc, path, query, fragment = parsed

    # ensure scheme is a letter followed by letters, digits, and '+-.' chars
    if not re.match(r'[a-z][-+.a-z0-9]*$', scheme, flags=re.I):
        return ''
    scheme = str(scheme)

    # ensure domain and port are valid, eg: sub.domain.<1-to-6-TLD-chars>[:port]
    match = re.match(r'(.+\.[a-z0-9]{1,6})(:\d{1,5})?$', netloc, flags=re.I)
    if not match:
        return ''
    domain, port = match.groups()
    netloc = domain + (port if port else '')
    netloc = netloc.encode('idna')

    # ensure path is valid and convert Unicode chars to %-encoded
    if not path:
        path = '/'  # eg: 'http://google.com' -> 'http://google.com/'
    path = urllib.quote(urllib.unquote(path.encode('utf-8')), safe='/;')

    # ensure query is valid
    query = urllib.quote(urllib.unquote(query.encode('utf-8')), safe='=&?/')

    # ensure fragment is valid
    fragment = urllib.quote(urllib.unquote(fragment.encode('utf-8')))

    # piece it all back together, truncating it to a maximum of 4KB
    url = urlparse.urlunsplit((scheme, netloc, path, query, fragment))
    return url[:4096]
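# A few illustrative calls (behaviour inferred from the code above, not part of
# the original doctest):
#   canonurl('www.example.com/a b')        -> 'http://www.example.com/a%20b'
#   canonurl('not a url')                  -> ''   (netloc has no valid domain)
#   canonurl('mailto:someone@example.org') -> ''   (no netloc at all)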
   
#http://diveintopython.org/http_web_services/etags.html
class NotModifiedHandler(urllib2.BaseHandler):
    def http_error_304(self, req, fp, code, message, headers):
        addinfourl = urllib2.addinfourl(fp, headers, req.get_full_url())
        addinfourl.code = code
        return addinfourl
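# With this handler installed via build_opener(), a 304 Not Modified response is
# handed back as a normal response object with .code == 304 instead of urllib2
# raising an HTTPError, which lets fetchURL below detect the cached case.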
   
def fetchURL(docsdb, url, fieldName, agencyID, scrape_again=True):
    # bail out early on things that aren't fetchable HTTP URLs
    if url == None or url == "" or url.startswith("mailto") or url.startswith("javascript") or url.startswith("#"):
        print "Not a valid HTTP url"
        return (None, None)
    url = canonurl(url)
    if url == "":
        # canonurl returns '' for URLs it considers invalid
        print "Not a valid HTTP url"
        return (None, None)
    hash = hashlib.md5(url).hexdigest().encode("utf-8")
    req = urllib2.Request(url)
    print "Fetching %s" % url
    doc = docsdb.get(hash)
    if doc == None:
        doc = {'_id': hash, 'agencyID': agencyID, 'url': url, 'fieldName': fieldName}
    else:
        if (('page_scraped' in doc) and (time.time() - doc['page_scraped']) < 999999):
            print "Uh oh, trying to scrape URL again too soon!"
            last_attachment_fname = doc["_attachments"].keys()[-1]
            last_attachment = docsdb.get_attachment(doc, last_attachment_fname)
            return (doc['mime_type'], last_attachment)
        if scrape_again == False:
            print "Not scraping this URL again as requested"
            return (None, None)

    time.sleep(3)  # wait 3 seconds to give the webserver time to recover
    # if there is a previous version stored in couchdb, load caching helper tags
    if doc.has_key('etag'):
        req.add_header("If-None-Match", doc['etag'])
    if doc.has_key('last_modified'):
        req.add_header("If-Modified-Since", doc['last_modified'])
    opener = urllib2.build_opener(NotModifiedHandler())
    try:
        url_handle = opener.open(req)
        headers = url_handle.info()  # the addinfourls have the .info() too
        doc['etag'] = headers.getheader("ETag")
        doc['last_modified'] = headers.getheader("Last-Modified")
        doc['date'] = headers.getheader("Date")
        doc['page_scraped'] = time.time()
        doc['web_server'] = headers.getheader("Server")
        doc['powered_by'] = headers.getheader("X-Powered-By")
        doc['file_size'] = headers.getheader("Content-Length")
        content_type = headers.getheader("Content-Type")
        if content_type != None:
            doc['mime_type'] = content_type.split(";")[0]
        else:
            (type, encoding) = mimetypes.guess_type(url)
            doc['mime_type'] = type
        if hasattr(url_handle, 'code'):
            if url_handle.code == 304:
                print "the web page has not been modified"
                return (None, None)
            else:
                content = url_handle.read()
                docsdb.save(doc)
                doc = docsdb.get(hash)  # need to get a _rev
                # store as attachment named epoch-filename
                docsdb.put_attachment(doc, content, str(time.time()) + "-" + os.path.basename(url), doc['mime_type'])
                return (doc['mime_type'], content)
    except urllib2.URLError as e:
        error = ""
        if hasattr(e, 'reason'):
            error = "error %s in downloading %s" % (str(e.reason), url)
        elif hasattr(e, 'code'):
            error = "error %s in downloading %s" % (e.code, url)
        print error
        doc['error'] = error
        docsdb.save(doc)
        return (None, None)
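# fetchURL returns (mime_type, content) on a successful download, the cached
# attachment if the page was scraped recently, and (None, None) for invalid
# URLs, unmodified (304) pages, scrape_again=False, or download errors.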
   
   
   
def scrapeAndStore(docsdb, url, depth, fieldName, agencyID):
    (mime_type, content) = fetchURL(docsdb, url, fieldName, agencyID)
    if content != None and depth > 0:
        if mime_type == "text/html" or mime_type == "application/xhtml+xml" or mime_type == "application/xml":
            # http://www.crummy.com/software/BeautifulSoup/documentation.html
            soup = BeautifulSoup(content)
            navIDs = soup.findAll(id=re.compile('nav|Nav|menu|bar'))
            for nav in navIDs:
                print "Removing element", nav['id']
                nav.extract()
            navClasses = soup.findAll(attrs={'class': re.compile('nav|menu|bar')})
            for nav in navClasses:
                print "Removing element", nav['class']
                nav.extract()
            links = soup.findAll('a')  # soup.findAll('a', id=re.compile("^p-"))
            linkurls = set([])
            for link in links:
                if link.has_key("href"):
                    if link['href'].startswith("http"):
                        # lets not do external links for now
                        # linkurls.add(link['href'])
                        pass
                    elif link['href'].startswith("mailto"):
                        # not http
                        pass
                    elif link['href'].startswith("javascript"):
                        # not http
                        pass
                    else:
                        linkurls.add(urljoin(url, link['href'].replace(" ", "%20")))
            for linkurl in linkurls:
                #print linkurl
                scrapeAndStore(docsdb, linkurl, depth - 1, fieldName, agencyID)
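# scrapeAndStore recurses on every same-site link found in an HTML page,
# decrementing depth each time: depth=0 fetches only the page itself, depth=1
# also fetches the pages it links to, and so on.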
   
couch = couchdb.Server('http://127.0.0.1:5984/')

# select databases
agencydb = couch['disclosr-agencies']
docsdb = couch['disclosr-documents']

for row in agencydb.view('app/getScrapeRequired'):  # not-recently-scraped agencies view?
    agency = agencydb.get(row.id)
    print agency['name']
    for key in agency.keys():
        if key == 'website':
            scrapeAndStore(docsdb, agency[key], 0, key, agency['_id'])
        if key.endswith('URL'):
            print key
            depth = 1
            if 'scrapeDepth' in agency.keys():
                depth = agency['scrapeDepth']
            scrapeAndStore(docsdb, agency[key], depth, key, agency['_id'])

    agency['metadata']['lastScraped'] = time.time()
    agencydb.save(agency)
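# Assumes a local CouchDB at 127.0.0.1:5984 with the 'disclosr-agencies' and
# 'disclosr-documents' databases and an 'app/getScrapeRequired' view already in
# place; agency documents are expected to carry website/*URL fields and a
# 'metadata' dict.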