# scrape.py -- agency website scraper (from the disclosr.git repository)
# commit: "remove sag copy" (blob:a/scrape.py -> blob:b/scrape.py)
# http://packages.python.org/CouchDB/client.html
import hashlib
import mimetypes
import os
import re
import time
import urllib
import urllib2
import urlparse
from urlparse import urljoin

import couchdb
from BeautifulSoup import BeautifulSoup
   
def canonurl(url):
    r"""Return the canonical, ASCII-encoded form of a UTF-8 encoded URL, or ''
    if the URL looks invalid.

    >>> canonurl('\xe2\x9e\xa1.ws')  # tinyarro.ws
    'http://xn--hgi.ws/'
    """
    # Robustness fix: callers (fetchURL) may hand us None; treat it like an
    # invalid URL instead of crashing on None.strip().
    if url is None:
        return ''

    # strip spaces at the ends and ensure it's prefixed with 'scheme://'
    url = url.strip()
    if not url:
        return ''
    if not urlparse.urlsplit(url).scheme:
        url = 'http://' + url

    # parse the URL into its components
    parsed = urlparse.urlsplit(url)
    scheme, netloc, path, query, fragment = parsed

    # ensure scheme is a letter followed by letters, digits, and '+-.' chars
    if not re.match(r'[a-z][-+.a-z0-9]*$', scheme, flags=re.I):
        return ''
    scheme = str(scheme)

    # ensure domain and port are valid, eg: sub.domain.<1-to-6-TLD-chars>[:port]
    match = re.match(r'(.+\.[a-z0-9]{1,6})(:\d{1,5})?$', netloc, flags=re.I)
    if not match:
        return ''
    domain, port = match.groups()
    netloc = domain + (port if port else '')
    netloc = netloc.encode('idna')  # IDN domains -> ASCII punycode

    # ensure path is valid and convert Unicode chars to %-encoded
    if not path:
        path = '/'  # eg: 'http://google.com' -> 'http://google.com/'
    path = urllib.quote(urllib.unquote(path.encode('utf-8')), safe='/;')

    # ensure query is valid
    query = urllib.quote(urllib.unquote(query.encode('utf-8')), safe='=&?/')

    # ensure fragment is valid
    fragment = urllib.quote(urllib.unquote(fragment.encode('utf-8')))

    # piece it all back together, truncating it to a maximum of 4KB
    url = urlparse.urlunsplit((scheme, netloc, path, query, fragment))
    return url[:4096]
   
# http://diveintopython.org/http_web_services/etags.html
class NotModifiedHandler(urllib2.BaseHandler):
    """urllib2 handler that converts an HTTP 304 Not Modified response into a
    normal response object (carrying .code) instead of raising HTTPError."""

    def http_error_304(self, req, fp, code, message, headers):
        response = urllib2.addinfourl(fp, headers, req.get_full_url())
        response.code = code
        return response
   
def scrapeAndStore(URL, depth, agency): def fetchURL(docsdb, url, fieldName, agencyID, scrape_again=True):
URL = "http://www.hole.fi/jajvirta/weblog/" url = canonurl(url)
req = urllib2.Request(URL) hash = hashlib.md5(url).hexdigest().encode("utf-8")
  req = urllib2.Request(url)
  print "Fetching %s" % url
  if url.startswith("mailto") or url.startswith("javascript") or url.startswith("#") or url == None or url == "":
  print "Not a valid HTTP url"
  return (None,None)
  doc = docsdb.get(hash)
  if doc == None:
  doc = {'_id': hash, 'agencyID': agencyID, 'url': url, 'fieldName':fieldName}
  else:
  if (('page_scraped' in doc) and (time.time() - doc['page_scraped']) < 999999):
  print "Uh oh, trying to scrape URL again too soon!"
  last_attachment_fname = doc["_attachments"].keys()[-1]
  last_attachment = docsdb.get_attachment(doc,last_attachment_fname)
  return (doc['mime_type'],last_attachment)
  if scrape_again == False:
  print "Not scraping this URL again as requested"
  return (None,None)
   
  time.sleep(3) # wait 3 seconds to give webserver time to recover
#if there is a previous version sotred in couchdb, load caching helper tags #if there is a previous version stored in couchdb, load caching helper tags
if etag: if doc.has_key('etag'):
req.add_header("If-None-Match", etag) req.add_header("If-None-Match", doc['etag'])
if last_modified: if doc.has_key('last_modified'):
req.add_header("If-Modified-Since", last_modified) req.add_header("If-Modified-Since", doc['last_modified'])
opener = urllib2.build_opener(NotModifiedHandler()) opener = urllib2.build_opener(NotModifiedHandler())
url_handle = opener.open(req) try:
headers = url_handle.info() # the addinfourls have the .info() too url_handle = opener.open(req)
etag = headers.getheader("ETag") headers = url_handle.info() # the addinfourls have the .info() too
last_modified = headers.getheader("Last-Modified") doc['etag'] = headers.getheader("ETag")
web_server = headers.getheader("Server") doc['last_modified'] = headers.getheader("Last-Modified")
file_size = headers.getheader("Content-Length") doc['date'] = headers.getheader("Date")
mime_type = headers.getheader("Content-Type") doc['page_scraped'] = time.time()
  doc['web_server'] = headers.getheader("Server")
if hasattr(url_handle, 'code') and url_handle.code == 304: doc['powered_by'] = headers.getheader("X-Powered-By")
print "the web page has not been modified" doc['file_size'] = headers.getheader("Content-Length")
else: content_type = headers.getheader("Content-Type")
print "error %s in downloading %s", url_handle.code, URL if content_type != None:
#record/alert error to error database doc['mime_type'] = content_type.split(";")[0]
  else:
#do scraping (type,encoding) = mimetypes.guess_type(url)
html = ? doc['mime_type'] = type
# http://www.crummy.com/software/BeautifulSoup/documentation.html if hasattr(url_handle, 'code'):
soup = BeautifulSoup(html) if url_handle.code == 304:
links = soup.findAll('a') # soup.findAll('a', id=re.compile("^p-")) print "the web page has not been modified"
for link in links: return (None,None)
print link['href'] else:
#for each unique link content = url_handle.read()
#if html mimetype docsdb.save(doc)
# go down X levels, doc = docsdb.get(hash) # need to get a _rev
# diff with last stored attachment, store in document docsdb.put_attachment(doc, content, str(time.time())+"-"+os.path.basename(url), doc['mime_type'])
#if not return (doc['mime_type'], content)
# remember to save parentURL and title (link text that lead to document) #store as attachment epoch-filename
  except urllib2.URLError as e:
#store as attachment epoch-filename error = ""
  if hasattr(e, 'reason'):
  error = "error %s in downloading %s" % (str(e.reason), url)
  elif hasattr(e, 'code'):
  error = "error %s in downloading %s" % (e.code, url)
  print error
  doc['error'] = error
  docsdb.save(doc)
  return (None,None)
   
   
   
  def scrapeAndStore(docsdb, url, depth, fieldName, agencyID):
  (mime_type,content) = fetchURL(docsdb, url, fieldName, agencyID)
  if content != None and depth > 0:
  if mime_type == "text/html" or mime_type == "application/xhtml+xml" or mime_type =="application/xml":
  # http://www.crummy.com/software/BeautifulSoup/documentation.html
  soup = BeautifulSoup(content)
  navIDs = soup.findAll(id=re.compile('nav|Nav|menu|bar'))
  for nav in navIDs:
  print "Removing element", nav['id']
  nav.extract()
  navClasses = soup.findAll(attrs={'class' : re.compile('nav|menu|bar')})
  for nav in navClasses:
  print "Removing element", nav['class']
  nav.extract()
  links = soup.findAll('a') # soup.findAll('a', id=re.compile("^p-"))
  linkurls = set([])
  for link in links:
  if link.has_key("href"):
  if link['href'].startswith("http"):
  # lets not do external links for now
  # linkurls.add(link['href'])
  None
  if link['href'].startswith("mailto"):
  # not http
  None
  if link['href'].startswith("javascript"):
  # not http
  None
  else:
  linkurls.add(urljoin(url,link['href'].replace(" ","%20")))
  for linkurl in linkurls:
  #print linkurl
  scrapeAndStore(docsdb, linkurl, depth-1, fieldName, agencyID)
   
  couch = couchdb.Server('http://127.0.0.1:5984/')
   
  # select database
  agencydb = couch['disclosr-agencies']
  docsdb = couch['disclosr-documents']
   
  for row in agencydb.view('app/getScrapeRequired'): #not recently scraped agencies view?
  agency = agencydb.get(row.id)
  print agency['name']
  for key in agency.keys():
  if key == 'website':
  scrapeAndStore(docsdb, agency[key],0,key,agency['_id'])
  if key.endswith('URL'):
  print key
  depth = 1
  if 'scrapeDepth' in agency.keys():
  depth = agency['scrapeDepth']
  scrapeAndStore(docsdb, agency[key],depth,key,agency['_id'])
   
  agency['metadata']['lastScraped'] = time.time()
  agencydb.save(agency)