Hash urls for insertion during scraping


Former-commit-id: 68bfb5914592737b5409075f197de39d8ab19319
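This commit keys each scraped page by an MD5 hash of its URL, so a page always maps to the same CouchDB document id in disclosr-documents and its caching headers can be looked up again on the next scrape. A minimal sketch of that idea, assuming the couchdb-python client and a local CouchDB instance (the upsert_page_record helper is illustrative only, not part of this commit):

import couchdb
import hashlib

couch = couchdb.Server('http://127.0.0.1:5984/')  # assumed local CouchDB
docsdb = couch['disclosr-documents']

def doc_id_for(url):
    # the same URL always hashes to the same document _id
    return hashlib.md5(url).hexdigest()

def upsert_page_record(url):
    # illustrative helper: fetch the existing record for this URL, or create one
    doc = docsdb.get(doc_id_for(url))
    if doc is None:
        doc = {'_id': doc_id_for(url), 'url': url}
        docsdb.save(doc)
    return doc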

file:a/charts.php -> file:b/charts.php
<?php
include_once('include/common.inc.php');
include_header();
$db = $server->get_db('disclosr-agencies');

?>
<div class="foundation-header">
    <h1><a href="about.php">Charts</a></h1>
    <h4 class="subheader">Lorem ipsum.</h4>
</div>
<div id="placeholder" style="width:900px;height:600px;"></div>
<script id="source">
    window.onload = function() {
        $(document).ready(function() {
            var d1 = [];
            var labels = [];
<?php
try {
    $rows = $db->get_view("app", "scoreHas?group=true", null, true)->rows;

    /*foreach ($rows as $key => $row) {
        echo " d1.push([$key, {$row->value}]);".PHP_EOL;
        echo " labels.push('{$row->key}');".PHP_EOL;
    }*/
    $dataValues = Array();
    foreach ($rows as $row) {
        $dataValues[$row->value] = $row->key;
    }
    $i = 0;
    ksort($dataValues);
    foreach ($dataValues as $value => $key) {
        echo " d1.push([$i, $value]);".PHP_EOL;
        echo " labels.push('$key');".PHP_EOL;
        $i++;
    }
} catch (SetteeRestClientException $e) {
    setteErrorHandler($e);
}
?>
            $.plot($("#placeholder"), [d1], {
                grid: { hoverable: true },

                series: {
                    bars: { show: true, barWidth: 0.6 }
                },
                xaxis: {
                    tickFormatter: function formatter(val, axis) {
                        if (labels[val]) {
                            return (labels[val]);
                        } else {
                            return "";
                        }
                    },
                    labelAngle: 90
                }
            });
            var previousPoint = null;
            $("#placeholder").bind("plothover", function (event, pos, item) {
                if (item) {
                    if (previousPoint != item.datapoint) {
                        previousPoint = item.datapoint;

                        $("#tooltip").remove();
                        var x = item.datapoint[0],
                            y = item.datapoint[1] - item.datapoint[2];

                        showTooltip(item.pageX, item.pageY, y);
                    }
                } else {
                    $("#tooltip").remove();
                    previousPoint = null;
                }
            });

        });
    };
    function showTooltip(x, y, contents) {
        $('<div id="tooltip">' + contents + '</div>').css({
            position: 'absolute',
            display: 'none',
            top: y + 5,
            left: x + 5,
            border: '1px solid #fdd',
            padding: '2px',
            'background-color': '#fee',
            opacity: 0.80
        }).appendTo("body").fadeIn(200);
    }
</script>

<?php
include_footer();
?>
file:a/scrape.py -> file:b/scrape.py
#http://packages.python.org/CouchDB/client.html
import couchdb
import urllib2
from BeautifulSoup import BeautifulSoup
import re
import hashlib

#http://diveintopython.org/http_web_services/etags.html
class NotModifiedHandler(urllib2.BaseHandler):
    def http_error_304(self, req, fp, code, message, headers):
        addinfourl = urllib2.addinfourl(fp, headers, req.get_full_url())
        addinfourl.code = code
        return addinfourl

def scrapeAndStore(docsdb, url, depth, agencyID):
    hash = hashlib.md5(url).hexdigest()
    req = urllib2.Request(url)
    print "Fetching %s" % url
    doc = docsdb.get(hash)  # record for this URL, keyed by its hash
    if doc is None:
        # fall back to a fresh record if this URL has not been seen before
        doc = {'_id': hash, 'url': url, 'agencyID': agencyID}
    # if there is a previous version stored in couchdb, load caching helper tags
    if doc.has_key('etag'):
        req.add_header("If-None-Match", doc['etag'])
    if doc.has_key('last_modified'):
        req.add_header("If-Modified-Since", doc['last_modified'])

    opener = urllib2.build_opener(NotModifiedHandler())
    url_handle = opener.open(req)
    headers = url_handle.info()  # the addinfourls have the .info() too
    doc['etag'] = headers.getheader("ETag")
    doc['last_modified'] = headers.getheader("Last-Modified")
    doc['web_server'] = headers.getheader("Server")
    doc['file_size'] = headers.getheader("Content-Length")
    doc['mime_type'] = headers.getheader("Content-Type")
    if hasattr(url_handle, 'code'):
        if url_handle.code == 304:
            print "the web page has not been modified"
        else:
            # do scraping
            html = url_handle.read()
            # http://www.crummy.com/software/BeautifulSoup/documentation.html
            soup = BeautifulSoup(html)
            links = soup.findAll('a')  # soup.findAll('a', id=re.compile("^p-"))
            for link in links:
                if link.has_key("href"):
                    print link['href']
                    #for each unique link
                    #if html mimetype
                    #  go down X levels,
                    #  diff with last stored attachment, store in document
                    #if not
                    #  remember to save parentURL and title (link text that led to document)
                    #store as attachment epoch-filename
    else:
        print "error %s in downloading %s" % (url_handle.code, url)
        #record/alert error to error database


couch = couchdb.Server('http://192.168.1.148:5984/')

# select databases
agencydb = couch['disclosr-agencies']
docsdb = couch['disclosr-documents']

for row in agencydb.view('app/getScrapeRequired'):  # not recently scraped agencies view?
    agency = agencydb.get(row.id)
    print agency['name']
    scrapeAndStore(docsdb, agency['website'], 1, agency['_id'])
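The "store as attachment epoch-filename" step is still only a TODO comment in this commit. A rough sketch of how it could be done with couchdb-python's put_attachment follows; the epoch-based filename and the preliminary save are assumptions, not behaviour this commit implements:

import time

def store_page_attachment(docsdb, doc, content, mime_type):
    # sketch only: save the document first so it has a current _rev, then attach
    # the fetched page, named by the epoch timestamp so older versions sit alongside it
    docsdb.save(doc)
    filename = str(int(time.time()))
    docsdb.put_attachment(doc, content, filename=filename, content_type=mime_type)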