[423] Handle many more graphing edge-cases.

Prepare graphs for an expected time period (since July 2012) rather than for whatever time period the DB returns, which certain queries can reduce to absurdity. Graphs now always have a consistent X-axis, so the ugly logic for combining disparate data series could be removed.
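
For illustration only (not part of this patch): a minimal sketch of the zero-filling approach the new _to_rickshaw takes, assuming period names use the 'YYYY-MM' format (the real code uses the extension's _get_unix_epoch helper; month_to_epoch below is a stand-in).

    import calendar, time

    def month_to_epoch(period_name):
        # assumed format 'YYYY-MM', e.g. '2012-07'
        return calendar.timegm(time.strptime(period_name, '%Y-%m'))

    def fill_series(x_axis_months, series_list):
        # every series gets one point per month on the shared axis;
        # months with no data become y=0
        for series in series_list:
            series['data'] = [{'x': month_to_epoch(m), 'y': series['raw'].get(m, 0)}
                              for m in x_axis_months]
        return series_list

    months = ['2012-07', '2012-08', '2012-09']   # the expected period, not whatever the DB happens to hold
    series = [{'name': 'example-dataset', 'raw': {'2012-08': 42}}]
    print(fill_series(months, series))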

On the 'Publisher' and 'Dataset' tabs, always graph the *top 20* series regardless of the month currently rendered in the table. This makes more sense from a usability point of view.
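
A rough sketch of that split, with a toy stand-in for the extension's _get_packages helper (its real signature is in controller.py below): the table follows the selected month, while the graph is always built from the all-time ('All') top 20 so its series stay stable as the reader pages through months.

    def pick_rows(get_packages, publisher, selected_month):
        table_rows = get_packages(publisher=publisher, count=20, month=selected_month)
        graph_rows = get_packages(publisher=publisher, count=20, month='All')  # stable top 20
        return table_rows, graph_rows

    # toy stand-in for _get_packages, keyed by period name
    fake_db = {'2012-08': ['dataset-a'], 'All': ['dataset-a', 'dataset-b', 'dataset-c']}
    stub = lambda publisher, count, month: fake_db.get(month, [])[:count]
    print(pick_rows(stub, 'cabinet-office', '2012-08'))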

Finally, some client-side error checking was improved.

--- a/ckanext/ga_report/command.py
+++ b/ckanext/ga_report/command.py
@@ -23,7 +23,7 @@
         import ckan.model as model
         model.Session.remove()
         model.Session.configure(bind=model.meta.engine)
-        log = logging.getLogger('ckanext.ga-report')
+        log = logging.getLogger('ckanext.ga_report')
 
         import ga_model
         ga_model.init_tables()

--- a/ckanext/ga_report/controller.py
+++ b/ckanext/ga_report/controller.py
@@ -191,25 +191,11 @@
             q = model.Session.query(GA_Stat).\
                 filter(GA_Stat.stat_name==k).\
                 order_by(GA_Stat.period_name)
-            # Run the query on all months to gather graph data
-            graph = {}
-            for stat in q:
-                graph[ stat.key ] = graph.get(stat.key,{
-                    'name':stat.key, 
-                    'data': []
-                    })
-                graph[ stat.key ]['data'].append({
-                    'x':_get_unix_epoch(stat.period_name),
-                    'y':float(stat.value)
-                    })
-            setattr(c, v+'_graph', json.dumps( _to_rickshaw(graph.values(),percentageMode=True) ))
-
             # Buffer the tabular data
             if c.month:
                 entries = []
                 q = q.filter(GA_Stat.period_name==c.month).\
                           order_by('ga_stat.value::int desc')
-
             d = collections.defaultdict(int)
             for e in q.all():
                 d[e.key] += int(e.value)
@@ -218,6 +204,23 @@
                 entries.append((key,val,))
             entries = sorted(entries, key=operator.itemgetter(1), reverse=True)
 
+            # Run a query on all months to gather graph data
+            graph_query = model.Session.query(GA_Stat).\
+                filter(GA_Stat.stat_name==k).\
+                order_by(GA_Stat.period_name)
+            graph_dict = {}
+            for stat in graph_query:
+                graph_dict[ stat.key ] = graph_dict.get(stat.key,{
+                    'name':stat.key, 
+                    'raw': {}
+                    })
+                graph_dict[ stat.key ]['raw'][stat.period_name] = float(stat.value)
+            stats_in_table = [x[0] for x in entries]
+            stats_not_in_table = set(graph_dict.keys()) - set(stats_in_table)
+            stats = stats_in_table + sorted(list(stats_not_in_table))
+            graph = [graph_dict[x] for x in stats]
+            setattr(c, v+'_graph', json.dumps( _to_rickshaw(graph,percentageMode=True) ))
+
             # Get the total for each set of values and then set the value as
             # a percentage of the total
             if k == 'Social sources':
@@ -246,7 +249,7 @@
         writer = csv.writer(response)
         writer.writerow(["Publisher Title", "Publisher Name", "Views", "Visits", "Period Name"])
 
-        top_publishers, top_publishers_graph = _get_top_publishers(None)
+        top_publishers = _get_top_publishers(limit=None)
 
         for publisher,view,visit in top_publishers:
             writer.writerow([publisher.title.encode('utf-8'),
@@ -268,7 +271,7 @@
             if not c.publisher:
                 abort(404, 'A publisher with that name could not be found')
 
-        packages = self._get_packages(c.publisher)
+        packages = self._get_packages(publisher=c.publisher, month=c.month)
         response.headers['Content-Type'] = "text/csv; charset=utf-8"
         response.headers['Content-Disposition'] = \
             str('attachment; filename=datasets_%s_%s.csv' % (c.publisher_name, month,))
@@ -297,15 +300,16 @@
         if c.month:
             c.month_desc = ''.join([m[1] for m in c.months if m[0]==c.month])
 
-        c.top_publishers, graph_data = _get_top_publishers()
-        c.top_publishers_graph = json.dumps( _to_rickshaw(graph_data.values()) )
+        c.top_publishers = _get_top_publishers()
+        graph_data = _get_top_publishers_graph()
+        c.top_publishers_graph = json.dumps( _to_rickshaw(graph_data) )
 
         return render('ga_report/publisher/index.html')
 
-    def _get_packages(self, publisher=None, count=-1):
+    def _get_packages(self, publisher=None, month='', count=-1):
         '''Returns the datasets in order of views'''
         have_download_data = True
-        month = c.month or 'All'
+        month = month or 'All'
         if month != 'All':
             have_download_data = month >= DOWNLOADS_AVAILABLE_FROM
 
@@ -382,74 +386,71 @@
         entry = q.filter(GA_Url.period_name==c.month).first()
         c.publisher_page_views = entry.pageviews if entry else 0
 
-        c.top_packages = self._get_packages(c.publisher, 20)
+        c.top_packages = self._get_packages(publisher=c.publisher, count=20, month=c.month)
 
         # Graph query
-        top_package_names = [ x[0].name for x in c.top_packages ]
+        top_packages_all_time = self._get_packages(publisher=c.publisher, count=20, month='All')
+        top_package_names = [ x[0].name for x in top_packages_all_time ]
         graph_query = model.Session.query(GA_Url,model.Package)\
             .filter(model.Package.name==GA_Url.package_id)\
             .filter(GA_Url.url.like('/dataset/%'))\
             .filter(GA_Url.package_id.in_(top_package_names))
-        graph_data = {}
+        all_series = {}
         for entry,package in graph_query:
             if not package: continue
             if entry.period_name=='All': continue
-            graph_data[package.id] = graph_data.get(package.id,{
+            all_series[package.name] = all_series.get(package.name,{
                 'name':package.title,
-                'data':[]
+                'raw': {}
                 })
-            graph_data[package.id]['data'].append({
-                'x':_get_unix_epoch(entry.period_name),
-                'y':int(entry.pageviews),
-                })
-                    
-        c.graph_data = json.dumps( _to_rickshaw(graph_data.values()) )
+            all_series[package.name]['raw'][entry.period_name] = int(entry.pageviews)
+        graph = [ all_series[series_name] for series_name in top_package_names ]
+        c.graph_data = json.dumps( _to_rickshaw(graph) )
 
         return render('ga_report/publisher/read.html')
 
 def _to_rickshaw(data, percentageMode=False):
     if data==[]:
         return data
-    # Create a consistent x-axis
-    num_points = [ len(package['data']) for package in data ]
-    ideal_index = num_points.index( max(num_points) )
-    x_axis = [ point['x'] for point in data[ideal_index]['data'] ]
-    for package in data:
-        xs = [ point['x'] for point in package['data'] ]
-        assert set(xs).issubset( set(x_axis) ), (xs, x_axis)
-        # Zero pad any missing values
-        for x in set(x_axis).difference(set(xs)):
-            package['data'].append( {'x':x, 'y':0} )
-        assert len(package['data'])==len(x_axis), (len(package['data']),len(x_axis),package['data'],x_axis,set(x_axis).difference(set(xs)))
-    if percentageMode:
-        # Transform data into percentage stacks
-        totals = {}
-        for x in x_axis:
-            for package in data:
-                for point in package['data']:
-                    totals[ point['x'] ] = totals.get(point['x'],0) + point['y']
-        # Roll insignificant series into a catch-all
-        THRESHOLD = 0.01
-        significant_series = []
-        for package in data:
-            for point in package['data']:
-                fraction = float(point['y']) / totals[point['x']]
-                if fraction>THRESHOLD and not (package in significant_series):
-                    significant_series.append(package)
-        temp = {}
-        for package in data:
-            if package in significant_series: continue
-            for point in package['data']:
-                temp[point['x']] = temp.get(point['x'],0) + point['y']
-        catch_all = { 'name':'Other','data': [ {'x':x,'y':y} for x,y in temp.items() ] }
-        # Roll insignificant series into one
-        data = significant_series
-        data.append(catch_all)
-    # Sort the points
-    for package in data:
-        package['data'] = sorted( package['data'], key=lambda x:x['x'] )
-        # Strip the latest month's incomplete analytics
-        package['data'] = package['data'][:-1]
+    # The x-axis is every month in c.months. Note that data might not exist
+    # for the entire history, e.g. for recently-added datasets.
+    x_axis = [x[0] for x in c.months]
+    x_axis.reverse() # Ascending order
+    x_axis = x_axis[:-1] # Remove latest month
+    totals = {}
+    for series in data:
+        series['data'] = []
+        for x_string in x_axis:
+            x = _get_unix_epoch( x_string )
+            y = series['raw'].get(x_string,0)
+            series['data'].append({'x':x,'y':y})
+            totals[x] = totals.get(x,0)+y
+    if not percentageMode:
+        return data
+    # Turn all data into percentages
+    # Roll insignificant series into a catch-all
+    THRESHOLD = 1
+    raw_data = data
+    data = []
+    for series in raw_data:
+        for point in series['data']:
+            percentage = (100*float(point['y'])) / totals[point['x']]
+            if not (series in data) and percentage>THRESHOLD:
+                data.append(series)
+            point['y'] = percentage
+    others = [ x for x in raw_data if not (x in data) ]
+    if len(others):
+        data_other = []
+        for i in range(len(x_axis)):
+            x = _get_unix_epoch(x_axis[i])
+            y = 0
+            for series in others: 
+                y += series['data'][i]['y']
+            data_other.append({'x':x,'y':y})
+        data.append({ 
+            'name':'Other',
+            'data': data_other
+            })
     return data
 
 
@@ -474,35 +475,51 @@
 
     top_publishers = []
     res = connection.execute(q, month)
-    department_ids = []
     for row in res:
         g = model.Group.get(row[0])
         if g:
-            department_ids.append(row[0])
             top_publishers.append((g, row[1], row[2]))
-
-    graph = {}
-    if limit is not None:
-        # Query for a history graph of these publishers
-        q = model.Session.query(
-                GA_Url.department_id, 
-                GA_Url.period_name, 
-                func.sum(cast(GA_Url.pageviews,sqlalchemy.types.INT)))\
-            .filter( GA_Url.department_id.in_(department_ids) )\
-            .filter( GA_Url.period_name!='All' )\
-            .filter( GA_Url.url.like('/dataset/%') )\
-            .filter( GA_Url.package_id!='' )\
-            .group_by( GA_Url.department_id, GA_Url.period_name )
-        for dept_id,period_name,views in q:
-            graph[dept_id] = graph.get( dept_id, {
-                'name' : model.Group.get(dept_id).title,
-                'data' : []
-                })
-            graph[dept_id]['data'].append({
-                'x': _get_unix_epoch(period_name),
-                'y': views
-                })
-    return top_publishers, graph
+    return top_publishers
+
+
+def _get_top_publishers_graph(limit=20):
+    '''
+    Returns a list of graph data series for the top 20 publishers by dataset views.
+    (The number of publishers can be varied with 'limit')
+    '''
+    connection = model.Session.connection()
+    q = """
+        select department_id, sum(pageviews::int) views
+        from ga_url
+        where department_id <> ''
+          and package_id <> ''
+          and url like '/dataset/%%'
+          and period_name='All'
+        group by department_id order by views desc
+        """
+    if limit:
+        q = q + " limit %s;" % (limit)
+
+    res = connection.execute(q)
+    department_ids = [ row[0] for row in res ]
+
+    # Query for a history graph of these department ids
+    q = model.Session.query(
+            GA_Url.department_id, 
+            GA_Url.period_name, 
+            func.sum(cast(GA_Url.pageviews,sqlalchemy.types.INT)))\
+        .filter( GA_Url.department_id.in_(department_ids) )\
+        .filter( GA_Url.url.like('/dataset/%') )\
+        .filter( GA_Url.package_id!='' )\
+        .group_by( GA_Url.department_id, GA_Url.period_name )
+    graph_dict = {}
+    for dept_id,period_name,views in q:
+        graph_dict[dept_id] = graph_dict.get( dept_id, {
+            'name' : model.Group.get(dept_id).title,
+            'raw' : {}
+            })
+        graph_dict[dept_id]['raw'][period_name] = views
+    return [ graph_dict[id] for id in department_ids ]
 
 
 def _get_publishers():

--- a/ckanext/ga_report/download_analytics.py
+++ b/ckanext/ga_report/download_analytics.py
@@ -32,6 +32,11 @@
         first_of_this_month = datetime.datetime(date.year, date.month, 1)
         _, last_day_of_month = calendar.monthrange(int(date.year), int(date.month))
         last_of_this_month =  datetime.datetime(date.year, date.month, last_day_of_month)
+        # if this is the latest month, note that it is only up until today
+        now = datetime.datetime.now()
+        if now.year == date.year and now.month == date.month:
+            last_day_of_month = now.day
+            last_of_this_month = now
         periods = ((date.strftime(FORMAT_MONTH),
                     last_day_of_month,
                     first_of_this_month, last_of_this_month),)
@@ -126,7 +131,7 @@
                 # Make sure the All records are correct.
                 ga_model.post_update_url_stats()
 
-                log.info('Aggregating datasets by publisher')
+                log.info('Associating datasets with their publisher')
                 ga_model.update_publisher_stats(period_name) # about 30 seconds.
 
 
@@ -298,7 +303,7 @@
 
 
     def _download_stats(self, start_date, end_date, period_name, period_complete_day):
-        """ Fetches stats about language and country """
+        """ Fetches stats about data downloads """
         import ckan.model as model
 
         data = {}
@@ -320,7 +325,14 @@
             return
 
         def process_result_data(result_data, cached=False):
+            progress_total = len(result_data)
+            progress_count = 0
+            resources_not_matched = []
             for result in result_data:
+                progress_count += 1
+                if progress_count % 100 == 0:
+                    log.debug('.. %d/%d done so far', progress_count, progress_total)
+
                 url = result[0].strip()
 
                 # Get package id associated with the resource that has this URL.
@@ -334,9 +346,13 @@
                 if package_name:
                     data[package_name] = data.get(package_name, 0) + int(result[1])
                 else:
-                    log.warning(u"Could not find resource for URL: {url}".format(url=url))
+                    resources_not_matched.append(url)
                     continue
-
+            if resources_not_matched:
+                log.debug('Could not match %i of %i resource URLs to datasets. e.g. %r',
+                          len(resources_not_matched), progress_total, resources_not_matched[:3])
+
+        log.info('Associating downloads of resource URLs with their respective datasets')
         process_result_data(results.get('rows'))
 
         results = self.service.data().ga().get(
@@ -348,6 +364,7 @@
                                  dimensions="ga:eventLabel",
                                  max_results=10000,
                                  end_date=end_date).execute()
+        log.info('Associating downloads of cache resource URLs with their respective datasets')
         process_result_data(results.get('rows'), cached=False)
 
         self._filter_out_long_tail(data, MIN_DOWNLOADS)

--- a/ckanext/ga_report/ga_model.py
+++ b/ckanext/ga_report/ga_model.py
@@ -161,20 +161,20 @@
 
 
 def pre_update_url_stats(period_name):
-    log.debug("Deleting '%s' records" % period_name)
-    model.Session.query(GA_Url).\
-            filter(GA_Url.period_name==period_name).delete()
-
-    count = model.Session.query(GA_Url).\
-            filter(GA_Url.period_name == 'All').count()
-    log.debug("Deleting %d 'All' records" % count)
-    count = model.Session.query(GA_Url).\
-            filter(GA_Url.period_name == 'All').delete()
-    log.debug("Deleted %d 'All' records" % count)
+    q = model.Session.query(GA_Url).\
+        filter(GA_Url.period_name==period_name)
+    log.debug("Deleting %d '%s' records" % (q.count(), period_name))
+    q.delete()
+
+    q = model.Session.query(GA_Url).\
+        filter(GA_Url.period_name == 'All')
+    log.debug("Deleting %d 'All' records..." % q.count())
+    q.delete()
 
     model.Session.flush()
     model.Session.commit()
     model.repo.commit_and_remove()
+    log.debug('...done')
 
 def post_update_url_stats():
 
@@ -185,6 +185,7 @@
         record regardless of whether the URL has an entry for
         the month being currently processed.
     """
+    log.debug('Post-processing "All" records...')
     query = """select url, pageviews::int, visits::int
                from ga_url
                where url not in (select url from ga_url where period_name ='All')"""
@@ -197,7 +198,13 @@
         views[row[0]] = views.get(row[0], 0) + row[1]
         visits[row[0]] = visits.get(row[0], 0) + row[2]
 
+    progress_total = len(views.keys())
+    progress_count = 0
     for key in views.keys():
+        progress_count += 1
+        if progress_count % 100 == 0:
+            log.debug('.. %d/%d done so far', progress_count, progress_total)
+
         package, publisher = _get_package_and_publisher(key)
 
         values = {'id': make_uuid(),
@@ -207,10 +214,11 @@
                   'pageviews': views[key],
                   'visits': visits[key],
                   'department_id': publisher,
-                  'package_id': publisher
+                  'package_id': package
                   }
         model.Session.add(GA_Url(**values))
     model.Session.commit()
+    log.debug('..done')
 
 
 def update_url_stats(period_name, period_complete_day, url_data):
@@ -219,9 +227,14 @@
     stores them in GA_Url under the period and recalculates the totals for
     the 'All' period.
     '''
+    progress_total = len(url_data)
+    progress_count = 0
     for url, views, visits in url_data:
+        progress_count += 1
+        if progress_count % 100 == 0:
+            log.debug('.. %d/%d done so far', progress_count, progress_total)
+
         package, publisher = _get_package_and_publisher(url)
-
 
         item = model.Session.query(GA_Url).\
             filter(GA_Url.period_name==period_name).\

--- a/ckanext/ga_report/helpers.py
+++ b/ckanext/ga_report/helpers.py
@@ -80,7 +80,7 @@
     return base.render_snippet('ga_report/ga_popular_single.html', **context)
 
 
-def most_popular_datasets(publisher, count=20):
+def most_popular_datasets(publisher, count=20, preview_image=None):
 
     if not publisher:
         _log.error("No valid publisher passed to 'most_popular_datasets'")
@@ -92,7 +92,8 @@
         'dataset_count': len(results),
         'datasets': results,
 
-        'publisher': publisher
+        'publisher': publisher,
+        'preview_image': preview_image
     }
 
     return base.render_snippet('ga_report/publisher/popular.html', **ctx)
@@ -106,6 +107,10 @@
     for entry in entries:
         if len(datasets) < count:
             p = model.Package.get(entry.url[len('/dataset/'):])
+            if not p:
+                _log.warning("Could not find Package for {url}".format(url=entry.url))
+                continue
+
             if not p in datasets:
                 datasets[p] = {'views':0, 'visits': 0}
             datasets[p]['views'] = datasets[p]['views'] + int(entry.pageviews)
@@ -117,3 +122,17 @@
 
     return sorted(results, key=operator.itemgetter(1), reverse=True)
 
+def month_option_title(month_iso, months, day):
+    month_isos = [ iso_code for (iso_code,name) in months ]
+    try:
+        index = month_isos.index(month_iso)
+    except ValueError:
+        _log.error('Month "%s" not found in list of months.' % month_iso)
+        return month_iso
+    month_name = months[index][1]
+    if index==0:
+        return month_name + (' (up to %s)'%day)
+    return month_name
+
+
+

--- a/ckanext/ga_report/plugin.py
+++ b/ckanext/ga_report/plugin.py
@@ -5,7 +5,8 @@
 
 from ckanext.ga_report.helpers import (most_popular_datasets,
                                        popular_datasets,
-                                       single_popular_dataset)
+                                       single_popular_dataset,
+                                       month_option_title)
 
 log = logging.getLogger('ckanext.ga-report')
 
@@ -27,7 +28,8 @@
             'ga_report_installed': lambda: True,
             'popular_datasets': popular_datasets,
             'most_popular_datasets': most_popular_datasets,
-            'single_popular_dataset': single_popular_dataset
+            'single_popular_dataset': single_popular_dataset,
+            'month_option_title': month_option_title
         }
 
     def after_map(self, map):

--- a/ckanext/ga_report/public/css/ga_report.css
+++ b/ckanext/ga_report/public/css/ga_report.css
@@ -2,6 +2,11 @@
   padding: 1px 0 0 0;
   width: 108px;
   text-align: center;
+  /* Hack to hide the momentary flash of text 
+   * before sparklines are fully rendered */
+  font-size: 1px;
+  color: transparent;
+  overflow: hidden;
 }
 .rickshaw_chart_container {
   position: relative;
@@ -16,16 +21,9 @@
   bottom: 0;
 }
 .rickshaw_legend {
-  position: absolute;
-  right: 0;
-  top: 0;
-  margin-left: 15px;
-  padding: 0 5px;
   background: transparent;
-  max-width: 150px;
-  overflow: hidden;
-  background: rgba(0,0,0,0.05);
-  border-radius:5px;
+  width: 100%;
+  padding-top: 4px;
 }
 .rickshaw_y_axis {
   position: absolute;
@@ -38,4 +36,34 @@
   color: #000000 !important;
   font-weight: normal !important;
 }
+.rickshaw_legend .instructions {
+  color: #000;
+  margin-bottom: 6px;
+}
 
+.rickshaw_legend .line .action {
+  display: none;
+}
+.rickshaw_legend .line .swatch {
+  display: block;
+  float: left;
+}
+.rickshaw_legend .line .label {
+  display: block;
+  white-space: normal;
+  float: left;
+  width: 200px;
+}
+.rickshaw_legend .line .label:hover {
+  text-decoration: underline;
+}
+
+.ga-reports-table .td-numeric {
+  text-align: center;
+}
+.ga-reports-heading {
+  padding-right: 10px;
+  margin-top: 4px;
+  float: left;
+}
+

--- a/ckanext/ga_report/public/scripts/ckanext_ga_reports.js
+++ b/ckanext/ga_report/public/scripts/ckanext_ga_reports.js
@@ -1,12 +1,34 @@
-
 var CKAN = CKAN || {};
 CKAN.GA_Reports = {};
 
 CKAN.GA_Reports.render_rickshaw = function( css_name, data, mode, colorscheme ) {
+    var graphLegends = $('#graph-legend-container');
+
+    function renderError(alertClass,alertText,legendText) {
+        $("#chart_"+css_name)
+          .html( '<div class="alert '+alertClass+'">'+alertText+'</div>')
+          .closest('.rickshaw_chart_container').css('height',50);
+        var myLegend = $('<div id="legend_'+css_name+'"/>')
+          .html(legendText)
+          .appendTo(graphLegends);
+    }
+
+    if (!Modernizr.svg) {
+        renderError('','Your browser does not support vector graphics. No graphs can be rendered.','(Graph cannot be rendered)');
+        return;
+    }
+    if (data.length==0) {
+        renderError('alert-info','There is not enough data to render a graph.','(No graph available)');
+        return;
+    }
+    var myLegend = $('<div id="legend_'+css_name+'"/>').appendTo(graphLegends);
+
     var palette = new Rickshaw.Color.Palette( { scheme: colorscheme } );
     $.each(data, function(i, object) {
         object['color'] = palette.color();
     });
+    // Rickshaw renders the legend in reverse order...
+    data.reverse();
 
     var graphElement =  document.querySelector("#chart_"+css_name);
 
@@ -16,27 +38,95 @@
         series: data ,
         height: 328
     });
-    graph.render();
-    var x_axis = new Rickshaw.Graph.Axis.Time( { graph: graph } );
+    var x_axis = new Rickshaw.Graph.Axis.Time( { 
+        graph: graph 
+    } );
     var y_axis = new Rickshaw.Graph.Axis.Y( {
         graph: graph,
         orientation: 'left',
         tickFormat: Rickshaw.Fixtures.Number.formatKMBT,
-        element: document.getElementById('y_axis_'+css_name),
+        element: document.getElementById('y_axis_'+css_name)
     } );
     var legend = new Rickshaw.Graph.Legend( {
         element: document.querySelector('#legend_'+css_name),
         graph: graph
     } );
-    var hoverDetail = new Rickshaw.Graph.HoverDetail( {
+    var shelving = new Rickshaw.Graph.Behavior.Series.Toggle( {
       graph: graph,
-      formatter: function(series, x, y) {
-        var date = '<span class="date">' + new Date(x * 1000).toUTCString() + '</span>';
-        var swatch = '<span class="detail_swatch" style="background-color: ' + series.color + '"></span>';
-        var content = swatch + series.name + ": " + parseInt(y) + '<br>' + date;
-        return content;
-      }
+      legend: legend
     } );
+    myLegend.prepend('<div class="instructions">Click on a series below to isolate its graph:</div>');
+    graph.render();
 };
 
+CKAN.GA_Reports.bind_sparklines = function() {
+  /* 
+   * Bind to the 'totals' tab being on screen, when the 
+   * Sparkline graphs should be drawn.
+   * Note that they cannot be drawn sooner.
+   */
+  var created = false;
+  $('a[href="#totals"]').on(
+    'shown', 
+      function() {
+        if (!created) {
+          var sparkOptions = {
+            enableTagOptions: true,
+            type: 'line',
+            width: 100,
+            height: 26,
+            chartRangeMin: 0,
+            spotColor: '',
+            maxSpotColor: '',
+            minSpotColor: '',
+            highlightSpotColor: '#000000',
+            lineColor: '#3F8E6D',
+            fillColor: '#B7E66B'
+          };
+          $('.sparkline').sparkline('html',sparkOptions);
+          created = true;
+        }
+        $.sparkline_display_visible();
+      }
+  );
+};
 
+CKAN.GA_Reports.bind_sidebar = function() {
+  /* 
+   * Bind to changes in the tab behaviour: 
+   * Show the correct rickshaw graph in the sidebar. 
+   * Not to be called before all graphs load.
+   */
+  $('a[data-toggle="hashtab"]').on(
+    'shown',
+    function(e) {
+      var href = $(e.target).attr('href');
+      var pane = $(href);
+      if (!pane.length) { console.error('bad href',href); return; }
+      var legend_name = "none";
+      var graph = pane.find('.rickshaw_chart');
+      if (graph.length) {
+        legend_name = graph.attr('id').replace('chart_','');
+      }
+      legend_name = '#legend_'+legend_name;
+      $('#graph-legend-container > *').hide();
+      $('#graph-legend-container .instructions').show();
+      $(legend_name).show();
+    }
+  );
+  /* The first tab might already have been shown */
+  $('li.active > a[data-toggle="hashtab"]').trigger('shown');
+};
+
+CKAN.GA_Reports.bind_month_selector = function() {
+  var handler = function(e) { 
+    var target = $(e.delegateTarget);
+    var form = target.closest('form');
+    var url = form.attr('action')+'?month='+target.val()+window.location.hash;
+    window.location = url;
+  };
+  var selectors = $('select[name="month"]');
+  assert(selectors.length>0);
+  selectors.bind('change', handler);
+};
+

--- /dev/null
+++ b/ckanext/ga_report/public/scripts/rickshaw_ie7_shim.js
@@ -1,1 +1,109 @@
+/* 
+ * Collection of shims to allow d3 and Rickshaw to load, error-free
+ * (but ultimately unusable) on Internet Explorer 7. The browser's
+ * API lacks several crucial functions which these libraries depend
+ * upon to load; we try to hide these errors from the user.
+ *
+ * With thanks to Array functions from:
+ * http://stackoverflow.com/questions/2790001/fixing-javascript-array-functions-in-internet-explorer-indexof-foreach-etc
+ *
+ * Use (Modernizr.svg==true) to detect whether it's okay to draw a graph.
+ */
+'use strict';
 
+window.Element = window.Element || {'prototype': {}};
+window.CSSStyleDeclaration = window.CSSStyleDeclaration || {'prototype':{}};
+
+// Add ECMA262-5 method binding if not supported natively
+//
+if (!('bind' in Function.prototype)) {
+    Function.prototype.bind= function(owner) {
+        var that= this;
+        if (arguments.length<=1) {
+            return function() {
+                return that.apply(owner, arguments);
+            };
+        } else {
+            var args= Array.prototype.slice.call(arguments, 1);
+            return function() {
+                return that.apply(owner, arguments.length===0? args : args.concat(Array.prototype.slice.call(arguments)));
+            };
+        }
+    };
+}
+
+// Add ECMA262-5 string trim if not supported natively
+//
+if (!('trim' in String.prototype)) {
+    String.prototype.trim= function() {
+        return this.replace(/^\s+/, '').replace(/\s+$/, '');
+    };
+}
+
+// Add ECMA262-5 Array methods if not supported natively
+//
+if (!('indexOf' in Array.prototype)) {
+    Array.prototype.indexOf= function(find, i /*opt*/) {
+        if (i===undefined) i= 0;
+        if (i<0) i+= this.length;
+        if (i<0) i= 0;
+        for (var n= this.length; i<n; i++)
+            if (i in this && this[i]===find)
+                return i;
+        return -1;
+    };
+}
+if (!('lastIndexOf' in Array.prototype)) {
+    Array.prototype.lastIndexOf= function(find, i /*opt*/) {
+        if (i===undefined) i= this.length-1;
+        if (i<0) i+= this.length;