import re
import csv
import sys
import json
import logging
import operator
import collections
from ckan.lib.base import (BaseController, c, g, render, request, response, abort)

import sqlalchemy
from sqlalchemy import func, cast, Integer
import ckan.model as model
from ga_model import GA_Url, GA_Stat, GA_ReferralStat, GA_Publisher

log = logging.getLogger('ckanext.ga-report')
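
# Download statistics are only available from this month onwards.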
DOWNLOADS_AVAILABLE_FROM = '2012-12'

def _get_month_name(strdate):
    import calendar
    from time import strptime
    d = strptime(strdate, '%Y-%m')
    return '%s %s' % (calendar.month_name[d.tm_mon], d.tm_year)

def _get_unix_epoch(strdate):
    from time import strptime, mktime
    d = strptime(strdate, '%Y-%m')
    return int(mktime(d))

def _month_details(cls, stat_key=None):
    '''
    Returns a list of all the periods for which we have data. Unfortunately
    this knows too much about the type of the cls being passed, as GA_Url
    has a more complex query.

    This may need extending if we add a period_name to the stats
    '''
    months = []
    day = None

    q = model.Session.query(cls.period_name, cls.period_complete_day)\
        .filter(cls.period_name!='All').distinct(cls.period_name)
    if stat_key:
        q = q.filter(cls.stat_name==stat_key)

    vals = q.order_by("period_name desc").all()

    if vals and vals[0][1]:
        day = int(vals[0][1])
        ordinal = 'th' if 11 <= day <= 13 \
            else {1:'st', 2:'nd', 3:'rd'}.get(day % 10, 'th')
        day = "{day}{ordinal}".format(day=day, ordinal=ordinal)

    for m in vals:
        months.append((m[0], _get_month_name(m[0])))

    return months, day


class GaReport(BaseController):

    def csv(self, month):
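        '''Serve the recorded statistics (for one month, or 'all') as a CSV attachment.'''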
        import csv

        q = model.Session.query(GA_Stat).filter(GA_Stat.stat_name!='Downloads')
        if month != 'all':
            q = q.filter(GA_Stat.period_name==month)
        entries = q.order_by('GA_Stat.period_name, GA_Stat.stat_name, GA_Stat.key').all()

        response.headers['Content-Type'] = "text/csv; charset=utf-8"
        response.headers['Content-Disposition'] = str('attachment; filename=stats_%s.csv' % (month,))

        writer = csv.writer(response)
        writer.writerow(["Period", "Statistic", "Key", "Value"])

        for entry in entries:
            writer.writerow([entry.period_name.encode('utf-8'),
                             entry.stat_name.encode('utf-8'),
                             entry.key.encode('utf-8'),
                             entry.value.encode('utf-8')])

    def index(self):
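        '''Assemble the site-wide totals (for a single month or for all months) for the statistics report.'''
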
        # Get the month details by fetching distinct values and determining the
        # month names from the values.
        c.months, c.day = _month_details(GA_Stat)

        # Work out which month to show, based on the 'month' query parameter
        c.month_desc = 'all months'
        c.month = request.params.get('month', '')
        if c.month:
            c.month_desc = ''.join([m[1] for m in c.months if m[0]==c.month])

        q = model.Session.query(GA_Stat).\
            filter(GA_Stat.stat_name=='Totals')
        if c.month:
            q = q.filter(GA_Stat.period_name==c.month)
        entries = q.order_by('ga_stat.key').all()
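
        # Tidy a statistic for display: round rates to two decimal places,
        # format 'Average time on site' as HH:MM:SS, add '%' to percentage
        # stats and cast the totals to int.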
        def clean_key(key, val):
            if key in ['Average time on site', 'Pages per visit', 'New visits', 'Bounce rate (home page)']:
                val = "%.2f" % round(float(val), 2)
                if key == 'Average time on site':
                    mins, secs = divmod(float(val), 60)
                    hours, mins = divmod(mins, 60)
                    val = '%02d:%02d:%02d (%s seconds) ' % (hours, mins, secs, val)
                if key in ['New visits','Bounce rate (home page)']:
                    val = "%s%%" % val
            if key in ['Total page views', 'Total visits']:
                val = int(val)

            return key, val

        # Query historic values for sparkline rendering
        sparkline_query = model.Session.query(GA_Stat)\
            .filter(GA_Stat.stat_name=='Totals')\
            .order_by(GA_Stat.period_name)
        sparkline_data = {}
        for x in sparkline_query:
            sparkline_data[x.key] = sparkline_data.get(x.key, [])
            key, val = clean_key(x.key, float(x.value))
            tooltip = '%s: %s' % (_get_month_name(x.period_name), val)
            sparkline_data[x.key].append((tooltip, x.value))
        # Trim off the latest month, as its incomplete data would show as a huge drop-off
        for key in sparkline_data:
            sparkline_data[key] = sparkline_data[key][:-1]

        c.global_totals = []
        if c.month:
            for e in entries:
                key, val = clean_key(e.key, e.value)
                sparkline = sparkline_data[e.key]
                c.global_totals.append((key, val, sparkline))
        else:
            d = collections.defaultdict(list)
            for e in entries:
                d[e.key].append(float(e.value))
            for k, v in d.iteritems():
                if k in ['Total page views', 'Total visits']:
                    v = sum(v)
                else:
                    v = float(sum(v))/float(len(v))
                sparkline = sparkline_data[k]
                key, val = clean_key(k, v)

                c.global_totals.append((key, val, sparkline))
        # Sort the global totals into a more pleasant order
        def sort_func(x):
            key = x[0]
            total_order = ['Total page views','Total visits','Pages per visit']
            if key in total_order:
                return total_order.index(key)
            return 999
        c.global_totals = sorted(c.global_totals, key=sort_func)

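        # Statistic categories (GA_Stat.stat_name values) and the short
        # identifiers they are reported under.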
        keys = {
            'Browser versions': 'browser_versions',
            'Browsers': 'browsers',
            'Operating Systems versions': 'os_versions',
            'Operating Systems': 'os',
            'Social sources': 'social_networks',
            'Languages': 'languages',
            'Country': 'country'