import logging | import logging |
import datetime | import datetime |
import os | import os |
from pylons import config | from pylons import config |
from ckan.lib.cli import CkanCommand | from ckan.lib.cli import CkanCommand |
# No other CKAN imports allowed until _load_config is run, | # No other CKAN imports allowed until _load_config is run, |
# or logging is disabled | # or logging is disabled |
class InitDB(CkanCommand):
    """Initialise the extension's database tables
    """
    summary = __doc__.split('\n')[0]
    usage = __doc__
    max_args = 0
    min_args = 0

    def command(self):
        """Create the ga_model tables in the configured CKAN database."""
        self._load_config()

        # Deferred imports: CKAN modules must not be imported before
        # _load_config() has run (see the note at the top of the file).
        import ckan.model as model
        model.Session.remove()
        model.Session.configure(bind=model.meta.engine)

        import ga_model
        ga_model.init_tables()

        logging.getLogger('ckanext.ga_report').info("DB tables are setup")
class GetAuthToken(CkanCommand):
    """ Gets the Google auth token

    Usage: paster getauthtoken <credentials_file>

    Where <credentials_file> is the file name containing the details
    for the service (obtained from https://code.google.com/apis/console).
    By default this is set to credentials.json
    """
    summary = __doc__.split('\n')[0]
    usage = __doc__
    # BUG FIX: the command accepts an optional <credentials_file> argument
    # (see usage above and self.args[0] below), but max_args was 0, which
    # made paster reject the documented argument with "too many arguments".
    max_args = 1
    min_args = 0

    def command(self):
        """
        In this case we don't want a valid service, but rather just to
        force the user through the auth flow. We allow this to complete to
        act as a form of verification instead of just getting the token and
        assuming it is correct.
        """
        from ga_auth import init_service
        # Writes the obtained token to token.dat; falls back to the
        # default credentials file when no argument is supplied.
        init_service('token.dat',
                     self.args[0] if self.args
                     else 'credentials.json')
class FixTimePeriods(CkanCommand):
    """
    Fixes the 'All' records for GA_Urls

    It is possible that older urls that haven't recently been visited
    do not have All records. This command will traverse through those
    records and generate valid All records for them.
    """
    summary = __doc__.split('\n')[0]
    usage = __doc__
    max_args = 0
    min_args = 0

    # BUG FIX: the redundant pass-through __init__ was removed (the
    # inherited one is identical) and, more importantly, _load_config()
    # now runs BEFORE any CKAN import, as required by the note at the
    # top of the file — previously ckan.model was imported first.
    def command(self):
        """Generate missing 'All' period records for GA_Url rows."""
        self._load_config()
        import ckan.model as model
        from ga_model import post_update_url_stats
        model.Session.remove()
        model.Session.configure(bind=model.meta.engine)
        log = logging.getLogger('ckanext.ga_report')

        log.info("Updating 'All' records for old URLs")
        post_update_url_stats()
        log.info("Processing complete")
class LoadAnalytics(CkanCommand): | class LoadAnalytics(CkanCommand): |
"""Get data from Google Analytics API and save it | """Get data from Google Analytics API and save it |
in the ga_model | in the ga_model |
Usage: paster loadanalytics <time-period> | Usage: paster loadanalytics <time-period> |
Where <time-period> is: | Where <time-period> is: |
all - data for all time | all - data for all time |
latest - (default) just the 'latest' data | latest - (default) just the 'latest' data |
YYYY-MM - just data for the specific month | YYYY-MM - just data for the specific month |
""" | """ |
summary = __doc__.split('\n')[0] | summary = __doc__.split('\n')[0] |
usage = __doc__ | usage = __doc__ |
max_args = 1 | max_args = 1 |
min_args = 0 | min_args = 0 |
def __init__(self, name): | def __init__(self, name): |
super(LoadAnalytics, self).__init__(name) | super(LoadAnalytics, self).__init__(name) |
self.parser.add_option('-d', '--delete-first', | self.parser.add_option('-d', '--delete-first', |
action='store_true', | action='store_true', |
default=False, | default=False, |
dest='delete_first', | dest='delete_first', |
help='Delete data for the period first') | help='Delete data for the period first') |
self.parser.add_option('-s', '--skip_url_stats', | self.parser.add_option('-s', '--skip_url_stats', |
action='store_true', | action='store_true', |
default=False, | default=False, |
dest='skip_url_stats', | dest='skip_url_stats', |
help='Skip the download of URL data - just do site-wide stats') | help='Skip the download of URL data - just do site-wide stats') |
self.token = "" | |
def command(self): | def command(self): |
self._load_config() | self._load_config() |
from download_analytics import DownloadAnalytics | from download_analytics import DownloadAnalytics |
from ga_auth import (init_service, get_profile_id) | from ga_auth import (init_service, get_profile_id) |
ga_token_filepath = os.path.expanduser(config.get('googleanalytics.token.filepath', '')) | ga_token_filepath = os.path.expanduser(config.get('googleanalytics.token.filepath', '')) |
if not ga_token_filepath: | if not ga_token_filepath: |
print 'ERROR: In the CKAN config you need to specify the filepath of the ' \ | print 'ERROR: In the CKAN config you need to specify the filepath of the ' \ |
'Google Analytics token file under key: googleanalytics.token.filepath' | 'Google Analytics token file under key: googleanalytics.token.filepath' |
return | return |
try: | try: |
svc = init_service(ga_token_filepath, None) | self.token, svc = init_service(ga_token_filepath, None) |
except TypeError: | except TypeError: |
print ('Have you correctly run the getauthtoken task and ' | print ('Have you correctly run the getauthtoken task and ' |
'specified the correct token file in the CKAN config under ' | 'specified the correct token file in the CKAN config under ' |
'"googleanalytics.token.filepath"?') | '"googleanalytics.token.filepath"?') |
return | return |
downloader = DownloadAnalytics(svc, profile_id=get_profile_id(svc), | downloader = DownloadAnalytics(svc, self.token, profile_id=get_profile_id(svc), |
delete_first=self.options.delete_first, | delete_first=self.options.delete_first, |
skip_url_stats=self.options.skip_url_stats) | skip_url_stats=self.options.skip_url_stats) |
time_period = self.args[0] if self.args else 'latest' | time_period = self.args[0] if self.args else 'latest' |
if time_period == 'all': | if time_period == 'all': |
downloader.all_() | downloader.all_() |
elif time_period == 'latest': | elif time_period == 'latest': |
downloader.latest() | downloader.latest() |
else: | else: |
# The month to use | # The month to use |
for_date = datetime.datetime.strptime(time_period, '%Y-%m') | for_date = datetime.datetime.strptime(time_period, '%Y-%m') |
downloader.specific_month(for_date) | downloader.specific_month(for_date) |
import re | import re |
import csv | import csv |
import sys | import sys |
import json | import json |
import logging | import logging |
import operator | import operator |
import collections | import collections |
from ckan.lib.base import (BaseController, c, g, render, request, response, abort) | from ckan.lib.base import (BaseController, c, g, render, request, response, abort) |
import sqlalchemy | import sqlalchemy |
from sqlalchemy import func, cast, Integer | from sqlalchemy import func, cast, Integer |
import ckan.model as model | import ckan.model as model |
from ga_model import GA_Url, GA_Stat, GA_ReferralStat, GA_Publisher | from ga_model import GA_Url, GA_Stat, GA_ReferralStat, GA_Publisher |
# Module-level logger.  NOTE(review): the name uses a hyphen
# ('ckanext.ga-report') while the command module logs to
# 'ckanext.ga_report' — confirm which name the logging config expects.
log = logging.getLogger('ckanext.ga-report')

# Resource-download stats only exist from this 'YYYY-MM' period onward;
# for earlier months _get_packages reports downloads as 'No data'.
DOWNLOADS_AVAILABLE_FROM = '2012-12'
def _get_month_name(strdate): | def _get_month_name(strdate): |
import calendar | import calendar |
from time import strptime | from time import strptime |
d = strptime(strdate, '%Y-%m') | d = strptime(strdate, '%Y-%m') |
return '%s %s' % (calendar.month_name[d.tm_mon], d.tm_year) | return '%s %s' % (calendar.month_name[d.tm_mon], d.tm_year) |
def _get_unix_epoch(strdate): | def _get_unix_epoch(strdate): |
from time import strptime,mktime | from time import strptime,mktime |
d = strptime(strdate, '%Y-%m') | d = strptime(strdate, '%Y-%m') |
return int(mktime(d)) | return int(mktime(d)) |
def _month_details(cls, stat_key=None):
    '''
    Returns a list of all the periods for which we have data, unfortunately
    knows too much about the type of the cls being passed as GA_Url has a
    more complex query

    This may need extending if we add a period_name to the stats

    :param cls: a mapped stats class (GA_Stat or GA_Url) exposing
        period_name and period_complete_day columns
    :param stat_key: optional cls.stat_name value to restrict the rows
    :returns: ([(period_name, month_label), ...] newest first,
               ordinal day string like '13th' or None)
    '''
    months = []
    day = None

    # Distinct period names, excluding the aggregate 'All' row.
    # NOTE(review): .distinct(<column>) is PostgreSQL's DISTINCT ON, and
    # the order_by uses a raw SQL string — presumably a Postgres backend;
    # confirm before porting to another database.
    q = model.Session.query(cls.period_name,cls.period_complete_day)\
        .filter(cls.period_name!='All').distinct(cls.period_name)
    if stat_key:
        # Only stat-type classes carry stat_name; filter when requested.
        q= q.filter(cls.stat_name==stat_key)

    vals = q.order_by("period_name desc").all()

    if vals and vals[0][1]:
        # Newest month first: turn its period_complete_day into an
        # ordinal string ('1st', '2nd', '13th', ...).
        day = int(vals[0][1])
        ordinal = 'th' if 11 <= day <= 13 \
            else {1:'st',2:'nd',3:'rd'}.get(day % 10, 'th')
        day = "{day}{ordinal}".format(day=day, ordinal=ordinal)

    for m in vals:
        months.append( (m[0], _get_month_name(m[0])))

    return months, day
class GaReport(BaseController):
    """Controller for the site-wide Google Analytics report pages."""

    def csv(self, month):
        """Serve the site-wide stats for *month* ('all' = every period) as CSV."""
        import csv

        # Download counts are reported on the dataset pages, not here.
        q = model.Session.query(GA_Stat).filter(GA_Stat.stat_name!='Downloads')
        if month != 'all':
            q = q.filter(GA_Stat.period_name==month)
        entries = q.order_by('GA_Stat.period_name, GA_Stat.stat_name, GA_Stat.key').all()

        response.headers['Content-Type'] = "text/csv; charset=utf-8"
        response.headers['Content-Disposition'] = str('attachment; filename=stats_%s.csv' % (month,))

        writer = csv.writer(response)
        writer.writerow(["Period", "Statistic", "Key", "Value"])

        for entry in entries:
            writer.writerow([entry.period_name.encode('utf-8'),
                             entry.stat_name.encode('utf-8'),
                             entry.key.encode('utf-8'),
                             entry.value.encode('utf-8')])

    def index(self):
        """Render the site-wide report for one month, or aggregated over all."""
        # Get the month details by fetching distinct values and determining the
        # month names from the values.
        c.months, c.day = _month_details(GA_Stat)

        # Work out which month to show, based on query params of the first item
        c.month_desc = 'all months'
        c.month = request.params.get('month', '')
        if c.month:
            c.month_desc = ''.join([m[1] for m in c.months if m[0]==c.month])

        q = model.Session.query(GA_Stat).\
            filter(GA_Stat.stat_name=='Totals')
        if c.month:
            q = q.filter(GA_Stat.period_name==c.month)
        entries = q.order_by('ga_stat.key').all()

        def clean_key(key, val):
            # Normalise a stat for display: round averages to 2 d.p.,
            # format time-on-site as HH:MM:SS, suffix rates with '%',
            # and cast plain counts to int.
            if key in ['Average time on site', 'Pages per visit', 'New visits', 'Bounce rate (home page)']:
                val = "%.2f" % round(float(val), 2)
                if key == 'Average time on site':
                    mins, secs = divmod(float(val), 60)
                    hours, mins = divmod(mins, 60)
                    val = '%02d:%02d:%02d (%s seconds) ' % (hours, mins, secs, val)
                if key in ['New visits','Bounce rate (home page)']:
                    val = "%s%%" % val
            if key in ['Total page views', 'Total visits']:
                val = int(val)

            return key, val

        # Query historic values for sparkline rendering
        sparkline_query = model.Session.query(GA_Stat)\
            .filter(GA_Stat.stat_name=='Totals')\
            .order_by(GA_Stat.period_name)
        sparkline_data = {}
        for x in sparkline_query:
            sparkline_data[x.key] = sparkline_data.get(x.key,[])
            key, val = clean_key(x.key,float(x.value))
            tooltip = '%s: %s' % (_get_month_name(x.period_name), val)
            sparkline_data[x.key].append( (tooltip,x.value) )
        # Trim the latest month, as it looks like a huge dropoff
        for key in sparkline_data:
            sparkline_data[key] = sparkline_data[key][:-1]

        c.global_totals = []
        if c.month:
            # A single month: pass each stat through unchanged.
            for e in entries:
                key, val = clean_key(e.key, e.value)
                sparkline = sparkline_data[e.key]
                c.global_totals.append((key, val, sparkline))
        else:
            # All months: sum the plain counts, average everything else.
            d = collections.defaultdict(list)
            for e in entries:
                d[e.key].append(float(e.value))
            for k, v in d.iteritems():
                if k in ['Total page views', 'Total visits']:
                    v = sum(v)
                else:
                    v = float(sum(v))/float(len(v))
                sparkline = sparkline_data[k]
                key, val = clean_key(k,v)

                c.global_totals.append((key, val, sparkline))
        # Sort the global totals into a more pleasant order
        def sort_func(x):
            key = x[0]
            total_order = ['Total page views','Total visits','Pages per visit']
            if key in total_order:
                return total_order.index(key)
            return 999  # anything not in total_order keeps relative order at the end
        c.global_totals = sorted(c.global_totals, key=sort_func)

        # Maps a GA_Stat.stat_name to the template-context attribute it fills.
        keys = {
            'Browser versions': 'browser_versions',
            'Browsers': 'browsers',
            'Operating Systems versions': 'os_versions',
            'Operating Systems': 'os',
            'Social sources': 'social_networks',
            'Languages': 'languages',
            'Country': 'country'
        }

        def shorten_name(name, length=60):
            # NOTE(review): the comparison uses the literal 60, not the
            # length parameter — confirm whether that is intentional.
            return (name[:length] + '..') if len(name) > 60 else name

        def fill_out_url(url):
            # Make a site-relative URL absolute against the site root.
            import urlparse
            return urlparse.urljoin(g.site_url, url)

        c.social_referrer_totals, c.social_referrers = [], []
        # Per-(url, source) referral rows, most-visited first.
        q = model.Session.query(GA_ReferralStat)
        q = q.filter(GA_ReferralStat.period_name==c.month) if c.month else q
        q = q.order_by('ga_referrer.count::int desc')
        for entry in q.all():
            c.social_referrers.append((shorten_name(entry.url), fill_out_url(entry.url),
                                       entry.source,entry.count))

        # Totals per URL across all sources.
        q = model.Session.query(GA_ReferralStat.url,
                                func.sum(GA_ReferralStat.count).label('count'))
        q = q.filter(GA_ReferralStat.period_name==c.month) if c.month else q
        q = q.order_by('count desc').group_by(GA_ReferralStat.url)
        for entry in q.all():
            c.social_referrer_totals.append((shorten_name(entry[0]), fill_out_url(entry[0]),'',
                                             entry[1]))

        for k, v in keys.iteritems():
            q = model.Session.query(GA_Stat).\
                filter(GA_Stat.stat_name==k).\
                order_by(GA_Stat.period_name)
            # Buffer the tabular data
            if c.month:
                entries = []
                q = q.filter(GA_Stat.period_name==c.month).\
                    order_by('ga_stat.value::int desc')
            # Aggregate values per key across the selected rows.
            d = collections.defaultdict(int)
            for e in q.all():
                d[e.key] += int(e.value)
            entries = []
            for key, val in d.iteritems():
                entries.append((key,val,))
            entries = sorted(entries, key=operator.itemgetter(1), reverse=True)

            # Run a query on all months to gather graph data
            graph_query = model.Session.query(GA_Stat).\
                filter(GA_Stat.stat_name==k).\
                order_by(GA_Stat.period_name)
            graph_dict = {}
            for stat in graph_query:
                graph_dict[ stat.key ] = graph_dict.get(stat.key,{
                    'name':stat.key,
                    'raw': {}
                })
                graph_dict[ stat.key ]['raw'][stat.period_name] = float(stat.value)
            # Table rows first (in table order), then any graph-only keys.
            stats_in_table = [x[0] for x in entries]
            stats_not_in_table = set(graph_dict.keys()) - set(stats_in_table)
            stats = stats_in_table + sorted(list(stats_not_in_table))
            graph = [graph_dict[x] for x in stats]
            setattr(c, v+'_graph', json.dumps( _to_rickshaw(graph,percentageMode=True) ))

            # Get the total for each set of values and then set the value as
            # a percentage of the total
            if k == 'Social sources':
                total = sum([x for n,x,graph in c.global_totals if n == 'Total visits'])
            else:
                total = sum([num for _,num in entries])
            setattr(c, v, [(k,_percent(v,total)) for k,v in entries ])

        return render('ga_report/site/index.html')
class GaDatasetReport(BaseController): | class GaDatasetReport(BaseController): |
""" | """ |
Displays the pageview and visit count for datasets | Displays the pageview and visit count for datasets |
with options to filter by publisher and time period. | with options to filter by publisher and time period. |
""" | """ |
def publisher_csv(self, month): | def publisher_csv(self, month): |
''' | ''' |
Returns a CSV of each publisher with the total number of dataset | Returns a CSV of each publisher with the total number of dataset |
views & visits. | views & visits. |
''' | ''' |
c.month = month if not month == 'all' else '' | c.month = month if not month == 'all' else '' |
response.headers['Content-Type'] = "text/csv; charset=utf-8" | response.headers['Content-Type'] = "text/csv; charset=utf-8" |
response.headers['Content-Disposition'] = str('attachment; filename=publishers_%s.csv' % (month,)) | response.headers['Content-Disposition'] = str('attachment; filename=publishers_%s.csv' % (month,)) |
writer = csv.writer(response) | writer = csv.writer(response) |
writer.writerow(["Publisher Title", "Publisher Name", "Views", "Visits", "Period Name"]) | writer.writerow(["Publisher Title", "Publisher Name", "Views", "Visits", "Period Name"]) |
top_publishers = _get_top_publishers(limit=None) | top_publishers = _get_top_publishers(limit=None) |
for publisher,view,visit in top_publishers: | for publisher,view,visit in top_publishers: |
writer.writerow([publisher.title.encode('utf-8'), | writer.writerow([publisher.title.encode('utf-8'), |
publisher.name.encode('utf-8'), | publisher.name.encode('utf-8'), |
view, | view, |
visit, | visit, |
month]) | month]) |
    def dataset_csv(self, id='all', month='all'):
        '''
        Returns a CSV with the number of views & visits for each dataset.

        :param id: A Publisher ID or None if you want for all
        :param month: The time period, or 'all'
        '''
        c.month = month if not month == 'all' else ''
        if id != 'all':
            c.publisher = model.Group.get(id)
            if not c.publisher:
                abort(404, 'A publisher with that name could not be found')

        # NOTE(review): when id == 'all', c.publisher is never assigned here,
        # and c.publisher_name is not assigned on either path — both rely on
        # Pylons tmpl_context behaviour / values set elsewhere.  Confirm.
        packages = self._get_packages(publisher=c.publisher, month=c.month)

        response.headers['Content-Type'] = "text/csv; charset=utf-8"
        response.headers['Content-Disposition'] = \
            str('attachment; filename=datasets_%s_%s.csv' % (c.publisher_name, month,))

        writer = csv.writer(response)
        writer.writerow(["Dataset Title", "Dataset Name", "Views", "Visits", "Resource downloads", "Period Name"])

        for package,view,visit,downloads in packages:
            writer.writerow([package.title.encode('utf-8'),
                             package.name.encode('utf-8'),
                             view,
                             visit,
                             downloads,
                             month])
def publishers(self): | def publishers(self): |
'''A list of publishers and the number of views/visits for each''' | '''A list of publishers and the number of views/visits for each''' |
# Get the month details by fetching distinct values and determining the | # Get the month details by fetching distinct values and determining the |
# month names from the values. | # month names from the values. |
c.months, c.day = _month_details(GA_Url) | c.months, c.day = _month_details(GA_Url) |
# Work out which month to show, based on query params of the first item | # Work out which month to show, based on query params of the first item |
c.month = request.params.get('month', '') | c.month = request.params.get('month', '') |
c.month_desc = 'all months' | c.month_desc = 'all months' |
if c.month: | if c.month: |
c.month_desc = ''.join([m[1] for m in c.months if m[0]==c.month]) | c.month_desc = ''.join([m[1] for m in c.months if m[0]==c.month]) |
c.top_publishers = _get_top_publishers() | c.top_publishers = _get_top_publishers() |
graph_data = _get_top_publishers_graph() | graph_data = _get_top_publishers_graph() |
c.top_publishers_graph = json.dumps( _to_rickshaw(graph_data) ) | c.top_publishers_graph = json.dumps( _to_rickshaw(graph_data) ) |
return render('ga_report/publisher/index.html') | x = render('ga_report/publisher/index.html') |
return x | |
def _get_packages(self, publisher=None, month='', count=-1): | def _get_packages(self, publisher=None, month='', count=-1): |
'''Returns the datasets in order of views''' | '''Returns the datasets in order of views''' |
have_download_data = True | have_download_data = True |
month = month or 'All' | month = month or 'All' |
if month != 'All': | if month != 'All': |
have_download_data = month >= DOWNLOADS_AVAILABLE_FROM | have_download_data = month >= DOWNLOADS_AVAILABLE_FROM |
q = model.Session.query(GA_Url,model.Package)\ | q = model.Session.query(GA_Url,model.Package)\ |
.filter(model.Package.name==GA_Url.package_id)\ | .filter(model.Package.name==GA_Url.package_id)\ |
.filter(GA_Url.url.like('/dataset/%')) | .filter(GA_Url.url.like('/dataset/%')) |
if publisher: | if publisher: |
q = q.filter(GA_Url.department_id==publisher.name) | q = q.filter(GA_Url.department_id==publisher.name) |
q = q.filter(GA_Url.period_name==month) | q = q.filter(GA_Url.period_name==month) |
q = q.order_by('ga_url.pageviews::int desc') | q = q.order_by('ga_url.pageviews::int desc') |
top_packages = [] | top_packages = [] |
if count == -1: | if count == -1: |
entries = q.all() | entries = q.all() |
else: | else: |
entries = q.limit(count) | entries = q.limit(count) |
for entry,package in entries: | for entry,package in entries: |
if package: | if package: |
# Downloads .... | # Downloads .... |
if have_download_data: | if have_download_data: |
dls = model.Session.query(GA_Stat).\ | dls = model.Session.query(GA_Stat).\ |
filter(GA_Stat.stat_name=='Downloads').\ | filter(GA_Stat.stat_name=='Downloads').\ |
filter(GA_Stat.key==package.name) | filter(GA_Stat.key==package.name) |
if month != 'All': # Fetch everything unless the month is specific | if month != 'All': # Fetch everything unless the month is specific |
dls = dls.filter(GA_Stat.period_name==month) | dls = dls.filter(GA_Stat.period_name==month) |
downloads = 0 | downloads = 0 |
for x in dls: | for x in dls: |
downloads += int(x.value) | downloads += int(x.value) |
else: | else: |
downloads = 'No data' | downloads = 'No data' |
top_packages.append((package, entry.pageviews, entry.visits, downloads)) | top_packages.append((package, entry.pageviews, entry.visits, downloads)) |
else: | else: |
log.warning('Could not find package associated package') | log.warning('Could not find package associated package') |
return top_packages | return top_packages |
def read(self): | def read(self): |
''' | ''' |
Lists the most popular datasets across all publishers | Lists the most popular datasets across all publishers |
''' | ''' |
return self.read_publisher(None) | return self.read_publisher(None) |
def read_publisher(self, id):
    '''
    Lists the most popular datasets for a publisher (or across all publishers).

    id - publisher (group) name, or None for all publishers; may be
         overridden by a 'publisher' query parameter.
    '''
    count = 20  # number of top datasets to list and to graph

    c.publishers = _get_publishers()

    id = request.params.get('publisher', id)
    if id and id != 'all':
        c.publisher = model.Group.get(id)
        if not c.publisher:
            abort(404, 'A publisher with that name could not be found')
        c.publisher_name = c.publisher.name
    c.top_packages = [] # package, dataset_views in c.top_packages

    # Get the month details by fetching distinct values and determining the
    # month names from the values.
    c.months, c.day = _month_details(GA_Url)

    # Work out which month to show, based on query params of the first item
    c.month = request.params.get('month', '')
    if not c.month:
        c.month_desc = 'all months'
    else:
        c.month_desc = ''.join([m[1] for m in c.months if m[0]==c.month])

    # GA_Url stores the all-time aggregate under period_name 'All'.
    # Previously the raw c.month ('' when no month selected) was used in the
    # queries below, so the "all months" view matched no rows at all.
    month = c.month or 'All'

    c.publisher_page_views = 0
    q = model.Session.query(GA_Url).\
        filter(GA_Url.url=='/publisher/%s' % c.publisher_name)
    entry = q.filter(GA_Url.period_name==month).first()
    c.publisher_page_views = entry.pageviews if entry else 0

    c.top_packages = self._get_packages(publisher=c.publisher, count=count, month=month)

    # Graph query: monthly series for the all-time top datasets.
    top_packages_all_time = self._get_packages(publisher=c.publisher, count=count, month='All')
    top_package_names = [ x[0].name for x in top_packages_all_time ]
    graph_query = model.Session.query(GA_Url,model.Package)\
        .filter(model.Package.name==GA_Url.package_id)\
        .filter(GA_Url.url.like('/dataset/%'))\
        .filter(GA_Url.package_id.in_(top_package_names))
    all_series = {}
    for entry,package in graph_query:
        if not package: continue
        if entry.period_name=='All': continue
        all_series[package.name] = all_series.get(package.name,{
            'name':package.title,
            'raw': {}
            })
        all_series[package.name]['raw'][entry.period_name] = int(entry.pageviews)
    # A dataset may have an 'All' row but no monthly rows yet, in which case
    # it never made it into all_series; skip it instead of raising KeyError.
    graph = [ all_series[series_name] for series_name in top_package_names
              if series_name in all_series ]
    c.graph_data = json.dumps( _to_rickshaw(graph) )

    return render('ga_report/publisher/read.html')
def _to_rickshaw(data, percentageMode=False):
    '''Convert series data into the format the Rickshaw graph library wants.

    data - list of {'name': ..., 'raw': {period_name: value}} series; each
           gains a 'data' list of {'x': epoch, 'y': value} points spanning
           every month in c.months (missing months become 0).
    percentageMode - if True, each point is converted to its percentage of
           that month's total, and series that never exceed THRESHOLD are
           rolled up into a single 'Other' series.
    '''
    if not data:
        return data
    # x-axis is every month in c.months. Note that data might not exist
    # for entire history, eg. for recently-added datasets
    x_axis = [x[0] for x in c.months]
    x_axis.reverse() # Ascending order
    x_axis = x_axis[:-1] # Remove latest month
    totals = {}
    for series in data:
        series['data'] = []
        for x_string in x_axis:
            x = _get_unix_epoch( x_string )
            y = series['raw'].get(x_string,0)
            series['data'].append({'x':x,'y':y})
            totals[x] = totals.get(x,0)+y
    if not percentageMode:
        return data
    # Turn all data into percentages
    # Roll insignificant series into a catch-all
    THRESHOLD = 1
    raw_data = data
    data = []
    for series in raw_data:
        for point in series['data']:
            # A month with no views at all previously caused a
            # ZeroDivisionError here; treat it as 0%.
            total = totals[point['x']]
            percentage = (100*float(point['y'])) / total if total else 0
            if not (series in data) and percentage>THRESHOLD:
                data.append(series)
            point['y'] = percentage
    others = [ x for x in raw_data if not (x in data) ]
    if len(others):
        data_other = []
        for i in range(len(x_axis)):
            x = _get_unix_epoch(x_axis[i])
            y = 0
            for series in others:
                y += series['data'][i]['y']
            data_other.append({'x':x,'y':y})
        data.append({
            'name':'Other',
            'data': data_other
            })
    return data
def _get_top_publishers(limit=20):
    '''
    Returns the top publishers by dataset visits for the currently selected
    month (c.month, defaulting to the 'All' aggregate), as a list of
    (Group, views, visits) tuples.  'limit' caps the number returned
    (default 20); a falsy limit returns every publisher.
    '''
    month = c.month or 'All'
    connection = model.Session.connection()
    q = """
        select department_id, sum(pageviews::int) views, sum(visits::int) visits
        from ga_url
        where department_id <> ''
        and package_id <> ''
        and url like '/dataset/%%'
        and period_name=%s
        group by department_id order by views desc
        """
    if limit:
        q += " limit %s;" % (limit)

    publishers = []
    for department_id, views, visits in connection.execute(q, month):
        # Rows reference groups by name; skip any that no longer exist.
        group = model.Group.get(department_id)
        if group:
            publishers.append((group, views, visits))
    return publishers
def _get_top_publishers_graph(limit=20):
    '''
    Returns per-month graph series for the top publishers by all-time
    dataset views, ordered by views descending.  Each series is a dict:
    {'name': publisher title, 'raw': {period_name: views}}.
    (The number to show can be varied with 'limit')
    '''
    connection = model.Session.connection()
    q = """
        select department_id, sum(pageviews::int) views
        from ga_url
        where department_id <> ''
        and package_id <> ''
        and url like '/dataset/%%'
        and period_name='All'
        group by department_id order by views desc
        """
    if limit:
        q = q + " limit %s;" % (limit)

    res = connection.execute(q)
    department_ids = [ row[0] for row in res ]

    # Query for a history graph of these department ids
    q = model.Session.query(
            GA_Url.department_id,
            GA_Url.period_name,
            func.sum(cast(GA_Url.pageviews,sqlalchemy.types.INT)))\
        .filter( GA_Url.department_id.in_(department_ids) )\
        .filter( GA_Url.url.like('/dataset/%') )\
        .filter( GA_Url.package_id!='' )\
        .group_by( GA_Url.department_id, GA_Url.period_name )
    graph_dict = {}
    for dept_id,period_name,views in q:
        if dept_id not in graph_dict:
            group = model.Group.get(dept_id)
            if not group:
                # Group deleted since the stats were gathered; previously
                # this crashed on .title.  Skip it, matching the guard in
                # _get_top_publishers.
                continue
            graph_dict[dept_id] = {'name': group.title, 'raw': {}}
        graph_dict[dept_id]['raw'][period_name] = views
    # Preserve the views-descending order; guard the lookup since deleted
    # groups were skipped above.
    return [ graph_dict[dept_id] for dept_id in department_ids
             if dept_id in graph_dict ]
def _get_publishers():
    '''
    Returns every active publisher as a (name, title) tuple, ordered by name.
    '''
    query = model.Session.query(model.Group)\
        .filter(model.Group.type=='organization')\
        .filter(model.Group.state=='active')\
        .order_by(model.Group.name)
    return [(group.name, group.title) for group in query]
def _percent(num, total): | def _percent(num, total): |
p = 100 * float(num)/float(total) | p = 100 * float(num)/float(total) |
return "%.2f%%" % round(p, 2) | return "%.2f%%" % round(p, 2) |
import os | import os |
import logging | import logging |
import datetime | import datetime |
import httplib | import httplib |
import collections | import collections |
import requests | |
import json | |
from pylons import config | from pylons import config |
from ga_model import _normalize_url | from ga_model import _normalize_url |
import ga_model | import ga_model |
#from ga_client import GA | #from ga_client import GA |
log = logging.getLogger('ckanext.ga-report') | log = logging.getLogger('ckanext.ga-report') |
FORMAT_MONTH = '%Y-%m' | FORMAT_MONTH = '%Y-%m' |
MIN_VIEWS = 50 | MIN_VIEWS = 50 |
MIN_VISITS = 20 | MIN_VISITS = 20 |
MIN_DOWNLOADS = 10 | MIN_DOWNLOADS = 10 |
class DownloadAnalytics(object): | class DownloadAnalytics(object): |
'''Downloads and stores analytics info''' | '''Downloads and stores analytics info''' |
def __init__(self, service=None, token=None, profile_id=None, delete_first=False,
             skip_url_stats=False):
    '''
    service        - Google Analytics service object (optional)
    token          - OAuth bearer token, used for direct API requests
    profile_id     - the GA profile to report on
    delete_first   - wipe stored analytics for a period before re-storing
    skip_url_stats - skip the per-URL (dataset/publisher) statistics
    '''
    # Raises KeyError if 'ga-report.period' is missing from the CKAN config.
    self.period = config['ga-report.period']
    self.service = service
    self.token = token
    self.profile_id = profile_id
    self.delete_first = delete_first
    self.skip_url_stats = skip_url_stats
def specific_month(self, date):
    '''Download and store analytics for the single month containing `date`.'''
    import calendar

    month_start = datetime.datetime(date.year, date.month, 1)
    days_in_month = calendar.monthrange(int(date.year), int(date.month))[1]
    month_end = datetime.datetime(date.year, date.month, days_in_month)

    # If this is the current month, it is only complete up until today.
    now = datetime.datetime.now()
    if (now.year, now.month) == (date.year, date.month):
        days_in_month = now.day
        month_end = now

    period = (date.strftime(FORMAT_MONTH), days_in_month,
              month_start, month_end)
    self.download_and_store((period,))
def latest(self):
    '''Download and store analytics for the current, still-incomplete period.'''
    if self.period != 'monthly':
        raise NotImplementedError
    # From the first of this month up to today.
    now = datetime.datetime.now()
    month_start = datetime.datetime(now.year, now.month, 1)
    periods = ((now.strftime(FORMAT_MONTH), now.day, month_start, now),)
    self.download_and_store(periods)
def for_date(self, for_date):
    '''Download and store analytics for every month from for_date's month
    through to (and including) the current month.

    Fully-elapsed months get period_complete_day 0 (complete); the current
    month is marked as complete only up to today.
    '''
    # BUG FIX: this previously asserted on the undefined name `since_date`,
    # raising NameError on every call.
    assert isinstance(for_date, datetime.datetime)
    periods = [] # (period_name, period_complete_day, start_date, end_date)
    if self.period == 'monthly':
        year = for_date.year
        month = for_date.month
        now = datetime.datetime.now()
        first_of_this_month = datetime.datetime(now.year, now.month, 1)
        while True:
            first_of_the_month = datetime.datetime(year, month, 1)
            if first_of_the_month == first_of_this_month:
                # The current month - only complete up to today.
                periods.append((now.strftime(FORMAT_MONTH),
                                now.day,
                                first_of_this_month, now))
                break
            elif first_of_the_month < first_of_this_month:
                # A fully-elapsed month.  Adding 40 days always lands in the
                # following month regardless of month length.
                in_the_next_month = first_of_the_month + datetime.timedelta(40)
                last_of_the_month = datetime.datetime(in_the_next_month.year,
                                                       in_the_next_month.month, 1)\
                                                       - datetime.timedelta(1)
                # BUG FIX: label the period with its own month.  Previously
                # now.strftime was used, so every historical month was stored
                # under the current month's name.
                periods.append((first_of_the_month.strftime(FORMAT_MONTH), 0,
                                first_of_the_month, last_of_the_month))
            else:
                # first_of_the_month has got to the future somehow
                break
            month += 1
            if month > 12:
                year += 1
                month = 1
    else:
        raise NotImplementedError
    self.download_and_store(periods)
@staticmethod | @staticmethod |
def get_full_period_name(period_name, period_complete_day): | def get_full_period_name(period_name, period_complete_day): |
if period_complete_day: | if period_complete_day: |
return period_name + ' (up to %ith)' % period_complete_day | return period_name + ' (up to %ith)' % period_complete_day |
else: | else: |
return period_name | return period_name |
def download_and_store(self, periods):
    '''Fetch analytics from Google Analytics and persist them, one period
    at a time.

    periods - iterable of (period_name, period_complete_day, start_date,
              end_date) tuples, as built by specific_month()/latest()/for_date().
    '''
    for period_name, period_complete_day, start_date, end_date in periods:
        log.info('Period "%s" (%s - %s)',
                 self.get_full_period_name(period_name, period_complete_day),
                 start_date.strftime('%Y-%m-%d'),
                 end_date.strftime('%Y-%m-%d'))

        if self.delete_first:
            log.info('Deleting existing Analytics for this period "%s"',
                     period_name)
            ga_model.delete(period_name)

        if not self.skip_url_stats:
            # Clean out old url data before storing the new
            ga_model.pre_update_url_stats(period_name)

            accountName = config.get('googleanalytics.account')

            log.info('Downloading analytics for dataset views')
            # GA regex filter ('~' prefix): paths under /<account>/dataset/<name>
            data = self.download(start_date, end_date, '~/%s/dataset/[a-z0-9-_]+' % accountName)

            log.info('Storing dataset views (%i rows)', len(data.get('url')))
            self.store(period_name, period_complete_day, data, )

            log.info('Downloading analytics for publisher views')
            data = self.download(start_date, end_date, '~/%s/publisher/[a-z0-9-_]+' % accountName)

            log.info('Storing publisher views (%i rows)', len(data.get('url')))
            self.store(period_name, period_complete_day, data,)

            # Make sure the All records are correct.
            ga_model.post_update_url_stats()

            log.info('Associating datasets with their publisher')
            ga_model.update_publisher_stats(period_name) # about 30 seconds.

        # Site-wide and social stats are collected even when URL stats
        # are skipped.
        log.info('Downloading and storing analytics for site-wide stats')
        self.sitewide_stats( period_name, period_complete_day )

        log.info('Downloading and storing analytics for social networks')
        self.update_social_info(period_name, start_date, end_date)
def update_social_info(self, period_name, start_date, end_date):
    '''Fetch social-network referral entrances per landing page from GA
    and store them for the given period.'''
    start_date = start_date.strftime('%Y-%m-%d')
    end_date = end_date.strftime('%Y-%m-%d')
    query = 'ga:hasSocialSourceReferral=~Yes$'
    metrics = 'ga:entrances'
    sort = '-ga:entrances'

    try:
        # Because of issues of invalid responses, we are going to make these requests
        # ourselves (via _get_json, which supplies the auth header).
        args = dict(ids='ga:' + self.profile_id,
                    filters=query,
                    metrics=metrics,
                    sort=sort,
                    dimensions="ga:landingPagePath,ga:socialNetwork")
        # Hyphenated GA API parameters cannot be dict() keywords.
        # BUG FIX: 'max_results' was previously sent (and ignored by the
        # API); the parameter name is 'max-results', as used in download().
        args['max-results'] = 10000
        args['start-date'] = start_date
        args['end-date'] = end_date
        results = self._get_json(args)
    except Exception as e:
        log.exception(e)
        results = dict(url=[])

    data = collections.defaultdict(list)
    rows = results.get('rows',[])
    for row in rows:
        # row = [landing page path, social network, entrances]
        url = _normalize_url('http:/' + row[0])
        data[url].append( (row[1], int(row[2]),) )
    ga_model.update_social(period_name, data)
def download(self, start_date, end_date, path=None):
    '''Get data from GA for a given time period.

    Returns {'url': [(url, pageviews, visits), ...]} restricted to pages
    under /dataset/ or /publisher/.  A failed request is logged and
    re-raised, aborting the run rather than silently storing nothing.
    '''
    start_date = start_date.strftime('%Y-%m-%d')
    end_date = end_date.strftime('%Y-%m-%d')
    query = 'ga:pagePath=%s$' % path
    metrics = 'ga:pageviews, ga:visits'
    sort = '-ga:pageviews'

    # Supported query params at
    # https://developers.google.com/analytics/devguides/reporting/core/v3/reference
    try:
        # Because of issues of invalid responses, we are going to make these requests
        # ourselves.
        headers = {'authorization': 'Bearer ' + self.token}

        args = {}
        args["sort"] = "-ga:pageviews"
        args["max-results"] = 100000
        args["dimensions"] = "ga:pagePath"
        args["start-date"] = start_date
        args["end-date"] = end_date
        args["metrics"] = metrics
        args["ids"] = "ga:" + self.profile_id
        args["filters"] = query
        args["alt"] = "json"

        r = requests.get("https://www.googleapis.com/analytics/v3/data/ga", params=args, headers=headers)
        if r.status_code != 200:
            raise Exception("Request with params: %s failed" % args)

        results = json.loads(r.content)
    except Exception as e:
        log.exception(e)
        # Bare `raise` preserves the original traceback (``raise e``
        # resets it under Python 2).  Debug `print` of the result keys
        # removed.
        raise

    packages = []
    log.info("There are %d results" % results['totalResults'])
    for entry in results.get('rows'):
        (loc,pageviews,visits) = entry
        url = _normalize_url('http:/' + loc) # strips off domain e.g. www.data.gov.uk or data.gov.uk

        if not url.startswith('/dataset/') and not url.startswith('/publisher/'):
            # filter out strays like:
            # /data/user/login?came_from=http://data.gov.uk/dataset/os-code-point-open
            # /403.html?page=/about&from=http://data.gov.uk/publisher/planning-inspectorate
            continue
        packages.append( (url, pageviews, visits,) ) # Temporary hack
    return dict(url=packages)
def store(self, period_name, period_complete_day, data):
    '''Persist the per-URL stats from a download() result; anything
    without a 'url' entry is ignored.'''
    if 'url' not in data:
        return
    ga_model.update_url_stats(period_name, period_complete_day, data['url'])
def sitewide_stats(self, period_name, period_complete_day):
    '''Collect and store every category of site-wide statistics for the
    month named by period_name ("YYYY-MM").'''
    import calendar

    year, month = period_name.split('-')
    days_in_month = calendar.monthrange(int(year), int(month))[1]
    start_date = '%s-01' % period_name
    end_date = '%s-%s' % (period_name, days_in_month)

    # Each collector downloads one report and stores it via ga_model.
    collectors = ['_totals_stats', '_social_stats', '_os_stats',
                  '_locale_stats', '_browser_stats', '_mobile_stats', '_download_stats']
    for name in collectors:
        log.info('Downloading analytics for %s' % name.split('_')[1])
        getattr(self, name)(start_date, end_date, period_name, period_complete_day)
def _get_results(result_data, f):
    '''Group result rows by the key f(row) and sum each group's row[1].

    NOTE(review): defined without `self`; it does not use instance state.
    '''
    totals = collections.defaultdict(int)
    for row in result_data:
        totals[f(row)] += row[1]
    return dict(totals)
def _get_json(self, params, prev_fail=False): | |
if prev_fail: | |
import os | |
ga_token_filepath = os.path.expanduser(config.get('googleanalytics.token.filepath', '')) | |
if not ga_token_filepath: | |
print 'ERROR: In the CKAN config you need to specify the filepath of the ' \ | |
'Google Analytics token file under key: googleanalytics.token.filepath' | |
return | |
try: | |
self.token, svc = init_service(ga_token_filepath, None) | |
except TypeError: | |
print ('Have you correctly run the getauthtoken task and ' | |
'specified the correct token file in the CKAN config under ' | |
'"googleanalytics.token.filepath"?') | |
try: | |
# Because of issues of invalid responses, we are going to make these requests | |
# ourselves. | |
headers = {'authorization': 'Bearer ' + self.token} | |
r = requests.get("https://www.googleapis.com/analytics/v3/data/ga", params=params, headers=headers) | |
if r.status_code != 200: | |
log.info("STATUS: %s" % (r.status_code,)) | |
log.info("CONTENT: %s" % (r.content,)) | |
raise Exception("Request with params: %s failed" % params) | |
return json.loads(r.content) | |
except Exception, e: | |
if not prev_fail: | |
print e | |
results = self._get_json(self, params, prev_fail=True) | |
else: | |
log.exception(e) | |
return dict(url=[]) | |
def _totals_stats(self, start_date, end_date, period_name, period_complete_day):
    """ Fetches distinct totals, total pageviews etc """
    # --- Total page views -------------------------------------------------
    try:
        args = {}
        args["max-results"] = 100000
        args["start-date"] = start_date
        args["end-date"] = end_date
        args["ids"] = "ga:" + self.profile_id
        args["metrics"] = "ga:pageviews"
        args["sort"] = "-ga:pageviews"
        args["alt"] = "json"
        results = self._get_json(args)
    except Exception as e:
        log.exception(e)
        results = dict(url=[])

    result_data = results.get('rows')
    if not result_data:
        # Previously result_data[0][0] was indexed unconditionally, so a
        # failed download (fallback {'url': []} -> rows is None) crashed
        # with a TypeError.  Guard, as the bounce-rate section already did.
        log.error('No total page view data for period %s', period_name)
        return
    ga_model.update_sitewide_stats(period_name, "Totals", {'Total page views': result_data[0][0]},
        period_complete_day)

    # --- Visit totals and averages ----------------------------------------
    try:
        args = {}
        args["max-results"] = 100000
        args["start-date"] = start_date
        args["end-date"] = end_date
        args["ids"] = "ga:" + self.profile_id
        args["metrics"] = "ga:pageviewsPerVisit,ga:avgTimeOnSite,ga:percentNewVisits,ga:visits"
        args["alt"] = "json"
        results = self._get_json(args)
    except Exception as e:
        log.exception(e)
        results = dict(url=[])

    result_data = results.get('rows')
    if not result_data:
        log.error('No visit data for period %s', period_name)
        return
    data = {
        'Pages per visit': result_data[0][0],
        'Average time on site': result_data[0][1],
        'New visits': result_data[0][2],
        'Total visits': result_data[0][3],
        }
    ga_model.update_sitewide_stats(period_name, "Totals", data, period_complete_day)

    # Bounces from / or another configurable page.
    path = '/%s%s' % (config.get('googleanalytics.account'),
                      config.get('ga-report.bounce_url', '/'))

    try:
        args = {}
        args["max-results"] = 100000
        args["start-date"] = start_date
        args["end-date"] = end_date
        args["ids"] = "ga:" + self.profile_id
        args["filters"] = 'ga:pagePath==%s' % (path,)
        args["dimensions"] = 'ga:pagePath'
        args["metrics"] = "ga:visitBounceRate"
        args["alt"] = "json"
        results = self._get_json(args)
    except Exception as e:
        log.exception(e)
        results = dict(url=[])

    result_data = results.get('rows')
    if not result_data or len(result_data) != 1:
        log.error('Could not pinpoint the bounces for path: %s. Got results: %r',
                  path, result_data)
        return
    results = result_data[0]
    bounces = float(results[1])
    # visitBounceRate is already a %
    log.info('Google reports visitBounceRate as %s', bounces)
    ga_model.update_sitewide_stats(period_name, "Totals", {'Bounce rate (home page)': float(bounces)},
        period_complete_day)
def _locale_stats(self, start_date, end_date, period_name, period_complete_day):
    """ Fetches stats about language and country """
    try:
        # Because of issues of invalid responses, we are going to make these
        # requests ourselves via _get_json rather than the apiclient service.
        # NOTE(review): the unused 'headers' dict was dropped; _get_json
        # presumably attaches the auth token itself -- confirm.
        args = {
            "max-results": 100000,
            "start-date": start_date,
            "end-date": end_date,
            "ids": "ga:" + self.profile_id,
            "dimensions": "ga:language,ga:country",
            "metrics": "ga:pageviews",
            "sort": "-ga:pageviews",
            "alt": "json",
        }
        results = self._get_json(args)
    except Exception as e:
        log.exception(e)
        # Empty 'rows' (the fallback previously used the wrong key, 'url',
        # which made the loops below iterate None and crash).
        results = dict(rows=[])

    result_data = results.get('rows') or []

    # Page views aggregated per language (column 0).
    data = {}
    for result in result_data:
        data[result[0]] = data.get(result[0], 0) + int(result[2])
    self._filter_out_long_tail(data, MIN_VIEWS)
    ga_model.update_sitewide_stats(period_name, "Languages", data, period_complete_day)

    # Page views aggregated per country (column 1).
    data = {}
    for result in result_data:
        data[result[1]] = data.get(result[1], 0) + int(result[2])
    self._filter_out_long_tail(data, MIN_VIEWS)
    ga_model.update_sitewide_stats(period_name, "Country", data, period_complete_day)
def _download_stats(self, start_date, end_date, period_name, period_complete_day):
    """ Fetches stats about data downloads and attributes them to datasets """
    import ckan.model as model

    data = {}

    try:
        # Because of issues of invalid responses, we are going to make these
        # requests ourselves via _get_json rather than the apiclient service.
        # NOTE(review): the unused 'headers' dict was dropped; _get_json
        # presumably attaches the auth token itself -- confirm.
        args = {
            "max-results": 100000,
            "start-date": start_date,
            "end-date": end_date,
            "ids": "ga:" + self.profile_id,
            "filters": 'ga:eventAction==download',
            "dimensions": "ga:eventLabel",
            "metrics": "ga:totalEvents",
            "alt": "json",
        }
        results = self._get_json(args)
    except Exception as e:
        log.exception(e)
        # Empty 'rows' (not 'url') so the checks below degrade gracefully.
        results = dict(rows=[])

    result_data = results.get('rows')
    if not result_data:
        # We may not have data for this time period, so we need to bail
        # early.
        log.info("There is no download data for this time period")
        return

    def process_result_data(result_data, cached=False):
        # Resolve each event label (a resource URL) to the dataset owning
        # that resource and accumulate event counts into the shared `data`.
        progress_total = len(result_data)
        progress_count = 0
        resources_not_matched = []
        for result in result_data:
            progress_count += 1
            if progress_count % 100 == 0:
                log.debug('.. %d/%d done so far', progress_count, progress_total)

            url = result[0].strip()

            # Get package id associated with the resource that has this URL.
            q = model.Session.query(model.Resource)
            if cached:
                r = q.filter(model.Resource.cache_url.like("%s%%" % url)).first()
            else:
                r = q.filter(model.Resource.url.like("%s%%" % url)).first()

            package_name = r.resource_group.package.name if r else ""
            if package_name:
                data[package_name] = data.get(package_name, 0) + int(result[1])
            else:
                resources_not_matched.append(url)
                continue
        if resources_not_matched:
            log.debug('Could not match %i or %i resource URLs to datasets. e.g. %r',
                      len(resources_not_matched), progress_total, resources_not_matched[:3])

    log.info('Associating downloads of resource URLs with their respective datasets')
    process_result_data(results.get('rows'))

    try:
        # Second query: download events served from the cache.
        args = {
            'ids': 'ga:' + self.profile_id,
            'filters': 'ga:eventAction==download-cache',
            'metrics': 'ga:totalEvents',
            'sort': '-ga:totalEvents',
            'dimensions': "ga:eventLabel",
            # GA expects the query parameter 'max-results'; the previous
            # dict used 'max_results', which the API silently ignored.
            'max-results': 10000,
            'start-date': start_date,
            'end-date': end_date,
        }
        results = self._get_json(args)
    except Exception as e:
        log.exception(e)
        results = dict(rows=[])

    log.info('Associating downloads of cache resource URLs with their respective datasets')
    # These labels are cache URLs, so match against Resource.cache_url.
    # (Was erroneously cached=False, which matched against Resource.url.)
    process_result_data(results.get('rows') or [], cached=True)

    self._filter_out_long_tail(data, MIN_DOWNLOADS)
    ga_model.update_sitewide_stats(period_name, "Downloads", data, period_complete_day)
def _social_stats(self, start_date, end_date, period_name, period_complete_day):
    """ Finds out which social sites people are referred from """
    try:
        # Because of issues of invalid responses, we are going to make these
        # requests ourselves via _get_json rather than the apiclient service.
        # NOTE(review): the unused 'headers' dict was dropped; _get_json
        # presumably attaches the auth token itself -- confirm.
        args = {
            'ids': 'ga:' + self.profile_id,
            'metrics': 'ga:pageviews',
            'sort': '-ga:pageviews',
            'dimensions': "ga:socialNetwork,ga:referralPath",
            # GA expects 'max-results'; the previous 'max_results' key was
            # silently ignored by the API.
            'max-results': 10000,
            'start-date': start_date,
            'end-date': end_date,
        }
        results = self._get_json(args)
    except Exception as e:
        log.exception(e)
        # Empty 'rows' (not 'url') so the loop below degrades gracefully.
        results = dict(rows=[])

    result_data = results.get('rows') or []

    # Page views aggregated per social network, skipping GA's placeholder.
    data = {}
    for result in result_data:
        if not result[0] == '(not set)':
            data[result[0]] = data.get(result[0], 0) + int(result[2])
    self._filter_out_long_tail(data, 3)
    ga_model.update_sitewide_stats(period_name, "Social sources", data, period_complete_day)
def _os_stats(self, start_date, end_date, period_name, period_complete_day):
    """ Operating system stats """
    try:
        # Because of issues of invalid responses, we are going to make these
        # requests ourselves via _get_json rather than the apiclient service.
        # NOTE(review): the unused 'headers' dict was dropped; _get_json
        # presumably attaches the auth token itself -- confirm.
        args = {
            'ids': 'ga:' + self.profile_id,
            'metrics': 'ga:pageviews',
            'sort': '-ga:pageviews',
            'dimensions': "ga:operatingSystem,ga:operatingSystemVersion",
            # GA expects 'max-results'; the previous 'max_results' key was
            # silently ignored by the API.
            'max-results': 10000,
            'start-date': start_date,
            'end-date': end_date,
        }
        results = self._get_json(args)
    except Exception as e:
        log.exception(e)
        # Empty 'rows' (not 'url') so the loops below degrade gracefully.
        results = dict(rows=[])

    result_data = results.get('rows') or []

    # Page views aggregated per operating system.
    data = {}
    for result in result_data:
        data[result[0]] = data.get(result[0], 0) + int(result[2])
    self._filter_out_long_tail(data, MIN_VIEWS)
    ga_model.update_sitewide_stats(period_name, "Operating Systems", data, period_complete_day)

    # Per "OS version" pair, keeping only rows with enough views (the count
    # is stored as-is rather than summed, matching the original behaviour).
    data = {}
    for result in result_data:
        if int(result[2]) >= MIN_VIEWS:
            key = "%s %s" % (result[0], result[1])
            data[key] = result[2]
    ga_model.update_sitewide_stats(period_name, "Operating Systems versions", data, period_complete_day)
def _browser_stats(self, start_date, end_date, period_name, period_complete_day):
    """ Information about browsers and browser versions """
    try:
        # Because of issues of invalid responses, we are going to make these
        # requests ourselves via _get_json rather than the apiclient service.
        # NOTE(review): the unused 'headers' dict was dropped; _get_json
        # presumably attaches the auth token itself -- confirm.
        args = {
            'ids': 'ga:' + self.profile_id,
            'metrics': 'ga:pageviews',
            'sort': '-ga:pageviews',
            'dimensions': "ga:browser,ga:browserVersion",
            # GA expects 'max-results'; the previous 'max_results' key was
            # silently ignored by the API.
            'max-results': 10000,
            'start-date': start_date,
            'end-date': end_date,
        }
        results = self._get_json(args)
    except Exception as e:
        log.exception(e)
        # Empty 'rows' (not 'url') so the loops below degrade gracefully.
        results = dict(rows=[])

    result_data = results.get('rows') or []
    # e.g. [u'Firefox', u'19.0', u'20']

    # Page views aggregated per browser.
    data = {}
    for result in result_data:
        data[result[0]] = data.get(result[0], 0) + int(result[2])
    self._filter_out_long_tail(data, MIN_VIEWS)
    ga_model.update_sitewide_stats(period_name, "Browsers", data, period_complete_day)

    # Page views aggregated per simplified "browser version" label.
    data = {}
    for result in result_data:
        key = "%s %s" % (result[0], self._filter_browser_version(result[0], result[1]))
        data[key] = data.get(key, 0) + int(result[2])
    self._filter_out_long_tail(data, MIN_VIEWS)
    ga_model.update_sitewide_stats(period_name, "Browser versions", data, period_complete_day)
@classmethod | @classmethod |
def _filter_browser_version(cls, browser, version_str): | def _filter_browser_version(cls, browser, version_str): |
''' | ''' |
Simplifies a browser version string if it is detailed. | Simplifies a browser version string if it is detailed. |
i.e. groups together Firefox 3.5.1 and 3.5.2 to be just 3. | i.e. groups together Firefox 3.5.1 and 3.5.2 to be just 3. |
This is helpful when viewing stats and good to protect privacy. | This is helpful when viewing stats and good to protect privacy. |
''' | ''' |
ver = version_str | ver = version_str |
parts = ver.split('.') | parts = ver.split('.') |
if len(parts) > 1: | if len(parts) > 1: |
if parts[1][0] == '0': | if parts[1][0] == '0': |
ver = parts[0] | ver = parts[0] |
else: | else: |
ver = "%s" % (parts[0]) | ver = "%s" % (parts[0]) |
# Special case complex version nums | # Special case complex version nums |
if browser in ['Safari', 'Android Browser']: | if browser in ['Safari', 'Android Browser']: |
ver = parts[0] | ver = parts[0] |
if len(ver) > 2: | if len(ver) > 2: |
num_hidden_digits = len(ver) - 2 | num_hidden_digits = len(ver) - 2 |
ver = ver[0] + ver[1] + 'X' * num_hidden_digits | ver = ver[0] + ver[1] + 'X' * num_hidden_digits |
return ver | return ver |
def _mobile_stats(self, start_date, end_date, period_name, period_complete_day):
    """ Info about mobile devices """
    try:
        # Because of issues of invalid responses, we are going to make these
        # requests ourselves via _get_json rather than the apiclient service.
        # NOTE(review): the unused 'headers' dict was dropped; _get_json
        # presumably attaches the auth token itself -- confirm.
        args = {
            'ids': 'ga:' + self.profile_id,
            'metrics': 'ga:pageviews',
            'sort': '-ga:pageviews',
            'dimensions': "ga:mobileDeviceBranding, ga:mobileDeviceInfo",
            # GA expects 'max-results'; the previous 'max_results' key was
            # silently ignored by the API.
            'max-results': 10000,
            'start-date': start_date,
            'end-date': end_date,
        }
        results = self._get_json(args)
    except Exception as e:
        log.exception(e)
        # Empty 'rows' (not 'url') so the loops below degrade gracefully.
        results = dict(rows=[])

    result_data = results.get('rows') or []

    # Page views aggregated per device brand (column 0).
    data = {}
    for result in result_data:
        data[result[0]] = data.get(result[0], 0) + int(result[2])
    self._filter_out_long_tail(data, MIN_VIEWS)
    ga_model.update_sitewide_stats(period_name, "Mobile brands", data, period_complete_day)

    # Page views aggregated per device model (column 1).
    data = {}
    for result in result_data:
        data[result[1]] = data.get(result[1], 0) + int(result[2])
    self._filter_out_long_tail(data, MIN_VIEWS)
    ga_model.update_sitewide_stats(period_name, "Mobile devices", data, period_complete_day)
@classmethod | @classmethod |
def _filter_out_long_tail(cls, data, threshold=10): | def _filter_out_long_tail(cls, data, threshold=10): |
''' | ''' |
Given data which is a frequency distribution, filter out | Given data which is a frequency distribution, filter out |
results which are below a threshold count. This is good to protect | results which are below a threshold count. This is good to protect |
privacy. | privacy. |
''' | ''' |
for key, value in data.items(): | for key, value in data.items(): |
if value < threshold: | if value < threshold: |
del data[key] | del data[key] |
import os | import os |
import httplib2 | import httplib2 |
from apiclient.discovery import build | from apiclient.discovery import build |
from oauth2client.client import flow_from_clientsecrets | from oauth2client.client import flow_from_clientsecrets |
from oauth2client.file import Storage | from oauth2client.file import Storage |
from oauth2client.tools import run | from oauth2client.tools import run |
from pylons import config | from pylons import config |
def _prepare_credentials(token_filename, credentials_filename):
    """
    Either returns the user's oauth credentials or uses the credentials
    file to generate a token (by forcing the user to login in the browser)
    """
    token_store = Storage(token_filename)
    creds = token_store.get()

    # A valid token already on disk is returned straight away.
    if creds is not None and not creds.invalid:
        return creds

    # No usable token: run the OAuth flow (this opens a browser) and let the
    # storage object persist the resulting token back to token_filename.
    oauth_flow = flow_from_clientsecrets(
        credentials_filename,
        scope='https://www.googleapis.com/auth/analytics.readonly',
        message="Can't find the credentials file")
    return run(oauth_flow, token_store)
def init_service(token_file, credentials_file):
    """
    Given a file containing the user's oauth token (and another with
    credentials in case we need to generate the token) will return a
    service object representing the analytics API.
    """
    creds = _prepare_credentials(token_file, credentials_file)

    # Authorize a fresh HTTP client with the credentials before building
    # the Analytics v3 service on top of it.
    authorized_http = creds.authorize(httplib2.Http())
    service = build('analytics', 'v3', http=authorized_http)

    return creds.access_token, service
def get_profile_id(service):
    """
    Get the profile ID for this user and the service specified by the
    'googleanalytics.id' configuration option. This function iterates
    over all of the accounts available to the user who invoked the
    service to find one where the account name matches (in case the
    user has several).

    :param service: an authorized Analytics v3 service object
    :returns: the first matching profile ID as a string, or None when no
        account/profile matches
    :raises Exception: if 'googleanalytics.account' or 'googleanalytics.id'
        is not configured
    """
    accounts = service.management().accounts().list().execute()

    # No accounts visible to this user at all.
    if not accounts.get('items'):
        return None

    accountName = config.get('googleanalytics.account')
    if not accountName:
        raise Exception('googleanalytics.account needs to be configured')
    webPropertyId = config.get('googleanalytics.id')
    if not webPropertyId:
        raise Exception('googleanalytics.id needs to be configured')
    for acc in accounts.get('items'):
        if acc.get('name') == accountName:
            accountId = acc.get('id')
            # NOTE(review): the webproperties result is never used, but the
            # call is an API request with possible side effects upstream --
            # confirm before removing.
            webproperties = service.management().webproperties().list(accountId=accountId).execute()
            profiles = service.management().profiles().list(
                accountId=accountId, webPropertyId=webPropertyId).execute()
            # Return the first profile of the first matching account.
            if profiles.get('items'):
                return profiles.get('items')[0].get('id')
    return None
<html xmlns:py="http://genshi.edgewall.org/" | <html xmlns:py="http://genshi.edgewall.org/" |
xmlns:i18n="http://genshi.edgewall.org/i18n" | xmlns:i18n="http://genshi.edgewall.org/i18n" |
xmlns:xi="http://www.w3.org/2001/XInclude" | xmlns:xi="http://www.w3.org/2001/XInclude" |
py:strip=""> | py:strip=""> |
<xi:include href="../ga_util.html" /> | <xi:include href="../ga_util.html" /> |
<py:def function="page_title">Usage by Publisher</py:def> | <py:def function="page_title">Usage by Publisher</py:def> |
<py:def function="sidebar">
${ga_sidebar(download_link=h.url_for(controller='ckanext.ga_report.controller:GaDatasetReport',action='publisher_csv',month=c.month or 'all'))} | ${ga_sidebar(download_link=h.url_for(controller='ckanext.ga_report.controller:GaDatasetReport',action='publisher_csv',month=c.month or 'all'))} |
</py:def> | </py:def> |
<py:def function="optional_head"> | <py:def function="optional_head"> |
<link rel="stylesheet" type="text/css" href="/scripts/vendor/rickshaw.min.css"/> | <link rel="stylesheet" type="text/css" href="/scripts/vendor/rickshaw.min.css"/> |
<link rel="stylesheet" type="text/css" href="/css/ga_report.css?1"/> | <link rel="stylesheet" type="text/css" href="/css/ga_report.css?1"/> |
<script type="text/javascript" src="/scripts/modernizr-2.6.2.custom.js"></script> | <script type="text/javascript" src="/scripts/modernizr-2.6.2.custom.js"></script> |
<script type="text/javascript" src="/scripts/ckanext_ga_reports.js?1"></script> | <script type="text/javascript" src="/scripts/ckanext_ga_reports.js?1"></script> |
<script type="text/javascript" src="/scripts/vendor/jquery.sparkline.modified.js"></script> | <script type="text/javascript" src="/scripts/vendor/jquery.sparkline.modified.js"></script> |
<script type="text/javascript" src="/scripts/rickshaw_ie7_shim.js"></script> | <script type="text/javascript" src="/scripts/rickshaw_ie7_shim.js"></script> |
<script type="text/javascript" src="/scripts/vendor/d3.v2.js"></script> | <script type="text/javascript" src="/scripts/vendor/d3.v2.js"></script> |
<script type="text/javascript" src="/scripts/vendor/d3.layout.min.js"></script> | <script type="text/javascript" src="/scripts/vendor/d3.layout.min.js"></script> |
<script type="text/javascript" src="/scripts/vendor/rickshaw.min.js"></script> | <script type="text/javascript" src="/scripts/vendor/rickshaw.min.js"></script> |
</py:def> | </py:def> |
<py:def function="page_heading">Site Usage ${usage_nav('Publishers')}</py:def> | <py:def function="page_heading">Site Usage ${usage_nav('Publishers')}</py:def> |
<div py:match="content"> | <div py:match="content"> |
<div class="boxed"> | <div class="boxed"> |
${rickshaw_graph(c.top_publishers_graph,'publishers')} | ${rickshaw_graph(c.top_publishers_graph,'publishers')} |
<hr/> | <hr/> |
<form class="form-inline" action="${h.url_for(controller='ckanext.ga_report.controller:GaDatasetReport',action='publishers')}" method="get"> | <form class="form-inline" action="${h.url_for(controller='ckanext.ga_report.controller:GaDatasetReport',action='publishers')}" method="get"> |
<div class="controls"> | <div class="controls"> |
<h4 class="ga-reports-heading">Statistics for</h4> | <h4 class="ga-reports-heading">Statistics for</h4> |
${month_selector(c.month, c.months, c.day)} | ${month_selector(c.month, c.months, c.day)} |
</div> | </div> |
</form> | </form> |
<table class="ga-reports-table table table-condensed table-bordered table-striped"> | <table class="ga-reports-table table table-condensed table-bordered table-striped"> |
<tr> | <tr> |
<th>Publisher</th> | <th>Publisher</th> |
<th class="td-numeric">Dataset Views</th> | <th class="td-numeric">Dataset Views</th> |
</tr> | </tr> |
<py:for each="publisher, views, visits in c.top_publishers"> | <py:for each="publisher, views, visits in c.top_publishers"> |
<tr> | <tr> |
<td> | <td> |
${h.link_to(publisher.title, h.url_for(controller='ckanext.ga_report.controller:GaDatasetReport', action='read_publisher', id=publisher.name) + (("?month=" + c.month) if c.month else ''))} | ${h.link_to(publisher.title, h.url_for(controller='ckanext.ga_report.controller:GaDatasetReport', action='read_publisher', id=publisher.name) + (("?month=" + c.month) if c.month else ''))} |
</td> | </td> |
<td class="td-numeric">${views}</td> | <td class="td-numeric">${views}</td> |
</tr> | </tr> |
</py:for> | </py:for> |
</table> | </table> |
</div><!--/boxed--> | </div><!--/boxed--> |
</div><!--/content--> | </div><!--/content--> |
<py:def function="optional_footer"> | <py:def function="optional_footer"> |
<script type="text/javascript"> | <script type="text/javascript"> |
$(function() { | $(function() { |
CKAN.GA_Reports.bind_month_selector(); | CKAN.GA_Reports.bind_month_selector(); |
}); | }); |
</script> | </script> |
</py:def> | </py:def> |
<xi:include href="../../layout.html" /> | <xi:include href="../../layout.html" /> |
</html> | </html> |
<html xmlns:py="http://genshi.edgewall.org/" | <html xmlns:py="http://genshi.edgewall.org/" |
xmlns:i18n="http://genshi.edgewall.org/i18n" | xmlns:i18n="http://genshi.edgewall.org/i18n" |
xmlns:xi="http://www.w3.org/2001/XInclude" | xmlns:xi="http://www.w3.org/2001/XInclude" |
py:strip=""> | py:strip=""> |
<xi:include href="../ga_util.html" /> | <xi:include href="../ga_util.html" /> |
<py:def function="page_title">Usage by Dataset</py:def> | <py:def function="page_title">Usage by Dataset</py:def> |
<py:def function="optional_head"> | <py:def function="optional_head"> |
<link rel="stylesheet" type="text/css" href="/scripts/vendor/rickshaw.min.css"/> | <link rel="stylesheet" type="text/css" href="/scripts/vendor/rickshaw.min.css"/> |
<link rel="stylesheet" type="text/css" href="/css/ga_report.css?1"/> | <link rel="stylesheet" type="text/css" href="/css/ga_report.css?1"/> |
<script type="text/javascript" src="/scripts/modernizr-2.6.2.custom.js"></script> | <script type="text/javascript" src="/scripts/modernizr-2.6.2.custom.js"></script> |
<script type="text/javascript" src="/scripts/ckanext_ga_reports.js?1"></script> | <script type="text/javascript" src="/scripts/ckanext_ga_reports.js?1"></script> |
<script type="text/javascript" src="/scripts/vendor/jquery.sparkline.modified.js"></script> | <script type="text/javascript" src="/scripts/vendor/jquery.sparkline.modified.js"></script> |
<script type="text/javascript" src="/scripts/rickshaw_ie7_shim.js"></script> | <script type="text/javascript" src="/scripts/rickshaw_ie7_shim.js"></script> |
<script type="text/javascript" src="/scripts/vendor/d3.v2.js"></script> | <script type="text/javascript" src="/scripts/vendor/d3.v2.js"></script> |
<script type="text/javascript" src="/scripts/vendor/d3.layout.min.js"></script> | <script type="text/javascript" src="/scripts/vendor/d3.layout.min.js"></script> |
<script type="text/javascript" src="/scripts/vendor/rickshaw.min.js"></script> | <script type="text/javascript" src="/scripts/vendor/rickshaw.min.js"></script> |
</py:def> | </py:def> |
<py:def function="sidebar">
${ga_sidebar(download_link=h.url_for(controller='ckanext.ga_report.controller:GaDatasetReport',action='dataset_csv',id=c.publisher_name or 'all',month=c.month or 'all'))} | ${ga_sidebar(download_link=h.url_for(controller='ckanext.ga_report.controller:GaDatasetReport',action='dataset_csv',id=c.publisher_name or 'all',month=c.month or 'all'))} |
</py:def> | </py:def> |
<py:def function="page_heading">Site Usage ${usage_nav('Datasets')}</py:def> | <py:def function="page_heading">Site Usage ${usage_nav('Datasets')}</py:def> |
<div py:match="content"> | <div py:match="content"> |
<div class="boxed"> | <div class="boxed"> |
<h3 py:if="c.publisher"><a href="${h.url_for(controller='ckanext.dgu.controllers.publisher:PublisherController',action='read',id=c.publisher.name)}">${c.publisher.title}</a></h3> | <h3 py:if="c.publisher"><a href="${h.url_for(controller='ckanext.dgu.controllers.publisher:PublisherController',action='read',id=c.publisher.name)}">${c.publisher.title}</a></h3> |
<py:if test="c.graph_data"> | <py:if test="c.graph_data"> |
${rickshaw_graph(c.graph_data,'dataset-downloads',debug=True)} | ${rickshaw_graph(c.graph_data,'dataset-downloads',debug=True)} |
</py:if> | </py:if> |
<form class="form-inline" action="${h.url_for(controller='ckanext.ga_report.controller:GaDatasetReport',action='read')}" method="get"> | <form class="form-inline" action="${h.url_for(controller='ckanext.ga_report.controller:GaDatasetReport',action='read')}" method="get"> |
<div class="controls"> | <div class="controls"> |
${month_selector(c.month, c.months, c.day)} | ${month_selector(c.month, c.months, c.day)} |
<select name="publisher"> | <select name="publisher"> |
<option value='' py:attrs="{'selected': 'selected' if not c.publisher else None}">All publishers</option> | <option value='' py:attrs="{'selected': 'selected' if not c.publisher else None}">All publishers</option> |
<py:for each="val,desc in c.publishers"> | <py:for each="val,desc in c.publishers"> |
<option value='${val}' py:attrs="{'selected': 'selected' if c.publisher_name == val else None}">${desc}</option> | <option value='${val}' py:attrs="{'selected': 'selected' if c.publisher_name == val else None}">${desc}</option> |
</py:for> | </py:for> |
</select> | </select> |
<input class="btn button btn-primary" type='submit' value="Update"/> | <input class="btn button btn-primary" type='submit' value="Update"/> |
</div> | </div> |
</form> | </form> |
<py:if test="c.month"> | <py:if test="c.month"> |
<h4>Statistics for ${h.month_option_title(c.month,c.months,c.day)}:</h4> | <h4>Statistics for ${h.month_option_title(c.month,c.months,c.day)}:</h4> |
</py:if> | </py:if> |
<py:if test="not c.month"> | <py:if test="not c.month"> |
<h4>Statistics for all months:</h4> | <h4>Statistics for all months:</h4> |
</py:if> | </py:if> |
<div class="alert alert-info" py:if="not c.top_packages">No page views in this period.</div> | <div class="alert alert-info" py:if="not c.top_packages">No page views in this period.</div> |
<py:if test="c.top_packages"> | <py:if test="c.top_packages"> |
<table class="ga-reports-table table table-condensed table-bordered table-striped"> | <table class="ga-reports-table table table-condensed table-bordered table-striped"> |
<tr> | <tr> |
<th>Dataset</th> | <th>Dataset</th> |
<th>Views</th> | <th>Views</th> |
<th>Downloads</th> | <th>Downloads</th> |
</tr> | </tr> |
<py:for each="package, views, visits,downloads in c.top_packages"> | <py:for each="package, views, visits,downloads in c.top_packages"> |
<tr> | <tr> |
<td> | <td> |
${h.link_to(package.title or package.name, h.url_for(controller='package', action='read', id=package.name))} | ${h.link_to(package.title or package.name, h.url_for(controller='package', action='read', id=package.name))} |
</td> | </td> |
<td class="td-numeric">${views}</td> | <td class="td-numeric">${views}</td> |
<td class="td-numeric">${downloads}</td> | <td class="td-numeric">${downloads}</td> |
</tr> | </tr> |
</py:for> | </py:for> |
</table> | </table> |
</py:if> | </py:if> |
</div> | </div> |
</div> | </div> |
<xi:include href="../../layout.html" /> | <xi:include href="../../layout.html" /> |
</html> | </html> |