import logging
import datetime
import os

from pylons import config

from ckan.lib.cli import CkanCommand
# No other CKAN imports allowed until _load_config is run,
# or logging is disabled


class InitDB(CkanCommand):
    """Initialise the extension's database tables
    """
    summary = __doc__.split('\n')[0]
    usage = __doc__
    max_args = 0
    min_args = 0

    def command(self):
        self._load_config()

        import ckan.model as model
        model.Session.remove()
        model.Session.configure(bind=model.meta.engine)
        log = logging.getLogger('ckanext.ga_report')

        import ga_model
        ga_model.init_tables()
        log.info("DB tables are setup")
class GetAuthToken(CkanCommand):
    """ Gets the Google auth token

    Usage: paster getauthtoken <credentials_file>

    Where <credentials_file> is the file name containing the details
    for the service (obtained from https://code.google.com/apis/console).
    By default this is set to credentials.json
    """
    summary = __doc__.split('\n')[0]
    usage = __doc__
    max_args = 0
    min_args = 0

    def command(self):
        """
        In this case we don't want a valid service, but rather just to
        force the user through the auth flow. We allow this to complete to
        act as a form of verification instead of just getting the token and
        assuming it is correct.
        """
        from ga_auth import init_service
        init_service('token.dat',
                     self.args[0] if self.args
                     else 'credentials.json')
class FixTimePeriods(CkanCommand):
    """
    Fixes the 'All' records for GA_Urls

    It is possible that older URLs that haven't recently been visited
    do not have 'All' records. This command will traverse through those
    records and generate valid 'All' records for them.
    """
    summary = __doc__.split('\n')[0]
    usage = __doc__
    max_args = 0
    min_args = 0

    def __init__(self, name):
        super(FixTimePeriods, self).__init__(name)

    def command(self):
        import ckan.model as model
        from ga_model import post_update_url_stats
        self._load_config()
        model.Session.remove()
        model.Session.configure(bind=model.meta.engine)

        log = logging.getLogger('ckanext.ga_report')

        log.info("Updating 'All' records for old URLs")
        post_update_url_stats()
        log.info("Processing complete")
class LoadAnalytics(CkanCommand):
    """Get data from Google Analytics API and save it
    in the ga_model

    Usage: paster loadanalytics <time-period>

    Where <time-period> is:
        all          - data for all time
        latest       - (default) just the 'latest' data
        YYYY-MM      - just data for the specific month
    """
    summary = __doc__.split('\n')[0]
    usage = __doc__
    max_args = 1
    min_args = 0

    def __init__(self, name):
        super(LoadAnalytics, self).__init__(name)
        self.parser.add_option('-d', '--delete-first',
                               action='store_true',
                               default=False,
                               dest='delete_first',
                               help='Delete data for the period first')
        self.parser.add_option('-s', '--skip_url_stats',
                               action='store_true',
                               default=False,
                               dest='skip_url_stats',
                               help='Skip the download of URL data - just do site-wide stats')
        self.token = ""

    def command(self):
        self._load_config()

        from download_analytics import DownloadAnalytics
        from ga_auth import (init_service, get_profile_id)

        ga_token_filepath = os.path.expanduser(config.get('googleanalytics.token.filepath', ''))
        if not ga_token_filepath:
            print 'ERROR: In the CKAN config you need to specify the filepath of the ' \
                  'Google Analytics token file under key: googleanalytics.token.filepath'
            return

        try:
            self.token, svc = init_service(ga_token_filepath, None)
        except TypeError:
            print ('Have you correctly run the getauthtoken task and '
                   'specified the correct token file in the CKAN config under '
                   '"googleanalytics.token.filepath"?')
            return

        downloader = DownloadAnalytics(svc, self.token, profile_id=get_profile_id(svc),
                                       delete_first=self.options.delete_first,
                                       skip_url_stats=self.options.skip_url_stats)

        time_period = self.args[0] if self.args else 'latest'
        if time_period == 'all':
            downloader.all_()
        elif time_period == 'latest':
            downloader.latest()
        else:
            # The month to use
            for_date = datetime.datetime.strptime(time_period, '%Y-%m')
            downloader.specific_month(for_date)
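
# Typical invocations, for illustration only (the plugin name and config file
# path are assumptions based on a standard CKAN paster setup):
#   paster --plugin=ckanext-ga-report loadanalytics latest --config=production.ini
#   paster --plugin=ckanext-ga-report loadanalytics 2013-01 -d --config=production.ini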
import re
import csv
import sys
import json
import logging
import operator
import collections
from ckan.lib.base import (BaseController, c, g, render, request, response, abort)

import sqlalchemy
from sqlalchemy import func, cast, Integer
import ckan.model as model
from ga_model import GA_Url, GA_Stat, GA_ReferralStat, GA_Publisher

log = logging.getLogger('ckanext.ga-report')

DOWNLOADS_AVAILABLE_FROM = '2012-12'
def _get_month_name(strdate):
    import calendar
    from time import strptime
    d = strptime(strdate, '%Y-%m')
    return '%s %s' % (calendar.month_name[d.tm_mon], d.tm_year)


def _get_unix_epoch(strdate):
    from time import strptime, mktime
    d = strptime(strdate, '%Y-%m')
    return int(mktime(d))
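
# Illustrative: _get_month_name('2013-01') returns 'January 2013';
# _get_unix_epoch('2013-01') returns the local-time Unix timestamp for
# 2013-01-01 00:00:00, used as the x value in the Rickshaw graphs below.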
def _month_details(cls, stat_key=None):
    '''
    Returns a list of all the periods for which we have data. Unfortunately
    this knows too much about the type of the cls being passed, as GA_Url
    needs a more complex query.

    This may need extending if we add a period_name to the stats
    '''
    months = []
    day = None

    q = model.Session.query(cls.period_name, cls.period_complete_day)\
        .filter(cls.period_name != 'All').distinct(cls.period_name)
    if stat_key:
        q = q.filter(cls.stat_name == stat_key)

    vals = q.order_by("period_name desc").all()

    if vals and vals[0][1]:
        day = int(vals[0][1])
        ordinal = 'th' if 11 <= day <= 13 \
            else {1: 'st', 2: 'nd', 3: 'rd'}.get(day % 10, 'th')
        day = "{day}{ordinal}".format(day=day, ordinal=ordinal)

    for m in vals:
        months.append((m[0], _get_month_name(m[0])))

    return months, day
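
# Illustrative _month_details() result (dates are made up):
#   ([('2013-02', 'February 2013'), ('2013-01', 'January 2013')], '14th')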
class GaReport(BaseController):

    def csv(self, month):
        import csv

        q = model.Session.query(GA_Stat).filter(GA_Stat.stat_name != 'Downloads')
        if month != 'all':
            q = q.filter(GA_Stat.period_name == month)
        entries = q.order_by('GA_Stat.period_name, GA_Stat.stat_name, GA_Stat.key').all()

        response.headers['Content-Type'] = "text/csv; charset=utf-8"
        response.headers['Content-Disposition'] = str('attachment; filename=stats_%s.csv' % (month,))

        writer = csv.writer(response)
        writer.writerow(["Period", "Statistic", "Key", "Value"])

        for entry in entries:
            writer.writerow([entry.period_name.encode('utf-8'),
                             entry.stat_name.encode('utf-8'),
                             entry.key.encode('utf-8'),
                             entry.value.encode('utf-8')])

    def csv_downloads(self, month):
        import csv

        q = model.Session.query(GA_Stat).filter(GA_Stat.stat_name == 'Downloads')
        if month != 'all':
            q = q.filter(GA_Stat.period_name == month)
        entries = q.order_by('GA_Stat.period_name, GA_Stat.key').all()

        response.headers['Content-Type'] = "text/csv; charset=utf-8"
        response.headers['Content-Disposition'] = str('attachment; filename=downloads_%s.csv' % (month,))

        writer = csv.writer(response)
        writer.writerow(["Period", "Resource URL", "Count"])

        for entry in entries:
            writer.writerow([entry.period_name.encode('utf-8'),
                             entry.key.encode('utf-8'),
                             entry.value.encode('utf-8')])
    def index(self):

        # Get the month details by fetching distinct values and determining the
        # month names from the values.
        c.months, c.day = _month_details(GA_Stat)

        # Work out which month to show, based on query params of the first item
        c.month_desc = 'all months'
        c.month = request.params.get('month', '')
        if c.month:
            c.month_desc = ''.join([m[1] for m in c.months if m[0] == c.month])

        q = model.Session.query(GA_Stat).\
            filter(GA_Stat.stat_name == 'Totals')
        if c.month:
            q = q.filter(GA_Stat.period_name == c.month)
        entries = q.order_by('ga_stat.key').all()

        def clean_key(key, val):
            if key in ['Average time on site', 'Pages per visit', 'New visits', 'Bounce rate (home page)']:
                val = "%.2f" % round(float(val), 2)
                if key == 'Average time on site':
                    mins, secs = divmod(float(val), 60)
                    hours, mins = divmod(mins, 60)
                    val = '%02d:%02d:%02d (%s seconds) ' % (hours, mins, secs, val)
                if key in ['New visits', 'Bounce rate (home page)']:
                    val = "%s%%" % val
            if key in ['Total page views', 'Total visits']:
                val = int(val)

            return key, val
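
        # Illustrative (assumed inputs): clean_key('Average time on site', '75.5')
        # returns ('Average time on site', '00:01:15 (75.50 seconds) '), and
        # clean_key('Total visits', '123') returns ('Total visits', 123).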
        # Query historic values for sparkline rendering
        sparkline_query = model.Session.query(GA_Stat)\
            .filter(GA_Stat.stat_name == 'Totals')\
            .order_by(GA_Stat.period_name)
        sparkline_data = {}
        for x in sparkline_query:
            sparkline_data[x.key] = sparkline_data.get(x.key, [])
            key, val = clean_key(x.key, float(x.value))
            tooltip = '%s: %s' % (_get_month_name(x.period_name), val)
            sparkline_data[x.key].append((tooltip, x.value))
        # Trim the latest month, as it looks like a huge dropoff
        for key in sparkline_data:
            sparkline_data[key] = sparkline_data[key][:-1]

        c.global_totals = []
        if c.month:
            for e in entries:
                key, val = clean_key(e.key, e.value)
                sparkline = sparkline_data[e.key]
                c.global_totals.append((key, val, sparkline))
        else:
            d = collections.defaultdict(list)
            for e in entries:
                d[e.key].append(float(e.value))
            for k, v in d.iteritems():
                if k in ['Total page views', 'Total visits']:
                    v = sum(v)
                else:
                    v = float(sum(v)) / float(len(v))
                sparkline = sparkline_data[k]
                key, val = clean_key(k, v)
                c.global_totals.append((key, val, sparkline))

        # Sort the global totals into a more pleasant order
        def sort_func(x):
            key = x[0]
            total_order = ['Total page views', 'Total visits', 'Pages per visit']
            if key in total_order:
                return total_order.index(key)
            return 999
        c.global_totals = sorted(c.global_totals, key=sort_func)

        keys = {
            'Browser versions': 'browser_versions',
            'Browsers': 'browsers',
            'Operating Systems versions': 'os_versions',
            'Operating Systems': 'os',
            'Social sources': 'social_networks',
            'Languages': 'languages',
            'Country': 'country'
        }
        def shorten_name(name, length=60):
            return (name[:length] + '..') if len(name) > length else name
        def fill_out_url(url):
            import urlparse
            return urlparse.urljoin(g.site_url, url)

        c.social_referrer_totals, c.social_referrers = [], []
        q = model.Session.query(GA_ReferralStat)
        q = q.filter(GA_ReferralStat.period_name == c.month) if c.month else q
        q = q.order_by('ga_referrer.count::int desc')
        for entry in q.all():
            c.social_referrers.append((shorten_name(entry.url), fill_out_url(entry.url),
                                       entry.source, entry.count))

        q = model.Session.query(GA_ReferralStat.url,
                                func.sum(GA_ReferralStat.count).label('count'))
        q = q.filter(GA_ReferralStat.period_name == c.month) if c.month else q
        q = q.order_by('count desc').group_by(GA_ReferralStat.url)
        for entry in q.all():
            c.social_referrer_totals.append((shorten_name(entry[0]), fill_out_url(entry[0]), '',
                                             entry[1]))

        for k, v in keys.iteritems():
            q = model.Session.query(GA_Stat).\
                filter(GA_Stat.stat_name == k).\
                order_by(GA_Stat.period_name)
            # Buffer the tabular data
            if c.month:
                entries = []
                q = q.filter(GA_Stat.period_name == c.month).\
                    order_by('ga_stat.value::int desc')
            d = collections.defaultdict(int)
            for e in q.all():
                d[e.key] += int(e.value)
            entries = []
            for key, val in d.iteritems():
                entries.append((key, val,))
            entries = sorted(entries, key=operator.itemgetter(1), reverse=True)

            # Run a query on all months to gather graph data
            graph_query = model.Session.query(GA_Stat).\
                filter(GA_Stat.stat_name == k).\
                order_by(GA_Stat.period_name)
            graph_dict = {}
            for stat in graph_query:
                graph_dict[stat.key] = graph_dict.get(stat.key, {
                    'name': stat.key,
                    'raw': {}
                    })
                graph_dict[stat.key]['raw'][stat.period_name] = float(stat.value)
            stats_in_table = [x[0] for x in entries]
            stats_not_in_table = set(graph_dict.keys()) - set(stats_in_table)
            stats = stats_in_table + sorted(list(stats_not_in_table))
            graph = [graph_dict[x] for x in stats]
            setattr(c, v + '_graph', json.dumps(_to_rickshaw(graph, percentageMode=True)))

            # Get the total for each set of values and then set the value as
            # a percentage of the total
            if k == 'Social sources':
                total = sum([x for n, x, graph in c.global_totals if n == 'Total visits'])
            else:
                total = sum([num for _, num in entries])
            setattr(c, v, [(k, _percent(v, total)) for k, v in entries])

        return render('ga_report/site/index.html')
    def downloads(self):

        # Get the month details by fetching distinct values and determining the
        # month names from the values.
        c.months, c.day = _month_details(GA_Stat, "Downloads")

        # Work out which month to show, based on query params of the first item
        c.month_desc = 'all months'
        c.month = request.params.get('month', '')
        if c.month:
            c.month_desc = ''.join([m[1] for m in c.months if m[0] == c.month])

        c.downloads = []
        q = model.Session.query(GA_Stat).filter(GA_Stat.stat_name == 'Downloads')
        q = q.filter(GA_Stat.period_name == c.month) if c.month else q
        q = q.order_by("ga_stat.value::int desc")

        data = collections.defaultdict(int)
        for entry in q.all():
            r = model.Session.query(model.Resource).filter(model.Resource.url == entry.key).first()
            if not r:
                continue
            data[r] += int(entry.value)

        c.downloads = [(k, v,) for k, v in data.iteritems()]
        c.downloads = sorted(c.downloads, key=operator.itemgetter(1), reverse=True)

        return render('ga_report/site/downloads.html')
class GaDatasetReport(BaseController):
    """
    Displays the pageview and visit count for datasets
    with options to filter by publisher and time period.
    """

    def publisher_csv(self, month):
        '''
        Returns a CSV of each publisher with the total number of dataset
        views & visits.
        '''
        c.month = month if not month == 'all' else ''
        response.headers['Content-Type'] = "text/csv; charset=utf-8"
        response.headers['Content-Disposition'] = str('attachment; filename=publishers_%s.csv' % (month,))

        writer = csv.writer(response)
        writer.writerow(["Publisher Title", "Publisher Name", "Views", "Visits", "Period Name"])

        top_publishers = _get_top_publishers(limit=None)

        for publisher, view, visit in top_publishers:
            writer.writerow([publisher.title.encode('utf-8'),
                             publisher.name.encode('utf-8'),
                             view,
                             visit,
                             month])

    def dataset_csv(self, id='all', month='all'):
        '''
        Returns a CSV with the number of views & visits for each dataset.

        :param id: A Publisher ID or None if you want for all
        :param month: The time period, or 'all'
        '''
        c.month = month if not month == 'all' else ''
        if id != 'all':
            c.publisher = model.Group.get(id)
            if not c.publisher:
                abort(404, 'A publisher with that name could not be found')

        packages = self._get_packages(publisher=c.publisher, month=c.month)
        response.headers['Content-Type'] = "text/csv; charset=utf-8"
        response.headers['Content-Disposition'] = \
            str('attachment; filename=datasets_%s_%s.csv' % (c.publisher_name, month,))

        writer = csv.writer(response)
        writer.writerow(["Dataset Title", "Dataset Name", "Views", "Visits", "Resource downloads", "Period Name"])

        for package, view, visit, downloads in packages:
            writer.writerow([package.title.encode('utf-8'),
                             package.name.encode('utf-8'),
                             view,
                             visit,
                             downloads,
                             month])
    def publishers(self):
        '''A list of publishers and the number of views/visits for each'''

        # Get the month details by fetching distinct values and determining the
        # month names from the values.
        c.months, c.day = _month_details(GA_Url)

        # Work out which month to show, based on query params of the first item
        c.month = request.params.get('month', '')
        c.month_desc = 'all months'
        if c.month:
            c.month_desc = ''.join([m[1] for m in c.months if m[0] == c.month])

        c.top_publishers = _get_top_publishers()
        graph_data = _get_top_publishers_graph()
        c.top_publishers_graph = json.dumps(_to_rickshaw(graph_data))

        return render('ga_report/publisher/index.html')
    def _get_packages(self, publisher=None, month='', count=-1):
        '''Returns the datasets in order of views'''
        have_download_data = True
        month = month or 'All'
        if month != 'All':
            have_download_data = month >= DOWNLOADS_AVAILABLE_FROM

        q = model.Session.query(GA_Url, model.Package)\
            .filter(model.Package.name == GA_Url.package_id)\
            .filter(GA_Url.url.like('/dataset/%'))
        if publisher:
            q = q.filter(GA_Url.department_id == publisher.name)
        q = q.filter(GA_Url.period_name == month)
        q = q.order_by('ga_url.pageviews::int desc')
        top_packages = []
        if count == -1:
            entries = q.all()
        else:
            entries = q.limit(count)

        for entry, package in entries:
            if package:
                # Downloads ....
                if have_download_data:
                    dls = model.Session.query(GA_Stat).\
                        filter(GA_Stat.stat_name == 'Downloads').\
                        filter(GA_Stat.key == package.name)
                    if month != 'All':  # Fetch everything unless the month is specific
                        dls = dls.filter(GA_Stat.period_name == month)
                    downloads = 0
                    for x in dls:
                        downloads += int(x.value)
                else:
                    downloads = 'No data'
                top_packages.append((package, entry.pageviews, entry.visits, downloads))
            else:
                log.warning('Could not find the package associated with this URL')
        return top_packages
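
    # Illustrative _get_packages() result (the dataset name is hypothetical):
    #   [(<Package os-code-point-open>, '1234', '567', 89), ...]
    # Pageviews and visits come back as strings from GA_Url; downloads is an
    # int, or the string 'No data' for months before DOWNLOADS_AVAILABLE_FROM.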
    def read(self):
        '''
        Lists the most popular datasets across all publishers
        '''
        return self.read_publisher(None)

    def read_publisher(self, id):
        '''
        Lists the most popular datasets for a publisher (or across all publishers)
        '''
        count = 20

        c.publishers = _get_publishers()

        id = request.params.get('publisher', id)
        if id and id != 'all':
            c.publisher = model.Group.get(id)
            if not c.publisher:
                abort(404, 'A publisher with that name could not be found')
            c.publisher_name = c.publisher.name
        c.top_packages = []  # package, dataset_views in c.top_packages

        # Get the month details by fetching distinct values and determining the
        # month names from the values.
        c.months, c.day = _month_details(GA_Url)

        # Work out which month to show, based on query params of the first item
        c.month = request.params.get('month', '')
        if not c.month:
            c.month_desc = 'all months'
        else:
            c.month_desc = ''.join([m[1] for m in c.months if m[0] == c.month])

        month = c.month or 'All'
        c.publisher_page_views = 0
        q = model.Session.query(GA_Url).\
            filter(GA_Url.url == '/publisher/%s' % c.publisher_name)
        entry = q.filter(GA_Url.period_name == c.month).first()
        c.publisher_page_views = entry.pageviews if entry else 0

        c.top_packages = self._get_packages(publisher=c.publisher, count=20, month=c.month)

        # Graph query
        top_packages_all_time = self._get_packages(publisher=c.publisher, count=20, month='All')
        top_package_names = [x[0].name for x in top_packages_all_time]
        graph_query = model.Session.query(GA_Url, model.Package)\
            .filter(model.Package.name == GA_Url.package_id)\
            .filter(GA_Url.url.like('/dataset/%'))\
            .filter(GA_Url.package_id.in_(top_package_names))
        all_series = {}
        for entry, package in graph_query:
            if not package: continue
            if entry.period_name == 'All': continue
            all_series[package.name] = all_series.get(package.name, {
                'name': package.title,
                'raw': {}
                })
            all_series[package.name]['raw'][entry.period_name] = int(entry.pageviews)
        graph = [all_series[series_name] for series_name in top_package_names]
        c.graph_data = json.dumps(_to_rickshaw(graph))

        return render('ga_report/publisher/read.html')
def _to_rickshaw(data, percentageMode=False):
    if data == []:
        return data
    # x-axis is every month in c.months. Note that data might not exist
    # for entire history, eg. for recently-added datasets
    x_axis = [x[0] for x in c.months]
    x_axis.reverse()  # Ascending order
    x_axis = x_axis[:-1]  # Remove latest month
    totals = {}
    for series in data:
        series['data'] = []
        for x_string in x_axis:
            x = _get_unix_epoch(x_string)
            y = series['raw'].get(x_string, 0)
            series['data'].append({'x': x, 'y': y})
            totals[x] = totals.get(x, 0) + y
    if not percentageMode:
        return data
    # Turn all data into percentages
    # Roll insignificant series into a catch-all
    THRESHOLD = 1
    raw_data = data
    data = []
    for series in raw_data:
        for point in series['data']:
            percentage = (100 * float(point['y'])) / totals[point['x']]
            if not (series in data) and percentage > THRESHOLD:
                data.append(series)
            point['y'] = percentage
    others = [x for x in raw_data if not (x in data)]
    if len(others):
        data_other = []
        for i in range(len(x_axis)):
            x = _get_unix_epoch(x_axis[i])
            y = 0
            for series in others:
                y += series['data'][i]['y']
            data_other.append({'x': x, 'y': y})
        data.append({
            'name': 'Other',
            'data': data_other
            })
    return data
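
# Illustrative Rickshaw series produced by _to_rickshaw (the series name and
# values are made up; the x values are epoch timestamps for successive months):
#   [{'name': 'Firefox', 'raw': {...},
#     'data': [{'x': 1356998400, 'y': 60.0}, {'x': 1359676800, 'y': 58.2}]}]
# With percentageMode=True the y values are percentages of each month's total,
# and series that never exceed THRESHOLD are rolled up into an 'Other' series.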
def _get_top_publishers(limit=20):
    '''
    Returns a list of the top publishers by dataset views.
    (The number to show can be varied with 'limit')
    '''
    month = c.month or 'All'
    connection = model.Session.connection()
    q = """
        select department_id, sum(pageviews::int) views, sum(visits::int) visits
        from ga_url
        where department_id <> ''
          and package_id <> ''
          and url like '/dataset/%%'
          and period_name=%s
        group by department_id order by views desc
        """
    if limit:
        q = q + " limit %s;" % (limit)

    top_publishers = []
    res = connection.execute(q, month)
    for row in res:
        g = model.Group.get(row[0])
        if g:
            top_publishers.append((g, row[1], row[2]))
    return top_publishers
def _get_top_publishers_graph(limit=20):
    '''
    Returns time-series graph data for the top publishers by dataset views.
    (The number to show can be varied with 'limit')
    '''
    connection = model.Session.connection()
    q = """
        select department_id, sum(pageviews::int) views
        from ga_url
        where department_id <> ''
          and package_id <> ''
          and url like '/dataset/%%'
          and period_name='All'
        group by department_id order by views desc
        """
    if limit:
        q = q + " limit %s;" % (limit)

    res = connection.execute(q)
    department_ids = [row[0] for row in res]

    # Query for a history graph of these department ids
    q = model.Session.query(
        GA_Url.department_id,
        GA_Url.period_name,
        func.sum(cast(GA_Url.pageviews, sqlalchemy.types.INT)))\
        .filter(GA_Url.department_id.in_(department_ids))\
        .filter(GA_Url.url.like('/dataset/%'))\
        .filter(GA_Url.package_id != '')\
        .group_by(GA_Url.department_id, GA_Url.period_name)
    graph_dict = {}
    for dept_id, period_name, views in q:
        graph_dict[dept_id] = graph_dict.get(dept_id, {
            'name': model.Group.get(dept_id).title,
            'raw': {}
            })
        graph_dict[dept_id]['raw'][period_name] = views
    return [graph_dict[id] for id in department_ids]
def _get_publishers():
    '''
    Returns a list of all publishers. Each item is a tuple:
      (name, title)
    '''
    publishers = []
    for pub in model.Session.query(model.Group).\
            filter(model.Group.type == 'organization').\
            filter(model.Group.state == 'active').\
            order_by(model.Group.name):
        publishers.append((pub.name, pub.title))
    return publishers


def _percent(num, total):
    p = 100 * float(num) / float(total)
    return "%.2f%%" % round(p, 2)
import os
import logging
import datetime
import httplib
import collections
import requests
import json
from pylons import config

from ga_model import _normalize_url
import ga_model

#from ga_client import GA

log = logging.getLogger('ckanext.ga-report')

FORMAT_MONTH = '%Y-%m'
MIN_VIEWS = 50
MIN_VISITS = 20
MIN_DOWNLOADS = 10
class DownloadAnalytics(object):
    '''Downloads and stores analytics info'''

    def __init__(self, service=None, token=None, profile_id=None, delete_first=False,
                 skip_url_stats=False):
        self.period = config['ga-report.period']
        self.service = service
        self.profile_id = profile_id
        self.delete_first = delete_first
        self.skip_url_stats = skip_url_stats
        self.token = token

    def specific_month(self, date):
        import calendar

        first_of_this_month = datetime.datetime(date.year, date.month, 1)
        _, last_day_of_month = calendar.monthrange(int(date.year), int(date.month))
        last_of_this_month = datetime.datetime(date.year, date.month, last_day_of_month)
        # if this is the latest month, note that it is only up until today
        now = datetime.datetime.now()
        if now.year == date.year and now.month == date.month:
            last_day_of_month = now.day
            last_of_this_month = now
        periods = ((date.strftime(FORMAT_MONTH),
                    last_day_of_month,
                    first_of_this_month, last_of_this_month),)
        self.download_and_store(periods)

    def latest(self):
        if self.period == 'monthly':
            # from first of this month to today
            now = datetime.datetime.now()
            first_of_this_month = datetime.datetime(now.year, now.month, 1)
            periods = ((now.strftime(FORMAT_MONTH),
                        now.day,
                        first_of_this_month, now),)
        else:
            raise NotImplementedError
        self.download_and_store(periods)
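
    # Each period is a tuple of (period_name, period_complete_day, start_date,
    # end_date), e.g. ('2013-01', 14, datetime(2013, 1, 1), datetime(2013, 1, 14)).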
    def for_date(self, for_date):
        assert isinstance(for_date, datetime.datetime)
        periods = []  # (period_name, period_complete_day, start_date, end_date)
        if self.period == 'monthly':
            first_of_the_months_until_now = []
            year = for_date.year
            month = for_date.month
            now = datetime.datetime.now()
            first_of_this_month = datetime.datetime(now.year, now.month, 1)
            while True:
                first_of_the_month = datetime.datetime(year, month, 1)
                if first_of_the_month == first_of_this_month:
                    periods.append((now.strftime(FORMAT_MONTH),
                                    now.day,
                                    first_of_this_month, now))
                    break
                elif first_of_the_month < first_of_this_month:
                    in_the_next_month = first_of_the_month + datetime.timedelta(40)
                    last_of_the_month = datetime.datetime(in_the_next_month.year,
                                                          in_the_next_month.month, 1)\
                                        - datetime.timedelta(1)
                    periods.append((first_of_the_month.strftime(FORMAT_MONTH), 0,
                                    first_of_the_month, last_of_the_month))
                else:
                    # first_of_the_month has got to the future somehow
                    break
                month += 1
                if month > 12:
                    year += 1
                    month = 1
        else:
            raise NotImplementedError
        self.download_and_store(periods)
    @staticmethod
    def get_full_period_name(period_name, period_complete_day):
        if period_complete_day:
            return period_name + ' (up to %ith)' % period_complete_day
        else:
            return period_name
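
    # Illustrative: get_full_period_name('2013-01', 14) returns
    # '2013-01 (up to 14th)'; a falsy period_complete_day returns just '2013-01'.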
    def download_and_store(self, periods):
        for period_name, period_complete_day, start_date, end_date in periods:
            log.info('Period "%s" (%s - %s)',
                     self.get_full_period_name(period_name, period_complete_day),
                     start_date.strftime('%Y-%m-%d'),
                     end_date.strftime('%Y-%m-%d'))
            if self.delete_first:
                log.info('Deleting existing Analytics for this period "%s"',
                         period_name)
                ga_model.delete(period_name)
            if not self.skip_url_stats:
                # Clean out old url data before storing the new
                ga_model.pre_update_url_stats(period_name)

                accountName = config.get('googleanalytics.account')

                log.info('Downloading analytics for dataset views')
                data = self.download(start_date, end_date, '~/%s/dataset/[a-z0-9-_]+' % accountName)

                log.info('Storing dataset views (%i rows)', len(data.get('url')))
                self.store(period_name, period_complete_day, data, )

                log.info('Downloading analytics for publisher views')
                data = self.download(start_date, end_date, '~/%s/publisher/[a-z0-9-_]+' % accountName)

                log.info('Storing publisher views (%i rows)', len(data.get('url')))
                self.store(period_name, period_complete_day, data,)

                # Make sure the All records are correct.
                ga_model.post_update_url_stats()

                log.info('Associating datasets with their publisher')
                ga_model.update_publisher_stats(period_name)  # about 30 seconds.

            log.info('Downloading and storing analytics for site-wide stats')
            self.sitewide_stats(period_name, period_complete_day)

            log.info('Downloading and storing analytics for social networks')
            self.update_social_info(period_name, start_date, end_date)
    def update_social_info(self, period_name, start_date, end_date):
        start_date = start_date.strftime('%Y-%m-%d')
        end_date = end_date.strftime('%Y-%m-%d')
        query = 'ga:hasSocialSourceReferral=~Yes$'
        metrics = 'ga:entrances'
        sort = '-ga:entrances'

        try:
            # Because of issues of invalid responses, we are going to make these requests
            # ourselves.
            args = dict(ids='ga:' + self.profile_id,
                        filters=query,
                        metrics=metrics,
                        sort=sort,
                        dimensions="ga:landingPagePath,ga:socialNetwork")
            args['max-results'] = 10000
            args['start-date'] = start_date
            args['end-date'] = end_date

            results = self._get_json(args)
        except Exception, e:
            log.exception(e)
            results = dict(url=[])

        data = collections.defaultdict(list)
        rows = results.get('rows', [])
        for row in rows:
            url = _normalize_url('http:/' + row[0])
            data[url].append((row[1], int(row[2]),))
        ga_model.update_social(period_name, data)
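
    # Illustrative `data` passed to ga_model.update_social (values are made up):
    #   {'/dataset/foo': [('Twitter', 12), ('Facebook', 4)]}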
    def download(self, start_date, end_date, path=None):
        '''Get data from GA for a given time period'''
        start_date = start_date.strftime('%Y-%m-%d')
        end_date = end_date.strftime('%Y-%m-%d')
        query = 'ga:pagePath=%s$' % path
        metrics = 'ga:pageviews, ga:visits'
        sort = '-ga:pageviews'

        # Supported query params at
        # https://developers.google.com/analytics/devguides/reporting/core/v3/reference
        try:
            # Because of issues of invalid responses, we are going to make these requests
            # ourselves.
            headers = {'authorization': 'Bearer ' + self.token}

            args = {}
            args["sort"] = "-ga:pageviews"
            args["max-results"] = 100000
            args["dimensions"] = "ga:pagePath"
            args["start-date"] = start_date
            args["end-date"] = end_date
            args["metrics"] = metrics
            args["ids"] = "ga:" + self.profile_id
            args["filters"] = query
            args["alt"] = "json"

            r = requests.get("https://www.googleapis.com/analytics/v3/data/ga", params=args, headers=headers)
            if r.status_code != 200:
                raise Exception("Request with params: %s failed" % args)

            results = json.loads(r.content)
        except Exception, e:
            log.exception(e)
            raise e

        packages = []
        log.info("There are %d results" % results['totalResults'])
        for entry in results.get('rows'):
            (loc, pageviews, visits) = entry
            url = _normalize_url('http:/' + loc)  # strips off domain e.g. www.data.gov.uk or data.gov.uk

            if not url.startswith('/dataset/') and not url.startswith('/publisher/'):
                # filter out strays like:
                # /data/user/login?came_from=http://data.gov.uk/dataset/os-code-point-open
                # /403.html?page=/about&from=http://data.gov.uk/publisher/planning-inspectorate
                continue
            packages.append((url, pageviews, visits,))  # Temporary hack
        return dict(url=packages)
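
    # Illustrative return value of download() (values are made up):
    #   {'url': [('/dataset/os-code-point-open', '1234', '567'), ...]}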
    def store(self, period_name, period_complete_day, data):
        if 'url' in data:
            ga_model.update_url_stats(period_name, period_complete_day, data['url'])

    def sitewide_stats(self, period_name, period_complete_day):
        import calendar
        year, month = period_name.split('-')
        _, last_day_of_month = calendar.monthrange(int(year), int(month))

        start_date = '%s-01' % period_name
        end_date = '%s-%s' % (period_name, last_day_of_month)
        funcs = ['_totals_stats', '_social_stats', '_os_stats',
                 '_locale_stats', '_browser_stats', '_mobile_stats', '_download_stats']
        for f in funcs:
            log.info('Downloading analytics for %s' % f.split('_')[1])
            getattr(self, f)(start_date, end_date, period_name, period_complete_day)

    def _get_results(result_data, f):
        data = {}
        for result in result_data:
            key = f(result)
            data[key] = data.get(key, 0) + result[1]
        return data
    def _get_json(self, params, prev_fail=False):
        if prev_fail:
            import os
            from ga_auth import init_service
            ga_token_filepath = os.path.expanduser(config.get('googleanalytics.token.filepath', ''))
            if not ga_token_filepath:
                print 'ERROR: In the CKAN config you need to specify the filepath of the ' \
                      'Google Analytics token file under key: googleanalytics.token.filepath'
                return

            try:
                self.token, svc = init_service(ga_token_filepath, None)
            except TypeError:
                print ('Have you correctly run the getauthtoken task and '
                       'specified the correct token file in the CKAN config under '
                       '"googleanalytics.token.filepath"?')

        try:
            # Because of issues of invalid responses, we are going to make these requests
            # ourselves.
            headers = {'authorization': 'Bearer ' + self.token}
            r = requests.get("https://www.googleapis.com/analytics/v3/data/ga", params=params, headers=headers)
            if r.status_code != 200:
                log.info("STATUS: %s" % (r.status_code,))
                log.info("CONTENT: %s" % (r.content,))
                raise Exception("Request with params: %s failed" % params)
            return json.loads(r.content)
        except Exception, e:
            if not prev_fail:
                print e
                # Refresh the token and retry once
                return self._get_json(params, prev_fail=True)
            else:
                log.exception(e)
                return dict(url=[])
def _totals_stats(self, start_date, end_date, period_name, period_complete_day): | def _totals_stats(self, start_date, end_date, period_name, period_complete_day): |
""" Fetches distinct totals, total pageviews etc """ | """ Fetches distinct totals, total pageviews etc """ |
results = self.service.data().ga().get( | try: |
ids='ga:' + self.profile_id, | args = {} |
start_date=start_date, | args["max-results"] = 100000 |
metrics='ga:pageviews', | args["start-date"] = start_date |
sort='-ga:pageviews', | args["end-date"] = end_date |
max_results=10000, | args["ids"] = "ga:" + self.profile_id |
end_date=end_date).execute() | |
args["metrics"] = "ga:pageviews" | |
args["sort"] = "-ga:pageviews" | |
args["alt"] = "json" | |
results = self._get_json(args) | |
except Exception, e: | |
log.exception(e) | |
results = dict(url=[]) | |
result_data = results.get('rows') | result_data = results.get('rows') |
ga_model.update_sitewide_stats(period_name, "Totals", {'Total page views': result_data[0][0]}, | ga_model.update_sitewide_stats(period_name, "Totals", {'Total page views': result_data[0][0]}, |
period_complete_day) | period_complete_day) |
results = self.service.data().ga().get( | try: |
ids='ga:' + self.profile_id, | # Because of issues of invalid responses, we are going to make these requests |
start_date=start_date, | # ourselves. |
metrics='ga:pageviewsPerVisit,ga:avgTimeOnSite,ga:percentNewVisits,ga:visits', | headers = {'authorization': 'Bearer ' + self.token} |
max_results=10000, | |
end_date=end_date).execute() | args = {} |
args["max-results"] = 100000 | |
args["start-date"] = start_date | |
args["end-date"] = end_date | |
args["ids"] = "ga:" + self.profile_id | |
args["metrics"] = "ga:pageviewsPerVisit,ga:avgTimeOnSite,ga:percentNewVisits,ga:visits" | |
args["alt"] = "json" | |
results = self._get_json(args) | |
except Exception, e: | |
log.exception(e) | |
results = dict(url=[]) | |
result_data = results.get('rows') | result_data = results.get('rows') |
data = { | data = { |
'Pages per visit': result_data[0][0], | 'Pages per visit': result_data[0][0], |
'Average time on site': result_data[0][1], | 'Average time on site': result_data[0][1], |
'New visits': result_data[0][2], | 'New visits': result_data[0][2], |
'Total visits': result_data[0][3], | 'Total visits': result_data[0][3], |
} | } |
ga_model.update_sitewide_stats(period_name, "Totals", data, period_complete_day) | ga_model.update_sitewide_stats(period_name, "Totals", data, period_complete_day) |
# Bounces from / or another configurable page. | # Bounces from / or another configurable page. |
path = '/%s%s' % (config.get('googleanalytics.account'), | path = '/%s%s' % (config.get('googleanalytics.account'), |
config.get('ga-report.bounce_url', '/')) | config.get('ga-report.bounce_url', '/')) |
results = self.service.data().ga().get( | |
ids='ga:' + self.profile_id, | try: |
filters='ga:pagePath==%s' % (path,), | # Because of issues with invalid responses, we make these requests |
start_date=start_date, | # ourselves. |
metrics='ga:visitBounceRate', | headers = {'authorization': 'Bearer ' + self.token} |
dimensions='ga:pagePath', | |
max_results=10000, | args = {} |
end_date=end_date).execute() | args["max-results"] = 100000 |
args["start-date"] = start_date | |
args["end-date"] = end_date | |
args["ids"] = "ga:" + self.profile_id | |
args["filters"] = 'ga:pagePath==%s' % (path,) | |
args["dimensions"] = 'ga:pagePath' | |
args["metrics"] = "ga:visitBounceRate" | |
args["alt"] = "json" | |
results = self._get_json(args) | |
except Exception, e: | |
log.exception(e) | |
results = dict(url=[]) | |
result_data = results.get('rows') | result_data = results.get('rows') |
if not result_data or len(result_data) != 1: | if not result_data or len(result_data) != 1: |
log.error('Could not pinpoint the bounces for path: %s. Got results: %r', | log.error('Could not pinpoint the bounces for path: %s. Got results: %r', |
path, result_data) | path, result_data) |
return | return |
results = result_data[0] | results = result_data[0] |
bounces = float(results[1]) | bounces = float(results[1]) |
# visitBounceRate is already a % | # visitBounceRate is already a % |
log.info('Google reports visitBounceRate as %s', bounces) | log.info('Google reports visitBounceRate as %s', bounces) |
ga_model.update_sitewide_stats(period_name, "Totals", {'Bounce rate (home page)': float(bounces)}, | ga_model.update_sitewide_stats(period_name, "Totals", {'Bounce rate (home page)': float(bounces)}, |
period_complete_day) | period_complete_day) |
def _locale_stats(self, start_date, end_date, period_name, period_complete_day): | def _locale_stats(self, start_date, end_date, period_name, period_complete_day): |
""" Fetches stats about language and country """ | """ Fetches stats about language and country """ |
results = self.service.data().ga().get( | |
ids='ga:' + self.profile_id, | try: |
start_date=start_date, | # Because of issues with invalid responses, we make these requests |
metrics='ga:pageviews', | # ourselves. |
sort='-ga:pageviews', | headers = {'authorization': 'Bearer ' + self.token} |
dimensions="ga:language,ga:country", | |
max_results=10000, | args = {} |
end_date=end_date).execute() | args["max-results"] = 100000 |
args["start-date"] = start_date | |
args["end-date"] = end_date | |
args["ids"] = "ga:" + self.profile_id | |
args["dimensions"] = "ga:language,ga:country" | |
args["metrics"] = "ga:pageviews" | |
args["sort"] = "-ga:pageviews" | |
args["alt"] = "json" | |
results = self._get_json(args) | |
except Exception, e: | |
log.exception(e) | |
results = dict(url=[]) | |
result_data = results.get('rows') or [] | result_data = results.get('rows') or [] |
data = {} | data = {} |
for result in result_data: | for result in result_data: |
data[result[0]] = data.get(result[0], 0) + int(result[2]) | data[result[0]] = data.get(result[0], 0) + int(result[2]) |
self._filter_out_long_tail(data, MIN_VIEWS) | self._filter_out_long_tail(data, MIN_VIEWS) |
ga_model.update_sitewide_stats(period_name, "Languages", data, period_complete_day) | ga_model.update_sitewide_stats(period_name, "Languages", data, period_complete_day) |
data = {} | data = {} |
for result in result_data: | for result in result_data: |
data[result[1]] = data.get(result[1], 0) + int(result[2]) | data[result[1]] = data.get(result[1], 0) + int(result[2]) |
self._filter_out_long_tail(data, MIN_VIEWS) | self._filter_out_long_tail(data, MIN_VIEWS) |
ga_model.update_sitewide_stats(period_name, "Country", data, period_complete_day) | ga_model.update_sitewide_stats(period_name, "Country", data, period_complete_day) |
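# Illustrative row shape for the query above (hypothetical values): with |
# dimensions "ga:language,ga:country" each row is [language, country, |
# pageviews], e.g. [u'en-gb', u'United Kingdom', u'50'], so result[2] is the |
# pageview count aggregated into both the language and country breakdowns. |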
def _download_stats(self, start_date, end_date, period_name, period_complete_day): | def _download_stats(self, start_date, end_date, period_name, period_complete_day): |
""" Fetches stats about language and country """ | """ Fetches stats about data downloads """ |
results = self.service.data().ga().get( | import ckan.model as model |
ids='ga:' + self.profile_id, | |
start_date=start_date, | data = {} |
filters='ga:eventAction==download', | |
metrics='ga:totalEvents', | try: |
sort='-ga:totalEvents', | # Because of issues with invalid responses, we make these requests |
dimensions="ga:eventLabel", | # ourselves. |
max_results=10000, | headers = {'authorization': 'Bearer ' + self.token} |
end_date=end_date).execute() | |
args = {} | |
args["max-results"] = 100000 | |
args["start-date"] = start_date | |
args["end-date"] = end_date | |
args["ids"] = "ga:" + self.profile_id | |
args["filters"] = 'ga:eventAction==download' | |
args["dimensions"] = "ga:eventLabel" | |
args["metrics"] = "ga:totalEvents" | |
args["alt"] = "json" | |
results = self._get_json(args) | |
except Exception, e: | |
log.exception(e) | |
results = dict(url=[]) | |
result_data = results.get('rows') | result_data = results.get('rows') |
if not result_data: | if not result_data: |
# We may not have data for this time period, so we need to bail | # We may not have data for this time period, so we need to bail |
# early. | # early. |
log.info("There is no download data for this time period") | log.info("There is no download data for this time period") |
return | return |
# e.g. [[url, count], [url, count], ...] | def process_result_data(result_data, cached=False): |
data = {} | progress_total = len(result_data) |
for result in result_data: | progress_count = 0 |
data[result[0]] = data.get(result[0], 0) + int(result[1]) | resources_not_matched = [] |
for result in result_data: | |
progress_count += 1 | |
if progress_count % 100 == 0: | |
log.debug('.. %d/%d done so far', progress_count, progress_total) | |
url = result[0].strip() | |
# Get package id associated with the resource that has this URL. | |
q = model.Session.query(model.Resource) | |
if cached: | |
r = q.filter(model.Resource.cache_url.like("%s%%" % url)).first() | |
else: | |
r = q.filter(model.Resource.url.like("%s%%" % url)).first() | |
package_name = r.resource_group.package.name if r else "" | |
if package_name: | |
data[package_name] = data.get(package_name, 0) + int(result[1]) | |
else: | |
resources_not_matched.append(url) | |
continue | |
if resources_not_matched: | |
log.debug('Could not match %i of %i resource URLs to datasets. e.g. %r', |
len(resources_not_matched), progress_total, resources_not_matched[:3]) | |
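# Each row here is [eventLabel, totalEvents], e.g. (hypothetical values) |
# [u'http://example.com/file.csv', u'7']: process_result_data() prefix-matches |
# the event-label URL against Resource.url (or Resource.cache_url when |
# cached=True) and credits the download count to the owning dataset. |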
log.info('Associating downloads of resource URLs with their respective datasets') | |
process_result_data(results.get('rows')) | |
try: | |
# Because of issues with invalid responses, we make these requests |
# ourselves. |
headers = {'authorization': 'Bearer ' + self.token} | |
args = dict( ids='ga:' + self.profile_id, | |
filters='ga:eventAction==download-cache', | |
metrics='ga:totalEvents', | |
sort='-ga:totalEvents', | |
dimensions="ga:eventLabel", | |
max_results=10000) | |
args['start-date'] = start_date | |
args['end-date'] = end_date | |
results = self._get_json(args) | |
except Exception, e: | |
log.exception(e) | |
results = dict(url=[]) | |
log.info('Associating downloads of cache resource URLs with their respective datasets') | |
process_result_data(results.get('rows'), cached=True) |
self._filter_out_long_tail(data, MIN_DOWNLOADS) | self._filter_out_long_tail(data, MIN_DOWNLOADS) |
ga_model.update_sitewide_stats(period_name, "Downloads", data, period_complete_day) | ga_model.update_sitewide_stats(period_name, "Downloads", data, period_complete_day) |
def _social_stats(self, start_date, end_date, period_name, period_complete_day): | def _social_stats(self, start_date, end_date, period_name, period_complete_day): |
""" Finds out which social sites people are referred from """ | """ Finds out which social sites people are referred from """ |
results = self.service.data().ga().get( | |
ids='ga:' + self.profile_id, | try: |
start_date=start_date, | # Because of issues with invalid responses, we make these requests |
metrics='ga:pageviews', | # ourselves. |
sort='-ga:pageviews', | headers = {'authorization': 'Bearer ' + self.token} |
dimensions="ga:socialNetwork,ga:referralPath", | |
max_results=10000, | args = dict( ids='ga:' + self.profile_id, |
end_date=end_date).execute() | metrics='ga:pageviews', |
sort='-ga:pageviews', | |
dimensions="ga:socialNetwork,ga:referralPath", | |
max_results=10000) | |
args['start-date'] = start_date | |
args['end-date'] = end_date | |
results = self._get_json(args) | |
except Exception, e: | |
log.exception(e) | |
results = dict(url=[]) | |
result_data = results.get('rows') or [] | result_data = results.get('rows') or [] |
data = {} | data = {} |
for result in result_data: | for result in result_data: |
if result[0] != '(not set)': | if result[0] != '(not set)': |
data[result[0]] = data.get(result[0], 0) + int(result[2]) | data[result[0]] = data.get(result[0], 0) + int(result[2]) |
self._filter_out_long_tail(data, 3) | self._filter_out_long_tail(data, 3) |
ga_model.update_sitewide_stats(period_name, "Social sources", data, period_complete_day) | ga_model.update_sitewide_stats(period_name, "Social sources", data, period_complete_day) |
def _os_stats(self, start_date, end_date, period_name, period_complete_day): | def _os_stats(self, start_date, end_date, period_name, period_complete_day): |
""" Operating system stats """ | """ Operating system stats """ |
results = self.service.data().ga().get( | try: |
ids='ga:' + self.profile_id, | # Because of issues with invalid responses, we make these requests |
start_date=start_date, | # ourselves. |
metrics='ga:pageviews', | headers = {'authorization': 'Bearer ' + self.token} |
sort='-ga:pageviews', | |
dimensions="ga:operatingSystem,ga:operatingSystemVersion", | args = dict( ids='ga:' + self.profile_id, |
max_results=10000, | metrics='ga:pageviews', |
end_date=end_date).execute() | sort='-ga:pageviews', |
dimensions="ga:operatingSystem,ga:operatingSystemVersion", | |
max_results=10000) | |
args['start-date'] = start_date | |
args['end-date'] = end_date | |
results = self._get_json(args) | |
except Exception, e: | |
log.exception(e) | |
results = dict(url=[]) | |
result_data = results.get('rows') or [] | result_data = results.get('rows') or [] |
data = {} | data = {} |
for result in result_data: | for result in result_data: |
data[result[0]] = data.get(result[0], 0) + int(result[2]) | data[result[0]] = data.get(result[0], 0) + int(result[2]) |
self._filter_out_long_tail(data, MIN_VIEWS) | self._filter_out_long_tail(data, MIN_VIEWS) |
ga_model.update_sitewide_stats(period_name, "Operating Systems", data, period_complete_day) | ga_model.update_sitewide_stats(period_name, "Operating Systems", data, period_complete_day) |
data = {} | data = {} |
for result in result_data: | for result in result_data: |
if int(result[2]) >= MIN_VIEWS: | if int(result[2]) >= MIN_VIEWS: |
key = "%s %s" % (result[0],result[1]) | key = "%s %s" % (result[0],result[1]) |
data[key] = result[2] | data[key] = result[2] |
ga_model.update_sitewide_stats(period_name, "Operating Systems versions", data, period_complete_day) | ga_model.update_sitewide_stats(period_name, "Operating Systems versions", data, period_complete_day) |
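# Illustrative row (hypothetical values): [u'Windows', u'XP', u'300'] becomes |
# the key 'Windows XP' in the "Operating Systems versions" stats above, |
# provided its view count meets MIN_VIEWS. |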
def _browser_stats(self, start_date, end_date, period_name, period_complete_day): | def _browser_stats(self, start_date, end_date, period_name, period_complete_day): |
""" Information about browsers and browser versions """ | """ Information about browsers and browser versions """ |
results = self.service.data().ga().get( | |
ids='ga:' + self.profile_id, | try: |
start_date=start_date, | # Because of issues with invalid responses, we make these requests |
metrics='ga:pageviews', | # ourselves. |
sort='-ga:pageviews', | headers = {'authorization': 'Bearer ' + self.token} |
dimensions="ga:browser,ga:browserVersion", | |
max_results=10000, | args = dict( ids='ga:' + self.profile_id, |
end_date=end_date).execute() | metrics='ga:pageviews', |
sort='-ga:pageviews', | |
dimensions="ga:browser,ga:browserVersion", | |
max_results=10000) | |
args['start-date'] = start_date | |
args['end-date'] = end_date | |
results = self._get_json(args) | |
except Exception, e: | |
log.exception(e) | |
results = dict(url=[]) | |
result_data = results.get('rows') or [] | result_data = results.get('rows') or [] |
# e.g. [u'Firefox', u'19.0', u'20'] | # e.g. [u'Firefox', u'19.0', u'20'] |
data = {} | data = {} |
for result in result_data: | for result in result_data: |
data[result[0]] = data.get(result[0], 0) + int(result[2]) | data[result[0]] = data.get(result[0], 0) + int(result[2]) |
self._filter_out_long_tail(data, MIN_VIEWS) | self._filter_out_long_tail(data, MIN_VIEWS) |
ga_model.update_sitewide_stats(period_name, "Browsers", data, period_complete_day) | ga_model.update_sitewide_stats(period_name, "Browsers", data, period_complete_day) |
data = {} | data = {} |
for result in result_data: | for result in result_data: |
key = "%s %s" % (result[0], self._filter_browser_version(result[0], result[1])) | key = "%s %s" % (result[0], self._filter_browser_version(result[0], result[1])) |
data[key] = data.get(key, 0) + int(result[2]) | data[key] = data.get(key, 0) + int(result[2]) |
self._filter_out_long_tail(data, MIN_VIEWS) | self._filter_out_long_tail(data, MIN_VIEWS) |
ga_model.update_sitewide_stats(period_name, "Browser versions", data, period_complete_day) | ga_model.update_sitewide_stats(period_name, "Browser versions", data, period_complete_day) |
@classmethod | @classmethod |
def _filter_browser_version(cls, browser, version_str): | def _filter_browser_version(cls, browser, version_str): |
''' | ''' |
Simplifies a browser version string if it is detailed. | Simplifies a browser version string if it is detailed. |
i.e. groups together Firefox 3.5.1 and 3.5.2 to be just 3.5. | i.e. groups together Firefox 3.5.1 and 3.5.2 to be just 3.5. |
This is helpful when viewing stats and helps protect privacy. | This is helpful when viewing stats and helps protect privacy. |
''' | ''' |
ver = version_str | ver = version_str |
parts = ver.split('.') | parts = ver.split('.') |
if len(parts) > 1: | if len(parts) > 1: |
if parts[1][0] == '0': | if parts[1][0] == '0': |
ver = parts[0] | ver = parts[0] |
else: | else: |
ver = "%s" % (parts[0]) | ver = "%s" % (parts[0]) |
# Special case complex version nums | # Special case complex version nums |
if browser in ['Safari', 'Android Browser']: | if browser in ['Safari', 'Android Browser']: |
ver = parts[0] | ver = parts[0] |
if len(ver) > 2: | if len(ver) > 2: |
num_hidden_digits = len(ver) - 2 | num_hidden_digits = len(ver) - 2 |
ver = ver[0] + ver[1] + 'X' * num_hidden_digits | ver = ver[0] + ver[1] + 'X' * num_hidden_digits |
return ver | return ver |
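# Worked examples (hypothetical inputs): |
#   _filter_browser_version('Firefox', '19.0')  -> '19'   minor version starts with '0' |
#   _filter_browser_version('Firefox', '3.5.1') -> '3.5'  keep the first digit of the minor version |
#   _filter_browser_version('Safari', '537.36') -> '53X'  complex versions keep two digits, rest masked |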
def _mobile_stats(self, start_date, end_date, period_name, period_complete_day): | def _mobile_stats(self, start_date, end_date, period_name, period_complete_day): |
""" Info about mobile devices """ | """ Info about mobile devices """ |
results = self.service.data().ga().get( | try: |
ids='ga:' + self.profile_id, | # Because of issues with invalid responses, we make these requests |
start_date=start_date, | # ourselves. |
metrics='ga:pageviews', | headers = {'authorization': 'Bearer ' + self.token} |
sort='-ga:pageviews', | |
dimensions="ga:mobileDeviceBranding, ga:mobileDeviceInfo", | args = dict( ids='ga:' + self.profile_id, |
max_results=10000, | metrics='ga:pageviews', |
end_date=end_date).execute() | sort='-ga:pageviews', |
dimensions="ga:mobileDeviceBranding, ga:mobileDeviceInfo", | |
max_results=10000) | |
args['start-date'] = start_date | |
args['end-date'] = end_date | |
results = self._get_json(args) | |
except Exception, e: | |
log.exception(e) | |
results = dict(url=[]) | |
result_data = results.get('rows') or [] | result_data = results.get('rows') or [] |
data = {} | data = {} |
for result in result_data: | for result in result_data: |
data[result[0]] = data.get(result[0], 0) + int(result[2]) | data[result[0]] = data.get(result[0], 0) + int(result[2]) |
self._filter_out_long_tail(data, MIN_VIEWS) | self._filter_out_long_tail(data, MIN_VIEWS) |
ga_model.update_sitewide_stats(period_name, "Mobile brands", data, period_complete_day) | ga_model.update_sitewide_stats(period_name, "Mobile brands", data, period_complete_day) |
data = {} | data = {} |
for result in result_data: | for result in result_data: |