[854] Fixes unexplained failure when fetching publisher information.
Whilst we are still using the Google lib, we only let it do the OAuth dance;
after that we take over, making the requests ourselves with the
python-requests library. In cases where a request fails with a 401 we will
retry the request ONCE, after refreshing the OAuth token.
Re-enabled all of the relevant links and HTML blocks that were commented out.
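
The retry flow, roughly (a minimal sketch with hypothetical names;
refresh_token() stands in for re-running init_service on the stored
token file):

    import json
    import requests

    GA_URL = 'https://www.googleapis.com/analytics/v3/data/ga'

    def fetch_ga(params, token, refresh_token):
        # First attempt with the current Bearer token
        r = requests.get(GA_URL, params=params,
                         headers={'authorization': 'Bearer ' + token})
        if r.status_code == 401:
            # Token rejected: refresh it and retry exactly once
            r = requests.get(GA_URL, params=params,
                             headers={'authorization': 'Bearer ' + refresh_token()})
        r.raise_for_status()
        return json.loads(r.content)
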
--- a/ckanext/ga_report/controller.py
+++ b/ckanext/ga_report/controller.py
@@ -191,32 +191,11 @@
q = model.Session.query(GA_Stat).\
filter(GA_Stat.stat_name==k).\
order_by(GA_Stat.period_name)
- # Run the query on all months to gather graph data
- series = {}
- x_axis = set()
- for stat in q:
- x_val = _get_unix_epoch(stat.period_name)
- series[ stat.key ] = series.get(stat.key,{})
- series[ stat.key ][x_val] = float(stat.value)
- x_axis.add(x_val)
- # Common x-axis for all series. Exclude this month (incomplete data)
- x_axis = sorted(list(x_axis))[:-1]
- # Buffer a rickshaw dataset from the series
- def create_graph(series_name, series_data):
- return {
- 'name':series_name,
- 'data':[ {'x':x,'y':series_data.get(x,0)} for x in x_axis ]
- }
- rickshaw = [ create_graph(name,data) for name, data in series.items() ]
- rickshaw = sorted(rickshaw,key=lambda x:x['data'][-1]['y'])
- setattr(c, v+'_graph', json.dumps(rickshaw))
-
# Buffer the tabular data
if c.month:
entries = []
q = q.filter(GA_Stat.period_name==c.month).\
order_by('ga_stat.value::int desc')
-
d = collections.defaultdict(int)
for e in q.all():
d[e.key] += int(e.value)
@@ -225,6 +204,23 @@
entries.append((key,val,))
entries = sorted(entries, key=operator.itemgetter(1), reverse=True)
+ # Run a query on all months to gather graph data
+ graph_query = model.Session.query(GA_Stat).\
+ filter(GA_Stat.stat_name==k).\
+ order_by(GA_Stat.period_name)
+ graph_dict = {}
+ for stat in graph_query:
+ graph_dict[ stat.key ] = graph_dict.get(stat.key,{
+ 'name':stat.key,
+ 'raw': {}
+ })
+ graph_dict[ stat.key ]['raw'][stat.period_name] = float(stat.value)
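+            # Order graph series to match the table; series that do not
+            # appear in the table are appended alphabetically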
+ stats_in_table = [x[0] for x in entries]
+ stats_not_in_table = set(graph_dict.keys()) - set(stats_in_table)
+ stats = stats_in_table + sorted(list(stats_not_in_table))
+ graph = [graph_dict[x] for x in stats]
+ setattr(c, v+'_graph', json.dumps( _to_rickshaw(graph,percentageMode=True) ))
+
# Get the total for each set of values and then set the value as
# a percentage of the total
if k == 'Social sources':
@@ -253,7 +249,7 @@
writer = csv.writer(response)
writer.writerow(["Publisher Title", "Publisher Name", "Views", "Visits", "Period Name"])
- top_publishers, top_publishers_graph = _get_top_publishers(None)
+ top_publishers = _get_top_publishers(limit=None)
for publisher,view,visit in top_publishers:
writer.writerow([publisher.title.encode('utf-8'),
@@ -275,7 +271,7 @@
if not c.publisher:
abort(404, 'A publisher with that name could not be found')
- packages = self._get_packages(c.publisher)
+ packages = self._get_packages(publisher=c.publisher, month=c.month)
response.headers['Content-Type'] = "text/csv; charset=utf-8"
response.headers['Content-Disposition'] = \
str('attachment; filename=datasets_%s_%s.csv' % (c.publisher_name, month,))
@@ -304,15 +300,18 @@
if c.month:
c.month_desc = ''.join([m[1] for m in c.months if m[0]==c.month])
- c.top_publishers, graph_data = _get_top_publishers()
- c.top_publishers_graph = json.dumps( _to_rickshaw(graph_data.values()) )
-
- return render('ga_report/publisher/index.html')
-
- def _get_packages(self, publisher=None, count=-1):
+ c.top_publishers = _get_top_publishers()
+ graph_data = _get_top_publishers_graph()
+ c.top_publishers_graph = json.dumps( _to_rickshaw(graph_data) )
+
+        return render('ga_report/publisher/index.html')
+
+ def _get_packages(self, publisher=None, month='', count=-1):
'''Returns the datasets in order of views'''
have_download_data = True
- month = c.month or 'All'
+ month = month or 'All'
if month != 'All':
have_download_data = month >= DOWNLOADS_AVAILABLE_FROM
@@ -389,42 +388,73 @@
entry = q.filter(GA_Url.period_name==c.month).first()
c.publisher_page_views = entry.pageviews if entry else 0
- c.top_packages = self._get_packages(c.publisher, 20)
+ c.top_packages = self._get_packages(publisher=c.publisher, count=20, month=c.month)
# Graph query
- top_package_names = [ x[0].name for x in c.top_packages ]
+ top_packages_all_time = self._get_packages(publisher=c.publisher, count=20, month='All')
+ top_package_names = [ x[0].name for x in top_packages_all_time ]
graph_query = model.Session.query(GA_Url,model.Package)\
.filter(model.Package.name==GA_Url.package_id)\
.filter(GA_Url.url.like('/dataset/%'))\
.filter(GA_Url.package_id.in_(top_package_names))
- graph_data = {}
+ all_series = {}
for entry,package in graph_query:
if not package: continue
if entry.period_name=='All': continue
- graph_data[package.id] = graph_data.get(package.id,{
+ all_series[package.name] = all_series.get(package.name,{
'name':package.title,
- 'data':[]
+ 'raw': {}
})
- graph_data[package.id]['data'].append({
- 'x':_get_unix_epoch(entry.period_name),
- 'y':int(entry.pageviews),
- })
-
- c.graph_data = json.dumps( _to_rickshaw(graph_data.values()) )
+ all_series[package.name]['raw'][entry.period_name] = int(entry.pageviews)
+        graph = [ all_series[name] for name in top_package_names
+                  if name in all_series ]
+ c.graph_data = json.dumps( _to_rickshaw(graph) )
return render('ga_report/publisher/read.html')
-def _to_rickshaw(data):
- num_points = []
- for package in data:
- package['data'] = sorted( package['data'], key=lambda x:x['x'] )
- num_points.append( len(package['data']) )
- if len(set(num_points))>1:
- example = num_points[ num_points.index(max(num_points)) ]
- for package in data:
- while len(package['data'])<example:
- package['data'].insert(0, package['data'][0])
+def _to_rickshaw(data, percentageMode=False):
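+    # Each series arrives as {'name': ..., 'raw': {period_name: value}} and
+    # leaves with a rickshaw-style 'data' list: [{'x': epoch, 'y': value}]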
+ if data==[]:
+ return data
+ # x-axis is every month in c.months. Note that data might not exist
+ # for entire history, eg. for recently-added datasets
+ x_axis = [x[0] for x in c.months]
+ x_axis.reverse() # Ascending order
+ x_axis = x_axis[:-1] # Remove latest month
+ totals = {}
+ for series in data:
+ series['data'] = []
+ for x_string in x_axis:
+ x = _get_unix_epoch( x_string )
+ y = series['raw'].get(x_string,0)
+ series['data'].append({'x':x,'y':y})
+ totals[x] = totals.get(x,0)+y
+ if not percentageMode:
+ return data
+ # Turn all data into percentages
+ # Roll insignificant series into a catch-all
+ THRESHOLD = 1
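+    # A series is kept as soon as any month's share exceeds THRESHOLD
+    # percent; all remaining series are summed into 'Other' below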
+ raw_data = data
+ data = []
+ for series in raw_data:
+ for point in series['data']:
+ percentage = (100*float(point['y'])) / totals[point['x']]
+ if not (series in data) and percentage>THRESHOLD:
+ data.append(series)
+ point['y'] = percentage
+ others = [ x for x in raw_data if not (x in data) ]
+ if len(others):
+ data_other = []
+ for i in range(len(x_axis)):
+ x = _get_unix_epoch(x_axis[i])
+ y = 0
+ for series in others:
+ y += series['data'][i]['y']
+ data_other.append({'x':x,'y':y})
+ data.append({
+ 'name':'Other',
+ 'data': data_other
+ })
return data
+
def _get_top_publishers(limit=20):
'''
@@ -447,35 +477,51 @@
top_publishers = []
res = connection.execute(q, month)
- department_ids = []
for row in res:
g = model.Group.get(row[0])
if g:
- department_ids.append(row[0])
top_publishers.append((g, row[1], row[2]))
-
- graph = {}
- if limit is not None:
- # Query for a history graph of these publishers
- q = model.Session.query(
- GA_Url.department_id,
- GA_Url.period_name,
- func.sum(cast(GA_Url.pageviews,sqlalchemy.types.INT)))\
- .filter( GA_Url.department_id.in_(department_ids) )\
- .filter( GA_Url.period_name!='All' )\
- .filter( GA_Url.url.like('/dataset/%') )\
- .filter( GA_Url.package_id!='' )\
- .group_by( GA_Url.department_id, GA_Url.period_name )
- for dept_id,period_name,views in q:
- graph[dept_id] = graph.get( dept_id, {
- 'name' : model.Group.get(dept_id).title,
- 'data' : []
- })
- graph[dept_id]['data'].append({
- 'x': _get_unix_epoch(period_name),
- 'y': views
- })
- return top_publishers, graph
+ return top_publishers
+
+
+def _get_top_publishers_graph(limit=20):
+    '''
+    Returns graph data (one series per publisher) for the top 20 publishers
+    by dataset visits. (The number of publishers can be varied with 'limit')
+    '''
+ connection = model.Session.connection()
+ q = """
+ select department_id, sum(pageviews::int) views
+ from ga_url
+ where department_id <> ''
+ and package_id <> ''
+ and url like '/dataset/%%'
+ and period_name='All'
+ group by department_id order by views desc
+ """
+ if limit:
+ q = q + " limit %s;" % (limit)
+
+ res = connection.execute(q)
+ department_ids = [ row[0] for row in res ]
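+    # Keep this ordering (descending views) - it determines the order of
+    # the returned series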
+
+ # Query for a history graph of these department ids
+ q = model.Session.query(
+ GA_Url.department_id,
+ GA_Url.period_name,
+ func.sum(cast(GA_Url.pageviews,sqlalchemy.types.INT)))\
+ .filter( GA_Url.department_id.in_(department_ids) )\
+ .filter( GA_Url.url.like('/dataset/%') )\
+ .filter( GA_Url.package_id!='' )\
+ .group_by( GA_Url.department_id, GA_Url.period_name )
+ graph_dict = {}
+ for dept_id,period_name,views in q:
+ graph_dict[dept_id] = graph_dict.get( dept_id, {
+ 'name' : model.Group.get(dept_id).title,
+ 'raw' : {}
+ })
+ graph_dict[dept_id]['raw'][period_name] = views
+ return [ graph_dict[id] for id in department_ids ]
def _get_publishers():
--- a/ckanext/ga_report/download_analytics.py
+++ b/ckanext/ga_report/download_analytics.py
@@ -1,7 +1,10 @@
import os
import logging
import datetime
+import httplib
import collections
+import requests
+import json
from pylons import config
from ga_model import _normalize_url
import ga_model
@@ -18,13 +21,14 @@
class DownloadAnalytics(object):
'''Downloads and stores analytics info'''
- def __init__(self, service=None, profile_id=None, delete_first=False,
+ def __init__(self, service=None, token=None, profile_id=None, delete_first=False,
skip_url_stats=False):
self.period = config['ga-report.period']
self.service = service
self.profile_id = profile_id
self.delete_first = delete_first
self.skip_url_stats = skip_url_stats
+ self.token = token
def specific_month(self, date):
import calendar
@@ -32,6 +36,11 @@
first_of_this_month = datetime.datetime(date.year, date.month, 1)
_, last_day_of_month = calendar.monthrange(int(date.year), int(date.month))
last_of_this_month = datetime.datetime(date.year, date.month, last_day_of_month)
+ # if this is the latest month, note that it is only up until today
+ now = datetime.datetime.now()
+ if now.year == date.year and now.month == date.month:
+ last_day_of_month = now.day
+ last_of_this_month = now
periods = ((date.strftime(FORMAT_MONTH),
last_day_of_month,
first_of_this_month, last_of_this_month),)
@@ -126,7 +135,7 @@
# Make sure the All records are correct.
ga_model.post_update_url_stats()
- log.info('Aggregating datasets by publisher')
+ log.info('Associating datasets with their publisher')
ga_model.update_publisher_stats(period_name) # about 30 seconds.
@@ -144,17 +153,27 @@
metrics = 'ga:entrances'
sort = '-ga:entrances'
- # Supported query params at
- # https://developers.google.com/analytics/devguides/reporting/core/v3/reference
- results = self.service.data().ga().get(
- ids='ga:' + self.profile_id,
- filters=query,
- start_date=start_date,
- metrics=metrics,
- sort=sort,
- dimensions="ga:landingPagePath,ga:socialNetwork",
- max_results=10000,
- end_date=end_date).execute()
+ try:
+ # Because of issues of invalid responses, we are going to make these requests
+ # ourselves.
+            args = dict(ids='ga:' + self.profile_id,
+                        filters=query,
+                        metrics=metrics,
+                        sort=sort,
+                        dimensions="ga:landingPagePath,ga:socialNetwork")
+
+            # The API expects hyphenated parameter names, which cannot be
+            # passed as dict() keywords
+            args['start-date'] = start_date
+            args['end-date'] = end_date
+            args['max-results'] = 10000
+
+ results = self._get_json(args)
+ except Exception, e:
+ log.exception(e)
+ results = dict(url=[])
+
+
data = collections.defaultdict(list)
rows = results.get('rows',[])
for row in rows:
@@ -173,15 +192,32 @@
# Supported query params at
# https://developers.google.com/analytics/devguides/reporting/core/v3/reference
- results = self.service.data().ga().get(
- ids='ga:' + self.profile_id,
- filters=query,
- start_date=start_date,
- metrics=metrics,
- sort=sort,
- dimensions="ga:pagePath",
- max_results=10000,
- end_date=end_date).execute()
+ try:
+ # Because of issues of invalid responses, we are going to make these requests
+ # ourselves.
+ headers = {'authorization': 'Bearer ' + self.token}
+
+ args = {}
+ args["sort"] = "-ga:pageviews"
+ args["max-results"] = 100000
+ args["dimensions"] = "ga:pagePath"
+ args["start-date"] = start_date
+ args["end-date"] = end_date
+ args["metrics"] = metrics
+ args["ids"] = "ga:" + self.profile_id
+ args["filters"] = query
+ args["alt"] = "json"
+
+ r = requests.get("https://www.googleapis.com/analytics/v3/data/ga", params=args, headers=headers)
+ if r.status_code != 200:
+ raise Exception("Request with params: %s failed" % args)
+
+ results = json.loads(r.content)
+ except Exception, e:
+ log.exception(e)
+ #return dict(url=[])
+ raise e
packages = []
log.info("There are %d results" % results['totalResults'])
@@ -221,25 +257,83 @@
data[key] = data.get(key,0) + result[1]
return data
+ def _get_json(self, params, prev_fail=False):
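+        """ Makes a request to the GA API via python-requests. On failure
+        the OAuth token is refreshed and the request retried once
+        (signalled by prev_fail=True). """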
+ if prev_fail:
+            # os is imported at module level; init_service re-runs the
+            # OAuth dance to obtain a fresh token
+            from ga_auth import init_service
+ ga_token_filepath = os.path.expanduser(config.get('googleanalytics.token.filepath', ''))
+ if not ga_token_filepath:
+ print 'ERROR: In the CKAN config you need to specify the filepath of the ' \
+ 'Google Analytics token file under key: googleanalytics.token.filepath'
+ return
+
+ try:
+ self.token, svc = init_service(ga_token_filepath, None)
+ except TypeError:
+ print ('Have you correctly run the getauthtoken task and '
+ 'specified the correct token file in the CKAN config under '
+ '"googleanalytics.token.filepath"?')
+
+ try:
+ # Because of issues of invalid responses, we are going to make these requests
+ # ourselves.
+ headers = {'authorization': 'Bearer ' + self.token}
+ r = requests.get("https://www.googleapis.com/analytics/v3/data/ga", params=params, headers=headers)
+ if r.status_code != 200:
+ log.info("STATUS: %s" % (r.status_code,))
+ log.info("CONTENT: %s" % (r.content,))
+ raise Exception("Request with params: %s failed" % params)
+
+ return json.loads(r.content)
+        except Exception, e:
+            if not prev_fail:
+                print e
+                # Retry exactly once; the prev_fail branch will refresh
+                # the token before making the request again
+                return self._get_json(params, prev_fail=True)
+            else:
+                log.exception(e)
+
+        return dict(url=[])
+
def _totals_stats(self, start_date, end_date, period_name, period_complete_day):
""" Fetches distinct totals, total pageviews etc """
- results = self.service.data().ga().get(
- ids='ga:' + self.profile_id,
- start_date=start_date,
- metrics='ga:pageviews',
- sort='-ga:pageviews',
- max_results=10000,
- end_date=end_date).execute()
+ try:
+ args = {}
+ args["max-results"] = 100000
+ args["start-date"] = start_date
+ args["end-date"] = end_date
+ args["ids"] = "ga:" + self.profile_id
+
+ args["metrics"] = "ga:pageviews"
+ args["sort"] = "-ga:pageviews"
+ args["alt"] = "json"
+
+ results = self._get_json(args)
+ except Exception, e:
+ log.exception(e)
+ results = dict(url=[])
+
result_data = results.get('rows')
ga_model.update_sitewide_stats(period_name, "Totals", {'Total page views': result_data[0][0]},
period_complete_day)
- results = self.service.data().ga().get(
- ids='ga:' + self.profile_id,
- start_date=start_date,
- metrics='ga:pageviewsPerVisit,ga:avgTimeOnSite,ga:percentNewVisits,ga:visits',
- max_results=10000,
- end_date=end_date).execute()
+ try:
+ # Because of issues of invalid responses, we are going to make these requests
+ # ourselves.
+
+ args = {}
+ args["max-results"] = 100000
+ args["start-date"] = start_date
+ args["end-date"] = end_date
+ args["ids"] = "ga:" + self.profile_id
+
+ args["metrics"] = "ga:pageviewsPerVisit,ga:avgTimeOnSite,ga:percentNewVisits,ga:visits"
+ args["alt"] = "json"
+
+ results = self._get_json(args)
+ except Exception, e:
+ log.exception(e)
+ results = dict(url=[])
+
result_data = results.get('rows')
data = {
'Pages per visit': result_data[0][0],
@@ -252,14 +346,28 @@
# Bounces from / or another configurable page.
path = '/%s%s' % (config.get('googleanalytics.account'),
config.get('ga-report.bounce_url', '/'))
- results = self.service.data().ga().get(
- ids='ga:' + self.profile_id,
- filters='ga:pagePath==%s' % (path,),
- start_date=start_date,
- metrics='ga:visitBounceRate',
- dimensions='ga:pagePath',
- max_results=10000,
- end_date=end_date).execute()
+
+ try:
+ # Because of issues of invalid responses, we are going to make these requests
+ # ourselves.
+
+ args = {}
+ args["max-results"] = 100000
+ args["start-date"] = start_date
+ args["end-date"] = end_date
+ args["ids"] = "ga:" + self.profile_id
+
+ args["filters"] = 'ga:pagePath==%s' % (path,)
+ args["dimensions"] = 'ga:pagePath'
+ args["metrics"] = "ga:visitBounceRate"
+ args["alt"] = "json"
+
+ results = self._get_json(args)
+ except Exception, e:
+ log.exception(e)
+ results = dict(url=[])
+
result_data = results.get('rows')
if not result_data or len(result_data) != 1:
log.error('Could not pinpoint the bounces for path: %s. Got results: %r',
@@ -275,14 +383,28 @@
def _locale_stats(self, start_date, end_date, period_name, period_complete_day):
""" Fetches stats about language and country """
- results = self.service.data().ga().get(
- ids='ga:' + self.profile_id,
- start_date=start_date,
- metrics='ga:pageviews',
- sort='-ga:pageviews',
- dimensions="ga:language,ga:country",
- max_results=10000,
- end_date=end_date).execute()
+
+ try:
+ # Because of issues of invalid responses, we are going to make these requests
+ # ourselves.
+
+ args = {}
+ args["max-results"] = 100000
+ args["start-date"] = start_date
+ args["end-date"] = end_date
+ args["ids"] = "ga:" + self.profile_id
+
+ args["dimensions"] = "ga:language,ga:country"
+ args["metrics"] = "ga:pageviews"
+ args["sort"] = "-ga:pageviews"
+ args["alt"] = "json"
+
+ results = self._get_json(args)
+ except Exception, e:
+ log.exception(e)
+ results = dict(url=[])
+
result_data = results.get('rows')
data = {}
for result in result_data:
@@ -298,20 +420,32 @@
def _download_stats(self, start_date, end_date, period_name, period_complete_day):
- """ Fetches stats about language and country """
+ """ Fetches stats about data downloads """
import ckan.model as model
data = {}
- results = self.service.data().ga().get(
- ids='ga:' + self.profile_id,
- start_date=start_date,
- filters='ga:eventAction==download',
- metrics='ga:totalEvents',
- sort='-ga:totalEvents',
- dimensions="ga:eventLabel",
- max_results=10000,
- end_date=end_date).execute()
+ try:
+ # Because of issues of invalid responses, we are going to make these requests
+ # ourselves.
+
+ args = {}
+ args["max-results"] = 100000
+ args["start-date"] = start_date
+ args["end-date"] = end_date
+ args["ids"] = "ga:" + self.profile_id
+
+ args["filters"] = 'ga:eventAction==download'
+ args["dimensions"] = "ga:eventLabel"
+ args["metrics"] = "ga:totalEvents"
+ args["alt"] = "json"
+
+ results = self._get_json(args)
+ except Exception, e:
+ log.exception(e)
+ results = dict(url=[])
+
result_data = results.get('rows')
if not result_data:
# We may not have data for this time period, so we need to bail
@@ -320,7 +454,14 @@
return
def process_result_data(result_data, cached=False):
+ progress_total = len(result_data)
+ progress_count = 0
+ resources_not_matched = []
for result in result_data:
+ progress_count += 1
+ if progress_count % 100 == 0:
+ log.debug('.. %d/%d done so far', progress_count, progress_total)
+
url = result[0].strip()
# Get package id associated with the resource that has this URL.
@@ -334,20 +475,35 @@
if package_name:
data[package_name] = data.get(package_name, 0) + int(result[1])
else:
- log.warning(u"Could not find resource for URL: {url}".format(url=url))
+ resources_not_matched.append(url)
continue
-
+ if resources_not_matched:
+            log.debug('Could not match %i of %i resource URLs to datasets. e.g. %r',
+                      len(resources_not_matched), progress_total, resources_not_matched[:3])
+
+ log.info('Associating downloads of resource URLs with their respective datasets')
process_result_data(results.get('rows'))
- results = self.service.data().ga().get(
- ids='ga:' + self.profile_id,
- start_date=start_date,
- filters='ga:eventAction==download-cache',
- metrics='ga:totalEvents',
- sort='-ga:totalEvents',
- dimensions="ga:eventLabel",
- max_results=10000,
- end_date=end_date).execute()
+ try:
+ # Because of issues of invalid responses, we are going to make these requests
+ # ourselves.
+            args = dict( ids='ga:' + self.profile_id,
+                         filters='ga:eventAction==download-cache',
+                         metrics='ga:totalEvents',
+                         sort='-ga:totalEvents',
+                         dimensions="ga:eventLabel")
+            args['start-date'] = start_date
+            args['end-date'] = end_date
+            # 'max-results' must be hyphenated, so cannot be a dict() keyword
+            args['max-results'] = 10000
+
+ results = self._get_json(args)
+ except Exception, e:
+ log.exception(e)
+ results = dict(url=[])
+
+ log.info('Associating downloads of cache resource URLs with their respective datasets')
+        process_result_data(results.get('rows'), cached=True)
self._filter_out_long_tail(data, MIN_DOWNLOADS)
@@ -355,14 +511,25 @@
def _social_stats(self, start_date, end_date, period_name, period_complete_day):
""" Finds out which social sites people are referred from """
- results = self.service.data().ga().get(
- ids='ga:' + self.profile_id,
- start_date=start_date,
- metrics='ga:pageviews',
- sort='-ga:pageviews',
- dimensions="ga:socialNetwork,ga:referralPath",
- max_results=10000,
- end_date=end_date).execute()
+
+ try:
+ # Because of issues of invalid responses, we are going to make these requests
+ # ourselves.
+            args = dict( ids='ga:' + self.profile_id,
+                         metrics='ga:pageviews',
+                         sort='-ga:pageviews',
+                         dimensions="ga:socialNetwork,ga:referralPath")
+            args['start-date'] = start_date
+            args['end-date'] = end_date
+            # 'max-results' must be hyphenated, so cannot be a dict() keyword
+            args['max-results'] = 10000
+
+ results = self._get_json(args)
+ except Exception, e:
+ log.exception(e)
+ results = dict(url=[])
+
result_data = results.get('rows')
data = {}
for result in result_data:
@@ -374,14 +541,24 @@
def _os_stats(self, start_date, end_date, period_name, period_complete_day):
""" Operating system stats """
- results = self.service.data().ga().get(
- ids='ga:' + self.profile_id,
- start_date=start_date,
- metrics='ga:pageviews',
- sort='-ga:pageviews',
- dimensions="ga:operatingSystem,ga:operatingSystemVersion",
- max_results=10000,
- end_date=end_date).execute()
+ try:
+ # Because of issues of invalid responses, we are going to make these requests
+ # ourselves.
+            args = dict( ids='ga:' + self.profile_id,
+                         metrics='ga:pageviews',
+                         sort='-ga:pageviews',
+                         dimensions="ga:operatingSystem,ga:operatingSystemVersion")
+            args['start-date'] = start_date
+            args['end-date'] = end_date
+            # 'max-results' must be hyphenated, so cannot be a dict() keyword
+            args['max-results'] = 10000
+
+ results = self._get_json(args)
+ except Exception, e:
+ log.exception(e)
+ results = dict(url=[])
+
result_data = results.get('rows')
data = {}
for result in result_data:
@@ -399,14 +576,27 @@
def _browser_stats(self, start_date, end_date, period_name, period_complete_day):
""" Information about browsers and browser versions """
- results = self.service.data().ga().get(
- ids='ga:' + self.profile_id,
- start_date=start_date,
- metrics='ga:pageviews',
- sort='-ga:pageviews',
- dimensions="ga:browser,ga:browserVersion",
- max_results=10000,
- end_date=end_date).execute()
+
+ try:
+ # Because of issues o