Refactor and tidy the Rickshaw graph code; insert placeholder graphs; sort the front-page sparklines into a sensible order.
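For reference, the series objects built below (and serialised with json.dumps(_to_rickshaw(...)) for the templates) follow the usual Rickshaw shape: a list of named series whose points carry a unix-epoch month as x and a pageview count as y. This is an illustrative sketch only; the publisher name and values are made up:

    # Illustrative shape only; the publisher name and numbers are invented.
    graph_data = [
        {'name': 'Example Publisher',
         'data': [{'x': 1349049600, 'y': 210},   # e.g. 2012-10 as a unix epoch
                  {'x': 1351728000, 'y': 180}]}  # e.g. 2012-11
    ]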
[ckanext-ga-report.git] ckanext/ga_report/controller.py
diff --git a/ckanext/ga_report/controller.py b/ckanext/ga_report/controller.py
--- a/ckanext/ga_report/controller.py
+++ b/ckanext/ga_report/controller.py
@@ -253,7 +253,9 @@
         writer = csv.writer(response)
         writer.writerow(["Publisher Title", "Publisher Name", "Views", "Visits", "Period Name"])
 
-        for publisher,view,visit in _get_top_publishers(None):
+        top_publishers, top_publishers_graph = _get_top_publishers(None)
+
+        for publisher,view,visit in top_publishers:
             writer.writerow([publisher.title.encode('utf-8'),
                              publisher.name.encode('utf-8'),
                              view,
@@ -302,7 +304,9 @@
         if c.month:
             c.month_desc = ''.join([m[1] for m in c.months if m[0]==c.month])
 
-        c.top_publishers = _get_top_publishers()
+        c.top_publishers, graph_data = _get_top_publishers()
+        c.top_publishers_graph = json.dumps( _to_rickshaw(graph_data.values()) )
+
         return render('ga_report/publisher/index.html')
 
     def _get_packages(self, publisher=None, count=-1):
@@ -334,8 +338,9 @@
                         filter(GA_Stat.key==package.name)
                     if month != 'All':  # Fetch everything unless the month is specific
                         dls = dls.filter(GA_Stat.period_name==month)
-
-                    downloads = sum(int(d.value) for d in dls.all())
+                    downloads = 0
+                    for x in dls:
+                        downloads += int(x.value)
                 else:
                     downloads = 'No data'
                 top_packages.append((package, entry.pageviews, entry.visits, downloads))
@@ -386,7 +391,40 @@
 
         c.top_packages = self._get_packages(c.publisher, 20)
 
+        # Graph query
+        top_package_names = [ x[0].name for x in c.top_packages ]
+        graph_query = model.Session.query(GA_Url,model.Package)\
+            .filter(model.Package.name==GA_Url.package_id)\
+            .filter(GA_Url.url.like('/dataset/%'))\
+            .filter(GA_Url.package_id.in_(top_package_names))
+        graph_data = {}
+        for entry,package in graph_query:
+            if not package: continue
+            if entry.period_name=='All': continue
+            graph_data[package.id] = graph_data.get(package.id,{
+                'name':package.title,
+                'data':[]
+                })
+            graph_data[package.id]['data'].append({
+                'x':_get_unix_epoch(entry.period_name),
+                'y':int(entry.pageviews),
+                })
+
+        c.graph_data = json.dumps( _to_rickshaw(graph_data.values()) )
+
         return render('ga_report/publisher/read.html')
+
+def _to_rickshaw(data):
+    num_points = []
+    for package in data:
+        package['data'] = sorted( package['data'], key=lambda x:x['x'] )
+        num_points.append( len(package['data']) )
+    if len(set(num_points))>1:
+        longest = max(num_points)
+        for package in data:
+            while len(package['data'])<longest:
+                package['data'].insert(0, package['data'][0])  # pad shorter series with a copy of their earliest point
+    return data
 
 def _get_top_publishers(limit=20):
     '''
@@ -409,11 +447,35 @@
 
     top_publishers = []
     res = connection.execute(q, month)
+    department_ids = []
     for row in res:
         g = model.Group.get(row[0])
         if g:
+            department_ids.append(row[0])
             top_publishers.append((g, row[1], row[2]))
-    return top_publishers
+
+    graph = {}
+    if limit is not None:
+        # Query for a history graph of these publishers
+        q = model.Session.query(
+                GA_Url.department_id,
+                GA_Url.period_name,
+                func.sum(cast(GA_Url.pageviews,sqlalchemy.types.INT)))\
+            .filter( GA_Url.department_id.in_(department_ids) )\
+            .filter( GA_Url.period_name!='All' )\
+            .filter( GA_Url.url.like('/dataset/%') )\
+            .filter( GA_Url.package_id!='' )\
+            .group_by( GA_Url.department_id, GA_Url.period_name )
+        for dept_id,period_name,views in q:
+            graph[dept_id] = graph.get( dept_id, {
+                'name' : model.Group.get(dept_id).title,
+                'data' : []
+                })
+            graph[dept_id]['data'].append({
+                'x': _get_unix_epoch(period_name),
+                'y': views
+                })
+    return top_publishers, graph
 
 
 def _get_publishers():
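As a sanity check, here is a minimal standalone sketch of what _to_rickshaw does with series of uneven length: it sorts each series by x and left-pads the shorter ones by repeating their earliest point, presumably so that Rickshaw receives equal-length series. The series names and values below are made up:

    # Made-up input: series 'A' has three points, 'B' only one.
    series = [
        {'name': 'A', 'data': [{'x': 2, 'y': 5}, {'x': 1, 'y': 3}, {'x': 3, 'y': 7}]},
        {'name': 'B', 'data': [{'x': 3, 'y': 2}]},
    ]
    padded = _to_rickshaw(series)
    # 'A' is now sorted by x; 'B' has been padded to the same length by
    # repeating its first (and only) point.
    assert [p['x'] for p in padded[0]['data']] == [1, 2, 3]
    assert len(padded[1]['data']) == 3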