graph/ranking fixes
graph/ranking fixes


Former-commit-id: c08fb5bb63762d6d850ae16d8fc7ad947b845078

--- a/documents/scrape.py
+++ b/documents/scrape.py
@@ -197,7 +197,7 @@
                 links = soup.findAll('a') # soup.findAll('a', id=re.compile("^p-"))
                 linkurls = set([])
                 for link in links:
-                    if link.has_key("href"):
+                    if link.has_attr("href"):
                         if link['href'].startswith("http"):
                             # lets not do external links for now
                             # linkurls.add(link['href'])

--- a/documents/scrapers/0e46f8bd1414b1fdd4f0543d54a97500.py
+++ b/documents/scrapers/0e46f8bd1414b1fdd4f0543d54a97500.py
@@ -7,7 +7,7 @@
 #http://www.doughellmann.com/PyMOTW/abc/
 class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
         def getTable(self,soup):
-                return soup.find(id = "maincontentcontainer").table
+                return soup.find(class_ = "contentcontainer").table
         def getColumnCount(self):
                 return 5
         def getColumns(self,columns):

--- a/documents/scrapers/1803322b27286950cab0c543168b5f21.py
+++ b/documents/scrapers/1803322b27286950cab0c543168b5f21.py
@@ -16,7 +16,7 @@
         links = []
         description = ""
         for atag in entry.find_all('a'):
-            if atag.has_key('href'):
+            if atag.has_attr('href'):
                 link = scrape.fullurl(self.getURL(), atag['href'])
                 (url, mime_type, htcontent) = scrape.fetchURL(scrape.docsdb, link, "foidocuments", self.getAgencyID(), False)
                 if htcontent != None:
@@ -25,7 +25,7 @@
                         row  = soup.find(id="content_div_148050")
                         description = ''.join(row.stripped_strings)
                         for atag in row.find_all("a"):
-                                    if atag.has_key('href'):
+                                    if atag.has_attr('href'):
                                         links.append(scrape.fullurl(link, atag['href']))
 
         if links != []:

--- a/documents/scrapers/227cb6eb7d2c9f8a6e846df7447d6caa.py
+++ b/documents/scrapers/227cb6eb7d2c9f8a6e846df7447d6caa.py
@@ -11,7 +11,7 @@
                 links = []
                 description = ""
 		for atag in entry.find_all('a'):
-			if atag.has_key('href'):
+			if atag.has_attr('href'):
 				link = scrape.fullurl(self.getURL(),atag['href'])			
                                 (url,mime_type,htcontent) = scrape.fetchURL(scrape.docsdb, link, "foidocuments", self.getAgencyID(), False)
                                 if htcontent != None:
@@ -26,7 +26,7 @@
                                                                 for text in row.stripped_strings:
                                                                     description = description + text + "\n"
                                                      		for atag in row.find_all("a"):
-                                                                	if atag.has_key('href'):
+                                                                	if atag.has_attr('href'):
                                                                         	links.append(scrape.fullurl(link,atag['href']))
 
 		if links != []:

--- a/documents/scrapers/53d2884f8afd026096a27bd5051ec50e.py
+++ b/documents/scrapers/53d2884f8afd026096a27bd5051ec50e.py
@@ -16,7 +16,7 @@
 		link = None
                 links = []
 		for atag in entry.find_all('a'):
-			if atag.has_key('href'):
+			if atag.has_attr('href'):
 				link = scrape.fullurl(self.getURL(),atag['href'])			
                                 (url,mime_type,htcontent) = scrape.fetchURL(scrape.docsdb, link, "foidocuments", self.getAgencyID(), False)
                                 if htcontent != None:
@@ -24,7 +24,7 @@
                                         # http://www.crummy.com/software/BeautifulSoup/documentation.html
                                                 soup = BeautifulSoup(htcontent)
                                                 for atag in soup.find(class_ = "article-content").find_all('a'):
-	                                               	if atag.has_key('href'):
+	                                               	if atag.has_attr('href'):
         	                                              	links.append(scrape.fullurl(link,atag['href']))
 
 		if links != []:

--- a/documents/scrapers/69d59284ef0ccd2677394d82d3292abc.py
+++ b/documents/scrapers/69d59284ef0ccd2677394d82d3292abc.py
@@ -6,8 +6,6 @@
 
 #http://www.doughellmann.com/PyMOTW/abc/
 class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
-        def getTable(self,soup):
-                return soup.find(id = "centercontent").table       
         def getColumnCount(self):
                 return 5
         def getColumns(self,columns):

--- a/documents/scrapers/8e874a2fde8aa0ccdc6d14573d766540.py
+++ b/documents/scrapers/8e874a2fde8aa0ccdc6d14573d766540.py
@@ -11,7 +11,7 @@
                 links = []
                 description = ""
 		for atag in entry.find_all('a'):
-			if atag.has_key('href'):
+			if atag.has_attr('href'):
 				link = scrape.fullurl(self.getURL(),atag['href'])			
                                 (url,mime_type,htcontent) = scrape.fetchURL(scrape.docsdb, link, "foidocuments", self.getAgencyID(), False)
                                 if htcontent != None:
@@ -22,7 +22,7 @@
                                                     description = description + text.encode('ascii', 'ignore')
 
                                                 for atag in soup.find(id="SortingTable").find_all("a"):
-                                                      	if atag.has_key('href'):
+                                                      	if atag.has_attr('href'):
                                                               	links.append(scrape.fullurl(link,atag['href']))
 
 		if links != []:
@@ -43,7 +43,7 @@
                 links = []
                 description = ""
 		for atag in entry.find_all('a'):
-			if atag.has_key('href'):
+			if atag.has_attr('href'):
 				link = scrape.fullurl(self.getURL(),atag['href'])			
                                 (url,mime_type,htcontent) = scrape.fetchURL(scrape.docsdb, link, "foidocuments", self.getAgencyID(), False)
                                 if htcontent != None:
@@ -53,7 +53,7 @@
                                                 for text in soup.find(id="content-item").stripped_strings:
                                                     description = description + text + " \n"
                                                 for atag in soup.find(id="content-item").find_all("a"):
-                                                    if atag.has_key('href'):
+                                                    if atag.has_attr('href'):
                                                         links.append(scrape.fullurl(link,atag['href']))
 		if links != []:
                  	doc.update({'links': links})

--- a/documents/scrapers/be9996f0ac58f71f23d074e82d44ead3.py
+++ b/documents/scrapers/be9996f0ac58f71f23d074e82d44ead3.py
@@ -17,7 +17,7 @@
 				dldivs = soup.find('div',class_="download")
 				if dldivs != None:
                              		for atag in dldivs.find_all("a"):
-                                		if atag.has_key('href'):
+                                		if atag.has_attr('href'):
                                         		links.append(scrape.fullurl(url,atag['href']))
 				nodldivs = soup.find('div',class_="incompleteNotification")
 				if nodldivs != None and nodldivs.stripped_strings != None:

--- a/documents/scrapers/d1296c366287f7a9faedf235c7e6df01.py
+++ b/documents/scrapers/d1296c366287f7a9faedf235c7e6df01.py
@@ -6,8 +6,6 @@
 
 #http://www.doughellmann.com/PyMOTW/abc/
 class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
-        def getTable(self,soup):
-                return soup.find(id="main").table       
         def getColumnCount(self):
                 return 7
         def getColumns(self,columns):

file:a/graph.php -> file:b/graph.php
--- a/graph.php
+++ b/graph.php
@@ -9,13 +9,13 @@
 function add_node($id, $label, $parent="") {
     global $format;
     if ($format == "html") {
-       // echo "nodes[\"$id\"] = graph.newNode({label: \"$label\"});" . PHP_EOL;
+      //  echo "nodes[\"$id\"] = graph.newNode({label: \"$label\"});" . PHP_EOL;
     }
      if ($format == "dot" && $label != "") {
-         echo "$id [label=\"$label\"];". PHP_EOL;
+         echo "\"$id\" [label=\"$label\", shape=plaintext];". PHP_EOL;
      }
       if ($format == "gexf") {
-          echo "<node id='$id' label=\"".htmlentities($label,ENT_XML1)."\" ".($parent != ""? "pid='$parent'><viz:size value='1'/>":"><viz:size value='2'/>")
+          echo "<node id='$id' label=\"".htmlentities($label)."\" ".($parent != ""? "pid='$parent'><viz:size value='1'/>":"><viz:size value='2'/>")
               ."<viz:color b='".rand(0,255)."' g='".rand(0,255)."' r='".rand(0,255)."'/>"
                   ."</node>". PHP_EOL;
       }
@@ -27,7 +27,7 @@
      //   echo "graph.newEdge(nodes[\"$from\"], nodes['$to'], {color: '$color'});" . PHP_EOL;
     }
     if ($format == "dot") {
-        echo "$from -> $to ".($color != ""? "[color=$color]":"").";". PHP_EOL;
+        echo "\"$from\" -> \"$to\" ".($color != ""? "[color=$color]":"").";". PHP_EOL;
     }
      if ($format == "gexf") {
           echo "<edge id='$from$to' source='$from' target='$to' />". PHP_EOL;
@@ -55,7 +55,7 @@
     $rows = $db->get_view("app", "byCanonicalName", null, true)->rows;
 //print_r($rows);
     foreach ($rows as $row) {
-        add_node($row->id, $row->key);
+        add_node($row->id, $row->value->name);
     }
 } catch (SetteeRestClientException $e) {
     setteErrorHandler($e);

--- a/ranking.php
+++ b/ranking.php
@@ -32,8 +32,12 @@
                 $columnKeys = array_unique(array_merge($columnKeys, array_keys($columns)));
                 //print_r($columnKeys);
                 $score = count($columns);
-                $scores[$score]++;
-                $scoredagencies[] = Array("id"=> $row->key, "website"=> $row->value->website, "name" => $row->value->name, "columns" => $columns, "score" => $score);
+                if (isset($scores[$score])) {
+                    $scores[$score]++;
+                } else {
+                    $scores[$score] = 1;
+                }
+                $scoredagencies[] = Array("id"=> $row->key, "website"=> (isset($row->value->website)?$row->value->website:""), "name" => $row->value->name, "columns" => $columns, "score" => $score);
             }
         }
 
@@ -74,7 +78,7 @@
                 } else {
                     $href = $value;
                 }
-                if ($href[0] == "@") {
+                if (isset($href[0]) && $href[0] == "@") {
                     $href = str_replace("@","https://twitter.com/",$href);
                 }
                 //$href= urlencode($href);