From: Alex Sadleir
Date: Tue, 22 Oct 2013 03:18:13 +0000
Subject: add email to about
X-Git-Url: http://maxious.lambdacomplex.org/git/?p=disclosr.git&a=commitdiff&h=a5f659aa030a48eff40300de02383b4d79aa894f

---

add email to about

Former-commit-id: 505693e6c8100430bce13e6ef586d885e916a468
---

--- a/documents/about.php
+++ b/documents/about.php
@@ -5,6 +5,7 @@
 include_once('../include/common.inc.php');
 ?>

 About

+Written and managed by Alex Sadleir (maxious [at] lambdacomplex.org)

--- a/documents/genericScrapers.py
+++ b/documents/genericScrapers.py
@@ -199,6 +199,17 @@
     def getRows(self, table):
         return table.find_all('tr')
 
+    def getDocHash(self, id,date, url):
+        if id.string is None:
+            print "no id, using date as hash"
+            return scrape.mkhash(
+                self.remove_control_chars(
+                    url + (''.join(date.stripped_strings))))
+        else:
+            return scrape.mkhash(
+                self.remove_control_chars(
+                    url + (''.join(id.stripped_strings))))
+
     def getDate(self, content, entry, doc):
         strdate = ''.join(content.stripped_strings).strip()
         (a, b, c) = strdate.partition("(")
@@ -240,15 +251,7 @@
                 description, notes) = self.getColumns(columns)
             print self.remove_control_chars(
                 ''.join(id.stripped_strings))
-            if id.string is None:
-                print "no id, using date as hash"
-                dochash = scrape.mkhash(
-                    self.remove_control_chars(
-                        url + (''.join(date.stripped_strings))))
-            else:
-                dochash = scrape.mkhash(
-                    self.remove_control_chars(
-                        url + (''.join(id.stripped_strings))))
+            dochash = self.getDocHash(id,date,url)
             doc = foidocsdb.get(dochash)
             if doc is None:

--- a/documents/runScrapers.sh
+++ b/documents/runScrapers.sh
@@ -1,3 +1,4 @@
+#!/bin/bash
 DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
 echo $DIR
 cd $DIR

--- a/documents/scrape.py
+++ b/documents/scrape.py
@@ -197,7 +197,7 @@
     links = soup.findAll('a')  # soup.findAll('a', id=re.compile("^p-"))
     linkurls = set([])
     for link in links:
-        if link.has_key("href"):
+        if link.has_attr("href"):
             if link['href'].startswith("http"):
                 # lets not do external links for now
                 # linkurls.add(link['href'])

--- a/documents/scrapers/0e46f8bd1414b1fdd4f0543d54a97500.py
+++ b/documents/scrapers/0e46f8bd1414b1fdd4f0543d54a97500.py
@@ -7,7 +7,7 @@
 #http://www.doughellmann.com/PyMOTW/abc/
 class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
     def getTable(self,soup):
-        return soup.find(id = "maincontentcontainer").table
+        return soup.find(class_ = "contentcontainer").table
     def getColumnCount(self):
         return 5
     def getColumns(self,columns):

--- a/documents/scrapers/1803322b27286950cab0c543168b5f21.py
+++ b/documents/scrapers/1803322b27286950cab0c543168b5f21.py
@@ -16,7 +16,7 @@
         links = []
         description = ""
         for atag in entry.find_all('a'):
-            if atag.has_key('href'):
+            if atag.has_attr('href'):
                 link = scrape.fullurl(self.getURL(), atag['href'])
                 (url, mime_type, htcontent) = scrape.fetchURL(scrape.docsdb, link, "foidocuments", self.getAgencyID(), False)
                 if htcontent != None:
@@ -25,7 +25,7 @@
                     row = soup.find(id="content_div_148050")
                     description = ''.join(row.stripped_strings)
                     for atag in row.find_all("a"):
-                        if atag.has_key('href'):
+                        if atag.has_attr('href'):
                             links.append(scrape.fullurl(link, atag['href']))
         if links != []:

--- a/documents/scrapers/227cb6eb7d2c9f8a6e846df7447d6caa.py
+++ b/documents/scrapers/227cb6eb7d2c9f8a6e846df7447d6caa.py
@@ -11,7 +11,7 @@
         links = []
         description = ""
         for atag in entry.find_all('a'):
-            if atag.has_key('href'):
+            if atag.has_attr('href'):
                 link = scrape.fullurl(self.getURL(),atag['href'])
                 (url,mime_type,htcontent) = scrape.fetchURL(scrape.docsdb, link, "foidocuments", self.getAgencyID(), False)
                 if htcontent != None:
@@ -26,7 +26,7 @@
                     for text in row.stripped_strings:
                         description = description + text + "\n"
                     for atag in row.find_all("a"):
-                        if atag.has_key('href'):
+                        if atag.has_attr('href'):
                             links.append(scrape.fullurl(link,atag['href']))
         if links != []:
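The has_key to has_attr substitutions above and below track the BeautifulSoup 4 API, where Tag.has_key() is deprecated in favour of Tag.has_attr(). A minimal sketch of the new call, illustrative only and not part of this commit:

    from bs4 import BeautifulSoup

    soup = BeautifulSoup("<a href='http://www.example.com/'>example</a>")
    for atag in soup.find_all('a'):
        # has_attr() is the BeautifulSoup 4 replacement for atag.has_key('href')
        if atag.has_attr('href'):
            print atag['href']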
--- a/documents/scrapers/53d2884f8afd026096a27bd5051ec50e.py
+++ b/documents/scrapers/53d2884f8afd026096a27bd5051ec50e.py
@@ -16,7 +16,7 @@
         link = None
         links = []
         for atag in entry.find_all('a'):
-            if atag.has_key('href'):
+            if atag.has_attr('href'):
                 link = scrape.fullurl(self.getURL(),atag['href'])
                 (url,mime_type,htcontent) = scrape.fetchURL(scrape.docsdb, link, "foidocuments", self.getAgencyID(), False)
                 if htcontent != None:
@@ -24,7 +24,7 @@
                     # http://www.crummy.com/software/BeautifulSoup/documentation.html
                     soup = BeautifulSoup(htcontent)
                     for atag in soup.find(class_ = "article-content").find_all('a'):
-                        if atag.has_key('href'):
+                        if atag.has_attr('href'):
                             links.append(scrape.fullurl(link,atag['href']))
         if links != []:

--- a/documents/scrapers/5716ce0aacfe98f7d638b7a66b7f1040.py
+++ b/documents/scrapers/5716ce0aacfe98f7d638b7a66b7f1040.py
@@ -6,6 +6,11 @@
 #http://www.doughellmann.com/PyMOTW/abc/
 class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
+    def getDocHash(self, id,date, url):
+        ''' url changes on ever request so ignore for hash '''
+        return scrape.mkhash(
+            self.remove_control_chars(
+                ''.join(id.stripped_strings)))
     def getColumnCount(self):
         return 4
     def getColumns(self,columns):

--- a/documents/scrapers/69d59284ef0ccd2677394d82d3292abc.py
+++ b/documents/scrapers/69d59284ef0ccd2677394d82d3292abc.py
@@ -6,8 +6,6 @@
 #http://www.doughellmann.com/PyMOTW/abc/
 class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
-    def getTable(self,soup):
-        return soup.find(id = "centercontent").table
     def getColumnCount(self):
         return 5
     def getColumns(self,columns):

--- a/documents/scrapers/8e874a2fde8aa0ccdc6d14573d766540.py
+++ b/documents/scrapers/8e874a2fde8aa0ccdc6d14573d766540.py
@@ -11,7 +11,7 @@
         links = []
         description = ""
         for atag in entry.find_all('a'):
-            if atag.has_key('href'):
+            if atag.has_attr('href'):
                 link = scrape.fullurl(self.getURL(),atag['href'])
                 (url,mime_type,htcontent) = scrape.fetchURL(scrape.docsdb, link, "foidocuments", self.getAgencyID(), False)
                 if htcontent != None:
@@ -22,7 +22,7 @@
                         description = description + text.encode('ascii', 'ignore')
                     for atag in soup.find(id="SortingTable").find_all("a"):
-                        if atag.has_key('href'):
+                        if atag.has_attr('href'):
                             links.append(scrape.fullurl(link,atag['href']))
         if links != []:
@@ -43,7 +43,7 @@
         links = []
         description = ""
         for atag in entry.find_all('a'):
-            if atag.has_key('href'):
+            if atag.has_attr('href'):
                 link = scrape.fullurl(self.getURL(),atag['href'])
                 (url,mime_type,htcontent) = scrape.fetchURL(scrape.docsdb, link, "foidocuments", self.getAgencyID(), False)
                 if htcontent != None:
@@ -53,7 +53,7 @@
                    for text in soup.find(id="content-item").stripped_strings:
                        description = description + text + " \n"
                    for atag in soup.find(id="content-item").find_all("a"):
-                       if atag.has_key('href'):
+                       if atag.has_attr('href'):
                            links.append(scrape.fullurl(link,atag['href']))
         if links != []:
             doc.update({'links': links})

--- a/documents/scrapers/be9996f0ac58f71f23d074e82d44ead3.py
+++ b/documents/scrapers/be9996f0ac58f71f23d074e82d44ead3.py
@@ -17,7 +17,7 @@
         dldivs = soup.find('div',class_="download")
         if dldivs != None:
             for atag in dldivs.find_all("a"):
-                if atag.has_key('href'):
+                if atag.has_attr('href'):
                     links.append(scrape.fullurl(url,atag['href']))
         nodldivs = soup.find('div',class_="incompleteNotification")
         if nodldivs != None and nodldivs.stripped_strings != None:
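The getDocHash() hook extracted in genericScrapers.py earlier in this commit is what the 5716ce0aacfe scraper above relies on: a disclosure log whose URLs carry a per-request token can hash only the id column instead of url + id. A sketch of that override pattern, assuming the repository's genericScrapers and scrape modules are importable; illustrative only, not new code in this commit:

    import genericScrapers
    import scrape

    class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
        def getDocHash(self, id, date, url):
            # the url changes on every request, so hash only the id column
            return scrape.mkhash(
                self.remove_control_chars(''.join(id.stripped_strings)))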
--- a/documents/scrapers/d1296c366287f7a9faedf235c7e6df01.py
+++ b/documents/scrapers/d1296c366287f7a9faedf235c7e6df01.py
@@ -6,8 +6,6 @@
 #http://www.doughellmann.com/PyMOTW/abc/
 class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
-    def getTable(self,soup):
-        return soup.find(id="main").table
     def getColumnCount(self):
         return 7
     def getColumns(self,columns):

--- a/documents/scrapers/f5ce2d1651739704634eb8ca4b2b46d3.py
+++ b/documents/scrapers/f5ce2d1651739704634eb8ca4b2b46d3.py
@@ -7,12 +7,12 @@
 #http://www.doughellmann.com/PyMOTW/abc/
 class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
     def getTable(self,soup):
-        return soup.find(id = "ctl00_PlaceHolderMain_PublishingPageContent__ControlWrapper_RichHtmlField").table
+        return soup.find(id = "block-system-main").table
     def getColumnCount(self):
-        return 7
+        return 2
     def getColumns(self,columns):
-        (id, date, title, description,link,deldate, notes) = columns
-        return (id, date, title, description, notes)
+        (date, title) = columns
+        return (date, date, title, title, None)
 
 if __name__ == '__main__':
     print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)

--- a/documents/sitemap.xml.php
+++ b/documents/sitemap.xml.php
@@ -1,30 +1,48 @@
 ";
 echo '' . "\n";
 echo " " . local_url() . "index.php1.0\n";
 foreach (scandir("./") as $file) {
-    if (strpos($file, ".php") !== false && $file != "index.php" && $file != "sitemap.xml.php"&& $file != "viewDocument.php")
+    if (strpos($file, ".php") !== false && ($file != "index.php" && $file != "sitemap.xml.php"&& $file != "viewDocument.php")) {
         echo " " . local_url() . "$file0.6\n";
+    }
 }
 $agenciesdb = $server->get_db('disclosr-agencies');
+$foidocsdb = $server->get_db('disclosr-foidocuments');
 try {
     $rows = $agenciesdb->get_view("app", "byCanonicalName")->rows;
     foreach ($rows as $row) {
         echo '' . local_url() . 'agency.php?id=' . $row->value->_id . "0.3\n";
     }
+    unset($rows);
+    $rows = null;
 } catch (SetteeRestClientException $e) {
     setteErrorHandler($e);
 }
-$foidocsdb = $server->get_db('disclosr-foidocuments');
+
+foreach (range(0, 8) as $number) {
 try {
-    $rows = $foidocsdb->get_view("app", "all")->rows;
+    $rows = $foidocsdb->get_view("app", "all", Array($number,$number+1))->rows;
     foreach ($rows as $row) {
         echo '' . local_url() . 'view.php?id=' . $row->value->_id . "0.3\n";
     }
+    unset($rows);
+    $rows = null;
+} catch (SetteeRestClientException $e) {
+    setteErrorHandler($e);
+}
+}
+
+try {
+    $rows = $foidocsdb->get_view("app", "all", Array('9','fffffffff'))->rows;
+    foreach ($rows as $row) {
+        echo '' . local_url() . 'view.php?id=' . $row->value->_id . "0.3\n";
+    }
+    unset($rows);
+    $rows = null;
 } catch (SetteeRestClientException $e) {
     setteErrorHandler($e);
 }
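The sitemap.xml.php hunk above stops pulling the whole disclosr-foidocuments "all" view in a single request: it walks the key ranges 0-1 through 8-9 and then '9' to 'fffffffff', unsetting $rows after each chunk, presumably to keep memory use bounded. A sketch of the chunked loop, assuming the same Settee-based $server handle and that the third get_view() argument is treated here as a startkey/endkey pair; illustrative only:

    <?php
    $foidocsdb = $server->get_db('disclosr-foidocuments');
    foreach (range(0, 8) as $number) {
        // fetch only the documents whose keys fall between $number and $number + 1
        $rows = $foidocsdb->get_view("app", "all", Array($number, $number + 1))->rows;
        foreach ($rows as $row) {
            echo local_url() . 'view.php?id=' . $row->value->_id . "\n";
        }
        unset($rows);   // release this chunk before requesting the next range
        $rows = null;
    }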
--- a/graph.php
+++ b/graph.php
@@ -9,13 +9,13 @@
 function add_node($id, $label, $parent="") {
     global $format;
     if ($format == "html") {
-        // echo "nodes[\"$id\"] = graph.newNode({label: \"$label\"});" . PHP_EOL;
+        // echo "nodes[\"$id\"] = graph.newNode({label: \"$label\"});" . PHP_EOL;
     }
     if ($format == "dot" && $label != "") {
-        echo "$id [label=\"$label\"];". PHP_EOL;
+        echo "\"$id\" [label=\"$label\", shape=plaintext];". PHP_EOL;
     }
     if ($format == "gexf") {
-        echo "":">")
+        echo "":">")
             .""
             ."". PHP_EOL;
     }
@@ -27,7 +27,7 @@
         // echo "graph.newEdge(nodes[\"$from\"], nodes['$to'], {color: '$color'});" . PHP_EOL;
     }
     if ($format == "dot") {
-        echo "$from -> $to ".($color != ""? "[color=$color]":"").";". PHP_EOL;
+        echo "\"$from\" -> \"$to\" ".($color != ""? "[color=$color]":"").";". PHP_EOL;
     }
     if ($format == "gexf") {
         echo "". PHP_EOL;
@@ -55,7 +55,7 @@
     $rows = $db->get_view("app", "byCanonicalName", null, true)->rows;
     //print_r($rows);
     foreach ($rows as $row) {
-        add_node($row->id, $row->key);
+        add_node($row->id, $row->value->name);
     }
 } catch (SetteeRestClientException $e) {
     setteErrorHandler($e);

--- a/include/common.inc.php
+++ b/include/common.inc.php
@@ -10,6 +10,7 @@
     || strstr($_SERVER['PHP_SELF'], "documents/")
     || $_SERVER['SERVER_NAME'] == "disclosurelo.gs"
     || $_SERVER['SERVER_NAME'] == "www.disclosurelo.gs"
+    || $_SERVER['SERVER_NAME'] == "direct.disclosurelo.gs"
 )
     $basePath = "../";

--- a/ranking.php
+++ b/ranking.php
@@ -32,8 +32,12 @@
             $columnKeys = array_unique(array_merge($columnKeys, array_keys($columns)));
             //print_r($columnKeys);
             $score = count($columns);
-            $scores[$score]++;
-            $scoredagencies[] = Array("id"=> $row->key, "website"=> $row->value->website, "name" => $row->value->name, "columns" => $columns, "score" => $score);
+            if (isset($scores[$score])){
+$scores[$score]++;
+} else {
+$scores[$score] =1;
+}
+            $scoredagencies[] = Array("id"=> $row->key, "website"=> (isset($row->value->website)?$row->value->website:""), "name" => $row->value->name, "columns" => $columns, "score" => $score);
         }
     }
@@ -74,7 +78,7 @@
                 } else {
                     $href = $value;
                 }
-                if ($href[0] == "@") {
+                if (isset($href[0]) && $href[0] == "@") {
                     $href = str_replace("@","https://twitter.com/",$href);
                 }
                 //$href= urlencode($href);
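The ranking.php hunk above initialises a score bucket before incrementing it; without the isset() guard, $scores[$score]++ on an unseen score raises an "Undefined index" notice under E_NOTICE, and the same guard later protects $href[0] when $href is an empty string. A minimal standalone sketch of the counting pattern, illustrative only:

    <?php
    $scores = array();
    foreach (array(3, 5, 3) as $score) {
        if (isset($scores[$score])) {
            $scores[$score]++;      // bucket already exists, just count
        } else {
            $scores[$score] = 1;    // first agency seen with this score
        }
    }
    print_r($scores);   // $scores is now array(3 => 2, 5 => 1)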