scraper and sort order updates
Former-commit-id: c8bfc5c3ecbee616fa6dd8bfdd147bedf4d64646
--- /dev/null
+++ b/admin/massdelete.sh
@@ -0,0 +1,12 @@
+#!/bin/bash
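+# bulk-delete all disclosr-foidocuments rows for one agency (the key passed to the byAgencyID view)
+# xargs strips the JSON quoting, so the grep/perl pipeline below can pick out the _id and rev fields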
+for line in `curl "http://localhost:5984/disclosr-foidocuments/_design/app/_view/byAgencyID?reduce=false&keys=%5B\"5716ce0aacfe98f7d638b7a66b7f1040\"%5D&limit=600" | xargs -L1`; do
+# echo $line
+ id=`echo $line | grep -Po '_id:.*?[^\\\],' | perl -pe 's/_id://; s/,$//'`
+ rev=`echo $line | grep -Po 'rev:.*?[^\\\],' | perl -pe 's/rev://; s/,$//'`
+ if [ -n "$id" ]; then
+ echo "curl -X DELETE http://localhost:5984/disclosr-foidocuments/$id?rev=$rev"
+ curl -X DELETE "http://localhost:5984/disclosr-foidocuments/$id?rev=$rev"
+ fi
+done
--- a/documents/about.php
+++ b/documents/about.php
@@ -5,6 +5,7 @@
include_once('../include/common.inc.php');
?>
<h1>About</h1>
+Written and managed by Alex Sadleir (maxious [at] lambdacomplex.org)
<?php
include_footer_documents();
?>
--- a/documents/agency.php
+++ b/documents/agency.php
@@ -31,6 +31,13 @@
} else {
$rows = $foidocsdb->get_view("app", "byAgencyID?group=true", null, false, false, true)->rows;
if ($rows) {
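+// order the agency links alphabetically by agency name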
+function cmp($a, $b)
+{
+ global $idtoname;
+ return strcmp($idtoname[$a->key], $idtoname[$b->key]);
+}
+usort($rows, "cmp");
foreach ($rows as $row) {
echo '<a href="agency.php?id=' . $row->key . '">' . $idtoname[$row->key] . " (" . $row->value . " records)</a> <br>\n";
}
--- a/documents/charts.php
+++ b/documents/charts.php
@@ -112,7 +112,12 @@
<?php
try {
$rows = $foidocsdb->get_view("app", "byAgencyID?group=true",null, false,false,true)->rows;
-
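+// usort comparator must return an int, so compare record counts by subtraction (ascending)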
+function cmp($a, $b)
+{
+ return $a->value - $b->value;
+}
+usort($rows, "cmp");
$dataValues = Array();
$i = 0;
--- a/documents/genericScrapers.py
+++ b/documents/genericScrapers.py
@@ -199,6 +199,18 @@
def getRows(self, table):
return table.find_all('tr')
+ def getDocHash(self, id, date, url):
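+ ''' hash the disclog row id, falling back to the date column when the id cell is blank '''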
+ if id.string is None:
+ print "no id, using date as hash"
+ return scrape.mkhash(
+ self.remove_control_chars(
+ url + (''.join(date.stripped_strings))))
+ else:
+ return scrape.mkhash(
+ self.remove_control_chars(
+ url + (''.join(id.stripped_strings))))
+
def getDate(self, content, entry, doc):
strdate = ''.join(content.stripped_strings).strip()
(a, b, c) = strdate.partition("(")
@@ -240,15 +252,7 @@
description, notes) = self.getColumns(columns)
print self.remove_control_chars(
''.join(id.stripped_strings))
- if id.string is None:
- print "no id, using date as hash"
- dochash = scrape.mkhash(
- self.remove_control_chars(
- url + (''.join(date.stripped_strings))))
- else:
- dochash = scrape.mkhash(
- self.remove_control_chars(
- url + (''.join(id.stripped_strings))))
+ dochash = self.getDocHash(id, date, url)
doc = foidocsdb.get(dochash)
if doc is None:
--- a/documents/index.php
+++ b/documents/index.php
@@ -18,6 +18,7 @@
$idtoname[$row->id] = trim($row->value->name);
}
$foidocsdb = $server->get_db('disclosr-foidocuments');
+//print_r($foidocsdb);
try {
$rows = $foidocsdb->get_view("app", "byDate", Array($endkey, '0000-00-00'), true, 20, null, $enddocid)->rows;
if ($rows) {
--- a/documents/robots.txt
+++ b/documents/robots.txt
@@ -3,4 +3,5 @@
User-agent: *
Disallow: /admin/
+Disallow: /viewDocument.php
Sitemap: http://disclosurelo.gs/sitemap.xml.php
--- a/documents/rss.xml.php
+++ b/documents/rss.xml.php
@@ -31,11 +31,13 @@
//print_r($rows);
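+// cap the feed at the 50 most recent entries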
+$i = 0;
foreach ($rows as $row) {
//Create an empty FeedItem
$newItem = $TestFeed->createNewItem();
//Add elements to the feed item
- $newItem->setTitle($row->value->title);
+ $newItem->setTitle(preg_replace('/[\x00-\x1F\x80-\xFF]/', '', $row->value->title));
$newItem->setLink("http://disclosurelo.gs/view.php?id=" . $row->value->_id);
$newItem->setDate(strtotime($row->value->date));
$newItem->setDescription(displayLogEntry($row, $idtoname));
@@ -43,6 +45,8 @@
$newItem->addElement('guid', "http://disclosurelo.gs/view.php?id=" . $row->value->_id, array('isPermaLink' => 'true'));
//Now add the feed item
$TestFeed->addItem($newItem);
+ $i++;
+ if ($i > 50) break;
}
//OK. Everything is done. Now genarate the feed.
$TestFeed->generateFeed();
--- a/documents/runScrapers.sh
+++ b/documents/runScrapers.sh
@@ -1,3 +1,4 @@
+#!/bin/bash
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
echo $DIR
cd $DIR
--- a/documents/scrape.py
+++ b/documents/scrape.py
@@ -197,7 +197,7 @@
links = soup.findAll('a') # soup.findAll('a', id=re.compile("^p-"))
linkurls = set([])
for link in links:
- if link.has_key("href"):
+ if link.has_attr("href"):
if link['href'].startswith("http"):
# lets not do external links for now
# linkurls.add(link['href'])
--- a/documents/scrapers/0e46f8bd1414b1fdd4f0543d54a97500.py
+++ b/documents/scrapers/0e46f8bd1414b1fdd4f0543d54a97500.py
@@ -7,7 +7,7 @@
#http://www.doughellmann.com/PyMOTW/abc/
class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
def getTable(self,soup):
- return soup.find(id = "maincontentcontainer").table
+ return soup.find(class_ = "contentcontainer").table
def getColumnCount(self):
return 5
def getColumns(self,columns):
--- a/documents/scrapers/1803322b27286950cab0c543168b5f21.py
+++ b/documents/scrapers/1803322b27286950cab0c543168b5f21.py
@@ -16,7 +16,7 @@
links = []
description = ""
for atag in entry.find_all('a'):
- if atag.has_key('href'):
+ if atag.has_attr('href'):
link = scrape.fullurl(self.getURL(), atag['href'])
(url, mime_type, htcontent) = scrape.fetchURL(scrape.docsdb, link, "foidocuments", self.getAgencyID(), False)
if htcontent != None:
@@ -25,7 +25,7 @@
row = soup.find(id="content_div_148050")
description = ''.join(row.stripped_strings)
for atag in row.find_all("a"):
- if atag.has_key('href'):
+ if atag.has_attr('href'):
links.append(scrape.fullurl(link, atag['href']))
if links != []:
--- a/documents/scrapers/1d404c4934f74feacd00dcb434e7c10a.py
+++ b/documents/scrapers/1d404c4934f74feacd00dcb434e7c10a.py
@@ -6,8 +6,8 @@
#http://www.doughellmann.com/PyMOTW/abc/
class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
- #def getTable(self,soup):
- # return soup.find(id = "cphMain_C001_Col01").table
+ def getTable(self,soup):
+ return soup.findAll('table')[1]
def getColumnCount(self):
return 5
def getColumns(self,columns):
--- a/documents/scrapers/227cb6eb7d2c9f8a6e846df7447d6caa.py
+++ b/documents/scrapers/227cb6eb7d2c9f8a6e846df7447d6caa.py
@@ -11,7 +11,7 @@
links = []
description = ""
for atag in entry.find_all('a'):
- if atag.has_key('href'):
+ if atag.has_attr('href'):
link = scrape.fullurl(self.getURL(),atag['href'])
(url,mime_type,htcontent) = scrape.fetchURL(scrape.docsdb, link, "foidocuments", self.getAgencyID(), False)
if htcontent != None:
@@ -26,7 +26,7 @@
for text in row.stripped_strings:
description = description + text + "\n"
for atag in row.find_all("a"):
- if atag.has_key('href'):
+ if atag.has_attr('href'):
links.append(scrape.fullurl(link,atag['href']))
if links != []:
--- a/documents/scrapers/41a166419503bb50e410c58be54c102f.py
+++ b/documents/scrapers/41a166419503bb50e410c58be54c102f.py
@@ -8,7 +8,7 @@
#http://www.doughellmann.com/PyMOTW/abc/
class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
def getTable(self,soup):
- return soup.find(id= "ctl00_MSO_ContentDiv").table
+ return soup.find(class_ = "rgMasterTable")
def getColumns(self,columns):
(id, title, description, notes) = columns
--- a/documents/scrapers/53d2884f8afd026096a27bd5051ec50e.py
+++ b/documents/scrapers/53d2884f8afd026096a27bd5051ec50e.py
@@ -16,7 +16,7 @@
link = None
links = []
for atag in entry.find_all('a'):
- if atag.has_key('href'):
+ if atag.has_attr('href'):
link = scrape.fullurl(self.getURL(),atag['href'])
(url,mime_type,htcontent) = scrape.fetchURL(scrape.docsdb, link, "foidocuments", self.getAgencyID(), False)
if htcontent != None:
@@ -24,7 +24,7 @@
# http://www.crummy.com/software/BeautifulSoup/documentation.html
soup = BeautifulSoup(htcontent)
for atag in soup.find(class_ = "article-content").find_all('a'):
- if atag.has_key('href'):
+ if atag.has_attr('href'):
links.append(scrape.fullurl(link,atag['href']))
if links != []:
--- a/documents/scrapers/5716ce0aacfe98f7d638b7a66b7f1040.py
+++ b/documents/scrapers/5716ce0aacfe98f7d638b7a66b7f1040.py
@@ -6,6 +6,11 @@
#http://www.doughellmann.com/PyMOTW/abc/
class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
+ def getDocHash(self, id, date, url):
+ ''' url changes on every request so ignore it for the hash '''
+ return scrape.mkhash(
+ self.remove_control_chars(
+ ''.join(id.stripped_strings)))
def getColumnCount(self):
return 4
def getColumns(self,columns):
--- a/documents/scrapers/601aedeef4344638d635bdd761e9fdba.py
+++ b/documents/scrapers/601aedeef4344638d635bdd761e9fdba.py
@@ -6,8 +6,8 @@
#http://www.doughellmann.com/PyMOTW/abc/
class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
- #def getTable(self,soup):
- # return soup.find(id = "ctl00_PlaceHolderMain_intro2__ControlWrapper_CerRichHtmlField").table
+ def getTable(self,soup):
+ return soup.find(id = "main").table
def getColumnCount(self):
return 4
def getColumns(self,columns):
--- a/documents/scrapers/69d59284ef0ccd2677394d82d3292abc.py
+++ b/documents/scrapers/69d59284ef0ccd2677394d82d3292abc.py
@@ -6,8 +6,6 @@
#http://www.doughellmann.com/PyMOTW/abc/
class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
- def getTable(self,soup):
- return soup.find(id = "centercontent").table
def getColumnCount(self):
return 5
def getColumns(self,columns):
--- a/documents/scrapers/8c9421f852c441910bf1d93a57b31d64.py
+++ b/documents/scrapers/8c9421f852c441910bf1d93a57b31d64.py
@@ -5,6 +5,8 @@
#http://www.doughellmann.com/PyMOTW/abc/
class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
+ def getTable(self,soup):
+ return soup.find(id = "page_content").table
def getColumns(self,columns):
(id, date, title, description, notes) = columns
return (id, date, title, description, notes)
--- a/documents/scrapers/8e874a2fde8aa0ccdc6d14573d766540.py
+++ b/documents/scrapers/8e874a2fde8aa0ccdc6d14573d766540.py
@@ -11,7 +11,7 @@
links = []
description = ""
for atag in entry.find_all('a'):
- if atag.has_key('href'):
+ if atag.has_attr('href'):
link = scrape.fullurl(self.getURL(),atag['href'])
(url,mime_type,htcontent) = scrape.fetchURL(scrape.docsdb, link, "foidocuments", self.getAgencyID(), False)
if htcontent != None:
@@ -22,7 +22,7 @@
description = description + text.encode('ascii', 'ignore')
for atag in soup.find(id="SortingTable").find_all("a"):
- if atag.has_key('href'):
+ if atag.has_attr('href'):
links.append(scrape.fullurl(link,atag['href']))
if links != []:
@@ -43,7 +43,7 @@
links = []
description = ""
for atag in entry.find_all('a'):
- if atag.has_key('href'):
+ if atag.has_attr('href'):
link = scrape.fullurl(self.getURL(),atag['href'])
(url,mime_type,htcontent) = scrape.fetchURL(scrape.docsdb, link, "foidocuments", self.getAgencyID(), False)
if htcontent != None:
@@ -53,7 +53,7 @@
for text in soup.find(id="content-item").stripped_strings:
description = description + text + " \n"
for atag in soup.find(id="content-item").find_all("a"):
- if atag.has_key('href'):
+ if atag.has_attr('href'):
links.append(scrape.fullurl(link,atag['href']))
if links != []:
doc.update({'links': links})
--- a/documents/scrapers/ad033512610d8e36886ab6a795f26561.py
+++ b/documents/scrapers/ad033512610d8e36886ab6a795f26561.py
@@ -6,8 +6,8 @@
#http://www.doughellmann.com/PyMOTW/abc/
class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
- def getTable(self,soup):
- return soup.find(id = "_ctl0__ctl0_MainContentPlaceHolder_MainContentPlaceHolder_ContentSpan").findAll("table")[3]
+# def getTable(self,soup):
+# return soup.find(class_ = "content").table
def getColumnCount(self):
return 5
def getColumns(self,columns):
--- a/documents/scrapers/be9996f0ac58f71f23d074e82d44ead3.py
+++ b/documents/scrapers/be9996f0ac58f71f23d074e82d44ead3.py
@@ -17,7 +17,7 @@
dldivs = soup.find('div',class_="download")
if dldivs != None:
for atag in dldivs.find_all("a"):
- if atag.has_key('href'):
+ if atag.has_attr('href'):
links.append(scrape.fullurl(url,atag['href']))
nodldivs = soup.find('div',class_="incompleteNotification")
if nodldivs != None and nodldivs.stripped_strings != None:
--- a/documents/scrapers/d1296c366287f7a9faedf235c7e6df01.py
+++ b/documents/scrapers/d1296c366287f7a9faedf235c7e6df01.py
@@ -6,8 +6,6 @@
#http://www.doughellmann.com/PyMOTW/abc/
class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
- def getTable(self,soup):
- return soup.find(id="main").table
def getColumnCount(self):
return 7
def getColumns(self,columns):
--- a/documents/scrapers/e2a845e55bc9986e6c75c5ad2c508b8d.py
+++ b/documents/scrapers/e2a845e55bc9986e6c75c5ad2c508b8d.py
@@ -1,16 +1,56 @@
import sys,os
sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
import genericScrapers
-#RSS feed not detailed
+import dateutil
+from dateutil.parser import *
+from datetime import *
+import scrape
+from bs4 import BeautifulSoup
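+# switched from the generic RSS disclog scraper to the OAIC table scraper (the RSS feed is not detailed enough)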
+class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
-#http://www.doughellmann.com/PyMOTW/abc/
-class ScraperImplementation(genericScrapers.GenericRSSDisclogScraper):
- def getColumns(self,columns):
- (id, date, title, description, notes) = columns
- return (id, date, title, description, notes)
+ def __init__(self):
+ super(ScraperImplementation, self).__init__()
+ def getTable(self, soup):
+ return soup.find(id='content')
+
+ def getDescription(self,content, entry,doc):
+ link = None
+ links = []
+ description = ""
+ for atag in entry.find_all('a'):
+ if atag.has_attr('href'):
+ link = scrape.fullurl(self.getURL(), atag['href'])
+ (url, mime_type, htcontent) = scrape.fetchURL(scrape.docsdb, link, "foidocuments", self.getAgencyID(), False)
+ if htcontent != None:
+ if mime_type == "text/html" or mime_type == "application/xhtml+xml" or mime_type =="application/xml":
+ soup = BeautifulSoup(htcontent)
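+ # fall back through page layouts: the FOI details pane, then the content table, then the whole content div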
+ row = soup.find(id="foidetails")
+ if row == None:
+ row = soup.find(id="content").table
+ if row == None:
+ row = soup.find(id="content")
+ description = ''.join(row.stripped_strings)
+ for atag in row.find_all("a"):
+ if atag.has_attr('href'):
+ links.append(scrape.fullurl(link, atag['href']))
+
+ if links != []:
+ doc.update({'links': links})
+ if description != "":
+ doc.update({ 'description': description})
+
+ def getColumnCount(self):
+ return 3
+
+ def getColumns(self, columns):
+ (id, title, date) = columns
+ return (id, date, title, title, None)
+
if __name__ == '__main__':
- print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericRSSDisclogScraper)
- print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericRSSDisclogScraper)
+ print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
+ print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
ScraperImplementation().doScrape()
--- a/documents/scrapers/f5ce2d1651739704634eb8ca4b2b46d3.py
+++ b/documents/scrapers/f5ce2d1651739704634eb8ca4b2b46d3.py
@@ -7,12 +7,12 @@
#http://www.doughellmann.com/PyMOTW/abc/
class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
def getTable(self,soup):
- return soup.find(id = "ctl00_PlaceHolderMain_PublishingPageContent__ControlWrapper_RichHtmlField").table
+ return soup.find(id = "block-system-main").table
def getColumnCount(self):
- return 7
+ return 2
def getColumns(self,columns):
- (id, date, title, description,link,deldate, notes) = columns
- return (id, date, title, description, notes)
+ (date, title) = columns
+ return (date, date, title, title, None)
if __name__ == '__main__':
print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
--- a/documents/sitemap.xml.php
+++ b/documents/sitemap.xml.php
@@ -1,30 +1,49 @@
<?php
include ('../include/common.inc.php');
-$last_updated = date('Y-m-d', @filemtime('cbrfeed.zip'));
header("Content-Type: text/xml");
echo "<?xml version='1.0' encoding='UTF-8'?>";
echo '<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">' . "\n";
echo " <url><loc>" . local_url() . "index.php</loc><priority>1.0</priority></url>\n";
foreach (scandir("./") as $file) {
- if (strpos($file, ".php") !== false && $file != "index.php" && $file != "sitemap.xml.php")
+ if (strpos($file, ".php") !== false && ($file != "index.php" && $file != "sitemap.xml.php" && $file != "viewDocument.php")) {
echo " <url><loc>" . local_url() . "$file</loc><priority>0.6</priority></url>\n";
+ }
}
$agenciesdb = $server->get_db('disclosr-agencies');
+$foidocsdb = $server->get_db('disclosr-foidocuments');
try {
$rows = $agenciesdb->get_view("app", "byCanonicalName")->rows;
foreach ($rows as $row) {
echo '<url><loc>' . local_url() . 'agency.php?id=' . $row->value->_id . "</loc><priority>0.3</priority></url>\n";
}
+ unset($rows);
+ $rows = null;
} catch (SetteeRestClientException $e) {
setteErrorHandler($e);
}
-$foidocsdb = $server->get_db('disclosr-foidocuments');
+
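+// page through FOI docs by the first character of the doc id so no single view request gets too large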
+foreach (range(0, 8) as $number) {
try {
- $rows = $foidocsdb->get_view("app", "all")->rows;
+ $rows = $foidocsdb->get_view("app", "all", Array($number, $number+1))->rows;
foreach ($rows as $row) {
echo '<url><loc>' . local_url() . 'view.php?id=' . $row->value->_id . "</loc><priority>0.3</priority></url>\n";
}
+ unset($rows);
+ $rows = null;
+} catch (SetteeRestClientException $e) {
+ setteErrorHandler($e);
+}
+}
+
+try {
+ $rows = $foidocsdb->get_view("app", "all", Array('9', 'fffffffff'))->rows;
+ foreach ($rows as $row) {
+ echo '<url><loc>' . local_url() . 'view.php?id=' . $row->value->_id . "</loc><priority>0.3</priority></url>\n";
+ }
+ unset($rows);
+ $rows = null;
} catch (SetteeRestClientException $e) {
setteErrorHandler($e);
}
--- a/documents/template.inc.php
+++ b/documents/template.inc.php
@@ -77,7 +77,7 @@
</p>
<ul class="nav">
<li><a href="agency.php">By Agency</a></li>
- <li><a href="date.php">By Date</a></li>
+<!-- <li><a href="date.php">By Date</a></li> -->
<li><a href="disclogsList.php">List of Disclosure Logs</a></li>
<li><a href="charts.php">Charts</a></li>
<li><a href="about.php">About</a></li>
--- a/graph.php
+++ b/graph.php
@@ -9,13 +9,13 @@
function add_node($id, $label, $parent="") {
global $format;
if ($format == "html") {
- // echo "nodes[\&qu