scraper and sort order updates
scraper and sort order updates


Former-commit-id: c8bfc5c3ecbee616fa6dd8bfdd147bedf4d64646

--- /dev/null
+++ b/admin/massdelete.sh
@@ -1,1 +1,10 @@
+for line in `curl "http://localhost:5984/disclosr-foidocuments/_design/app/_view/byAgencyID?reduce=false&keys=%5B\"5716ce0aacfe98f7d638b7a66b7f1040\"%5D&limit=600" | xargs -L1`; do
+#	echo $line
+	id=`echo $line | grep -Po '_id:.*?[^\\\],' | perl -pe 's/_id://; s/^//; s/,$//'`
+	rev=`echo $line | grep -Po 'rev:.*?[^\\\],'| perl -pe 's/rev://; s/^//; s/,$//'`
+	if [ -n "$id" ]; then
+		echo "curl -X DELETE http://localhost:5984/disclosr-foidocuments/$id?rev=$rev"
+		curl -X DELETE http://localhost:5984/disclosr-foidocuments/$id?rev=$rev
+	fi
+done;
 

--- a/documents/about.php
+++ b/documents/about.php
@@ -5,6 +5,7 @@
 include_once('../include/common.inc.php');
 ?>
 <h1>About</h1>
+Written and managed by Alex Sadleir (maxious [at] lambdacomplex.org) 
 <?php
 include_footer_documents();
 ?>

--- a/documents/agency.php
+++ b/documents/agency.php
@@ -31,6 +31,12 @@
     } else {
         $rows = $foidocsdb->get_view("app", "byAgencyID?group=true", null, false, false, true)->rows;
         if ($rows) {
+function cmp($a, $b)
+{
+	global $idtoname;
+    return strcmp($idtoname[$a->key], $idtoname[$b->key]);
+}
+usort($rows, "cmp");
             foreach ($rows as $row) {
                 echo '<a href="agency.php?id=' . $row->key . '">' . $idtoname[$row->key] . " (" . $row->value . " records)</a> <br>\n";
             }

--- a/documents/charts.php
+++ b/documents/charts.php
@@ -112,7 +112,11 @@
     <?php
         try {
             $rows = $foidocsdb->get_view("app", "byAgencyID?group=true",null, false,false,true)->rows;
-
+function cmp($a, $b)
+{
+    return $a->value - $b->value;
+}
+usort($rows, "cmp");
 
             $dataValues = Array();
             $i = 0;

--- a/documents/index.php
+++ b/documents/index.php
@@ -18,6 +18,7 @@
     $idtoname[$row->id] = trim($row->value->name);
 }
 $foidocsdb = $server->get_db('disclosr-foidocuments');
+//print_r($foidocsdb);
 try {
     $rows = $foidocsdb->get_view("app", "byDate", Array($endkey, '0000-00-00'), true, 20, null, $enddocid)->rows;
     if ($rows) {

--- a/documents/rss.xml.php
+++ b/documents/rss.xml.php
@@ -31,11 +31,12 @@
 
 
 //print_r($rows);
+$i =0;
 foreach ($rows as $row) {
     //Create an empty FeedItem
     $newItem = $TestFeed->createNewItem();
     //Add elements to the feed item
-    $newItem->setTitle($row->value->title);
+    $newItem->setTitle(preg_replace('/[\x00-\x1F\x80-\xFF]/', '', $row->value->title));
     $newItem->setLink("http://disclosurelo.gs/view.php?id=" . $row->value->_id);
     $newItem->setDate(strtotime($row->value->date));
     $newItem->setDescription(displayLogEntry($row, $idtoname));
@@ -43,6 +44,8 @@
     $newItem->addElement('guid', "http://disclosurelo.gs/view.php?id=" . $row->value->_id, array('isPermaLink' => 'true'));
     //Now add the feed item
     $TestFeed->addItem($newItem);
+$i++;
+if ($i > 50) break;
 }
 //OK. Everything is done. Now genarate the feed.
 $TestFeed->generateFeed();

--- a/documents/scrapers/1d404c4934f74feacd00dcb434e7c10a.py
+++ b/documents/scrapers/1d404c4934f74feacd00dcb434e7c10a.py
@@ -6,8 +6,8 @@
 
 #http://www.doughellmann.com/PyMOTW/abc/
 class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
-        #def getTable(self,soup):
-        #        return soup.find(id = "cphMain_C001_Col01").table       
+        def getTable(self,soup):
+                return soup.findAll('table')[1]     
         def getColumnCount(self):
                 return 5
         def getColumns(self,columns):

--- a/documents/scrapers/41a166419503bb50e410c58be54c102f.py
+++ b/documents/scrapers/41a166419503bb50e410c58be54c102f.py
@@ -8,7 +8,7 @@
 #http://www.doughellmann.com/PyMOTW/abc/
 class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
     def getTable(self,soup):
-        return soup.find(id= "ctl00_MSO_ContentDiv").table
+        return soup.find(class_ = "rgMasterTable")
 
     def getColumns(self,columns):
         (id, title, description, notes) = columns

--- a/documents/scrapers/601aedeef4344638d635bdd761e9fdba.py
+++ b/documents/scrapers/601aedeef4344638d635bdd761e9fdba.py
@@ -6,8 +6,8 @@
 
 #http://www.doughellmann.com/PyMOTW/abc/
 class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
-        #def getTable(self,soup):
-        #        return soup.find(id = "ctl00_PlaceHolderMain_intro2__ControlWrapper_CerRichHtmlField").table       
+        def getTable(self,soup):
+                return soup.find(id = "main").table       
         def getColumnCount(self):
                 return 4
         def getColumns(self,columns):

--- a/documents/scrapers/8c9421f852c441910bf1d93a57b31d64.py
+++ b/documents/scrapers/8c9421f852c441910bf1d93a57b31d64.py
@@ -5,6 +5,8 @@
 
 #http://www.doughellmann.com/PyMOTW/abc/
 class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
+	def getTable(self,soup):
+                return soup.find(id = "page_content").table
         def getColumns(self,columns):
                 (id, date, title, description, notes) = columns
                 return (id, date, title, description, notes)

--- a/documents/scrapers/ad033512610d8e36886ab6a795f26561.py
+++ b/documents/scrapers/ad033512610d8e36886ab6a795f26561.py
@@ -6,8 +6,8 @@
 
 #http://www.doughellmann.com/PyMOTW/abc/
 class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
-        def getTable(self,soup):
-                return soup.find(id = "_ctl0__ctl0_MainContentPlaceHolder_MainContentPlaceHolder_ContentSpan").findAll("table")[3]
+#        def getTable(self,soup):
+#                return soup.find(_class = "content").table
         def getColumnCount(self):
                 return 5
         def getColumns(self,columns):

--- a/documents/scrapers/e2a845e55bc9986e6c75c5ad2c508b8d.py
+++ b/documents/scrapers/e2a845e55bc9986e6c75c5ad2c508b8d.py
@@ -1,16 +1,54 @@
 import sys,os
 sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
 import genericScrapers
-#RSS feed not detailed
+import dateutil
+from dateutil.parser import *
+from datetime import *
+import scrape
+from bs4 import BeautifulSoup
+class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
 
-#http://www.doughellmann.com/PyMOTW/abc/
-class ScraperImplementation(genericScrapers.GenericRSSDisclogScraper):
-        def getColumns(self,columns):
-                (id, date, title, description, notes) = columns
-                return (id, date, title, description, notes)
+    def __init__(self):
+        super(ScraperImplementation, self).__init__()
+    def getTable(self, soup):
+        return soup.find(id='content')
+
+    def getDescription(self,content, entry,doc):
+        link = None
+        links = []
+        description = ""
+        for atag in entry.find_all('a'):
+            if atag.has_attr('href'):
+                link = scrape.fullurl(self.getURL(), atag['href'])
+                (url, mime_type, htcontent) = scrape.fetchURL(scrape.docsdb, link, "foidocuments", self.getAgencyID(), False)
+                if htcontent != None:
+                    if mime_type == "text/html" or mime_type == "application/xhtml+xml" or mime_type =="application/xml":
+                        soup = BeautifulSoup(htcontent)
+                        row  = soup.find(id="foidetails")
+                        if row == None:
+                            row  = soup.find(id="content").table
+                        if row == None:
+                            row  = soup.find(id="content")
+                        description = ''.join(row.stripped_strings)
+                        for atag in row.find_all("a"):
+                                    if atag.has_attr('href'):
+                                        links.append(scrape.fullurl(link, atag['href']))
+
+        if links != []:
+                     doc.update({'links': links})
+        if description != "":
+            doc.update({ 'description': description})
+
+    def getColumnCount(self):
+        return 3
+
+    def getColumns(self, columns):
+        (id, title, date) = columns
+        return (id, date, title, title, None)
+
 
 if __name__ == '__main__':
-    print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericRSSDisclogScraper)
-    print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericRSSDisclogScraper)
+    print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
+    print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
     ScraperImplementation().doScrape()
 

--- a/documents/template.inc.php
+++ b/documents/template.inc.php
@@ -77,7 +77,7 @@
                     </p>
                     <ul class="nav">
                         <li><a href="agency.php">By Agency</a></li>
-                        <li><a href="date.php">By Date</a></li>
+<!--                        <li><a href="date.php">By Date</a></li> -->
                         <li><a href="disclogsList.php">List of Disclosure Logs</a></li>
                         <li><a href="charts.php">Charts</a></li>
                         <li><a href="about.php">About</a></li>