prod fixes

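Make get_view()'s reduce parameter tri-state so reduce is only sent to CouchDB
when explicitly requested; add GenericHTMLDisclogScraper, which records an HTML
diff against the last saved copy of non-tabular disclosure logs; return
attachment bodies consistently from scrape.fetchURL; report scraper failures in
runScrapers.sh; replace "no disclog" stub notes with HTML, PDF and ACMA-style
scraper implementations.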

Former-commit-id: 130b8c05fff32afd5b4e3f8a9faadac5381bd456

--- a/couchdb/settee/src/classes/SetteeDatabase.class.php
+++ b/couchdb/settee/src/classes/SetteeDatabase.class.php
@@ -251,7 +251,7 @@
    * 
    * @return void
    */
-  function get_view($design_doc, $view_name, $key = null, $descending = false, $limit = false, $reduce=false) {
+  function get_view($design_doc, $view_name, $key = null, $descending = false, $limit = false, $reduce=null) {
     $id = "_design/" . urlencode($design_doc);
     $view_name = urlencode($view_name);
     $id .= "/_view/$view_name";
@@ -269,10 +269,13 @@
       if ($descending) {
         $data .= "&descending=true";
       }
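+      // null means reduce was not specified; skip the parameter so CouchDB applies its default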
-      if ($reduce) {
-        $data .= "&reduce=true";
-      } else {
-          $data .= "&reduce=false";
+      if ($reduce != null) {
+        if ($reduce == true) {
+          $data .= "&reduce=true";
+        } else {
+          $data .= "&reduce=false";
+        }
       }
       if ($limit) {
           $data .= "&limit=".$limit;

--- a/documents/genericScrapers.py
+++ b/documents/genericScrapers.py
@@ -13,6 +13,8 @@
 from datetime import *
 import codecs
 
+import difflib
+
 from StringIO import StringIO
 
 from pdfminer.pdfparser import PDFDocument, PDFParser
@@ -49,6 +51,32 @@
         """ do the scraping """
         return
 
+class GenericHTMLDisclogScraper(GenericDisclogScraper):
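+    """ Fallback for disclosure logs that are not in table form: when the page changes, save an update entry with an HTML diff against the last stored copy. """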
+
+    def doScrape(self):
+        foidocsdb = scrape.couch['disclosr-foidocuments']
+        (url, mime_type, rcontent) = scrape.fetchURL(scrape.docsdb,
+             self.getURL(), "foidocuments", self.getAgencyID())
+        content = rcontent
+        dochash = scrape.mkhash(content)
+        doc = foidocsdb.get(dochash)
+        if doc is None:
+            print "saving " + dochash
+            description = "This log may have been updated, but because it was not in table format the last time we viewed it, we cannot extract what has changed. Please refer to the Disclosure Log on the agency's website for the most recent entries."
+            last_attach = scrape.getLastAttachment(scrape.docsdb, self.getURL())
+            if last_attach is not None:
+                html_diff = difflib.HtmlDiff()
+                description = description + "\nChanges: "
+                description = description + html_diff.make_table(last_attach.read().split('\n'),
+                           content.split('\n'))
+            edate = date.today().strftime("%Y-%m-%d")
+            doc = {'_id': dochash, 'agencyID': self.getAgencyID(),
+                   'url': self.getURL(), 'docID': dochash,
+                   'date': edate, 'title': "Disclosure Log Updated", 'description': description}
+            foidocsdb.save(doc)
+        else:
+            print "already saved"
 
 class GenericPDFDisclogScraper(GenericDisclogScraper):
 
@@ -62,7 +90,8 @@
         device = TextConverter(rsrcmgr, outfp, codec='utf-8',
              laparams=laparams)
         fp = StringIO()
-        fp.write(content.read())
+        fp.write(content)
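+        # fetchURL now returns the raw page body rather than a file-like object, so write it straight into the buffer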
 
         process_pdf(rsrcmgr, device, fp, set(), caching=True,
              check_extractable=True)

--- a/documents/runScrapers.sh
+++ b/documents/runScrapers.sh
@@ -1,3 +1,11 @@
-for f in scrapers/*.py; do echo "Processing $f file.."; python $f; done
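+# run every scraper; if one exits non-zero, report it and pause so the failure stands out in the output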
+for f in scrapers/*.py; 
+	do echo "Processing $f file.."; 
+	python $f; 
+	if [ "$?" -ne "0" ]; then
+		echo "error running $f";
+		sleep 2;
+	fi
+done
 
 

--- a/documents/scrape.py
+++ b/documents/scrape.py
@@ -76,6 +76,17 @@
         addinfourl = urllib2.addinfourl(fp, headers, req.get_full_url())
         addinfourl.code = code
         return addinfourl
+
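+# return the most recently saved attachment for a URL so callers can diff it against a fresh fetch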
+def getLastAttachment(docsdb, url):
+    hash = mkhash(url)
+    doc = docsdb.get(hash)
+    if doc is not None and "_attachments" in doc:
+        last_attachment_fname = doc["_attachments"].keys()[-1]
+        last_attachment = docsdb.get_attachment(doc, last_attachment_fname)
+        return last_attachment
+    else:
+        return None
 
 def fetchURL(docsdb, url, fieldName, agencyID, scrape_again=True):
     url = canonurl(url)
@@ -94,10 +105,11 @@
             last_attachment_fname = doc["_attachments"].keys()[-1]
             last_attachment = docsdb.get_attachment(doc,last_attachment_fname)
             content = last_attachment
-            return (doc['url'],doc['mime_type'],content)
+            return (doc['url'],doc['mime_type'],content.read())
         if scrape_again == False:
             print "Not scraping this URL again as requested"
-            return (None,None,None)
+            last_attachment = docsdb.get_attachment(doc, doc["_attachments"].keys()[-1])
+            return (doc['url'],doc['mime_type'],last_attachment.read())
 
     req.add_header("User-Agent", "Mozilla/4.0 (compatible; Prometheus webspider; owner maxious@lambdacomplex.org)")
     #if there is a previous version stored in couchdb, load caching helper tags
@@ -131,7 +143,7 @@
                 last_attachment_fname = doc["_attachments"].keys()[-1]
                 last_attachment = docsdb.get_attachment(doc,last_attachment_fname)
                 content = last_attachment
-                return (doc['url'],doc['mime_type'],content)
+                return (doc['url'],doc['mime_type'],content.read())
             else:
                 print "new webpage loaded"
                 content = url_handle.read()

--- a/documents/scrapers/0049d35216493c545ef5f7f000e6b252.py
+++ b/documents/scrapers/0049d35216493c545ef5f7f000e6b252.py
@@ -12,8 +12,8 @@
 
 if __name__ == '__main__':
     print 'Subclass:', issubclass(ScraperImplementation,
-         genericScrapers.GenericOAICDisclogScraper)
+         genericScrapers.GenericPDFDisclogScraper)
     print 'Instance:', isinstance(ScraperImplementation(),
-         genericScrapers.GenericOAICDisclogScraper)
+         genericScrapers.GenericPDFDisclogScraper)
     ScraperImplementation().doScrape()
 

--- /dev/null
+++ b/documents/scrapers/0ae822d1a748e60d90f0b79b97d5a3e5.py
@@ -0,0 +1,19 @@
+import sys
+import os
+sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
+import genericScrapers
+
+
+class ScraperImplementation(genericScrapers.GenericHTMLDisclogScraper):
+
+    def __init__(self):
+        super(ScraperImplementation, self).__init__()
+
+
+if __name__ == '__main__':
+    print 'Subclass:', issubclass(ScraperImplementation,
+         genericScrapers.GenericHTMLDisclogScraper)
+    print 'Instance:', isinstance(ScraperImplementation(),
+         genericScrapers.GenericHTMLDisclogScraper)
+    ScraperImplementation().doScrape()
+

--- /dev/null
+++ b/documents/scrapers/31685505438d393f45a90f442b8fa27f.py
@@ -0,0 +1,19 @@
+import sys
+import os
+sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
+import genericScrapers
+
+
+class ScraperImplementation(genericScrapers.GenericPDFDisclogScraper):
+
+    def __init__(self):
+        super(ScraperImplementation, self).__init__()
+
+
+if __name__ == '__main__':
+    print 'Subclass:', issubclass(ScraperImplementation,
+         genericScrapers.GenericPDFDisclogScraper)
+    print 'Instance:', isinstance(ScraperImplementation(),
+         genericScrapers.GenericPDFDisclogScraper)
+    ScraperImplementation().doScrape()
+

--- a/documents/scrapers/31685505438d393f45a90f442b8fa27f.txt
+++ /dev/null
@@ -1,2 +0,0 @@
-pdf
-

--- /dev/null
+++ b/documents/scrapers/3e2f110af49d62833a835bd257771ffb.py
@@ -0,0 +1,19 @@
+import sys
+import os
+sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
+import genericScrapers
+
+
+class ScraperImplementation(genericScrapers.GenericHTMLDisclogScraper):
+
+    def __init__(self):
+        super(ScraperImplementation, self).__init__()
+
+
+if __name__ == '__main__':
+    print 'Subclass:', issubclass(ScraperImplementation,
+         genericScrapers.GenericHTMLDisclogScraper)
+    print 'Instance:', isinstance(ScraperImplementation(),
+         genericScrapers.GenericHTMLDisclogScraper)
+    ScraperImplementation().doScrape()
+

--- a/documents/scrapers/3e2f110af49d62833a835bd257771ffb.txt
+++ /dev/null
@@ -1,2 +0,0 @@
-no disclog
-

--- /dev/null
+++ b/documents/scrapers/4d2af2dcc72f1703bbf04b13b03720a8.py
@@ -0,0 +1,19 @@
+import sys
+import os
+sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
+import genericScrapers
+
+
+class ScraperImplementation(genericScrapers.GenericHTMLDisclogScraper):
+
+    def __init__(self):
+        super(ScraperImplementation, self).__init__()
+
+
+if __name__ == '__main__':
+    print 'Subclass:', issubclass(ScraperImplementation,
+         genericScrapers.GenericHTMLDisclogScraper)
+    print 'Instance:', isinstance(ScraperImplementation(),
+         genericScrapers.GenericHTMLDisclogScraper)
+    ScraperImplementation().doScrape()
+

--- a/documents/scrapers/4d2af2dcc72f1703bbf04b13b03720a8.txt
+++ /dev/null
@@ -1,2 +0,0 @@
-no disclog
-

--- a/documents/scrapers/50601505ef69483121a6d130bb0515e4.txt
+++ b/documents/scrapers/50601505ef69483121a6d130bb0515e4.txt
@@ -1,1 +1,1 @@
-apsc has ACMA style disclog
+ACMA style

--- /dev/null
+++ b/documents/scrapers/525c3953187da08cd702359b2fc2997f.py
@@ -0,0 +1,19 @@
+import sys
+import os
+sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
+import genericScrapers
+
+
+class ScraperImplementation(genericScrapers.GenericHTMLDisclogScraper):
+
+    def __init__(self):
+        super(ScraperImplementation, self).__init__()
+
+
+if __name__ == '__main__':
+    print 'Subclass:', issubclass(ScraperImplementation,
+         genericScrapers.GenericHTMLDisclogScraper)
+    print 'Instance:', isinstance(ScraperImplementation(),
+         genericScrapers.GenericHTMLDisclogScraper)
+    ScraperImplementation().doScrape()
+

--- a/documents/scrapers/525c3953187da08cd702359b2fc2997f.txt
+++ /dev/null
@@ -1,2 +0,0 @@
-no disclog
-

--- /dev/null
+++ b/documents/scrapers/627f116dfe42c9f27ad6747be0aa44e2.py
@@ -0,0 +1,19 @@
+import sys
+import os
+sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
+import genericScrapers
+
+
+class ScraperImplementation(genericScrapers.GenericHTMLDisclogScraper):
+
+    def __init__(self):
+        super(ScraperImplementation, self).__init__()
+
+
+if __name__ == '__main__':
+    print 'Subclass:', issubclass(ScraperImplementation,
+         genericScrapers.GenericHTMLDisclogScraper)
+    print 'Instance:', isinstance(ScraperImplementation(),
+         genericScrapers.GenericHTMLDisclogScraper)
+    ScraperImplementation().doScrape()
+

--- a/documents/scrapers/627f116dfe42c9f27ad6747be0aa44e2.txt
+++ /dev/null
@@ -1,1 +0,0 @@
-no disclog

--- /dev/null
+++ b/documents/scrapers/795e7a8afb39a420360aa207b0cb1306.py
@@ -0,0 +1,19 @@
+import sys
+import os
+sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
+import genericScrapers
+
+
+class ScraperImplementation(genericScrapers.GenericHTMLDisclogScraper):
+
+    def __init__(self):
+        super(ScraperImplementation, self).__init__()
+
+
+if __name__ == '__main__':
+    print 'Subclass:', issubclass(ScraperImplementation,
+         genericScrapers.GenericHTMLDisclogScraper)
+    print 'Instance:', isinstance(ScraperImplementation(),
+         genericScrapers.GenericHTMLDisclogScraper)
+    ScraperImplementation().doScrape()
+

--- a/documents/scrapers/795e7a8afb39a420360aa207b0cb1306.txt
+++ /dev/null
@@ -1,2 +0,0 @@
-no disclog
-

--- /dev/null
+++ b/documents/scrapers/7b39ce7f362a0af9a711eaf223943eea.py
@@ -0,0 +1,19 @@
+import sys
+import os
+sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
+import genericScrapers
+
+
+class ScraperImplementation(genericScrapers.GenericHTMLDisclogScraper):
+
+    def __init__(self):
+        super(ScraperImplementation, self).__init__()
+
+
+if __name__ == '__main__':
+    print 'Subclass:', issubclass(ScraperImplementation,
+         genericScrapers.GenericHTMLDisclogScraper)
+    print 'Instance:', isinstance(ScraperImplementation(),
+         genericScrapers.GenericHTMLDisclogScraper)
+    ScraperImplementation().doScrape()
+

--- a/documents/scrapers/7b39ce7f362a0af9a711eaf223943eea.txt
+++ /dev/null
@@ -1,2 +0,0 @@
-no disclog
-

--- /dev/null
+++ b/documents/scrapers/7c6adc1d41cf029bf1a0959e5156477a.py
@@ -0,0 +1,51 @@
+import sys
+import os
+sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
+import genericScrapers
+import scrape
+from datetime import date
+from pyquery import PyQuery as pq
+from lxml import etree
+import urllib
+import dateutil
+from dateutil.parser import *
+
+class ACMADisclogScraper(genericScrapers.GenericDisclogScraper):
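+    """ Scrape an ACMA-style disclosure log: each HTML table is one entry, with one field per body row (the value sits in the row's second cell). """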
+
+    def doScrape(self):
+        foidocsdb = scrape.couch['disclosr-foidocuments']
+        (url, mime_type, content) = scrape.fetchURL(scrape.docsdb,
+             self.getURL(), "foidocuments", self.getAgencyID())
+
+        d = pq(content)
+        d.make_links_absolute(base_url=self.getURL())
+        for table in d('table').items():
+            title = table('thead').text()
+            print title
+            (idate,descA,descB,link,deldate,notes) = table('tbody tr').map(lambda i, e: pq(e).children().eq(1).text())
+            links = table('a').map(lambda i, e: pq(e).attr('href'))
+            description = descA+" "+descB
+            edate = parse(idate[:12], dayfirst=True, fuzzy=True).strftime("%Y-%m-%d")
+            print edate
+            dochash = scrape.mkhash(self.remove_control_chars(title))
+            doc = foidocsdb.get(dochash)
+            if doc is None:
+                print "saving " + dochash
+                doc = {'_id': dochash, 'agencyID': self.getAgencyID(),
+                       'url': self.getURL(), 'docID': dochash,
+                       'links': links,
+                       'date': edate, 'notes': notes, 'title': title, 'description': description}
+                #print doc
+                foidocsdb.save(doc)
+            else:
+                print "already saved"
+
+
+if __name__ == '__main__':
+    print 'Subclass:', issubclass(ACMADisclogScraper,
+         genericScrapers.GenericDisclogScraper)
+    print 'Instance:', isinstance(ACMADisclogScraper(),
+         genericScrapers.GenericDisclogScraper)
+    ACMADisclogScraper().doScrape()
+

--- a/documents/scrapers/7c6adc1d41cf029bf1a0959e5156477a.txt
+++ /dev/null
@@ -1,1 +0,0 @@
-acma style

--- /dev/null
+++ b/documents/scrapers/9f4815bfdcb918a036e4bb43a30f8d77.py
@@ -0,0 +1,19 @@
+import sys
+import os
+sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
+import genericScrapers
+
+
+class ScraperImplementation(genericScrapers.GenericHTMLDisclogScraper):
+
+    def __init__(self):
+        super(ScraperImplementation, self).__init__()
+
+
+if __name__ == '__main__':
+    print 'Subclass:', issubclass(ScraperImplementation,
+         genericScrapers.GenericHTMLDisclogScraper)
+    print 'Instance:', isinstance(ScraperImplementation(),
+         genericScrapers.GenericHTMLDisclogScraper)
+    ScraperImplementation().doScrape()
+

--- a/documents/scrapers/9f4815bfdcb918a036e4bb43a30f8d77.txt
+++ /dev/null
@@ -1,1 +0,0 @@
-no disclog

--- /dev/null
+++ b/documents/scrapers/bb96fe4065afb7e0872136dd657f9369.py
@@ -0,0 +1,19 @@
+import sys
+import os
+sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
+import genericScrapers
+
+
+class ScraperImplementation(genericScrapers.GenericHTMLDisclogScraper):
+
+    def __init__(self):
+        super(ScraperImplementation, self).__init__()
+
+
+if __name__ == '__main__':
+    print 'Subclass:', issubclass(ScraperImplementation,
+         genericScrapers.GenericHTMLDisclogScraper)
+    print 'Instance:', isinstance(ScraperImplementation(),
+         genericScrapers.GenericHTMLDisclogScraper)
+    ScraperImplementation().doScrape()
+

--- a/documents/scrapers/bb96fe4065afb7e0872136dd657f9369.txt
+++ /dev/null
@@ -1,1 +0,0 @@
-no disclog

--- /dev/null
+++ b/documents/scrapers/bf6e587f166040b63681cd2ff76fbfdf.py
@@ -0,0 +1,19 @@
+import sys
+import os
+sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
+import genericScrapers
+
+
+class ScraperImplementation(genericScrapers.GenericHTMLDisclogScraper):
+
+    def __init__(self):
+        super(ScraperImplementation, self).__init__()
+
+
+if __name__ == '__main__':
+    print 'Subclass:', issubclass(ScraperImplementation,
+         genericScrapers.GenericHTMLDisclogScraper)
+    print 'Instance:', isinstance(ScraperImplementation(),
+         genericScrapers.GenericHTMLDisclogScraper)
+    ScraperImplementation().doScrape()
+

--- a/documents/scrapers/bf6e587f166040b63681cd2ff76fbfdf.txt
+++ /dev/null
@@ -1,1 +0,0 @@
-no disclog

--- a/documents/scrapers/c1302c8d7cbbd911f0d4d8a4128f8079.txt
+++ b/documents/scrapers/c1302c8d7cbbd911f0d4d8a4128f8079.txt
@@ -1,1 +1,1 @@
-uses RET disclog
+parent

--- /dev/null
+++ b/documents/scrapers/cb7f40e3495b682de6eee61bf09c1cfc.py
@@ -0,0 +1,19 @@
+import sys
+import os
+sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
+import genericScrapers
+
+
+class ScraperImplementation(genericScrapers.GenericHTMLDisclogScraper):
+
+    def __init__(self):
+        super(ScraperImplementation, self).__init__()
+
+
+if __name__ == '__main__':
+    print 'Subclass:', issubclass(ScraperImplementation,
+         genericScrapers.GenericHTMLDisclogScraper)
+    print 'Instance:', isinstance(ScraperImplementation(),
+         genericScrapers.GenericHTMLDisclogScraper)
+    ScraperImplementation().doScrape()
+

--- a/documents/scrapers/cb7f40e3495b682de6eee61bf09c1cfc.txt
+++ /dev/null
@@ -1,1 +0,0 @@
-no disclog

--- /dev/null
+++ b/documents/scrapers/d72744fb1e5d6e87af9a5ea16cc27fa5.py
@@ -0,0 +1,50 @@
+import sys
+import os
+sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
+import genericScrapers
+import scrape
+from datetime import date
+from pyquery import PyQuery as pq
+from lxml import etree
+import urllib
+import dateutil
+from dateutil.parser import *
+
+class ACMADisclogScraper(genericScrapers.GenericDisclogScraper):
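+    """ Scrape a disclosure log rendered as .item-list blocks: the h3 is the title and the ul text is the description. """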
+
+    def doScrape(self):
+        foidocsdb = scrape.couch['disclosr-foidocuments']
+        (url, mime_type, content) = scrape.fetchURL(scrape.docsdb,
+             self.getURL(), "foidocuments", self.getAgencyID())
+
+        d = pq(content)
+        d.make_links_absolute(base_url=self.getURL())
+        for item in d('.item-list').items():
+            title = item('h3').text()
+            print title
+            links = item('a').map(lambda i, e: pq(e).attr('href'))
+            description = item('ul').text()
+            edate = date.today().strftime("%Y-%m-%d")
+            print edate
+            dochash = scrape.mkhash(self.remove_control_chars(title))
+            doc = foidocsdb.get(dochash)
+            if doc is None:
+                print "saving " + dochash
+                doc = {'_id': dochash, 'agencyID': self.getAgencyID(),
+                       'url': self.getURL(), 'docID': dochash,
+                       'links': links,
+                       'date': edate, 'title': title, 'description': description}
+                #print doc
+                foidocsdb.save(doc)
+            else:
+                print "already saved"
+
+
+if __name__ == '__main__':
+    print 'Subclass:', issubclass(ACMADisclogScraper,
+         genericScrapers.GenericDisclogScraper)
+    print 'Instance:', isinstance(ACMADisclogScraper(),
+         genericScrapers.GenericDisclogScraper)
+    ACMADisclogScraper().doScrape()
+

--- /dev/null
+++ b/documents/scrapers/e770921522a49dc77de208cc724ce134.py
@@ -1,1 +1,19 @@
+import sys
+import os
+sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
+import genericScrapers
 
+
+class ScraperImplementation(genericScrapers.GenericHTMLDisclogScraper):
+
+    def __init__(self):
+        super(ScraperImplementation, self).__init__()
+
+
+if __name__ == '__main__':
+    print 'Subclass:', issubclass(ScraperImplementation,
+         genericScrapers.GenericHTMLDisclogScraper)
+    print 'Instance:', isinstance(ScraperImplementation(),
+         genericScrapers.GenericHTMLDisclogScraper)
+    ScraperImplementation().doScrape()
+

--- a/documents/scrapers/e770921522a49dc77de208cc724ce134.txt
+++ /dev/null
@@ -1,1 +0,0 @@
-no disclog