Fix reduce handling in get_view(); add ACMA disclosure log scraper
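
get_view() now treats $reduce as tri-state: null leaves the reduce
parameter off the query string so CouchDB applies the view's default,
while an explicit true or false is sent as reduce=true or reduce=false.
A rough sketch of the intended call sites ($db, the "docs" design doc
and the "by_date" view are placeholders, not part of this change):

    // omit reduce; let CouchDB decide
    $db->get_view("docs", "by_date");
    // force the raw map rows
    $db->get_view("docs", "by_date", null, false, false, false);
    // request the reduced result
    $db->get_view("docs", "by_date", null, false, false, true);

The new scraper parses ACMA-style disclosure log tables and saves one
document per table into the disclosr-foidocuments couch database.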


Former-commit-id: 5633b69c577c7553ef393e89754b6647eedbf014

--- a/couchdb/settee/src/classes/SetteeDatabase.class.php
+++ b/couchdb/settee/src/classes/SetteeDatabase.class.php
@@ -251,7 +251,7 @@
    * 
    * @return void
    */
-  function get_view($design_doc, $view_name, $key = null, $descending = false, $limit = false, $reduce=false) {
+  function get_view($design_doc, $view_name, $key = null, $descending = false, $limit = false, $reduce=null) {
     $id = "_design/" . urlencode($design_doc);
     $view_name = urlencode($view_name);
     $id .= "/_view/$view_name";
@@ -269,10 +269,12 @@
       if ($descending) {
         $data .= "&descending=true";
       }
-      if ($reduce) {
+      if ($reduce !== null) {
+        if ($reduce) {
         $data .= "&reduce=true";
       } else {
           $data .= "&reduce=false";
+        }
       }
       if ($limit) {
           $data .= "&limit=".$limit;

--- /dev/null
+++ b/documents/scrapers/7c6adc1d41cf029bf1a0959e5156477a.py
@@ -0,0 +1,55 @@
+import sys
+import os
+sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
+import genericScrapers
+import scrape
+from datetime import date
+from pyquery import PyQuery as pq
+from lxml import etree
+import urllib
+import dateutil
+from dateutil.parser import *
+
+class ACMADisclogScraper(genericScrapers.GenericDisclogScraper):
+
+    def doScrape(self):
+        foidocsdb = scrape.couch['disclosr-foidocuments']
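+        # fetch the agency's disclosure log page via the shared scrape module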
+        (url, mime_type, content) = scrape.fetchURL(scrape.docsdb,
+             self.getURL(), "foidocuments", self.getAgencyID())
+
+        d = pq(content.read())
+        d.make_links_absolute(base_url=self.getURL())
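+        # treat each table on the page as one disclosure log entry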
+        for table in d('table').items():
+            title = table('thead').text()
+            print title
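+            # fields are stacked one per row; take the text of each row's second cell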
+            (idate,descA,descB,link,deldate,notes) = table('tbody tr').map(lambda i, e: pq(e).children().eq(1).text())
+            links = table('a').map(lambda i, e: pq(e).attr('href'))
+            description = descA + " " + descB
+            edate = parse(idate[:12], dayfirst=True, fuzzy=True).strftime("%Y-%m-%d")
+            print edate
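+            # key the stored document on a hash of the table heading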
+            dochash = scrape.mkhash(self.remove_control_chars(title))
+            doc = foidocsdb.get(dochash)
+            if doc is None:
+                print "saving " + dochash
+                doc = {'_id': dochash, 'agencyID': self.getAgencyID(),
+                       'url': self.getURL(), 'docID': dochash,
+                       'links': links, 'date': edate, 'notes': notes,
+                       'title': "Disclosure Log Updated",
+                       'description': description}
+                #print doc
+                foidocsdb.save(doc)
+            else:
+                print "already saved"
+
+
+if __name__ == '__main__':
+    print 'Subclass:', issubclass(ACMADisclogScraper,
+         genericScrapers.GenericDisclogScraper)
+    print 'Instance:', isinstance(ACMADisclogScraper(),
+         genericScrapers.GenericDisclogScraper)
+    ACMADisclogScraper().doScrape()
+

--- a/documents/scrapers/7c6adc1d41cf029bf1a0959e5156477a.txt
+++ /dev/null
@@ -1,1 +0,0 @@
-acma style