Merge branch 'master' of ssh://apples.lambdacomplex.org/git/disclosr


Former-commit-id: 795219845b9651051af38601d3b6b92fc0ed0d93

--- a/admin/refreshDesignDoc.php
+++ b/admin/refreshDesignDoc.php
@@ -42,6 +42,7 @@
 $obj->views->byWebServer->map = "function(doc) {\n  emit(doc.web_server, doc);\n}";
 
 $obj->views->datasets->map = "function(doc) {\nif (doc.fieldName == \"data\") {\n  emit(doc._id, doc);\n}\n}";
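+// datasetGroups view: for each dataset document, emit every "data.gov.au Category" tag with the dataset URL so datasets can be grouped by category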
+$obj->views->datasetGroups->map = "function(doc) {\nif (doc.fieldName == \"data\") {\n  doc.metadata[\"data.gov.au Category\"] && doc.metadata[\"data.gov.au Category\"].forEach(function(tag) {\n emit(tag, doc.url); \n  });\n}\n}";
 $obj->views->getValidationRequired->map = "function(doc) {\nif (doc.mime_type == \"text/html\" \n&& typeof(doc.validation) == \"undefined\") {\n  emit(doc._id, doc._attachments);\n}\n}";
 $docdb->save($obj, true);
 

file:b/disclosr.iml (new)
--- /dev/null
+++ b/disclosr.iml
@@ -1,1 +1,16 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<module type="WEB_MODULE" version="4">
+  <component name="FacetManager">
+    <facet type="Python" name="Python">
+      <configuration sdkName="" />
+    </facet>
+  </component>
+  <component name="NewModuleRootManager" inherit-compiler-output="true">
+    <exclude-output />
+    <content url="file://$MODULE_DIR$" />
+    <orderEntry type="inheritedJdk" />
+    <orderEntry type="sourceFolder" forTests="false" />
+  </component>
+</module>
 
+

--- /dev/null
+++ b/documents/datagov-export-groups.py
@@ -1,1 +1,81 @@
+import ckanclient
+import couchdb
+from ckanclient import CkanApiError
+import re
 
+
+class LoaderError(Exception):
+    pass
+
+# Instantiate the CKAN client.
+#ckan = ckanclient.CkanClient(base_location='http://localhost:5000/api',    api_key='b47b24cd-591d-40c1-8677-d73101d56d1b')
+api_key = 'b3ab75e4-afbb-465b-a09d-8171c8c69a7a'
+ckan = ckanclient.CkanClient(base_location='http://data.disclosurelo.gs/api',
+                             api_key=api_key)
+couch = couchdb.Server('http://127.0.0.1:5984/')
+#couch = couchdb.Server('http://192.168.1.113:5984/')
+
+# https://github.com/okfn/ckanext-importlib
+def munge(name):
+    # convert spaces to underscores
+    name = re.sub(' ', '_', name).lower()
+    # convert symbols to dashes
+    name = re.sub('[:]', '_-', name).lower()
+    name = re.sub('[/]', '-', name).lower()
+    # take out not-allowed characters
+    name = re.sub('[^a-zA-Z0-9-_]', '', name).lower()
+    # remove double underscores
+    name = re.sub('__', '_', name).lower()
+    return name
+
+
+def name_munge(input_name):
+    return munge(input_name.replace(' ', '').replace('.', '_').replace('&', 'and'))
+
+
+docsdb = couch['disclosr-documents']
+
+if __name__ == "__main__":
+    groups = {}
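+    # build a map of group name -> package names from the CouchDB datasetGroups view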
+    for doc in docsdb.view('app/datasetGroups'):
+            group_name = doc.key
+            if group_name != "Not specified":
+                pkg_name = filter(lambda x: x in '0123456789abcdefghijklmnopqrstuvwxyz-_',
+                                  doc.value.replace("http://data.gov.au/dataset/", '').replace('/', '')[:100]);
+                if group_name in groups.keys():
+                    groups[group_name] = list(set(groups[group_name] + [pkg_name]))
+                else:
+                    groups[group_name] = [pkg_name]
+
+    # add dataset to group(s)
+    for group_name in groups.keys():
+        if group_name != "Not specified":
+            group_url = name_munge(group_name[:100])
+            print group_name
+            print groups[group_name]
+            try:
+                # Update the group details
+                group_entity = ckan.group_entity_get(group_url)
+                print "group "+group_name+" exists"
+                if 'packages' in group_entity.keys():
+                    group_entity['packages'] = list(set(group_entity['packages'] + groups[group_name]))
+                else:
+                    group_entity['packages'] = groups[group_name]
+                ckan.group_entity_put(group_entity)
+            except CkanApiError, e:
+                if ckan.last_status == 404:
+                    print "group "+group_name+" does not exist, creating"
+                    group_entity = {
+                        'name': group_url,
+                        'title': group_name,
+                        'description': group_name,
+                        'packages': groups[group_name]
+                    }
+                    #print group_entity
+                    ckan.group_register_post(group_entity)
+                elif ckan.last_status == 409:
+                    print "group already exists"
+                else:
+                    raise LoaderError('Unexpected status %s adding to group under \'%s\': %r' % (
+                        ckan.last_status, group_name, e.args))
+

--- a/documents/datagov-export.py
+++ b/documents/datagov-export.py
@@ -3,17 +3,19 @@
 from ckanclient import CkanApiError
 import re
 import html2text # aaronsw :(
+import ckanapi # https://github.com/open-data/ckanapi
 
 
 class LoaderError(Exception):
     pass
 
 # Instantiate the CKAN client.
-#ckan = ckanclient.CkanClient(base_location='http://localhost:5000/api',    api_key='b47b24cd-591d-40c1-8677-d73101d56d1b')
+api_key = 'ff34526e-f794-4068-8235-fcbba38cd8bc'
 ckan = ckanclient.CkanClient(base_location='http://data.disclosurelo.gs/api',
-    api_key='482a9dd2-a976-4adf-ac77-d71d92a98a52')
-#couch = couchdb.Server('http://127.0.0.1:5984/')
-couch = couchdb.Server('http://192.168.1.113:5984/')
+                             api_key=api_key)
+ckandirect = ckanapi.RemoteCKAN('http://data.disclosurelo.gs', api_key=api_key)
+couch = couchdb.Server('http://127.0.0.1:5984/')
+#couch = couchdb.Server('http://192.168.1.113:5984/')
 
 # http://code.activestate.com/recipes/578019-bytes-to-human-human-to-bytes-converter/
 SYMBOLS = {
@@ -24,6 +26,7 @@
     'iec_ext': ('byte', 'kibi', 'mebi', 'gibi', 'tebi', 'pebi', 'exbi',
                 'zebi', 'yobi'),
 }
+
 
 def human2bytes(s):
     """
@@ -53,6 +56,9 @@
           ...
       ValueError: can't interpret '12 foo'
     """
+    if s == None:
+        return 0
+    s = s.replace(',', '')
     init = s
     num = ""
     while s and s[0:1].isdigit() or s[0:1] == '.':
@@ -91,7 +97,6 @@
 
 def name_munge(input_name):
     return munge(input_name.replace(' ', '').replace('.', '_').replace('&', 'and'))
-    #return input_name.replace(' ', '').replace('.', '_').replace('&', 'and')
 
 
 def get_licence_id(licencename):
@@ -112,82 +117,105 @@
         raise Exception(licencename + " not found");
     return map[licencename];
 
+
 docsdb = couch['disclosr-documents']
 
 if __name__ == "__main__":
+    orgs_list = []
+    orgs_ids = {}
     for doc in docsdb.view('app/datasets'):
+        print "   ---   "
         print doc.id
-        if doc.value['url'] != "http://data.gov.au/data/":
+
+        if doc.value['url'] != "http://data.gov.au/data/" and doc.value['agencyID'] != "qld":
+
+
             # Collect the package metadata.
-            pkg_name = doc.value['url'].replace("http://data.gov.au/dataset/",'').replace('/','');
+            pkg_name = filter(lambda x: x in '0123456789abcdefghijklmnopqrstuvwxyz-_',
+                              doc.value['url'].replace("http://data.gov.au/dataset/", '').replace('/', '')[:100]);
+            print pkg_name
+            #add to or create organization using direct API
+            agency = doc.value['metadata']["Agency"]
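+            # normalise known agency name variants to their canonical organisation titles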
+            if agency == "APS":
+                agency = "Australian Public Service Commission"
+            if agency == "Shared Services, Treasury Directorate":
+                agency = "Shared Services Procurement, Treasury Directorate"
+            if agency == "Treasury - Shared Services":
+                agency = "Shared Services Procurement, Treasury Directorate"
+            if agency == "Territory and Municipal Services (TAMS)":
+                agency = "Territory and Municipal Services Directorate"
+            if agency == "State Library of NSW":
+                agency = "State Library of New South Wales"
+            org_name = name_munge(agency[:100])
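+            # only hit the CKAN API for the organisation list when this org isn't already cached locally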
+            if org_name not in orgs_list:
+                orgs_list = ckandirect.action.organization_list()['result']
+                #print orgs_list
+                if org_name not in orgs_list:
+                    try:
+                        print "org not found, creating " + org_name
+                        ckandirect.action.organization_create(name=org_name, title=agency,
+                                                              description=agency)
+                        orgs_list.append(org_name)
+                    except ckanapi.ValidationError, e:
+                        print e
+                        raise LoaderError('Unexpected status')
+                else:
+                    print "org found, adding dataset to " + org_name
+
+            # cache org names -> id mapping
+            if org_name not in orgs_ids:
+                org = ckandirect.action.organization_show(id=org_name)
+                orgs_ids[org_name] = org["result"]["id"]
+            org_id = orgs_ids[org_name]
+            print "org id is "+org_id
             tags = []
-            if len(doc.value['metadata']["Keywords / Tags"]) > 0:
-                if hasattr(doc.value['metadata']["Keywords / Tags"], '__iter__'):
-                    tags = tags + doc.value['metadata']["Keywords / Tags"]
-                else:
-                    tags = tags + [doc.value['metadata']["Keywords / Tags"]]
-            if 'data.gov.au Category' in doc.value['metadata'].keys() and len(doc.value['metadata']['data.gov.au Category']) > 0:
-                if hasattr(doc.value['metadata']['data.gov.au Category'], '__iter__'):
-                    tags = tags + doc.value['metadata']['data.gov.au Category']
-                else:
-                    tags = tags + [doc.value['metadata']['data.gov.au Category']]
-            tags = [re.sub('[^a-zA-Z0-9-_.]', '', tag.replace('&', 'and')).lower() for tag in tags if tag]
-            print tags
-            package_entity = {
-                'name': pkg_name,
-                'title': doc.value['metadata']['DCTERMS.Title'],
-                'url': doc.value['metadata']['DCTERMS.Source.URI'],
-                'tags': tags, #tags are mandatory?
-                'author': doc.value['metadata']["DCTERMS.Creator"],
-                'maintainer': doc.value['metadata']["DCTERMS.Creator"],
-                'licence_id': get_licence_id(doc.value['metadata']['DCTERMS.License']),
-                'notes': html2text.html2text(doc.value['metadata']['Description']),
-            }
+            creator = doc.value['metadata']["DCTERMS.Creator"]
+            if doc.value['agencyID'] == "AGIMO":
+                if len(doc.value['metadata']["Keywords / Tags"]) > 0:
+                    if hasattr(doc.value['metadata']["Keywords / Tags"], '__iter__'):
+                        tags = tags + doc.value['metadata']["Keywords / Tags"]
+                    else:
+                        tags = tags + [doc.value['metadata']["Keywords / Tags"]]
+
+                tags = [re.sub('[^a-zA-Z0-9-_.]', '', tag.replace('&', 'and')).lower() for tag in tags if tag]
+                #print tags
+                extras = []
+
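+                # copy the remaining metadata fields into CKAN extras, skipping fields handled elsewhere (tags, category/groups, downloads)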
+                for extra_key in doc.value['metadata'].keys():
+                    if extra_key != "Keywords / Tags" and extra_key != "data.gov.au Category" and extra_key != "Download" :
+                        extras.append({'key':extra_key, 'value':doc.value['metadata'][extra_key]})
+
+                package_entity = {
+                    'name': pkg_name,
+                    'title': doc.value['metadata']['DCTERMS.Title'],
+                    'url': doc.value['metadata']['DCTERMS.Source.URI'],
+                    'tags': tags, #tags are mandatory?
+                    'author': creator,
+                    'maintainer': creator,
+                    'licence_id': get_licence_id(doc.value['metadata']['DCTERMS.License']),
+                    'notes': html2text.html2text(doc.value['metadata']['Description']),
+                    'owner_org': org_id,
+                    'extras': extras
+                }
+
 
             try:
-                print package_entity
+                #print package_entity
                 ckan.package_register_post(package_entity)
             except CkanApiError, e:
-                if ckan.last_status == 409:
+                if ckan.last_message == "{\"name\": [\"That URL is already in use.\"]}":
                     print "package already exists"
                 else:
+                    print ckan.last_message
                     raise LoaderError('Unexpected status %s checking for package under \'%s\': %r' % (
                         ckan.last_status, pkg_name, e.args))
-
-
-            #add to group
-
-            group_name = name_munge(doc.value['metadata']["Agency"][:100])
-            try:
-                print ckan.group_entity_get(group_name)
-
-                # Update the group details
-                group_entity = ckan.last_message
-                print "group exists"
-                if 'packages' in group_entity.keys():
-                    group_entity['packages'] = list(set(group_entity['packages'] + [pkg_name]))
-                else:
-                    group_entity['packages'] = [pkg_name]
-                ckan.group_entity_put(group_entity)
-            except CkanApiError, e:
-                if ckan.last_status == 404:
-                    print "group does not exist, creating"
-                    group_entity = {
-                        'name': group_name,
-                        'title': doc.value['metadata']["Agency"],
-                        'description': doc.value['metadata']["Agency"],
-                        'packages': [pkg_name],
-                        # 'type': "organization" # not allowed via API, use database query
-                        # update "group" set type = 'organization';
-                        }
-                    print group_entity
-                    ckan.group_register_post(group_entity)
-                else:
-                    raise LoaderError('Unexpected status %s adding to group under \'%s\': %r' % (
-                        ckan.last_status, pkg_name, e.args))
+            pkg = ckan.package_entity_get(pkg_name)
+
+
+            # add resources (downloadable data files)
             if 'Download' in doc.value['metadata'].keys():
                 try:
-                    pkg = ckan.package_entity_get(pkg_name)
+
                     resources = pkg.get('resources', [])
                     if len(resources) < len(doc.value['metadata']['Download']):
                         for resource in doc.value['metadata']['Download']:
@@ -199,12 +227,17 @@
                                 format = 'xml'
                             if resource['format'] == '(CSV/XLS)':
                                 format = 'csv'
+                            if resource['format'] == '(Shapefile)':
+                                format = 'shp'
+                            if resource['format'] == '(KML/KMZ)':
+                                format = 'kml'
                             name = resource['href']
                             if 'name' in resource.keys():
                                 name = resource['name']
                             print resource
                             ckan.add_package_resource(pkg_name, resource['href'], name=name, resource_type='data',
-                                format=format, size=human2bytes(resource['size'].replace(',', '')))
+                                                      format=format,
+                                                      size=human2bytes(resource.get('size','0B')))
                     else:
                         print "resources already exist"
                 except CkanApiError, e:

--- a/documents/datagov.py
+++ b/documents/datagov.py
@@ -39,7 +39,9 @@
                                     link = item.find("a")
                                     format = item.find(property="dc:format")
                                     linkobj = {"href":link['href'].replace("/bye?","").strip(),
-                                            "format": format.string.strip(), "size": format.next_sibling.string.strip()}
+                                            "format": format.string.strip()}
+                                    if format.next_sibling.string != None:
+                                        linkobj["size"] = format.next_sibling.string.strip()
                                     if link.string != None:
                                         linkobj["name"] = link.string.strip()
                                     doc['metadata'][last_title].append(linkobj)

--- /dev/null
+++ b/documents/dataqld.py
@@ -1,1 +1,28 @@
+import sys, os
+import time
+import scrape
+from bs4 import BeautifulSoup
 
+from unidecode import unidecode
+import ckanclient
+
+# Instantiate the CKAN client.
+ckan = ckanclient.CkanClient(base_location='https://data.qld.gov.au/api')
+
+# Get the package list.
+package_list = ckan.package_register_get()
+for package_name in package_list:
+# Get the details of a package.
+    (url, mime_type, html) = scrape.fetchURL(scrape.docsdb,
+        "https://data.qld.gov.au/dataset/"+package_name , "data", "qld", False)
+    hash = scrape.mkhash(scrape.canonurl(url))
+    print hash
+    doc = scrape.docsdb.get(hash)
+    if "metadata" not in doc.keys() or True:
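+        # fetch the CKAN package metadata and store it on the scraped CouchDB document ("or True" forces a refresh every run)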
+        ckan.package_entity_get(package_name)
+        package_entity = ckan.last_message
+        doc['type'] = "dataset"
+        doc['metadata'] = package_entity
+        print package_entity
+        scrape.docsdb.save(doc)
+

--- a/documents/gazette.py
+++ b/documents/gazette.py
@@ -5,20 +5,53 @@
 
 from unidecode import unidecode
 
-listurl = "http://gazettes.ag.gov.au/portal/govgazonline.nsf/publications?OpenView&Start=3960"
-(url, mime_type, listhtml) = scrape.fetchURL(scrape.docsdb,
-    listurl, "gazette", "AGD")
-soup = BeautifulSoup(listhtml)
-for row in soup.find_all('tr'):
-    if row.has_key('valign'):
-	for col in tr.find_all('td'):
-		print col.string
-        #url = scrape.fullurl(listurl, atag['href'])
-        #(url, mime_type, html) = scrape.fetchURL(scrape.docsdb,
-        #    url, "data", "AGIMO")
-        #hash = scrape.mkhash(scrape.canonurl(url))
-        #doc = scrape.docsdb.get(hash)
-        #print doc['metadata']
-        #scrape.docsdb.save(doc)
-        #time.sleep(2)
+items = 3950
+items = 1
+while True:
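+    # page backwards through the gazette list, 25 items per screen, until the start is reached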
+    print str(items) + " (" +str(items/25) +" screens to go)"
+    listurl = "http://gazettes.ag.gov.au/portal/govgazonline.nsf/publications?OpenView&Start=" + str(items)
+    (listurl, mime_type, listhtml) = scrape.fetchURL(scrape.docsdb,
+        listurl, "gazette", "AGD", False)
+    for line in listhtml.split('\n'):
+        soup = BeautifulSoup(line)
+        #print line
+        for row in soup.find_all('tr'):
+            print line
+            if row.has_key('valign'):
+                i = 0
+                date = ""
+                id = ""
+                type = ""
+                description = ""
+                name = ""
+                url = ""
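+                # columns: 0 = date, 1 = gazette id, 2 = type, 3 = description (containing links to the gazette items)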
+                for col in soup.find_all('td'):
+                    #print ''.join(col.stripped_strings)
+                    if i == 0:
+                        date = ''.join(col.stripped_strings)
+                    if i == 1:
+                        id = ''.join(col.stripped_strings)
+                    if i == 2:
+                        type = ''.join(col.stripped_strings)
+                    if i == 3:
+                        description = ''.join(col.stripped_strings)
+                        for link in col.findAll('a'):
+                            if link.has_key("href"):
+                                url = link['href']
+                                name = ''.join(link.stripped_strings)
+                                print str(items) + " (" +str(items/25) +" screens to go)"
+                                print [date, id, type, description, name, url]
+                                itemurl = scrape.fullurl(listurl, url)
+                                (itemurl, mime_type, html) = scrape.fetchURL(scrape.docsdb,
+                                    itemurl, "gazette", "AGD", False)
+                                hash = scrape.mkhash(scrape.canonurl(itemurl))
+                                doc = scrape.docsdb.get(hash)
+                                doc['metadata'] = {"date": date, "id": id, "type": type, "description": description, "name": name, "url": url}
+                                scrape.docsdb.save(doc)
+                                #time.sleep(2)
+                    i = i + 1;
 
+    items = items - 25
+    if items <= 0:
+        break
+

--- a/documents/genericScrapers.py
+++ b/documents/genericScrapers.py
@@ -72,7 +72,8 @@
             edate = date.today().strftime("%Y-%m-%d")
             doc = {'_id': dochash, 'agencyID': self.getAgencyID()
             , 'url': self.getURL(), 'docID': dochash,
-            "date": edate, "title": "Disclosure Log Updated", "description":  self.remove_control_chars(description), "diff": diff}
+            "date": edate, "title": "Disclosure Log Updated", 
+	    "description":  self.remove_control_chars(description), "diff": self.remove_control_chars(diff)}
             foidocsdb.save(doc)
         else:
             print "already saved"
@@ -199,11 +200,16 @@
         return table.find_all('tr')
 
     def getDate(self, content, entry, doc):
-        date = ''.join(content.stripped_strings).strip()
-        (a, b, c) = date.partition("(")
-        date = self.remove_control_chars(a.replace("Octber", "October").replace("1012","2012"))
-        print date
-        edate = parse(date, dayfirst=True, fuzzy=True).strftime("%Y-%m-%d")
+        strdate = ''.join(content.stripped_strings).strip()
+        (a, b, c) = strdate.partition("(")
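+        # keep only the text before any "(" and fix common typos before parsing the date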
+        strdate = self.remove_control_chars(a.replace("Octber", "October").replace("Janrurary", "January").replace("1012", "2012"))
+        print strdate
+        try:
+            edate = parse(strdate, dayfirst=True, fuzzy=True).strftime("%Y-%m-%d")
+        except ValueError:
+            print >> sys.stderr, "ERROR date invalid %s " % strdate
+            print >> sys.stderr, "ERROR date originally %s " % ''.join(content.stripped_strings).strip()
+            edate = date.today().strftime("%Y-%m-%d")
         print edate
         doc.update({'date': edate})
         return
@@ -266,8 +272,7 @@
                                          'Summary of FOIrequest received by agency/minister',
                                          'Summary of FOI request received', 'Description of    FOI Request',
                                          "FOI request", 'Results 1 to 67 of 67']
-                            if doc['title'] not in badtitles\
-                            and doc['description'] != '':
+                            if doc['title'] not in badtitles and 'description' in doc.keys() and doc['description'] != '':
                                 print "saving"
                                 foidocsdb.save(doc)
                         else:
@@ -277,6 +282,6 @@
                         print "header row"
 
                     else:
-                        print "ERROR number of columns incorrect"
+                        print >> sys.stderr, "ERROR number of columns incorrect"
                         print row
 

--- a/documents/runScrapers.sh
+++ b/documents/runScrapers.sh
@@ -1,10 +1,22 @@
-for f in scrapers/*.py; 
-	do echo "Processing $f file.."; 
-	python $f; 
+DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+cd $DIR
+echo "" > /tmp/disclosr-error
+for f in scrapers/*.py; do
+	echo "Processing $f file..";
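+	# swap stdout and stderr so only errors reach the log; compare checksums before and after to see if this scraper logged anything new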
+	md5=`md5sum /tmp/disclosr-error`
+	python $f 3>&1 1>&2 2>&3 | tee --append /tmp/disclosr-error;
+	md52=`md5sum /tmp/disclosr-error`
+	if [ "$md5" != "$md52" ]; then
+		echo "^^^^^^^^^^^^^^ $f" >> /tmp/disclosr-error;
+	fi
 	if [ "$?" -ne "0" ]; then
 		echo "error";
-		sleep 2; 
+		sleep 1;
 	fi
 done
+if [ -s /tmp/disclosr-error ] ; then
+    echo "emailling logs..";
+    mail -E -s "Disclosr errors" maxious@lambdacomplex.org < /tmp/disclosr-error ;
+fi
 
 

--- a/documents/scrape.py
+++ b/documents/scrape.py
@@ -7,14 +7,15 @@
 from urlparse import urljoin
 import time
 import os
+import sys
 import mimetypes
 import urllib
 import urlparse
 import socket
 
 #couch = couchdb.Server('http://192.168.1.148:5984/')
-couch = couchdb.Server('http://192.168.1.113:5984/')
-#couch = couchdb.Server('http://127.0.0.1:5984/')
+#couch = couchdb.Server('http://192.168.1.113:5984/')
+couch = couchdb.Server('http://127.0.0.1:5984/')
 
 
 def mkhash(input):
@@ -89,7 +90,7 @@
 def getLastAttachment(docsdb, url):
     hash = mkhash(url)
     doc = docsdb.get(hash)
-    if doc != None:
+    if doc != None and "_attachments" in doc.keys():
         last_attachment_fname = doc["_attachments"].keys()[-1]
         last_attachment = docsdb.get_attachment(doc, last_attachment_fname)
         return last_attachment
@@ -103,7 +104,7 @@
     req = urllib2.Request(url)
     print "Fetching %s (%s)" % (url, hash)
     if url.startswith("mailto") or url.startswith("javascript") or url.startswith("#") or url == None or url == "":
-        print "Not a valid HTTP url"
+        print >> sys.stderr, "Not a valid HTTP url"
         return (None, None, None)
     doc = docsdb.get(hash)
     if doc == None:
@@ -111,10 +112,15 @@
     else:
         if (('page_scraped' in doc) and ((time.time() - doc['page_scraped']) < 60 * 24 * 14) or (scrape_again == False)):
             print "Uh oh, trying to scrape URL again too soon!" + hash
-            last_attachment_fname = doc["_attachments"].keys()[-1]
-            last_attachment = docsdb.get_attachment(doc, last_attachment_fname)
-            content = last_attachment
-            return (doc['url'], doc['mime_type'], content.read())
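+            # return the cached attachment from CouchDB (if any) instead of re-fetching the URL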
+            if "_attachments" in doc.keys():
+                last_attachment_fname = doc["_attachments"].keys()[-1]
+                last_attachment = docsdb.get_attachment(doc, last_attachment_fname)
+                content = last_attachment.read()
+                mime_type = doc['mime_type']
+            else:
+                content = None
+                mime_type = None
+            return (doc['url'], mime_type, content)
 
     req.add_header("User-Agent", "Mozilla/4.0 (compatible; Prometheus webspider; owner maxious@lambdacomplex.org)")
     #if there is a previous version stored in couchdb, load caching helper tags
@@ -159,13 +165,13 @@
                 #store as attachment epoch-filename
 
     except (urllib2.URLError, socket.timeout) as e:
-        print "error!"
+        print >> sys.stderr,"error!"
         error = ""
         if hasattr(e, 'reason'):
             error = "error %s in downloading %s" % (str(e.reason), url)
         elif hasattr(e, 'code'):
             error = "error %s in downloading %s" % (e.code, url)
-        print error
+        print >> sys.stderr, error
         doc['error'] = error
         docsdb.save(doc)
         return (None, None, None)

--- a/documents/scrapers/0049d35216493c545ef5f7f000e6b252.py
+++ b/documents/scrapers/0049d35216493c545ef5f7f000e6b252.py
@@ -42,7 +42,6 @@
                         'data': {'request': '', 'session': '', 'more': ''}
 
                 }
-
-	        amonpy.exception(data)
+		#amonpy.exception(data)
 	pass
 

--- a/documents/scrapers/227cb6eb7d2c9f8a6e846df7447d6caa.py
+++ b/documents/scrapers/227cb6eb7d2c9f8a6e846df7447d6caa.py
@@ -18,13 +18,13 @@
                                         if mime_type == "text/html" or mime_type == "application/xhtml+xml" or mime_type =="application/xml":
                                         # http://www.crummy.com/software/BeautifulSoup/documentation.html
                                                 soup = BeautifulSoup(htcontent)
-                                                for row in soup.find(class_ = "ms-rteTable-GreyAlternating").find_all('tr'):
+                                                rowtitle = soup.find(class_ = "wc-title").find("h1").string
+                                                if rowtitle != None:
+                                                    description = rowtitle + ": "
+                                                for row in soup.find(class_ ="wc-content").find_all('td'):
                                                         if row != None:
-								rowtitle = row.find('th').string
-                                                                if rowtitle != None:
-                                                                    description = description + "\n" + rowtitle + ": "
-                                                                for text in row.find('td').stripped_strings:
-                                                                    description = description + text
+                                                                for text in row.stripped_strings:
+                                                                    description = description + text + "\n"
                                                      		for atag in row.find_all("a"):
                                                                 	if atag.has_key('href'):
                                                                         	links.append(scrape.fullurl(link,atag['href']))
@@ -37,7 +37,7 @@
 	def getColumnCount(self):
 		return 2
 	def getTable(self,soup):
-		return soup.find(class_ = "ms-rteTable-GreyAlternating")
+		return soup.find(class_ = "ms-rteTable-default")
 	def getColumns(self,columns):
 		(date, title) = columns
 		return (title, date, title, title, None)