Experimental organization support


Former-commit-id: 0c86e9a2a6e066dc3f1550e7915892d9649ec125

--- a/admin/refreshDesignDoc.php
+++ b/admin/refreshDesignDoc.php
@@ -40,6 +40,8 @@
 $obj->views->byURL->map = "function(doc) {\n  emit(doc.url, doc);\n}";
 $obj->views->agency->map = "function(doc) {\n  emit(doc.agencyID, doc);\n}";
 $obj->views->byWebServer->map = "function(doc) {\n  emit(doc.web_server, doc);\n}";
+
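+// view of documents scraped from dataset pages (fieldName == "data"); used by datagov-merge.php and datagov-export.py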
+$obj->views->datasets->map = "function(doc) {\nif (doc.fieldName == \"data\") {\n  emit(doc._id, doc);\n}\n}";
 $obj->views->getValidationRequired->map = "function(doc) {\nif (doc.mime_type == \"text/html\" \n&& typeof(doc.validation) == \"undefined\") {\n  emit(doc._id, doc._attachments);\n}\n}";
 $docdb->save($obj, true);
 

--- /dev/null
+++ b/documents/datagov-export.py
@@ -1,1 +1,223 @@
-
+import ckanclient
+import couchdb
+from ckanclient import CkanApiError
+import re
+import html2text # aaronsw :(
+import ckanapi # https://github.com/open-data/ckanapi
+
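+# Export dataset documents from the disclosr-documents CouchDB (app/datasets view) to a CKAN
+# instance, creating the owning organization first if it does not already exist.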
+
+class LoaderError(Exception):
+    pass
+
+# Instantiate the CKAN client.
+#ckan = ckanclient.CkanClient(base_location='http://localhost:5000/api',    api_key='b47b24cd-591d-40c1-8677-d73101d56d1b')
+api_key = 'b3ab75e4-afbb-465b-a09d-8171c8c69a7a'
+ckan = ckanclient.CkanClient(base_location='http://data.disclosurelo.gs/api',
+    api_key=api_key)
+ckandirect = ckanapi.RemoteCKAN('http://data.disclosurelo.gs', api_key=api_key)
+couch = couchdb.Server('http://127.0.0.1:5984/')
+#couch = couchdb.Server('http://192.168.1.113:5984/')
+
+# http://code.activestate.com/recipes/578019-bytes-to-human-human-to-bytes-converter/
+SYMBOLS = {
+    'customary': ('B', 'KB', 'MB', 'GB', 'T', 'P', 'E', 'Z', 'Y'),
+    'customary_ext': ('byte', 'kilo', 'mega', 'giga', 'tera', 'peta', 'exa',
+                      'zetta', 'iotta'),
+    'iec': ('Bi', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi', 'Yi'),
+    'iec_ext': ('byte', 'kibi', 'mebi', 'gibi', 'tebi', 'pebi', 'exbi',
+                'zebi', 'yobi'),
+}
+
+def human2bytes(s):
+    """
+    Attempts to guess the string format based on default symbols
+    set and return the corresponding bytes as an integer.
+    When unable to recognize the format ValueError is raised.
+
+      >>> human2bytes('0 B')
+      0
+      >>> human2bytes('1 K')
+      1024
+      >>> human2bytes('1 M')
+      1048576
+      >>> human2bytes('1 Gi')
+      1073741824
+      >>> human2bytes('1 tera')
+      1099511627776
+
+      >>> human2bytes('0.5kilo')
+      512
+      >>> human2bytes('0.1  byte')
+      0
+      >>> human2bytes('1 k')  # k is an alias for K
+      1024
+      >>> human2bytes('12 foo')
+      Traceback (most recent call last):
+          ...
+      ValueError: can't interpret '12 foo'
+    """
+    init = s
+    num = ""
+    while s and s[0:1].isdigit() or s[0:1] == '.':
+        num += s[0]
+        s = s[1:]
+    num = float(num)
+    letter = s.strip()
+    for name, sset in SYMBOLS.items():
+        if letter in sset:
+            break
+    else:
+        if letter == 'k':
+            # treat 'k' as an alias for 'K' as per: http://goo.gl/kTQMs
+            sset = SYMBOLS['customary']
+            letter = letter.upper()
+        else:
+            raise ValueError("can't interpret %r" % init)
+    prefix = {sset[0]: 1}
+    for i, s in enumerate(sset[1:]):
+        prefix[s] = 1 << (i + 1) * 10
+    return int(num * prefix[letter])
+
+# https://github.com/okfn/ckanext-importlib
+def munge(name):
+    # convert spaces to underscores
+    name = re.sub(' ', '_', name).lower()
+    # convert symbols to dashes
+    name = re.sub('[:]', '_-', name).lower()
+    name = re.sub('[/]', '-', name).lower()
+    # take out not-allowed characters
+    name = re.sub('[^a-zA-Z0-9-_]', '', name).lower()
+    # remove double underscores
+    name = re.sub('__', '_', name).lower()
+    return name
+
+#todo "{'name': [u'Url must be purely lowercase alphanumeric (ascii) characters and these symbols: -_']}"
+# http://data.gov.au/dataset/australian-domestic-regional-and-international-airline-activity-%E2%80%93-time-series/
+def name_munge(input_name):
+    return  munge(input_name.replace(' ', '').replace('.', '_').replace('&', 'and'))
+    #[:100]
+    #return input_name.replace(' ', '').replace('.', '_').replace('&', 'and')
+
+
+def get_licence_id(licencename):
+    map = {
+        "Creative Commons - Attribution-Share Alike 2.0 Australia (CC-SA)\nThe downloadable version of the database is licensed under CC-BY-SA Creative Commons Attribution Share Alike and contains only the database fields that are released under that license. These fields are object title, object number, object description as well as temporal, spatial and dimension details. It also contains a persistent URL for each record.": 'cc-by-sa',
+        "CreativeCommonsAttributionNonCommercial30AustraliaCCBYNC30": 'cc-nc',
+        'Otherpleasespecify': 'notspecified',
+        '': 'notspecified',
+        "Publicly available data": 'notspecified',
+        "CreativeCommonsAttributionNoDerivativeWorks30AustraliaCCBYND30": "other-closed",
+        "CreativeCommonsAttributionNonCommercialNoDerivs30AustraliaCCBYNCND30": "other-closed",
+        'CreativeCommonsAttribution30AustraliaCCBY30': 'cc-by',
+        "Creative Commons - Attribution 2.5 Australia (CC-BY)": 'cc-by',
+        'CreativeCommonsAttributionCCBY25': 'cc-by',
+        "PublicDomain": 'other-pd',
+    }
+    if licencename not in map.keys():
+        raise Exception(licencename + " not found")
+    return map[licencename]
+
+docsdb = couch['disclosr-documents']
+
+if __name__ == "__main__":
+    orgs_list = []
+    for doc in docsdb.view('app/datasets'):
+        print doc.id
+
+
+        if doc.value['url'] != "http://data.gov.au/data/" and doc.value['agencyID'] != "qld":
+
+
+
+            # Collect the package metadata.
+            pkg_name = filter(lambda x: x in '0123456789abcdefghijklmnopqrstuvwxyz-_', doc.value['url'].replace("http://data.gov.au/dataset/", '').replace('/', '')[:100])
+            print pkg_name
+            #add to or create organization using direct API
+            org_name = name_munge(doc.value['metadata']["Agency"][:100])
+            if org_name not in orgs_list:
+                orgs_list = ckandirect.action.organization_list()['result']
+                #print orgs_list
+                if org_name not in orgs_list:
+                    try:
+                        print "org not found, creating "+org_name
+                        ckandirect.action.organization_create(name = org_name,   title= doc.value['metadata']["Agency"],
+                                                              description= doc.value['metadata']["Agency"])
+                        orgs_list.append(org_name)
+                    except ckanapi.ValidationError, e:
+                        print e
+                        raise LoaderError('Unexpected status')
+                else:
+                    print "org found, adding dataset to "+org_name
+
+            org = ckandirect.action.organization_show(id=org_name)
+            # todo cache org names -> id mapping
+            tags = []
+            if doc.value['agencyID'] == "AGIMO":
+                if len(doc.value['metadata']["Keywords / Tags"]) > 0:
+                    if hasattr(doc.value['metadata']["Keywords / Tags"], '__iter__'):
+                        tags = tags + doc.value['metadata']["Keywords / Tags"]
+                    else:
+                        tags = tags + [doc.value['metadata']["Keywords / Tags"]]
+                if 'data.gov.au Category' in doc.value['metadata'].keys() and len(doc.value['metadata']['data.gov.au Category']) > 0:
+                    if hasattr(doc.value['metadata']['data.gov.au Category'], '__iter__'):
+                        tags = tags + doc.value['metadata']['data.gov.au Category']
+                    else:
+                        tags = tags + [doc.value['metadata']['data.gov.au Category']]
+                tags = [re.sub('[^a-zA-Z0-9-_.]', '', tag.replace('&', 'and')).lower() for tag in tags if tag]
+                #print tags
+                package_entity = {
+                    'name': pkg_name,
+                    'title': doc.value['metadata']['DCTERMS.Title'],
+                    'url': doc.value['metadata']['DCTERMS.Source.URI'],
+                    'tags': tags, #tags are mandatory?
+                    'author': doc.value['metadata']["DCTERMS.Creator"],
+                    'maintainer': doc.value['metadata']["DCTERMS.Creator"],
+                    'license_id': get_licence_id(doc.value['metadata']['DCTERMS.License']),  # CKAN expects the American spelling 'license_id'
+                    'notes': html2text.html2text(doc.value['metadata']['Description']),
+                    'owner_org': org["result"]["id"]
+                    #todo add missing key values like jurisdiction
+                }
+            if doc.value['agencyID'] == "qld":
+                package_entity = doc.value['metadata']
+
+            try:
+                #print package_entity
+                ckan.package_register_post(package_entity)
+            except CkanApiError, e:
+                if ckan.last_message == "{\"name\": [\"That URL is already in use.\"]}":
+                    print "package already exists"
+                else:
+                    print ckan.last_message
+                    raise LoaderError('Unexpected status %s checking for package under \'%s\': %r' % (
+                        ckan.last_status, pkg_name, e.args))
+            pkg = ckan.package_entity_get(pkg_name)
+
+            if 'Download' in doc.value['metadata'].keys():
+                try:
+
+                    resources = pkg.get('resources', [])
+                    if len(resources) < len(doc.value['metadata']['Download']):
+                        for resource in doc.value['metadata']['Download']:
+
+                            # http://docs.ckan.org/en/ckan-1.7/domain-model-resource.html
+                            # (KML/KMZ) / (Shapefile) /(Other)
+                            format = "plain"
+                            if resource['format'] == '(XML)':
+                                format = 'xml'
+                            if resource['format'] == '(CSV/XLS)':
+                                format = 'csv'
+                            name = resource['href']
+                            if 'name' in resource.keys():
+                                name = resource['name']
+                            print resource
+                            ckan.add_package_resource(pkg_name, resource['href'], name=name, resource_type='data',
+                                format=format, size=human2bytes(resource['size'].replace(',', '')))
+                    else:
+                        print "resources already exist"
+                except CkanApiError, e:
+                    if ckan.last_status == 404:
+                        print "parent dataset does not exist"
+                    else:
+                        raise LoaderError('Unexpected status %s checking for package under \'%s\': %r' % (
+                            ckan.last_status, pkg_name, e.args))
+

--- /dev/null
+++ b/documents/datagov-merge.php
@@ -1,1 +1,26 @@
+<?php
 
+include_once("../include/common.inc.php");
+
+
+setlocale(LC_CTYPE, 'C');
+
+$db = $server->get_db('disclosr-documents');
+$datasets = Array();
+try {
+    $rows = $db->get_view("app", "datasets", null, true)->rows;
+
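+    // map dataset slug (data.gov.au URL with prefix stripped) to its CouchDB document id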
+    foreach ($rows as $row) {
+        //print_r($row);
+        if ($row->value->url != "http://data.gov.au/data/") {
+            $datasets[str_replace(Array("http://data.gov.au/dataset/", "/"), "", $row->value->url)] = $row->id;
+        }
+    }
+} catch (SetteeRestClientException $e) {
+    setteErrorHandler($e);
+}
+ksort($datasets);
+foreach ($datasets as $datasetname => $datasetkey) {
+    print "$datasetname => $datasetkey<br>\n";
+}
+?>
+

--- a/documents/datagov.py
+++ b/documents/datagov.py
@@ -13,7 +13,7 @@
     if atag.has_key('href'):
         url = scrape.fullurl(listurl, atag['href'])
         (url, mime_type, html) = scrape.fetchURL(scrape.docsdb,
-            url, "data", "AGIMO")
+            url, "data", "AGIMO", False)
         hash = scrape.mkhash(scrape.canonurl(url))
         doc = scrape.docsdb.get(hash)
         if "metadata" not in doc.keys() or True:
@@ -34,7 +34,7 @@
                             if last_title == "Description":
                                 doc['metadata'][last_title] = unidecode(str(child)).encode('ascii', 'ignore')
                             elif last_title == "Download":
-                                doc['metadata'][last_title] = {}
+                                doc['metadata'][last_title] = []
                                 for item in child.find_all("li"):
                                     link = item.find("a")
                                     format = item.find(property="dc:format")
@@ -42,7 +42,7 @@
                                             "format": format.string.strip(), "size": format.next_sibling.string.strip()}
                                     if link.string != None:
                                         linkobj["name"] = link.string.strip()
-                                    doc['metadata'][last_title][] = linkobj
+                                    doc['metadata'][last_title].append(linkobj)
 
                             else:
                                 atags = child.find_all('a')

--- /dev/null
+++ b/documents/dataqld.py
@@ -1,1 +1,28 @@
+import sys, os
+import time
+import scrape
+from bs4 import BeautifulSoup
 
+from unidecode import unidecode
+import ckanclient
+
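+# Mirror package metadata from the data.qld.gov.au CKAN into the disclosr-documents CouchDB.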
+# Instantiate the CKAN client.
+ckan = ckanclient.CkanClient(base_location='https://data.qld.gov.au/api')
+
+# Get the package list.
+package_list = ckan.package_register_get()
+for package_name in package_list:
+# Get the details of a package.
+    (url, mime_type, html) = scrape.fetchURL(scrape.docsdb,
+        "https://data.qld.gov.au/dataset/"+package_name , "data", "qld", False)
+    hash = scrape.mkhash(scrape.canonurl(url))
+    print hash
+    doc = scrape.docsdb.get(hash)
+    if "metadata" not in doc.keys() or True:
+        ckan.package_entity_get(package_name)
+        package_entity = ckan.last_message
+        doc['type'] = "dataset"
+        doc['metadata'] = package_entity
+        print package_entity
+        scrape.docsdb.save(doc)
+

--- a/documents/gazette.py
+++ b/documents/gazette.py
@@ -1,1 +1,57 @@
+import sys, os
+import time
+import scrape
+from bs4 import BeautifulSoup
 
+from unidecode import unidecode
+
+items = 3950
+items = 1
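+# page through the gazette publications list view, 25 entries per screen, working back toward Start=0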
+while True:
+    print str(items) + " (" +str(items/25) +" screens to go)"
+    listurl = "http://gazettes.ag.gov.au/portal/govgazonline.nsf/publications?OpenView&Start=" + str(items)
+    (listurl, mime_type, listhtml) = scrape.fetchURL(scrape.docsdb,
+        listurl, "gazette", "AGD", False)
+    for line in listhtml.split('\n'):
+        soup = BeautifulSoup(line)
+        #print line
+        for row in soup.find_all('tr'):
+            print line
+            if row.has_key('valign'):
+                i = 0
+                date = ""
+                id = ""
+                type = ""
+                description = ""
+                name = ""
+                url = ""
+                for col in soup.find_all('td'):
+                    #print ''.join(col.stripped_strings)
+                    if i == 0:
+                        date = ''.join(col.stripped_strings)
+                    if i == 1:
+                        id = ''.join(col.stripped_strings)
+                    if i == 2:
+                        type = ''.join(col.stripped_strings)
+                    if i == 3:
+                        description = ''.join(col.stripped_strings)
+                        for link in col.findAll('a'):
+                            if link.has_key("href"):
+                                url = link['href']
+                                name = ''.join(link.stripped_strings)
+                                print str(items) + " (" +str(items/25) +" screens to go)"
+                                print [date, id, type, description, name, url]
+                                itemurl = scrape.fullurl(listurl, url)
+                                (itemurl, mime_type, html) = scrape.fetchURL(scrape.docsdb,
+                                    itemurl, "gazette", "AGD", False)
+                                hash = scrape.mkhash(scrape.canonurl(itemurl))
+                                doc = scrape.docsdb.get(hash)
+                                doc['metadata'] = {"date": date, "id": id, "type": type, "description": description, "name": name, "url": url}
+                                scrape.docsdb.save(doc)
+                                #time.sleep(2)
+                    i = i + 1
+
+    items = items - 25
+    if items <= 0:
+        break
+

--- a/documents/genericScrapers.py
+++ b/documents/genericScrapers.py
@@ -72,7 +72,8 @@
             edate = date.today().strftime("%Y-%m-%d")
             doc = {'_id': dochash, 'agencyID': self.getAgencyID()
             , 'url': self.getURL(), 'docID': dochash,
-            "date": edate, "title": "Disclosure Log Updated", "description":  self.remove_control_chars(description), "diff": diff}
+            "date": edate, "title": "Disclosure Log Updated", 
+	    "description":  self.remove_control_chars(description), "diff": self.remove_control_chars(diff)}
             foidocsdb.save(doc)
         else:
             print "already saved"
@@ -199,11 +200,16 @@
         return table.find_all('tr')
 
     def getDate(self, content, entry, doc):
-        date = ''.join(content.stripped_strings).strip()
-        (a, b, c) = date.partition("(")
-        date = self.remove_control_chars(a.replace("Octber", "October").replace("1012","2012"))
-        print date
-        edate = parse(date, dayfirst=True, fuzzy=True).strftime("%Y-%m-%d")
+        strdate = ''.join(content.stripped_strings).strip()
+        (a, b, c) = strdate.partition("(")
+        strdate = self.remove_control_chars(a.replace("Octber", "October").replace("Janrurary", "January").replace("1012", "2012"))
+        print strdate
+        try:
+            edate = parse(strdate, dayfirst=True, fuzzy=True).strftime("%Y-%m-%d")
+        except ValueError:
+            print >> sys.stderr, "ERROR date invalid %s " % strdate
+            print >> sys.stderr, "ERROR date originally %s " % ''.join(content.stripped_strings).strip()
+            edate = date.today().strftime("%Y-%m-%d")
         print edate
         doc.update({'date': edate})
         return
@@ -266,8 +272,7 @@
                                          'Summary of FOIrequest received by agency/minister',
                                          'Summary of FOI request received', 'Description of    FOI Request',
                                          "FOI request", 'Results 1 to 67 of 67']
-                            if doc['title'] not in badtitles\
-                            and doc['description'] != '':
+                            if doc['title'] not in badtitles and 'description' in doc.keys() and doc['description'] != '':
                                 print "saving"
                                 foidocsdb.save(doc)
                         else:
@@ -277,6 +282,6 @@
                         print "header row"
 
                     else:
-                        print "ERROR number of columns incorrect"
+                        print >> sys.stderr, "ERROR number of columns incorrect"
                         print row
 

--- a/documents/runScrapers.sh
+++ b/documents/runScrapers.sh
@@ -1,10 +1,22 @@
-for f in scrapers/*.py; 
-	do echo "Processing $f file.."; 
-	python $f; 
+DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+cd $DIR
+echo "" > /tmp/disclosr-error
+for f in scrapers/*.py; do
+	echo "Processing $f file..";
+	md5=`md5sum /tmp/disclosr-error`
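+	# swap stdout and stderr so only the scraper's stderr is appended to the error log by tee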
+	python $f 3>&1 1>&2 2>&3 | tee --append /tmp/disclosr-error;
+	md52=`md5sum /tmp/disclosr-error`
+	if [ "$md5" != "$md52" ]; then
+		echo "^^^^^^^^^^^^^^ $f" >> /tmp/disclosr-error;
+	fi
 	if [ "$?" -ne "0" ]; then
 		echo "error";
-		sleep 2; 
+		sleep 1;
 	fi
 done
+if [ -s /tmp/disclosr-error ] ; then
+    echo "emailling logs..";
+    mail -E -s "Disclosr errors" maxious@lambdacomplex.org < /tmp/disclosr-error ;
+fi
 
 

--- a/documents/scrape.py
+++ b/documents/scrape.py
@@ -7,10 +7,16 @@
 from urlparse import urljoin
 import time
 import os
+import sys
 import mimetypes
 import urllib
 import urlparse
 import socket
+
+#couch = couchdb.Server('http://192.168.1.148:5984/')
+#couch = couchdb.Server('http://192.168.1.113:5984/')
+couch = couchdb.Server('http://127.0.0.1:5984/')
+
 
 def mkhash(input):
     return hashlib.md5(input).hexdigest().encode("utf-8")
@@ -84,7 +90,7 @@
 def getLastAttachment(docsdb, url):
     hash = mkhash(url)
     doc = docsdb.get(hash)
-    if doc != None:
+    if doc != None and "_attachments" in doc.keys():
         last_attachment_fname = doc["_attachments"].keys()[-1]
         last_attachment = docsdb.get_attachment(doc, last_attachment_fname)
         return last_attachment
@@ -98,21 +104,23 @@
     req = urllib2.Request(url)
     print "Fetching %s (%s)" % (url, hash)
     if url.startswith("mailto") or url.startswith("javascript") or url.startswith("#") or url == None or url == "":
-        print "Not a valid HTTP url"
+        print >> sys.stderr, "Not a valid HTTP url"
         return (None, None, None)
     doc = docsdb.get(hash)
     if doc == None:
         doc = {'_id': hash, 'agencyID': agencyID, 'url': url, 'fieldName': fieldName, 'type': 'website'}
     else:
-        if (('page_scraped' in doc) and (time.time() - doc['page_scraped']) < 60 * 24 * 14 * 1000):
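+        # return the cached copy when the page was scraped recently or re-scraping is disabled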
+        if (('page_scraped' in doc) and ((time.time() - doc['page_scraped']) < 60 * 24 * 14) or (scrape_again == False)):
             print "Uh oh, trying to scrape URL again too soon!" + hash
-            last_attachment_fname = doc["_attachments"].keys()[-1]
-            last_attachment = docsdb.get_attachment(doc, last_attachment_fname)
-            content = last_attachment
-            return (doc['url'], doc['mime_type'], content.read())
-        if scrape_again == False:
-            print "Not scraping this URL again as requested"
-            return (doc['url'], doc['mime_type'], content.read())
+            if "_attachments" in doc.keys():
+                last_attachment_fname = doc["_attachments"].keys()[-1]
+                last_attachment = docsdb.get_attachment(doc, last_attachment_fname)
+                content = last_attachment.read()
+                mime_type = doc['mime_type']
+            else:
+                content = None
+                mime_type = None
+            return (doc['url'], mime_type, content)
 
     req.add_header("User-Agent", "Mozilla/4.0 (compatible; Prometheus webspider; owner maxious@lambdacomplex.org)")
     #if there is a previous version stored in couchdb, load caching helper tags
@@ -157,13 +165,13 @@
                 #store as attachment epoch-filename
 
     except (urllib2.URLError, socket.timeout) as e:
-        print "error!"
+        print >> sys.stderr,"error!"
         error = ""
         if hasattr(e, 'reason'):
             error = "error %s in downloading %s" % (str(e.reason), url)
         elif hasattr(e, 'code'):
             error = "error %s in downloading %s" % (e.code, url)
-        print error
+        print >> sys.stderr, error
         doc['error'] = error
         docsdb.save(doc)
         return (None, None, None)
@@ -207,9 +215,6 @@
                     #print linkurl
                     scrapeAndStore(docsdb, linkurl, depth - 1, fieldName, agencyID)
 
-#couch = couchdb.Server('http://192.168.1.148:5984/')
-couch = couchdb.Server('http://192.168.1.113:5984/')
-#couch = couchdb.Server('http://127.0.0.1:5984/')
 # select database
 agencydb = couch['disclosr-agencies']
 docsdb = couch['disclosr-documents']

--- a/documents/scrapers/0049d35216493c545ef5f7f000e6b252.py
+++ b/documents/scrapers/0049d35216493c545ef5f7f000e6b252.py
@@ -26,8 +26,8 @@
 	ScraperImplementation().doScrape()
     except Exception, err:
         sys.stderr.write('ERROR: %s\n' % str(err))
-	print ‘Error Reason: ‘, err.__doc__
-	print ‘Exception: ‘, err.__class__
+	print "Error Reason: ", err.__doc__
+	print "Exception: ", err.__class__
 	print traceback.format_exc()
 	if amon_available:
                data = {
@@ -42,7 +42,6 @@
                         'data': {'request': '', 'session': '', 'more': ''}
 
                 }
-
-	        amonpy.exception(data)
+		#amonpy.exception(data)
 	pass
 

--- a/documents/scrapers/227cb6eb7d2c9f8a6e846df7447d6caa.py
+++ b/documents/scrapers/227cb6eb7d2c9f8a6e846df7447d6caa.py
@@ -18,13 +18,13 @@
                                         if mime_type == "text/html" or mime_type == "application/xhtml+xml" or mime_type =="application/xml":
                                         # http://www.crummy.com/software/BeautifulSoup/documentation.html
                                                 soup = BeautifulSoup(htcontent)
-                                                for row in soup.find(class_ = "ms-rteTable-GreyAlternating").find_all('tr'):
+                                                rowtitle = soup.find(class_ = "wc-title").find("h1").string
+                                                if rowtitle != None:
+                                                   description = rowtitle + ": "
+                                                for row in soup.find(class_ ="wc-content").find_all('td'):
                                                         if row != None:
-								rowtitle = row.find('th').string
-                                                                if rowtitle != None:
-                                                                    description = description + "\n" + rowtitle + ": "
-                                                                for text in row.find('td').stripped_strings:
-                                                                    description = description + text
+                                                                for text in row.stripped_strings:
+                                                                    description = description + text + "\n"
                                                      		for atag in row.find_all("a"):
                                                                 	if atag.has_key('href'):
                                                                         	links.append(scrape.fullurl(link,atag['href']))
@@ -37,7 +37,7 @@
 	def getColumnCount(self):
 		return 2
 	def getTable(self,soup):
-		return soup.find(class_ = "ms-rteTable-GreyAlternating")
+		return soup.find(class_ = "ms-rteTable-default")
 	def getColumns(self,columns):
 		(date, title) = columns
 		return (title, date, title, title, None)

--- a/documents/scrapers/24bd71114d3975ed9a63ad29624c62c9.py
+++ b/documents/scrapers/24bd71114d3975ed9a63ad29624c62c9.py
@@ -7,7 +7,7 @@
 #http://www.doughellmann.com/PyMOTW/abc/
 class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
         def getTable(self,soup):
-                return soup.find(id = "inner_content")       
+                return soup.find(class_="tborder")       
         def getColumnCount(self):
                 return 2
         def getColumns(self,columns):

--- a/documents/scrapers/3d5871a44abbbc81ef5b3a420070755d.py
+++ b/documents/scrapers/3d5871a44abbbc81ef5b3a420070755d.py
@@ -8,40 +8,14 @@
 from datetime import *
 
 #http://www.doughellmann.com/PyMOTW/abc/
-class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
-        def getTable(self,soup):
-                return soup.find(class_ = "inner-column").table       
-        def getRows(self,table):
-                return table.tbody.find_all('tr',recursive=False)
+class ScraperImplementation(genericScrapers.GenericHTMLDisclogScraper):
         def getColumnCount(self):
-                return 3
-        def getColumns(self,columns):
-                (date, title, description) = columns
-                return (date, date, title, description, None)
-        def getDate(self, content, entry, doc):
-		i = 0
-		date = ""
-		for string in content.stripped_strings:
-    			if i ==1:
-				date = string
-			i = i+1
-                edate = parse(date, dayfirst=True, fuzzy=True).strftime("%Y-%m-%d")
-                print edate
-                doc.update({'date': edate})
-                return
-   	def getTitle(self, content, entry, doc):
-		i = 0
-		title = ""
-		for string in content.stripped_strings:
-    			if i < 2:
-				title = title + string
-			i = i+1
-                doc.update({'title': title})