datagov fixes


Former-commit-id: ed3ba96db4beeb126f802a3168476e27f298aeb8

--- a/documents/datagov-export.py
+++ b/documents/datagov-export.py
@@ -12,8 +12,8 @@
 #ckan = ckanclient.CkanClient(base_location='http://localhost:5000/api',    api_key='b47b24cd-591d-40c1-8677-d73101d56d1b')
 ckan = ckanclient.CkanClient(base_location='http://data.disclosurelo.gs/api',
     api_key='482a9dd2-a976-4adf-ac77-d71d92a98a52')
-#couch = couchdb.Server('http://127.0.0.1:5984/')
-couch = couchdb.Server('http://192.168.1.113:5984/')
+couch = couchdb.Server('http://127.0.0.1:5984/')
+#couch = couchdb.Server('http://192.168.1.113:5984/')
 
 # http://code.activestate.com/recipes/578019-bytes-to-human-human-to-bytes-converter/
 SYMBOLS = {
@@ -91,6 +91,7 @@
 
 def name_munge(input_name):
     return munge(input_name.replace(' ', '').replace('.', '_').replace('&', 'and'))
+    #[:100] - may need truncating; CKAN caps dataset names at 100 characters
     #return input_name.replace(' ', '').replace('.', '_').replace('&', 'and')
 
 
@@ -117,9 +118,9 @@
 if __name__ == "__main__":
     for doc in docsdb.view('app/datasets'):
         print doc.id
-        if doc.value['url'] != "http://data.gov.au/data/":
+        if doc.value['url'] != "http://data.gov.au/data/" and doc.value['agencyID'] != "qld":
             # Collect the package metadata.
-            pkg_name = doc.value['url'].replace("http://data.gov.au/dataset/",'').replace('/','');                                                                  _
+            pkg_name = doc.value['url'].replace("http://data.gov.au/dataset/",'').replace('/','')
             tags = []
             if doc.value['agencyID'] == "AGIMO":
                 if len(doc.value['metadata']["Keywords / Tags"]) > 0:
@@ -185,6 +186,8 @@
                         }
                     print group_entity
                     ckan.group_register_post(group_entity)
+                elif ckan.last_status == 409:
+                    print "group already exists"
                 else:
                     raise LoaderError('Unexpected status %s adding to group under \'%s\': %r' % (
                         ckan.last_status, pkg_name, e.args))

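The 409 branch added above leans on ckanclient recording the HTTP status of the last API call in ckan.last_status. A minimal standalone sketch of the pattern, reusing the client setup from datagov-export.py; the group payload here is hypothetical:

import ckanclient

ckan = ckanclient.CkanClient(base_location='http://data.disclosurelo.gs/api',
    api_key='482a9dd2-a976-4adf-ac77-d71d92a98a52')

group_entity = {'name': 'examplegroup', 'title': 'Example Group'}  # hypothetical group
try:
    ckan.group_register_post(group_entity)
except Exception:
    if ckan.last_status == 409:
        # 409 Conflict: the group is already registered, so it is safe to continue
        print "group already exists"
    else:
        raise
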
--- a/documents/genericScrapers.py
+++ b/documents/genericScrapers.py
@@ -72,8 +72,7 @@
             edate = date.today().strftime("%Y-%m-%d")
             doc = {'_id': dochash, 'agencyID': self.getAgencyID()
             , 'url': self.getURL(), 'docID': dochash,
-            "date": edate, "title": "Disclosure Log Updated", 
-	    "description":  self.remove_control_chars(description), "diff": self.remove_control_chars(diff)}
+            "date": edate, "title": "Disclosure Log Updated", "description":  self.remove_control_chars(description), "diff": diff}
             foidocsdb.save(doc)
         else:
             print "already saved"
@@ -202,7 +201,7 @@
     def getDate(self, content, entry, doc):
         date = ''.join(content.stripped_strings).strip()
         (a, b, c) = date.partition("(")
-        date = self.remove_control_chars(a.replace("Octber", "October").replace("1012","2012"))
+        date = self.remove_control_chars(a.replace("Octber", "October").replace("1012","2012").replace("Janurary","January"))
         print date
         edate = parse(date, dayfirst=True, fuzzy=True).strftime("%Y-%m-%d")
         print edate
@@ -278,6 +277,6 @@
                         print "header row"
 
                     else:
-                        print >> sys.stderr, "ERROR number of columns incorrect"
+                        print "ERROR number of columns incorrect"
                         print row
 

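The extra replace() above patches one more recurring typo ("Janurary") in agency disclosure logs before dateutil sees the string. A standalone sketch of the getDate() clean-up, with an invented sample date:

from dateutil.parser import parse

raw = "16 Janurary 1012 (see disclosure log)"  # invented sample containing both typos
(a, b, c) = raw.partition("(")
date = a.replace("Octber", "October").replace("1012", "2012").replace("Janurary", "January")
edate = parse(date, dayfirst=True, fuzzy=True).strftime("%Y-%m-%d")
print edate  # 2012-01-16
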
--- a/documents/runScrapers.sh
+++ b/documents/runScrapers.sh
@@ -1,19 +1,13 @@
-echo "" > /tmp/disclosr-error
+rm -f /tmp/disclosr-error
 for f in scrapers/*.py; do
 	echo "Processing $f file..";
-	md5=`md5sum /tmp/disclosr-error`
-	python $f 3>&1 1>&2 2>&3 | tee --append /tmp/disclosr-error;
-	md52=`md5sum /tmp/disclosr-error`
-	if [ "$md5" != "$md52" ]; then
-		echo "^^^^^^^^^^^^^^ $f" >> /tmp/disclosr-error;
-	fi
+	python $f 2>>/tmp/disclosr-error;
 	if [ "$?" -ne "0" ]; then
 		echo "error";
-		sleep 1;
+		sleep 2;
 	fi
 done
 if [ -s /tmp/disclosr-error ] ; then
-    echo "emailling logs..";
     mail -E -s "Disclosr errors" maxious@lambdacomplex.org < /tmp/disclosr-error ;
 fi
 

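The simplified loop relies on plain shell redirection instead of the md5sum/tee bookkeeping it replaces: each scraper's stderr is appended to /tmp/disclosr-error, and the file is mailed only when the -s test finds it non-empty. A rough Python 2 equivalent of the loop, to make the redirection explicit:

import glob
import subprocess

errlog = open('/tmp/disclosr-error', 'w')
for f in sorted(glob.glob('scrapers/*.py')):
    print "Processing %s file.." % f
    # stderr accumulates in the shared log; stdout still reaches the console
    if subprocess.call(['python', f], stderr=errlog) != 0:
        print "error"
errlog.close()
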
--- a/documents/scrape.py
+++ b/documents/scrape.py
@@ -7,7 +7,6 @@
 from urlparse import urljoin
 import time
 import os
-import sys
 import mimetypes
 import urllib
 import urlparse
@@ -104,7 +103,7 @@
     req = urllib2.Request(url)
     print "Fetching %s (%s)" % (url, hash)
     if url.startswith("mailto") or url.startswith("javascript") or url.startswith("#") or url == None or url == "":
-        print >> sys.stderr, "Not a valid HTTP url"
+        print "Not a valid HTTP url"
         return (None, None, None)
     doc = docsdb.get(hash)
     if doc == None:
@@ -160,13 +159,13 @@
                 #store as attachment epoch-filename
 
     except (urllib2.URLError, socket.timeout) as e:
-        print >> sys.stderr,"error!"
+        print "error!"
         error = ""
         if hasattr(e, 'reason'):
             error = "error %s in downloading %s" % (str(e.reason), url)
         elif hasattr(e, 'code'):
             error = "error %s in downloading %s" % (e.code, url)
-        print >> sys.stderr, error
+        print error
         doc['error'] = error
         docsdb.save(doc)
         return (None, None, None)

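With the sys import gone, fetch failures in scrape.py are printed to stdout rather than stderr, leaving the stderr log collected by runScrapers.sh for unhandled failures. A minimal sketch of that error path; the URL and the 20-second timeout are illustrative:

import socket
import urllib2

url = "http://data.gov.au/no-such-page"  # hypothetical URL
try:
    urllib2.urlopen(urllib2.Request(url), timeout=20)
except (urllib2.URLError, socket.timeout) as e:
    print "error!"
    if hasattr(e, 'reason'):
        print "error %s in downloading %s" % (str(e.reason), url)
    elif hasattr(e, 'code'):
        print "error %s in downloading %s" % (e.code, url)
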
--- a/documents/scrapers/227cb6eb7d2c9f8a6e846df7447d6caa.py
+++ b/documents/scrapers/227cb6eb7d2c9f8a6e846df7447d6caa.py
@@ -18,13 +18,13 @@
                                         if mime_type == "text/html" or mime_type == "application/xhtml+xml" or mime_type =="application/xml":
                                         # http://www.crummy.com/software/BeautifulSoup/documentation.html
                                                 soup = BeautifulSoup(htcontent)
-						rowtitle = soup.find(class_ = "wc-title").find("h1").string
-                                                if rowtitle != None:
-                                                   description = rowtitle + ": "
-                                                for row in soup.find(class_ ="wc-content").find_all('td'):
+                                                for row in soup.find(class_ = "ms-rteTable-GreyAlternating").find_all('tr'):
                                                         if row != None:
-                                                                for text in row.stripped_strings:
-                                                                    description = description + text + "\n"
+                                                                rowtitle = row.find('th').string
+                                                                if rowtitle != None:
+                                                                    description = description + "\n" + rowtitle + ": "
+                                                                for text in row.find('td').stripped_strings:
+                                                                    description = description + text
                                                      		for atag in row.find_all("a"):
                                                                 	if atag.has_key('href'):
                                                                         	links.append(scrape.fullurl(link,atag['href']))
@@ -37,7 +37,7 @@
 	def getColumnCount(self):
 		return 2
 	def getTable(self,soup):
-		return soup.find(class_ = "ms-rteTable-default")
+		return soup.find(class_ = "ms-rteTable-GreyAlternating")
 	def getColumns(self,columns):
 		(date, title) = columns
 		return (title, date, title, title, None)

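The rewritten loop walks the th/td pairs of the SharePoint "ms-rteTable-GreyAlternating" table instead of the old wc-title/wc-content markup. A self-contained sketch of the same parsing against invented HTML (in the scraper, scrape.fullurl() would resolve the collected hrefs):

from bs4 import BeautifulSoup

html = """<table class="ms-rteTable-GreyAlternating">
<tr><th>Date</th><td>1 June 2012</td></tr>
<tr><th>Documents</th><td><a href="/foi/1.pdf">decision letter</a></td></tr>
</table>"""

soup = BeautifulSoup(html)
description = ""
links = []
for row in soup.find(class_="ms-rteTable-GreyAlternating").find_all('tr'):
    rowtitle = row.find('th').string
    if rowtitle != None:
        description = description + "\n" + rowtitle + ": "
    for text in row.find('td').stripped_strings:
        description = description + text
    for atag in row.find_all("a"):
        if atag.has_key('href'):
            links.append(atag['href'])

print description
print links
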
--- a/documents/scrapers/f2ab2908d8ee56ed8d995ef4187e75e6.py
+++ b/documents/scrapers/f2ab2908d8ee56ed8d995ef4187e75e6.py
@@ -10,7 +10,7 @@
                 (id, date, title, description, notes) = columns
                 return (id, date, title, description, notes)
         def getTable(self,soup):
-                return soup.find("table")
+                return soup.find(id = "content").table
 
 if __name__ == '__main__':
     print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
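
Scoping the lookup to the element with id="content" skips any earlier table (navigation, layout) that a bare soup.find("table") would match first. A small sketch with invented markup:

from bs4 import BeautifulSoup

html = """<div id="header"><table><tr><td>site menu</td></tr></table></div>
<div id="content"><table><tr><td>10/01/2012</td><td>FOI 12-01</td></tr></table></div>"""

soup = BeautifulSoup(html)
print soup.find("table")             # matches the header/menu table first
print soup.find(id="content").table  # matches the disclosure log table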