refactor description parsing into overridable getDescription() hooks


Former-commit-id: 12d26f8b33a3ffcf9a8036b00437793bae515db4
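
This commit pulls the boilerplate that every scraper previously duplicated
(getAgencyID(), getURL(), couch bookkeeping) into a GenericDisclogScraper base
class and turns description parsing into an overridable getDescription() hook,
with RSS and OAIC-table variants. A minimal sketch of the resulting pattern
(names follow genericScrapers.py; illustrative only, not a verbatim copy of
the module):

    import abc

    class GenericDisclogScraper(object):
        __metaclass__ = abc.ABCMeta

        @abc.abstractmethod
        def getDescription(self, content, entry, doc):
            """ each scraper decides how doc['description'] gets filled """
            return

    class GenericRSSDisclogScraper(GenericDisclogScraper):
        def getDescription(self, content, entry, doc):
            # default for RSS feeds: take the entry summary verbatim
            doc.update({'description': content.summary})

Scrapers that need richer descriptions (linked documents, download pages)
override getDescription() instead of reimplementing doScrape().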

--- a/documents/genericScrapers.py
+++ b/documents/genericScrapers.py
@@ -3,22 +3,81 @@
 import scrape
 from bs4 import BeautifulSoup
 import parsedatetime as pdt
+from time import mktime
+from datetime import datetime
+import feedparser
 import abc
-class GenericOAICDisclogScraper(object):
-	__metaclass__ = abc.ABCMeta
+
+class GenericDisclogScraper(object):
+	__metaclass__ = abc.ABCMeta
+	agencyID = None
+	disclogURL = None
+	def getAgencyID(self):
+		""" disclosr agency id, derived from this scraper script's filename """
+		if self.agencyID is None:
+			self.agencyID = os.path.basename(sys.argv[0]).replace(".py", "")
+		return self.agencyID
+
+	def getURL(self):
+		""" disclog URL, looked up from the agency record """
+		if self.disclogURL is None:
+			agency = scrape.agencydb.get(self.getAgencyID())
+			self.disclogURL = agency['FOIDocumentsURL']
+		return self.disclogURL
+
 	@abc.abstractmethod
-	def getAgencyID(self):
-		""" disclosr agency id """
+	def doScrape(self):
+		""" do the scraping """
 		return
 
 	@abc.abstractmethod
-	def getURL(self):
-		""" disclog URL"""
+	def getDescription(self, content, entry, doc):
+		""" extract this entry's description into doc """
 		return
 
+
+
+class GenericRSSDisclogScraper(GenericDisclogScraper):
+
+	def doScrape(self):
+		foidocsdb = scrape.couch['disclosr-foidocuments']
+		(url, mime_type, content) = scrape.fetchURL(scrape.docsdb, self.getURL(), "foidocuments", self.getAgencyID())
+		feed = feedparser.parse(content)
+		for entry in feed.entries:
+			# one couch document per feed entry, keyed by a hash of the entry id
+			print entry.id
+			hash = scrape.mkhash(entry.id)
+			doc = foidocsdb.get(hash)
+			# only save entries we have not seen before
+			if doc is None:
+				print "saving"
+				edate = datetime.fromtimestamp(mktime(entry.published_parsed)).strftime("%Y-%m-%d")
+				doc = {'_id': hash, 'agencyID': self.getAgencyID(), 'url': entry.link, 'docID': entry.id,
+					"date": edate, "title": entry.title}
+				self.getDescription(entry, entry, doc)
+				foidocsdb.save(doc)
+			else:
+				print "already saved"
+
+	def getDescription(self, content, entry, doc):
+		""" default: use the RSS entry's summary as the description """
+		doc.update({'description': content.summary})
+		return
+
+class GenericOAICDisclogScraper(GenericDisclogScraper):
+	__metaclass__ = abc.ABCMeta
 	@abc.abstractmethod
 	def getColumns(self,columns):
 		""" rearranges columns if required """
+		return
+	def getColumnCount(self):
+		return 5
+	def getDescription(self, content, entry, doc):
+		""" get description from a table cell's stripped strings """
+		descriptiontxt = ""
+		for string in content.stripped_strings:
+			descriptiontxt = descriptiontxt + " \n" + string
+		doc.update({'description': descriptiontxt})
 		return
 
 	def doScrape(self):
@@ -31,7 +90,7 @@
 				soup = BeautifulSoup(content)
 				for row in soup.table.find_all('tr'):
 					columns = row.find_all('td')
-					if len(columns) == 5:
+					if len(columns) == self.getColumnCount():
 						(id, date, description, title, notes) = self.getColumns(columns)
 						print id.string
 						hash = scrape.mkhash(url+id.string)
@@ -40,9 +99,6 @@
 							if atag.has_key('href'):
 								links.append(scrape.fullurl(url,atag['href']))
 						doc = foidocsdb.get(hash)
-						descriptiontxt = ""
-						for string in description.stripped_strings:
-							descriptiontxt = descriptiontxt + string
 							
 						if doc == None:
 							print "saving"
@@ -52,14 +108,20 @@
 								print dtdate
                                                         	edate = ""+str(dtdate[0])+'-'+str(dtdate[1])+'-'+str(dtdate[2])
 							else:
-								edate = ""
-							doc = {'_id': hash, 'agencyID': self.getAgencyID(), 'url': self.getURL(), "links": links, 'docID': id.string,
-			 				 "date": edate, "description": descriptiontxt,"title": title.string,"notes": notes.string}
+								edate = datetime.strptime(date.string, "%d %B %Y").strftime("%Y-%m-%d")
+							doc = {'_id': hash, 'agencyID': self.getAgencyID(), 'url': self.getURL(), 'docID': id.string,
+								"date": edate, "title": title.string}
+							self.getDescription(description, row, doc)
+
+							if links != []:
+								doc.update({'links': links})
+							if notes is not None:
+								doc.update({'notes': notes.string})
 							foidocsdb.save(doc)
 						else:
-							print "already saved"
+							print "already saved "+hash
 					
-					elif len(row.find_all('th')) == 5:
+					elif len(row.find_all('th')) == self.getColumnCount():
 						print "header row"
 					
 					else:

--- a/documents/index.php
+++ b/documents/index.php
@@ -10,8 +10,8 @@
 $agenciesdb = $server->get_db('disclosr-agencies');
 
 $idtoname = Array();
-foreach ($agenciesdb->get_view("app", "byName")->rows as $row) {
-    $idtoname[$row->value] = trim($row->key);
+foreach ($agenciesdb->get_view("app", "byCanonicalName")->rows as $row) {
+    $idtoname[$row->id] = trim($row->value->name);
 }
 $foidocsdb = $server->get_db('disclosr-foidocuments');
 try {
@@ -34,3 +34,4 @@
 }
 include_footer_documents();
 ?>
+

--- /dev/null
+++ b/documents/scrapers/227cb6eb7d2c9f8a6e846df7447d6caa.py
@@ -1,1 +1,50 @@
+import sys,os
+sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
+import genericScrapers
+import scrape
+from bs4 import BeautifulSoup
+#RSS feed not detailed
 
+#http://www.doughellmann.com/PyMOTW/abc/
+class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
+	def getDescription(self, content, entry, doc):
+		link = None
+		for atag in entry.find_all('a'):
+			if atag.has_key('href'):
+				link = scrape.fullurl(self.getURL(), atag['href'])
+		(url, mime_type, htcontent) = scrape.fetchURL(scrape.docsdb, link, "foidocuments", self.getAgencyID(), False)
+		if htcontent is not None:
+			if mime_type == "text/html" or mime_type == "application/xhtml+xml" or mime_type == "application/xml":
+				# http://www.crummy.com/software/BeautifulSoup/documentation.html
+				soup = BeautifulSoup(htcontent)
+				links = []
+				description = ""
+				dldivs = soup.find('div', class_="download")
+				if dldivs is not None:
+					for atag in dldivs.find_all("a"):
+						if atag.has_key('href'):
+							links.append(scrape.fullurl(url, atag['href']))
+				nodldivs = soup.find('div', class_="incompleteNotification")
+				if nodldivs is not None and nodldivs.stripped_strings is not None:
+					for text in nodldivs.stripped_strings:
+						description = description + text
+				for row in soup.table.find_all('tr'):
+					if row is not None:
+						description = description + "\n" + row.find('th').string + ": "
+						for text in row.find('div').stripped_strings:
+							description = description + text
+				if links != []:
+					doc.update({'links': links})
+				if description != "":
+					doc.update({'description': description})
+	def getColumnCount(self):
+		return 2
+	def getColumns(self, columns):
+		(date, title) = columns
+		return (title, date, title, title, None)
+
+if __name__ == '__main__':
+    print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
+    print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
+    ScraperImplementation().doScrape()
+

--- a/documents/scrapers/3cd40b1240e987cbcd3f0e67054ce259.py
+++ b/documents/scrapers/3cd40b1240e987cbcd3f0e67054ce259.py
@@ -5,12 +5,6 @@
 
 #http://www.doughellmann.com/PyMOTW/abc/
 class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
-	def getAgencyID(self):
-		return "3cd40b1240e987cbcd3f0e67054ce259"
-
-	def getURL(self):
-		return "http://www.apvma.gov.au/about/foi/disclosure/index.php"
-
 	def getColumns(self,columns):
 		(id, date, description, title, notes) = columns
 		return (id, date, description, title, notes)
@@ -19,3 +13,4 @@
     print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
     print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
     ScraperImplementation().doScrape()
+

--- /dev/null
+++ b/documents/scrapers/820c3df09aa62f6ee7468c73bea0e323.py
@@ -1,1 +1,18 @@
+import sys,os
+sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
+import genericScrapers
+#RSS feed not detailed
 
+#http://www.doughellmann.com/PyMOTW/abc/
+class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
+	def getColumnCount(self):
+		return 2
+	def getColumns(self,columns):
+		(date, title) = columns
+		return (title, date, title, title, None)
+
+if __name__ == '__main__':
+    print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
+    print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
+    ScraperImplementation().doScrape()
+

--- a/documents/scrapers/8c9421f852c441910bf1d93a57b31d64.py
+++ b/documents/scrapers/8c9421f852c441910bf1d93a57b31d64.py
@@ -5,12 +5,6 @@
 
 #http://www.doughellmann.com/PyMOTW/abc/
 class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
-        def getAgencyID(self):
-                return "8c9421f852c441910bf1d93a57b31d64"
-
-        def getURL(self):
-                return "http://www.daff.gov.au/about/foi/ips/disclosure-log"
-
         def getColumns(self,columns):
                 (id, date, title, description, notes) = columns
                 return (id, date, description, title, notes)

--- /dev/null
+++ b/documents/scrapers/bb96fe4065afb7e0872136dd657f9369.txt
@@ -1,1 +1,2 @@
+# does not have any disclog entries or table
 

--- a/documents/scrapers/be9996f0ac58f71f23d074e82d44ead3.py
+++ b/documents/scrapers/be9996f0ac58f71f23d074e82d44ead3.py
@@ -1,11 +1,42 @@
 import sys,os
 sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
+import genericScrapers
+#RSS feed not detailed
 import scrape
-foidocsdb = scrape.couch['disclosr-foidocuments']
-
-import feedparser
-feed = feedparser.parse( "http://foi.deewr.gov.au/disclosure-log/rss")
-print feed.entries[0]
-#foreach feed.entries
+from bs4 import BeautifulSoup
+#http://www.doughellmann.com/PyMOTW/abc/
+class ScraperImplementation(genericScrapers.GenericRSSDisclogScraper):
+	def getDescription(self, content, entry, doc):
+		(url, mime_type, htcontent) = scrape.fetchURL(scrape.docsdb, entry.link, "foidocuments", self.getAgencyID(), False)
+		if htcontent is not None:
+			if mime_type == "text/html" or mime_type == "application/xhtml+xml" or mime_type == "application/xml":
+				# http://www.crummy.com/software/BeautifulSoup/documentation.html
+				soup = BeautifulSoup(htcontent)
+				links = []
+				description = ""
+				dldivs = soup.find('div', class_="download")
+				if dldivs is not None:
+					for atag in dldivs.find_all("a"):
+						if atag.has_key('href'):
+							links.append(scrape.fullurl(url, atag['href']))
+				nodldivs = soup.find('div', class_="incompleteNotification")
+				if nodldivs is not None and nodldivs.stripped_strings is not None:
+					for text in nodldivs.stripped_strings:
+						description = description + text
+				for row in soup.table.find_all('tr'):
+					if row is not None:
+						description = description + "\n" + row.find('th').string + ": "
+						for text in row.find('div').stripped_strings:
+							description = description + text
+				if links != []:
+					doc.update({'links': links})
+				if description != "":
+					doc.update({'description': description})
 
 
+if __name__ == '__main__':
+    print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericRSSDisclogScraper)
+    print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericRSSDisclogScraper)
+    ScraperImplementation().doScrape()
+
+

--- /dev/null
+++ b/documents/scrapers/c43ca6780764f4e61918e8836be74420.py
@@ -1,1 +1,16 @@
+import sys,os
+sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
+import genericScrapers
+#RSS feed not detailed
 
+#http://www.doughellmann.com/PyMOTW/abc/
+class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
+	def getColumns(self, columns):
+		(id, date, title, description, notes) = columns
+		return (id, date, description, title, notes)
+
+if __name__ == '__main__':
+    print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
+    print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
+    ScraperImplementation().doScrape()
+

--- a/documents/scrapers/e2a845e55bc9986e6c75c5ad2c508b8d.py
+++ b/documents/scrapers/e2a845e55bc9986e6c75c5ad2c508b8d.py
@@ -1,1 +1,18 @@
+import sys,os
+sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
+import genericScrapers
+#RSS feed not detailed
+
+#http://www.doughellmann.com/PyMOTW/abc/
+class ScraperImplementation(genericScrapers.GenericRSSDisclogScraper):
+        def getColumns(self,columns):
+                (id, date, title, description, notes) = columns
+                return (id, date, description, title, notes)
+
+if __name__ == '__main__':
+    print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericRSSDisclogScraper)
+    print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericRSSDisclogScraper)
+    ScraperImplementation().doScrape()
+
-www.finance.gov.au/foi/disclosure-log/foi-rss.xml
+# www.finance.gov.au/foi/disclosure-log/foi-rss.xml
+

--- a/documents/scrapers/rtk.py
+++ b/documents/scrapers/rtk.py
@@ -1,1 +1,18 @@
+import sys,os
+sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
+import genericScrapers
+#RSS feed not detailed
+
+#http://www.doughellmann.com/PyMOTW/abc/
+class ScraperImplementation(genericScrapers.GenericRSSDisclogScraper):
+        def getColumns(self,columns):
+                (id, date, title, description, notes) = columns
+                return (id, date, description, title, notes)
+
+if __name__ == '__main__':
+    print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericRSSDisclogScraper)
+    print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericRSSDisclogScraper)
+    ScraperImplementation().doScrape()
+
-http://www.righttoknow.org.au/feed/search/%20(latest_status:successful%20OR%20latest_status:partially_successful)
+# http://www.righttoknow.org.au/feed/search/%20(latest_status:successful%20OR%20latest_status:partially_successful)
+

--- a/documents/template.inc.php
+++ b/documents/template.inc.php
@@ -69,7 +69,7 @@
 
             </p>
             <ul class="nav">
-              <li class="active"><a href="#">Home</a></li>
+              <li><a href="index.php">Home</a></li>
               <li><a href="disclogsList.php">List of Disclosure Logs</a></li>
               <li><a href="about.php">About</a></li>
               
@@ -127,13 +127,21 @@
 }
 
 function displayLogEntry($row, $idtoname) {
-    echo "<div><h2>".$row->value->date.": ".$row->value->title." (".$idtoname[$row->value->agencyID].")</h2>
-    <p>".$row->value->description." <br>Note: ".$row->value->notes."</p>";
+    echo "<div><h2>".$row->value->date.": ".$row->value->title." (".$idtoname[$row->value->agencyID].")</h2> <p>".str_replace("\n","<br>",$row->value->description);
+    if (isset($row->value->notes)) {
+        echo " <br>Note: ".$row->value->notes;
+    }
+    echo "</p>";
+
+    if (isset($row->value->links)) {
 echo "<h3>Links/Documents</h3><ul>";
 foreach ($row->value->links as $link) {
     echo "<li><a href='$link'>".$link."</a></li>";
 }
+
         echo "</ul>";
+    }
         echo "<small><A href='".$row->value->url."'>View original source...</a> ID: ".$row->value->docID."</small>";
 echo"</div>";
 }
+

--- a/include/couchdb.inc.php
+++ b/include/couchdb.inc.php
@@ -3,7 +3,20 @@
 include $basePath . "schemas/schemas.inc.php";
 
 require ($basePath . 'couchdb/settee/src/settee.php');
-
+function createFOIDocumentsDesignDoc() {
+    /* "views": {
+      "web_server": {
+      "map": "function(doc) {\n  emit(doc.web_server, 1);\n}",
+      "reduce": "function (key, values, rereduce) {\n    return sum(values);\n}"
+      },
+      "byAgency": {
+      "map": "function(doc) {\n  emit(doc.agencyID, 1);\n}",
+      "reduce": "function (key, values, rereduce) {\n    return sum(values);\n}"
+      },
+      "byURL": {
+      "map": "function(doc) {\n  emit(doc.url, doc);\n}"
+*/
+}
 function createDocumentsDesignDoc() {
     /* "views": {
       "web_server": {