beginnings rss scraper

Add a GenericRSSDisclogScraper base class and port the DEEWR, Finance and
Right To Know disclosure-log scrapers to it. Also fix the agency name lookup
in documents/index.php and make the notes and links fields optional when
rendering a log entry.

Former-commit-id: 78b7f5ee0c86281368da5eb0ed92ce1ad9cc575d

--- a/documents/genericScrapers.py
+++ b/documents/genericScrapers.py
@@ -3,7 +3,40 @@
 import scrape
 from bs4 import BeautifulSoup
 import parsedatetime as pdt
+from time import mktime
+from datetime import datetime
+import feedparser
 import abc
+
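+# Scraper for disclosure logs published as RSS feeds: fetches the feed,
+# parses it with feedparser and stores one CouchDB document per entry.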
+class GenericRSSDisclogScraper(object):
+        __metaclass__ = abc.ABCMeta
+
+        @abc.abstractmethod
+        def getAgencyID(self):
+                """ disclosr agency id """
+                return
+
+        @abc.abstractmethod
+        def getURL(self):
+                """ disclog URL """
+                return
+
+        def doScrape(self):
+                foidocsdb = scrape.couch['disclosr-foidocuments']
+                (url, mime_type, content) = scrape.fetchURL(scrape.docsdb,
+                        self.getURL(), "foidocuments", self.getAgencyID())
+                feed = feedparser.parse(content)
+                for entry in feed.entries:
+                        # key each feed item by a hash of its link so that
+                        # rerunning the scraper does not create duplicates
+                        print entry.id
+                        hash = scrape.mkhash(entry.link)
+                        doc = foidocsdb.get(hash)
+                        if doc is None:
+                                print "saving"
+                                edate = datetime.fromtimestamp(
+                                        mktime(entry.published_parsed)).strftime("%Y-%m-%d")
+                                doc = {'id': hash, 'agencyID': self.getAgencyID(),
+                                        'url': entry.link, 'docID': entry.id,
+                                        "date": edate, "description": entry.summary,
+                                        "title": entry.title}
+                                foidocsdb.save(doc)
+                        else:
+                                print "already saved"
+
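+# A minimal usage sketch (hypothetical agency id and feed URL; see
+# documents/scrapers/be9996f0ac58f71f23d074e82d44ead3.py for a real subclass):
+#
+#       class ExampleScraper(GenericRSSDisclogScraper):
+#               def getAgencyID(self):
+#                       return "0123456789abcdef0123456789abcdef"
+#               def getURL(self):
+#                       return "http://www.example.gov.au/foi/rss"
+#
+#       ExampleScraper().doScrape()
+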
 class GenericOAICDisclogScraper(object):
 	__metaclass__ = abc.ABCMeta
 	@abc.abstractmethod
@@ -42,7 +75,7 @@
 						doc = foidocsdb.get(hash)
 						descriptiontxt = ""
 						for string in description.stripped_strings:
-							descriptiontxt = descriptiontxt + string
+							descriptiontxt = descriptiontxt + " \n" + string
 							
 						if doc == None:
 							print "saving"
@@ -53,7 +86,7 @@
                                                         	edate = ""+str(dtdate[0])+'-'+str(dtdate[1])+'-'+str(dtdate[2])
 							else:
 								edate = ""
-							doc = {'_id': hash, 'agencyID': self.getAgencyID(), 'url': self.getURL(), "links": links, 'docID': id.string,
+							doc = {'id': hash, 'agencyID': self.getAgencyID(), 'url': self.getURL(), "links": links, 'docID': id.string,
 			 				 "date": edate, "description": descriptiontxt,"title": title.string,"notes": notes.string}
 							foidocsdb.save(doc)
 						else:

--- a/documents/index.php
+++ b/documents/index.php
@@ -10,8 +10,8 @@
 $agenciesdb = $server->get_db('disclosr-agencies');
 
 $idtoname = Array();
-foreach ($agenciesdb->get_view("app", "byName")->rows as $row) {
-    $idtoname[$row->value] = trim($row->key);
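+// map each agency's CouchDB id to its canonical name for display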
+foreach ($agenciesdb->get_view("app", "byCanonicalName")->rows as $row) {
+    $idtoname[$row->id] = trim($row->value->name);
 }
 $foidocsdb = $server->get_db('disclosr-foidocuments');
 try {
@@ -34,3 +34,4 @@
 }
 include_footer_documents();
 ?>
+

--- a/documents/scrapers/be9996f0ac58f71f23d074e82d44ead3.py
+++ b/documents/scrapers/be9996f0ac58f71f23d074e82d44ead3.py
@@ -1,11 +1,19 @@
 import sys,os
 sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
-import scrape
-foidocsdb = scrape.couch['disclosr-foidocuments']
+import genericScrapers
+# RSS feed is not detailed
 
-import feedparser
-feed = feedparser.parse( "http://foi.deewr.gov.au/disclosure-log/rss")
-print feed.entries[0]
-#foreach feed.entries
+#http://www.doughellmann.com/PyMOTW/abc/
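+# abc.ABCMeta makes instantiation fail unless getAgencyID/getURL are overridden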
+class ScraperImplementation(genericScrapers.GenericRSSDisclogScraper):
+        def getAgencyID(self):
+                return "be9996f0ac58f71f23d074e82d44ead3"
+
+        def getURL(self):
+                return "http://foi.deewr.gov.au/disclosure-log/rss"
+
+if __name__ == '__main__':
+    print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericRSSDisclogScraper)
+    print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericRSSDisclogScraper)
+    ScraperImplementation().doScrape()
 
 

--- a/documents/scrapers/e2a845e55bc9986e6c75c5ad2c508b8d.py
+++ b/documents/scrapers/e2a845e55bc9986e6c75c5ad2c508b8d.py
@@ -1,1 +1,24 @@
+import sys,os
+sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
+import genericScrapers
+# RSS feed is not detailed
+
+#http://www.doughellmann.com/PyMOTW/abc/
+class ScraperImplementation(genericScrapers.GenericRSSDisclogScraper):
+        def getAgencyID(self):
+                return "e2a845e55bc9986e6c75c5ad2c508b8d"
+
+        def getURL(self):
+                return "http://www.finance.gov.au/foi/disclosure-log/foi-rss.xml"
+
+if __name__ == '__main__':
+    print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericRSSDisclogScraper)
+    print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericRSSDisclogScraper)
+    ScraperImplementation().doScrape()
+
-www.finance.gov.au/foi/disclosure-log/foi-rss.xml

--- a/documents/scrapers/rtk.py
+++ b/documents/scrapers/rtk.py
@@ -1,1 +1,24 @@
+import sys,os
+sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
+import genericScrapers
+# RSS feed is not detailed
+
+#http://www.doughellmann.com/PyMOTW/abc/
+class ScraperImplementation(genericScrapers.GenericRSSDisclogScraper):
+        def getAgencyID(self):
+                # note: this id was copied from the DEEWR scraper; Right To Know
+                # aggregates many agencies, so it has no single agency id yet
+                return "be9996f0ac58f71f23d074e82d44ead3"
+
+        def getURL(self):
+                return "http://www.righttoknow.org.au/feed/search/%20(latest_status:successful%20OR%20latest_status:partially_successful)"
+
+if __name__ == '__main__':
+    print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericRSSDisclogScraper)
+    print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericRSSDisclogScraper)
+    ScraperImplementation().doScrape()
+
-http://www.righttoknow.org.au/feed/search/%20(latest_status:successful%20OR%20latest_status:partially_successful)

--- a/documents/template.inc.php
+++ b/documents/template.inc.php
@@ -127,13 +127,21 @@
 }
 
 function displayLogEntry($row, $idtoname) {
-    echo "<div><h2>".$row->value->date.": ".$row->value->title." (".$idtoname[$row->value->agencyID].")</h2>
-    <p>".$row->value->description." <br>Note: ".$row->value->notes."</p>";
-echo "<h3>Links/Documents</h3><ul>";
-foreach ($row->value->links as $link) {
-    echo "<li><a href='$link'>".$link."</a></li>";
-}
-        echo "</ul>";
+    // the description always prints; notes and links render only when present
+    echo "<div><h2>".$row->value->date.": ".$row->value->title." (".$idtoname[$row->value->agencyID].")</h2> <p>".$row->value->description;
+    if (isset($row->value->notes)) {
+        echo " <br>Note: ".$row->value->notes;
+    }
+    echo "</p>";
+
+    if (isset($row->value->links)) {
+        echo "<h3>Links/Documents</h3><ul>";
+        foreach ($row->value->links as $link) {
+            echo "<li><a href='$link'>".$link."</a></li>";
+        }
+        echo "</ul>";
+    }
         echo "<small><A href='".$row->value->url."'>View original source...</a> ID: ".$row->value->docID."</small>";
 echo"</div>";
 }
+

--- a/include/couchdb.inc.php
+++ b/include/couchdb.inc.php
@@ -3,7 +3,18 @@
 include $basePath . "schemas/schemas.inc.php";
 
 require ($basePath . 'couchdb/settee/src/settee.php');
-
+function createFOIDocumentsDesignDoc() {
+    /* "views": {
+      "web_server": {
+      "map": "function(doc) {\n  emit(doc.web_server, 1);\n}",
+      "reduce": "function (key, values, rereduce) {\n    return sum(values);\n}"
+      },
+      "byAgency": {
+      "map": "function(doc) {\n  emit(doc.agencyID, 1);\n}",
+      "reduce": "function (key, values, rereduce) {\n    return sum(values);\n}"
+      },
+      "byURL": {
+      "map": "function(doc) {\n  emit(doc.url, doc);\n}"
+      }
+      } */
+}
 function createDocumentsDesignDoc() {
     /* "views": {
       "web_server": {