refactor description parsing
diff --git a/documents/scrapers/227cb6eb7d2c9f8a6e846df7447d6caa.py b/documents/scrapers/227cb6eb7d2c9f8a6e846df7447d6caa.py
--- a/documents/scrapers/227cb6eb7d2c9f8a6e846df7447d6caa.py
+++ b/documents/scrapers/227cb6eb7d2c9f8a6e846df7447d6caa.py
@@ -1,44 +1,54 @@
 import sys,os
 sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
 import genericScrapers
-#RSS feed not detailed
+import scrape
+from bs4 import BeautifulSoup
 
 #http://www.doughellmann.com/PyMOTW/abc/
 class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
         def getDescription(self,content, entry,doc):
 		link = None
+                links = []
+                description = ""
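+                # walk each link in the feed entry and mine the linked pages for description text and attachments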
 		for atag in entry.find_all('a'):
-			if atag.has_key('href'):
-				link = scrape.fullurl(url,atag['href'])			
-                (url,mime_type,htcontent) = scrape.fetchURL(scrape.docsdb, link, "foidocuments", self.getAgencyID(), False)
-                if htcontent != None:
-                        if mime_type == "text/html" or mime_type == "application/xhtml+xml" or mime_type =="application/xml":
-                        # http://www.crummy.com/software/BeautifulSoup/documentation.html
-                                soup = BeautifulSoup(content)
-                                links = []
-                                description = ""
-                                dldivs = soup.find('div',class_="download")
-                                if dldivs != None:
-                                        for atag in dldivs.find_all("a"):
-                                                if atag.has_key('href'):
-                                                        links.append(scrape.fullurl(url,atag['href']))
-                                nodldivs = soup.find('div',class_="incompleteNotification")
-                                if nodldivs != None and nodldivs.stripped_strings != None:
-                                        for text in nodldivs.stripped_strings:
-                                                description = description + text
-                                for row in soup.table.find_all('tr'):
-                                        if row != None:
-                                                description = description + "\n" + row.find('th').string + ": "
-                                                for text in row.find('div').stripped_strings:
-                                                         description = description + text
-                                if links != []:
-                                        doc.update({'links': links})
-                                if description != "":
-                                        doc.update({ 'description': description})
+                        if atag.has_attr('href'):
+                                link = scrape.fullurl(self.getURL(), atag['href'])
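+                                # retrieve the linked detail page so its contents can be scraped for the description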
+                                (url, mime_type, htcontent) = scrape.fetchURL(scrape.docsdb, link, "foidocuments", self.getAgencyID(), False)
+                                if htcontent != None:
+                                        if mime_type == "text/html" or mime_type == "application/xhtml+xml" or mime_type == "application/xml":
+                                                # http://www.crummy.com/software/BeautifulSoup/documentation.html
+                                                soup = BeautifulSoup(htcontent)
+                                                titletag = soup.find(class_="wc-title")
+                                                heading = titletag.find("h1") if titletag != None else None
+                                                if heading != None and heading.string != None:
+                                                        description = heading.string + ": "
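+                                                # each td in the wc-content pane contributes text plus any linked documents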
+                                                contentdiv = soup.find(class_="wc-content")
+                                                if contentdiv != None:
+                                                        for row in contentdiv.find_all('td'):
+                                                                for text in row.stripped_strings:
+                                                                        description = description + text + "\n"
+                                                                for atag in row.find_all("a"):
+                                                                        if atag.has_attr('href'):
+                                                                                links.append(scrape.fullurl(link, atag['href']))
+
+                if links != []:
+                        doc.update({'links': links})
+                if description != "":
+                        doc.update({'description': description})
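+        # hooks used by GenericOAICDisclogScraper to locate and split the disclosure log table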
+        def getRows(self, table):
+                return table.find_all(class_="dl-row")
+        def findColumns(self, table):
+                return table.find_all('div')
 	def getColumnCount(self):
 		return 2
+        def getTable(self, soup):
+                return soup.find(class_="foi-dl-list")
 	def getColumns(self,columns):
-		(date, title) = columns
+                (title, date) = columns
 		return (title, date, title, title, None)
 
 if __name__ == '__main__':