fix AGD scraper
[disclosr.git] / documents / genericScrapers.py
diff --git a/documents/genericScrapers.py b/documents/genericScrapers.py
--- a/documents/genericScrapers.py
+++ b/documents/genericScrapers.py
@@ -2,12 +2,14 @@
 sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
 import scrape
 from bs4 import BeautifulSoup
-import parsedatetime as pdt
 from time import mktime
-from datetime import datetime
 import feedparser
 import abc
 import unicodedata, re
+import dateutil
+from dateutil.parser import *
+from datetime import *
+import codecs
 
 class GenericDisclogScraper(object):
         __metaclass__ = abc.ABCMeta
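
The import hunk drops parsedatetime in favour of dateutil, whose parser copes with the loosely formatted dates that appear in agency disclosure logs. A minimal sketch of the call the scraper now leans on (the sample strings below are made up for illustration):

    from dateutil.parser import parse

    # dayfirst=True reads "3/12/2012" as 3 December 2012 rather than 12 March;
    # fuzzy=True lets the parser skip surrounding words it cannot interpret.
    for raw in ["3/12/2012", "Received 21 November 2012"]:
        print(parse(raw, dayfirst=True, fuzzy=True).strftime("%Y-%m-%d"))
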
@@ -54,7 +56,7 @@
 		  	doc = foidocsdb.get(hash)
 			#print doc
 			if doc == None:
-                        	print "saving"
+                        	print "saving "+ hash
 				edate = datetime.fromtimestamp(mktime( entry.published_parsed)).strftime("%Y-%m-%d")
                                 doc = {'_id': hash, 'agencyID': self.getAgencyID(), 'url': entry.link, 'docID': entry.id,
                                 "date": edate,"title": entry.title}
@@ -82,11 +84,32 @@
                 	descriptiontxt = descriptiontxt + " \n" + string
                 doc.update({'description': descriptiontxt})
 		return
+        def getTitle(self, content, entry, doc):
+                doc.update({'title': (''.join(content.stripped_strings))})
+		return
 	def getTable(self, soup):
 		return soup.table
+	def getRows(self, table):
+		return table.find_all('tr')
+	def getDate(self, content, entry, doc):
+		date = ''.join(content.stripped_strings).strip()
+		(a,b,c) = date.partition("(")
+		date = self.remove_control_chars(a.replace("Octber","October"))
+		print date
+		edate = parse(date, dayfirst=True, fuzzy=True).strftime("%Y-%m-%d")
+		print edate
+		doc.update({'date': edate})
+		return
+	def getLinks(self, content, entry, doc):
+                links = []
+                for atag in entry.find_all("a"):
+                       	if atag.has_key('href'):
+                               	links.append(scrape.fullurl(content,atag['href']))
+                if links != []:
+	                doc.update({'links': links})
+		return
 
 	def doScrape(self):
-		cal = pdt.Calendar()
 		foidocsdb = scrape.couch['disclosr-foidocuments']
 		(url,mime_type,content) = scrape.fetchURL(scrape.docsdb, self.getURL(), "foidocuments", self.getAgencyID())
 		if content != None:
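
The new getRows, getTitle, getDate and getLinks hooks let an individual agency scraper override one step of the table walk instead of reimplementing doScrape, and every cell is now read with ''.join(cell.stripped_strings) rather than cell.string. The difference matters because .string is None whenever a cell contains nested markup, which is presumably part of what broke the AGD log. A small self-contained demonstration:

    from bs4 import BeautifulSoup

    html = "<table><tr><td><p>FOI 12/01</p> <em>(released)</em></td></tr></table>"
    cell = BeautifulSoup(html, "html.parser").td

    # .string gives up as soon as the cell has more than one child node...
    print(cell.string)                      # None
    # ...while stripped_strings still yields each piece of text.
    print(''.join(cell.stripped_strings))   # FOI 12/01(released)
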
@@ -94,38 +117,32 @@
 			# http://www.crummy.com/software/BeautifulSoup/documentation.html
 				soup = BeautifulSoup(content)
 				table = self.getTable(soup)
-				for row in table.find_all('tr'):
+				for row in self.getRows(table):
 					columns = row.find_all('td')
 					if len(columns) == self.getColumnCount():
-						(id, date, description, title, notes) = self.getColumns(columns)
-						print id.string
+						(id, date, title, description, notes) = self.getColumns(columns)
+						print self.remove_control_chars(''.join(id.stripped_strings))
 						if id.string == None:
-							hash = scrape.mkhash(self.remove_control_chars(url+date.string))
+							hash = scrape.mkhash(self.remove_control_chars(url+(''.join(date.stripped_strings))))
 						else:
-							hash = scrape.mkhash(self.remove_control_chars(url+id.string))
-						links = []
-						for atag in row.find_all("a"):
-							if atag.has_key('href'):
-								links.append(scrape.fullurl(url,atag['href']))
+							hash = scrape.mkhash(self.remove_control_chars(url+(''.join(id.stripped_strings))))
 						doc = foidocsdb.get(hash)
 							
 						if doc == None:
-							print "saving"
-                                                        dtresult = cal.parseDateText(date.string)
-							if len(dtresult) == 2:
-								(dtdate,dtr) = dtresult
-								print dtdate
-                                                        	edate = ""+str(dtdate[0])+'-'+str(dtdate[1])+'-'+str(dtdate[2])
-							else:
-								edate = datetime.strptime(date.string.strip(), "%d %B %Y").strftime("%Y-%m-%d")
-							doc = {'_id': hash, 'agencyID': self.getAgencyID(), 'url': self.getURL(), 'docID': id.string,
-			 				 "date": edate,"title": title.string}
-                                			if links != []:
-                                        			doc.update({'links': links})
+							print "saving " +hash
+							doc = {'_id': hash, 'agencyID': self.getAgencyID(), 'url': self.getURL(), 'docID': (''.join(id.stripped_strings))}
+							self.getLinks(self.getURL(),row,doc)
+                                			self.getTitle(title,row, doc)
+                                			self.getDate(date,row, doc)
 							self.getDescription(description,row, doc)
-                                			if notes != None:
-                                        			doc.update({ 'notes': notes.string})
-							foidocsdb.save(doc)
+							if notes != None:
+                                        			doc.update({ 'notes': (''.join(notes.stripped_strings))})
+                                                        badtitles = ['-','Summary of FOI Request','FOI request(in summary form)','Summary of FOI request received by the ASC',
+'Summary of FOI request received by agency/minister','Description of Documents Requested','FOI request','Description of FOI Request','Summary of request','Description','Summary',
+'Summary of FOIrequest received by agency/minister','Summary of FOI request received','Description of    FOI Request',"FOI request",'Results 1 to 67 of 67']
+							if doc['title'] not in badtitles and doc['description'] != '':
+                                                            print "saving"
+                                                            foidocsdb.save(doc)
 						else:
 							print "already saved "+hash