--- a/documents/genericScrapers.py
+++ b/documents/genericScrapers.py
@@ -7,11 +7,14 @@
 from datetime import datetime
 import feedparser
 import abc
+import unicodedata, re
 
 class GenericDisclogScraper(object):
 	__metaclass__ = abc.ABCMeta
 	agencyID = None
 	disclogURL = None
+	def remove_control_chars(self, input):
+		return "".join([i for i in input if ord(i) in range(32, 127)])
 	def getAgencyID(self):
 		""" disclosr agency id """
 		if self.agencyID == None:
@@ -79,6 +82,8 @@
 			descriptiontxt = descriptiontxt + " \n" + string
 		doc.update({'description': descriptiontxt})
 		return
+	def getTable(self, soup):
+		return soup.table
 
 	def doScrape(self):
 		cal = pdt.Calendar()
@@ -88,12 +93,16 @@
 		if mime_type == "text/html" or mime_type == "application/xhtml+xml" or mime_type =="application/xml":
 			# http://www.crummy.com/software/BeautifulSoup/documentation.html
 			soup = BeautifulSoup(content)
-			for row in soup.table.find_all('tr'):
+			table = self.getTable(soup)
+			for row in table.find_all('tr'):
 				columns = row.find_all('td')
 				if len(columns) == self.getColumnCount():
 					(id, date, description, title, notes) = self.getColumns(columns)
 					print id.string
-					hash = scrape.mkhash(url+id.string)
+					if id.string == None:
+						hash = scrape.mkhash(self.remove_control_chars(url+date.string))
+					else:
+						hash = scrape.mkhash(self.remove_control_chars(url+id.string))
 					links = []
 					for atag in row.find_all("a"):
 						if atag.has_key('href'):
@@ -108,13 +117,12 @@
 							print dtdate
 							edate = ""+str(dtdate[0])+'-'+str(dtdate[1])+'-'+str(dtdate[2])
 						else:
-							edate = datetime.strptime(date.string, "%d %B %Y").strftime("%Y-%m-%d")
+							edate = datetime.strptime(date.string.strip(), "%d %B %Y").strftime("%Y-%m-%d")
 						doc = {'_id': hash, 'agencyID': self.getAgencyID(), 'url': self.getURL(), 'docID': id.string,
 						 "date": edate,"title": title.string}
-						self.getDescription(description,row, doc)
-
 						if links != []:
 							doc.update({'links': links})
+						self.getDescription(description,row, doc)
 						if notes != None:
 							doc.update({ 'notes': notes.string})
 						foidocsdb.save(doc)