import sys, os
sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
import genericScrapers
# RSS feed is not detailed, so each entry's linked page is fetched and parsed
# for download links and a fuller description.
import scrape
from bs4 import BeautifulSoup


# http://www.doughellmann.com/PyMOTW/abc/
class ScraperImplementation(genericScrapers.GenericRSSDisclogScraper):

    def getDescription(self, content, entry, doc):
        # Fetch the page the RSS entry points at; only HTML/XML responses
        # can be parsed for extra detail.
        (url, mime_type, htcontent) = scrape.fetchURL(scrape.docsdb,
            entry.link, "foidocuments", self.getAgencyID(), False)
        if htcontent is not None:
            if mime_type in ("text/html", "application/xhtml+xml", "application/xml"):
                # http://www.crummy.com/software/BeautifulSoup/documentation.html
                soup = BeautifulSoup(htcontent)
                links = []
                description = ""
                # Collect an absolute URL for every link in the download box.
                dldivs = soup.find('div', class_="download")
                if dldivs is not None:
                    for atag in dldivs.find_all("a"):
                        if atag.has_attr('href'):
                            links.append(scrape.fullurl(url, atag['href']))
                # If the release is flagged as incomplete, start the
                # description with that notice text.
                nodldivs = soup.find('div', class_="incompleteNotification")
                if nodldivs is not None:
                    for text in nodldivs.stripped_strings:
                        description = description + text
                # Build a "heading: value" line from each row of the detail table.
                if soup.table is not None:
                    for row in soup.table.find_all('tr'):
                        th = row.find('th')
                        div = row.find('div')
                        if th is not None and div is not None:
                            description = description + "\n" + th.get_text(strip=True) + ": "
                            for text in div.stripped_strings:
                                description = description + text
                if links:
                    doc.update({'links': links})
                if description != "":
                    doc.update({'description': description})


if __name__ == '__main__':
    print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericRSSDisclogScraper)
    print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericRSSDisclogScraper)
    ScraperImplementation().doScrape()