import sys, os
sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
import genericScrapers
import scrape
from bs4 import BeautifulSoup

# http://www.doughellmann.com/PyMOTW/abc/


class NewScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
    def getDescription(self, content, entry, doc):
        """Follow each link in the disclosure-log entry, fetch the linked
        page, and collect its text and outgoing links into doc."""
        links = []
        description = ""
        for atag in entry.find_all('a'):
            if not atag.has_attr('href'):
                continue
            link = scrape.fullurl(self.getURL(), atag['href'])
            (url, mime_type, htcontent) = scrape.fetchURL(
                scrape.docsdb, link, "foidocuments", self.getAgencyID(), False)
            if htcontent is None:
                continue
            if mime_type in ("text/html", "application/xhtml+xml", "application/xml"):
                # http://www.crummy.com/software/BeautifulSoup/documentation.html
                soup = BeautifulSoup(htcontent, "html.parser")
                maincontent = soup.find(class_="mainContent")
                if maincontent is not None:
                    for text in maincontent.stripped_strings:
                        description = description + text.encode('ascii', 'ignore')
                table = soup.find(id="SortingTable")
                if table is not None:
                    for linktag in table.find_all("a"):
                        if linktag.has_attr('href'):
                            links.append(scrape.fullurl(link, linktag['href']))
        if links:
            doc.update({'links': links})
        if description != "":
            doc.update({'description': description})

    def getColumnCount(self):
        return 2

    def getTable(self, soup):
        return soup.find(id="TwoColumnSorting")

    def getColumns(self, columns):
        (title, date) = columns
        return (title, date, title, title, None)


class OldScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
    def getDescription(self, content, entry, doc):
        """Same idea as NewScraperImplementation.getDescription, but for the
        archived page layout, which keeps everything in a content-item div."""
        links = []
        description = ""
        for atag in entry.find_all('a'):
            if not atag.has_attr('href'):
                continue
            link = scrape.fullurl(self.getURL(), atag['href'])
            (url, mime_type, htcontent) = scrape.fetchURL(
                scrape.docsdb, link, "foidocuments", self.getAgencyID(), False)
            if htcontent is None:
                continue
            if mime_type in ("text/html", "application/xhtml+xml", "application/xml"):
                # http://www.crummy.com/software/BeautifulSoup/documentation.html
                soup = BeautifulSoup(htcontent, "html.parser")
                contentitem = soup.find(id="content-item")
                if contentitem is not None:
                    for text in contentitem.stripped_strings:
                        description = description + text + " \n"
                    for linktag in contentitem.find_all("a"):
                        if linktag.has_attr('href'):
                            links.append(scrape.fullurl(link, linktag['href']))
        if links:
            doc.update({'links': links})
        if description != "":
            doc.update({'description': description})

    def getColumnCount(self):
        return 2

    def getTable(self, soup):
        return soup.find(class_="doc-list")

    def getColumns(self, columns):
        (date, title) = columns
        return (title, date, title, title, None)


if __name__ == '__main__':
    print 'Subclass:', issubclass(NewScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
    print 'Instance:', isinstance(NewScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
    NewScraperImplementation().doScrape()
    print 'Subclass:', issubclass(OldScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
    print 'Instance:', isinstance(OldScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
    # The old disclosure log lives on the Treasury archive site, so point the
    # scraper at it explicitly before running it.
    osi = OldScraperImplementation()
    osi.disclogURL = "http://archive.treasury.gov.au/content/foi_publications.asp?year=-1&abstract=0&classification=&=&titl=Disclosure+Log+-+Documents+Released+Under+FOI"
    osi.doScrape()