import sys, os
sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
import genericScrapers
import scrape
from bs4 import BeautifulSoup
import codecs

# http://www.doughellmann.com/PyMOTW/abc/
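# Scraper for an FOI disclosure log published in two page layouts: a newer
# layout handled by NewScraperImplementation and an older, archived layout
# (see the archive.treasury.gov.au URL in the __main__ block) handled by
# OldScraperImplementation. Both build on GenericOAICDisclogScraper.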
class NewScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
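    # Newer disclosure log layout: entries sit in the "TwoColumnSorting"
    # table, and linked detail pages carry their text in a "mainContent"
    # block with document links in a "SortingTable" table.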
    def getDescription(self, content, entry, doc):
        # Follow each link in the log entry, fetch the linked page and pull
        # out a fuller description plus any document links it contains.
        link = None
        links = []
        description = ""
        for atag in entry.find_all('a'):
            if atag.has_attr('href'):
                link = scrape.fullurl(self.getURL(), atag['href'])
                (url, mime_type, htcontent) = scrape.fetchURL(scrape.docsdb, link, "foidocuments", self.getAgencyID(), False)
                if htcontent is not None:
                    if mime_type in ("text/html", "application/xhtml+xml", "application/xml"):
                        # http://www.crummy.com/software/BeautifulSoup/documentation.html
                        soup = BeautifulSoup(htcontent)
                        for text in soup.find(class_="mainContent").stripped_strings:
                            description = description + text.encode('ascii', 'ignore')
                        for atag in soup.find(id="SortingTable").find_all("a"):
                            if atag.has_attr('href'):
                                links.append(scrape.fullurl(link, atag['href']))
        if links != []:
            doc.update({'links': links})
        if description != "":
            doc.update({'description': description})
|
|
    def getColumnCount(self):
        return 2

    def getTable(self, soup):
        return soup.find(id="TwoColumnSorting")

    def getColumns(self, columns):
        # Two cells per row: title then date. The title is reused for the
        # remaining fields of the five-value tuple the base class expects.
        (title, date) = columns
        return (title, date, title, title, None)

class OldScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
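    # Older, archived disclosure log layout: entries sit in the "doc-list"
    # table, and linked detail pages keep their text and document links in
    # a "content-item" block.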
    def getDescription(self, content, entry, doc):
        # Same approach as NewScraperImplementation, but against the archived
        # page structure.
        link = None
        links = []
        description = ""
        for atag in entry.find_all('a'):
            if atag.has_attr('href'):
                link = scrape.fullurl(self.getURL(), atag['href'])
                (url, mime_type, htcontent) = scrape.fetchURL(scrape.docsdb, link, "foidocuments", self.getAgencyID(), False)
                if htcontent is not None:
                    if mime_type in ("text/html", "application/xhtml+xml", "application/xml"):
                        # http://www.crummy.com/software/BeautifulSoup/documentation.html
                        soup = BeautifulSoup(htcontent)
                        for text in soup.find(id="content-item").stripped_strings:
                            description = description + text + " \n"
                        for atag in soup.find(id="content-item").find_all("a"):
                            if atag.has_attr('href'):
                                links.append(scrape.fullurl(link, atag['href']))
        if links != []:
            doc.update({'links': links})
        if description != "":
            doc.update({'description': description})
|
|
    def getColumnCount(self):
        return 2

    def getTable(self, soup):
        return soup.find(class_="doc-list")

    def getColumns(self, columns):
        # Two cells per row: date then title (the reverse of the newer layout).
        (date, title) = columns
        return (title, date, title, title, None)
|
|
if __name__ == '__main__':
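    # Sanity-check that both implementations derive from the generic OAIC
    # disclosure log scraper, then run each scrape in turn.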
    print 'Subclass:', issubclass(NewScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
    print 'Instance:', isinstance(NewScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
    NewScraperImplementation().doScrape()

    print 'Subclass:', issubclass(OldScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
    print 'Instance:', isinstance(OldScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
    # The old scraper is pointed at the archived disclosure log explicitly.
    osi = OldScraperImplementation()
    osi.disclogURL = "http://archive.treasury.gov.au/content/foi_publications.asp?year=-1&abstract=0&classification=&=&titl=Disclosure+Log+-+Documents+Released+Under+FOI"
    osi.doScrape()
|
|