import os
import sys

# Make the repository root importable so the shared scraper modules resolve
# regardless of the directory this script is launched from.
sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))

from bs4 import BeautifulSoup

import genericScrapers
import scrape

# CouchDB database holding the scraped FOI disclosure-log documents.
foidocsdb = scrape.couch['disclosr-foidocuments']

# RSS feed not detailed
# http://www.doughellmann.com/PyMOTW/abc/
class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
    # Disclosure log scraped: http://www.apvma.gov.au/about/foi/disclosure/index.php
    def getColumns(self, columns):
        """Map the five <td> cells of a disclosure-log table row onto the
        (id, date, description, title, notes) tuple the generic scraper
        framework expects.

        Side effect: pre-fetches the disclosure-log index page via
        scrape.fetchURL, which caches the page in scrape.docsdb; the fetched
        content is not consumed here -- row parsing is driven by the
        GenericOAICDisclogScraper base class.
        """
        (url, mime_type, content) = scrape.fetchURL(
            scrape.docsdb,
            "http://www.apvma.gov.au/about/foi/disclosure/index.php",
            "foidocuments", "3cd40b1240e987cbcd3f0e67054ce259")
        (id, date, description, title, notes) = columns
        # NOTE(review): the original continued with a BeautifulSoup parse of
        # `content` after this point, but that code was unreachable whenever
        # content was present (early return) and crashed on None access
        # whenever it was absent -- dead/broken debug scaffolding, removed.
        # The method now always returns the mapped columns.
        return (id, date, description, title, notes)
|
|
|
|
if __name__ == '__main__': |
|
print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper) |
|
print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper) |
|
ScraperImplementation().doScrape() |
|
|