upgrade scrapers to beautifulsoup4


Former-commit-id: 350a28ba24aef1eb018e2db0275aa62fe3369728

# Scraper for the APVMA FOI disclosure log (BeautifulSoup 4 version).
# Fetches the index page via the shared "scrape" helper and prints one
# line per disclosure-log table row.
import sys
import os

# Make the parent directory importable so the shared "scrape" module is found
# regardless of the current working directory.
sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
import scrape

foidocsdb = scrape.couch['disclosr-foidocuments']

# RSS feed not detailed
from bs4 import BeautifulSoup
# http://www.apvma.gov.au/about/foi/disclosure/index.php
(url, mime_type, content) = scrape.fetchURL(
    scrape.docsdb,
    "http://www.apvma.gov.au/about/foi/disclosure/index.php",
    "foidocuments",
    "3cd40b1240e987cbcd3f0e67054ce259")

if content is not None:
    # Only parse responses that are (X)HTML/XML; anything else is skipped.
    if mime_type in ("text/html", "application/xhtml+xml", "application/xml"):
        # http://www.crummy.com/software/BeautifulSoup/documentation.html
        soup = BeautifulSoup(content)
        # The disclosure log is the first <table> on the page; each data row
        # has exactly five <td> cells, the header row five <th> cells.
        for row in soup.table.find_all('tr'):
            columns = row.find_all('td')
            if len(columns) == 5:
                # Renamed from "id" to avoid shadowing the builtin.
                (doc_id, date, description, title, notes) = columns
                print(doc_id)
            elif len(row.find_all('th')) == 5:
                print("header row")
            else:
                print("ERROR number of columns incorrect")
                print(row)