more scraper work
[disclosr.git] / documents/scrapers/3cd40b1240e987cbcd3f0e67054ce259.py
import sys, os
sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
import scrape
from bs4 import BeautifulSoup

foidocsdb = scrape.couch['disclosr-foidocuments']

# RSS feed not detailed, so scrape the FOI disclosure log table instead:
# http://www.apvma.gov.au/about/foi/disclosure/index.php
agencyID = "3cd40b1240e987cbcd3f0e67054ce259"
(url, mime_type, content) = scrape.fetchURL(scrape.docsdb,
    "http://www.apvma.gov.au/about/foi/disclosure/index.php",
    "foidocuments", agencyID)

if content is not None:
    if mime_type in ("text/html", "application/xhtml+xml", "application/xml"):
        # http://www.crummy.com/software/BeautifulSoup/documentation.html
        soup = BeautifulSoup(content)
        for row in soup.table.find_all('tr'):
            columns = row.find_all('td')
            if len(columns) == 5:
                (id, date, description, title, notes) = columns
                print id.string
                # Derive a stable document ID from the page URL plus the row's ID cell
                hash = scrape.mkhash(url + id.string)
                # Collect absolute URLs for every link in the row
                links = []
                for atag in row.find_all("a"):
                    if atag.has_attr('href'):
                        links.append(scrape.fullurl(url, atag['href']))
                # Flatten the description cell, which may contain nested markup
                descriptiontxt = "".join(description.stripped_strings)
                doc = foidocsdb.get(hash)
                if doc is None:
                    print "saving"
                    doc = {'_id': hash, 'agencyID': agencyID, 'url': url,
                           'links': links, 'docID': id.string,
                           'date': date.string, 'description': descriptiontxt,
                           'title': title.string, 'notes': notes.string}
                    foidocsdb.save(doc)
                else:
                    print "already saved"
            elif len(row.find_all('th')) == 5:
                print "header row"
            else:
                print "ERROR: unexpected number of columns"
                print row
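For a quick sanity check of the row-parsing logic in isolation, here is a minimal sketch run against invented sample markup (the table contents below are illustrative, not taken from the APVMA page). It exercises the same five-column unpacking and stripped_strings flattening without touching the scrape module or CouchDB:

from bs4 import BeautifulSoup

# Invented sample mirroring the five-column disclosure-log layout
sample = """
<table>
  <tr><th>ID</th><th>Date</th><th>Description</th><th>Title</th><th>Notes</th></tr>
  <tr><td>FOI-001</td><td>1/6/2012</td>
      <td>Request for <b>inspection</b> records</td>
      <td><a href="/docs/foi001.pdf">Decision letter</a></td>
      <td>Released in full</td></tr>
</table>
"""

soup = BeautifulSoup(sample)
for row in soup.table.find_all('tr'):
    columns = row.find_all('td')
    if len(columns) == 5:
        (id, date, description, title, notes) = columns
        # .string is None for the mixed-content description cell,
        # which is why the scraper joins stripped_strings instead
        print id.string, date.string, " ".join(description.stripped_strings)
    elif len(row.find_all('th')) == 5:
        print "header row"

Note that the mkhash-derived _id makes re-runs idempotent: an unchanged row maps to the same CouchDB document, so a second pass just hits the "already saved" branch instead of creating duplicates.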