[disclosr.git] / documents/scrapers/d72744fb1e5d6e87af9a5ea16cc27fa5.py
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
import genericScrapers
import scrape
from datetime import date
from pyquery import PyQuery as pq
   
class ACMADisclogScraper(genericScrapers.GenericDisclogScraper):

    def doScrape(self):
        foidocsdb = scrape.couch['disclosr-foidocuments']
        (url, mime_type, content) = scrape.fetchURL(scrape.docsdb,
            self.getURL(), "foidocuments", self.getAgencyID())

        # Parse the disclosure log page; make links absolute so the
        # stored URLs resolve outside the page context.  (A standalone
        # sketch of this pyquery pattern appears after the file.)
        d = pq(content.read())
        d.make_links_absolute(base_url=self.getURL())
        for item in d('.item-list').items():
            title = item('h3').text()
            print title
            links = item('a').map(lambda i, e: pq(e).attr('href'))
            description = item('ul').text()
            # Hash the title into a stable document ID so re-running
            # the scraper does not duplicate records (this get-then-save
            # pattern is sketched after the file).
            dochash = scrape.mkhash(self.remove_control_chars(title))
            doc = foidocsdb.get(dochash)
            if doc is None:
                print "saving " + dochash
                edate = date.today().strftime("%Y-%m-%d")
                doc = {'_id': dochash, 'agencyID': self.getAgencyID(),
                       'url': self.getURL(), 'docID': dochash,
                       "links": links, "date": edate,
                       "title": title, "description": description}
                foidocsdb.save(doc)
            else:
                print "already saved"
   
   
if __name__ == '__main__':
    print 'Subclass:', issubclass(ACMADisclogScraper,
        genericScrapers.GenericDisclogScraper)
    print 'Instance:', isinstance(ACMADisclogScraper(),
        genericScrapers.GenericDisclogScraper)
    ACMADisclogScraper().doScrape()
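
For reference, the pyquery traversal above can be exercised on its own. A
minimal sketch, assuming only the pyquery package; the HTML snippet, base
URL, and printed output are invented for illustration and do not come from
the ACMA site:

from pyquery import PyQuery as pq

html = """
<div class="item-list">
  <h3>FOI request 2012-01</h3>
  <ul><li>Documents released under FOI</li></ul>
  <a href="docs/response.pdf">Response</a>
</div>
"""

d = pq(html)
d.make_links_absolute(base_url='http://www.example.gov.au/foi/')
for item in d('.item-list').items():
    title = item('h3').text()
    links = item('a').map(lambda i, e: pq(e).attr('href'))
    print title, links
# Prints: FOI request 2012-01 ['http://www.example.gov.au/foi/docs/response.pdf']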
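
The get-then-save logic that keeps re-runs idempotent can also be shown in
isolation. A minimal sketch, assuming scrape.couch wraps the python couchdb
package (which the couch['disclosr-foidocuments'] mapping syntax suggests);
the server URL and document values are assumptions:

import couchdb

couch = couchdb.Server('http://127.0.0.1:5984/')  # assumed local CouchDB
foidocsdb = couch['disclosr-foidocuments']

dochash = 'abc123'  # stands in for scrape.mkhash(...)
doc = foidocsdb.get(dochash)  # returns None when no doc has this ID
if doc is None:
    foidocsdb.save({'_id': dochash, 'title': 'example title'})
    print "saving " + dochash
else:
    print "already saved"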