import sys
import os

sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
import genericScrapers
import scrape
from pyquery import PyQuery as pq
from dateutil.parser import parse


class ACMADisclogScraper(genericScrapers.GenericDisclogScraper):

    def doScrape(self):
        foidocsdb = scrape.couch['disclosr-foidocuments']
        (url, mime_type, content) = scrape.fetchURL(
            scrape.docsdb, self.getURL(), "foidocuments", self.getAgencyID())

        d = pq(content.read())
        d.make_links_absolute(base_url=self.getURL())
        # Each disclosure on the page is rendered as its own table: the <thead>
        # holds the title, and the <tbody> rows hold the metadata fields in a
        # fixed order (date, two description parts, link, deletion date, notes),
        # with the value always in the second cell of each row.
        for table in d('table').items():
            title = table('thead').text()
            print title
            (idate, descA, descB, link, deldate, notes) = table('tbody tr').map(
                lambda i, e: pq(e).children().eq(1).text())
            links = table('a').map(lambda i, e: pq(e).attr('href'))
            description = descA + " " + descB
            # The date cell begins with a day-first date; parse it leniently
            # and normalise to ISO format.
            edate = parse(idate[:12], dayfirst=True, fuzzy=True).strftime("%Y-%m-%d")
            print edate
            # Hash the title to get a stable document ID, so re-running the
            # scraper does not create duplicates.
            dochash = scrape.mkhash(self.remove_control_chars(title))
            doc = foidocsdb.get(dochash)
            if doc is None:
                print "saving " + dochash
                # Record the parsed disclosure date, not the scrape date.
                doc = {'_id': dochash, 'agencyID': self.getAgencyID(),
                       'url': self.getURL(), 'docID': dochash,
                       "links": links, "date": edate, "notes": notes,
                       "title": title, "description": description}
                #print doc
                foidocsdb.save(doc)
            else:
                print "already saved"


if __name__ == '__main__':
    print 'Subclass:', issubclass(ACMADisclogScraper,
        genericScrapers.GenericDisclogScraper)
    print 'Instance:', isinstance(ACMADisclogScraper(),
        genericScrapers.GenericDisclogScraper)
    ACMADisclogScraper().doScrape()