more scrapers


Former-commit-id: 012f6a5c1fa63f9ddaeb47cb13297668fa35b23e

<?php

include('template.inc.php');
include_header_documents("");
include_once('../include/common.inc.php');

echo "<table>
<tr><th>Agency Name</th><th>Disclosure Log URL recorded?</th><th>Do we monitor this URL?</th></tr>";
$agenciesdb = $server->get_db('disclosr-agencies');
$docsdb = $server->get_db('disclosr-documents');
$agencies = 0;
$disclogs = 0;
$red = 0;
$green = 0;
$orange = 0;
try {
    $rows = $agenciesdb->get_view("app", "byCanonicalName", null, true)->rows;

    if ($rows) {
        foreach ($rows as $row) {
            if (!isset($row->value->status) || $row->value->status != "suspended") {
                echo "<tr><td><a href='" . $row->value->website . "'><b>" . $row->value->name . "</b></a>";
                if ($ENV == "DEV")
                    echo "<br>(" . $row->id . ")";
                echo "</td>\n";
                $agencies++;

                echo "<td>";
                if (isset($row->value->FOIDocumentsURL)) {
                    $disclogs++;
                    echo '<a href="' . $row->value->FOIDocumentsURL . '">'
                        . $row->value->FOIDocumentsURL . '</a>';
                    if ($ENV == "DEV")
                        echo '<br><small>(<a href="viewDocument.php?hash=' . md5($row->value->FOIDocumentsURL) . '">'
                            . 'view local copy</a>)</small>';
                } else {
                    echo "<font color='red'><abbr title='No'>✘</abbr></font>";
                }
                echo "</td>\n<td>";
                if (isset($row->value->FOIDocumentsURL)) {
                    if (file_exists("./scrapers/" . $row->id . '.py')) {
                        echo "<font color='green'><abbr title='Yes'>✔</abbr></font>";
                        $green++;
                    } else if (file_exists("./scrapers/" . $row->id . '.txt')) {
                        echo "<font color='orange'><abbr title='Work in progress'><b>▬</b></abbr></font>";
                        $orange++;
                    } else {
                        echo "<font color='red'><abbr title='No'>✘</abbr></font>";
                        $red++;
                    }
                }
                echo "</td></tr>\n";
            }
        }
    }
} catch (SetteeRestClientException $e) {
    setteErrorHandler($e);
}
echo "</table>";
echo $agencies . " agencies, " . round(($disclogs / $agencies) * 100) . "% with disclosure logs; "
    . round(($green / $disclogs) * 100) . "% of logs with scrapers, " . round(($red / $disclogs) * 100) . "% without scrapers, " . round(($orange / $disclogs) * 100) . "% with work-in-progress scrapers.";

include_footer_documents();
?>
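
The colour coding above is driven purely by the filesystem: a scraper file named after the agency's CouchDB id counts as monitored, and a .txt stub marks work in progress. The same convention, expressed as a small Python helper for reference (scraper_status is illustrative only, not part of the codebase):

import os

def scraper_status(agency_id, scraper_dir="./scrapers"):
    # <id>.py is a finished scraper (green tick)
    if os.path.exists(os.path.join(scraper_dir, agency_id + ".py")):
        return "monitored"
    # <id>.txt is a work-in-progress stub (orange bar)
    if os.path.exists(os.path.join(scraper_dir, agency_id + ".txt")):
        return "work in progress"
    # neither file exists (red cross)
    return "not monitored"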
   
import sys, os
sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
import scrape
from bs4 import BeautifulSoup
from time import mktime
import feedparser
import abc
import unicodedata, re
import dateutil
from dateutil.parser import *
from datetime import *

class GenericDisclogScraper(object):
    __metaclass__ = abc.ABCMeta
    agencyID = None
    disclogURL = None

    def remove_control_chars(self, input):
        # keep printable ASCII only; control characters and non-ASCII are dropped
        return "".join([i for i in input if ord(i) in range(32, 127)])

    def getAgencyID(self):
        """ disclosr agency id, derived from the scraper's filename """
        if self.agencyID is None:
            self.agencyID = os.path.basename(sys.argv[0]).replace(".py", "")
        return self.agencyID

    def getURL(self):
        """ disclog URL, looked up from the agency record """
        if self.disclogURL is None:
            agency = scrape.agencydb.get(self.getAgencyID())
            self.disclogURL = agency['FOIDocumentsURL']
        return self.disclogURL

    @abc.abstractmethod
    def doScrape(self):
        """ do the scraping """
        return

    @abc.abstractmethod
    def getDescription(self, content, entry, doc):
        """ get description """
        return


class GenericRSSDisclogScraper(GenericDisclogScraper):

    def doScrape(self):
        foidocsdb = scrape.couch['disclosr-foidocuments']
        (url, mime_type, content) = scrape.fetchURL(scrape.docsdb, self.getURL(), "foidocuments", self.getAgencyID())
        feed = feedparser.parse(content)
        for entry in feed.entries:
            #print entry
            print entry.id
            hash = scrape.mkhash(entry.id)
            #print hash
            doc = foidocsdb.get(hash)
            #print doc
            if doc is None:
                print "saving " + hash
                edate = datetime.fromtimestamp(mktime(entry.published_parsed)).strftime("%Y-%m-%d")
                doc = {'_id': hash, 'agencyID': self.getAgencyID(), 'url': entry.link, 'docID': entry.id,
                       "date": edate, "title": entry.title}
                self.getDescription(entry, entry, doc)
                foidocsdb.save(doc)
            else:
                print "already saved"

    def getDescription(self, content, entry, doc):
        """ get description from rss entry """
        doc.update({'description': content.summary})
        return


class GenericOAICDisclogScraper(GenericDisclogScraper):
    __metaclass__ = abc.ABCMeta

    @abc.abstractmethod
    def getColumns(self, columns):
        """ rearrange columns into (id, date, title, description, notes) order if required """
        return

    def getColumnCount(self):
        return 5

    def getDescription(self, content, entry, doc):
        """ get description from table cell """
        descriptiontxt = ""
        for string in content.stripped_strings:
            descriptiontxt = descriptiontxt + " \n" + string
        doc.update({'description': descriptiontxt})
        return

    def getTitle(self, content, entry, doc):
        doc.update({'title': (''.join(content.stripped_strings))})
        return

    def getTable(self, soup):
        return soup.table

    def getRows(self, table):
        return table.find_all('tr')

    def getDate(self, content, entry, doc):
        date = ''.join(content.stripped_strings).strip()
        # drop anything in trailing parentheses, then fix a known typo
        (a, b, c) = date.partition("(")
        date = a.replace("Octber", "October")
        print date
        edate = parse(date, dayfirst=True, fuzzy=True).strftime("%Y-%m-%d")
        print edate
        doc.update({'date': edate})
        return

    def getLinks(self, content, entry, doc):
        links = []
        for atag in entry.find_all("a"):
            if atag.has_attr('href'):
                links.append(scrape.fullurl(content, atag['href']))
        if links != []:
            doc.update({'links': links})
        return

    def doScrape(self):
        foidocsdb = scrape.couch['disclosr-foidocuments']
        (url, mime_type, content) = scrape.fetchURL(scrape.docsdb, self.getURL(), "foidocuments", self.getAgencyID())
        if content is not None:
            if mime_type == "text/html" or mime_type == "application/xhtml+xml" or mime_type == "application/xml":
                # http://www.crummy.com/software/BeautifulSoup/documentation.html
                soup = BeautifulSoup(content)
                table = self.getTable(soup)
                for row in self.getRows(table):
                    columns = row.find_all('td')
                    if len(columns) == self.getColumnCount():
                        (id, date, title, description, notes) = self.getColumns(columns)
                        print ''.join(id.stripped_strings)
                        if id.string is None:
                            # no usable id text, so key the document off the date instead
                            hash = scrape.mkhash(self.remove_control_chars(url + (''.join(date.stripped_strings))))
                        else:
                            hash = scrape.mkhash(self.remove_control_chars(url + (''.join(id.stripped_strings))))
                        doc = foidocsdb.get(hash)
                        if doc is None:
                            print "saving " + hash
                            doc = {'_id': hash, 'agencyID': self.getAgencyID(), 'url': self.getURL(), 'docID': (''.join(id.stripped_strings))}
                            self.getLinks(self.getURL(), row, doc)
                            self.getTitle(title, row, doc)
                            self.getDate(date, row, doc)
                            self.getDescription(description, row, doc)
                            if notes is not None:
                                doc.update({'notes': (''.join(notes.stripped_strings))})
                            foidocsdb.save(doc)
                        else:
                            print "already saved " + hash
                    elif len(row.find_all('th')) == self.getColumnCount():
                        print "header row"
                    else:
                        print "ERROR number of columns incorrect"
                        print row
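
All of the concrete scrapers below drive GenericOAICDisclogScraper; nothing in this commit exercises GenericRSSDisclogScraper. For an agency whose disclosure log is an RSS feed, a subclass would need no overrides at all, since the RSS base class already defines doScrape and getDescription. A minimal sketch (the file would be named after the agency's CouchDB id, which getAgencyID derives from the script name):

import sys, os
sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
import genericScrapers

class ScraperImplementation(genericScrapers.GenericRSSDisclogScraper):
    pass  # doScrape and getDescription are inherited from the RSS base class

if __name__ == '__main__':
    ScraperImplementation().doScrape()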
   
import sys, os
sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
import genericScrapers
import scrape
from bs4 import BeautifulSoup

# http://www.doughellmann.com/PyMOTW/abc/
class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
    #def getTable(self, soup):
    #    return soup.find(id="ctl00_PlaceHolderMain_intro2__ControlWrapper_CerRichHtmlField").table
    def getColumnCount(self):
        return 5
    def getColumns(self, columns):
        (id, date, title, description, notes) = columns
        return (id, date, title, description, notes)

if __name__ == '__main__':
    print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
    print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
    ScraperImplementation().doScrape()
 
import sys, os
sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
import genericScrapers
import scrape
from bs4 import BeautifulSoup

# http://www.doughellmann.com/PyMOTW/abc/
class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
    #def getTable(self, soup):
    #    return soup.find(id="ctl00_PlaceHolderMain_intro2__ControlWrapper_CerRichHtmlField").table
    def getColumnCount(self):
        return 3
    def getColumns(self, columns):
        # no description column in this log; the title doubles as the description
        (id, title, date) = columns
        return (id, date, title, title, None)

if __name__ == '__main__':
    print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
    print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
    ScraperImplementation().doScrape()
 
import sys, os
sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
import genericScrapers
import scrape
from bs4 import BeautifulSoup

# http://www.doughellmann.com/PyMOTW/abc/
class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
    #def getTable(self, soup):
    #    return soup.find(id="ctl00_PlaceHolderMain_intro2__ControlWrapper_CerRichHtmlField").table
    def getColumnCount(self):
        return 4
    def getColumns(self, columns):
        (id, date, title, description) = columns
        return (id, date, title, description, None)

if __name__ == '__main__':
    print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
    print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
    ScraperImplementation().doScrape()
 
import sys, os
sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
import genericScrapers
import scrape
from bs4 import BeautifulSoup

# http://www.doughellmann.com/PyMOTW/abc/
class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
    def getTable(self, soup):
        return soup.find(id="node-30609")
    def getColumnCount(self):
        return 5
    def getColumns(self, columns):
        (id, date, title, description, notes) = columns
        return (id, date, title, description, notes)

if __name__ == '__main__':
    print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
    print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
    ScraperImplementation().doScrape()
 
import sys, os
sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
import genericScrapers
import scrape
from bs4 import BeautifulSoup

# http://www.doughellmann.com/PyMOTW/abc/
class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
    #def getTable(self, soup):
    #    return soup.find(id="ctl00_PlaceHolderMain_intro2__ControlWrapper_CerRichHtmlField").table
    def getColumnCount(self):
        return 3
    def getColumns(self, columns):
        # no title column; the description doubles as the title
        (id, date, description) = columns
        return (id, date, description, description, None)

if __name__ == '__main__':
    print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
    print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
    ScraperImplementation().doScrape()
 
import sys, os
sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
import genericScrapers
import scrape
from bs4 import BeautifulSoup

# http://www.doughellmann.com/PyMOTW/abc/
class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
    def getTable(self, soup):
        return soup.find(id="centercontent").table
    def getColumnCount(self):
        return 5
    def getColumns(self, columns):
        (id, date, title, description, notes) = columns
        return (id, date, title, description, notes)

if __name__ == '__main__':
    print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
    print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
    ScraperImplementation().doScrape()
 
import sys, os
sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
import genericScrapers
import scrape
from bs4 import BeautifulSoup

# http://www.doughellmann.com/PyMOTW/abc/
class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
    #def getTable(self, soup):
    #    return soup.find(id="ctl00_PlaceHolderMain_intro2__ControlWrapper_CerRichHtmlField").table
    def getColumnCount(self):
        return 5
    def getColumns(self, columns):
        (id, date, title, description, notes) = columns
        return (id, date, title, description, notes)

if __name__ == '__main__':
    print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
    print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
    ScraperImplementation().doScrape()
 
import sys, os
sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
import genericScrapers
import scrape
from bs4 import BeautifulSoup

# http://www.doughellmann.com/PyMOTW/abc/
class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
    #def getTable(self, soup):
    #    return soup.find(id="ctl00_PlaceHolderMain_intro2__ControlWrapper_CerRichHtmlField").table
    def getColumnCount(self):
        return 5
    def getColumns(self, columns):
        (id, date, title, description, notes) = columns
        return (id, date, title, description, notes)

if __name__ == '__main__':
    print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
    print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
    ScraperImplementation().doScrape()
 
import sys, os
sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
import genericScrapers
import scrape
from bs4 import BeautifulSoup

# http://www.doughellmann.com/PyMOTW/abc/
class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
    #def getTable(self, soup):
    #    return soup.find(id="ctl00_PlaceHolderMain_intro2__ControlWrapper_CerRichHtmlField").table
    def getColumnCount(self):
        return 3
    def getColumns(self, columns):
        # no id column; the date stands in for the document id
        (date, title, description) = columns
        return (date, date, title, description, None)

if __name__ == '__main__':
    print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
    print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
    ScraperImplementation().doScrape()
 
import sys, os
sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
import genericScrapers
import scrape
from bs4 import BeautifulSoup

# http://www.doughellmann.com/PyMOTW/abc/
class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
    #def getTable(self, soup):
    #    return soup.find(id="ctl00_PlaceHolderMain_intro2__ControlWrapper_CerRichHtmlField").table
    def getTitle(self, content, entry, doc):
        # only the first string in the cell is the title
        doc.update({'title': content.stripped_strings.next()})
        return
    def getColumnCount(self):
        return 3
    def getColumns(self, columns):
        # no title column; the description doubles as the title
        (date, id, description) = columns
        return (id, date, description, description, None)

if __name__ == '__main__':
    print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
    print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
    ScraperImplementation().doScrape()
 
import sys, os
sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
import genericScrapers
import scrape
from bs4 import BeautifulSoup

# http://www.doughellmann.com/PyMOTW/abc/
class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
    #def getTable(self, soup):
    #    return soup.find(id="ctl00_PlaceHolderMain_intro2__ControlWrapper_CerRichHtmlField").table
    def getColumnCount(self):
        return 5
    def getColumns(self, columns):
        (id, date, title, description, notes) = columns
        return (id, date, title, description, notes)

if __name__ == '__main__':
    print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
    print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
    ScraperImplementation().doScrape()
 
import sys, os
sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
import genericScrapers
import scrape
from bs4 import BeautifulSoup

# http://www.doughellmann.com/PyMOTW/abc/
class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
    #def getTable(self, soup):
    #    return soup.find(id="ctl00_PlaceHolderMain_intro2__ControlWrapper_CerRichHtmlField").table
    def getColumnCount(self):
        return 4
    def getColumns(self, columns):
        (id, date, title, description) = columns
        return (id, date, title, description, None)

if __name__ == '__main__':
    print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
    print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
    ScraperImplementation().doScrape()
 
import sys, os
sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
import genericScrapers
import scrape
from bs4 import BeautifulSoup

# http://www.doughellmann.com/PyMOTW/abc/
class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
    #def getTable(self, soup):
    #    return soup.find(id="ctl00_PlaceHolderMain_intro2__ControlWrapper_CerRichHtmlField").table
    def getColumnCount(self):
        return 4
    def getColumns(self, columns):
        # logdate is unused; the description doubles as the title
        (id, date, logdate, description) = columns
        return (id, date, description, description, None)

if __name__ == '__main__':
    print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
    print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
    ScraperImplementation().doScrape()
 
import sys, os
sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
import genericScrapers
import scrape
from bs4 import BeautifulSoup

# http://www.doughellmann.com/PyMOTW/abc/
class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
    #def getTable(self, soup):
    #    return soup.find(id="ctl00_PlaceHolderMain_intro2__ControlWrapper_CerRichHtmlField").table
    def getColumnCount(self):
        return 5
    def getColumns(self, columns):
        (id, date, title, description, notes) = columns
        return (id, date, title, description, notes)

if __name__ == '__main__':
    print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
    print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
    ScraperImplementation().doScrape()
 
import sys, os
sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
import genericScrapers
import scrape
from bs4 import BeautifulSoup

# http://www.doughellmann.com/PyMOTW/abc/
class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
    #def getTable(self, soup):
    #    return soup.find(id="ctl00_PlaceHolderMain_intro2__ControlWrapper_CerRichHtmlField").table
    def getColumnCount(self):
        return 5
    def getColumns(self, columns):
        (id, date, title, description, notes) = columns
        return (id, date, title, description, notes)

if __name__ == '__main__':
    print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
    print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
    ScraperImplementation().doScrape()
 
import sys, os
sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
import genericScrapers
import scrape
from bs4 import BeautifulSoup

# http://www.doughellmann.com/PyMOTW/abc/
class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
    #def getTable(self, soup):
    #    return soup.find(id="ctl00_PlaceHolderMain_intro2__ControlWrapper_CerRichHtmlField").table
    def getColumnCount(self):
        return 3
    def getColumns(self, columns):
        # no id column; the date stands in for the document id
        (date, title, description) = columns
        return (date, date, title, description, None)

if __name__ == '__main__':
    print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
    print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
    ScraperImplementation().doScrape()
 
import sys, os
sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
import genericScrapers
import scrape
from bs4 import BeautifulSoup

# http://www.doughellmann.com/PyMOTW/abc/
class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
    def getTable(self, soup):
        return soup.find(id="ctl00_ContentPlaceHolderMainNoAjax_EdtrTD1494_2").table
    def getColumnCount(self):
        return 4
    def getColumns(self, columns):
        # first column is empty; the title doubles as the description
        (blank, id, title, date) = columns
        return (id, date, title, title, None)

if __name__ == '__main__':
    print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
    print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
    ScraperImplementation().doScrape()
 
import sys, os
sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
import genericScrapers
import scrape
from bs4 import BeautifulSoup

# http://www.doughellmann.com/PyMOTW/abc/
class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
    #def getTable(self, soup):
    #    return soup.find(id="ctl00_PlaceHolderMain_intro2__ControlWrapper_CerRichHtmlField").table
    def getColumnCount(self):
        return 5
    def getColumns(self, columns):
        (id, date, title, description, notes) = columns
        return (id, date, title, description, notes)

if __name__ == '__main__':
    print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
    print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
    ScraperImplementation().doScrape()
 
import sys, os
sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
import genericScrapers
import scrape
from bs4 import BeautifulSoup

# http://www.doughellmann.com/PyMOTW/abc/
class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
    #def getTable(self, soup):
    #    return soup.find(id="ctl00_PlaceHolderMain_intro2__ControlWrapper_CerRichHtmlField").table
    def getColumnCount(self):
        return 4
    def getColumns(self, columns):
        (id, date, title, description) = columns
        return (id, date, title, description, None)

if __name__ == '__main__':
    print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
    print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
    ScraperImplementation().doScrape()
 
import sys, os
sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
import genericScrapers
import scrape
from bs4 import BeautifulSoup

# http://www.doughellmann.com/PyMOTW/abc/
class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
    def getTable(self, soup):
        return soup.find(id="ctl00_PlaceHolderMain_ctl01__ControlWrapper_RichHtmlField").table
    def getColumnCount(self):
        return 5
    def getColumns(self, columns):
        (id, date, title, description, notes) = columns
        return (id, date, title, description, notes)

if __name__ == '__main__':
    print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
    print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
    ScraperImplementation().doScrape()
 
import sys, os
sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
import genericScrapers
import scrape
from bs4 import BeautifulSoup

# http://www.doughellmann.com/PyMOTW/abc/
class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
    def getTable(self, soup):
        return soup.find(summary="This table lists the schedule of upcoming courses.")
    def getColumnCount(self):
        return 7
    def getColumns(self, columns):
        # link and deldate columns are unused
        (id, date, title, description, link, deldate, notes) = columns
        return (id, date, title, description, notes)

if __name__ == '__main__':
    print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
    print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
    ScraperImplementation().doScrape()
 
import sys, os
sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
import genericScrapers
import scrape
from bs4 import BeautifulSoup

# http://www.doughellmann.com/PyMOTW/abc/
class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
    #def getTable(self, soup):
    #    return soup.find(id="ctl00_PlaceHolderMain_intro2__ControlWrapper_CerRichHtmlField").table
    def getColumnCount(self):
        return 5
    def getColumns(self, columns):
        (id, date, title, description, notes) = columns
        return (id, date, title, description, notes)

if __name__ == '__main__':
    print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
    print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
    ScraperImplementation().doScrape()
 
import sys, os
sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
import genericScrapers
import scrape
from bs4 import BeautifulSoup

# http://www.doughellmann.com/PyMOTW/abc/
class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
    def getTable(self, soup):
        return soup.find(id="main").table
    def getColumnCount(self):
        return 7
    def getColumns(self, columns):
        # link and deldate columns are unused
        (id, date, title, description, link, deldate, notes) = columns
        return (id, date, title, description, notes)

if __name__ == '__main__':
    print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
    print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
    ScraperImplementation().doScrape()
 
import sys, os
sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
import genericScrapers
import scrape
from bs4 import BeautifulSoup

# http://www.doughellmann.com/PyMOTW/abc/
class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
    #def getTable(self, soup):
    #    return soup.find(id="ctl00_PlaceHolderMain_intro2__ControlWrapper_CerRichHtmlField").table
    def getColumnCount(self):
        return 5
    def getColumns(self, columns):
        (id, date, title, description, notes) = columns
        return (id, date, title, description, notes)

if __name__ == '__main__':
    print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
    print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
    ScraperImplementation().doScrape()
 
this is not a table
 
import sys, os
sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
import genericScrapers
import scrape
from bs4 import BeautifulSoup

# http://www.doughellmann.com/PyMOTW/abc/
class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
    #def getTable(self, soup):
    #    return soup.find(id="ctl00_PlaceHolderMain_intro2__ControlWrapper_CerRichHtmlField").table
    def getColumnCount(self):
        return 4
    def getColumns(self, columns):
        (id, title, date, description) = columns
        return (id, date, title, description, None)

if __name__ == '__main__':
    print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
    print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
    ScraperImplementation().doScrape()
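
Each file above is a standalone scraper, named after its agency's CouchDB id so that getAgencyID resolves correctly. A hypothetical runner (not part of this commit) that executes every finished scraper might look like:

import os, subprocess

# run every completed scraper; .txt stubs are skipped as work in progress
for name in sorted(os.listdir("./scrapers")):
    if name.endswith(".py"):
        print "running " + name
        subprocess.call(["python", os.path.join("./scrapers", name)])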