From: Maxious About
--- /dev/null
+++ b/documents/agency.php
@@ -1,1 +1,41 @@
+<?php
+include('template.inc.php');
+include_once('../include/common.inc.php');
+$agenciesdb = $server->get_db('disclosr-agencies');
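+// build a lookup from agency CouchDB id to display name (byCanonicalName view)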
+$idtoname = Array();
+foreach ($agenciesdb->get_view("app", "byCanonicalName")->rows as $row) {
+ $idtoname[$row->id] = trim($row->value->name);
+}
+$foidocsdb = $server->get_db('disclosr-foidocuments');
+
+include_header_documents((isset($_REQUEST['id']) ? $idtoname[$_REQUEST['id']] : 'Entries by Agency'));
+$endkey = (isset($_REQUEST['end_key']) ? $_REQUEST['end_key'] : '9999-99-99');
+?>
+<?php
+try {
+    if (isset($_REQUEST['id'])) {
+        $rows = $foidocsdb->get_view("app", "byAgencyID", $_REQUEST['id'], false, false, false)->rows;
+ foreach ($rows as $row) {
+ //print_r($rows);
+ echo displayLogEntry($row, $idtoname);
+ if (!isset($startkey))
+ $startkey = $row->key;
+ $endkey = $row->key;
+ }
+ } else {
+ $rows = $foidocsdb->get_view("app", "byAgencyID?group=true", null, false, false, true)->rows;
+ if ($rows) {
+ foreach ($rows as $row) {
+                echo '<a href="agency.php?id=' . $row->key . '">' . $idtoname[$row->key] . " (" . $row->value . " records)</a> <br>\n";
+ }
+ }
+ }
+} catch (SetteeRestClientException $e) {
+ setteErrorHandler($e);
+}
+echo "next page ";
+include_footer_documents();
+?>
--- a/documents/charts.php
+++ b/documents/charts.php
@@ -1,6 +1,6 @@
<?php
$agenciesdb = $server->get_db('disclosr-agencies');
@@ -15,29 +15,28 @@
Charts
Lorem ipsum.
-
+
+
+
+<?php
+/*
+$agenciesdb = $server->get_db('disclosr-agencies');
+
+$idtoname = Array();
+foreach ($agenciesdb->get_view("app", "byCanonicalName")->rows as $row) {
+ $idtoname[$row->id] = trim($row->value->name);
+}
+$foidocsdb = $server->get_db('disclosr-foidocuments');
+try {
+ $rows = $foidocsdb->get_view("app", "byDate", Array($endkey, '0000-00-00'), true, 20)->rows;
+ if ($rows) {
+ foreach ($rows as $key => $row) {
+ echo displayLogEntry($row, $idtoname);
+ if (!isset($startkey)) $startkey = $row->key;
+ $endkey = $row->key;
+ }
+ }
+} catch (SetteeRestClientException $e) {
+ setteErrorHandler($e);
+}
+echo "next page ";
+*/
+include_footer_documents();
+?>
+
--- a/documents/disclogsList.php
+++ b/documents/disclogsList.php
@@ -1,7 +1,7 @@
--- a/documents/genericScrapers.py
+++ b/documents/genericScrapers.py
@@ -13,6 +13,8 @@
from datetime import *
import codecs
+import difflib
+
from StringIO import StringIO
from pdfminer.pdfparser import PDFDocument, PDFParser
@@ -49,6 +51,31 @@
""" do the scraping """
return
+class GenericHTMLDisclogScraper(GenericDisclogScraper):
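+    """ Fallback scraper for disclosure logs published as free-form HTML
+        rather than a table: it stores the fetched page and, when the
+        content hash changes, logs an entry containing an HTML diff
+        against the previously saved copy. """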
+
+ def doScrape(self):
+ foidocsdb = scrape.couch['disclosr-foidocuments']
+ (url, mime_type, rcontent) = scrape.fetchURL(scrape.docsdb,
+ self.getURL(), "foidocuments", self.getAgencyID())
+ content = rcontent.read()
+ dochash = scrape.mkhash(content)
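+        # an unseen content hash means the page differs from every
+        # version logged so far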
+ doc = foidocsdb.get(dochash)
+ if doc is None:
+ print "saving " + dochash
+ description = "This log may have updated but as it was not in a table last time we viewed it, we cannot extract what has changed. Please refer to the agency's website Disclosure Log to see the most recent entries"
+ last_attach = scrape.getLastAttachment(scrape.docsdb, self.getURL())
+            if last_attach is not None:
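+                # difflib.HtmlDiff renders a side-by-side HTML table of the
+                # line-by-line differences between the old and new page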
+ html_diff = difflib.HtmlDiff()
+ description = description + "\nChanges: "
+ description = description + html_diff.make_table(last_attach.read().split('\n'),
+ content.split('\n'))
+ edate = date.today().strftime("%Y-%m-%d")
+ doc = {'_id': dochash, 'agencyID': self.getAgencyID()
+ , 'url': self.getURL(), 'docID': dochash,
+ "date": edate, "title": "Disclosure Log Updated", "description": description}
+ foidocsdb.save(doc)
+ else:
+ print "already saved"
class GenericPDFDisclogScraper(GenericDisclogScraper):
@@ -196,10 +223,9 @@
(url, mime_type, content) = scrape.fetchURL(scrape.docsdb,
self.getURL(), "foidocuments", self.getAgencyID())
if content is not None:
- if mime_type is "text/html"\
- or mime_type is "application/xhtml+xml"\
- or mime_type is"application/xml":
+ if mime_type == "text/html" or mime_type == "application/xhtml+xml" or mime_type == "application/xml":
# http://www.crummy.com/software/BeautifulSoup/documentation.html
+ print "parsing"
soup = BeautifulSoup(content)
table = self.getTable(soup)
for row in self.getRows(table):
@@ -217,11 +243,11 @@
dochash = scrape.mkhash(
self.remove_control_chars(
url + (''.join(id.stripped_strings))))
- doc = foidocsdb.get(hash)
+ doc = foidocsdb.get(dochash)
if doc is None:
- print "saving " + hash
- doc = {'_id': hash,
+ print "saving " + dochash
+ doc = {'_id': dochash,
'agencyID': self.getAgencyID(),
'url': self.getURL(),
'docID': (''.join(id.stripped_strings))}
--- a/documents/index.php
+++ b/documents/index.php
@@ -3,10 +3,11 @@
include('template.inc.php');
include_header_documents("");
include_once('../include/common.inc.php');
-$startkey = (isset($_REQUEST['start_key']) ? $_REQUEST['start_key'] : '9999-99-99');
+$endkey = (isset($_REQUEST['end_key']) ? $_REQUEST['end_key'] : '9999-99-99');
?>
+
<?php
$agenciesdb = $server->get_db('disclosr-agencies');
$idtoname = Array();
@@ -15,17 +16,18 @@
}
$foidocsdb = $server->get_db('disclosr-foidocuments');
try {
- $rows = $foidocsdb->get_view("app", "byDate", Array($startkey, '0000-00-00'), true, 20)->rows;
+ $rows = $foidocsdb->get_view("app", "byDate", Array($endkey, '0000-00-00'), true, 20)->rows;
if ($rows) {
foreach ($rows as $key => $row) {
echo displayLogEntry($row, $idtoname);
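+ // remember the first and last keys on this page; $endkey drives the next-page link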
+ if (!isset($startkey)) $startkey = $row->key;
$endkey = $row->key;
}
}
} catch (SetteeRestClientException $e) {
setteErrorHandler($e);
}
-echo "next page";
+echo "next page ";
include_footer_documents();
?>
--- a/documents/rss.xml.php
+++ b/documents/rss.xml.php
@@ -8,21 +8,28 @@
//Creating an instance of FeedWriter class.
$TestFeed = new RSS2FeedWriter();
//Setting the channel elements
-//Use wrapper functions for common channelelements
-$TestFeed->setTitle('disclosurelo.gs Newest Entries - All');
-$TestFeed->setLink('http://disclosurelo.gs/rss.xml.php');
-$TestFeed->setDescription('disclosurelo.gs Newest Entries - All Agencies');
-$TestFeed->setChannelElement('language', 'en-us');
-$TestFeed->setChannelElement('pubDate', date(DATE_RSS, time()));
-
-//Retriving informations from database
+//Retrieving information from the database
$idtoname = Array();
$agenciesdb = $server->get_db('disclosr-agencies');
foreach ($agenciesdb->get_view("app", "byCanonicalName")->rows as $row) {
$idtoname[$row->id] = trim($row->value->name);
}
$foidocsdb = $server->get_db('disclosr-foidocuments');
-$rows = $foidocsdb->get_view("app", "byDate", Array('9999-99-99', '0000-00-00', 50), true)->rows;
+if (isset($_REQUEST['id'])) {
+ $rows = $foidocsdb->get_view("app", "byAgencyID", $_REQUEST['id'], false, false, false)->rows;
+ $title = $idtoname[$_REQUEST['id']];
+} else {
+ $rows = $foidocsdb->get_view("app", "byDate", Array('9999-99-99', '0000-00-00', 50), true)->rows;
+ $title = 'All Agencies';
+}
+//Use wrapper functions for common channel elements
+$TestFeed->setTitle('disclosurelo.gs Newest Entries - '.$title);
+$TestFeed->setLink('http://disclosurelo.gs/rss.xml.php'.(isset($_REQUEST['id'])? '?id='.$_REQUEST['id'] : ''));
+$TestFeed->setDescription('disclosurelo.gs Newest Entries - '.$title);
+$TestFeed->setChannelElement('language', 'en-us');
+$TestFeed->setChannelElement('pubDate', date(DATE_RSS, time()));
+
+
//print_r($rows);
foreach ($rows as $row) {
//Create an empty FeedItem
--- a/documents/scrape.py
+++ b/documents/scrape.py
@@ -76,6 +76,16 @@
addinfourl = urllib2.addinfourl(fp, headers, req.get_full_url())
addinfourl.code = code
return addinfourl
+
+def getLastAttachment(docsdb, url):
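+    """ Return the last attachment saved for this URL's cached document
+        (used to diff a page against its previous version), or None if
+        the page has never been stored. """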
+ hash = mkhash(url)
+ doc = docsdb.get(hash)
+ if doc is not None:
+ last_attachment_fname = doc["_attachments"].keys()[-1]
+ last_attachment = docsdb.get_attachment(doc,last_attachment_fname)
+ return last_attachment
+ else:
+ return None
def fetchURL(docsdb, url, fieldName, agencyID, scrape_again=True):
url = canonurl(url)
--- a/documents/scrapers/0049d35216493c545ef5f7f000e6b252.py
+++ b/documents/scrapers/0049d35216493c545ef5f7f000e6b252.py
@@ -12,8 +12,8 @@
if __name__ == '__main__':
print 'Subclass:', issubclass(ScraperImplementation,
- genericScrapers.GenericOAICDisclogScraper)
+ genericScrapers.GenericPDFDisclogScraper)
print 'Instance:', isinstance(ScraperImplementation(),
- genericScrapers.GenericOAICDisclogScraper)
+ genericScrapers.GenericPDFDisclogScraper)
ScraperImplementation().doScrape()
--- /dev/null
+++ b/documents/scrapers/0ae822d1a748e60d90f0b79b97d5a3e5.py
@@ -1,1 +1,19 @@
+import sys
+import os
+sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
+import genericScrapers
+
+class ScraperImplementation(genericScrapers.GenericHTMLDisclogScraper):
+
+ def __init__(self):
+ super(ScraperImplementation, self).__init__()
+
+
+if __name__ == '__main__':
+ print 'Subclass:', issubclass(ScraperImplementation,
+ genericScrapers.GenericHTMLDisclogScraper)
+ print 'Instance:', isinstance(ScraperImplementation(),
+ genericScrapers.GenericHTMLDisclogScraper)
+ ScraperImplementation().doScrape()
+
--- /dev/null
+++ b/documents/scrapers/31685505438d393f45a90f442b8fa27f.py
@@ -1,1 +1,19 @@
+import sys
+import os
+sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
+import genericScrapers
+
+class ScraperImplementation(genericScrapers.GenericPDFDisclogScraper):
+
+ def __init__(self):
+ super(ScraperImplementation, self).__init__()
+
+
+if __name__ == '__main__':
+ print 'Subclass:', issubclass(ScraperImplementation,
+ genericScrapers.GenericPDFDisclogScraper)
+ print 'Instance:', isinstance(ScraperImplementation(),
+ genericScrapers.GenericPDFDisclogScraper)
+ ScraperImplementation().doScrape()
+
--- a/documents/scrapers/31685505438d393f45a90f442b8fa27f.txt
+++ /dev/null
@@ -1,2 +1,1 @@
-pdf
--- /dev/null
+++ b/documents/scrapers/3e2f110af49d62833a835bd257771ffb.py
@@ -1,1 +1,19 @@
+import sys
+import os
+sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
+import genericScrapers
+
+class ScraperImplementation(genericScrapers.GenericHTMLDisclogScraper):
+
+ def __init__(self):
+ super(ScraperImplementation, self).__init__()
+
+
+if __name__ == '__main__':
+ print 'Subclass:', issubclass(ScraperImplementation,
+ genericScrapers.GenericHTMLDisclogScraper)
+ print 'Instance:', isinstance(ScraperImplementation(),
+ genericScrapers.GenericHTMLDisclogScraper)
+ ScraperImplementation().doScrape()
+
--- a/documents/scrapers/3e2f110af49d62833a835bd257771ffb.txt
+++ /dev/null
@@ -1,2 +1,1 @@
-no disclog
--- /dev/null
+++ b/documents/scrapers/4d2af2dcc72f1703bbf04b13b03720a8.py
@@ -1,1 +1,19 @@
+import sys
+import os
+sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
+import genericScrapers
+
+class ScraperImplementation(genericScrapers.GenericHTMLDisclogScraper):
+
+ def __init__(self):
+ super(ScraperImplementation, self).__init__()
+
+
+if __name__ == '__main__':
+ print 'Subclass:', issubclass(ScraperImplementation,
+ genericScrapers.GenericHTMLDisclogScraper)
+ print 'Instance:', isinstance(ScraperImplementation(),
+ genericScrapers.GenericHTMLDisclogScraper)
+ ScraperImplementation().doScrape()
+
--- a/documents/scrapers/4d2af2dcc72f1703bbf04b13b03720a8.txt
+++ /dev/null
@@ -1,2 +1,1 @@
-no disclog
--- a/documents/scrapers/50601505ef69483121a6d130bb0515e4.txt
+++ b/documents/scrapers/50601505ef69483121a6d130bb0515e4.txt
@@ -1,1 +1,1 @@
-apsc has ACMA style disclog
+ACMA style
--- /dev/null
+++ b/documents/scrapers/525c3953187da08cd702359b2fc2997f.py
@@ -1,1 +1,19 @@
+import sys
+import os
+sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
+import genericScrapers
+
+class ScraperImplementation(genericScrapers.GenericHTMLDisclogScraper):
+
+ def __init__(self):
+ super(ScraperImplementation, self).__init__()
+
+
+if __name__ == '__main__':
+ print 'Subclass:', issubclass(ScraperImplementation,
+ genericScrapers.GenericHTMLDisclogScraper)
+ print 'Instance:', isinstance(ScraperImplementation(),
+ genericScrapers.GenericHTMLDisclogScraper)
+ ScraperImplementation().doScrape()
+
--- a/documents/scrapers/525c3953187da08cd702359b2fc2997f.txt
+++ /dev/null
@@ -1,2 +1,1 @@
-no disclog
--- /dev/null
+++ b/documents/scrapers/627f116dfe42c9f27ad6747be0aa44e2.py
@@ -1,1 +1,19 @@
+import sys
+import os
+sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
+import genericScrapers
+
+class ScraperImplementation(genericScrapers.GenericHTMLDisclogScraper):
+
+ def __init__(self):
+ super(ScraperImplementation, self).__init__()
+
+
+if __name__ == '__main__':
+ print 'Subclass:', issubclass(ScraperImplementation,
+ genericScrapers.GenericHTMLDisclogScraper)
+ print 'Instance:', isinstance(ScraperImplementation(),
+ genericScrapers.GenericHTMLDisclogScraper)
+ ScraperImplementation().doScrape()
+
--- a/documents/scrapers/627f116dfe42c9f27ad6747be0aa44e2.txt
+++ /dev/null
@@ -1,1 +1,1 @@
-no disclog
+
--- /dev/null
+++ b/documents/scrapers/795e7a8afb39a420360aa207b0cb1306.py
@@ -1,1 +1,19 @@
+import sys
+import os
+sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
+import genericScrapers
+
+class ScraperImplementation(genericScrapers.GenericHTMLDisclogScraper):
+
+ def __init__(self):
+ super(ScraperImplementation, self).__init__()
+
+
+if __name__ == '__main__':
+ print 'Subclass:', issubclass(ScraperImplementation,
+ genericScrapers.GenericHTMLDisclogScraper)
+ print 'Instance:', isinstance(ScraperImplementation(),
+ genericScrapers.GenericHTMLDisclogScraper)
+ ScraperImplementation().doScrape()
+
--- a/documents/scrapers/795e7a8afb39a420360aa207b0cb1306.txt
+++ /dev/null
@@ -1,2 +1,1 @@
-no disclog
--- /dev/null
+++ b/documents/scrapers/7b39ce7f362a0af9a711eaf223943eea.py
@@ -1,1 +1,19 @@
+import sys
+import os
+sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
+import genericScrapers
+
+class ScraperImplementation(genericScrapers.GenericHTMLDisclogScraper):
+
+ def __init__(self):
+ super(ScraperImplementation, self).__init__()
+
+
+if __name__ == '__main__':
+ print 'Subclass:', issubclass(ScraperImplementation,
+ genericScrapers.GenericHTMLDisclogScraper)
+ print 'Instance:', isinstance(ScraperImplementation(),
+ genericScrapers.GenericHTMLDisclogScraper)
+ ScraperImplementation().doScrape()
+
--- a/documents/scrapers/7b39ce7f362a0af9a711eaf223943eea.txt
+++ /dev/null
@@ -1,2 +1,1 @@
-no disclog
--- /dev/null
+++ b/documents/scrapers/7c6adc1d41cf029bf1a0959e5156477a.py
@@ -1,1 +1,51 @@
+import sys
+import os
+sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
+import genericScrapers
+import scrape
+from datetime import date
+from pyquery import PyQuery as pq
+from lxml import etree
+import urllib
+import dateutil
+from dateutil.parser import *
+class ACMADisclogScraper(genericScrapers.GenericDisclogScraper):
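+ """ Scraper for ACMA-style disclosure logs: each table on the page is
+     one log entry, with the title in the table head and the date,
+     descriptions, link, deletion date and notes in the body rows. """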
+
+ def doScrape(self):
+ foidocsdb = scrape.couch['disclosr-foidocuments']
+ (url, mime_type, content) = scrape.fetchURL(scrape.docsdb,
+ self.getURL(), "foidocuments", self.getAgencyID())
+
+ d = pq(content.read())
+ d.make_links_absolute(base_url=self.getURL())
+ for table in d('table').items():
+ title = table('thead').text()
+ print title
+ (idate, descA, descB, link, deldate, notes) = table('tbody tr').map(lambda i, e: pq(e).children().eq(1).text())
+ links = table('a').map(lambda i, e: pq(e).attr('href'))
+ description = descA + " " + descB
+ edate = parse(idate[:12], dayfirst=True, fuzzy=True).strftime("%Y-%m-%d")
+ print edate
+ dochash = scrape.mkhash(self.remove_control_chars(title))
+ doc = foidocsdb.get(dochash)
+ if doc is None:
+ print "saving " + dochash
+ doc = {'_id': dochash, 'agencyID': self.getAgencyID()
+ , 'url': self.getURL(), 'docID': dochash,
+ "links": links,
+ "date": edate, "notes": notes, "title": title, "description": description}
+ #print doc
+ foidocsdb.save(doc)
+ else:
+ print "already saved"
+
+
+if __name__ == '__main__':
+ print 'Subclass:', issubclass(ACMADisclogScraper,
+ genericScrapers.GenericDisclogScraper)
+ print 'Instance:', isinstance(ACMADisclogScraper(),
+ genericScrapers.GenericDisclogScraper)
+ ACMADisclogScraper().doScrape()
+
--- a/documents/scrapers/7c6adc1d41cf029bf1a0959e5156477a.txt
+++ /dev/null
@@ -1,1 +1,1 @@
-acma style
+
--- /dev/null
+++ b/documents/scrapers/9f4815bfdcb918a036e4bb43a30f8d77.py
@@ -1,1 +1,19 @@
+import sys
+import os
+sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
+import genericScrapers
+
+class ScraperImplementation(genericScrapers.GenericHTMLDisclogScraper):
+
+ def __init__(self):
+ super(ScraperImplementation, self).__init__()
+
+
+if __name__ == '__main__':
+ print 'Subclass:', issubclass(ScraperImplementation,
+ genericScrapers.GenericHTMLDisclogScraper)
+ print 'Instance:', isinstance(ScraperImplementation(),
+ genericScrapers.GenericHTMLDisclogScraper)
+ ScraperImplementation().doScrape()
+
--- a/documents/scrapers/9f4815bfdcb918a036e4bb43a30f8d77.txt
+++ /dev/null
@@ -1,1 +1,1 @@
-no disclog
+
--- /dev/null
+++ b/documents/scrapers/bb96fe4065afb7e0872136dd657f9369.py
@@ -1,1 +1,19 @@
+import sys
+import os
+sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
+import genericScrapers
+
+class ScraperImplementation(genericScrapers.GenericHTMLDisclogScraper):
+
+ def __init__(self):
+ super(ScraperImplementation, self).__init__()
+
+
+if __name__ == '__main__':
+ print 'Subclass:', issubclass(ScraperImplementation,
+ genericScrapers.GenericHTMLDisclogScraper)
+ print 'Instance:', isinstance(ScraperImplementation(),
+ genericScrapers.GenericHTMLDisclogScraper)
+ ScraperImplementation().doScrape()
+
--- a/documents/scrapers/bb96fe4065afb7e0872136dd657f9369.txt
+++ /dev/null
@@ -1,1 +1,1 @@
-no disclog
+
--- /dev/null
+++ b/documents/scrapers/bf6e587f166040b63681cd2ff76fbfdf.py
@@ -1,1 +1,19 @@
+import sys
+import os
+sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
+import genericScrapers
+
+class ScraperImplementation(genericScrapers.GenericHTMLDisclogScraper):
+
+ def __init__(self):
+ super(ScraperImplementation, self).__init__()
+
+
+if __name__ == '__main__':
+ print 'Subclass:', issubclass(ScraperImplementation,
+ genericScrapers.GenericHTMLDisclogScraper)
+ print 'Instance:', isinstance(ScraperImplementation(),
+ genericScrapers.GenericHTMLDisclogScraper)
+ ScraperImplementation().doScrape()
+
--- a/documents/scrapers/bf6e587f166040b63681cd2ff76fbfdf.txt
+++ /dev/null
@@ -1,1 +1,1 @@
-no disclog
+
--- a/documents/scrapers/c1302c8d7cbbd911f0d4d8a4128f8079.txt
+++ b/documents/scrapers/c1302c8d7cbbd911f0d4d8a4128f8079.txt
@@ -1,1 +1,1 @@
-uses RET disclog
+parent
--- /dev/null
+++ b/documents/scrapers/cb7f40e3495b682de6eee61bf09c1cfc.py
@@ -1,1 +1,19 @@
+import sys
+import os
+sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
+import genericScrapers
+
+class ScraperImplementation(genericScrapers.GenericHTMLDisclogScraper):
+
+ def __init__(self):
+ super(ScraperImplementation, self).__init__()
+
+
+if __name__ == '__main__':
+ print 'Subclass:', issubclass(ScraperImplementation,
+ genericScrapers.GenericHTMLDisclogScraper)
+ print 'Instance:', isinstance(ScraperImplementation(),
+ genericScrapers.GenericHTMLDisclogScraper)
+ ScraperImplementation().doScrape()
+
--- a/documents/scrapers/cb7f40e3495b682de6eee61bf09c1cfc.txt
+++ /dev/null
@@ -1,1 +1,1 @@
-no disclog
+
--- /dev/null
+++ b/documents/scrapers/d72744fb1e5d6e87af9a5ea16cc27fa5.py
@@ -1,1 +1,50 @@
+import sys
+import os
+sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
+import genericScrapers
+import scrape
+from datetime import date
+from pyquery import PyQuery as pq
+from lxml import etree
+import urllib
+import dateutil
+from dateutil.parser import *
+class ACMADisclogScraper(genericScrapers.GenericDisclogScraper):
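+ """ Variant ACMA-style scraper for pages that publish entries as
+     .item-list blocks (an h3 heading plus a ul of details) rather
+     than tables. """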
+
+ def doScrape(self):
+ foidocsdb = scrape.couch['disclosr-foidocuments']
+ (url, mime_type, content) = scrape.fetchURL(scrape.docsdb,
+ self.getURL(), "foidocuments", self.getAgencyID())
+
+ d = pq(content.read())
+ d.make_links_absolute(base_url=self.getURL())
+ for item in d('.item-list').items():
+ title = item('h3').text()
+ print title
+ links = item('a').map(lambda i, e: pq(e).attr('href'))
+ description = item('ul').text()
+ edate = date.today().strftime("%Y-%m-%d")
+ print edate
+ dochash = scrape.mkhash(self.remove_control_chars(title))
+ doc = foidocsdb.get(dochash)
+ if doc is None:
+ print "saving " + dochash
+ edate = date.today().strftime("%Y-%m-%d")
+ doc = {'_id': dochash, 'agencyID': self.getAgencyID()
+ , 'url': self.getURL(), 'docID': dochash,
+ "links": links,
+ "date": edate, "title": title, "description": description}
+ #print doc
+ foidocsdb.save(doc)
+ else:
+ print "already saved"
+
+
+if __name__ == '__main__':
+ print 'Subclass:', issubclass(ACMADisclogScraper,
+ genericScrapers.GenericDisclogScraper)
+ print 'Instance:', isinstance(ACMADisclogScraper(),
+ genericScrapers.GenericDisclogScraper)
+ ACMADisclogScraper().doScrape()
+
--- /dev/null
+++ b/documents/scrapers/e770921522a49dc77de208cc724ce134.py
@@ -1,1 +1,19 @@
+import sys
+import os
+sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
+import genericScrapers
+
+class ScraperImplementation(genericScrapers.GenericHTMLDisclogScraper):
+
+ def __init__(self):
+ super(ScraperImplementation, self).__init__()
+
+
+if __name__ == '__main__':
+ print 'Subclass:', issubclass(ScraperImplementation,
+ genericScrapers.GenericHTMLDisclogScraper)
+ print 'Instance:', isinstance(ScraperImplementation(),
+ genericScrapers.GenericHTMLDisclogScraper)
+ ScraperImplementation().doScrape()
+
--- a/documents/scrapers/e770921522a49dc77de208cc724ce134.txt
+++ /dev/null
@@ -1,1 +1,1 @@
-no disclog
+
--- /dev/null
+++ b/documents/scrapers/f189459fc43f941e0d4ecfba52c666f3.py
@@ -1,1 +1,19 @@
+import sys
+import os
+sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
+import genericScrapers
+
+class ScraperImplementation(genericScrapers.GenericHTMLDisclogScraper):
+
+ def __init__(self):
+ super(ScraperImplementation, self).__init__()
+
+
+if __name__ == '__main__':
+ print 'Subclass:', issubclass(ScraperImplementation,
+ genericScrapers.GenericHTMLDisclogScraper)
+ print 'Instance:', isinstance(ScraperImplementation(),
+ genericScrapers.GenericHTMLDisclogScraper)
+ ScraperImplementation().doScrape()
+
--- a/documents/scrapers/f189459fc43f941e0d4ecfba52c666f3.txt
+++ /dev/null
@@ -1,2 +1,1 @@
-no disclog
--- a/documents/sitemap.xml.php
+++ b/documents/sitemap.xml.php
@@ -10,10 +10,18 @@
if (strpos($file, ".php") !== false && $file != "index.php" && $file != "sitemap.xml.php")
echo "