From: Maxious
Date: Sun, 09 Dec 2012 23:07:04 +0000
Subject: add innovation scraper
X-Git-Url: https://maxious.lambdacomplex.org/git/?p=disclosr.git&a=commitdiff&h=d293099fa3381c9438e54748102c6feb0ad8a90c

---
add innovation scraper

Former-commit-id: 80558a9217d1bcad0766200d0e1d42aa022ff501
---

--- a/couchdb/settee/src/classes/SetteeDatabase.class.php
+++ b/couchdb/settee/src/classes/SetteeDatabase.class.php
@@ -251,7 +251,7 @@
    *
    * @return void
    */
-  function get_view($design_doc, $view_name, $key = null, $descending = false, $limit = false) {
+  function get_view($design_doc, $view_name, $key = null, $descending = false, $limit = false, $reduce = null) {
     $id = "_design/" . urlencode($design_doc);
     $view_name = urlencode($view_name);
     $id .= "/_view/$view_name";
@@ -269,6 +269,13 @@
     if ($descending) {
       $data .= "&descending=true";
     }
+    if ($reduce != null) {
+      if ($reduce == true) {
+        $data .= "&reduce=true";
+      } else {
+        $data .= "&reduce=false";
+      }
+    }
     if ($limit) {
       $data .= "&limit=" . $limit;
     }
@@ -281,9 +288,11 @@
     }
     $full_uri = $this->dbname . "/" . $this->safe_urlencode($id);
+    $full_uri = str_replace("%253Fgroup%253D", "?group=", $full_uri);
     $full_uri = str_replace("%253Flimit%253D", "?limit=", $full_uri);
     $ret = $this->rest_client->http_get($full_uri, $data);
+    //$ret['decoded'] = str_replace("?k", "&k", $ret['decoded']);
     return $ret['decoded'];
   }

--- /dev/null
+++ b/documents/agency.php
@@ -1,1 +1,41 @@
+get_db('disclosr-agencies');
+$idtoname = Array();
+foreach ($agenciesdb->get_view("app", "byCanonicalName")->rows as $row) {
+    $idtoname[$row->id] = trim($row->value->name);
+}
+$foidocsdb = $server->get_db('disclosr-foidocuments');
+
+include_header_documents((isset($_REQUEST['id']) ? $idtoname[$_REQUEST['id']] : 'Entries by Agency'));
+$endkey = (isset($_REQUEST['end_key']) ? $_REQUEST['end_key'] : '9999-99-99');
+?>
+Read all the information released by Australian Federal Government agencies under the FOI Act in one place!
+RSS Icon All Agencies RSS Feed
+get_view("app", "byAgencyID", $_REQUEST['id'], false, false, false)->rows;
+    foreach ($rows as $row) {
+        //print_r($rows);
+        echo displayLogEntry($row, $idtoname);
+        if (!isset($startkey))
+            $startkey = $row->key;
+        $endkey = $row->key;
+    }
+} else {
+    $rows = $foidocsdb->get_view("app", "byAgencyID?group=true", null, false, false, true)->rows;
+    if ($rows) {
+        foreach ($rows as $row) {
+            echo '' . $idtoname[$row->key] . " (" . $row->value . " records)\n";
+        }
+    }
+}
+} catch (SetteeRestClientException $e) {
+    setteErrorHandler($e);
+}
+echo "next page ";
+include_footer_documents();
+?>

--- a/documents/charts.php
+++ b/documents/charts.php
@@ -27,7 +27,7 @@
-    $rows = $foidocsdb->get_view("app", "byDateMonthYear?group=true")->rows;
+    $rows = $foidocsdb->get_view("app", "byDateMonthYear?group=true", null, false, false, true)->rows;
     $dataValues = Array();
@@ -110,7 +110,7 @@
 }
-    $rows = $foidocsdb->get_view("app", "byAgencyID?group=true")->rows;
+    $rows = $foidocsdb->get_view("app", "byAgencyID?group=true", null, false, false, true)->rows;
     $dataValues = Array();
--- /dev/null
+++ b/documents/date.php
@@ -1,1 +1,34 @@
+
+Read all the information released by Australian Federal Government agencies under the FOI Act in one place!
+RSS Icon All Agencies RSS Feed
+get_db('disclosr-agencies');
+
+$idtoname = Array();
+foreach ($agenciesdb->get_view("app", "byCanonicalName")->rows as $row) {
+    $idtoname[$row->id] = trim($row->value->name);
+}
+$foidocsdb = $server->get_db('disclosr-foidocuments');
+try {
+    $rows = $foidocsdb->get_view("app", "byDate", Array($endkey, '0000-00-00'), true, 20)->rows;
+    if ($rows) {
+        foreach ($rows as $key => $row) {
+            echo displayLogEntry($row, $idtoname);
+            if (!isset($startkey)) $startkey = $row->key;
+            $endkey = $row->key;
+        }
+    }
+} catch (SetteeRestClientException $e) {
+    setteErrorHandler($e);
+}
+echo "next page ";
+*/
+include_footer_documents();
+?>

--- a/documents/genericScrapers.py
+++ b/documents/genericScrapers.py
@@ -13,6 +13,8 @@
 from datetime import *
 import codecs

+import difflib
+
 from StringIO import StringIO
 from pdfminer.pdfparser import PDFDocument, PDFParser
@@ -49,6 +51,31 @@
         """ do the scraping """
         return

+class GenericHTMLDisclogScraper(GenericDisclogScraper):
+
+    def doScrape(self):
+        foidocsdb = scrape.couch['disclosr-foidocuments']
+        (url, mime_type, rcontent) = scrape.fetchURL(scrape.docsdb,
+            self.getURL(), "foidocuments", self.getAgencyID())
+        content = rcontent.read()
+        dochash = scrape.mkhash(content)
+        doc = foidocsdb.get(dochash)
+        if doc is None:
+            print "saving " + dochash
+            description = "This log may have updated but as it was not in a table last time we viewed it, we cannot extract what has changed. Please refer to the agency's website Disclosure Log to see the most recent entries"
+            last_attach = scrape.getLastAttachment(scrape.docsdb, self.getURL())
+            if last_attach != None:
+                html_diff = difflib.HtmlDiff()
+                description = description + "\nChanges: "
+                description = description + html_diff.make_table(last_attach.read().split('\n'),
+                    content.split('\n'))
+            edate = date.today().strftime("%Y-%m-%d")
+            doc = {'_id': dochash, 'agencyID': self.getAgencyID()
+                , 'url': self.getURL(), 'docID': dochash,
+                "date": edate, "title": "Disclosure Log Updated", "description": description}
+            foidocsdb.save(doc)
+        else:
+            print "already saved"

 class GenericPDFDisclogScraper(GenericDisclogScraper):
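GenericHTMLDisclogScraper cannot parse entries out of an arbitrary page, so it stores a hash of the whole page and, when that hash changes, embeds a difflib HTML diff of the old and new snapshots in the entry description. The relevant standard-library call, shown against a tiny fixture:

    # Sketch of the change-detection step above; standard library only.
    import difflib

    old_page = "FOI 1\nFOI 2\n"
    new_page = "FOI 1\nFOI 2\nFOI 3 released 9 December 2012\n"

    # make_table() returns an HTML <table> marking added/changed/removed lines,
    # which the scraper appends to the stored description.
    print difflib.HtmlDiff().make_table(old_page.split('\n'), new_page.split('\n'))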
--- a/documents/rss.xml.php
+++ b/documents/rss.xml.php
@@ -8,21 +8,28 @@
 //Creating an instance of FeedWriter class.
 $TestFeed = new RSS2FeedWriter();
 //Setting the channel elements
-//Use wrapper functions for common channelelements
-$TestFeed->setTitle('disclosurelo.gs Newest Entries - All');
-$TestFeed->setLink('http://disclosurelo.gs/rss.xml.php');
-$TestFeed->setDescription('disclosurelo.gs Newest Entries - All Agencies');
-$TestFeed->setChannelElement('language', 'en-us');
-$TestFeed->setChannelElement('pubDate', date(DATE_RSS, time()));
-
-//Retriving informations from database
+////Retriving informations from database
 $idtoname = Array();
 $agenciesdb = $server->get_db('disclosr-agencies');
 foreach ($agenciesdb->get_view("app", "byCanonicalName")->rows as $row) {
     $idtoname[$row->id] = trim($row->value->name);
 }
 $foidocsdb = $server->get_db('disclosr-foidocuments');
-$rows = $foidocsdb->get_view("app", "byDate", Array('9999-99-99', '0000-00-00', 50), true)->rows;
+if (isset($_REQUEST['id'])) {
+    $rows = $foidocsdb->get_view("app", "byAgencyID", $_REQUEST['id'], false, false, false)->rows;
+    $title = $idtoname[$_REQUEST['id']];
+} else {
+    $rows = $foidocsdb->get_view("app", "byDate", Array('9999-99-99', '0000-00-00', 50), true)->rows;
+    $title = 'All Agencies';
+}
+//Use wrapper functions for common channelelements
+$TestFeed->setTitle('disclosurelo.gs Newest Entries - '.$title);
+$TestFeed->setLink('http://disclosurelo.gs/rss.xml.php'.(isset($_REQUEST['id'])? '?id='.$_REQUEST['id'] : ''));
+$TestFeed->setDescription('disclosurelo.gs Newest Entries - '.$title);
+$TestFeed->setChannelElement('language', 'en-us');
+$TestFeed->setChannelElement('pubDate', date(DATE_RSS, time()));
+
+
 //print_r($rows);
 foreach ($rows as $row) {
     //Create an empty FeedItem

--- a/documents/scrape.py
+++ b/documents/scrape.py
@@ -76,6 +76,16 @@
         addinfourl = urllib2.addinfourl(fp, headers, req.get_full_url())
         addinfourl.code = code
         return addinfourl
+
+def getLastAttachment(docsdb, url):
+    hash = mkhash(url)
+    doc = docsdb.get(hash)
+    if doc != None:
+        last_attachment_fname = doc["_attachments"].keys()[-1]
+        last_attachment = docsdb.get_attachment(doc, last_attachment_fname)
+        return last_attachment
+    else:
+        return None

 def fetchURL(docsdb, url, fieldName, agencyID, scrape_again=True):
     url = canonurl(url)

--- a/documents/scrapers/0049d35216493c545ef5f7f000e6b252.py
+++ b/documents/scrapers/0049d35216493c545ef5f7f000e6b252.py
@@ -12,8 +12,8 @@
 if __name__ == '__main__':
     print 'Subclass:', issubclass(ScraperImplementation,
-        genericScrapers.GenericOAICDisclogScraper)
+        genericScrapers.GenericPDFDisclogScraper)
     print 'Instance:', isinstance(ScraperImplementation(),
-        genericScrapers.GenericOAICDisclogScraper)
+        genericScrapers.GenericPDFDisclogScraper)
     ScraperImplementation().doScrape()

--- /dev/null
+++ b/documents/scrapers/0ae822d1a748e60d90f0b79b97d5a3e5.py
@@ -1,1 +1,19 @@
+import sys
+import os
+sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
+import genericScrapers
+
+class ScraperImplementation(genericScrapers.GenericHTMLDisclogScraper):
+
+    def __init__(self):
+        super(ScraperImplementation, self).__init__()
+
+
+if __name__ == '__main__':
+    print 'Subclass:', issubclass(ScraperImplementation,
+        genericScrapers.GenericHTMLDisclogScraper)
+    print 'Instance:', isinstance(ScraperImplementation(),
+        genericScrapers.GenericHTMLDisclogScraper)
+    ScraperImplementation().doScrape()
+
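getLastAttachment() retrieves the page snapshot stored by a previous fetch via the couchdb-python attachment API. A standalone sketch, assuming the snapshots live in a disclosr-documents database keyed by mkhash(url); note that _attachments is a plain dict, so keys()[-1] is not guaranteed to be the newest upload:

    # Sketch: fetch the previously stored snapshot of a scraped page.
    import couchdb

    docsdb = couchdb.Server('http://localhost:5984/')['disclosr-documents']

    def last_snapshot(docsdb, dochash):
        doc = docsdb.get(dochash)
        if doc is None or '_attachments' not in doc:
            return None
        fname = doc['_attachments'].keys()[-1]    # dict order, not upload order
        return docsdb.get_attachment(doc, fname)  # file-like object, or None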
--- /dev/null
+++ b/documents/scrapers/31685505438d393f45a90f442b8fa27f.py
@@ -1,1 +1,19 @@
+import sys
+import os
+sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
+import genericScrapers
+
+class ScraperImplementation(genericScrapers.GenericPDFDisclogScraper):
+
+    def __init__(self):
+        super(ScraperImplementation, self).__init__()
+
+
+if __name__ == '__main__':
+    print 'Subclass:', issubclass(ScraperImplementation,
+        genericScrapers.GenericPDFDisclogScraper)
+    print 'Instance:', isinstance(ScraperImplementation(),
+        genericScrapers.GenericPDFDisclogScraper)
+    ScraperImplementation().doScrape()
+
--- a/documents/scrapers/31685505438d393f45a90f442b8fa27f.txt
+++ /dev/null
@@ -1,2 +1,1 @@
-pdf

--- /dev/null
+++ b/documents/scrapers/3e2f110af49d62833a835bd257771ffb.py
@@ -1,1 +1,19 @@
+import sys
+import os
+sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
+import genericScrapers
+
+class ScraperImplementation(genericScrapers.GenericHTMLDisclogScraper):
+
+    def __init__(self):
+        super(ScraperImplementation, self).__init__()
+
+
+if __name__ == '__main__':
+    print 'Subclass:', issubclass(ScraperImplementation,
+        genericScrapers.GenericHTMLDisclogScraper)
+    print 'Instance:', isinstance(ScraperImplementation(),
+        genericScrapers.GenericHTMLDisclogScraper)
+    ScraperImplementation().doScrape()
+
--- a/documents/scrapers/3e2f110af49d62833a835bd257771ffb.txt
+++ /dev/null
@@ -1,2 +1,1 @@
-no disclog

--- /dev/null
+++ b/documents/scrapers/41a166419503bb50e410c58be54c102f.py
@@ -1,1 +1,27 @@
+import sys,os
+sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
+import genericScrapers
+import scrape
+from bs4 import BeautifulSoup
+from datetime import date
+
+#http://www.doughellmann.com/PyMOTW/abc/
+class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
+    def getTable(self, soup):
+        return soup.find(id="ctl00_MSO_ContentDiv").table
+
+    def getColumns(self, columns):
+        (id, title, description, notes) = columns
+        return (id, title, title, description, notes)
+
+    def getDate(self, content, entry, doc):
+        edate = date.today().strftime("%Y-%m-%d")
+        doc.update({'date': edate})
+        return
+
+    def getColumnCount(self):
+        return 4
+
+if __name__ == '__main__':
+    print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
+    print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
+    ScraperImplementation().doScrape()
+
--- a/documents/scrapers/41a166419503bb50e410c58be54c102f.txt
+++ /dev/null
@@ -1,1 +1,1 @@
-aspx
+

--- /dev/null
+++ b/documents/scrapers/4d2af2dcc72f1703bbf04b13b03720a8.py
@@ -1,1 +1,19 @@
+import sys
+import os
+sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
+import genericScrapers
+
+class ScraperImplementation(genericScrapers.GenericHTMLDisclogScraper):
+
+    def __init__(self):
+        super(ScraperImplementation, self).__init__()
+
+
+if __name__ == '__main__':
+    print 'Subclass:', issubclass(ScraperImplementation,
+        genericScrapers.GenericHTMLDisclogScraper)
+    print 'Instance:', isinstance(ScraperImplementation(),
+        genericScrapers.GenericHTMLDisclogScraper)
+    ScraperImplementation().doScrape()
+
--- a/documents/scrapers/4d2af2dcc72f1703bbf04b13b03720a8.txt
+++ /dev/null
@@ -1,2 +1,1 @@
-no disclog

--- a/documents/scrapers/50601505ef69483121a6d130bb0515e4.txt
+++ b/documents/scrapers/50601505ef69483121a6d130bb0515e4.txt
@@ -1,1 +1,1 @@
-apsc has ACMA style disclog
+ACMA style
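The 41a16641 scraper above only supplies agency-specific pieces; the row loop lives in GenericOAICDisclogScraper, which is not part of this diff. Roughly, and purely as an assumed sketch of that contract, the base class does something like this with the overrides (the helper name scrape_rows is hypothetical):

    # Assumed shape of the base-class loop the overrides above plug into.
    from bs4 import BeautifulSoup

    def scrape_rows(scraper, html):
        soup = BeautifulSoup(html)
        table = scraper.getTable(soup)          # agency-specific table locator
        for row in table.find_all('tr'):
            columns = row.find_all('td')
            if len(columns) == scraper.getColumnCount():
                # getColumns() reorders the cells into a standard tuple; here
                # the title cell doubles as the docID and getDate() stamps today
                (id, date, title, description, notes) = scraper.getColumns(columns)
                print id.string, title.string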
--- /dev/null
+++ b/documents/scrapers/525c3953187da08cd702359b2fc2997f.py
@@ -1,1 +1,19 @@
+import sys
+import os
+sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
+import genericScrapers
+
+class ScraperImplementation(genericScrapers.GenericHTMLDisclogScraper):
+
+    def __init__(self):
+        super(ScraperImplementation, self).__init__()
+
+
+if __name__ == '__main__':
+    print 'Subclass:', issubclass(ScraperImplementation,
+        genericScrapers.GenericHTMLDisclogScraper)
+    print 'Instance:', isinstance(ScraperImplementation(),
+        genericScrapers.GenericHTMLDisclogScraper)
+    ScraperImplementation().doScrape()
+
--- a/documents/scrapers/525c3953187da08cd702359b2fc2997f.txt
+++ /dev/null
@@ -1,2 +1,1 @@
-no disclog

--- /dev/null
+++ b/documents/scrapers/627f116dfe42c9f27ad6747be0aa44e2.py
@@ -1,1 +1,19 @@
+import sys
+import os
+sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
+import genericScrapers
+
+class ScraperImplementation(genericScrapers.GenericHTMLDisclogScraper):
+
+    def __init__(self):
+        super(ScraperImplementation, self).__init__()
+
+
+if __name__ == '__main__':
+    print 'Subclass:', issubclass(ScraperImplementation,
+        genericScrapers.GenericHTMLDisclogScraper)
+    print 'Instance:', isinstance(ScraperImplementation(),
+        genericScrapers.GenericHTMLDisclogScraper)
+    ScraperImplementation().doScrape()
+
--- a/documents/scrapers/627f116dfe42c9f27ad6747be0aa44e2.txt
+++ /dev/null
@@ -1,1 +1,1 @@
-no disclog
+

--- /dev/null
+++ b/documents/scrapers/795e7a8afb39a420360aa207b0cb1306.py
@@ -1,1 +1,19 @@
+import sys
+import os
+sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
+import genericScrapers
+
+class ScraperImplementation(genericScrapers.GenericHTMLDisclogScraper):
+
+    def __init__(self):
+        super(ScraperImplementation, self).__init__()
+
+
+if __name__ == '__main__':
+    print 'Subclass:', issubclass(ScraperImplementation,
+        genericScrapers.GenericHTMLDisclogScraper)
+    print 'Instance:', isinstance(ScraperImplementation(),
+        genericScrapers.GenericHTMLDisclogScraper)
+    ScraperImplementation().doScrape()
+
--- a/documents/scrapers/795e7a8afb39a420360aa207b0cb1306.txt
+++ /dev/null
@@ -1,2 +1,1 @@
-no disclog

--- /dev/null
+++ b/documents/scrapers/7b39ce7f362a0af9a711eaf223943eea.py
@@ -1,1 +1,19 @@
+import sys
+import os
+sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
+import genericScrapers
+
+class ScraperImplementation(genericScrapers.GenericHTMLDisclogScraper):
+
+    def __init__(self):
+        super(ScraperImplementation, self).__init__()
+
+
+if __name__ == '__main__':
+    print 'Subclass:', issubclass(ScraperImplementation,
+        genericScrapers.GenericHTMLDisclogScraper)
+    print 'Instance:', isinstance(ScraperImplementation(),
+        genericScrapers.GenericHTMLDisclogScraper)
+    ScraperImplementation().doScrape()
+
--- a/documents/scrapers/7b39ce7f362a0af9a711eaf223943eea.txt
+++ /dev/null
@@ -1,2 +1,1 @@
-no disclog
--- /dev/null
+++ b/documents/scrapers/7c6adc1d41cf029bf1a0959e5156477a.py
@@ -1,1 +1,51 @@
+import sys
+import os
+sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
+import genericScrapers
+import scrape
+from datetime import date
+from pyquery import PyQuery as pq
+from lxml import etree
+import urllib
+import dateutil
+from dateutil.parser import *
+
+class ACMADisclogScraper(genericScrapers.GenericDisclogScraper):
+
+    def doScrape(self):
+        foidocsdb = scrape.couch['disclosr-foidocuments']
+        (url, mime_type, content) = scrape.fetchURL(scrape.docsdb,
+            self.getURL(), "foidocuments", self.getAgencyID())
+
+        d = pq(content.read())
+        d.make_links_absolute(base_url=self.getURL())
+        for table in d('table').items():
+            title = table('thead').text()
+            print title
+            (idate, descA, descB, link, deldate, notes) = table('tbody tr').map(lambda i, e: pq(e).children().eq(1).text())
+            links = table('a').map(lambda i, e: pq(e).attr('href'))
+            description = descA + " " + descB
+            edate = parse(idate[:12], dayfirst=True, fuzzy=True).strftime("%Y-%m-%d")
+            print edate
+            dochash = scrape.mkhash(self.remove_control_chars(title))
+            doc = foidocsdb.get(dochash)
+            if doc is None:
+                print "saving " + dochash
+                edate = date.today().strftime("%Y-%m-%d")
+                doc = {'_id': dochash, 'agencyID': self.getAgencyID()
+                    , 'url': self.getURL(), 'docID': dochash,
+                    "links": links,
+                    "date": edate, "notes": notes, "title": title, "description": description}
+                #print doc
+                foidocsdb.save(doc)
+            else:
+                print "already saved"
+
+
+if __name__ == '__main__':
+    print 'Subclass:', issubclass(ACMADisclogScraper,
+        genericScrapers.GenericDisclogScraper)
+    print 'Instance:', isinstance(ACMADisclogScraper(),
+        genericScrapers.GenericDisclogScraper)
+    ACMADisclogScraper().doScrape()
+
--- a/documents/scrapers/7c6adc1d41cf029bf1a0959e5156477a.txt
+++ /dev/null
@@ -1,1 +1,1 @@
-acma style
+

--- /dev/null
+++ b/documents/scrapers/9f4815bfdcb918a036e4bb43a30f8d77.py
@@ -1,1 +1,19 @@
+import sys
+import os
+sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
+import genericScrapers
+
+class ScraperImplementation(genericScrapers.GenericHTMLDisclogScraper):
+
+    def __init__(self):
+        super(ScraperImplementation, self).__init__()
+
+
+if __name__ == '__main__':
+    print 'Subclass:', issubclass(ScraperImplementation,
+        genericScrapers.GenericHTMLDisclogScraper)
+    print 'Instance:', isinstance(ScraperImplementation(),
+        genericScrapers.GenericHTMLDisclogScraper)
+    ScraperImplementation().doScrape()
+
--- a/documents/scrapers/9f4815bfdcb918a036e4bb43a30f8d77.txt
+++ /dev/null
@@ -1,1 +1,1 @@
-no disclog
+

--- /dev/null
+++ b/documents/scrapers/bb96fe4065afb7e0872136dd657f9369.py
@@ -1,1 +1,19 @@
+import sys
+import os
+sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
+import genericScrapers
+
+class ScraperImplementation(genericScrapers.GenericHTMLDisclogScraper):
+
+    def __init__(self):
+        super(ScraperImplementation, self).__init__()
+
+
+if __name__ == '__main__':
+    print 'Subclass:', issubclass(ScraperImplementation,
+        genericScrapers.GenericHTMLDisclogScraper)
+    print 'Instance:', isinstance(ScraperImplementation(),
+        genericScrapers.GenericHTMLDisclogScraper)
+    ScraperImplementation().doScrape()
+
--- a/documents/scrapers/bb96fe4065afb7e0872136dd657f9369.txt
+++ /dev/null
@@ -1,1 +1,1 @@
-no disclog
+

--- /dev/null
+++ b/documents/scrapers/bf6e587f166040b63681cd2ff76fbfdf.py
@@ -1,1 +1,19 @@
+import sys
+import os
+sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
+import genericScrapers
+
+class ScraperImplementation(genericScrapers.GenericHTMLDisclogScraper):
+
+    def __init__(self):
+        super(ScraperImplementation, self).__init__()
+
+
+if __name__ == '__main__':
+    print 'Subclass:', issubclass(ScraperImplementation,
+        genericScrapers.GenericHTMLDisclogScraper)
+    print 'Instance:', isinstance(ScraperImplementation(),
+        genericScrapers.GenericHTMLDisclogScraper)
+    ScraperImplementation().doScrape()
+
--- a/documents/scrapers/bf6e587f166040b63681cd2ff76fbfdf.txt
+++ /dev/null
@@ -1,1 +1,1 @@
-no disclog
+

--- a/documents/scrapers/c1302c8d7cbbd911f0d4d8a4128f8079.txt
+++ b/documents/scrapers/c1302c8d7cbbd911f0d4d8a4128f8079.txt
@@ -1,1 +1,1 @@
-uses RET disclog
+parent
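The ACMA scraper above leans on two pyquery idioms: .items() to iterate matches as PyQuery objects, and .map() to collect a value per matched element. Demonstrated against a small inline fixture rather than the live ACMA page:

    # Sketch of the pyquery calls used by ACMADisclogScraper.
    from pyquery import PyQuery as pq

    html = """<table><thead><tr><th>Disclosure log</th></tr></thead>
    <tbody><tr><td>1</td><td>1 December 2012</td></tr></tbody></table>"""

    d = pq(html)
    for table in d('table').items():    # yields each match as a PyQuery object
        print table('thead').text()     # "Disclosure log"
        # second <td> of every body row, returned as a plain list of strings
        print table('tbody tr').map(lambda i, e: pq(e).children().eq(1).text())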
--- /dev/null
+++ b/documents/scrapers/cb7f40e3495b682de6eee61bf09c1cfc.py
@@ -1,1 +1,19 @@
+import sys
+import os
+sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
+import genericScrapers
+
+class ScraperImplementation(genericScrapers.GenericHTMLDisclogScraper):
+
+    def __init__(self):
+        super(ScraperImplementation, self).__init__()
+
+
+if __name__ == '__main__':
+    print 'Subclass:', issubclass(ScraperImplementation,
+        genericScrapers.GenericHTMLDisclogScraper)
+    print 'Instance:', isinstance(ScraperImplementation(),
+        genericScrapers.GenericHTMLDisclogScraper)
+    ScraperImplementation().doScrape()
+
--- a/documents/scrapers/cb7f40e3495b682de6eee61bf09c1cfc.txt
+++ /dev/null
@@ -1,1 +1,1 @@
-no disclog
+

--- /dev/null
+++ b/documents/scrapers/d72744fb1e5d6e87af9a5ea16cc27fa5.py
@@ -1,1 +1,49 @@
+import sys
+import os
+sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
+import genericScrapers
+import scrape
+from datetime import date
+from pyquery import PyQuery as pq
+from lxml import etree
+import urllib
+import dateutil
+from dateutil.parser import *
+
+class ACMADisclogScraper(genericScrapers.GenericDisclogScraper):
+
+    def doScrape(self):
+        foidocsdb = scrape.couch['disclosr-foidocuments']
+        (url, mime_type, content) = scrape.fetchURL(scrape.docsdb,
+            self.getURL(), "foidocuments", self.getAgencyID())
+
+        d = pq(content.read())
+        d.make_links_absolute(base_url=self.getURL())
+        for item in d('.item-list').items():
+            title = item('h3').text()
+            print title
+            links = item('a').map(lambda i, e: pq(e).attr('href'))
+            description = title = item('ul').text()
+            edate = date.today().strftime("%Y-%m-%d")
+            print edate
+            dochash = scrape.mkhash(self.remove_control_chars(title))
+            doc = foidocsdb.get(dochash)
+            if doc is None:
+                print "saving " + dochash
+                doc = {'_id': dochash, 'agencyID': self.getAgencyID()
+                    , 'url': self.getURL(), 'docID': dochash,
+                    "links": links,
+                    "date": edate, "title": title, "description": description}
+                #print doc
+                foidocsdb.save(doc)
+            else:
+                print "already saved"
+
+
+if __name__ == '__main__':
+    print 'Subclass:', issubclass(ACMADisclogScraper,
+        genericScrapers.GenericDisclogScraper)
+    print 'Instance:', isinstance(ACMADisclogScraper(),
+        genericScrapers.GenericDisclogScraper)
+    ACMADisclogScraper().doScrape()
+
--- a/documents/scrapers/d72744fb1e5d6e87af9a5ea16cc27fa5.txt
+++ /dev/null
@@ -1,1 +1,1 @@
-acma style
+

--- /dev/null
+++ b/documents/scrapers/e770921522a49dc77de208cc724ce134.py
@@ -1,1 +1,19 @@
+import sys
+import os
+sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
+import genericScrapers
+
+class ScraperImplementation(genericScrapers.GenericHTMLDisclogScraper):
+
+    def __init__(self):
+        super(ScraperImplementation, self).__init__()
+
+
+if __name__ == '__main__':
+    print 'Subclass:', issubclass(ScraperImplementation,
+        genericScrapers.GenericHTMLDisclogScraper)
+    print 'Instance:', isinstance(ScraperImplementation(),
+        genericScrapers.GenericHTMLDisclogScraper)
+    ScraperImplementation().doScrape()
+
--- a/documents/scrapers/e770921522a49dc77de208cc724ce134.txt
+++ /dev/null
@@ -1,1 +1,1 @@
-no disclog
+
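Both ACMA-style scrapers deduplicate on scrape.mkhash() of the cleaned entry title, so re-running over an unchanged log saves nothing new. mkhash() itself is not in this diff; a SHA-1 stand-in is assumed here purely for illustration:

    # Illustrative stand-in for scrape.mkhash(); the real helper may differ.
    import hashlib

    def mkhash(content):
        return hashlib.sha1(content).hexdigest()

    dochash = mkhash("Freedom of information disclosure log")
    # Entries are saved with _id = dochash, so foidocsdb.get(dochash)
    # returning a document means this entry was already scraped.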
--- /dev/null
+++ b/documents/scrapers/f189459fc43f941e0d4ecfba52c666f3.py
@@ -1,1 +1,19 @@
+import sys
+import os
+sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
+import genericScrapers
+
+class ScraperImplementation(genericScrapers.GenericHTMLDisclogScraper):
+
+    def __init__(self):
+        super(ScraperImplementation, self).__init__()
+
+
+if __name__ == '__main__':
+    print 'Subclass:', issubclass(ScraperImplementation,
+        genericScrapers.GenericHTMLDisclogScraper)
+    print 'Instance:', isinstance(ScraperImplementation(),
+        genericScrapers.GenericHTMLDisclogScraper)
+    ScraperImplementation().doScrape()
+
--- a/documents/scrapers/f189459fc43f941e0d4ecfba52c666f3.txt
+++ /dev/null
@@ -1,2 +1,1 @@
-no disclog

--- a/documents/sitemap.xml.php
+++ b/documents/sitemap.xml.php
@@ -10,10 +10,18 @@
     if (strpos($file, ".php") !== false && $file != "index.php" && $file != "sitemap.xml.php")
         echo " " . local_url() . "$file0.6\n";
 }
-
-$db = $server->get_db('disclosr-foidocuments');
+$agenciesdb = $server->get_db('disclosr-agencies');
 try {
-    $rows = $db->get_view("app", "all")->rows;
+    $rows = $agenciesdb->get_view("app", "byCanonicalName")->rows;
+    foreach ($rows as $row) {
+        echo '' . local_url() . 'agency.php?id=' . $row->value->_id . "0.3\n";
+    }
+} catch (SetteeRestClientException $e) {
+    setteErrorHandler($e);
+}
+$foidocsdb = $server->get_db('disclosr-foidocuments');
+try {
+    $rows = $foidocsdb->get_view("app", "all")->rows;
     foreach ($rows as $row) {
         echo '' . local_url() . 'view.php?id=' . $row->value->_id . "0.3\n";
     }
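The sitemap now walks the byCanonicalName view and emits one agency.php?id=... entry per agency. A rough Python equivalent of that new loop, assuming couchdb-python, a local CouchDB, and the production hostname:

    # Sketch of the per-agency sitemap entries added above.
    import couchdb

    agenciesdb = couchdb.Server('http://localhost:5984/')['disclosr-agencies']
    for row in agenciesdb.view('app/byCanonicalName'):
        print 'http://disclosurelo.gs/agency.php?id=' + row.value['_id']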
--- a/documents/template.inc.php
+++ b/documents/template.inc.php
@@ -83,6 +83,7 @@
 }
 function include_footer_documents() {
+    global $ENV;
 ?>
@@ -168,7 +169,7 @@
     if (isset($row->value->links)) {
         $result .= '
Links/Documents
";

--- a/documents/view.php
+++ b/documents/view.php
@@ -1,6 +1,6 @@
 value = $foidocsdb->get($_REQUEST['id']);
 include_header_documents($obj->value->title);
-include_once('../include/common.inc.php');
+
 echo displayLogEntry($obj, $idtoname);
 } catch (SetteeRestClientException $e) {