From: Alex Sadleir
Date: Sun, 17 Nov 2013 10:11:20 +0000
Subject: FOI stats importer fixed
X-Git-Url: https://maxious.lambdacomplex.org/git/?p=disclosr.git&a=commitdiff&h=64737d78435cf1f0d14da3f57e9a3dcbe420e0eb
---
FOI stats importer fixed

Former-commit-id: 81a6a149848e27565b7a7052d2a7ff4e5aaa9310
---
--- a/admin/importOAICFOIrequests.php
+++ b/admin/importOAICFOIrequests.php
@@ -17,13 +17,13 @@
         if ($row >= 1) {
             // print_r($data);
             $name = trim($data[2]);
-            echo "$name<br>";
+//            echo "$name<br>";
             if ($data[0] != "TOTALS" && $data[0] != "") {
                 if (isset($nametoid[$name])) {
                     $id = $nametoid[$name];
                     $timePeriod = $data[0] . "-Q" . $data[1];
-                    echo "$timePeriod<br>";
+//                    echo "$timePeriod<br>";
                     unset($data[0]);
                     unset($data[1]);
                     unset($data[2]);
@@ -38,10 +38,13 @@
                     $result = Array("source" => "http://data.gov.au/dataset/freedom-of-information-quarterly-request-and-review-statistical-data-2011-12/");
                     foreach ($data as $key => $datum) {
                         if ($datum != 0) {
+                            // TODO prefix header with "FOI"
+                            if (isset($stats[$id][$timePeriod][$key])) $datum += $stats[$id][$timePeriod][$key];
                             $result[trim($headers[$key])] = $datum;
                         }
                     }
                     $stats[$id][$timePeriod] = $result;
+                    // TODO merge if already exists
                     //print_r($stats);
                 } else {
                     echo "<br>ERROR NAME MISSING FROM ID LIST<br><br>$row" . PHP_EOL;
@@ -57,21 +60,24 @@
     }
     fclose($handle);
 }
+echo "all stats loaded successfully";
 foreach ($stats as $id => $stat) {
     echo $id . "<br>" . PHP_EOL;
-    $doc = $db->get($id);
+    $doc = $db->get($id);
     echo $doc->name . "<br>" . PHP_EOL;
-    print_r($stat);
-    die();
+//    print_r($stat);
     // print_r($doc);
     $changed = false;
     if (!isset($doc->statistics)) {
         $changed = true;
         $doc->statistics = Array();
+    } else {
+        $doc->statistics = object_to_array($doc->statistics);
     }
     foreach ($stat as $timePeriod => $value) {
-        if (!isset($doc->statistics->foiRequests->$timePeriod)
-            || $doc->statistics->foiRequests->$timePeriod != $value) {
+        if (!isset($doc->statistics["foiRequests"][$timePeriod])
+            || $doc->statistics["foiRequests"][$timePeriod] != $value
+        ) {
             $changed = true;
             $doc->statistics["foiRequests"][$timePeriod] = $value;
         }
@@ -81,6 +87,7 @@
     } else {
         echo "not changed" . "<br>" . PHP_EOL;
     }
+//print_r($doc);die();
 }
 ?>
--- /dev/null
+++ b/admin/massdelete.sh
@@ -1,1 +1,10 @@
+for line in `curl "http://localhost:5984/disclosr-foidocuments/_design/app/_view/byAgencyID?reduce=false&keys=%5B\"5716ce0aacfe98f7d638b7a66b7f1040\"%5D&limit=600" | xargs -L1`; do
+#    echo $line
+    id=`echo $line | grep -Po '_id:.*?[^\\\],' | perl -pe 's/_id://; s/^//; s/,$//'`
+    rev=`echo $line | grep -Po 'rev:.*?[^\\\],'| perl -pe 's/rev://; s/^//; s/,$//'`
+    if [ -n "$id" ]; then
+        echo "curl -X DELETE http://localhost:5984/disclosr-foidocuments/$id?rev=$rev"
+        curl -X DELETE http://localhost:5984/disclosr-foidocuments/$id?rev=$rev
+    fi
+done;
--- a/admin/refreshDesignDoc.php
+++ b/admin/refreshDesignDoc.php
@@ -112,15 +112,25 @@
     }
 }";
 $obj->views->getStatistics->map =
-"function(doc) {
-    if (doc.statistics) {
-        for (var statisticSet in doc.statistics) {
-for (var statisticPeriod in doc.statistics[statisticSet]) {
-    emit([statisticSet,statisticPeriod], doc.statistics[statisticSet][statisticPeriod]['value']);
+"
+function (doc) {
+    if (doc.statistics) {
+        for (var statisticSet in doc.statistics) {
+            for (var statisticPeriod in doc.statistics[statisticSet]) {
+                if (doc.statistics[statisticSet][statisticPeriod]['value']) {
+                    emit([statisticSet, statisticPeriod], doc.statistics[statisticSet][statisticPeriod]['value']);
+                } else {
+                    for (var statisticSubSet in doc.statistics[statisticSet][statisticPeriod]) {
+                        if (statisticSubSet != 'source' && statisticSubSet != 'value') {
+                            emit([statisticSubSet, statisticPeriod], doc.statistics[statisticSet][statisticPeriod][statisticSubSet]);
+                        }
+                    }
+                }
+            }
+        }
+    }
 }
-}
-    }
-}";
+";
 $obj->views->getStatistics->reduce = '_sum';
 // http://stackoverflow.com/questions/646628/javascript-startswith
 $obj->views->score->map = 'if(!String.prototype.startsWith){
--- a/documents/about.php
+++ b/documents/about.php
@@ -5,6 +5,7 @@
 include_once('../include/common.inc.php');
 ?>

About

+Written and managed by Alex Sadleir (maxious [at] lambdacomplex.org)
--- a/documents/agency.php
+++ b/documents/agency.php
@@ -31,6 +31,12 @@
     } else {
         $rows = $foidocsdb->get_view("app", "byAgencyID?group=true", null, false, false, true)->rows;
         if ($rows) {
+function cmp($a, $b)
+{
+    global $idtoname;
+    return strcmp($idtoname[$a->key], $idtoname[$b->key]);
+}
+usort($rows, "cmp");
             foreach ($rows as $row) {
                 echo '' . $idtoname[$row->key] . " (" . $row->value . " records)<br>\n";
             }
--- a/documents/charts.php
+++ b/documents/charts.php
@@ -5,11 +5,20 @@
 $agenciesdb = $server->get_db('disclosr-agencies');
 $idtoname = Array();
+$idtofoirequestssuccessful = Array();
 foreach ($agenciesdb->get_view("app", "byCanonicalName")->rows as $row) {
     $idtoname[$row->id] = trim($row->value->name);
+    $foirequestssuccessful = 0;
+if(isset($row->value->statistics->foiRequests)) {
+    foreach ($row->value->statistics->foiRequests as $statperiod) {
+        $statperiod=object_to_array($statperiod);
+        if (isset($statperiod["Requests for other information granted in full"])) $foirequestssuccessful += $statperiod["Requests for other information granted in full"];
+        if (isset($statperiod["Requests for other information granted in part"])) $foirequestssuccessful += $statperiod["Requests for other information granted in part"];
+    }
+}
+    $idtofoirequestssuccessful[$row->id] =$foirequestssuccessful;
 }
 $foidocsdb = $server->get_db('disclosr-foidocuments');
-
 ?>

Charts

@@ -28,7 +37,6 @@
     get_view("app", "byDateMonthYear?group=true",null, false,false,true)->rows;
-
     $dataValues = Array();
     foreach ($rows as $row) {
@@ -95,6 +103,7 @@
     };
     var d2 = [];
+    var d3 = [];
     var agencylabels = [];
     function agencytrackformatter(obj) {
@@ -112,12 +121,17 @@
     get_view("app", "byAgencyID?group=true",null, false,false,true)->rows;
-
+function cmp($a, $b)
+{
+    return $a->value > $b->value;
+}
+usort($rows, "cmp");
     $dataValues = Array();
     $i = 0;
     foreach ($rows as $row) {
         echo " d2.push([ $row->value,$i]);" . PHP_EOL;
+        echo " d3.push([ ".$idtofoirequestssuccessful[$row->key].",$i]);" . PHP_EOL;
         echo " agencylabels.push(['".str_replace("'","",$idtoname[$row->key])."']);" . PHP_EOL;
         $i++;
@@ -154,7 +168,7 @@
                 autoscaleMargin: 1
             },
             legend: {
-                show: false
+                show: true
             }
         }
     );
--- a/documents/genericScrapers.py
+++ b/documents/genericScrapers.py
@@ -198,6 +198,19 @@
     def getRows(self, table):
         return table.find_all('tr')
 
+    def findColumns(self, row):
+        return row.find_all('td')
+
+    def getDocHash(self, id,date, url):
+        if id.string is None:
+            print "no id, using date as hash"
+            return scrape.mkhash(
+                self.remove_control_chars(
+                    url + (''.join(date.stripped_strings))))
+        else:
+            return scrape.mkhash(
+                self.remove_control_chars(
+                    url + (''.join(id.stripped_strings))))
 
     def getDate(self, content, entry, doc):
         strdate = ''.join(content.stripped_strings).strip()
@@ -234,21 +247,13 @@
             soup = BeautifulSoup(content)
             table = self.getTable(soup)
             for row in self.getRows(table):
-                columns = row.find_all('td')
+                columns = self.findColumns(row)
                 if len(columns) is self.getColumnCount():
                     (id, date, title, description, notes) = self.getColumns(columns)
                     print self.remove_control_chars(
                         ''.join(id.stripped_strings))
-                    if id.string is None:
-                        print "no id, using date as hash"
-                        dochash = scrape.mkhash(
-                            self.remove_control_chars(
-                                url + (''.join(date.stripped_strings))))
-                    else:
-                        dochash = scrape.mkhash(
-                            self.remove_control_chars(
-                                url + (''.join(id.stripped_strings))))
+                    dochash = self.getDocHash(id,date,url)
                     doc = foidocsdb.get(dochash)
                     if doc is None:
--- a/documents/index.php
+++ b/documents/index.php
@@ -18,6 +18,7 @@
     $idtoname[$row->id] = trim($row->value->name);
 }
 $foidocsdb = $server->get_db('disclosr-foidocuments');
+//print_r($foidocsdb);
 try {
     $rows = $foidocsdb->get_view("app", "byDate", Array($endkey, '0000-00-00'), true, 20, null, $enddocid)->rows;
     if ($rows) {
--- a/documents/rss.xml.php
+++ b/documents/rss.xml.php
@@ -31,11 +31,12 @@
 //print_r($rows);
+$i =0;
 foreach ($rows as $row) {
     //Create an empty FeedItem
     $newItem = $TestFeed->createNewItem();
     //Add elements to the feed item
-    $newItem->setTitle($row->value->title);
+    $newItem->setTitle(preg_replace('/[\x00-\x1F\x80-\xFF]/', '', $row->value->title));
     $newItem->setLink("http://disclosurelo.gs/view.php?id=" . $row->value->_id);
     $newItem->setDate(strtotime($row->value->date));
     $newItem->setDescription(displayLogEntry($row, $idtoname));
@@ -43,6 +44,8 @@
     $newItem->addElement('guid', "http://disclosurelo.gs/view.php?id=" . $row->value->_id, array('isPermaLink' => 'true'));
     //Now add the feed item
     $TestFeed->addItem($newItem);
+$i++;
+if ($i > 50) break;
 }
 //OK. Everything is done. Now genarate the feed.
 $TestFeed->generateFeed();
--- a/documents/runScrapers.sh
+++ b/documents/runScrapers.sh
@@ -1,3 +1,4 @@
+#!/bin/bash
 DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
 echo $DIR
 cd $DIR
@@ -15,6 +16,7 @@
         sleep 1;
     fi
 done
+curl "localhost:5984/disclosr-foidocuments/_design/app/_view/byDate?startkey=\"9999-99-99\"&endkey=\"0000-00-00\"&descending=true&limit=20"
 if [ -s /tmp/disclosr-error ] ; then
     echo "emailling logs..";
     mail -E -s "Disclosr errors" maxious@lambdacomplex.org < /tmp/disclosr-error ;
--- a/documents/scrape.py
+++ b/documents/scrape.py
@@ -197,7 +197,7 @@
     links = soup.findAll('a') # soup.findAll('a', id=re.compile("^p-"))
     linkurls = set([])
     for link in links:
-        if link.has_key("href"):
+        if link.has_attr("href"):
            if link['href'].startswith("http"):
                # lets not do external links for now
                # linkurls.add(link['href'])
--- a/documents/scrapers/0e46f8bd1414b1fdd4f0543d54a97500.py
+++ b/documents/scrapers/0e46f8bd1414b1fdd4f0543d54a97500.py
@@ -7,7 +7,7 @@
 #http://www.doughellmann.com/PyMOTW/abc/
 class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
     def getTable(self,soup):
-        return soup.find(id = "maincontentcontainer").table
+        return soup.find(class_ = "contentcontainer").table
     def getColumnCount(self):
         return 5
     def getColumns(self,columns):
--- a/documents/scrapers/1803322b27286950cab0c543168b5f21.py
+++ b/documents/scrapers/1803322b27286950cab0c543168b5f21.py
@@ -16,7 +16,7 @@
         links = []
         description = ""
         for atag in entry.find_all('a'):
-            if atag.has_key('href'):
+            if atag.has_attr('href'):
                link = scrape.fullurl(self.getURL(), atag['href'])
                (url, mime_type, htcontent) = scrape.fetchURL(scrape.docsdb, link, "foidocuments", self.getAgencyID(), False)
                if htcontent != None:
@@ -25,7 +25,7 @@
                        row = soup.find(id="content_div_148050")
                        description = ''.join(row.stripped_strings)
                        for atag in row.find_all("a"):
-                            if atag.has_key('href'):
+                            if atag.has_attr('href'):
                                links.append(scrape.fullurl(link, atag['href']))
        if links != []:
@@ -45,14 +45,5 @@
    print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
    nsi = ScraperImplementation()
-    nsi.disclogURL = "http://www.dbcde.gov.au/about_us/freedom_of_information_disclosure_log/foi_list?result_146858_result_page=1"
-    nsi.doScrape()
-    nsi.disclogURL = "http://www.dbcde.gov.au/about_us/freedom_of_information_disclosure_log/foi_list?result_146858_result_page=2"
-    nsi.doScrape()
-    nsi.disclogURL = "http://www.dbcde.gov.au/about_us/freedom_of_information_disclosure_log/foi_list?result_146858_result_page=3"
-    nsi.doScrape()
-    nsi.disclogURL = "http://www.dbcde.gov.au/about_us/freedom_of_information_disclosure_log/foi_list?result_146858_result_page=4"
-    nsi.doScrape()
-    nsi.disclogURL = "http://www.dbcde.gov.au/about_us/freedom_of_information_disclosure_log/foi_list?result_146858_result_page=5"
    nsi.doScrape()
--- a/documents/scrapers/1d404c4934f74feacd00dcb434e7c10a.py
+++ b/documents/scrapers/1d404c4934f74feacd00dcb434e7c10a.py
@@ -6,8 +6,8 @@
 #http://www.doughellmann.com/PyMOTW/abc/
 class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
-    #def getTable(self,soup):
-    #    return soup.find(id = "cphMain_C001_Col01").table
+    def getTable(self,soup):
+        return soup.findAll('table')[1]
     def getColumnCount(self):
         return 5
     def getColumns(self,columns):
--- a/documents/scrapers/227cb6eb7d2c9f8a6e846df7447d6caa.py
+++ b/documents/scrapers/227cb6eb7d2c9f8a6e846df7447d6caa.py
@@ -11,7 +11,7 @@
         links = []
         description = ""
         for atag in entry.find_all('a'):
-            if atag.has_key('href'):
+            if atag.has_attr('href'):
                link = scrape.fullurl(self.getURL(),atag['href'])
                (url,mime_type,htcontent) = scrape.fetchURL(scrape.docsdb, link, "foidocuments", self.getAgencyID(), False)
                if htcontent != None:
@@ -26,20 +26,23 @@
                        for text in row.stripped_strings:
                            description = description + text + "\n"
                        for atag in row.find_all("a"):
-                            if atag.has_key('href'):
+                            if atag.has_attr('href'):
                                links.append(scrape.fullurl(link,atag['href']))
        if links != []:
            doc.update({'links': links})
        if description != "":
            doc.update({ 'description': description})
-
+    def getRows(self, table):
+        return table.find_all(class_ = "dl-row");
+    def findColumns(self, table):
+        return table.find_all('div');
     def getColumnCount(self):
         return 2
     def getTable(self,soup):
-        return soup.find(class_ = "ms-rteTable-default")
+        return soup.find(class_ = "foi-dl-list")
     def getColumns(self,columns):
-        (date, title) = columns
+        (title,date) = columns
         return (title, date, title, title, None)
 if __name__ == '__main__':
--- a/documents/scrapers/41a166419503bb50e410c58be54c102f.py
+++ b/documents/scrapers/41a166419503bb50e410c58be54c102f.py
@@ -8,7 +8,7 @@
 #http://www.doughellmann.com/PyMOTW/abc/
 class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
     def getTable(self,soup):
-        return soup.find(id= "ctl00_MSO_ContentDiv").table
+        return soup.find(class_ = "rgMasterTable")
     def getColumns(self,columns):
         (id, title, description, notes) = columns
--- a/documents/scrapers/53d2884f8afd026096a27bd5051ec50e.py
+++ b/documents/scrapers/53d2884f8afd026096a27bd5051ec50e.py
@@ -16,7 +16,7 @@
         link = None
         links = []
         for atag in entry.find_all('a'):
-            if atag.has_key('href'):
+            if atag.has_attr('href'):
                link = scrape.fullurl(self.getURL(),atag['href'])
                (url,mime_type,htcontent) = scrape.fetchURL(scrape.docsdb, link, "foidocuments", self.getAgencyID(), False)
                if htcontent != None:
@@ -24,7 +24,7 @@
                    # http://www.crummy.com/software/BeautifulSoup/documentation.html
                    soup = BeautifulSoup(htcontent)
                    for atag in soup.find(class_ = "article-content").find_all('a'):
-                        if atag.has_key('href'):
+                        if atag.has_attr('href'):
                            links.append(scrape.fullurl(link,atag['href']))
        if links != []:
--- a/documents/scrapers/5716ce0aacfe98f7d638b7a66b7f1040.py
+++ b/documents/scrapers/5716ce0aacfe98f7d638b7a66b7f1040.py
@@ -6,6 +6,11 @@
 #http://www.doughellmann.com/PyMOTW/abc/
 class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
+    def getDocHash(self, id,date, url):
+        ''' url changes on every request so ignore for hash '''
+        return scrape.mkhash(
+            self.remove_control_chars(
+                ''.join(id.stripped_strings)))
     def getColumnCount(self):
         return 4
     def getColumns(self,columns):
--- a/documents/scrapers/601aedeef4344638d635bdd761e9fdba.py
+++ b/documents/scrapers/601aedeef4344638d635bdd761e9fdba.py
@@ -6,8 +6,8 @@
 #http://www.doughellmann.com/PyMOTW/abc/
 class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
-    #def getTable(self,soup):
-    #    return soup.find(id = "ctl00_PlaceHolderMain_intro2__ControlWrapper_CerRichHtmlField").table
+    def getTable(self,soup):
+        return soup.find(id = "main").table
     def getColumnCount(self):
         return 4
     def getColumns(self,columns):
--- a/documents/scrapers/69d59284ef0ccd2677394d82d3292abc.py
+++ b/documents/scrapers/69d59284ef0ccd2677394d82d3292abc.py
@@ -6,8 +6,6 @@
 #http://www.doughellmann.com/PyMOTW/abc/
 class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
-    def getTable(self,soup):
-        return soup.find(id = "centercontent").table
     def getColumnCount(self):
         return 5
     def getColumns(self,columns):
--- a/documents/scrapers/8c9421f852c441910bf1d93a57b31d64.py
+++ b/documents/scrapers/8c9421f852c441910bf1d93a57b31d64.py
@@ -5,6 +5,8 @@
 #http://www.doughellmann.com/PyMOTW/abc/
 class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
+    def getTable(self,soup):
+        return soup.find(id = "page_content").table
     def getColumns(self,columns):
         (id, date, title, description, notes) = columns
         return (id, date, title, description, notes)
--- a/documents/scrapers/8e874a2fde8aa0ccdc6d14573d766540.py
+++ b/documents/scrapers/8e874a2fde8aa0ccdc6d14573d766540.py
@@ -11,7 +11,7 @@
         links = []
         description = ""
         for atag in entry.find_all('a'):
-            if atag.has_key('href'):
+            if atag.has_attr('href'):
                link = scrape.fullurl(self.getURL(),atag['href'])
                (url,mime_type,htcontent) = scrape.fetchURL(scrape.docsdb, link, "foidocuments", self.getAgencyID(), False)
                if htcontent != None:
@@ -22,7 +22,7 @@
                        description = description + text.encode('ascii', 'ignore')
                    for atag in soup.find(id="SortingTable").find_all("a"):
-                        if atag.has_key('href'):
+                        if atag.has_attr('href'):
                            links.append(scrape.fullurl(link,atag['href']))
        if links != []:
@@ -43,7 +43,7 @@
         links = []
         description = ""
         for atag in entry.find_all('a'):
-            if atag.has_key('href'):
+            if atag.has_attr('href'):
                link = scrape.fullurl(self.getURL(),atag['href'])
                (url,mime_type,htcontent) = scrape.fetchURL(scrape.docsdb, link, "foidocuments", self.getAgencyID(), False)
                if htcontent != None:
@@ -53,7 +53,7 @@
                    for text in soup.find(id="content-item").stripped_strings:
                        description = description + text + " \n"
                    for atag in soup.find(id="content-item").find_all("a"):
-                        if atag.has_key('href'):
+                        if atag.has_attr('href'):
                            links.append(scrape.fullurl(link,atag['href']))
        if links != []:
            doc.update({'links': links})
--- a/documents/scrapers/ad033512610d8e36886ab6a795f26561.py
+++ b/documents/scrapers/ad033512610d8e36886ab6a795f26561.py
@@ -6,8 +6,8 @@
 #http://www.doughellmann.com/PyMOTW/abc/
 class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
-    def getTable(self,soup):
-        return soup.find(id = "_ctl0__ctl0_MainContentPlaceHolder_MainContentPlaceHolder_ContentSpan").findAll("table")[3]
+#    def getTable(self,soup):
+#        return soup.find(_class = "content").table
     def getColumnCount(self):
         return 5
     def getColumns(self,columns):
--- a/documents/scrapers/be9996f0ac58f71f23d074e82d44ead3.py
+++ b/documents/scrapers/be9996f0ac58f71f23d074e82d44ead3.py
@@ -17,7 +17,7 @@
        dldivs = soup.find('div',class_="download")
        if dldivs != None:
            for atag in dldivs.find_all("a"):
-                if atag.has_key('href'):
+                if atag.has_attr('href'):
                    links.append(scrape.fullurl(url,atag['href']))
        nodldivs = soup.find('div',class_="incompleteNotification")
        if nodldivs != None and nodldivs.stripped_strings != None:
--- a/documents/scrapers/bf16d4ba0d306ee03e5a1d32aaba3da1.py
+++ b/documents/scrapers/bf16d4ba0d306ee03e5a1d32aaba3da1.py
@@ -7,7 +7,7 @@
 #http://www.doughellmann.com/PyMOTW/abc/
 class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
     def getTable(self,soup):
-        return soup.find(summary="This table shows every FOI request to date.")
+        return soup
     def getColumnCount(self):
         return 5
     def getColumns(self,columns):
--- a/documents/scrapers/d1296c366287f7a9faedf235c7e6df01.py
+++ b/documents/scrapers/d1296c366287f7a9faedf235c7e6df01.py
@@ -6,8 +6,6 @@
 #http://www.doughellmann.com/PyMOTW/abc/
 class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
-    def getTable(self,soup):
-        return soup.find(id="main").table
     def getColumnCount(self):
         return 7
     def getColumns(self,columns):
--- a/documents/scrapers/e2a845e55bc9986e6c75c5ad2c508b8d.py
+++ b/documents/scrapers/e2a845e55bc9986e6c75c5ad2c508b8d.py
@@ -1,16 +1,54 @@
 import sys,os
 sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
 import genericScrapers
-#RSS feed not detailed
+import dateutil
+from dateutil.parser import *
+from datetime import *
+import scrape
+from bs4 import BeautifulSoup
+class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
-#http://www.doughellmann.com/PyMOTW/abc/
-class ScraperImplementation(genericScrapers.GenericRSSDisclogScraper):
-    def getColumns(self,columns):
-        (id, date, title, description, notes) = columns
-        return (id, date, title, description, notes)
+    def __init__(self):
+        super(ScraperImplementation, self).__init__()
+    def getTable(self, soup):
+        return soup.find(id='zone-content')
+
+    def getDescription(self,content, entry,doc):
+        link = None
+        links = []
+        description = ""
+        for atag in entry.find_all('a'):
+            if atag.has_attr('href'):
+                link = scrape.fullurl(self.getURL(), atag['href'])
+                (url, mime_type, htcontent) = scrape.fetchURL(scrape.docsdb, link, "foidocuments", self.getAgencyID(), False)
+                if htcontent != None:
+                    if mime_type == "text/html" or mime_type == "application/xhtml+xml" or mime_type =="application/xml":
+                        soup = BeautifulSoup(htcontent)
+                        row = soup.find(id="foidetails")
+                        if row == None:
+                            row = soup.find(id="content").table
+                        if row == None:
+                            row = soup.find(id="content")
+                        description = ''.join(row.stripped_strings)
+                        for atag in row.find_all("a"):
+                            if atag.has_attr('href'):
+                                links.append(scrape.fullurl(link, atag['href']))
+
+        if links != []:
+            doc.update({'links': links})
+        if description != "":
+            doc.update({ 'description': description})
+
+    def getColumnCount(self):
+        return 3
+
+    def getColumns(self, columns):
+        (id, title, date) = columns
+        return (id, date, title, title, None)
+
 if __name__ == '__main__':
-    print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericRSSDisclogScraper)
-    print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericRSSDisclogScraper)
+    print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
+    print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
     ScraperImplementation().doScrape()
--- a/documents/sitemap.xml.php
+++ b/documents/sitemap.xml.php
@@ -6,7 +6,7 @@
 echo '' . "\n";
 echo " " . local_url() . "index.php1.0\n";
 foreach (scandir("./") as $file) {
-    if (strpos($file, ".php") !== false && $file != "index.php" && $file != "sitemap.xml.php"&& $file != "viewDocument.php")
+    if (strpos($file, ".php") !== false && ($file != "index.php" && $file != "sitemap.xml.php"&& $file != "viewDocument.php"))
     {
         echo " " . local_url() . "$file0.6\n";
     }
 }
--- a/documents/template.inc.php
+++ b/documents/template.inc.php
@@ -77,7 +77,7 @@
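
Note: both admin/importOAICFOIrequests.php and documents/charts.php in this commit lean on an object_to_array() helper that is defined elsewhere in the repository (presumably include/common.inc.php) and is not part of this diff. As a rough sketch only, assuming the helper does nothing more than convert CouchDB's nested stdClass documents into plain associative arrays so that the $doc->statistics["foiRequests"][$timePeriod] writes above can work, it might look like this; the real implementation may differ:

<?php
// Hypothetical sketch of the object_to_array() helper this commit assumes;
// the actual version lives in include/common.inc.php and may not match.
function object_to_array($obj) {
    if (is_object($obj)) {
        $obj = get_object_vars($obj); // turn stdClass properties into array entries
    }
    if (is_array($obj)) {
        return array_map('object_to_array', $obj); // recurse into nested values, keys preserved
    }
    return $obj; // scalars pass through unchanged
}
?>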