<?php
include('template.inc.php');
include_header_documents("");
include_once('../include/common.inc.php');
echo "<table>
    <tr><th>Agency Name</th><th>Disclosure Log URL recorded?</th><th>Do we monitor this URL?</th></tr>";
$agenciesdb = $server->get_db('disclosr-agencies');
$docsdb = $server->get_db('disclosr-documents');
$agencies = 0;
$disclogs = 0;
$red = 0;
$green = 0;
$yellow = 0;
$orange = 0;
try {
    $rows = $agenciesdb->get_view("app", "byCanonicalName", null, true)->rows;
    if ($rows) {
        foreach ($rows as $row) {
            if ((!isset($row->value->status) || $row->value->status != "suspended") && isset($row->value->foiEmail)) {
                echo "<tr><td>";
                if (isset($row->value->website)) echo "<a href='" . $row->value->website . "'>";
                echo "<b>" . $row->value->name . "</b>";
                if (isset($row->value->website)) echo "</a>";
                if ($ENV == "DEV")
                    echo "<br>(" . $row->id . ")";
                echo "</td>\n";
                $agencies++;

                echo "<td>";
                if (isset($row->value->FOIDocumentsURL)) {
                    $disclogs++;
                    echo '<a href="' . $row->value->FOIDocumentsURL . '">'
                        . $row->value->FOIDocumentsURL . '</a>';
                    if ($ENV == "DEV")
                        echo '<br><small>(<a href="viewDocument.php?hash=' . md5($row->value->FOIDocumentsURL) . '">'
                            . 'view local copy</a>)</small>';
                } else {
                    echo "<font color='red'><abbr title='No'>✘</abbr></font>";
                }
                echo "</td>\n<td>";
                if (isset($row->value->FOIDocumentsURL)) {
                    if (file_exists("./scrapers/" . $row->id . '.py')) {
                        echo "<font color='green'><abbr title='Yes'>✔</abbr></font>";
                        $green++;
                    } else if (file_exists("./scrapers/" . $row->id . '.txt')) {
                        if (trim(file_get_contents("./scrapers/" . $row->id . '.txt')) == "no disclog") {
                            echo "<font color='yellow'><abbr title='No log table exists at URL to scrape'><b>◎</b></abbr></font>";
                            $yellow++;
                        } else {
                            echo file_get_contents("./scrapers/" . $row->id . '.txt');
                            echo "<font color='orange'><abbr title='Work in progress'><b>▬</b></abbr></font>";
                            $orange++;
                        }
                    } else {
                        echo "<font color='red'><abbr title='No'>✘</abbr></font>";
                        $red++;
                    }
                }
                echo "</td></tr>\n";
            }
        }
    }
} catch (SetteeRestClientException $e) {
    setteErrorHandler($e);
}
echo "</table>";
echo $agencies . " agencies, " . round(($disclogs / $agencies) * 100) . "% with disclosure logs; " | echo $agencies . " agencies, " . round(($disclogs / $agencies) * 100) . "% with disclosure logs; " |
. round(($green / $disclogs) * 100) . "% logs with scrapers " . round(($red / $disclogs) * 100) . "% logs without scrapers " . round(($orange / $disclogs) * 100) . "% logs Work-In-Progress scrapers "; | . round(($green / $disclogs) * 100) . "% logs with scrapers " . round(($red / $disclogs) * 100) . "% logs without scrapers " . round(($orange / $disclogs) * 100) . "% logs Work-In-Progress scrapers "; |
include_footer_documents();
?>
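The admin page above encodes a simple convention in the scrapers/ directory: an <agencyID>.py means a working scraper (green), an <agencyID>.txt whose whole content is "no disclog" means there is no log table to scrape (yellow), any other .txt is treated as a work-in-progress note (orange), and neither file means no scraper yet (red). A minimal sketch of the same check outside the web page follows; the helper name and directory default are illustrative assumptions, not existing code in this repository.

import os

def scraper_status(agency_id, scraper_dir="./scrapers"):
    # mirrors the green/yellow/orange/red logic of the PHP status table above
    py_path = os.path.join(scraper_dir, agency_id + ".py")
    txt_path = os.path.join(scraper_dir, agency_id + ".txt")
    if os.path.exists(py_path):
        return "green: scraper written"
    if os.path.exists(txt_path):
        note = open(txt_path).read().strip()
        if note == "no disclog":
            return "yellow: no log table exists at URL to scrape"
        return "orange: work in progress - " + note
    return "red: no scraper yet"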
import sys,os
sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
import scrape
from bs4 import BeautifulSoup
from time import mktime
import feedparser
import abc
import unicodedata, re
import dateutil
from dateutil.parser import *
from datetime import *
import codecs

class GenericDisclogScraper(object):
    __metaclass__ = abc.ABCMeta
    agencyID = None
    disclogURL = None

    def remove_control_chars(self, input):
        return "".join([i for i in input if ord(i) in range(32, 127)])

    def getAgencyID(self):
        """ disclosr agency id """
        if self.agencyID == None:
            self.agencyID = os.path.basename(sys.argv[0]).replace(".py", "")
        return self.agencyID

    def getURL(self):
        """ disclog URL """
        if self.disclogURL == None:
            agency = scrape.agencydb.get(self.getAgencyID())
            self.disclogURL = agency['FOIDocumentsURL']
        return self.disclogURL

    @abc.abstractmethod
    def doScrape(self):
        """ do the scraping """
        return

    @abc.abstractmethod
    def getDescription(self, content, entry, doc):
        """ get description """
        return

class GenericRSSDisclogScraper(GenericDisclogScraper):
    def doScrape(self):
        foidocsdb = scrape.couch['disclosr-foidocuments']
        (url, mime_type, content) = scrape.fetchURL(scrape.docsdb, self.getURL(), "foidocuments", self.getAgencyID())
        feed = feedparser.parse(content)
        for entry in feed.entries:
            #print entry
            print entry.id
            hash = scrape.mkhash(entry.id)
            #print hash
            doc = foidocsdb.get(hash)
            #print doc
            if doc == None:
                print "saving " + hash
                edate = datetime.fromtimestamp(mktime(entry.published_parsed)).strftime("%Y-%m-%d")
                doc = {'_id': hash, 'agencyID': self.getAgencyID(), 'url': entry.link, 'docID': entry.id,
                       "date": edate, "title": entry.title}
                self.getDescription(entry, entry, doc)
                foidocsdb.save(doc)
            else:
                print "already saved"

    def getDescription(self, content, entry, doc):
        """ get description from rss entry """
        doc.update({'description': content.summary})
        return

class GenericOAICDisclogScraper(GenericDisclogScraper):
    __metaclass__ = abc.ABCMeta

    @abc.abstractmethod
    def getColumns(self, columns):
        """ rearranges columns if required """
        return

    def getColumnCount(self):
        return 5

    def getDescription(self, content, entry, doc):
        """ get description from table cell """
        descriptiontxt = ""
        for string in content.stripped_strings:
            descriptiontxt = descriptiontxt + " \n" + string
        doc.update({'description': descriptiontxt})
        return

    def getTitle(self, content, entry, doc):
        doc.update({'title': (''.join(content.stripped_strings))})
        return

    def getTable(self, soup):
        return soup.table

    def getRows(self, table):
        return table.find_all('tr')

    def getDate(self, content, entry, doc):
        date = ''.join(content.stripped_strings).strip()
        (a, b, c) = date.partition("(")
        date = a.replace("Octber", "October")
        print date
        edate = parse(date, dayfirst=True, fuzzy=True).strftime("%Y-%m-%d")
        print edate
        doc.update({'date': edate})
        return

    def getLinks(self, content, entry, doc):
        links = []
        for atag in entry.find_all("a"):
            if atag.has_key('href'):
                links.append(scrape.fullurl(content, atag['href']))
        if links != []:
            doc.update({'links': links})
        return

    def doScrape(self):
        foidocsdb = scrape.couch['disclosr-foidocuments']
        (url, mime_type, content) = scrape.fetchURL(scrape.docsdb, self.getURL(), "foidocuments", self.getAgencyID())
        if content != None:
            if mime_type == "text/html" or mime_type == "application/xhtml+xml" or mime_type == "application/xml":
                # http://www.crummy.com/software/BeautifulSoup/documentation.html
                soup = BeautifulSoup(content)
                table = self.getTable(soup)
                for row in self.getRows(table):
                    columns = row.find_all('td')
                    if len(columns) == self.getColumnCount():
                        (id, date, title, description, notes) = self.getColumns(columns)
                        print ''.join(id.stripped_strings).encode('ascii', 'ignore')
                        if id.string == None:
                            hash = scrape.mkhash(self.remove_control_chars(url + (''.join(date.stripped_strings))))
                        else:
                            hash = scrape.mkhash(self.remove_control_chars(url + (''.join(id.stripped_strings))))
                        doc = foidocsdb.get(hash)
                        if doc == None:
                            print "saving " + hash
                            doc = {'_id': hash, 'agencyID': self.getAgencyID(), 'url': self.getURL(),
                                   'docID': (''.join(id.stripped_strings))}
                            self.getLinks(self.getURL(), row, doc)
                            self.getTitle(title, row, doc)
                            self.getDate(date, row, doc)
                            self.getDescription(description, row, doc)
                            if notes != None:
                                doc.update({'notes': (''.join(notes.stripped_strings))})
                            foidocsdb.save(doc)
                        else:
                            print "already saved " + hash
                    elif len(row.find_all('th')) == self.getColumnCount():
                        print "header row"
                    else:
                        print "ERROR number of columns incorrect"
                        print row
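
For comparison with the table scrapers that follow, a minimal sketch of the smallest possible per-agency scraper: an agency that publishes its disclosure log as an RSS/Atom feed only needs a subclass of GenericRSSDisclogScraper saved as scrapers/<agencyID>.py, so that getAgencyID() is derived from the file name and getURL() is looked up in the agencies database. The file name and the assumption that no hooks need overriding are illustrative, not a particular agency.

import sys,os
sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
import genericScrapers

class ScraperImplementation(genericScrapers.GenericRSSDisclogScraper):
    # the generic behaviour is enough: fetch the feed, save each new entry to CouchDB;
    # getDescription() already copies the feed entry's summary into the document
    pass

if __name__ == '__main__':
    ScraperImplementation().doScrape()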
see parent dhs
acma style

import sys,os
sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
import genericScrapers
import scrape
from bs4 import BeautifulSoup
import codecs

#http://www.doughellmann.com/PyMOTW/abc/

class NewScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
    def getDescription(self, content, entry, doc):
        link = None
        links = []
        description = ""
        for atag in entry.find_all('a'):
            if atag.has_key('href'):
                link = scrape.fullurl(self.getURL(), atag['href'])
                (url, mime_type, htcontent) = scrape.fetchURL(scrape.docsdb, link, "foidocuments", self.getAgencyID(), False)
                if htcontent != None:
                    if mime_type == "text/html" or mime_type == "application/xhtml+xml" or mime_type == "application/xml":
                        # http://www.crummy.com/software/BeautifulSoup/documentation.html
                        soup = BeautifulSoup(htcontent)
                        for text in soup.find(id="divFullWidthColumn").stripped_strings:
                            description = description + text.encode('ascii', 'ignore')
                        for atag in soup.find(id="divFullWidthColumn").find_all("a"):
                            if atag.has_key('href'):
                                links.append(scrape.fullurl(link, atag['href']))
        if links != []:
            doc.update({'links': links})
        if description != "":
            doc.update({'description': description})

    def getColumnCount(self):
        return 2

    def getTable(self, soup):
        return soup.find(id="TwoColumnSorting")

    def getColumns(self, columns):
        (title, date) = columns
        return (title, date, title, title, None)

class OldScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
    def getDescription(self, content, entry, doc):
        link = None
        links = []
        description = ""
        for atag in entry.find_all('a'):
            if atag.has_key('href'):
                link = scrape.fullurl(self.getURL(), atag['href'])
                (url, mime_type, htcontent) = scrape.fetchURL(scrape.docsdb, link, "foidocuments", self.getAgencyID(), False)
                if htcontent != None:
                    if mime_type == "text/html" or mime_type == "application/xhtml+xml" or mime_type == "application/xml":
                        # http://www.crummy.com/software/BeautifulSoup/documentation.html
                        soup = BeautifulSoup(htcontent)
                        for text in soup.find(id="content-item").stripped_strings:
                            description = description + text + " \n"
                        for atag in soup.find(id="content-item").find_all("a"):
                            if atag.has_key('href'):
                                links.append(scrape.fullurl(link, atag['href']))
        if links != []:
            doc.update({'links': links})
        if description != "":
            doc.update({'description': description})

    def getColumnCount(self):
        return 2

    def getTable(self, soup):
        return soup.find(class_="doc-list")

    def getColumns(self, columns):
        (date, title) = columns
        return (title, date, title, title, None)

if __name__ == '__main__':
    print 'Subclass:', issubclass(NewScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
    print 'Instance:', isinstance(NewScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
    #NewScraperImplementation().doScrape()
    print 'Subclass:', issubclass(OldScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
    print 'Instance:', isinstance(OldScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
    # old site too
    osi = OldScraperImplementation()
    osi.disclogURL = "http://archive.treasury.gov.au/content/foi_publications.asp?year=-1&abstract=0&classification=&=&titl=Disclosure+Log+-+Documents+Released+Under+FOI"
    osi.doScrape()

import sys,os
sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
import genericScrapers
import scrape
from bs4 import BeautifulSoup

#http://www.doughellmann.com/PyMOTW/abc/

class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
    def getDescription(self, content, entry, doc):
        link = None
        links = []
        description = ""
        for atag in entry.find_all('a'):
            if atag.has_key('href'):
                link = scrape.fullurl(self.getURL(), atag['href'])
                (url, mime_type, htcontent) = scrape.fetchURL(scrape.docsdb, link, "foidocuments", self.getAgencyID(), False)
                if htcontent != None:
                    if mime_type == "text/html" or mime_type == "application/xhtml+xml" or mime_type == "application/xml":
                        # http://www.crummy.com/software/BeautifulSoup/documentation.html
                        soup = BeautifulSoup(htcontent)
                        for row in soup.find(class_="ms-rteTable-GreyAlternating").find_all('tr'):
                            if row != None:
                                rowtitle = row.find('th').string
                                description = description + "\n" + rowtitle + ": "
                                for text in row.find('td').stripped_strings:
                                    description = description + text
                                for atag in row.find_all("a"):
                                    if atag.has_key('href'):
                                        links.append(scrape.fullurl(link, atag['href']))
        if links != []:
            doc.update({'links': links})
        if description != "":
            doc.update({'description': description})

    def getColumnCount(self):
        return 2

    def getTable(self, soup):
        return soup.find(class_="ms-rteTable-GreyAlternating")

    def getColumns(self, columns):
        (date, title) = columns
        return (title, date, title, title, None)

if __name__ == '__main__':
    print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
    print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
    ScraperImplementation().doScrape()
# old site too http://archive.treasury.gov.au/content/foi_publications.asp
# does not have any disclog entries or table
no disclog yet
no log
acma style
this is not a table