<?php
include('template.inc.php');
include_header_documents("");
include_once('../include/common.inc.php');
?>
<h1>About</h1>
<?php
include_footer_documents();
?>
import sys, os
sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
import scrape
from bs4 import BeautifulSoup
from time import mktime
import feedparser
import abc
import unicodedata, re
import dateutil
from dateutil.parser import *
from datetime import *
import codecs
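
# The scrape module (not shown in this file) provides the CouchDB handles and
# fetch helpers used below. Inferred from the calls in this file, the pieces
# relied on are roughly:
#   scrape.couch['disclosr-foidocuments']  - CouchDB database of disclosure log entries
#   scrape.docsdb / scrape.agencydb        - cached-documents and agency databases
#   scrape.mkhash(value)                   - stable hash/id for a string
#   scrape.fullurl(base, href)             - resolve a relative link against a base URL
#   scrape.fetchURL(docsdb, url, fieldName, agencyID[, scrape_again]) -> (url, mime_type, content)
# These signatures are assumptions based on usage here, not a definitive API reference.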
class GenericDisclogScraper(object):
    __metaclass__ = abc.ABCMeta
    agencyID = None
    disclogURL = None

    def remove_control_chars(self, input):
        return "".join([i for i in input if ord(i) in range(32, 127)])

    def getAgencyID(self):
        """ disclosr agency id """
        if self.agencyID is None:
            self.agencyID = os.path.basename(sys.argv[0]).replace(".py", "")
        return self.agencyID

    def getURL(self):
        """ disclog URL """
        if self.disclogURL is None:
            agency = scrape.agencydb.get(self.getAgencyID())
            self.disclogURL = agency['FOIDocumentsURL']
        return self.disclogURL

    @abc.abstractmethod
    def doScrape(self):
        """ do the scraping """
        return

    @abc.abstractmethod
    def getDescription(self, content, entry, doc):
        """ get description """
        return
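
# Two generic flavours follow: GenericRSSDisclogScraper walks an RSS/Atom feed,
# while GenericOAICDisclogScraper walks an HTML table of log entries. For
# illustration only (not code from this repo), an RSS-backed agency scraper can
# be as small as:
#
#   import genericScrapers
#   class ScraperImplementation(genericScrapers.GenericRSSDisclogScraper):
#       pass  # doScrape/getDescription are inherited from the RSS base class
#
# whereas a table-based scraper must at least implement getColumns() (see the
# OAIC-style implementation further down).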
class GenericRSSDisclogScraper(GenericDisclogScraper):

    def doScrape(self):
        foidocsdb = scrape.couch['disclosr-foidocuments']
        (url, mime_type, content) = scrape.fetchURL(scrape.docsdb,
            self.getURL(), "foidocuments", self.getAgencyID())
        feed = feedparser.parse(content)
        for entry in feed.entries:
            #print entry
            print entry.id
            hash = scrape.mkhash(entry.id)
            #print hash
            doc = foidocsdb.get(hash)
            #print doc
            if doc is None:
                print "saving " + hash
                edate = datetime.fromtimestamp(
                    mktime(entry.published_parsed)).strftime("%Y-%m-%d")
                doc = {'_id': hash, 'agencyID': self.getAgencyID(),
                       'url': entry.link, 'docID': entry.id,
                       "date": edate, "title": entry.title}
                self.getDescription(entry, entry, doc)
                foidocsdb.save(doc)
            else:
                print "already saved"

    def getDescription(self, content, entry, doc):
        """ get description from rss entry """
        doc.update({'description': content.summary})
        return
class GenericOAICDisclogScraper(GenericDisclogScraper):
    __metaclass__ = abc.ABCMeta

    @abc.abstractmethod
    def getColumns(self, columns):
        """ rearranges columns if required """
        return

    def getColumnCount(self):
        return 5

    def getDescription(self, content, entry, doc):
        """ get description from table cell """
        descriptiontxt = ""
        for string in content.stripped_strings:
            descriptiontxt = descriptiontxt + " \n" + string
        doc.update({'description': descriptiontxt})
        return

    def getTitle(self, content, entry, doc):
        doc.update({'title': (''.join(content.stripped_strings))})
        return

    def getTable(self, soup):
        return soup.table

    def getRows(self, table):
        return table.find_all('tr')

    def getDate(self, content, entry, doc):
        date = ''.join(content.stripped_strings).strip()
        (a, b, c) = date.partition("(")
        date = self.remove_control_chars(a.replace("Octber", "October"))
        print date
        edate = parse(date, dayfirst=True, fuzzy=True).strftime("%Y-%m-%d")
        print edate
        doc.update({'date': edate})
        return

    def getLinks(self, content, entry, doc):
        links = []
        for atag in entry.find_all("a"):
            if atag.has_key('href'):
                links.append(scrape.fullurl(content, atag['href']))
        if links != []:
            doc.update({'links': links})
        return
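
    # doScrape ties the helpers above together: fetch the disclosure log page,
    # locate the table (getTable), walk its rows (getRows), and for each data
    # row build a CouchDB document keyed by a hash of the URL plus the row's id
    # (or its date when the id cell is empty), filling in links, title, date,
    # description and notes before saving. Rows whose title is one of the known
    # boilerplate headings, or whose description is empty, are skipped.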
    def doScrape(self):
        foidocsdb = scrape.couch['disclosr-foidocuments']
        (url, mime_type, content) = scrape.fetchURL(scrape.docsdb,
            self.getURL(), "foidocuments", self.getAgencyID())
        if content is not None:
            if mime_type == "text/html" or mime_type == "application/xhtml+xml" or mime_type == "application/xml":
                # http://www.crummy.com/software/BeautifulSoup/documentation.html
                soup = BeautifulSoup(content)
                table = self.getTable(soup)
                for row in self.getRows(table):
                    columns = row.find_all('td')
                    if len(columns) == self.getColumnCount():
                        (id, date, title, description, notes) = self.getColumns(columns)
                        print self.remove_control_chars(''.join(id.stripped_strings))
                        if id.string is None:
                            hash = scrape.mkhash(self.remove_control_chars(url + (''.join(date.stripped_strings))))
                        else:
                            hash = scrape.mkhash(self.remove_control_chars(url + (''.join(id.stripped_strings))))
                        doc = foidocsdb.get(hash)
                        if doc is None:
                            print "saving " + hash
                            doc = {'_id': hash, 'agencyID': self.getAgencyID(),
                                   'url': self.getURL(),
                                   'docID': (''.join(id.stripped_strings))}
                            self.getLinks(self.getURL(), row, doc)
                            self.getTitle(title, row, doc)
                            self.getDate(date, row, doc)
                            self.getDescription(description, row, doc)
                            if notes is not None:
                                doc.update({'notes': (''.join(notes.stripped_strings))})
                            badtitles = ['-', 'Summary of FOI Request',
                                         'FOI request(in summary form)',
                                         'Summary of FOI request received by the ASC',
                                         'Summary of FOI request received by agency/minister',
                                         'Description of Documents Requested',
                                         'FOI request', 'Description of FOI Request',
                                         'Summary of request', 'Description', 'Summary',
                                         'Summary of FOIrequest received by agency/minister',
                                         'Summary of FOI request received',
                                         'Description of FOI Request', "FOI request",
                                         'Results 1 to 67 of 67']
                            if doc['title'] not in badtitles and doc['description'] != '':
                                print "saving"
                                foidocsdb.save(doc)
                        else:
                            print "already saved " + hash
                    elif len(row.find_all('th')) == self.getColumnCount():
                        print "header row"
                    else:
                        print "ERROR number of columns incorrect"
                        print row
<?php
// Agency X updated Y, new files, diff of plain text/link text,
// feed for just one agency or all
// This is a minimum example of using the Universal Feed Generator Class
include("../lib/FeedWriter/FeedTypes.php");
include_once('../include/common.inc.php');
//Creating an instance of FeedWriter class.
$TestFeed = new RSS2FeedWriter();
//Setting the channel elements
//Use wrapper functions for common channel elements
$TestFeed->setTitle('Last Modified - All');
$TestFeed->setLink('http://disclosurelo.gs/rss.xml.php');
$TestFeed->setDescription('Latest entries');
$TestFeed->setChannelElement('language', 'en-us');
$TestFeed->setChannelElement('pubDate', date(DATE_RSS, time()));
//Retrieving information from the database
$idtoname = Array();
$agenciesdb = $server->get_db('disclosr-agencies');
foreach ($agenciesdb->get_view("app", "byCanonicalName")->rows as $row) {
    $idtoname[$row->id] = trim($row->value->name);
}
$foidocsdb = $server->get_db('disclosr-foidocuments');
$rows = $foidocsdb->get_view("app", "byDate", Array('9999-99-99', '0000-00-00', 50), true)->rows;
//print_r($rows);
foreach ($rows as $row) {
    //Create an empty FeedItem
    $newItem = $TestFeed->createNewItem();
    //Add elements to the feed item
    $newItem->setTitle($row->value->title);
    $newItem->setLink("view.php?id=" . $row->value->_id);
    $newItem->setDate(date("c", strtotime($row->value->date)));
    $newItem->setDescription(displayLogEntry($row, $idtoname));
    $newItem->setAuthor($idtoname[$row->value->agencyID]);
    $newItem->addElement('guid', $row->value->_id, array('isPermaLink' => 'true'));
    //Now add the feed item
    $TestFeed->addItem($newItem);
}
//OK. Everything is done. Now generate the feed.
$TestFeed->generateFeed();
?>
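# Example agency scraper built on the generic classes above: it reads a
# two-column (date, title) disclosure log table styled with the SharePoint
# "ms-rteTable-GreyAlternating" class, and follows each entry's links to pull
# a fuller description from the linked detail page.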
import sys, os
sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
import genericScrapers
import scrape
from bs4 import BeautifulSoup

# http://www.doughellmann.com/PyMOTW/abc/
class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):

    def getDescription(self, content, entry, doc):
        link = None
        links = []
        description = ""
        for atag in entry.find_all('a'):
            if atag.has_key('href'):
                link = scrape.fullurl(self.getURL(), atag['href'])
                (url, mime_type, htcontent) = scrape.fetchURL(scrape.docsdb,
                    link, "foidocuments", self.getAgencyID(), False)
                if htcontent is not None:
                    if mime_type == "text/html" or mime_type == "application/xhtml+xml" or mime_type == "application/xml":
                        # http://www.crummy.com/software/BeautifulSoup/documentation.html
                        soup = BeautifulSoup(htcontent)
                        for row in soup.find(class_="ms-rteTable-GreyAlternating").find_all('tr'):
                            if row is not None:
                                rowtitle = row.find('th').string
                                if rowtitle is not None:
                                    description = description + "\n" + rowtitle + ": "
                                for text in row.find('td').stripped_strings:
                                    description = description + text
                                for atag in row.find_all("a"):
                                    if atag.has_key('href'):
                                        links.append(scrape.fullurl(link, atag['href']))
        if links != []:
            doc.update({'links': links})
        if description != "":
            doc.update({'description': description})
    def getColumnCount(self):
        return 2

    def getTable(self, soup):
        return soup.find(class_="ms-rteTable-GreyAlternating")

    def getColumns(self, columns):
        # The source table has only (date, title) cells; the title cell is
        # reused for the id and description slots expected by the base class.
        (date, title) = columns
        return (title, date, title, title, None)
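
# Running this module directly performs a scrape. The agency ID is derived
# from the script filename by GenericDisclogScraper.getAgencyID(), so each
# agency's scraper is expected to live in its own .py file named after that
# agency.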
if __name__ == '__main__':
    print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
    print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
    ScraperImplementation().doScrape()
<?php
function include_header_documents($title) {
    header('X-UA-Compatible: IE=edge,chrome=1');
    ?>
    <!doctype html>
    <!-- paulirish.com/2008/conditional-stylesheets-vs-css-hacks-answer-neither/ -->
    <!--[if lt IE 7]> <html class="no-js lt-ie9 lt-ie8 lt-ie7" lang="en"> <![endif]-->
    <!--[if IE 7]>    <html class="no-js lt-ie9 lt-ie8" lang="en"> <![endif]-->
    <!--[if IE 8]>    <html class="no-js lt-ie9" lang="en"> <![endif]-->
    <!-- Consider adding a manifest.appcache: h5bp.com/d/Offline -->
    <!--[if gt IE 8]><!--> <html class="no-js" lang="en"> <!--<![endif]-->
    <head>
        <meta charset="utf-8">
        <title>Australian Disclosure Logs<?php if ($title != "") echo " - $title"; ?></title>
        <meta name="description" content="">
        <!-- Mobile viewport optimized: h5bp.com/viewport -->
        <meta name="viewport" content="width=device-width">
        <link rel="alternate" type="application/rss+xml" title="Latest Disclosure Log Entries" href="rss.xml.php" />
        <!-- Place favicon.ico and apple-touch-icon.png in the root directory: mathiasbynens.be/notes/touch-icons -->
        <meta name="google-site-verification" content="jkknX5g2FCpQvrW030b1Nq2hyoa6mb3EDiA7kCoHNj8" />
        <!-- Le styles -->
        <link href="css/bootstrap.min.css" rel="stylesheet">
        <style type="text/css">
            body {
                padding-top: 60px;
                padding-bottom: 40px;
            }