From: Maxious
Date: Tue, 04 Dec 2012 22:52:47 +0000
Subject: fix charts
X-Git-Url: http://maxious.lambdacomplex.org/git/?p=disclosr.git&a=commitdiff&h=84ef02edce8c178acf28c80d26c2bf789d75d268
---
fix charts

Former-commit-id: d6e49522e61927665c8ba633dad5a13344f34841
---
--- a/documents/about.php
+++ b/documents/about.php
@@ -1,7 +1,7 @@

About

--- a/documents/charts.php
+++ b/documents/charts.php
@@ -1,6 +1,6 @@
 get_db('disclosr-agencies');
@@ -15,29 +15,28 @@

Charts

Lorem ipsum.

-
+
+
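Note on the scraper changes in this commit: the genericScrapers.py hunk further down adds GenericPDFDisclogScraper and GenericDOCXDisclogScraper base classes, and the per-agency scraper stubs added later subclass them with almost no code of their own. A minimal sketch, adapted from the 0049d35216493c545ef5f7f000e6b252.py stub included in this commit:

    import sys
    import os
    sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
    import genericScrapers

    # Per-agency PDF disclosure-log scraper: all the work happens in the
    # GenericPDFDisclogScraper base class added by this commit.
    class ScraperImplementation(genericScrapers.GenericPDFDisclogScraper):

        def __init__(self):
            super(ScraperImplementation, self).__init__()

    if __name__ == '__main__':
        ScraperImplementation().doScrape()

The agency ID is derived from the script filename (getAgencyID) and the disclosure-log URL is looked up from the agency record in CouchDB (getURL), so a stub only needs to pick the appropriate base class.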
--- /dev/null +++ b/documents/disclosr-documents.nja @@ -1,1 +1,7 @@ - +{ + "venv": "", + "project-type": "Import from sources", + "name": "disclosr-documents", + "license": "GNU General Public License v3", + "description": "" +} --- a/documents/genericScrapers.py +++ b/documents/genericScrapers.py @@ -1,155 +1,254 @@ -import sys,os +import sys +import os sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../')) import scrape from bs4 import BeautifulSoup from time import mktime import feedparser import abc -import unicodedata, re +import unicodedata +import re import dateutil from dateutil.parser import * from datetime import * import codecs +from StringIO import StringIO + +from pdfminer.pdfparser import PDFDocument, PDFParser +from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter, process_pdf +from pdfminer.pdfdevice import PDFDevice, TagExtractor +from pdfminer.converter import TextConverter +from pdfminer.cmapdb import CMapDB +from pdfminer.layout import LAParams + + class GenericDisclogScraper(object): - __metaclass__ = abc.ABCMeta - agencyID = None - disclogURL = None - def remove_control_chars(self, input): - return "".join([i for i in input if ord(i) in range(32, 127)]) - def getAgencyID(self): - """ disclosr agency id """ - if self.agencyID == None: - self.agencyID = os.path.basename(sys.argv[0]).replace(".py","") - return self.agencyID - - def getURL(self): - """ disclog URL""" - if self.disclogURL == None: - agency = scrape.agencydb.get(self.getAgencyID()) - self.disclogURL = agency['FOIDocumentsURL'] - return self.disclogURL - - @abc.abstractmethod - def doScrape(self): - """ do the scraping """ - return - - @abc.abstractmethod - def getDescription(self, content, entry, doc): - """ get description""" - return - + __metaclass__ = abc.ABCMeta + agencyID = None + disclogURL = None + + def remove_control_chars(self, input): + return "".join([i for i in input if ord(i) in range(32, 127)]) + + def getAgencyID(self): + """ disclosr agency id """ + if self.agencyID is None: + self.agencyID = os.path.basename(sys.argv[0]).replace(".py", "") + return self.agencyID + + def getURL(self): + """ disclog URL""" + if self.disclogURL is None: + agency = scrape.agencydb.get(self.getAgencyID()) + self.disclogURL = agency['FOIDocumentsURL'] + return self.disclogURL + + @abc.abstractmethod + def doScrape(self): + """ do the scraping """ + return + + +class GenericPDFDisclogScraper(GenericDisclogScraper): + + def doScrape(self): + foidocsdb = scrape.couch['disclosr-foidocuments'] + (url, mime_type, content) = scrape.fetchURL(scrape.docsdb, + self.getURL(), "foidocuments", self.getAgencyID()) + laparams = LAParams() + rsrcmgr = PDFResourceManager(caching=True) + outfp = StringIO() + device = TextConverter(rsrcmgr, outfp, codec='utf-8', + laparams=laparams) + fp = StringIO() + fp.write(content.read()) + + process_pdf(rsrcmgr, device, fp, set(), caching=True, + check_extractable=True) + description = outfp.getvalue() + fp.close() + device.close() + outfp.close() + dochash = scrape.mkhash(description) + doc = foidocsdb.get(dochash) + if doc is None: + print "saving " + dochash + edate = date.today().strftime("%Y-%m-%d") + doc = {'_id': dochash, 'agencyID': self.getAgencyID() + , 'url': self.getURL(), 'docID': dochash, + "date": edate, "title": "Disclosure Log Updated", "description": description} + foidocsdb.save(doc) + else: + print "already saved" + + +class GenericDOCXDisclogScraper(GenericDisclogScraper): + + def doScrape(self): + foidocsdb = 
scrape.couch['disclosr-foidocuments'] + (url, mime_type, content) = scrape.fetchURL(scrape.docsdb + , self.getURL(), "foidocuments", self.getAgencyID()) + mydoc = zipfile.ZipFile(file) + xmlcontent = mydoc.read('word/document.xml') + document = etree.fromstring(xmlcontent) + ## Fetch all the text out of the document we just created + paratextlist = getdocumenttext(document) + # Make explicit unicode version + newparatextlist = [] + for paratext in paratextlist: + newparatextlist.append(paratext.encode("utf-8")) + ## Print our documnts test with two newlines under each paragraph + description = '\n\n'.join(newparatextlist).strip(' \t\n\r') + dochash = scrape.mkhash(description) + doc = foidocsdb.get(dochash) + + if doc is None: + print "saving " + dochash + edate = time().strftime("%Y-%m-%d") + doc = {'_id': dochash, 'agencyID': self.getAgencyID() + , 'url': self.getURL(), 'docID': dochash, + "date": edate, "title": "Disclosure Log Updated", "description": description} + foidocsdb.save(doc) + else: + print "already saved" class GenericRSSDisclogScraper(GenericDisclogScraper): - def doScrape(self): - foidocsdb = scrape.couch['disclosr-foidocuments'] - (url,mime_type,content) = scrape.fetchURL(scrape.docsdb, self.getURL(), "foidocuments", self.getAgencyID()) - feed = feedparser.parse(content) - for entry in feed.entries: - #print entry - print entry.id - hash = scrape.mkhash(entry.id) - #print hash - doc = foidocsdb.get(hash) - #print doc - if doc == None: - print "saving "+ hash - edate = datetime.fromtimestamp(mktime( entry.published_parsed)).strftime("%Y-%m-%d") - doc = {'_id': hash, 'agencyID': self.getAgencyID(), 'url': entry.link, 'docID': entry.id, - "date": edate,"title": entry.title} - self.getDescription(entry,entry, doc) - foidocsdb.save(doc) + def doScrape(self): + foidocsdb = scrape.couch['disclosr-foidocuments'] + (url, mime_type, content) = scrape.fetchURL(scrape.docsdb, + self.getURL(), "foidocuments", self.getAgencyID()) + feed = feedparser.parse(content) + for entry in feed.entries: + #print entry + print entry.id + dochash = scrape.mkhash(entry.id) + doc = foidocsdb.get(dochash) + #print doc + if doc is None: + print "saving " + dochash + edate = datetime.fromtimestamp( + mktime(entry.published_parsed)).strftime("%Y-%m-%d") + doc = {'_id': dochash, 'agencyID': self.getAgencyID(), + 'url': entry.link, 'docID': entry.id, + "date": edate, "title": entry.title} + self.getDescription(entry, entry, doc) + foidocsdb.save(doc) + else: + print "already saved" + + def getDescription(self, content, entry, doc): + """ get description from rss entry""" + doc.update({'description': content.summary}) + return + + +class GenericOAICDisclogScraper(GenericDisclogScraper): + __metaclass__ = abc.ABCMeta + + @abc.abstractmethod + def getColumns(self, columns): + """ rearranges columns if required """ + return + + def getColumnCount(self): + return 5 + + def getDescription(self, content, entry, doc): + """ get description from rss entry""" + descriptiontxt = "" + for string in content.stripped_strings: + descriptiontxt = descriptiontxt + " \n" + string + doc.update({'description': descriptiontxt}) + + def getTitle(self, content, entry, doc): + doc.update({'title': (''.join(content.stripped_strings))}) + + def getTable(self, soup): + return soup.table + + def getRows(self, table): + return table.find_all('tr') + + def getDate(self, content, entry, doc): + date = ''.join(content.stripped_strings).strip() + (a, b, c) = date.partition("(") + date = self.remove_control_chars(a.replace("Octber", 
"October")) + print date + edate = parse(date, dayfirst=True, fuzzy=True).strftime("%Y-%m-%d") + print edate + doc.update({'date': edate}) + return + + def getLinks(self, content, entry, doc): + links = [] + for atag in entry.find_all("a"): + if atag.has_key('href'): + links.append(scrape.fullurl(content, atag['href'])) + if links != []: + doc.update({'links': links}) + return + + def doScrape(self): + foidocsdb = scrape.couch['disclosr-foidocuments'] + (url, mime_type, content) = scrape.fetchURL(scrape.docsdb, + self.getURL(), "foidocuments", self.getAgencyID()) + if content is not None: + if mime_type == "text/html" or mime_type == "application/xhtml+xml" or mime_type == "application/xml": + # http://www.crummy.com/software/BeautifulSoup/documentation.html + print "parsing" + soup = BeautifulSoup(content) + table = self.getTable(soup) + for row in self.getRows(table): + columns = row.find_all('td') + if len(columns) is self.getColumnCount(): + (id, date, title, + description, notes) = self.getColumns(columns) + print self.remove_control_chars( + ''.join(id.stripped_strings)) + if id.string is None: + dochash = scrape.mkhash( + self.remove_control_chars( + url + (''.join(date.stripped_strings)))) else: - print "already saved" - def getDescription(self, content, entry, doc): - """ get description from rss entry""" - doc.update({'description': content.summary}) - return - -class GenericOAICDisclogScraper(GenericDisclogScraper): - __metaclass__ = abc.ABCMeta - @abc.abstractmethod - def getColumns(self,columns): - """ rearranges columns if required """ - return - def getColumnCount(self): - return 5 - def getDescription(self, content, entry, doc): - """ get description from rss entry""" - descriptiontxt = "" - for string in content.stripped_strings: - descriptiontxt = descriptiontxt + " \n" + string - doc.update({'description': descriptiontxt}) - return - def getTitle(self, content, entry, doc): - doc.update({'title': (''.join(content.stripped_strings))}) - return - def getTable(self, soup): - return soup.table - def getRows(self, table): - return table.find_all('tr') - def getDate(self, content, entry, doc): - date = ''.join(content.stripped_strings).strip() - (a,b,c) = date.partition("(") - date = self.remove_control_chars(a.replace("Octber","October")) - print date - edate = parse(date, dayfirst=True, fuzzy=True).strftime("%Y-%m-%d") - print edate - doc.update({'date': edate}) - return - def getLinks(self, content, entry, doc): - links = [] - for atag in entry.find_all("a"): - if atag.has_key('href'): - links.append(scrape.fullurl(content,atag['href'])) - if links != []: - doc.update({'links': links}) - return - - def doScrape(self): - foidocsdb = scrape.couch['disclosr-foidocuments'] - (url,mime_type,content) = scrape.fetchURL(scrape.docsdb, self.getURL(), "foidocuments", self.getAgencyID()) - if content != None: - if mime_type == "text/html" or mime_type == "application/xhtml+xml" or mime_type =="application/xml": - # http://www.crummy.com/software/BeautifulSoup/documentation.html - soup = BeautifulSoup(content) - table = self.getTable(soup) - for row in self.getRows(table): - columns = row.find_all('td') - if len(columns) == self.getColumnCount(): - (id, date, title, description, notes) = self.getColumns(columns) - print self.remove_control_chars(''.join(id.stripped_strings)) - if id.string == None: - hash = scrape.mkhash(self.remove_control_chars(url+(''.join(date.stripped_strings)))) - else: - hash = scrape.mkhash(self.remove_control_chars(url+(''.join(id.stripped_strings)))) - doc = 
foidocsdb.get(hash) - - if doc == None: - print "saving " +hash - doc = {'_id': hash, 'agencyID': self.getAgencyID(), 'url': self.getURL(), 'docID': (''.join(id.stripped_strings))} - self.getLinks(self.getURL(),row,doc) - self.getTitle(title,row, doc) - self.getDate(date,row, doc) - self.getDescription(description,row, doc) - if notes != None: - doc.update({ 'notes': (''.join(notes.stripped_strings))}) - badtitles = ['-','Summary of FOI Request','FOI request(in summary form)','Summary of FOI request received by the ASC', -'Summary of FOI request received by agency/minister','Description of Documents Requested','FOI request','Description of FOI Request','Summary of request','Description','Summary', + dochash = scrape.mkhash( + self.remove_control_chars( + url + (''.join(id.stripped_strings)))) + doc = foidocsdb.get(dochash) + + if doc is None: + print "saving " + dochash + doc = {'_id': dochash, + 'agencyID': self.getAgencyID(), + 'url': self.getURL(), + 'docID': (''.join(id.stripped_strings))} + self.getLinks(self.getURL(), row, doc) + self.getTitle(title, row, doc) + self.getDate(date, row, doc) + self.getDescription(description, row, doc) + if notes is not None: + doc.update({ 'notes': ( + ''.join(notes.stripped_strings))}) + badtitles = ['-','Summary of FOI Request' + , 'FOI request(in summary form)' + , 'Summary of FOI request received by the ASC', +'Summary of FOI request received by agency/minister', +'Description of Documents Requested','FOI request', +'Description of FOI Request','Summary of request','Description','Summary', 'Summary of FOIrequest received by agency/minister','Summary of FOI request received','Description of FOI Request',"FOI request",'Results 1 to 67 of 67'] - if doc['title'] not in badtitles and doc['description'] != '': + if doc['title'] not in badtitles\ + and doc['description'] != '': print "saving" foidocsdb.save(doc) - else: - print "already saved "+hash - - elif len(row.find_all('th')) == self.getColumnCount(): - print "header row" - - else: - print "ERROR number of columns incorrect" - print row - + else: + print "already saved " + dochash + + elif len(row.find_all('th')) is self.getColumnCount(): + print "header row" + + else: + print "ERROR number of columns incorrect" + print row + --- a/documents/index.php +++ b/documents/index.php @@ -3,10 +3,11 @@ include('template.inc.php'); include_header_documents(""); include_once('../include/common.inc.php'); -$startkey = (isset($_REQUEST['start_key']) ? $_REQUEST['start_key'] : '9999-99-99'); +$endkey = (isset($_REQUEST['end_key']) ? $_REQUEST['end_key'] : '9999-99-99'); ?> +
Read all the information released by Australian Federal Government agencies under the FOI Act in one place!
+RSS Icon All Agencies RSS Feed
get_db('disclosr-agencies'); $idtoname = Array(); @@ -15,17 +16,18 @@ } $foidocsdb = $server->get_db('disclosr-foidocuments'); try { - $rows = $foidocsdb->get_view("app", "byDate", Array($startkey, '0000-00-00'), true, 20)->rows; + $rows = $foidocsdb->get_view("app", "byDate", Array($endkey, '0000-00-00'), true, 20)->rows; if ($rows) { foreach ($rows as $key => $row) { echo displayLogEntry($row, $idtoname); + if (!isset($startkey)) $startkey = $row->key; $endkey = $row->key; } } } catch (SetteeRestClientException $e) { setteErrorHandler($e); } -echo "next page"; +echo "next page "; include_footer_documents(); ?> --- a/documents/rss.xml.php +++ b/documents/rss.xml.php @@ -9,11 +9,12 @@ $TestFeed = new RSS2FeedWriter(); //Setting the channel elements //Use wrapper functions for common channelelements -$TestFeed->setTitle('Last Modified - All'); +$TestFeed->setTitle('disclosurelo.gs Newest Entries - All'); $TestFeed->setLink('http://disclosurelo.gs/rss.xml.php'); -$TestFeed->setDescription('Latest entries'); - $TestFeed->setChannelElement('language', 'en-us'); - $TestFeed->setChannelElement('pubDate', date(DATE_RSS, time())); +$TestFeed->setDescription('disclosurelo.gs Newest Entries - All Agencies'); +$TestFeed->setChannelElement('language', 'en-us'); +$TestFeed->setChannelElement('pubDate', date(DATE_RSS, time())); + //Retriving informations from database $idtoname = Array(); $agenciesdb = $server->get_db('disclosr-agencies'); @@ -21,17 +22,18 @@ $idtoname[$row->id] = trim($row->value->name); } $foidocsdb = $server->get_db('disclosr-foidocuments'); -$rows = $foidocsdb->get_view("app", "byDate", Array('9999-99-99','0000-00-00', 50), true)->rows; +$rows = $foidocsdb->get_view("app", "byDate", Array('9999-99-99', '0000-00-00', 50), true)->rows; //print_r($rows); foreach ($rows as $row) { //Create an empty FeedItem $newItem = $TestFeed->createNewItem(); //Add elements to the feed item $newItem->setTitle($row->value->title); - $newItem->setLink("view.php?id=".$row->value->_id); - $newItem->setDate(date("c", strtotime($row->value->date))); - $newItem->setDescription(displayLogEntry($row,$idtoname)); - $newItem->addElement('guid', $row->value->_id,array('isPermaLink'=>'true')); + $newItem->setLink("http://disclosurelo.gs/view.php?id=" . $row->value->_id); + $newItem->setDate(strtotime($row->value->date)); + $newItem->setDescription(displayLogEntry($row, $idtoname)); + $newItem->setAuthor($idtoname[$row->value->agencyID]); + $newItem->addElement('guid', "http://disclosurelo.gs/view.php?id=" . $row->value->_id, array('isPermaLink' => 'true')); //Now add the feed item $TestFeed->addItem($newItem); } --- a/documents/scrape.py +++ b/documents/scrape.py @@ -8,186 +8,188 @@ import time import os import mimetypes -import re import urllib import urlparse def mkhash(input): - return hashlib.md5(input).hexdigest().encode("utf-8") + return hashlib.md5(input).hexdigest().encode("utf-8") def canonurl(url): - r"""Return the canonical, ASCII-encoded form of a UTF-8 encoded URL, or '' - if the URL looks invalid. 
- >>> canonurl('\xe2\x9e\xa1.ws') # tinyarro.ws - 'http://xn--hgi.ws/' - """ - # strip spaces at the ends and ensure it's prefixed with 'scheme://' - url = url.strip() - if not url: - return '' - if not urlparse.urlsplit(url).scheme: - url = 'http://' + url - - # turn it into Unicode - #try: - # url = unicode(url, 'utf-8') - #except UnicodeDecodeError: - # return '' # bad UTF-8 chars in URL - - # parse the URL into its components - parsed = urlparse.urlsplit(url) - scheme, netloc, path, query, fragment = parsed - - # ensure scheme is a letter followed by letters, digits, and '+-.' chars - if not re.match(r'[a-z][-+.a-z0-9]*$', scheme, flags=re.I): - return '' - scheme = str(scheme) - - # ensure domain and port are valid, eg: sub.domain.<1-to-6-TLD-chars>[:port] - match = re.match(r'(.+\.[a-z0-9]{1,6})(:\d{1,5})?$', netloc, flags=re.I) - if not match: - return '' - domain, port = match.groups() - netloc = domain + (port if port else '') - netloc = netloc.encode('idna') - - # ensure path is valid and convert Unicode chars to %-encoded - if not path: - path = '/' # eg: 'http://google.com' -> 'http://google.com/' - path = urllib.quote(urllib.unquote(path.encode('utf-8')), safe='/;') - - # ensure query is valid - query = urllib.quote(urllib.unquote(query.encode('utf-8')), safe='=&?/') - - # ensure fragment is valid - fragment = urllib.quote(urllib.unquote(fragment.encode('utf-8'))) - - # piece it all back together, truncating it to a maximum of 4KB - url = urlparse.urlunsplit((scheme, netloc, path, query, fragment)) - return url[:4096] + r"""Return the canonical, ASCII-encoded form of a UTF-8 encoded URL, or '' + if the URL looks invalid. + >>> canonurl('\xe2\x9e\xa1.ws') # tinyarro.ws + 'http://xn--hgi.ws/' + """ + # strip spaces at the ends and ensure it's prefixed with 'scheme://' + url = url.strip() + if not url: + return '' + if not urlparse.urlsplit(url).scheme: + url = 'http://' + url + + # turn it into Unicode + #try: + # url = unicode(url, 'utf-8') + #except UnicodeDecodeError: + # return '' # bad UTF-8 chars in URL + + # parse the URL into its components + parsed = urlparse.urlsplit(url) + scheme, netloc, path, query, fragment = parsed + + # ensure scheme is a letter followed by letters, digits, and '+-.' 
chars + if not re.match(r'[a-z][-+.a-z0-9]*$', scheme, flags=re.I): + return '' + scheme = str(scheme) + + # ensure domain and port are valid, eg: sub.domain.<1-to-6-TLD-chars>[:port] + match = re.match(r'(.+\.[a-z0-9]{1,6})(:\d{1,5})?$', netloc, flags=re.I) + if not match: + return '' + domain, port = match.groups() + netloc = domain + (port if port else '') + netloc = netloc.encode('idna') + + # ensure path is valid and convert Unicode chars to %-encoded + if not path: + path = '/' # eg: 'http://google.com' -> 'http://google.com/' + path = urllib.quote(urllib.unquote(path.encode('utf-8')), safe='/;') + + # ensure query is valid + query = urllib.quote(urllib.unquote(query.encode('utf-8')), safe='=&?/') + + # ensure fragment is valid + fragment = urllib.quote(urllib.unquote(fragment.encode('utf-8'))) + + # piece it all back together, truncating it to a maximum of 4KB + url = urlparse.urlunsplit((scheme, netloc, path, query, fragment)) + return url[:4096] def fullurl(url,href): - href = href.replace(" ","%20") - href = re.sub('#.*$','',href) - return urljoin(url,href) + href = href.replace(" ","%20") + href = re.sub('#.*$','',href) + return urljoin(url,href) #http://diveintopython.org/http_web_services/etags.html -class NotModifiedHandler(urllib2.BaseHandler): - def http_error_304(self, req, fp, code, message, headers): - addinfourl = urllib2.addinfourl(fp, headers, req.get_full_url()) - addinfourl.code = code - return addinfourl +class NotModifiedHandler(urllib2.BaseHandler): + def http_error_304(self, req, fp, code, message, headers): + addinfourl = urllib2.addinfourl(fp, headers, req.get_full_url()) + addinfourl.code = code + return addinfourl def fetchURL(docsdb, url, fieldName, agencyID, scrape_again=True): - url = canonurl(url) - hash = mkhash(url) - req = urllib2.Request(url) - print "Fetching %s (%s)" % (url,hash) - if url.startswith("mailto") or url.startswith("javascript") or url.startswith("#") or url == None or url == "": - print "Not a valid HTTP url" - return (None,None,None) - doc = docsdb.get(hash) - if doc == None: - doc = {'_id': hash, 'agencyID': agencyID, 'url': url, 'fieldName':fieldName} - else: - if (('page_scraped' in doc) and (time.time() - doc['page_scraped']) < 60*24*14*1000): - print "Uh oh, trying to scrape URL again too soon!" 
- last_attachment_fname = doc["_attachments"].keys()[-1] - last_attachment = docsdb.get_attachment(doc,last_attachment_fname) - content = last_attachment - return (doc['url'],doc['mime_type'],content) - if scrape_again == False: - print "Not scraping this URL again as requested" - return (None,None,None) - - time.sleep(3) # wait 3 seconds to give webserver time to recover - - req.add_header("User-Agent", "Mozilla/4.0 (compatible; Prometheus webspider; owner maxious@lambdacomplex.org)") - #if there is a previous version stored in couchdb, load caching helper tags - if doc.has_key('etag'): - req.add_header("If-None-Match", doc['etag']) - if doc.has_key('last_modified'): - req.add_header("If-Modified-Since", doc['last_modified']) - - opener = urllib2.build_opener(NotModifiedHandler()) - try: - url_handle = opener.open(req) - doc['url'] = url_handle.geturl() # may have followed a redirect to a new url - headers = url_handle.info() # the addinfourls have the .info() too - doc['etag'] = headers.getheader("ETag") - doc['last_modified'] = headers.getheader("Last-Modified") - doc['date'] = headers.getheader("Date") - doc['page_scraped'] = time.time() - doc['web_server'] = headers.getheader("Server") - doc['via'] = headers.getheader("Via") - doc['powered_by'] = headers.getheader("X-Powered-By") - doc['file_size'] = headers.getheader("Content-Length") - content_type = headers.getheader("Content-Type") - if content_type != None: - doc['mime_type'] = content_type.split(";")[0] - else: - (type,encoding) = mimetypes.guess_type(url) - doc['mime_type'] = type - if hasattr(url_handle, 'code'): - if url_handle.code == 304: - print "the web page has not been modified" - return (None,None,None) - else: - content = url_handle.read() - docsdb.save(doc) - doc = docsdb.get(hash) # need to get a _rev - docsdb.put_attachment(doc, content, str(time.time())+"-"+os.path.basename(url), doc['mime_type']) - return (doc['url'], doc['mime_type'], content) - #store as attachment epoch-filename - - except urllib2.URLError as e: - error = "" - if hasattr(e, 'reason'): - error = "error %s in downloading %s" % (str(e.reason), url) - elif hasattr(e, 'code'): - error = "error %s in downloading %s" % (e.code, url) - print error - doc['error'] = error - docsdb.save(doc) - return (None,None,None) + url = canonurl(url) + hash = mkhash(url) + req = urllib2.Request(url) + print "Fetching %s (%s)" % (url,hash) + if url.startswith("mailto") or url.startswith("javascript") or url.startswith("#") or url == None or url == "": + print "Not a valid HTTP url" + return (None,None,None) + doc = docsdb.get(hash) + if doc == None: + doc = {'_id': hash, 'agencyID': agencyID, 'url': url, 'fieldName':fieldName} + else: + if (('page_scraped' in doc) and (time.time() - doc['page_scraped']) < 60*24*14*1000): + print "Uh oh, trying to scrape URL again too soon!"+hash + last_attachment_fname = doc["_attachments"].keys()[-1] + last_attachment = docsdb.get_attachment(doc,last_attachment_fname) + content = last_attachment + return (doc['url'],doc['mime_type'],content) + if scrape_again == False: + print "Not scraping this URL again as requested" + return (None,None,None) + + req.add_header("User-Agent", "Mozilla/4.0 (compatible; Prometheus webspider; owner maxious@lambdacomplex.org)") + #if there is a previous version stored in couchdb, load caching helper tags + if doc.has_key('etag'): + req.add_header("If-None-Match", doc['etag']) + if doc.has_key('last_modified'): + req.add_header("If-Modified-Since", doc['last_modified']) + + opener = 
urllib2.build_opener(NotModifiedHandler()) + try: + url_handle = opener.open(req) + doc['url'] = url_handle.geturl() # may have followed a redirect to a new url + headers = url_handle.info() # the addinfourls have the .info() too + doc['etag'] = headers.getheader("ETag") + doc['last_modified'] = headers.getheader("Last-Modified") + doc['date'] = headers.getheader("Date") + doc['page_scraped'] = time.time() + doc['web_server'] = headers.getheader("Server") + doc['via'] = headers.getheader("Via") + doc['powered_by'] = headers.getheader("X-Powered-By") + doc['file_size'] = headers.getheader("Content-Length") + content_type = headers.getheader("Content-Type") + if content_type != None: + doc['mime_type'] = content_type.split(";")[0] + else: + (type,encoding) = mimetypes.guess_type(url) + doc['mime_type'] = type + if hasattr(url_handle, 'code'): + if url_handle.code == 304: + print "the web page has not been modified"+hash + last_attachment_fname = doc["_attachments"].keys()[-1] + last_attachment = docsdb.get_attachment(doc,last_attachment_fname) + content = last_attachment + return (doc['url'],doc['mime_type'],content) + else: + print "new webpage loaded" + content = url_handle.read() + docsdb.save(doc) + doc = docsdb.get(hash) # need to get a _rev + docsdb.put_attachment(doc, content, str(time.time())+"-"+os.path.basename(url), doc['mime_type']) + return (doc['url'], doc['mime_type'], content) + #store as attachment epoch-filename + + except urllib2.URLError as e: + print "error!" + error = "" + if hasattr(e, 'reason'): + error = "error %s in downloading %s" % (str(e.reason), url) + elif hasattr(e, 'code'): + error = "error %s in downloading %s" % (e.code, url) + print error + doc['error'] = error + docsdb.save(doc) + return (None,None,None) def scrapeAndStore(docsdb, url, depth, fieldName, agencyID): - (url,mime_type,content) = fetchURL(docsdb, url, fieldName, agencyID) - badURLs = ["http://www.ausport.gov.au/supporting/funding/grants_and_scholarships/grant_funding_report"] - if content != None and depth > 0 and url != "http://www.ausport.gov.au/supporting/funding/grants_and_scholarships/grant_funding_report": - if mime_type == "text/html" or mime_type == "application/xhtml+xml" or mime_type =="application/xml": - # http://www.crummy.com/software/BeautifulSoup/documentation.html - soup = BeautifulSoup(content) - navIDs = soup.findAll(id=re.compile('nav|Nav|menu|bar|left|right|sidebar|more-links|breadcrumb|footer|header')) - for nav in navIDs: - print "Removing element", nav['id'] - nav.extract() - navClasses = soup.findAll(attrs={'class' : re.compile('nav|menu|bar|left|right|sidebar|more-links|breadcrumb|footer|header')}) - for nav in navClasses: - print "Removing element", nav['class'] - nav.extract() - links = soup.findAll('a') # soup.findAll('a', id=re.compile("^p-")) - linkurls = set([]) - for link in links: - if link.has_key("href"): - if link['href'].startswith("http"): - # lets not do external links for now - # linkurls.add(link['href']) - None - if link['href'].startswith("mailto"): - # not http - None - if link['href'].startswith("javascript"): - # not http - None - else: - # remove anchors and spaces in urls - linkurls.add(fullurl(url,link['href'])) - for linkurl in linkurls: - #print linkurl - scrapeAndStore(docsdb, linkurl, depth-1, fieldName, agencyID) + (url,mime_type,content) = fetchURL(docsdb, url, fieldName, agencyID) + badURLs = ["http://www.ausport.gov.au/supporting/funding/grants_and_scholarships/grant_funding_report"] + if content != None and depth > 0 and url != 
"http://www.ausport.gov.au/supporting/funding/grants_and_scholarships/grant_funding_report": + if mime_type == "text/html" or mime_type == "application/xhtml+xml" or mime_type =="application/xml": + # http://www.crummy.com/software/BeautifulSoup/documentation.html + soup = BeautifulSoup(content) + navIDs = soup.findAll(id=re.compile('nav|Nav|menu|bar|left|right|sidebar|more-links|breadcrumb|footer|header')) + for nav in navIDs: + print "Removing element", nav['id'] + nav.extract() + navClasses = soup.findAll(attrs={'class' : re.compile('nav|menu|bar|left|right|sidebar|more-links|breadcrumb|footer|header')}) + for nav in navClasses: + print "Removing element", nav['class'] + nav.extract() + links = soup.findAll('a') # soup.findAll('a', id=re.compile("^p-")) + linkurls = set([]) + for link in links: + if link.has_key("href"): + if link['href'].startswith("http"): + # lets not do external links for now + # linkurls.add(link['href']) + None + if link['href'].startswith("mailto"): + # not http + None + if link['href'].startswith("javascript"): + # not http + None + else: + # remove anchors and spaces in urls + linkurls.add(fullurl(url,link['href'])) + for linkurl in linkurls: + #print linkurl + scrapeAndStore(docsdb, linkurl, depth-1, fieldName, agencyID) #couch = couchdb.Server('http://192.168.1.148:5984/') couch = couchdb.Server('http://127.0.0.1:5984/') @@ -196,20 +198,20 @@ docsdb = couch['disclosr-documents'] if __name__ == "__main__": - for row in agencydb.view('app/getScrapeRequired'): #not recently scraped agencies view? - agency = agencydb.get(row.id) - print agency['name'] - for key in agency.keys(): - if key == "FOIDocumentsURL" and "status" not in agency.keys: - scrapeAndStore(docsdb, agency[key],0,key,agency['_id']) - if key == 'website' and False: - scrapeAndStore(docsdb, agency[key],0,key,agency['_id']) - agency['metadata']['lastScraped'] = time.time() - if key.endswith('URL') and False: - print key - depth = 1 - if 'scrapeDepth' in agency.keys(): - depth = agency['scrapeDepth'] - scrapeAndStore(docsdb, agency[key],depth,key,agency['_id']) - agencydb.save(agency) - + for row in agencydb.view('app/getScrapeRequired'): #not recently scraped agencies view? 
+ agency = agencydb.get(row.id) + print agency['name'] + for key in agency.keys(): + if key == "FOIDocumentsURL" and "status" not in agency.keys: + scrapeAndStore(docsdb, agency[key],0,key,agency['_id']) + if key == 'website' and False: + scrapeAndStore(docsdb, agency[key],0,key,agency['_id']) + agency['metadata']['lastScraped'] = time.time() + if key.endswith('URL') and False: + print key + depth = 1 + if 'scrapeDepth' in agency.keys(): + depth = agency['scrapeDepth'] + scrapeAndStore(docsdb, agency[key],depth,key,agency['_id']) + agencydb.save(agency) + --- /dev/null +++ b/documents/scrapers/0049d35216493c545ef5f7f000e6b252.py @@ -1,1 +1,19 @@ +import sys +import os +sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../')) +import genericScrapers + +class ScraperImplementation(genericScrapers.GenericPDFDisclogScraper): + + def __init__(self): + super(ScraperImplementation, self).__init__() + + +if __name__ == '__main__': + print 'Subclass:', issubclass(ScraperImplementation, + genericScrapers.GenericOAICDisclogScraper) + print 'Instance:', isinstance(ScraperImplementation(), + genericScrapers.GenericOAICDisclogScraper) + ScraperImplementation().doScrape() + --- a/documents/scrapers/0049d35216493c545ef5f7f000e6b252.txt +++ /dev/null @@ -1,2 +1,1 @@ -pdf --- /dev/null +++ b/documents/scrapers/00a294de663db69062ca09aede7c0487.py @@ -1,1 +1,47 @@ +import sys,os +sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../')) +import genericScrapers +import dateutil +from dateutil.parser import * +from datetime import * + +class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper): + + def __init__(self): + super(ScraperImplementation, self).__init__() + def getDate(self, content, entry, doc): + date = ''.join(entry.find('th').stripped_strings).strip() + (a, b, c) = date.partition("(") + date = self.remove_control_chars(a.replace("Octber", "October")) + print date + edate = parse(date, dayfirst=True, fuzzy=True).strftime("%Y-%m-%d") + print edate + doc.update({'date': edate}) + return + def getColumnCount(self): + return 4 + + def getTable(self, soup): + return soup.find(summary="List of Defence documents released under Freedom of Information requets") + + def getColumns(self, columns): + (id, description, access, notes) = columns + return (id, None, description, description, notes) + + +if __name__ == '__main__': + print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper) + print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper) + + nsi = ScraperImplementation() + nsi.disclogURL = "http://www.defence.gov.au/foi/disclosure_log_201213.cfm" + nsi.doScrape() + + nsi.disclogURL = "http://www.defence.gov.au/foi/disclosure_log_201112.cfm" + nsi.doScrape() + + nsi.disclogURL = "http://www.defence.gov.au/foi/disclosure_log_201011.cfm" + nsi.doScrape() + + --- a/documents/scrapers/00a294de663db69062ca09aede7c0487.txt +++ /dev/null @@ -1,2 +1,1 @@ -multipage --- a/documents/scrapers/0ae822d1a748e60d90f0b79b97d5a3e5.txt +++ /dev/null @@ -1,2 +1,1 @@ -ACMA style --- /dev/null +++ b/documents/scrapers/1803322b27286950cab0c543168b5f21.py @@ -1,1 +1,58 @@ +import sys,os +sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../')) +import genericScrapers +import dateutil +from dateutil.parser import * +from datetime import * +import scrape +from bs4 import BeautifulSoup +class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper): + def __init__(self): + 
super(ScraperImplementation, self).__init__() + + def getDescription(self,content, entry,doc): + link = None + links = [] + description = "" + for atag in entry.find_all('a'): + if atag.has_key('href'): + link = scrape.fullurl(self.getURL(), atag['href']) + (url, mime_type, htcontent) = scrape.fetchURL(scrape.docsdb, link, "foidocuments", self.getAgencyID(), False) + if htcontent != None: + if mime_type == "text/html" or mime_type == "application/xhtml+xml" or mime_type =="application/xml": + soup = BeautifulSoup(htcontent) + row = soup.find(id="content_div_148050") + description = ''.join(row.stripped_strings) + for atag in row.find_all("a"): + if atag.has_key('href'): + links.append(scrape.fullurl(link, atag['href'])) + + if links != []: + doc.update({'links': links}) + if description != "": + doc.update({ 'description': description}) + def getColumnCount(self): + return 4 + + def getColumns(self, columns): + (id, date, datepub, title) = columns + return (id, date, title, title, None) + + +if __name__ == '__main__': + print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper) + print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper) + + nsi = ScraperImplementation() + nsi.disclogURL = "http://www.dbcde.gov.au/about_us/freedom_of_information_disclosure_log/foi_list?result_146858_result_page=1" + nsi.doScrape() + nsi.disclogURL = "http://www.dbcde.gov.au/about_us/freedom_of_information_disclosure_log/foi_list?result_146858_result_page=2" + nsi.doScrape() + nsi.disclogURL = "http://www.dbcde.gov.au/about_us/freedom_of_information_disclosure_log/foi_list?result_146858_result_page=3" + nsi.doScrape() + nsi.disclogURL = "http://www.dbcde.gov.au/about_us/freedom_of_information_disclosure_log/foi_list?result_146858_result_page=4" + nsi.doScrape() + nsi.disclogURL = "http://www.dbcde.gov.au/about_us/freedom_of_information_disclosure_log/foi_list?result_146858_result_page=5" + nsi.doScrape() + --- a/documents/scrapers/1803322b27286950cab0c543168b5f21.txt +++ /dev/null @@ -1,2 +1,1 @@ -multipage log --- /dev/null +++ b/documents/scrapers/6afdde1d4ff1ad8d8cfe1a8675ea83bd.py @@ -1,1 +1,19 @@ +import sys +import os +sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../')) +import genericScrapers + +class ScraperImplementation(genericScrapers.GenericPDFDisclogScraper): + + def __init__(self): + super(ScraperImplementation, self).__init__() + + +if __name__ == '__main__': + print 'Subclass:', issubclass(ScraperImplementation, + genericScrapers.GenericOAICDisclogScraper) + print 'Instance:', isinstance(ScraperImplementation(), + genericScrapers.GenericOAICDisclogScraper) + ScraperImplementation().doScrape() + --- a/documents/scrapers/6afdde1d4ff1ad8d8cfe1a8675ea83bd.txt +++ /dev/null @@ -1,2 +1,1 @@ -PDF --- /dev/null +++ b/documents/scrapers/8317df630946937864d31a4728ad8ee8.py @@ -1,1 +1,19 @@ +import sys +import os +sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../')) +import genericScrapers + +class ScraperImplementation(genericScrapers.GenericPDFDisclogScraper): + + def __init__(self): + super(ScraperImplementation, self).__init__() + + +if __name__ == '__main__': + print 'Subclass:', issubclass(ScraperImplementation, + genericScrapers.GenericOAICDisclogScraper) + print 'Instance:', isinstance(ScraperImplementation(), + genericScrapers.GenericOAICDisclogScraper) + ScraperImplementation().doScrape() + --- a/documents/scrapers/8317df630946937864d31a4728ad8ee8.txt +++ /dev/null @@ -1,2 
+1,1 @@ -pdf --- /dev/null +++ b/documents/scrapers/8796220032faf94501bd366763263685.py @@ -1,1 +1,37 @@ +import sys,os +sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../')) +import genericScrapers +import dateutil +from dateutil.parser import * +from datetime import * + +class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper): + + def __init__(self): + super(ScraperImplementation, self).__init__() + + def getColumnCount(self): + return 6 + + def getColumns(self, columns): + (id, date, title, description, datepub, notes) = columns + return (id, date, title, description, notes) + + +if __name__ == '__main__': + print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper) + print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper) + + nsi = ScraperImplementation() + nsi.disclogURL = "http://www.dpmc.gov.au/foi/ips/disclosure_logs/pmo/2011-12.cfm" + nsi.doScrape() + nsi.disclogURL = "http://www.dpmc.gov.au/foi/ips/disclosure_logs/dpmc/2011-12.cfm" + nsi.doScrape() + nsi.disclogURL = "http://www.dpmc.gov.au/foi/ips/disclosure_logs/dpmc/2012-13.cfm" + nsi.doScrape() + nsi.disclogURL = "http://www.dpmc.gov.au/foi/ips/disclosure_logs/omsi/2011-12.cfm" + nsi.doScrape() + nsi.disclogURL = "http://www.dpmc.gov.au/foi/ips/disclosure_logs/omps/2012-13.cfm" + nsi.doScrape() + --- a/documents/scrapers/8796220032faf94501bd366763263685.txt +++ /dev/null @@ -1,2 +1,1 @@ -multiple pages --- a/documents/scrapers/8e874a2fde8aa0ccdc6d14573d766540.py +++ b/documents/scrapers/8e874a2fde8aa0ccdc6d14573d766540.py @@ -3,7 +3,7 @@ import genericScrapers import scrape from bs4 import BeautifulSoup -import codecs +import codecs #http://www.doughellmann.com/PyMOTW/abc/ class NewScraperImplementation(genericScrapers.GenericOAICDisclogScraper): def getDescription(self,content, entry,doc): @@ -20,7 +20,7 @@ soup = BeautifulSoup(htcontent) for text in soup.find(id="divFullWidthColumn").stripped_strings: description = description + text.encode('ascii', 'ignore') - + for atag in soup.find(id="divFullWidthColumn").find_all("a"): if atag.has_key('href'): links.append(scrape.fullurl(link,atag['href'])) @@ -76,11 +76,10 @@ if __name__ == '__main__': print 'Subclass:', issubclass(NewScraperImplementation, genericScrapers.GenericOAICDisclogScraper) print 'Instance:', isinstance(NewScraperImplementation(), genericScrapers.GenericOAICDisclogScraper) - #NewScraperImplementation().doScrape() + NewScraperImplementation().doScrape() print 'Subclass:', issubclass(OldScraperImplementation, genericScrapers.GenericOAICDisclogScraper) print 'Instance:', isinstance(OldScraperImplementation(), genericScrapers.GenericOAICDisclogScraper) osi = OldScraperImplementation() osi.disclogURL = "http://archive.treasury.gov.au/content/foi_publications.asp?year=-1&abstract=0&classification=&=&titl=Disclosure+Log+-+Documents+Released+Under+FOI" osi.doScrape() -# old site too --- /dev/null +++ b/documents/scrapers/b0a3281ba66efe173c5a33d5ef90ff76.py @@ -1,1 +1,35 @@ +import sys,os +sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../')) +import genericScrapers +import dateutil +from dateutil.parser import * +from datetime import * + +class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper): + + def __init__(self): + super(ScraperImplementation, self).__init__() + + def getColumnCount(self): + return 2 + + def getColumns(self, columns): + (date, title) = columns + return (title, date, title, title, None) + + +if 
__name__ == '__main__': + print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper) + print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper) + + nsi = ScraperImplementation() + nsi.disclogURL = "http://www.immi.gov.au/about/foi/foi-disclosures-2012.htm" + nsi.doScrape() + nsi.disclogURL = "http://www.immi.gov.au/about/foi/foi-disclosures-2011.htm" + nsi.doScrape() + nsi.disclogURL = "http://www.immi.gov.au/about/foi/foi-disclosures-2010.htm" + nsi.doScrape() + nsi.disclogURL = "http://www.immi.gov.au/about/foi/foi-disclosures-2009.htm" + nsi.doScrape() + --- a/documents/scrapers/b0a3281ba66efe173c5a33d5ef90ff76.txt +++ /dev/null @@ -1,2 +1,1 @@ -multipage immi --- /dev/null +++ b/documents/scrapers/c57c0bf315ce5977e730905707a2f6a3.py @@ -1,1 +1,19 @@ +import sys +import os +sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../')) +import genericScrapers + +class ScraperImplementation(genericScrapers.GenericPDFDisclogScraper): + + def __init__(self): + super(ScraperImplementation, self).__init__() + + +if __name__ == '__main__': + print 'Subclass:', issubclass(ScraperImplementation, + genericScrapers.GenericOAICDisclogScraper) + print 'Instance:', isinstance(ScraperImplementation(), + genericScrapers.GenericOAICDisclogScraper) + ScraperImplementation().doScrape() + --- a/documents/scrapers/c57c0bf315ce5977e730905707a2f6a3.txt +++ /dev/null @@ -1,3 +1,1 @@ -# pdf -http://www.awm.gov.au/about/AWM_Disclosure_Log.pdf --- a/documents/template.inc.php +++ b/documents/template.inc.php @@ -1,164 +1,180 @@ - - - - - - - - - + header('X-UA-Compatible: IE=edge,chrome=1'); + ?> + + + + + + + + + - Australian Disclosure Logs<?php if ($title != "") echo " - $title";?> - + Australian Disclosure Logs<?php if ($title != "") echo " - $title"; ?> + - - - - - + + + + + - - - - + + + + - - - + + + - - - - - - - -
- -
-
+ + +