From: maxious Date: Mon, 10 Dec 2012 12:00:55 +0000 Subject: prod fixes X-Git-Url: https://maxious.lambdacomplex.org/git/?p=disclosr.git&a=commitdiff&h=25a390fb11b5c08aac05de5d89358b64cf7f9830 --- prod fixes Former-commit-id: 130b8c05fff32afd5b4e3f8a9faadac5381bd456 --- --- a/couchdb/settee/src/classes/SetteeDatabase.class.php +++ b/couchdb/settee/src/classes/SetteeDatabase.class.php @@ -251,7 +251,7 @@ * * @return void */ - function get_view($design_doc, $view_name, $key = null, $descending = false, $limit = false) { + function get_view($design_doc, $view_name, $key = null, $descending = false, $limit = false, $reduce=null) { $id = "_design/" . urlencode($design_doc); $view_name = urlencode($view_name); $id .= "/_view/$view_name"; @@ -269,6 +269,13 @@ if ($descending) { $data .= "&descending=true"; } + if ($reduce != null) { + if ($reduce == true) { + $data .= "&reduce=true"; + } else { + $data .= "&reduce=false"; + } + } if ($limit) { $data .= "&limit=".$limit; } @@ -281,9 +288,11 @@ } $full_uri = $this->dbname . "/" . $this->safe_urlencode($id); + $full_uri = str_replace("%253Fgroup%253D","?group=",$full_uri); $full_uri = str_replace("%253Flimit%253D","?limit=",$full_uri); $ret = $this->rest_client->http_get($full_uri, $data); + //$ret['decoded'] = str_replace("?k","&k",$ret['decoded']); return $ret['decoded']; } --- a/documents/about.php +++ b/documents/about.php @@ -1,7 +1,7 @@

About

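The SetteeDatabase::get_view() change at the top of this commit threads an optional $reduce flag into the CouchDB query string (next to the ?group= URL rewrite), and the new agency.php below calls it with group=true to get one reduced row per agency. A rough sketch of those two query shapes using the couchdb-python client that scrape.py already imports; the database and view names are taken from the diff, the agency id is a placeholder:

import couchdb

couch = couchdb.Server('http://127.0.0.1:5984/')    # same server URL as scrape.py
foidocsdb = couch['disclosr-foidocuments']

# group=True collapses the byAgencyID map/reduce view to one row per agency;
# row.value holds the reduced count that agency.php prints as "(N records)".
for row in foidocsdb.view('app/byAgencyID', group=True):
    print row.key, row.value

# reduce=False asks for the raw mapped rows of a single agency instead of the count.
agency_rows = foidocsdb.view('app/byAgencyID', key='example-agency-id', reduce=False)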
--- /dev/null
+++ b/documents/agency.php
@@ -1,1 +1,41 @@
+get_db('disclosr-agencies');
+$idtoname = Array();
+foreach ($agenciesdb->get_view("app", "byCanonicalName")->rows as $row) {
+    $idtoname[$row->id] = trim($row->value->name);
+}
+$foidocsdb = $server->get_db('disclosr-foidocuments');
+
+include_header_documents((isset($_REQUEST['id']) ? $idtoname[$_REQUEST['id']] : 'Entries by Agency'));
+$endkey = (isset($_REQUEST['end_key']) ? $_REQUEST['end_key'] : '9999-99-99');
+?>
+
Read all the information released by Australian Federal Government agencies under the FOI Act in one place!
+RSS Icon All Agencies RSS Feed
+get_view("app", "byAgencyID", $_REQUEST['id'], false, false, false)->rows;
+        foreach ($rows as $row) {
+            //print_r($rows);
+            echo displayLogEntry($row, $idtoname);
+            if (!isset($startkey))
+                $startkey = $row->key;
+            $endkey = $row->key;
+        }
+    } else {
+        $rows = $foidocsdb->get_view("app", "byAgencyID?group=true", null, false, false, true)->rows;
+        if ($rows) {
+            foreach ($rows as $row) {
+                echo '' . $idtoname[$row->key] . " (" . $row->value . " records)\n";
+            }
+        }
+    }
+} catch (SetteeRestClientException $e) {
+    setteErrorHandler($e);
+}
+echo "next page ";
+include_footer_documents();
+?>
--- a/documents/charts.php
+++ b/documents/charts.php
@@ -1,6 +1,6 @@
 get_db('disclosr-agencies');
@@ -15,29 +15,28 @@

Charts

Lorem ipsum.

-
+
+
+
Read all the information released by Australian Federal Government agencies under the FOI Act in one place!
+RSS Icon All Agencies RSS Feed
+get_db('disclosr-agencies'); + +$idtoname = Array(); +foreach ($agenciesdb->get_view("app", "byCanonicalName")->rows as $row) { + $idtoname[$row->id] = trim($row->value->name); +} +$foidocsdb = $server->get_db('disclosr-foidocuments'); +try { + $rows = $foidocsdb->get_view("app", "byDate", Array($endkey, '0000-00-00'), true, 20)->rows; + if ($rows) { + foreach ($rows as $key => $row) { + echo displayLogEntry($row, $idtoname); + if (!isset($startkey)) $startkey = $row->key; + $endkey = $row->key; + } + } +} catch (SetteeRestClientException $e) { + setteErrorHandler($e); +} +echo "next page "; +*/ +include_footer_documents(); +?> + --- a/documents/disclogsList.php +++ b/documents/disclogsList.php @@ -1,7 +1,7 @@ --- /dev/null +++ b/documents/disclosr-documents.nja @@ -1,1 +1,7 @@ - +{ + "venv": "", + "project-type": "Import from sources", + "name": "disclosr-documents", + "license": "GNU General Public License v3", + "description": "" +} --- a/documents/genericScrapers.py +++ b/documents/genericScrapers.py @@ -13,11 +13,9 @@ from datetime import * import codecs +import difflib + from StringIO import StringIO - -from docx import * -from lxml import etree -import zipfile from pdfminer.pdfparser import PDFDocument, PDFParser from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter, process_pdf @@ -39,20 +37,45 @@ """ disclosr agency id """ if self.agencyID is None: self.agencyID = os.path.basename(sys.argv[0]).replace(".py", "") - return self.agencyID + return self.agencyID def getURL(self): """ disclog URL""" if self.disclogURL is None: agency = scrape.agencydb.get(self.getAgencyID()) self.disclogURL = agency['FOIDocumentsURL'] - return self.disclogURL + return self.disclogURL @abc.abstractmethod def doScrape(self): """ do the scraping """ return +class GenericHTMLDisclogScraper(GenericDisclogScraper): + + def doScrape(self): + foidocsdb = scrape.couch['disclosr-foidocuments'] + (url, mime_type, rcontent) = scrape.fetchURL(scrape.docsdb, + self.getURL(), "foidocuments", self.getAgencyID()) + content = rcontent + dochash = scrape.mkhash(content) + doc = foidocsdb.get(dochash) + if doc is None: + print "saving " + dochash + description = "This log may have updated but as it was not in a table last time we viewed it, we cannot extract what has changed. 
Please refer to the agency's website Disclosure Log to see the most recent entries" + last_attach = scrape.getLastAttachment(scrape.docsdb, self.getURL()) + if last_attach != None: + html_diff = difflib.HtmlDiff() + description = description + "\nChanges: " + description = description + html_diff.make_table(last_attach.read().split('\n'), + content.split('\n')) + edate = date.today().strftime("%Y-%m-%d") + doc = {'_id': dochash, 'agencyID': self.getAgencyID() + , 'url': self.getURL(), 'docID': dochash, + "date": edate, "title": "Disclosure Log Updated", "description": description} + foidocsdb.save(doc) + else: + print "already saved" class GenericPDFDisclogScraper(GenericDisclogScraper): @@ -62,14 +85,15 @@ self.getURL(), "foidocuments", self.getAgencyID()) laparams = LAParams() rsrcmgr = PDFResourceManager(caching=True) - outfp = StringIO.StringIO() + outfp = StringIO() device = TextConverter(rsrcmgr, outfp, codec='utf-8', laparams=laparams) - fp = StringIO.StringIO() + fp = StringIO() fp.write(content) - description = output.getvalue() + process_pdf(rsrcmgr, device, fp, set(), caching=True, check_extractable=True) + description = outfp.getvalue() fp.close() device.close() outfp.close() @@ -77,11 +101,10 @@ doc = foidocsdb.get(dochash) if doc is None: print "saving " + dochash - edate = datetime.fromtimestamp(mktime()).strftime("%Y-%m-%d") + edate = date.today().strftime("%Y-%m-%d") doc = {'_id': dochash, 'agencyID': self.getAgencyID() , 'url': self.getURL(), 'docID': dochash, - "date": edate, "title": "Disclosure Log Updated"} - self.getDescription(entry, entry, doc) + "date": edate, "title": "Disclosure Log Updated", "description": description} foidocsdb.save(doc) else: print "already saved" @@ -103,17 +126,16 @@ for paratext in paratextlist: newparatextlist.append(paratext.encode("utf-8")) ## Print our documnts test with two newlines under each paragraph - description = '\n\n'.join(newparatextlist) + description = '\n\n'.join(newparatextlist).strip(' \t\n\r') dochash = scrape.mkhash(description) doc = foidocsdb.get(dochash) if doc is None: print "saving " + dochash - edate = datetime.fromtimestamp(mktime()).strftime("%Y-%m-%d") + edate = time().strftime("%Y-%m-%d") doc = {'_id': dochash, 'agencyID': self.getAgencyID() , 'url': self.getURL(), 'docID': dochash, - "date": edate, "title": "Disclosure Log Updated"} - self.getDescription(entry, entry, doc) + "date": edate, "title": "Disclosure Log Updated", "description": description} foidocsdb.save(doc) else: print "already saved" @@ -201,10 +223,9 @@ (url, mime_type, content) = scrape.fetchURL(scrape.docsdb, self.getURL(), "foidocuments", self.getAgencyID()) if content is not None: - if mime_type is "text/html"\ - or mime_type is "application/xhtml+xml"\ - or mime_type is"application/xml": + if mime_type == "text/html" or mime_type == "application/xhtml+xml" or mime_type == "application/xml": # http://www.crummy.com/software/BeautifulSoup/documentation.html + print "parsing" soup = BeautifulSoup(content) table = self.getTable(soup) for row in self.getRows(table): @@ -222,11 +243,11 @@ dochash = scrape.mkhash( self.remove_control_chars( url + (''.join(id.stripped_strings)))) - doc = foidocsdb.get(hash) + doc = foidocsdb.get(dochash) if doc is None: - print "saving " + hash - doc = {'_id': hash, + print "saving " + dochash + doc = {'_id': dochash, 'agencyID': self.getAgencyID(), 'url': self.getURL(), 'docID': (''.join(id.stripped_strings))} --- a/documents/index.php +++ b/documents/index.php @@ -3,10 +3,11 @@ include('template.inc.php'); 
 include_header_documents("");
 include_once('../include/common.inc.php');
-$startkey = (isset($_REQUEST['start_key']) ? $_REQUEST['start_key'] : '9999-99-99');
+$endkey = (isset($_REQUEST['end_key']) ? $_REQUEST['end_key'] : '9999-99-99');
 ?>
+
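index.php (and the new agency.php) now page backwards through the byDate view by carrying the key of the last entry shown as end_key instead of start_key. A minimal sketch of that query pattern, again with couchdb-python; the view name, sentinel keys and 20-row page size come from the diff, everything else is illustrative:

import couchdb

foidocsdb = couchdb.Server('http://127.0.0.1:5984/')['disclosr-foidocuments']

end_key = '9999-99-99'            # would come from the ?end_key= request parameter
rows = list(foidocsdb.view('app/byDate',
                           startkey=end_key, endkey='0000-00-00',
                           descending=True, limit=20))
for row in rows:
    print row.key, row.id         # index.php renders each row via displayLogEntry()
if rows:
    next_end_key = rows[-1].key   # carried forward in the "next page" link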
Read all the information released by Australian Federal Government agencies under the FOI Act in one place!
+RSS Icon All Agencies RSS Feed
get_db('disclosr-agencies'); $idtoname = Array(); @@ -15,17 +16,18 @@ } $foidocsdb = $server->get_db('disclosr-foidocuments'); try { - $rows = $foidocsdb->get_view("app", "byDate", Array($startkey, '0000-00-00'), true, 20)->rows; + $rows = $foidocsdb->get_view("app", "byDate", Array($endkey, '0000-00-00'), true, 20)->rows; if ($rows) { foreach ($rows as $key => $row) { echo displayLogEntry($row, $idtoname); + if (!isset($startkey)) $startkey = $row->key; $endkey = $row->key; } } } catch (SetteeRestClientException $e) { setteErrorHandler($e); } -echo "next page"; +echo "next page "; include_footer_documents(); ?> --- a/documents/rss.xml.php +++ b/documents/rss.xml.php @@ -8,21 +8,28 @@ //Creating an instance of FeedWriter class. $TestFeed = new RSS2FeedWriter(); //Setting the channel elements -//Use wrapper functions for common channelelements -$TestFeed->setTitle('disclosurelo.gs Newest Entries - All'); -$TestFeed->setLink('http://disclosurelo.gs/rss.xml.php'); -$TestFeed->setDescription('disclosurelo.gs Newest Entries - All Agencies'); -$TestFeed->setChannelElement('language', 'en-us'); -$TestFeed->setChannelElement('pubDate', date(DATE_RSS, time())); - -//Retriving informations from database +////Retriving informations from database $idtoname = Array(); $agenciesdb = $server->get_db('disclosr-agencies'); foreach ($agenciesdb->get_view("app", "byCanonicalName")->rows as $row) { $idtoname[$row->id] = trim($row->value->name); } $foidocsdb = $server->get_db('disclosr-foidocuments'); -$rows = $foidocsdb->get_view("app", "byDate", Array('9999-99-99', '0000-00-00', 50), true)->rows; +if (isset($_REQUEST['id'])) { + $rows = $foidocsdb->get_view("app", "byAgencyID", $_REQUEST['id'], false, false, false)->rows; + $title = $idtoname[$_REQUEST['id']]; +} else { + $rows = $foidocsdb->get_view("app", "byDate", Array('9999-99-99', '0000-00-00', 50), true)->rows; + $title = 'All Agencies'; +} +//Use wrapper functions for common channelelements +$TestFeed->setTitle('disclosurelo.gs Newest Entries - '.$title); +$TestFeed->setLink('http://disclosurelo.gs/rss.xml.php'.(isset($_REQUEST['id'])? '?id='.$_REQUEST['id'] : '')); +$TestFeed->setDescription('disclosurelo.gs Newest Entries - '.$title); +$TestFeed->setChannelElement('language', 'en-us'); +$TestFeed->setChannelElement('pubDate', date(DATE_RSS, time())); + + //print_r($rows); foreach ($rows as $row) { //Create an empty FeedItem --- a/documents/runScrapers.sh +++ b/documents/runScrapers.sh @@ -1,3 +1,10 @@ -for f in scrapers/*.py; do echo "Processing $f file.."; python $f; done +for f in scrapers/*.py; + do echo "Processing $f file.."; + python $f; + if [ "$?" -ne "0" ]; then + echo "error"; + sleep 2; + fi +done --- a/documents/scrape.py +++ b/documents/scrape.py @@ -8,186 +8,198 @@ import time import os import mimetypes -import re import urllib import urlparse def mkhash(input): - return hashlib.md5(input).hexdigest().encode("utf-8") + return hashlib.md5(input).hexdigest().encode("utf-8") def canonurl(url): - r"""Return the canonical, ASCII-encoded form of a UTF-8 encoded URL, or '' - if the URL looks invalid. 
- >>> canonurl('\xe2\x9e\xa1.ws') # tinyarro.ws - 'http://xn--hgi.ws/' - """ - # strip spaces at the ends and ensure it's prefixed with 'scheme://' - url = url.strip() - if not url: - return '' - if not urlparse.urlsplit(url).scheme: - url = 'http://' + url - - # turn it into Unicode - #try: - # url = unicode(url, 'utf-8') - #except UnicodeDecodeError: - # return '' # bad UTF-8 chars in URL - - # parse the URL into its components - parsed = urlparse.urlsplit(url) - scheme, netloc, path, query, fragment = parsed - - # ensure scheme is a letter followed by letters, digits, and '+-.' chars - if not re.match(r'[a-z][-+.a-z0-9]*$', scheme, flags=re.I): - return '' - scheme = str(scheme) - - # ensure domain and port are valid, eg: sub.domain.<1-to-6-TLD-chars>[:port] - match = re.match(r'(.+\.[a-z0-9]{1,6})(:\d{1,5})?$', netloc, flags=re.I) - if not match: - return '' - domain, port = match.groups() - netloc = domain + (port if port else '') - netloc = netloc.encode('idna') - - # ensure path is valid and convert Unicode chars to %-encoded - if not path: - path = '/' # eg: 'http://google.com' -> 'http://google.com/' - path = urllib.quote(urllib.unquote(path.encode('utf-8')), safe='/;') - - # ensure query is valid - query = urllib.quote(urllib.unquote(query.encode('utf-8')), safe='=&?/') - - # ensure fragment is valid - fragment = urllib.quote(urllib.unquote(fragment.encode('utf-8'))) - - # piece it all back together, truncating it to a maximum of 4KB - url = urlparse.urlunsplit((scheme, netloc, path, query, fragment)) - return url[:4096] + r"""Return the canonical, ASCII-encoded form of a UTF-8 encoded URL, or '' + if the URL looks invalid. + >>> canonurl('\xe2\x9e\xa1.ws') # tinyarro.ws + 'http://xn--hgi.ws/' + """ + # strip spaces at the ends and ensure it's prefixed with 'scheme://' + url = url.strip() + if not url: + return '' + if not urlparse.urlsplit(url).scheme: + url = 'http://' + url + + # turn it into Unicode + #try: + # url = unicode(url, 'utf-8') + #except UnicodeDecodeError: + # return '' # bad UTF-8 chars in URL + + # parse the URL into its components + parsed = urlparse.urlsplit(url) + scheme, netloc, path, query, fragment = parsed + + # ensure scheme is a letter followed by letters, digits, and '+-.' 
chars + if not re.match(r'[a-z][-+.a-z0-9]*$', scheme, flags=re.I): + return '' + scheme = str(scheme) + + # ensure domain and port are valid, eg: sub.domain.<1-to-6-TLD-chars>[:port] + match = re.match(r'(.+\.[a-z0-9]{1,6})(:\d{1,5})?$', netloc, flags=re.I) + if not match: + return '' + domain, port = match.groups() + netloc = domain + (port if port else '') + netloc = netloc.encode('idna') + + # ensure path is valid and convert Unicode chars to %-encoded + if not path: + path = '/' # eg: 'http://google.com' -> 'http://google.com/' + path = urllib.quote(urllib.unquote(path.encode('utf-8')), safe='/;') + + # ensure query is valid + query = urllib.quote(urllib.unquote(query.encode('utf-8')), safe='=&?/') + + # ensure fragment is valid + fragment = urllib.quote(urllib.unquote(fragment.encode('utf-8'))) + + # piece it all back together, truncating it to a maximum of 4KB + url = urlparse.urlunsplit((scheme, netloc, path, query, fragment)) + return url[:4096] def fullurl(url,href): - href = href.replace(" ","%20") - href = re.sub('#.*$','',href) - return urljoin(url,href) + href = href.replace(" ","%20") + href = re.sub('#.*$','',href) + return urljoin(url,href) #http://diveintopython.org/http_web_services/etags.html -class NotModifiedHandler(urllib2.BaseHandler): - def http_error_304(self, req, fp, code, message, headers): - addinfourl = urllib2.addinfourl(fp, headers, req.get_full_url()) - addinfourl.code = code - return addinfourl +class NotModifiedHandler(urllib2.BaseHandler): + def http_error_304(self, req, fp, code, message, headers): + addinfourl = urllib2.addinfourl(fp, headers, req.get_full_url()) + addinfourl.code = code + return addinfourl + +def getLastAttachment(docsdb,url): + hash = mkhash(url) + doc = docsdb.get(hash) + if doc != None: + last_attachment_fname = doc["_attachments"].keys()[-1] + last_attachment = docsdb.get_attachment(doc,last_attachment_fname) + return last_attachment + else: + return None def fetchURL(docsdb, url, fieldName, agencyID, scrape_again=True): - url = canonurl(url) - hash = mkhash(url) - req = urllib2.Request(url) - print "Fetching %s (%s)" % (url,hash) - if url.startswith("mailto") or url.startswith("javascript") or url.startswith("#") or url == None or url == "": - print "Not a valid HTTP url" - return (None,None,None) - doc = docsdb.get(hash) - if doc == None: - doc = {'_id': hash, 'agencyID': agencyID, 'url': url, 'fieldName':fieldName} - else: - if (('page_scraped' in doc) and (time.time() - doc['page_scraped']) < 60*24*14*1000): - print "Uh oh, trying to scrape URL again too soon!" 
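# Editor's sketch, not part of the diff above: fetchURL() stores each page's ETag and
# Last-Modified headers in its CouchDB document and replays them on the next request,
# so an unchanged page costs a single 304 and the body is reused from the last saved
# attachment. The same conditional-GET pattern reduced to plain urllib2 (the diff's
# NotModifiedHandler is replaced here by catching the HTTPError urllib2 raises for 304):
import urllib2

def conditional_get(url, etag=None, last_modified=None):
    req = urllib2.Request(url)
    if etag:
        req.add_header("If-None-Match", etag)
    if last_modified:
        req.add_header("If-Modified-Since", last_modified)
    try:
        handle = urllib2.urlopen(req)
        return handle.read()      # fresh content; caller saves it as a new attachment
    except urllib2.HTTPError, e:
        if e.code == 304:
            return None           # unchanged; caller falls back to the stored attachment
        raise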
- last_attachment_fname = doc["_attachments"].keys()[-1] - last_attachment = docsdb.get_attachment(doc,last_attachment_fname) - content = last_attachment - return (doc['url'],doc['mime_type'],content) - if scrape_again == False: - print "Not scraping this URL again as requested" - return (None,None,None) - - time.sleep(3) # wait 3 seconds to give webserver time to recover - - req.add_header("User-Agent", "Mozilla/4.0 (compatible; Prometheus webspider; owner maxious@lambdacomplex.org)") - #if there is a previous version stored in couchdb, load caching helper tags - if doc.has_key('etag'): - req.add_header("If-None-Match", doc['etag']) - if doc.has_key('last_modified'): - req.add_header("If-Modified-Since", doc['last_modified']) - - opener = urllib2.build_opener(NotModifiedHandler()) - try: - url_handle = opener.open(req) - doc['url'] = url_handle.geturl() # may have followed a redirect to a new url - headers = url_handle.info() # the addinfourls have the .info() too - doc['etag'] = headers.getheader("ETag") - doc['last_modified'] = headers.getheader("Last-Modified") - doc['date'] = headers.getheader("Date") - doc['page_scraped'] = time.time() - doc['web_server'] = headers.getheader("Server") - doc['via'] = headers.getheader("Via") - doc['powered_by'] = headers.getheader("X-Powered-By") - doc['file_size'] = headers.getheader("Content-Length") - content_type = headers.getheader("Content-Type") - if content_type != None: - doc['mime_type'] = content_type.split(";")[0] - else: - (type,encoding) = mimetypes.guess_type(url) - doc['mime_type'] = type - if hasattr(url_handle, 'code'): - if url_handle.code == 304: - print "the web page has not been modified" - return (None,None,None) - else: - content = url_handle.read() - docsdb.save(doc) - doc = docsdb.get(hash) # need to get a _rev - docsdb.put_attachment(doc, content, str(time.time())+"-"+os.path.basename(url), doc['mime_type']) - return (doc['url'], doc['mime_type'], content) - #store as attachment epoch-filename - - except urllib2.URLError as e: - error = "" - if hasattr(e, 'reason'): - error = "error %s in downloading %s" % (str(e.reason), url) - elif hasattr(e, 'code'): - error = "error %s in downloading %s" % (e.code, url) - print error - doc['error'] = error - docsdb.save(doc) - return (None,None,None) + url = canonurl(url) + hash = mkhash(url) + req = urllib2.Request(url) + print "Fetching %s (%s)" % (url,hash) + if url.startswith("mailto") or url.startswith("javascript") or url.startswith("#") or url == None or url == "": + print "Not a valid HTTP url" + return (None,None,None) + doc = docsdb.get(hash) + if doc == None: + doc = {'_id': hash, 'agencyID': agencyID, 'url': url, 'fieldName':fieldName} + else: + if (('page_scraped' in doc) and (time.time() - doc['page_scraped']) < 60*24*14*1000): + print "Uh oh, trying to scrape URL again too soon!"+hash + last_attachment_fname = doc["_attachments"].keys()[-1] + last_attachment = docsdb.get_attachment(doc,last_attachment_fname) + content = last_attachment + return (doc['url'],doc['mime_type'],content.read()) + if scrape_again == False: + print "Not scraping this URL again as requested" + return (doc['url'],doc['mime_type'],content.read()) + + req.add_header("User-Agent", "Mozilla/4.0 (compatible; Prometheus webspider; owner maxious@lambdacomplex.org)") + #if there is a previous version stored in couchdb, load caching helper tags + if doc.has_key('etag'): + req.add_header("If-None-Match", doc['etag']) + if doc.has_key('last_modified'): + req.add_header("If-Modified-Since", 
doc['last_modified']) + + opener = urllib2.build_opener(NotModifiedHandler()) + try: + url_handle = opener.open(req) + doc['url'] = url_handle.geturl() # may have followed a redirect to a new url + headers = url_handle.info() # the addinfourls have the .info() too + doc['etag'] = headers.getheader("ETag") + doc['last_modified'] = headers.getheader("Last-Modified") + doc['date'] = headers.getheader("Date") + doc['page_scraped'] = time.time() + doc['web_server'] = headers.getheader("Server") + doc['via'] = headers.getheader("Via") + doc['powered_by'] = headers.getheader("X-Powered-By") + doc['file_size'] = headers.getheader("Content-Length") + content_type = headers.getheader("Content-Type") + if content_type != None: + doc['mime_type'] = content_type.split(";")[0] + else: + (type,encoding) = mimetypes.guess_type(url) + doc['mime_type'] = type + if hasattr(url_handle, 'code'): + if url_handle.code == 304: + print "the web page has not been modified"+hash + last_attachment_fname = doc["_attachments"].keys()[-1] + last_attachment = docsdb.get_attachment(doc,last_attachment_fname) + content = last_attachment + return (doc['url'],doc['mime_type'],content.read()) + else: + print "new webpage loaded" + content = url_handle.read() + docsdb.save(doc) + doc = docsdb.get(hash) # need to get a _rev + docsdb.put_attachment(doc, content, str(time.time())+"-"+os.path.basename(url), doc['mime_type']) + return (doc['url'], doc['mime_type'], content) + #store as attachment epoch-filename + + except urllib2.URLError as e: + print "error!" + error = "" + if hasattr(e, 'reason'): + error = "error %s in downloading %s" % (str(e.reason), url) + elif hasattr(e, 'code'): + error = "error %s in downloading %s" % (e.code, url) + print error + doc['error'] = error + docsdb.save(doc) + return (None,None,None) def scrapeAndStore(docsdb, url, depth, fieldName, agencyID): - (url,mime_type,content) = fetchURL(docsdb, url, fieldName, agencyID) - badURLs = ["http://www.ausport.gov.au/supporting/funding/grants_and_scholarships/grant_funding_report"] - if content != None and depth > 0 and url != "http://www.ausport.gov.au/supporting/funding/grants_and_scholarships/grant_funding_report": - if mime_type == "text/html" or mime_type == "application/xhtml+xml" or mime_type =="application/xml": - # http://www.crummy.com/software/BeautifulSoup/documentation.html - soup = BeautifulSoup(content) - navIDs = soup.findAll(id=re.compile('nav|Nav|menu|bar|left|right|sidebar|more-links|breadcrumb|footer|header')) - for nav in navIDs: - print "Removing element", nav['id'] - nav.extract() - navClasses = soup.findAll(attrs={'class' : re.compile('nav|menu|bar|left|right|sidebar|more-links|breadcrumb|footer|header')}) - for nav in navClasses: - print "Removing element", nav['class'] - nav.extract() - links = soup.findAll('a') # soup.findAll('a', id=re.compile("^p-")) - linkurls = set([]) - for link in links: - if link.has_key("href"): - if link['href'].startswith("http"): - # lets not do external links for now - # linkurls.add(link['href']) - None - if link['href'].startswith("mailto"): - # not http - None - if link['href'].startswith("javascript"): - # not http - None - else: - # remove anchors and spaces in urls - linkurls.add(fullurl(url,link['href'])) - for linkurl in linkurls: - #print linkurl - scrapeAndStore(docsdb, linkurl, depth-1, fieldName, agencyID) + (url,mime_type,content) = fetchURL(docsdb, url, fieldName, agencyID) + badURLs = ["http://www.ausport.gov.au/supporting/funding/grants_and_scholarships/grant_funding_report"] + if 
content != None and depth > 0 and url != "http://www.ausport.gov.au/supporting/funding/grants_and_scholarships/grant_funding_report": + if mime_type == "text/html" or mime_type == "application/xhtml+xml" or mime_type =="application/xml": + # http://www.crummy.com/software/BeautifulSoup/documentation.html + soup = BeautifulSoup(content) + navIDs = soup.findAll(id=re.compile('nav|Nav|menu|bar|left|right|sidebar|more-links|breadcrumb|footer|header')) + for nav in navIDs: + print "Removing element", nav['id'] + nav.extract() + navClasses = soup.findAll(attrs={'class' : re.compile('nav|menu|bar|left|right|sidebar|more-links|breadcrumb|footer|header')}) + for nav in navClasses: + print "Removing element", nav['class'] + nav.extract() + links = soup.findAll('a') # soup.findAll('a', id=re.compile("^p-")) + linkurls = set([]) + for link in links: + if link.has_key("href"): + if link['href'].startswith("http"): + # lets not do external links for now + # linkurls.add(link['href']) + None + if link['href'].startswith("mailto"): + # not http + None + if link['href'].startswith("javascript"): + # not http + None + else: + # remove anchors and spaces in urls + linkurls.add(fullurl(url,link['href'])) + for linkurl in linkurls: + #print linkurl + scrapeAndStore(docsdb, linkurl, depth-1, fieldName, agencyID) #couch = couchdb.Server('http://192.168.1.148:5984/') couch = couchdb.Server('http://127.0.0.1:5984/') @@ -196,20 +208,20 @@ docsdb = couch['disclosr-documents'] if __name__ == "__main__": - for row in agencydb.view('app/getScrapeRequired'): #not recently scraped agencies view? - agency = agencydb.get(row.id) - print agency['name'] - for key in agency.keys(): - if key == "FOIDocumentsURL" and "status" not in agency.keys: - scrapeAndStore(docsdb, agency[key],0,key,agency['_id']) - if key == 'website' and False: - scrapeAndStore(docsdb, agency[key],0,key,agency['_id']) - agency['metadata']['lastScraped'] = time.time() - if key.endswith('URL') and False: - print key - depth = 1 - if 'scrapeDepth' in agency.keys(): - depth = agency['scrapeDepth'] - scrapeAndStore(docsdb, agency[key],depth,key,agency['_id']) - agencydb.save(agency) - + for row in agencydb.view('app/getScrapeRequired'): #not recently scraped agencies view? 
+ agency = agencydb.get(row.id) + print agency['name'] + for key in agency.keys(): + if key == "FOIDocumentsURL" and "status" not in agency.keys: + scrapeAndStore(docsdb, agency[key],0,key,agency['_id']) + if key == 'website' and False: + scrapeAndStore(docsdb, agency[key],0,key,agency['_id']) + agency['metadata']['lastScraped'] = time.time() + if key.endswith('URL') and False: + print key + depth = 1 + if 'scrapeDepth' in agency.keys(): + depth = agency['scrapeDepth'] + scrapeAndStore(docsdb, agency[key],depth,key,agency['_id']) + agencydb.save(agency) + --- /dev/null +++ b/documents/scrapers/0049d35216493c545ef5f7f000e6b252.py @@ -1,1 +1,19 @@ +import sys +import os +sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../')) +import genericScrapers + +class ScraperImplementation(genericScrapers.GenericPDFDisclogScraper): + + def __init__(self): + super(ScraperImplementation, self).__init__() + + +if __name__ == '__main__': + print 'Subclass:', issubclass(ScraperImplementation, + genericScrapers.GenericPDFDisclogScraper) + print 'Instance:', isinstance(ScraperImplementation(), + genericScrapers.GenericPDFDisclogScraper) + ScraperImplementation().doScrape() + --- a/documents/scrapers/0049d35216493c545ef5f7f000e6b252.txt +++ /dev/null @@ -1,2 +1,1 @@ -pdf --- /dev/null +++ b/documents/scrapers/00a294de663db69062ca09aede7c0487.py @@ -1,1 +1,47 @@ +import sys,os +sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../')) +import genericScrapers +import dateutil +from dateutil.parser import * +from datetime import * + +class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper): + + def __init__(self): + super(ScraperImplementation, self).__init__() + def getDate(self, content, entry, doc): + date = ''.join(entry.find('th').stripped_strings).strip() + (a, b, c) = date.partition("(") + date = self.remove_control_chars(a.replace("Octber", "October")) + print date + edate = parse(date, dayfirst=True, fuzzy=True).strftime("%Y-%m-%d") + print edate + doc.update({'date': edate}) + return + def getColumnCount(self): + return 4 + + def getTable(self, soup): + return soup.find(summary="List of Defence documents released under Freedom of Information requets") + + def getColumns(self, columns): + (id, description, access, notes) = columns + return (id, None, description, description, notes) + + +if __name__ == '__main__': + print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper) + print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper) + + nsi = ScraperImplementation() + nsi.disclogURL = "http://www.defence.gov.au/foi/disclosure_log_201213.cfm" + nsi.doScrape() + + nsi.disclogURL = "http://www.defence.gov.au/foi/disclosure_log_201112.cfm" + nsi.doScrape() + + nsi.disclogURL = "http://www.defence.gov.au/foi/disclosure_log_201011.cfm" + nsi.doScrape() + + --- a/documents/scrapers/00a294de663db69062ca09aede7c0487.txt +++ /dev/null @@ -1,2 +1,1 @@ -multipage --- /dev/null +++ b/documents/scrapers/0ae822d1a748e60d90f0b79b97d5a3e5.py @@ -1,1 +1,19 @@ +import sys +import os +sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../')) +import genericScrapers + +class ScraperImplementation(genericScrapers.GenericHTMLDisclogScraper): + + def __init__(self): + super(ScraperImplementation, self).__init__() + + +if __name__ == '__main__': + print 'Subclass:', issubclass(ScraperImplementation, + genericScrapers.GenericHTMLDisclogScraper) + print 'Instance:', 
isinstance(ScraperImplementation(), + genericScrapers.GenericHTMLDisclogScraper) + ScraperImplementation().doScrape() + --- a/documents/scrapers/0ae822d1a748e60d90f0b79b97d5a3e5.txt +++ /dev/null @@ -1,2 +1,1 @@ -ACMA style --- /dev/null +++ b/documents/scrapers/1803322b27286950cab0c543168b5f21.py @@ -1,1 +1,58 @@ +import sys,os +sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../')) +import genericScrapers +import dateutil +from dateutil.parser import * +from datetime import * +import scrape +from bs4 import BeautifulSoup +class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper): + def __init__(self): + super(ScraperImplementation, self).__init__() + + def getDescription(self,content, entry,doc): + link = None + links = [] + description = "" + for atag in entry.find_all('a'): + if atag.has_key('href'): + link = scrape.fullurl(self.getURL(), atag['href']) + (url, mime_type, htcontent) = scrape.fetchURL(scrape.docsdb, link, "foidocuments", self.getAgencyID(), False) + if htcontent != None: + if mime_type == "text/html" or mime_type == "application/xhtml+xml" or mime_type =="application/xml": + soup = BeautifulSoup(htcontent) + row = soup.find(id="content_div_148050") + description = ''.join(row.stripped_strings) + for atag in row.find_all("a"): + if atag.has_key('href'): + links.append(scrape.fullurl(link, atag['href'])) + + if links != []: + doc.update({'links': links}) + if description != "": + doc.update({ 'description': description}) + def getColumnCount(self): + return 4 + + def getColumns(self, columns): + (id, date, datepub, title) = columns + return (id, date, title, title, None) + + +if __name__ == '__main__': + print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper) + print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper) + + nsi = ScraperImplementation() + nsi.disclogURL = "http://www.dbcde.gov.au/about_us/freedom_of_information_disclosure_log/foi_list?result_146858_result_page=1" + nsi.doScrape() + nsi.disclogURL = "http://www.dbcde.gov.au/about_us/freedom_of_information_disclosure_log/foi_list?result_146858_result_page=2" + nsi.doScrape() + nsi.disclogURL = "http://www.dbcde.gov.au/about_us/freedom_of_information_disclosure_log/foi_list?result_146858_result_page=3" + nsi.doScrape() + nsi.disclogURL = "http://www.dbcde.gov.au/about_us/freedom_of_information_disclosure_log/foi_list?result_146858_result_page=4" + nsi.doScrape() + nsi.disclogURL = "http://www.dbcde.gov.au/about_us/freedom_of_information_disclosure_log/foi_list?result_146858_result_page=5" + nsi.doScrape() + --- a/documents/scrapers/1803322b27286950cab0c543168b5f21.txt +++ /dev/null @@ -1,2 +1,1 @@ -multipage log --- /dev/null +++ b/documents/scrapers/31685505438d393f45a90f442b8fa27f.py @@ -1,1 +1,19 @@ +import sys +import os +sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../')) +import genericScrapers + +class ScraperImplementation(genericScrapers.GenericPDFDisclogScraper): + + def __init__(self): + super(ScraperImplementation, self).__init__() + + +if __name__ == '__main__': + print 'Subclass:', issubclass(ScraperImplementation, + genericScrapers.GenericPDFDisclogScraper) + print 'Instance:', isinstance(ScraperImplementation(), + genericScrapers.GenericPDFDisclogScraper) + ScraperImplementation().doScrape() + --- a/documents/scrapers/31685505438d393f45a90f442b8fa27f.txt +++ /dev/null @@ -1,2 +1,1 @@ -pdf --- /dev/null +++ b/documents/scrapers/3e2f110af49d62833a835bd257771ffb.py 
@@ -1,1 +1,19 @@ +import sys +import os +sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../')) +import genericScrapers + +class ScraperImplementation(genericScrapers.GenericHTMLDisclogScraper): + + def __init__(self): + super(ScraperImplementation, self).__init__() + + +if __name__ == '__main__': + print 'Subclass:', issubclass(ScraperImplementation, + genericScrapers.GenericHTMLDisclogScraper) + print 'Instance:', isinstance(ScraperImplementation(), + genericScrapers.GenericHTMLDisclogScraper) + ScraperImplementation().doScrape() + --- a/documents/scrapers/3e2f110af49d62833a835bd257771ffb.txt +++ /dev/null @@ -1,2 +1,1 @@ -no disclog --- /dev/null +++ b/documents/scrapers/4d2af2dcc72f1703bbf04b13b03720a8.py @@ -1,1 +1,19 @@ +import sys +import os +sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../')) +import genericScrapers + +class ScraperImplementation(genericScrapers.GenericHTMLDisclogScraper): + + def __init__(self): + super(ScraperImplementation, self).__init__() + + +if __name__ == '__main__': + print 'Subclass:', issubclass(ScraperImplementation, + genericScrapers.GenericHTMLDisclogScraper) + print 'Instance:', isinstance(ScraperImplementation(), + genericScrapers.GenericHTMLDisclogScraper) + ScraperImplementation().doScrape() + --- a/documents/scrapers/4d2af2dcc72f1703bbf04b13b03720a8.txt +++ /dev/null @@ -1,2 +1,1 @@ -no disclog --- a/documents/scrapers/50601505ef69483121a6d130bb0515e4.txt +++ b/documents/scrapers/50601505ef69483121a6d130bb0515e4.txt @@ -1,1 +1,1 @@ -apsc has ACMA style disclog +ACMA style --- /dev/null +++ b/documents/scrapers/525c3953187da08cd702359b2fc2997f.py @@ -1,1 +1,19 @@ +import sys +import os +sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../')) +import genericScrapers + +class ScraperImplementation(genericScrapers.GenericHTMLDisclogScraper): + + def __init__(self): + super(ScraperImplementation, self).__init__() + + +if __name__ == '__main__': + print 'Subclass:', issubclass(ScraperImplementation, + genericScrapers.GenericHTMLDisclogScraper) + print 'Instance:', isinstance(ScraperImplementation(), + genericScrapers.GenericHTMLDisclogScraper) + ScraperImplementation().doScrape() + --- a/documents/scrapers/525c3953187da08cd702359b2fc2997f.txt +++ /dev/null @@ -1,2 +1,1 @@ -no disclog --- /dev/null +++ b/documents/scrapers/627f116dfe42c9f27ad6747be0aa44e2.py @@ -1,1 +1,19 @@ +import sys +import os +sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../')) +import genericScrapers + +class ScraperImplementation(genericScrapers.GenericHTMLDisclogScraper): + + def __init__(self): + super(ScraperImplementation, self).__init__() + + +if __name__ == '__main__': + print 'Subclass:', issubclass(ScraperImplementation, + genericScrapers.GenericHTMLDisclogScraper) + print 'Instance:', isinstance(ScraperImplementation(), + genericScrapers.GenericHTMLDisclogScraper) + ScraperImplementation().doScrape() + --- a/documents/scrapers/627f116dfe42c9f27ad6747be0aa44e2.txt +++ /dev/null @@ -1,1 +1,1 @@ -no disclog + --- /dev/null +++ b/documents/scrapers/6afdde1d4ff1ad8d8cfe1a8675ea83bd.py @@ -1,1 +1,19 @@ +import sys +import os +sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../')) +import genericScrapers + +class ScraperImplementation(genericScrapers.GenericPDFDisclogScraper): + + def __init__(self): + super(ScraperImplementation, self).__init__() + + +if __name__ == '__main__': + print 'Subclass:', issubclass(ScraperImplementation, + genericScrapers.GenericOAICDisclogScraper) + 
print 'Instance:', isinstance(ScraperImplementation(), + genericScrapers.GenericOAICDisclogScraper) + ScraperImplementation().doScrape() + --- a/documents/scrapers/6afdde1d4ff1ad8d8cfe1a8675ea83bd.txt +++ /dev/null @@ -1,2 +1,1 @@ -PDF --- /dev/null +++ b/documents/scrapers/795e7a8afb39a420360aa207b0cb1306.py @@ -1,1 +1,19 @@ +import sys +import os +sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../')) +import genericScrapers + +class ScraperImplementation(genericScrapers.GenericHTMLDisclogScraper): + + def __init__(self): + super(ScraperImplementation, self).__init__() + + +if __name__ == '__main__': + print 'Subclass:', issubclass(ScraperImplementation, + genericScrapers.GenericHTMLDisclogScraper) + print 'Instance:', isinstance(ScraperImplementation(), + genericScrapers.GenericHTMLDisclogScraper) + ScraperImplementation().doScrape() + --- a/documents/scrapers/795e7a8afb39a420360aa207b0cb1306.txt +++ /dev/null @@ -1,2 +1,1 @@ -no disclog --- /dev/null +++ b/documents/scrapers/7b39ce7f362a0af9a711eaf223943eea.py @@ -1,1 +1,19 @@ +import sys +import os +sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../')) +import genericScrapers + +class ScraperImplementation(genericScrapers.GenericHTMLDisclogScraper): + + def __init__(self): + super(ScraperImplementation, self).__init__() + + +if __name__ == '__main__': + print 'Subclass:', issubclass(ScraperImplementation, + genericScrapers.GenericHTMLDisclogScraper) + print 'Instance:', isinstance(ScraperImplementation(), + genericScrapers.GenericHTMLDisclogScraper) + ScraperImplementation().doScrape() + --- a/documents/scrapers/7b39ce7f362a0af9a711eaf223943eea.txt +++ /dev/null @@ -1,2 +1,1 @@ -no disclog --- /dev/null +++ b/documents/scrapers/7c6adc1d41cf029bf1a0959e5156477a.py @@ -1,1 +1,51 @@ +import sys +import os +sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../')) +import genericScrapers +import scrape +from datetime import date +from pyquery import PyQuery as pq +from lxml import etree +import urllib +import dateutil +from dateutil.parser import * +class ACMADisclogScraper(genericScrapers.GenericDisclogScraper): + + def doScrape(self): + foidocsdb = scrape.couch['disclosr-foidocuments'] + (url, mime_type, content) = scrape.fetchURL(scrape.docsdb, + self.getURL(), "foidocuments", self.getAgencyID()) + + d = pq(content) + d.make_links_absolute(base_url = self.getURL()) + for table in d('table').items(): + title= table('thead').text() + print title + (idate,descA,descB,link,deldate,notes) = table('tbody tr').map(lambda i, e: pq(e).children().eq(1).text()) + links = table('a').map(lambda i, e: pq(e).attr('href')) + description = descA+" "+descB + edate = parse(idate[:12], dayfirst=True, fuzzy=True).strftime("%Y-%m-%d") + print edate + dochash = scrape.mkhash(self.remove_control_chars(title)) + doc = foidocsdb.get(dochash) + if doc is None: + print "saving " + dochash + edate = date.today().strftime("%Y-%m-%d") + doc = {'_id': dochash, 'agencyID': self.getAgencyID() + , 'url': self.getURL(), 'docID': dochash, + "links": links, + "date": edate, "notes": notes, "title": title, "description": description} + #print doc + foidocsdb.save(doc) + else: + print "already saved" + + +if __name__ == '__main__': + print 'Subclass:', issubclass(ACMADisclogScraper, + genericScrapers.GenericDisclogScraper) + print 'Instance:', isinstance(ACMADisclogScraper(), + genericScrapers.GenericDisclogScraper) + ACMADisclogScraper().doScrape() + --- a/documents/scrapers/7c6adc1d41cf029bf1a0959e5156477a.txt +++ 
/dev/null @@ -1,1 +1,1 @@ -acma style + --- /dev/null +++ b/documents/scrapers/8317df630946937864d31a4728ad8ee8.py @@ -1,1 +1,19 @@ +import sys +import os +sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../')) +import genericScrapers + +class ScraperImplementation(genericScrapers.GenericPDFDisclogScraper): + + def __init__(self): + super(ScraperImplementation, self).__init__() + + +if __name__ == '__main__': + print 'Subclass:', issubclass(ScraperImplementation, + genericScrapers.GenericOAICDisclogScraper) + print 'Instance:', isinstance(ScraperImplementation(), + genericScrapers.GenericOAICDisclogScraper) + ScraperImplementation().doScrape() + --- a/documents/scrapers/8317df630946937864d31a4728ad8ee8.txt +++ /dev/null @@ -1,2 +1,1 @@ -pdf --- /dev/null +++ b/documents/scrapers/8796220032faf94501bd366763263685.py @@ -1,1 +1,37 @@ +import sys,os +sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../')) +import genericScrapers +import dateutil +from dateutil.parser import * +from datetime import * + +class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper): + + def __init__(self): + super(ScraperImplementation, self).__init__() + + def getColumnCount(self): + return 6 + + def getColumns(self, columns): + (id, date, title, description, datepub, notes) = columns + return (id, date, title, description, notes) + + +if __name__ == '__main__': + print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper) + print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper) + + nsi = ScraperImplementation() + nsi.disclogURL = "http://www.dpmc.gov.au/foi/ips/disclosure_logs/pmo/2011-12.cfm" + nsi.doScrape() + nsi.disclogURL = "http://www.dpmc.gov.au/foi/ips/disclosure_logs/dpmc/2011-12.cfm" + nsi.doScrape() + nsi.disclogURL = "http://www.dpmc.gov.au/foi/ips/disclosure_logs/dpmc/2012-13.cfm" + nsi.doScrape() + nsi.disclogURL = "http://www.dpmc.gov.au/foi/ips/disclosure_logs/omsi/2011-12.cfm" + nsi.doScrape() + nsi.disclogURL = "http://www.dpmc.gov.au/foi/ips/disclosure_logs/omps/2012-13.cfm" + nsi.doScrape() + --- a/documents/scrapers/8796220032faf94501bd366763263685.txt +++ /dev/null @@ -1,2 +1,1 @@ -multiple pages --- a/documents/scrapers/8e874a2fde8aa0ccdc6d14573d766540.py +++ b/documents/scrapers/8e874a2fde8aa0ccdc6d14573d766540.py @@ -3,7 +3,7 @@ import genericScrapers import scrape from bs4 import BeautifulSoup -import codecs +import codecs #http://www.doughellmann.com/PyMOTW/abc/ class NewScraperImplementation(genericScrapers.GenericOAICDisclogScraper): def getDescription(self,content, entry,doc): @@ -20,7 +20,7 @@ soup = BeautifulSoup(htcontent) for text in soup.find(id="divFullWidthColumn").stripped_strings: description = description + text.encode('ascii', 'ignore') - + for atag in soup.find(id="divFullWidthColumn").find_all("a"): if atag.has_key('href'): links.append(scrape.fullurl(link,atag['href'])) @@ -76,11 +76,10 @@ if __name__ == '__main__': print 'Subclass:', issubclass(NewScraperImplementation, genericScrapers.GenericOAICDisclogScraper) print 'Instance:', isinstance(NewScraperImplementation(), genericScrapers.GenericOAICDisclogScraper) - #NewScraperImplementation().doScrape() + NewScraperImplementation().doScrape() print 'Subclass:', issubclass(OldScraperImplementation, genericScrapers.GenericOAICDisclogScraper) print 'Instance:', isinstance(OldScraperImplementation(), genericScrapers.GenericOAICDisclogScraper) osi = OldScraperImplementation() osi.disclogURL = 
"http://archive.treasury.gov.au/content/foi_publications.asp?year=-1&abstract=0&classification=&=&titl=Disclosure+Log+-+Documents+Released+Under+FOI" osi.doScrape() -# old site too --- /dev/null +++ b/documents/scrapers/9f4815bfdcb918a036e4bb43a30f8d77.py @@ -1,1 +1,19 @@ +import sys +import os +sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../')) +import genericScrapers + +class ScraperImplementation(genericScrapers.GenericHTMLDisclogScraper): + + def __init__(self): + super(ScraperImplementation, self).__init__() + + +if __name__ == '__main__': + print 'Subclass:', issubclass(ScraperImplementation, + genericScrapers.GenericHTMLDisclogScraper) + print 'Instance:', isinstance(ScraperImplementation(), + genericScrapers.GenericHTMLDisclogScraper) + ScraperImplementation().doScrape() + --- a/documents/scrapers/9f4815bfdcb918a036e4bb43a30f8d77.txt +++ /dev/null @@ -1,1 +1,1 @@ -no disclog + --- /dev/null +++ b/documents/scrapers/b0a3281ba66efe173c5a33d5ef90ff76.py @@ -1,1 +1,35 @@ +import sys,os +sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../')) +import genericScrapers +import dateutil +from dateutil.parser import * +from datetime import * + +class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper): + + def __init__(self): + super(ScraperImplementation, self).__init__() + + def getColumnCount(self): + return 2 + + def getColumns(self, columns): + (date, title) = columns + return (title, date, title, title, None) + + +if __name__ == '__main__': + print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper) + print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper) + + nsi = ScraperImplementation() + nsi.disclogURL = "http://www.immi.gov.au/about/foi/foi-disclosures-2012.htm" + nsi.doScrape() + nsi.disclogURL = "http://www.immi.gov.au/about/foi/foi-disclosures-2011.htm" + nsi.doScrape() + nsi.disclogURL = "http://www.immi.gov.au/about/foi/foi-disclosures-2010.htm" + nsi.doScrape() + nsi.disclogURL = "http://www.immi.gov.au/about/foi/foi-disclosures-2009.htm" + nsi.doScrape() + --- a/documents/scrapers/b0a3281ba66efe173c5a33d5ef90ff76.txt +++ /dev/null @@ -1,2 +1,1 @@ -multipage immi --- /dev/null +++ b/documents/scrapers/bb96fe4065afb7e0872136dd657f9369.py @@ -1,1 +1,19 @@ +import sys +import os +sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../')) +import genericScrapers + +class ScraperImplementation(genericScrapers.GenericHTMLDisclogScraper): + + def __init__(self): + super(ScraperImplementation, self).__init__() + + +if __name__ == '__main__': + print 'Subclass:', issubclass(ScraperImplementation, + genericScrapers.GenericHTMLDisclogScraper) + print 'Instance:', isinstance(ScraperImplementation(), + genericScrapers.GenericHTMLDisclogScraper) + ScraperImplementation().doScrape() + --- a/documents/scrapers/bb96fe4065afb7e0872136dd657f9369.txt +++ /dev/null @@ -1,1 +1,1 @@ -no disclog + --- /dev/null +++ b/documents/scrapers/bf6e587f166040b63681cd2ff76fbfdf.py @@ -1,1 +1,19 @@ +import sys +import os +sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../')) +import genericScrapers + +class ScraperImplementation(genericScrapers.GenericHTMLDisclogScraper): + + def __init__(self): + super(ScraperImplementation, self).__init__() + + +if __name__ == '__main__': + print 'Subclass:', issubclass(ScraperImplementation, + genericScrapers.GenericHTMLDisclogScraper) + print 'Instance:', isinstance(ScraperImplementation(), + 
genericScrapers.GenericHTMLDisclogScraper) + ScraperImplementation().doScrape() + --- a/documents/scrapers/bf6e587f166040b63681cd2ff76fbfdf.txt +++ /dev/null @@ -1,1 +1,1 @@ -no disclog + --- a/documents/scrapers/c1302c8d7cbbd911f0d4d8a4128f8079.txt +++ b/documents/scrapers/c1302c8d7cbbd911f0d4d8a4128f8079.txt @@ -1,1 +1,1 @@ -uses RET disclog +parent --- /dev/null +++ b/documents/scrapers/c57c0bf315ce5977e730905707a2f6a3.py @@ -1,1 +1,19 @@ +import sys +import os +sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../')) +import genericScrapers + +class ScraperImplementation(genericScrapers.GenericPDFDisclogScraper): + + def __init__(self): + super(ScraperImplementation, self).__init__() + + +if __name__ == '__main__': + print 'Subclass:', issubclass(ScraperImplementation, + genericScrapers.GenericOAICDisclogScraper) + print 'Instance:', isinstance(ScraperImplementation(), + genericScrapers.GenericOAICDisclogScraper) + ScraperImplementation().doScrape() + --- a/documents/scrapers/c57c0bf315ce5977e730905707a2f6a3.txt +++ /dev/null @@ -1,3 +1,1 @@ -# pdf -http://www.awm.gov.au/about/AWM_Disclosure_Log.pdf --- /dev/null +++ b/documents/scrapers/cb7f40e3495b682de6eee61bf09c1cfc.py @@ -1,1 +1,19 @@ +import sys +import os +sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../')) +import genericScrapers + +class ScraperImplementation(genericScrapers.GenericHTMLDisclogScraper): + + def __init__(self): + super(ScraperImplementation, self).__init__() + + +if __name__ == '__main__': + print 'Subclass:', issubclass(ScraperImplementation, + genericScrapers.GenericHTMLDisclogScraper) + print 'Instance:', isinstance(ScraperImplementation(), + genericScrapers.GenericHTMLDisclogScraper) + ScraperImplementation().doScrape() + --- a/documents/scrapers/cb7f40e3495b682de6eee61bf09c1cfc.txt +++ /dev/null @@ -1,1 +1,1 @@ -no disclog + --- /dev/null +++ b/documents/scrapers/d72744fb1e5d6e87af9a5ea16cc27fa5.py @@ -1,1 +1,50 @@ +import sys +import os +sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../')) +import genericScrapers +import scrape +from datetime import date +from pyquery import PyQuery as pq +from lxml import etree +import urllib +import dateutil +from dateutil.parser import * +class ACMADisclogScraper(genericScrapers.GenericDisclogScraper): + + def doScrape(self): + foidocsdb = scrape.couch['disclosr-foidocuments'] + (url, mime_type, content) = scrape.fetchURL(scrape.docsdb, + self.getURL(), "foidocuments", self.getAgencyID()) + + d = pq(content) + d.make_links_absolute(base_url = self.getURL()) + for item in d('.item-list').items(): + title= item('h3').text() + print title + links = item('a').map(lambda i, e: pq(e).attr('href')) + description = title= item('ul').text() + edate = date.today().strftime("%Y-%m-%d") + print edate + dochash = scrape.mkhash(self.remove_control_chars(title)) + doc = foidocsdb.get(dochash) + if doc is None: + print "saving " + dochash + edate = date.today().strftime("%Y-%m-%d") + doc = {'_id': dochash, 'agencyID': self.getAgencyID() + , 'url': self.getURL(), 'docID': dochash, + "links": links, + "date": edate, "title": title, "description": description} + #print doc + foidocsdb.save(doc) + else: + print "already saved" + + +if __name__ == '__main__': + print 'Subclass:', issubclass(ACMADisclogScraper, + genericScrapers.GenericDisclogScraper) + print 'Instance:', isinstance(ACMADisclogScraper(), + genericScrapers.GenericDisclogScraper) + ACMADisclogScraper().doScrape() + --- /dev/null +++ 
b/documents/scrapers/e770921522a49dc77de208cc724ce134.py @@ -1,1 +1,19 @@ +import sys +import os +sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../')) +import genericScrapers + +class ScraperImplementation(genericScrapers.GenericHTMLDisclogScraper): + + def __init__(self): + super(ScraperImplementation, self).__init__() + + +if __name__ == '__main__': + print 'Subclass:', issubclass(ScraperImplementation, + genericScrapers.GenericHTMLDisclogScraper) + print 'Instance:', isinstance(ScraperImplementation(), + genericScrapers.GenericHTMLDisclogScraper) + ScraperImplementation().doScrape() + --- a/documents/scrapers/e770921522a49dc77de208cc724ce134.txt +++ /dev/null @@ -1,1 +1,1 @@ -no disclog + --- /dev/null +++ b/documents/scrapers/f189459fc43f941e0d4ecfba52c666f3.py @@ -1,1 +1,19 @@ +import sys +import os +sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../')) +import genericScrapers + +class ScraperImplementation(genericScrapers.GenericHTMLDisclogScraper): + + def __init__(self): + super(ScraperImplementation, self).__init__() + + +if __name__ == '__main__': + print 'Subclass:', issubclass(ScraperImplementation, + genericScrapers.GenericHTMLDisclogScraper) + print 'Instance:', isinstance(ScraperImplementation(), + genericScrapers.GenericHTMLDisclogScraper) + ScraperImplementation().doScrape() + --- a/documents/sc
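A closing note on the GenericHTMLDisclogScraper added to genericScrapers.py earlier in this commit: for agencies whose disclosure log has no parseable table, it hashes the fetched page and, when the hash is new, saves an entry whose description embeds a difflib.HtmlDiff table comparing the page with the previously stored attachment. A stripped-down sketch of that step; old_html and new_html are hypothetical stand-ins for what the real code reads from CouchDB attachments:

import difflib

def describe_change(old_html, new_html):
    description = ("This log may have updated but we cannot extract what has changed.\n"
                   "Changes:\n")
    # make_table() renders a side-by-side HTML diff of the two page versions
    description += difflib.HtmlDiff().make_table(old_html.split('\n'),
                                                 new_html.split('\n'))
    return description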