[submodule "couchdb/couchdb-lucene"] | [submodule "couchdb/couchdb-lucene"] |
path = couchdb/couchdb-lucene | path = couchdb/couchdb-lucene |
url = https://github.com/rnewson/couchdb-lucene.git | url = https://github.com/rnewson/couchdb-lucene.git |
[submodule "couchdb/settee"] | [submodule "couchdb/settee"] |
path = couchdb/settee | path = couchdb/settee |
url = https://github.com/inadarei/settee.git | url = https://github.com/inadarei/settee.git |
[submodule "lib/php-diff"] | [submodule "lib/php-diff"] |
path = lib/php-diff | path = lib/php-diff |
url = https://github.com/chrisboulton/php-diff.git | url = https://github.com/chrisboulton/php-diff.git |
[submodule "lib/Requests"] | [submodule "lib/Requests"] |
path = lib/Requests | path = lib/Requests |
url = https://github.com/rmccue/Requests.git | url = https://github.com/rmccue/Requests.git |
[submodule "js/flotr2"] | [submodule "js/flotr2"] |
path = js/flotr2 | path = js/flotr2 |
url = https://github.com/HumbleSoftware/Flotr2.git | url = https://github.com/HumbleSoftware/Flotr2.git |
[submodule "lib/phpquery"] | [submodule "lib/phpquery"] |
path = lib/phpquery | path = lib/phpquery |
url = https://github.com/TobiaszCudnik/phpquery.git | url = https://github.com/TobiaszCudnik/phpquery.git |
[submodule "js/sigma"] | [submodule "js/sigma"] |
path = js/sigma | path = js/sigma |
url = https://github.com/jacomyal/sigma.js.git | url = https://github.com/jacomyal/sigma.js.git |
[submodule "js/bubbletree"] | [submodule "js/bubbletree"] |
path = js/bubbletree | path = js/bubbletree |
url = https://github.com/okfn/bubbletree.git | url = https://github.com/okfn/bubbletree.git |
[submodule "lib/querypath"] | [submodule "lib/querypath"] |
path = lib/querypath | path = lib/querypath |
url = https://github.com/technosophos/querypath.git | url = https://github.com/technosophos/querypath.git |
[submodule "lib/amon-php"] | [submodule "lib/amon-php"] |
path = lib/amon-php | path = lib/amon-php |
url = https://github.com/martinrusev/amon-php.git | url = https://github.com/martinrusev/amon-php.git |
[submodule "documents/lib/parsedatetime"] | |
path = documents/lib/parsedatetime | |
url = git://github.com/bear/parsedatetime.git | |
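# After cloning, fetch these pinned dependencies with:
#   git submodule update --init --recursive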
import sys,os
sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
import scrape
from bs4 import BeautifulSoup
import parsedatetime as pdt
from time import mktime
from datetime import datetime
import feedparser
import abc

class GenericRSSDisclogScraper(object):
    __metaclass__ = abc.ABCMeta

    @abc.abstractmethod
    def getAgencyID(self):
        """ disclosr agency id """
        return

    @abc.abstractmethod
    def getURL(self):
        """ disclog URL """
        return

    def doScrape(self):
        foidocsdb = scrape.couch['disclosr-foidocuments']
        (url, mime_type, content) = scrape.fetchURL(scrape.docsdb, self.getURL(), "foidocuments", self.getAgencyID())
        feed = feedparser.parse(content)
        for entry in feed.entries:
            #print entry
            print entry.id
            hash = scrape.mkhash(entry.link)
            doc = foidocsdb.get(hash)
            if doc == None:
                print "saving"
                edate = datetime.fromtimestamp(mktime(entry.published_parsed)).strftime("%Y-%m-%d")
                # use '_id' so CouchDB stores the document under our hash and
                # foidocsdb.get(hash) finds it again on the next run
                doc = {'_id': hash, 'agencyID': self.getAgencyID(), 'url': entry.link, 'docID': entry.id,
                       "date": edate, "description": entry.summary, "title": entry.title}
                foidocsdb.save(doc)
            else:
                print "already saved"
class GenericOAICDisclogScraper(object):
    __metaclass__ = abc.ABCMeta

    @abc.abstractmethod
    def getAgencyID(self):
        """ disclosr agency id """
        return

    @abc.abstractmethod
    def getURL(self):
        """ disclog URL """
        return

    @abc.abstractmethod
    def getColumns(self, columns):
        """ rearranges columns if required """
        return

    def doScrape(self):
        cal = pdt.Calendar()
        foidocsdb = scrape.couch['disclosr-foidocuments']
        (url, mime_type, content) = scrape.fetchURL(scrape.docsdb, self.getURL(), "foidocuments", self.getAgencyID())
        if content != None:
            if mime_type == "text/html" or mime_type == "application/xhtml+xml" or mime_type == "application/xml":
                # http://www.crummy.com/software/BeautifulSoup/documentation.html
                soup = BeautifulSoup(content)
                for row in soup.table.find_all('tr'):
                    columns = row.find_all('td')
                    if len(columns) == 5:
                        (id, date, description, title, notes) = self.getColumns(columns)
                        print id.string
                        hash = scrape.mkhash(url + id.string)
                        links = []
                        for atag in row.find_all("a"):
                            if atag.has_attr('href'):
                                links.append(scrape.fullurl(url, atag['href']))
                        doc = foidocsdb.get(hash)
                        descriptiontxt = ""
                        for string in description.stripped_strings:
                            descriptiontxt = descriptiontxt + " \n" + string
                        if doc == None:
                            print "saving"
                            dtresult = cal.parseDateText(date.string)
                            if len(dtresult) == 2:
                                (dtdate, dtr) = dtresult
                                print dtdate
                                edate = str(dtdate[0]) + '-' + str(dtdate[1]) + '-' + str(dtdate[2])
                            else:
                                edate = ""
                            doc = {'_id': hash, 'agencyID': self.getAgencyID(), 'url': self.getURL(), "links": links, 'docID': id.string,
                                   "date": edate, "description": descriptiontxt, "title": title.string, "notes": notes.string}
                            foidocsdb.save(doc)
                        else:
                            print "already saved"
                    elif len(row.find_all('th')) == 5:
                        print "header row"
                    else:
                        print "ERROR number of columns incorrect"
                        print row
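# Usage sketch (the scraper scripts below follow this pattern): subclass one of
# the generic scrapers, implement getAgencyID() and getURL() (plus getColumns()
# for the OAIC table variant), then run ScraperImplementation().doScrape().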
<?php
include('template.inc.php');
include_header_documents("");
include_once('../include/common.inc.php');
?>
<?php
$agenciesdb = $server->get_db('disclosr-agencies');
$idtoname = Array();
foreach ($agenciesdb->get_view("app", "byCanonicalName")->rows as $row) {
    $idtoname[$row->id] = trim($row->value->name);
}
$foidocsdb = $server->get_db('disclosr-foidocuments');
try {
    // startkey '9999-99-99' down to endkey '0000-00-00' with descending=true: newest first
    $rows = $foidocsdb->get_view("app", "byDate", Array('9999-99-99', '0000-00-00'), true)->rows;
    if ($rows) {
        foreach ($rows as $row) {
            //print_r($row);
            displayLogEntry($row, $idtoname);
            /* 1/1/11 title (Dept dfggdfgdf)
               description:
               source link:
               documents:
               #1 title link */
        }
    }
} catch (SetteeRestClientException $e) {
    setteErrorHandler($e);
}
include_footer_documents();
?>
#http://packages.python.org/CouchDB/client.html
import couchdb
import urllib2
from BeautifulSoup import BeautifulSoup
import re
import hashlib
from urlparse import urljoin
import time
import os
import mimetypes
import urllib
import urlparse

def mkhash(input):
    return hashlib.md5(input).hexdigest().encode("utf-8")
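# e.g. mkhash("http://www.example.gov.au/foi") returns a 32-char hex digest,
# used throughout as the CouchDB document _id for that URL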
def canonurl(url):
    r"""Return the canonical, ASCII-encoded form of a UTF-8 encoded URL, or ''
    if the URL looks invalid.
    >>> canonurl('\xe2\x9e\xa1.ws')  # tinyarro.ws
    'http://xn--hgi.ws/'
    """
    # strip spaces at the ends and ensure it's prefixed with 'scheme://'
    url = url.strip()
    if not url:
        return ''
    if not urlparse.urlsplit(url).scheme:
        url = 'http://' + url
    # turn it into Unicode
    #try:
    #    url = unicode(url, 'utf-8')
    #except UnicodeDecodeError:
    #    return ''  # bad UTF-8 chars in URL
    # parse the URL into its components
    parsed = urlparse.urlsplit(url)
    scheme, netloc, path, query, fragment = parsed
    # ensure scheme is a letter followed by letters, digits, and '+-.' chars
    if not re.match(r'[a-z][-+.a-z0-9]*$', scheme, flags=re.I):
        return ''
    scheme = str(scheme)
    # ensure domain and port are valid, eg: sub.domain.<1-to-6-TLD-chars>[:port]
    match = re.match(r'(.+\.[a-z0-9]{1,6})(:\d{1,5})?$', netloc, flags=re.I)
    if not match:
        return ''
    domain, port = match.groups()
    netloc = domain + (port if port else '')
    netloc = netloc.encode('idna')
    # ensure path is valid and convert Unicode chars to %-encoded
    if not path:
        path = '/'  # eg: 'http://google.com' -> 'http://google.com/'
    path = urllib.quote(urllib.unquote(path.encode('utf-8')), safe='/;')
    # ensure query is valid
    query = urllib.quote(urllib.unquote(query.encode('utf-8')), safe='=&?/')
    # ensure fragment is valid
    fragment = urllib.quote(urllib.unquote(fragment.encode('utf-8')))
    # piece it all back together, truncating it to a maximum of 4KB
    url = urlparse.urlunsplit((scheme, netloc, path, query, fragment))
    return url[:4096]
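# A further doctest-style example (a sketch, not from the original test suite):
# >>> canonurl('example.com/foo bar')
# 'http://example.com/foo%20bar'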
def fullurl(url, href):
    href = href.replace(" ", "%20")
    href = re.sub('#.*$', '', href)
    return urljoin(url, href)
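# e.g. fullurl('http://example.gov.au/foi/', 'docs/log 1.pdf#top')
#   -> 'http://example.gov.au/foi/docs/log%201.pdf' (space %-encoded, anchor stripped)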
#http://diveintopython.org/http_web_services/etags.html
class NotModifiedHandler(urllib2.BaseHandler):
    def http_error_304(self, req, fp, code, message, headers):
        addinfourl = urllib2.addinfourl(fp, headers, req.get_full_url())
        addinfourl.code = code
        return addinfourl
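# Installed via urllib2.build_opener() in fetchURL below: turns a 304 Not Modified
# response into a normal addinfourl result (with .code set) instead of an
# exception, so fetchURL can detect it and skip re-downloading.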
def fetchURL(docsdb, url, fieldName, agencyID, scrape_again=True):
    url = canonurl(url)
    hash = mkhash(url)
    print "Fetching %s" % url
    # canonurl() returns '' for invalid URLs; also skip non-http schemes.
    # Check this before building the Request, which chokes on an empty url.
    if url == None or url == "" or url.startswith("mailto") or url.startswith("javascript") or url.startswith("#"):
        print "Not a valid HTTP url"
        return (None, None, None)
    req = urllib2.Request(url)
    doc = docsdb.get(hash)
    if doc == None:
        doc = {'_id': hash, 'agencyID': agencyID, 'url': url, 'fieldName': fieldName}
    else:
        # 60*60*24*14 seconds = a fortnight (the original mixed in a *1000 ms factor)
        if ('page_scraped' in doc) and (time.time() - doc['page_scraped']) < 60 * 60 * 24 * 14:
            print "Uh oh, trying to scrape URL again too soon!"
            last_attachment_fname = doc["_attachments"].keys()[-1]
            last_attachment = docsdb.get_attachment(doc, last_attachment_fname)
            return (doc['url'], doc['mime_type'], last_attachment.read())
        if scrape_again == False:
            print "Not scraping this URL again as requested"
            return (None, None, None)
    time.sleep(3)  # wait 3 seconds to give webserver time to recover
    req.add_header("User-Agent", "Mozilla/4.0 (compatible; Prometheus webspider; owner maxious@lambdacomplex.org)")
    #if there is a previous version stored in couchdb, load caching helper tags
    if doc.has_key('etag'):
        req.add_header("If-None-Match", doc['etag'])
    if doc.has_key('last_modified'):
        req.add_header("If-Modified-Since", doc['last_modified'])
    opener = urllib2.build_opener(NotModifiedHandler())
    try:
        url_handle = opener.open(req)
        doc['url'] = url_handle.geturl()  # may have followed a redirect to a new url
        headers = url_handle.info()  # the addinfourls have the .info() too
        doc['etag'] = headers.getheader("ETag")
        doc['last_modified'] = headers.getheader("Last-Modified")
        doc['date'] = headers.getheader("Date")
        doc['page_scraped'] = time.time()
        doc['web_server'] = headers.getheader("Server")
        doc['via'] = headers.getheader("Via")
        doc['powered_by'] = headers.getheader("X-Powered-By")
        doc['file_size'] = headers.getheader("Content-Length")
        content_type = headers.getheader("Content-Type")
        if content_type != None:
            doc['mime_type'] = content_type.split(";")[0]
        else:
            (type, encoding) = mimetypes.guess_type(url)
            doc['mime_type'] = type
        if hasattr(url_handle, 'code') and url_handle.code == 304:
            print "the web page has not been modified"
            return (None, None, None)
        else:
            content = url_handle.read()
            docsdb.save(doc)
            doc = docsdb.get(hash)  # need to get a _rev
            # store the page as an attachment named epoch-filename
            docsdb.put_attachment(doc, content, str(time.time()) + "-" + os.path.basename(url), doc['mime_type'])
            return (doc['url'], doc['mime_type'], content)
    except urllib2.URLError as e:
        error = ""
        if hasattr(e, 'reason'):
            error = "error %s in downloading %s" % (str(e.reason), url)
        elif hasattr(e, 'code'):
            error = "error %s in downloading %s" % (e.code, url)
        print error
        doc['error'] = error
        docsdb.save(doc)
        return (None, None, None)
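# Typical call: (url, mime_type, content) = fetchURL(docsdb, someurl, "foidocuments", agencyID)
# content is None when the URL was invalid, returned HTTP 304, or failed to download.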
def scrapeAndStore(docsdb, url, depth, fieldName, agencyID):
    (url, mime_type, content) = fetchURL(docsdb, url, fieldName, agencyID)
    badURLs = ["http://www.ausport.gov.au/supporting/funding/grants_and_scholarships/grant_funding_report"]
    if content != None and depth > 0 and url not in badURLs:
        if mime_type == "text/html" or mime_type == "application/xhtml+xml" or mime_type == "application/xml":
            # http://www.crummy.com/software/BeautifulSoup/documentation.html
            soup = BeautifulSoup(content)
            navIDs = soup.findAll(id=re.compile('nav|Nav|menu|bar|left|right|sidebar|more-links|breadcrumb|footer|header'))
            for nav in navIDs:
                print "Removing element", nav['id']
                nav.extract()
            navClasses = soup.findAll(attrs={'class': re.compile('nav|menu|bar|left|right|sidebar|more-links|breadcrumb|footer|header')})
            for nav in navClasses:
                print "Removing element", nav['class']
                nav.extract()
            links = soup.findAll('a')  # soup.findAll('a', id=re.compile("^p-"))
            linkurls = set([])
            for link in links:
                if link.has_key("href"):
                    if link['href'].startswith("http"):
                        # lets not do external links for now
                        # linkurls.add(link['href'])
                        pass
                    elif link['href'].startswith("mailto"):
                        # not http
                        pass
                    elif link['href'].startswith("javascript"):
                        # not http
                        pass
                    else:
                        # remove anchors and spaces in urls
                        linkurls.add(fullurl(url, link['href']))
            for linkurl in linkurls:
                #print linkurl
                scrapeAndStore(docsdb, linkurl, depth - 1, fieldName, agencyID)
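# e.g. scrapeAndStore(docsdb, "http://www.example.gov.au/foi", 1, "website", agencyID)
# fetches the page, strips navigation chrome, then recurses one level into
# relative links; external, mailto and javascript links are skipped.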
#couch = couchdb.Server('http://192.168.1.148:5984/')
couch = couchdb.Server('http://127.0.0.1:5984/')
# select database
agencydb = couch['disclosr-agencies']
docsdb = couch['disclosr-documents']

if __name__ == "__main__":
    for row in agencydb.view('app/getScrapeRequired'):  #not recently scraped agencies view?
        agency = agencydb.get(row.id)
        print agency['name']
        for key in agency.keys():
            if key == "FOIDocumentsURL" and "status" not in agency.keys():
                scrapeAndStore(docsdb, agency[key], 0, key, agency['_id'])
            if key == 'website' and False:
                scrapeAndStore(docsdb, agency[key], 0, key, agency['_id'])
            if key.endswith('URL') and False:
                print key
                depth = 1
                if 'scrapeDepth' in agency.keys():
                    depth = agency['scrapeDepth']
                scrapeAndStore(docsdb, agency[key], depth, key, agency['_id'])
        agency['metadata']['lastScraped'] = time.time()
        agencydb.save(agency)
# multiple pages need to be scraped initially, each entry has a subpage
http://www.ipaustralia.gov.au/about-us/freedom-of-information/foi-disclosure-log/?page=35&sessionId=3644188
import sys,os
sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
import genericScrapers
#RSS feed not detailed
#http://www.doughellmann.com/PyMOTW/abc/

class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
    def getAgencyID(self):
        return "8c9421f852c441910bf1d93a57b31d64"
    def getURL(self):
        return "http://www.daff.gov.au/about/foi/ips/disclosure-log"
    def getColumns(self, columns):
        (id, date, title, description, notes) = columns
        return (id, date, description, title, notes)

if __name__ == '__main__':
    print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
    print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
    ScraperImplementation().doScrape()
import sys,os
sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
import genericScrapers
#RSS feed not detailed
#http://www.doughellmann.com/PyMOTW/abc/

class ScraperImplementation(genericScrapers.GenericRSSDisclogScraper):
    def getAgencyID(self):
        return "be9996f0ac58f71f23d074e82d44ead3"
    def getURL(self):
        return "http://foi.deewr.gov.au/disclosure-log/rss"

if __name__ == '__main__':
    print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericRSSDisclogScraper)
    print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericRSSDisclogScraper)
    ScraperImplementation().doScrape()
http://www.awm.gov.au/about/AWM_Disclosure_Log.pdf
import sys,os
sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
import genericScrapers
#RSS feed not detailed
#http://www.doughellmann.com/PyMOTW/abc/

class ScraperImplementation(genericScrapers.GenericRSSDisclogScraper):
    def getAgencyID(self):
        return "be9996f0ac58f71f23d074e82d44ead3"
    def getURL(self):
        return "http://foi.deewr.gov.au/disclosure-log/rss"
    def getColumns(self, columns):
        (id, date, title, description, notes) = columns
        return (id, date, description, title, notes)

if __name__ == '__main__':
    print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericRSSDisclogScraper)
    print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericRSSDisclogScraper)
    ScraperImplementation().doScrape()
www.finance.gov.au/foi/disclosure-log/foi-rss.xml
import sys,os
sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
import genericScrapers
#RSS feed not detailed
#http://www.doughellmann.com/PyMOTW/abc/

class ScraperImplementation(genericScrapers.GenericRSSDisclogScraper):
    def getAgencyID(self):
        return "be9996f0ac58f71f23d074e82d44ead3"
    def getURL(self):
        return "http://foi.deewr.gov.au/disclosure-log/rss"
    def getColumns(self, columns):
        (id, date, title, description, notes) = columns
        return (id, date, description, title, notes)

if __name__ == '__main__':
    print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericRSSDisclogScraper)
    print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericRSSDisclogScraper)
    ScraperImplementation().doScrape()
http://www.righttoknow.org.au/feed/search/%20(latest_status:successful%20OR%20latest_status:partially_successful)
<?php

function include_header_documents($title) {
?>
<!doctype html>
<!-- paulirish.com/2008/conditional-stylesheets-vs-css-hacks-answer-neither/ -->
<!--[if lt IE 7]> <html class="no-js lt-ie9 lt-ie8 lt-ie7" lang="en"> <![endif]-->
<!--[if IE 7]>    <html class="no-js lt-ie9 lt-ie8" lang="en"> <![endif]-->
<!--[if IE 8]>    <html class="no-js lt-ie9" lang="en"> <![endif]-->
<!-- Consider adding a manifest.appcache: h5bp.com/d/Offline -->
<!--[if gt IE 8]><!--> <html class="no-js" lang="en"> <!--<![endif]-->
<head>
    <meta charset="utf-8">
    <!-- Use the .htaccess and remove these lines to avoid edge case issues.
         More info: h5bp.com/i/378 -->
    <meta http-equiv="X-UA-Compatible" content="IE=edge,chrome=1">
    <title>Australian Disclosure Logs<?php if ($title != "") echo " - $title"; ?></title>
    <meta name="description" content="">
    <!-- Mobile viewport optimized: h5bp.com/viewport -->
    <meta name="viewport" content="width=device-width">
    <!-- Place favicon.ico and apple-touch-icon.png in the root directory: mathiasbynens.be/notes/touch-icons -->
    <meta name="google-site-verification" content="jkknX5g2FCpQvrW030b1Nq2hyoa6mb3EDiA7kCoHNj8" />
    <!-- Le styles -->
    <link href="css/bootstrap.min.css" rel="stylesheet">
    <style type="text/css">
        body {
            padding-top: 60px;
            padding-bottom: 40px;
        }
        .sidebar-nav {
            padding: 9px 0;
        }
    </style>
    <link href="css/bootstrap-responsive.min.css" rel="stylesheet">
    <!-- HTML5 shim, for IE6-8 support of HTML5 elements -->
    <!--[if lt IE 9]>
    <script src="http://html5shim.googlecode.com/svn/trunk/html5.js"></script>
    <![endif]-->
    <!-- More ideas for your <head> here: h5bp.com/d/head-Tips -->
    <!-- All JavaScript at the bottom, except this Modernizr build.
         Modernizr enables HTML5 elements & feature detects for optimal performance.
         Create your own custom Modernizr build: www.modernizr.com/download/ -->
    <script src="js/libs/modernizr-2.5.3.min.js"></script>
</head>
<body>
<div class="navbar navbar-inverse navbar-fixed-top">
    <div class="navbar-inner">
        <div class="container-fluid">
            <a class="btn btn-navbar" data-toggle="collapse" data-target=".nav-collapse">
                <span class="icon-bar"></span>
                <span class="icon-bar"></span>
                <span class="icon-bar"></span>
            </a>
            <a class="brand" href="#">Australian Disclosure Logs</a>
            <div class="nav-collapse collapse">
                <p class="navbar-text pull-right">
                    Check out our subsites on:
                    <a href="http://orgs.disclosurelo.gs">Government Agencies</a>
                    • <a href="http://lobbyists.disclosurelo.gs">Political Lobbyists</a>
                    • <a href="http://contracts.disclosurelo.gs">Government Contracts and Spending</a>
                </p>
                <ul class="nav">
                    <li class="active"><a href="#">Home</a></li>
                    <li><a href="disclogsList.php">List of Disclosure Logs</a></li>
                    <li><a href="about.php">About</a></li>
                </ul>
            </div><!--/.nav-collapse -->
        </div>
    </div>
</div>
<div class="container">
<?php
}

function include_footer_documents() {
?>
</div> <!-- /container -->
<hr>
<footer>
    <p>&copy; Company 2012</p>
</footer>
<script type="text/javascript">
    var _gaq = _gaq || [];
    _gaq.push(['_setAccount', 'UA-12341040-4']);
    _gaq.push(['_setDomainName', 'disclosurelo.gs']);
    _gaq.push(['_setAllowLinker', true]);
    _gaq.push(['_trackPageview']);
    (function() {
        var ga = document.createElement('script'); ga.type = 'text/javascript'; ga.async = true;
        ga.src = ('https:' == document.location.protocol ? 'https://ssl' : 'http://www') + '.google-analytics.com/ga.js';
        var s = document.getElementsByTagName('script')[0]; s.parentNode.insertBefore(ga, s);
    })();
</script>
<!-- Le javascript
================================================== -->
<!-- Placed at the end of the document so the pages load faster
<script src="js/jquery.js"></script>
<script src="js/bootstrap-transition.js"></script>
<script src="js/bootstrap-alert.js"></script>
<script src="js/bootstrap-modal.js"></script>
<script src="js/bootstrap-dropdown.js"></script>
<script src="js/bootstrap-scrollspy.js"></script>
<script src="js/bootstrap-tab.js"></script>
<script src="js/bootstrap-tooltip.js"></script>
<script src="js/bootstrap-popover.js"></script>
<script src="js/bootstrap-button.js"></script>
<script src="js/bootstrap-collapse.js"></script>
<script src="js/bootstrap-carousel.js"></script>
<script src="js/bootstrap-typeahead.js"></script>-->
</body>
</html>
<?php
}
echo "<div><h2>".$row->value->date.": ".$row->value->title." (".$idtoname[$row->value->agencyID].")</h2> | echo "<div><h2>".$row->value->date.": ".$row->value->title." (".$idtoname[$row->value->agencyID].")</h2> <p>".$row->value->description; |
<p>".$row->value->description." <br>Note: ".$row->value->notes."</p>"; | if (isset($row->value->notes)) { |
echo " <br>Note: ".$row->value->notes; | |
} | |
echo "</p>"; | |
if (isset($row->value->links)){ | |
echo "<h3>Links/Documents</h3><ul>"; | echo "<h3>Links/Documents</h3><ul>"; |
foreach ($row->value->links as $link) { | foreach ($row->value->links as $link) { |
echo "<li><a href='$link'>".$link."</a></li>"; | echo "<li><a href='$link'>".$link."</a></li>"; |
} | } |
echo "</ul>"; | echo "</ul>"; |
} | |
echo "<small><A href='".$row->value->url."'>View original source...</a> ID: ".$row->value->docID."</small>"; | echo "<small><A href='".$row->value->url."'>View original source...</a> ID: ".$row->value->docID."</small>"; |
echo"</div>"; | echo"</div>"; |
} | } |
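/* displayLogEntry() expects a CouchDB view row shaped like the documents the
   Python scrapers save, e.g. (a sketch with illustrative values):
   $row->value = (object) array('date' => '2012-09-01', 'title' => 'FOI 12/34',
       'agencyID' => '...', 'description' => '...', 'url' => '...',
       'docID' => '...', 'links' => array('http://...'), 'notes' => '...');
*/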
<?php
include $basePath . "schemas/schemas.inc.php";
require ($basePath . 'couchdb/settee/src/settee.php');

function createFOIDocumentsDesignDoc() {
    // TODO: index.php queries app/byDate on disclosr-foidocuments; that view
    // still needs to be defined here. Sketch of other candidate views:
    /* "web_server": {
        "map": "function(doc) {\n  emit(doc.web_server, 1);\n}",
        "reduce": "function (key, values, rereduce) {\n  return sum(values);\n}"
    },
    "byAgency": {
        "map": "function(doc) {\n  emit(doc.agencyID, 1);\n}",
        "reduce": "function (key, values, rereduce) {\n  return sum(values);\n}"
    },
    "byURL": {
        "map": "function(doc) {\n  emit(doc.url, doc);\n}"
    } */
}
function createDocumentsDesignDoc() {
    /* "views": {
        "web_server": {
            "map": "function(doc) {\n  emit(doc.web_server, 1);\n}",
            "reduce": "function (key, values, rereduce) {\n  return sum(values);\n}"
        },
        "byAgency": {
            "map": "function(doc) {\n  emit(doc.agencyID, 1);\n}",
            "reduce": "function (key, values, rereduce) {\n  return sum(values);\n}"
        },
        "byURL": {
            "map": "function(doc) {\n  emit(doc.url, doc);\n}"
        },
        "agency": {
            "map": "function(doc) {\n  emit(doc.agencyID, doc);\n}"
        },
        "byWebServer": {
            "map": "function(doc) {\n  emit(doc.web_server, doc);\n}"
        },
        "getValidationRequired": {
            "map": "function(doc) {\nif (doc.mime_type == \"text/html\" \n&& typeof(doc.validation) == \"undefined\") {\n  emit(doc._id, doc._attachments);\n}\n}"
        }
    } */
}
function createAgencyDesignDoc() {
    global $db;
    $obj = new stdClass();
    $obj->_id = "_design/" . urlencode("app");
    $obj->language = "javascript";
    $obj->views->all->map = "function(doc) { emit(doc._id, doc); };";
    $obj->views->byABN->map = "function(doc) { emit(doc.abn, doc); };";
    $obj->views->byCanonicalName->map = "function(doc) {
        if (doc.parentOrg || doc.orgType == 'FMA-DepartmentOfState') {
            emit(doc.name, doc);
        }
    };";
    $obj->views->byDeptStateName->map = "function(doc) {
        if (doc.orgType == 'FMA-DepartmentOfState') {
            emit(doc.name, doc._id);
        }
    };";
    $obj->views->parentOrgs->map = "function(doc) {
        if (doc.parentOrg) {
            emit(doc._id, doc.parentOrg);
        }
    };";
    $obj->views->byName->map = 'function(doc) {
        if (typeof(doc["status"]) == "undefined" || doc["status"] != "suspended") {
            emit(doc.name, doc._id);
            if (typeof(doc.shortName) != "undefined" && doc.shortName != doc.name) {
                emit(doc.shortName, doc._id);
            }
            for (name in doc.otherNames) {
                if (doc.otherNames[name] != "" && doc.otherNames[name] != doc.name) {
                    emit(doc.otherNames[name], doc._id);
                }
            }
            for (name in doc.foiBodies) {
                if (doc.foiBodies[name] != "" && doc.foiBodies[name] != doc.name) {
                    emit(doc.foiBodies[name], doc._id);
                }
            }
        }
    };';
    $obj->views->foiEmails->map = "function(doc) {
        emit(doc._id, doc.foiEmail);
    };";
    $obj->views->byLastModified->map = "function(doc) { emit(doc.metadata.lastModified, doc); }";
    $obj->views->getActive->map = 'function(doc) { if (doc.status == "active") { emit(doc._id, doc); } };';
    $obj->views->getSuspended->map = 'function(doc) { if (doc.status == "suspended") { emit(doc._id, doc); } };';
    $obj->views->getScrapeRequired->map = "function(doc) {
        // metadata.lastScraped is set by scrape.py; Date.parse yields a number
        // (or NaN), so compare it numerically rather than calling getTime() on
        // it as the original did. Emit anything not scraped in the last day.
        var lastScrape = Date.parse(doc.metadata.lastScraped);
        var today = new Date();
        if (!lastScrape || today.getTime() - lastScrape > 1000 * 60 * 60 * 24) {
            emit(doc._id, doc);
        }
    };";
    $obj->views->showNamesABNs->map = "function(doc) { emit(doc._id, {name: doc.name, abn: doc.abn}); };";
    $obj->views->getConflicts->map = "function(doc) {
        if (doc._conflicts) {
            emit(null, [doc._rev].concat(doc._conflicts));
        }
    }";
    // http://stackoverflow.com/questions/646628/javascript-startswith
    $obj->views->score->map = 'if(!String.prototype.startsWith){
        String.prototype.startsWith = function (str) {
            return !this.indexOf(str);
        }
    }
    function(doc) {
        count = 0;
        if (doc["status"] != "suspended") {
            for(var propName in doc) {
                if(typeof(doc[propName]) != "undefined" && doc[propName] != "") {
                    count++;
                }
            }
            portfolio = doc.parentOrg;
            if (doc.orgType == "FMA-DepartmentOfState") {
                portfolio = doc._id;
            }
            if (doc.orgType == "Court-Commonwealth" || doc.orgType == "FMA-DepartmentOfParliament") {
                portfolio = doc.orgType;
            }
            emit(count+doc._id, {id:doc._id, name: doc.name, score:count, orgType: doc.orgType, portfolio:portfolio});
        }
    }';
    $obj->views->scoreHas->map = 'if(!String.prototype.startsWith){
        String.prototype.startsWith = function (str) {
            return !this.indexOf(str);
        }
    }
    if(!String.prototype.endsWith){
        String.prototype.endsWith = function(suffix) {
            return this.indexOf(suffix, this.length - suffix.length) !== -1;
        };
    }
    function(doc) {
        if (typeof(doc["status"]) == "undefined" || doc["status"] != "suspended") {
            for(var propName in doc) {
                if(typeof(doc[propName]) != "undefined" && (propName.startsWith("has") || propName.endsWith("URL"))) {
                    emit(propName, 1);
                }
            }
            emit("total", 1);
        }
    }';
    $obj->views->scoreHas->reduce = 'function (key, values, rereduce) {
        return sum(values);
    }';
    $obj->views->fieldNames->map = '
    function(doc) {
        for(var propName in doc) {
            emit(propName, doc._id);
        }
    }';
    $obj->views->fieldNames->reduce = 'function (key, values, rereduce) {
        return values.length;
    }';
    // allow safe updates (even if slightly slower due to extra: rev-detection check).
    return $db->save($obj, true);
}
if (php_uname('n') == "vanille") { | if (php_uname('n') == "vanille") { |
$serverAddr = 'http://192.168.178.21:5984/'; | $serverAddr = 'http://192.168.178.21:5984/'; |
} else | } else |
if (php_uname('n') == "KYUUBEY") { | if (php_uname('n') == "KYUUBEY") { |
$serverAddr = 'http://192.168.1.148:5984/'; | $serverAddr = 'http://192.168.1.148:5984/'; |
} else { | } else { |
$serverAddr = 'http://127.0.0.1:5984/'; | $serverAddr = 'http://127.0.0.1:5984/'; |
} | } |
$server = new SetteeServer($serverAddr); | $server = new SetteeServer($serverAddr); |
function setteErrorHandler($e) { | function setteErrorHandler($e) { |
if (class_exists('Amon')) { | if (class_exists('Amon')) { |
Amon::log($e->getMessage() . " " . print_r($_SERVER,true), array('error')); | Amon::log($e->getMessage() . " " . print_r($_SERVER,true), array('error')); |
} | } |
echo $e->getMessage() . "<br>" . PHP_EOL; | echo $e->getMessage() . "<br>" . PHP_EOL; |
} | } |
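/* Usage sketch: callers wrap Settee operations in try/catch and route
   failures here, as index.php does:
   try {
       $rows = $foidocsdb->get_view("app", "byDate", Array('9999-99-99', '0000-00-00'), true)->rows;
   } catch (SetteeRestClientException $e) {
       setteErrorHandler($e);
   }
*/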