From: Maxious
--- /dev/null
+++ b/documents/agency.php
@@ -1,1 +1,41 @@
+<?php
+include('template.inc.php');
+include_once('../include/common.inc.php');
+$agenciesdb = $server->get_db('disclosr-agencies');
+$idtoname = Array();
+foreach ($agenciesdb->get_view("app", "byCanonicalName")->rows as $row) {
+ $idtoname[$row->id] = trim($row->value->name);
+}
+$foidocsdb = $server->get_db('disclosr-foidocuments');
+
+include_header_documents((isset($_REQUEST['id']) ? $idtoname[$_REQUEST['id']] : 'Entries by Agency'));
+$endkey = (isset($_REQUEST['end_key']) ? $_REQUEST['end_key'] : '9999-99-99');
+?>
+
+get_view("app", "byAgencyID", $_REQUEST['id'], false, false, false)->rows;
+ foreach ($rows as $row) {
+ //print_r($rows);
+ echo displayLogEntry($row, $idtoname);
+ if (!isset($startkey))
+ $startkey = $row->key;
+ $endkey = $row->key;
+ }
+ } else {
+ $rows = $foidocsdb->get_view("app", "byAgencyID?group=true", null, false, false, true)->rows;
+ if ($rows) {
+ foreach ($rows as $row) {
+ echo '<a href="agency.php?id=' . $row->key . '">' . $idtoname[$row->key] . " (" . $row->value . " records)</a> <br>\n";
+ }
+ }
+ }
+} catch (SetteeRestClientException $e) {
+ setteErrorHandler($e);
+}
+echo "next page ";
+include_footer_documents();
+?>
--- a/documents/charts.php
+++ b/documents/charts.php
@@ -1,6 +1,6 @@
$agenciesdb = $server->get_db('disclosr-agencies');
@@ -15,29 +15,28 @@
Charts
Lorem ipsum.
-
+
+
+
+$agenciesdb = $server->get_db('disclosr-agencies');
+
+$idtoname = Array();
+foreach ($agenciesdb->get_view("app", "byCanonicalName")->rows as $row) {
+ $idtoname[$row->id] = trim($row->value->name);
+}
+$foidocsdb = $server->get_db('disclosr-foidocuments');
+try {
+ $rows = $foidocsdb->get_view("app", "byDate", Array($endkey, '0000-00-00'), true, 20)->rows;
+ if ($rows) {
+ foreach ($rows as $key => $row) {
+ echo displayLogEntry($row, $idtoname);
+ if (!isset($startkey)) $startkey = $row->key;
+ $endkey = $row->key;
+ }
+ }
+} catch (SetteeRestClientException $e) {
+ setteErrorHandler($e);
+}
+echo "next page ";
+*/
+include_footer_documents();
+?>
+
--- a/documents/disclogsList.php
+++ b/documents/disclogsList.php
@@ -1,7 +1,7 @@
--- /dev/null
+++ b/documents/disclosr-documents.nja
@@ -1,1 +1,7 @@
-
+{
+ "venv": "",
+ "project-type": "Import from sources",
+ "name": "disclosr-documents",
+ "license": "GNU General Public License v3",
+ "description": ""
+}
--- a/documents/genericScrapers.py
+++ b/documents/genericScrapers.py
@@ -15,10 +15,6 @@
from StringIO import StringIO
-from docx import *
-from lxml import etree
-import zipfile
-
from pdfminer.pdfparser import PDFDocument, PDFParser
from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter, process_pdf
from pdfminer.pdfdevice import PDFDevice, TagExtractor
@@ -39,14 +35,14 @@
""" disclosr agency id """
if self.agencyID is None:
self.agencyID = os.path.basename(sys.argv[0]).replace(".py", "")
- return self.agencyID
+ return self.agencyID
def getURL(self):
""" disclog URL"""
if self.disclogURL is None:
agency = scrape.agencydb.get(self.getAgencyID())
self.disclogURL = agency['FOIDocumentsURL']
- return self.disclogURL
+ return self.disclogURL
@abc.abstractmethod
def doScrape(self):
@@ -62,14 +58,15 @@
self.getURL(), "foidocuments", self.getAgencyID())
laparams = LAParams()
rsrcmgr = PDFResourceManager(caching=True)
- outfp = StringIO.StringIO()
+ outfp = StringIO()
device = TextConverter(rsrcmgr, outfp, codec='utf-8',
laparams=laparams)
- fp = StringIO.StringIO()
- fp.write(content)
- description = output.getvalue()
+ fp = StringIO()
+ fp.write(content.read())
+
process_pdf(rsrcmgr, device, fp, set(), caching=True,
check_extractable=True)
+ description = outfp.getvalue()
fp.close()
device.close()
outfp.close()
@@ -77,11 +74,10 @@
doc = foidocsdb.get(dochash)
if doc is None:
print "saving " + dochash
- edate = datetime.fromtimestamp(mktime()).strftime("%Y-%m-%d")
+ edate = date.today().strftime("%Y-%m-%d")
doc = {'_id': dochash, 'agencyID': self.getAgencyID()
, 'url': self.getURL(), 'docID': dochash,
- "date": edate, "title": "Disclosure Log Updated"}
- self.getDescription(entry, entry, doc)
+ "date": edate, "title": "Disclosure Log Updated", "description": description}
foidocsdb.save(doc)
else:
print "already saved"
@@ -103,17 +99,16 @@
for paratext in paratextlist:
newparatextlist.append(paratext.encode("utf-8"))
## Print our documnts test with two newlines under each paragraph
- description = '\n\n'.join(newparatextlist)
+ description = '\n\n'.join(newparatextlist).strip(' \t\n\r')
dochash = scrape.mkhash(description)
doc = foidocsdb.get(dochash)
if doc is None:
print "saving " + dochash
- edate = datetime.fromtimestamp(mktime()).strftime("%Y-%m-%d")
+ edate = date.today().strftime("%Y-%m-%d")
doc = {'_id': dochash, 'agencyID': self.getAgencyID()
, 'url': self.getURL(), 'docID': dochash,
- "date": edate, "title": "Disclosure Log Updated"}
- self.getDescription(entry, entry, doc)
+ "date": edate, "title": "Disclosure Log Updated", "description": description}
foidocsdb.save(doc)
else:
print "already saved"
@@ -201,10 +196,9 @@
(url, mime_type, content) = scrape.fetchURL(scrape.docsdb,
self.getURL(), "foidocuments", self.getAgencyID())
if content is not None:
- if mime_type is "text/html"\
- or mime_type is "application/xhtml+xml"\
- or mime_type is"application/xml":
+ if mime_type == "text/html" or mime_type == "application/xhtml+xml" or mime_type == "application/xml":
# http://www.crummy.com/software/BeautifulSoup/documentation.html
+ print "parsing"
soup = BeautifulSoup(content)
table = self.getTable(soup)
for row in self.getRows(table):
@@ -222,11 +216,11 @@
dochash = scrape.mkhash(
self.remove_control_chars(
url + (''.join(id.stripped_strings))))
- doc = foidocsdb.get(hash)
+ doc = foidocsdb.get(dochash)
if doc is None:
- print "saving " + hash
- doc = {'_id': hash,
+ print "saving " + dochash
+ doc = {'_id': dochash,
'agencyID': self.getAgencyID(),
'url': self.getURL(),
'docID': (''.join(id.stripped_strings))}
--- a/documents/index.php
+++ b/documents/index.php
@@ -3,10 +3,11 @@
include('template.inc.php');
include_header_documents("");
include_once('../include/common.inc.php');
-$startkey = (isset($_REQUEST['start_key']) ? $_REQUEST['start_key'] : '9999-99-99');
+$endkey = (isset($_REQUEST['end_key']) ? $_REQUEST['end_key'] : '9999-99-99');
?>
+
<?php
$agenciesdb = $server->get_db('disclosr-agencies');
$idtoname = Array();
@@ -15,17 +16,18 @@
}
$foidocsdb = $server->get_db('disclosr-foidocuments');
try {
- $rows = $foidocsdb->get_view("app", "byDate", Array($startkey, '0000-00-00'), true, 20)->rows;
+ $rows = $foidocsdb->get_view("app", "byDate", Array($endkey, '0000-00-00'), true, 20)->rows;
if ($rows) {
foreach ($rows as $key => $row) {
echo displayLogEntry($row, $idtoname);
+ if (!isset($startkey)) $startkey = $row->key;
$endkey = $row->key;
}
}
} catch (SetteeRestClientException $e) {
setteErrorHandler($e);
}
-echo "next page";
+echo "next page ";
include_footer_documents();
?>
--- a/documents/rss.xml.php
+++ b/documents/rss.xml.php
@@ -8,21 +8,28 @@
//Creating an instance of FeedWriter class.
$TestFeed = new RSS2FeedWriter();
//Setting the channel elements
-//Use wrapper functions for common channelelements
-$TestFeed->setTitle('disclosurelo.gs Newest Entries - All');
-$TestFeed->setLink('http://disclosurelo.gs/rss.xml.php');
-$TestFeed->setDescription('disclosurelo.gs Newest Entries - All Agencies');
-$TestFeed->setChannelElement('language', 'en-us');
-$TestFeed->setChannelElement('pubDate', date(DATE_RSS, time()));
-
-//Retriving informations from database
+////Retriving informations from database
$idtoname = Array();
$agenciesdb = $server->get_db('disclosr-agencies');
foreach ($agenciesdb->get_view("app", "byCanonicalName")->rows as $row) {
$idtoname[$row->id] = trim($row->value->name);
}
$foidocsdb = $server->get_db('disclosr-foidocuments');
-$rows = $foidocsdb->get_view("app", "byDate", Array('9999-99-99', '0000-00-00', 50), true)->rows;
+if (isset($_REQUEST['id'])) {
+ $rows = $foidocsdb->get_view("app", "byAgencyID", $_REQUEST['id'], false, false, false)->rows;
+ $title = $idtoname[$_REQUEST['id']];
+} else {
+ $rows = $foidocsdb->get_view("app", "byDate", Array('9999-99-99', '0000-00-00', 50), true)->rows;
+ $title = 'All Agencies';
+}
+//Use wrapper functions for common channelelements
+$TestFeed->setTitle('disclosurelo.gs Newest Entries - '.$title);
+$TestFeed->setLink('http://disclosurelo.gs/rss.xml.php'.(isset($_REQUEST['id'])? '?id='.$_REQUEST['id'] : ''));
+$TestFeed->setDescription('disclosurelo.gs Newest Entries - '.$title);
+$TestFeed->setChannelElement('language', 'en-us');
+$TestFeed->setChannelElement('pubDate', date(DATE_RSS, time()));
+
+
//print_r($rows);
foreach ($rows as $row) {
//Create an empty FeedItem
--- a/documents/scrape.py
+++ b/documents/scrape.py
@@ -8,186 +8,188 @@
import time
import os
import mimetypes
import re
import urllib
import urlparse
def mkhash(input):
- return hashlib.md5(input).hexdigest().encode("utf-8")
+ return hashlib.md5(input).hexdigest().encode("utf-8")
def canonurl(url):
- r"""Return the canonical, ASCII-encoded form of a UTF-8 encoded URL, or ''
- if the URL looks invalid.
- >>> canonurl('\xe2\x9e\xa1.ws') # tinyarro.ws
- 'http://xn--hgi.ws/'
- """
- # strip spaces at the ends and ensure it's prefixed with 'scheme://'
- url = url.strip()
- if not url:
- return ''
- if not urlparse.urlsplit(url).scheme:
- url = 'http://' + url
-
- # turn it into Unicode
- #try:
- # url = unicode(url, 'utf-8')
- #except UnicodeDecodeError:
- # return '' # bad UTF-8 chars in URL
-
- # parse the URL into its components
- parsed = urlparse.urlsplit(url)
- scheme, netloc, path, query, fragment = parsed
-
- # ensure scheme is a letter followed by letters, digits, and '+-.' chars
- if not re.match(r'[a-z][-+.a-z0-9]*$', scheme, flags=re.I):
- return ''
- scheme = str(scheme)
-
- # ensure domain and port are valid, eg: sub.domain.<1-to-6-TLD-chars>[:port]
- match = re.match(r'(.+\.[a-z0-9]{1,6})(:\d{1,5})?$', netloc, flags=re.I)
- if not match:
- return ''
- domain, port = match.groups()
- netloc = domain + (port if port else '')
- netloc = netloc.encode('idna')
-
- # ensure path is valid and convert Unicode chars to %-encoded
- if not path:
- path = '/' # eg: 'http://google.com' -> 'http://google.com/'
- path = urllib.quote(urllib.unquote(path.encode('utf-8')), safe='/;')
-
- # ensure query is valid
- query = urllib.quote(urllib.unquote(query.encode('utf-8')), safe='=&?/')
-
- # ensure fragment is valid
- fragment = urllib.quote(urllib.unquote(fragment.encode('utf-8')))
-
- # piece it all back together, truncating it to a maximum of 4KB
- url = urlparse.urlunsplit((scheme, netloc, path, query, fragment))
- return url[:4096]
+ r"""Return the canonical, ASCII-encoded form of a UTF-8 encoded URL, or ''
+ if the URL looks invalid.
+ >>> canonurl('\xe2\x9e\xa1.ws') # tinyarro.ws
+ 'http://xn--hgi.ws/'
+ """
+ # strip spaces at the ends and ensure it's prefixed with 'scheme://'
+ url = url.strip()
+ if not url:
+ return ''
+ if not urlparse.urlsplit(url).scheme:
+ url = 'http://' + url
+
+ # turn it into Unicode
+ #try:
+ # url = unicode(url, 'utf-8')
+ #except UnicodeDecodeError:
+ # return '' # bad UTF-8 chars in URL
+
+ # parse the URL into its components
+ parsed = urlparse.urlsplit(url)
+ scheme, netloc, path, query, fragment = parsed
+
+ # ensure scheme is a letter followed by letters, digits, and '+-.' chars
+ if not re.match(r'[a-z][-+.a-z0-9]*$', scheme, flags=re.I):
+ return ''
+ scheme = str(scheme)
+
+ # ensure domain and port are valid, eg: sub.domain.<1-to-6-TLD-chars>[:port]
+ match = re.match(r'(.+\.[a-z0-9]{1,6})(:\d{1,5})?$', netloc, flags=re.I)
+ if not match:
+ return ''
+ domain, port = match.groups()
+ netloc = domain + (port if port else '')
+ netloc = netloc.encode('idna')
+
+ # ensure path is valid and convert Unicode chars to %-encoded
+ if not path:
+ path = '/' # eg: 'http://google.com' -> 'http://google.com/'
+ path = urllib.quote(urllib.unquote(path.encode('utf-8')), safe='/;')
+
+ # ensure query is valid
+ query = urllib.quote(urllib.unquote(query.encode('utf-8')), safe='=&?/')
+
+ # ensure fragment is valid
+ fragment = urllib.quote(urllib.unquote(fragment.encode('utf-8')))
+
+ # piece it all back together, truncating it to a maximum of 4KB
+ url = urlparse.urlunsplit((scheme, netloc, path, query, fragment))
+ return url[:4096]
def fullurl(url,href):
- href = href.replace(" ","%20")
- href = re.sub('#.*$','',href)
- return urljoin(url,href)
+ href = href.replace(" ","%20")
+ href = re.sub('#.*$','',href)
+ return urljoin(url,href)
#http://diveintopython.org/http_web_services/etags.html
-class NotModifiedHandler(urllib2.BaseHandler):
- def http_error_304(self, req, fp, code, message, headers):
- addinfourl = urllib2.addinfourl(fp, headers, req.get_full_url())
- addinfourl.code = code
- return addinfourl
+class NotModifiedHandler(urllib2.BaseHandler):
+ def http_error_304(self, req, fp, code, message, headers):
+ addinfourl = urllib2.addinfourl(fp, headers, req.get_full_url())
+ addinfourl.code = code
+ return addinfourl
def fetchURL(docsdb, url, fieldName, agencyID, scrape_again=True):
- url = canonurl(url)
- hash = mkhash(url)
- req = urllib2.Request(url)
- print "Fetching %s (%s)" % (url,hash)
- if url.startswith("mailto") or url.startswith("javascript") or url.startswith("#") or url == None or url == "":
- print "Not a valid HTTP url"
- return (None,None,None)
- doc = docsdb.get(hash)
- if doc == None:
- doc = {'_id': hash, 'agencyID': agencyID, 'url': url, 'fieldName':fieldName}
- else:
- if (('page_scraped' in doc) and (time.time() - doc['page_scraped']) < 60*24*14*1000):
- print "Uh oh, trying to scrape URL again too soon!"
- last_attachment_fname = doc["_attachments"].keys()[-1]
- last_attachment = docsdb.get_attachment(doc,last_attachment_fname)
- content = last_attachment
- return (doc['url'],doc['mime_type'],content)
- if scrape_again == False:
- print "Not scraping this URL again as requested"
- return (None,None,None)
-
- time.sleep(3) # wait 3 seconds to give webserver time to recover
-
- req.add_header("User-Agent", "Mozilla/4.0 (compatible; Prometheus webspider; owner maxious@lambdacomplex.org)")
- #if there is a previous version stored in couchdb, load caching helper tags
- if doc.has_key('etag'):
- req.add_header("If-None-Match", doc['etag'])
- if doc.has_key('last_modified'):
- req.add_header("If-Modified-Since", doc['last_modified'])
-
- opener = urllib2.build_opener(NotModifiedHandler())
- try:
- url_handle = opener.open(req)
- doc['url'] = url_handle.geturl() # may have followed a redirect to a new url
- headers = url_handle.info() # the addinfourls have the .info() too
- doc['etag'] = headers.getheader("ETag")
- doc['last_modified'] = headers.getheader("Last-Modified")
- doc['date'] = headers.getheader("Date")
- doc['page_scraped'] = time.time()
- doc['web_server'] = headers.getheader("Server")
- doc['via'] = headers.getheader("Via")
- doc['powered_by'] = headers.getheader("X-Powered-By")
- doc['file_size'] = headers.getheader("Content-Length")
- content_type = headers.getheader("Content-Type")
- if content_type != None:
- doc['mime_type'] = content_type.split(";")[0]
- else:
- (type,encoding) = mimetypes.guess_type(url)
- doc['mime_type'] = type
- if hasattr(url_handle, 'code'):
- if url_handle.code == 304:
- print "the web page has not been modified"
- return (None,None,None)
- else:
- content = url_handle.read()
- docsdb.save(doc)
- doc = docsdb.get(hash) # need to get a _rev
- docsdb.put_attachment(doc, content, str(time.time())+"-"+os.path.basename(url), doc['mime_type'])
- return (doc['url'], doc['mime_type'], content)
- #store as attachment epoch-filename
-
- except urllib2.URLError as e:
- error = ""
- if hasattr(e, 'reason'):
- error = "error %s in downloading %s" % (str(e.reason), url)
- elif hasattr(e, 'code'):
- error = "error %s in downloading %s" % (e.code, url)
- print error
- doc['error'] = error
- docsdb.save(doc)
- return (None,None,None)
+ url = canonurl(url)
+ hash = mkhash(url)
+ req = urllib2.Request(url)
+ print "Fetching %s (%s)" % (url,hash)
+ if url.startswith("mailto") or url.startswith("javascript") or url.startswith("#") or url == None or url == "":
+ print "Not a valid HTTP url"
+ return (None,None,None)
+ doc = docsdb.get(hash)
+ if doc == None:
+ doc = {'_id': hash, 'agencyID': agencyID, 'url': url, 'fieldName':fieldName}
+ else:
+ if (('page_scraped' in doc) and (time.time() - doc['page_scraped']) < 60*24*14*1000):
+ print "Uh oh, trying to scrape URL again too soon!"+hash
+ last_attachment_fname = doc["_attachments"].keys()[-1]
+ last_attachment = docsdb.get_attachment(doc,last_attachment_fname)
+ content = last_attachment
+ return (doc['url'],doc['mime_type'],content)
+ if scrape_again == False:
+ print "Not scraping this URL again as requested"
+ return (None,None,None)
+
+ req.add_header("User-Agent", "Mozilla/4.0 (compatible; Prometheus webspider; owner maxious@lambdacomplex.org)")
+ #if there is a previous version stored in couchdb, load caching helper tags
+ if doc.has_key('etag'):
+ req.add_header("If-None-Match", doc['etag'])
+ if doc.has_key('last_modified'):
+ req.add_header("If-Modified-Since", doc['last_modified'])
+
+ opener = urllib2.build_opener(NotModifiedHandler())
+ try:
+ url_handle = opener.open(req)
+ doc['url'] = url_handle.geturl() # may have followed a redirect to a new url
+ headers = url_handle.info() # the addinfourls have the .info() too
+ doc['etag'] = headers.getheader("ETag")
+ doc['last_modified'] = headers.getheader("Last-Modified")
+ doc['date'] = headers.getheader("Date")
+ doc['page_scraped'] = time.time()
+ doc['web_server'] = headers.getheader("Server")
+ doc['via'] = headers.getheader("Via")
+ doc['powered_by'] = headers.getheader("X-Powered-By")
+ doc['file_size'] = headers.getheader("Content-Length")
+ content_type = headers.getheader("Content-Type")
+ if content_type != None:
+ doc['mime_type'] = content_type.split(";")[0]
+ else:
+ (type,encoding) = mimetypes.guess_type(url)
+ doc['mime_type'] = type
+ if hasattr(url_handle, 'code'):
+ if url_handle.code == 304:
+ print "the web page has not been modified"+hash
+ last_attachment_fname = doc["_attachments"].keys()[-1]
+ last_attachment = docsdb.get_attachment(doc,last_attachment_fname)
+ content = last_attachment
+ return (doc['url'],doc['mime_type'],content)
+ else:
+ print "new webpage loaded"
+ content = url_handle.read()
+ docsdb.save(doc)
+ doc = docsdb.get(hash) # need to get a _rev
+ docsdb.put_attachment(doc, content, str(time.time())+"-"+os.path.basename(url), doc['mime_type'])
+ return (doc['url'], doc['mime_type'], content)
+ #store as attachment epoch-filename
+
+ except urllib2.URLError as e:
+ print "error!"
+ error = ""
+ if hasattr(e, 'reason'):
+ error = "error %s in downloading %s" % (str(e.reason), url)
+ elif hasattr(e, 'code'):
+ error = "error %s in downloading %s" % (e.code, url)
+ print error
+ doc['error'] = error
+ docsdb.save(doc)
+ return (None,None,None)
def scrapeAndStore(docsdb, url, depth, fieldName, agencyID):
- (url,mime_type,content) = fetchURL(docsdb, url, fieldName, agencyID)
- badURLs = ["http://www.ausport.gov.au/supporting/funding/grants_and_scholarships/grant_funding_report"]
- if content != None and depth > 0 and url != "http://www.ausport.gov.au/supporting/funding/grants_and_scholarships/grant_funding_report":
- if mime_type == "text/html" or mime_type == "application/xhtml+xml" or mime_type =="application/xml":
- # http://www.crummy.com/software/BeautifulSoup/documentation.html
- soup = BeautifulSoup(content)
- navIDs = soup.findAll(id=re.compile('nav|Nav|menu|bar|left|right|sidebar|more-links|breadcrumb|footer|header'))
- for nav in navIDs:
- print "Removing element", nav['id']
- nav.extract()
- navClasses = soup.findAll(attrs={'class' : re.compile('nav|menu|bar|left|right|sidebar|more-links|breadcrumb|footer|header')})
- for nav in navClasses:
- print "Removing element", nav['class']
- nav.extract()
- links = soup.findAll('a') # soup.findAll('a', id=re.compile("^p-"))
- linkurls = set([])
- for link in links:
- if link.has_key("href"):
- if link['href'].startswith("http"):
- # lets not do external links for now
- # linkurls.add(link['href'])
- None
- if link['href'].startswith("mailto"):
- # not http
- None
- if link['href'].startswith("javascript"):
- # not http
- None
- else:
- # remove anchors and spaces in urls
- linkurls.add(fullurl(url,link['href']))
- for linkurl in linkurls:
- #print linkurl
- scrapeAndStore(docsdb, linkurl, depth-1, fieldName, agencyID)
+ (url,mime_type,content) = fetchURL(docsdb, url, fieldName, agencyID)
+ badURLs = ["http://www.ausport.gov.au/supporting/funding/grants_and_scholarships/grant_funding_report"]
+ if content != None and depth > 0 and url != "http://www.ausport.gov.au/supporting/funding/grants_and_scholarships/grant_funding_report":
+ if mime_type == "text/html" or mime_type == "application/xhtml+xml" or mime_type =="application/xml":
+ # http://www.crummy.com/software/BeautifulSoup/documentation.html
+ soup = BeautifulSoup(content)
+ navIDs = soup.findAll(id=re.compile('nav|Nav|menu|bar|left|right|sidebar|more-links|breadcrumb|footer|header'))
+ for nav in navIDs:
+ print "Removing element", nav['id']
+ nav.extract()
+ navClasses = soup.findAll(attrs={'class' : re.compile('nav|menu|bar|left|right|sidebar|more-links|breadcrumb|footer|header')})
+ for nav in navClasses:
+ print "Removing element", nav['class']
+ nav.extract()
+ links = soup.findAll('a') # soup.findAll('a', id=re.compile("^p-"))
+ linkurls = set([])
+ for link in links:
+ if link.has_key("href"):
+ if link['href'].startswith("http"):
+ # lets not do external links for now
+ # linkurls.add(link['href'])
+ None
+ if link['href'].startswith("mailto"):
+ # not http
+ None
+ if link['href'].startswith("javascript"):
+ # not http
+ None
+ else:
+ # remove anchors and spaces in urls
+ linkurls.add(fullurl(url,link['href']))
+ for linkurl in linkurls:
+ #print linkurl
+ scrapeAndStore(docsdb, linkurl, depth-1, fieldName, agencyID)
#couch = couchdb.Server('http://192.168.1.148:5984/')
couch = couchdb.Server('http://127.0.0.1:5984/')
@@ -196,20 +198,20 @@
docsdb = couch['disclosr-documents']
if __name__ == "__main__":
- for row in agencydb.view('app/getScrapeRequired'): #not recently scraped agencies view?
- agency = agencydb.get(row.id)
- print agency['name']
- for key in agency.keys():
- if key == "FOIDocumentsURL" and "status" not in agency.keys:
- scrapeAndStore(docsdb, agency[key],0,key,agency['_id'])
- if key == 'website' and False:
- scrapeAndStore(docsdb, agency[key],0,key,agency['_id'])
- agency['metadata']['lastScraped'] = time.time()
- if key.endswith('URL') and False:
- print key
- depth = 1
- if 'scrapeDepth' in agency.keys():
- depth = agency['scrapeDepth']
- scrapeAndStore(docsdb, agency[key],depth,key,agency['_id'])
- agencydb.save(agency)
-
+ for row in agencydb.view('app/getScrapeRequired'): #not recently scraped agencies view?
+ agency = agencydb.get(row.id)
+ print agency['name']
+ for key in agency.keys():
+ if key == "FOIDocumentsURL" and "status" not in agency.keys:
+ scrapeAndStore(docsdb, agency[key],0,key,agency['_id'])
+ if key == 'website' and False:
+ scrapeAndStore(docsdb, agency[key],0,key,agency['_id'])
+ agency['metadata']['lastScraped'] = time.time()
+ if key.endswith('URL') and False:
+ print key
+ depth = 1
+ if 'scrapeDepth' in agency.keys():
+ depth = agency['scrapeDepth']
+ scrapeAndStore(docsdb, agency[key],depth,key,agency['_id'])
+ agencydb.save(agency)
+
--- /dev/null
+++ b/documents/scrapers/0049d35216493c545ef5f7f000e6b252.py
@@ -1,1 +1,19 @@
+import sys
+import os
+sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
+import genericScrapers
+
+class ScraperImplementation(genericScrapers.GenericPDFDisclogScraper):
+
+ def __init__(self):
+ super(ScraperImplementation, self).__init__()
+
+
+if __name__ == '__main__':
+ print 'Subclass:', issubclass(ScraperImplementation,
+ genericScrapers.GenericOAICDisclogScraper)
+ print 'Instance:', isinstance(ScraperImplementation(),
+ genericScrapers.GenericOAICDisclogScraper)
+ ScraperImplementation().doScrape()
+
--- a/documents/scrapers/0049d35216493c545ef5f7f000e6b252.txt
+++ /dev/null
@@ -1,2 +1,1 @@
-pdf
--- /dev/null
+++ b/documents/scrapers/00a294de663db69062ca09aede7c0487.py
@@ -1,1 +1,47 @@
+import sys,os
+sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
+import genericScrapers
+import dateutil
+from dateutil.parser import *
+from datetime import *
+
+class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
+
+ def __init__(self):
+ super(ScraperImplementation, self).__init__()
+ def getDate(self, content, entry, doc):
+ date = ''.join(entry.find('th').stripped_strings).strip()
+ (a, b, c) = date.partition("(")
+ date = self.remove_control_chars(a.replace("Octber", "October"))
+ print date
+ edate = parse(date, dayfirst=True, fuzzy=True).strftime("%Y-%m-%d")
+ print edate
+ doc.update({'date': edate})
+ return
+ def getColumnCount(self):
+ return 4
+
+ def getTable(self, soup):
+ return soup.find(summary="List of Defence documents released under Freedom of Information requets")
+
+ def getColumns(self, columns):
+ (id, description, access, notes) = columns
+ return (id, None, description, description, notes)
+
+
+if __name__ == '__main__':
+ print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
+ print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
+
+ nsi = ScraperImplementation()
+ nsi.disclogURL = "http://www.defence.gov.au/foi/disclosure_log_201213.cfm"
+ nsi.doScrape()
+
+ nsi.disclogURL = "http://www.defence.gov.au/foi/disclosure_log_201112.cfm"
+ nsi.doScrape()
+
+ nsi.disclogURL = "http://www.defence.gov.au/foi/disclosure_log_201011.cfm"
+ nsi.doScrape()
+
+
--- a/documents/scrapers/00a294de663db69062ca09aede7c0487.txt
+++ /dev/null
@@ -1,2 +1,1 @@
-multipage
--- a/documents/scrapers/0ae822d1a748e60d90f0b79b97d5a3e5.txt
+++ /dev/null
@@ -1,2 +1,1 @@
-ACMA style
--- /dev/null
+++ b/documents/scrapers/1803322b27286950cab0c543168b5f21.py
@@ -1,1 +1,58 @@
+import sys,os
+sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
+import genericScrapers
+import dateutil
+from dateutil.parser import *
+from datetime import *
+import scrape
+from bs4 import BeautifulSoup
+class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
+ def __init__(self):
+ super(ScraperImplementation, self).__init__()
+
+ def getDescription(self,content, entry,doc):
+ link = None
+ links = []
+ description = ""
+ for atag in entry.find_all('a'):
+ if atag.has_key('href'):
+ link = scrape.fullurl(self.getURL(), atag['href'])
+ (url, mime_type, htcontent) = scrape.fetchURL(scrape.docsdb, link, "foidocuments", self.getAgencyID(), False)
+ if htcontent != None:
+ if mime_type == "text/html" or mime_type == "application/xhtml+xml" or mime_type =="application/xml":
+ soup = BeautifulSoup(htcontent)
+ row = soup.find(id="content_div_148050")
+ description = ''.join(row.stripped_strings)
+ for atag in row.find_all("a"):
+ if atag.has_key('href'):
+ links.append(scrape.fullurl(link, atag['href']))
+
+ if links != []:
+ doc.update({'links': links})
+ if description != "":
+ doc.update({ 'description': description})
+ def getColumnCount(self):
+ return 4
+
+ def getColumns(self, columns):
+ (id, date, datepub, title) = columns
+ return (id, date, title, title, None)
+
+
+if __name__ == '__main__':
+ print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
+ print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
+
+ nsi = ScraperImplementation()
+ nsi.disclogURL = "http://www.dbcde.gov.au/about_us/freedom_of_information_disclosure_log/foi_list?result_146858_result_page=1"
+ nsi.doScrape()
+ nsi.disclogURL = "http://www.dbcde.gov.au/about_us/freedom_of_information_disclosure_log/foi_list?result_146858_result_page=2"
+ nsi.doScrape()
+ nsi.disclogURL = "http://www.dbcde.gov.au/about_us/freedom_of_information_disclosure_log/foi_list?result_146858_result_page=3"
+ nsi.doScrape()
+ nsi.disclogURL = "http://www.dbcde.gov.au/about_us/freedom_of_information_disclosure_log/foi_list?result_146858_result_page=4"
+ nsi.doScrape()
+ nsi.disclogURL = "http://www.dbcde.gov.au/about_us/freedom_of_information_disclosure_log/foi_list?result_146858_result_page=5"
+ nsi.doScrape()
+
--- a/documents/scrapers/1803322b27286950cab0c543168b5f21.txt
+++ /dev/null
@@ -1,2 +1,1 @@
-multipage log
--- /dev/null
+++ b/documents/scrapers/6afdde1d4ff1ad8d8cfe1a8675ea83bd.py
@@ -1,1 +1,19 @@
+import sys
+import os
+sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
+import genericScrapers
+
+class ScraperImplementation(genericScrapers.GenericPDFDisclogScraper):
+
+ def __init__(self):
+ super(ScraperImplementation, self).__init__()
+
+
+if __name__ == '__main__':
+ print 'Subclass:', issubclass(ScraperImplementation,
+ genericScrapers.GenericOAICDisclogScraper)
+ print 'Instance:', isinstance(ScraperImplementation(),
+ genericScrapers.GenericOAICDisclogScraper)
+ ScraperImplementation().doScrape()
+
--- a/documents/scrapers/6afdde1d4ff1ad8d8cfe1a8675ea83bd.txt
+++ /dev/null
@@ -1,2 +1,1 @@
-PDF
--- /dev/null
+++ b/documents/scrapers/7c6adc1d41cf029bf1a0959e5156477a.py
@@ -1,1 +1,42 @@
+import sys
+import os
+sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
+import genericScrapers
+import scrape
+from datetime import date
+from pyquery import PyQuery as pq
+from lxml import etree
+import urllib
+class ACMADisclogScraper(genericScrapers.GenericDisclogScraper):
+
+ def doScrape(self):
+ foidocsdb = scrape.couch['disclosr-foidocuments']
+ (url, mime_type, content) = scrape.fetchURL(scrape.docsdb,
+ self.getURL(), "foidocuments", self.getAgencyID())
+
+ d = pq(content)
+ d.make_links_absolute()
+ d.table.filter('.ncTAF_DataTABLE')
+ print [i.text() for i in d.items('span')]
+ description = ""
+ dochash = scrape.mkhash(description)
+ doc = foidocsdb.get(dochash)
+ if doc is None:
+ print "saving " + dochash
+ edate = date.today().strftime("%Y-%m-%d")
+ doc = {'_id': dochash, 'agencyID': self.getAgencyID()
+ , 'url': self.getURL(), 'docID': dochash,
+ "date": edate, "title": "Disclosure Log Updated", "description": description}
+ #foidocsdb.save(doc)
+ else:
+ print "already saved"
+
+
+if __name__ == '__main__':
+ print 'Subclass:', issubclass(ACMADisclogScraper,
+ genericScrapers.GenericDisclogScraper)
+ print 'Instance:', isinstance(ACMADisclogScraper(),
+ genericScrapers.GenericDisclogScraper)
+ ACMADisclogScraper().doScrape()
+
--- /dev/null
+++ b/documents/scrapers/8317df630946937864d31a4728ad8ee8.py
@@ -1,1 +1,19 @@
+import sys
+import os
+sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
+import genericScrapers
+
+class ScraperImplementation(genericScrapers.GenericPDFDisclogScraper):
+
+ def __init__(self):
+ super(ScraperImplementation, self).__init__()
+
+
+if __name__ == '__main__':
+ print 'Subclass:', issubclass(ScraperImplementation,
+ genericScrapers.GenericOAICDisclogScraper)
+ print 'Instance:', isinstance(ScraperImplementation(),
+ genericScrapers.GenericOAICDisclogScraper)
+ ScraperImplementation().doScrape()
+
--- a/documents/scrapers/8317df630946937864d31a4728ad8ee8.txt
+++ /dev/null
@@ -1,2 +1,1 @@
-pdf
--- /dev/null
+++ b/documents/scrapers/8796220032faf94501bd366763263685.py
@@ -1,1 +1,37 @@
+import sys,os
+sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
+import genericScrapers
+import dateutil
+from dateutil.parser import *
+from datetime import *
+
+class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
+
+ def __init__(self):
+ super(ScraperImplementation, self).__init__()
+
+ def getColumnCount(self):
+ return 6
+
+ def getColumns(self, columns):
+ (id, date, title, description, datepub, notes) = columns
+ return (id, date, title, description, notes)
+
+
+if __name__ == '__main__':
+ print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
+ print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
+
+ nsi = ScraperImplementation()
+ nsi.disclogURL = "http://www.dpmc.gov.au/foi/ips/disclosure_logs/pmo/2011-12.cfm"
+ nsi.doScrape()
+ nsi.disclogURL = "http://www.dpmc.gov.au/foi/ips/disclosure_logs/dpmc/2011-12.cfm"
+ nsi.doScrape()
+ nsi.disclogURL = "http://www.dpmc.gov.au/foi/ips/disclosure_logs/dpmc/2012-13.cfm"
+ nsi.doScrape()
+ nsi.disclogURL = "http://www.dpmc.gov.au/foi/ips/disclosure_logs/omsi/2011-12.cfm"
+ nsi.doScrape()
+ nsi.disclogURL = "http://www.dpmc.gov.au/foi/ips/disclosure_logs/omps/2012-13.cfm"
+ nsi.doScrape()
+
--- a/documents/scrapers/8796220032faf94501bd366763263685.txt
+++ /dev/null
@@ -1,2 +1,1 @@
-multiple pages
--- a/documents/scrapers/8e874a2fde8aa0ccdc6d14573d766540.py
+++ b/documents/scrapers/8e874a2fde8aa0ccdc6d14573d766540.py
@@ -3,7 +3,7 @@
import genericScrapers
import scrape
from bs4 import BeautifulSoup
-import codecs
+import codecs
#http://www.doughellmann.com/PyMOTW/abc/
class NewScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
def getDescription(self,content, entry,doc):
@@ -20,7 +20,7 @@
soup = BeautifulSoup(htcontent)
for text in soup.find(id="divFullWidthColumn").stripped_strings:
description = description + text.encode('ascii', 'ignore')
-
+
for atag in soup.find(id="divFullWidthColumn").find_all("a"):
if atag.has_key('href'):
links.append(scrape.fullurl(link,atag['href']))
@@ -76,11 +76,10 @@
if __name__ == '__main__':
print 'Subclass:', issubclass(NewScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
print 'Instance:', isinstance(NewScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
- #NewScraperImplementation().doScrape()
+ NewScraperImplementation().doScrape()
print 'Subclass:', issubclass(OldScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
print 'Instance:', isinstance(OldScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
osi = OldScraperImplementation()
osi.disclogURL = "http://archive.treasury.gov.au/content/foi_publications.asp?year=-1&abstract=0&classification=&=&titl=Disclosure+Log+-+Documents+Released+Under+FOI"
osi.doScrape()
-# old site too
--- /dev/null
+++ b/documents/scrapers/b0a3281ba66efe173c5a33d5ef90ff76.py
@@ -1,1 +1,35 @@
+import sys,os
+sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
+import genericScrapers
+import dateutil
+from dateutil.parser import *
+from datetime import *
+
+class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
+
+ def __init__(self):
+ super(ScraperImplementation, self).__init__()
+
+ def getColumnCount(self):
+ return 2
+
+ def getColumns(self, columns):
+ (date, title) = columns
+ return (title, date, title, title, None)
+
+
+if __name__ == '__main__':
+ print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
+ print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
+
+ nsi = ScraperImplementation()
+ nsi.disclogURL = "http://www.immi.gov.au/about/foi/foi-disclosures-2012.htm"
+ nsi.doScrape()
+ nsi.disclogURL = "http://www.immi.gov.au/about/foi/foi-disclosures-2011.htm"
+ nsi.doScrape()
+ nsi.disclogURL = "http://www.immi.gov.au/about/foi/foi-disclosures-2010.htm"
+ nsi.doScrape()
+ nsi.disclogURL = "http://www.immi.gov.au/about/foi/foi-disclosures-2009.htm"
+ nsi.doScrape()
+
--- a/documents/scrapers/b0a3281ba66efe173c5a33d5ef90ff76.txt
+++ /dev/null
@@ -1,2 +1,1 @@
-multipage immi
--- /dev/null
+++ b/documents/scrapers/c57c0bf315ce5977e730905707a2f6a3.py
@@ -1,1 +1,19 @@
+import sys
+import os
+sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
+import genericScrapers
+
+class ScraperImplementation(genericScrapers.GenericPDFDisclogScraper):
+
+ def __init__(self):
+ super(ScraperImplementation, self).__init__()
+
+
+if __name__ == '__main__':
+ print 'Subclass:', issubclass(ScraperImplementation,
+ genericScrapers.GenericOAICDisclogScraper)
+ print 'Instance:', isinstance(ScraperImplementation(),
+ genericScrapers.GenericOAICDisclogScraper)
+ ScraperImplementation().doScrape()
+
--- a/documents/scrapers/c57c0bf315ce5977e730905707a2f6a3.txt
+++ /dev/null
@@ -1,3 +1,1 @@
-# pdf
-http://www.awm.gov.au/about/AWM_Disclosure_Log.pdf
--- a/documents/sitemap.xml.php
+++ b/documents/sitemap.xml.php
@@ -10,10 +10,18 @@
if (strpos($file, ".php") !== false && $file != "index.php" && $file != "sitemap.xml.php")
echo "