<?php
include('template.inc.php');
include_header_documents("");
include_once('../include/common.inc.php');
echo "<table>
    <tr><th>Agency Name</th><th>Disclosure Log URL recorded?</th><th>Do we monitor this URL?</th></tr>";
$agenciesdb = $server->get_db('disclosr-agencies');
$docsdb = $server->get_db('disclosr-documents');
$agencies = 0;
$disclogs = 0;
$red = 0;
$green = 0;
$orange = 0;
try {
    $rows = $agenciesdb->get_view("app", "byCanonicalName", null, true)->rows;
    if ($rows) {
        foreach ($rows as $row) {
            echo "<tr><td><b>" . $row->value->name . "</b>";
            if ($ENV == "DEV")
                echo "<br>(" . $row->id . ")";
            echo "</td>\n";
            $agencies++;
            echo "<td>";
            if (isset($row->value->FOIDocumentsURL)) {
                $disclogs++;
                echo '<a href="' . $row->value->FOIDocumentsURL . '">'
                    . $row->value->FOIDocumentsURL . '</a>';
                if ($ENV == "DEV")
                    echo '<br><small>(<a href="viewDocument.php?hash=' . md5($row->value->FOIDocumentsURL) . '">'
                        . 'view local copy</a>)</small>';
            } else {
                echo "<font color='red'>✘</font>";
            }
            echo "</td>\n<td>";
            if (isset($row->value->FOIDocumentsURL)) {
                if (file_exists("./scrapers/" . $row->id . '.py')) {
                    echo "<font color='green'>✔</font>";
                    $green++;
                } else if (file_exists("./scrapers/" . $row->id . '.txt')) {
                    echo "<font color='orange'><b>▬</b></font>";
                    $orange++;
                } else {
                    echo "<font color='red'>✘</font>";
                    $red++;
                }
            }
            echo "</td></tr>\n";
        }
    }
} catch (SetteeRestClientException $e) {
    setteErrorHandler($e);
}
echo "</table>";
if ($agencies > 0 && $disclogs > 0) {
    echo $agencies . " agencies, " . round(($disclogs / $agencies) * 100) . "% with disclosure logs, "
        . round(($green / $disclogs) * 100) . "% with scrapers, " . round(($red / $disclogs) * 100)
        . "% without scrapers, " . round(($orange / $disclogs) * 100) . "% WIP scrapers";
}
include_footer_documents();
?>
import sys, os
sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
import scrape
from bs4 import BeautifulSoup
from time import mktime
import feedparser
import abc
import unicodedata, re
import dateutil
from dateutil.parser import *
from datetime import *


class GenericDisclogScraper(object):
    __metaclass__ = abc.ABCMeta
    agencyID = None
    disclogURL = None

    def remove_control_chars(self, input):
        return "".join([i for i in input if ord(i) in range(32, 127)])

    def getAgencyID(self):
        """ disclosr agency id """
        if self.agencyID is None:
            self.agencyID = os.path.basename(sys.argv[0]).replace(".py", "")
        return self.agencyID

    def getURL(self):
        """ disclog URL """
        if self.disclogURL is None:
            agency = scrape.agencydb.get(self.getAgencyID())
            self.disclogURL = agency['FOIDocumentsURL']
        return self.disclogURL

    @abc.abstractmethod
    def doScrape(self):
        """ do the scraping """
        return

    @abc.abstractmethod
    def getDescription(self, content, entry, doc):
        """ get description """
        return


class GenericRSSDisclogScraper(GenericDisclogScraper):

    def doScrape(self):
        foidocsdb = scrape.couch['disclosr-foidocuments']
        (url, mime_type, content) = scrape.fetchURL(scrape.docsdb,
            self.getURL(), "foidocuments", self.getAgencyID())
        feed = feedparser.parse(content)
        for entry in feed.entries:
            #print entry
            print entry.id
            hash = scrape.mkhash(entry.id)
            #print hash
            doc = foidocsdb.get(hash)
            #print doc
            if doc is None:
                print "saving " + hash
                edate = datetime.fromtimestamp(mktime(entry.published_parsed)).strftime("%Y-%m-%d")
                doc = {'_id': hash, 'agencyID': self.getAgencyID(), 'url': entry.link, 'docID': entry.id,
                       "date": edate, "title": entry.title}
                self.getDescription(entry, entry, doc)
                foidocsdb.save(doc)
            else:
                print "already saved"

    def getDescription(self, content, entry, doc):
        """ get description from rss entry """
        doc.update({'description': content.summary})
        return
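
# Illustrative sketch (assumption, not part of the original file): an agency whose
# disclosure log is published as an RSS/Atom feed typically gets a tiny scraper
# file named <agencyID>.py that simply reuses the class above, e.g.:
#
#   import sys, os
#   sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
#   import genericScrapers
#
#   if __name__ == '__main__':
#       genericScrapers.GenericRSSDisclogScraper().doScrape()
#
# getAgencyID() falls back to the script's filename, so the file name alone ties
# the scraper to its agency record.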


class GenericOAICDisclogScraper(GenericDisclogScraper):
    __metaclass__ = abc.ABCMeta

    @abc.abstractmethod
    def getColumns(self, columns):
        """ rearranges columns if required """
        return

    def getColumnCount(self):
        return 5

    def getDescription(self, content, entry, doc):
        """ get description from table cell """
        descriptiontxt = ""
        for string in content.stripped_strings:
            descriptiontxt = descriptiontxt + " \n" + string
        doc.update({'description': descriptiontxt})
        return

    def getTitle(self, content, entry, doc):
        doc.update({'title': (''.join(content.stripped_strings))})
        return

    def getTable(self, soup):
        return soup.table

    def getDate(self, content, entry, doc):
        edate = parse(''.join(content.stripped_strings).strip(), dayfirst=True, fuzzy=True).strftime("%Y-%m-%d")
        print edate
        doc.update({'date': edate})
        return

    def getLinks(self, content, entry, doc):
        links = []
        for atag in entry.find_all("a"):
            if atag.has_attr('href'):
                links.append(scrape.fullurl(content, atag['href']))
        if links != []:
            doc.update({'links': links})
        return

    def doScrape(self):
        foidocsdb = scrape.couch['disclosr-foidocuments']
        (url, mime_type, content) = scrape.fetchURL(scrape.docsdb,
            self.getURL(), "foidocuments", self.getAgencyID())
        if content is not None:
            if mime_type == "text/html" or mime_type == "application/xhtml+xml" or mime_type == "application/xml":
                # http://www.crummy.com/software/BeautifulSoup/documentation.html
                soup = BeautifulSoup(content)
                table = self.getTable(soup)
                for row in table.find_all('tr'):
                    columns = row.find_all('td')
                    if len(columns) == self.getColumnCount():
                        (id, date, description, title, notes) = self.getColumns(columns)
                        print ''.join(id.stripped_strings)
                        if id.string is None:
                            hash = scrape.mkhash(self.remove_control_chars(url + (''.join(date.stripped_strings))))
                        else:
                            hash = scrape.mkhash(self.remove_control_chars(url + (''.join(id.stripped_strings))))
                        doc = foidocsdb.get(hash)
                        if doc is None:
                            print "saving " + hash
                            doc = {'_id': hash, 'agencyID': self.getAgencyID(),
                                   'url': self.getURL(), 'docID': (''.join(id.stripped_strings))}
                            self.getLinks(self.getURL(), row, doc)
                            self.getTitle(title, row, doc)
                            self.getDate(date, row, doc)
                            self.getDescription(description, row, doc)
                            if notes is not None:
                                doc.update({'notes': (''.join(notes.stripped_strings))})
                            foidocsdb.save(doc)
                        else:
                            print "already saved " + hash
                    elif len(row.find_all('th')) == self.getColumnCount():
                        print "header row"
                    else:
                        print "ERROR number of columns incorrect"
                        print row
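
# Illustrative sketch (assumption, not part of the original file): a concrete
# table-based scraper subclasses GenericOAICDisclogScraper in its own
# <agencyID>.py file and overrides only the hooks it needs. The element id
# 'content' below is a made-up example of narrowing the page to the right table.
#
#   import sys, os
#   sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
#   import genericScrapers
#
#   class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
#       def getTable(self, soup):
#           return soup.find(id='content').table
#
#       def getColumns(self, columns):
#           # map the page's cell order to (id, date, description, title, notes)
#           (id, date, title, description, notes) = columns
#           return (id, date, description, title, notes)
#
#   if __name__ == '__main__':
#       ScraperImplementation().doScrape()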

#http://packages.python.org/CouchDB/client.html
import couchdb
import urllib2
from BeautifulSoup import BeautifulSoup
import re
import hashlib
from urlparse import urljoin
import time
import os
import mimetypes
import urllib
import urlparse


def mkhash(input):
    return hashlib.md5(input).hexdigest().encode("utf-8")


def canonurl(url):
    r"""Return the canonical, ASCII-encoded form of a UTF-8 encoded URL, or ''
    if the URL looks invalid.

    >>> canonurl('\xe2\x9e\xa1.ws')  # tinyarro.ws
    'http://xn--hgi.ws/'
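
    Bare domains are given a scheme and a trailing slash (illustrative example):
    >>> canonurl('example.com')
    'http://example.com/'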
""" | """ |
# strip spaces at the ends and ensure it's prefixed with 'scheme://' | # strip spaces at the ends and ensure it's prefixed with 'scheme://' |
url = url.strip() | url = url.strip() |
if not url: | if not url: |
return '' | return '' |
if not urlparse.urlsplit(url).scheme: | if not urlparse.urlsplit(url).scheme: |
url = 'http://' + url | url = 'http://' + url |
# turn it into Unicode | # turn it into Unicode |
#try: | #try: |
# url = unicode(url, 'utf-8') | # url = unicode(url, 'utf-8') |
#except UnicodeDecodeError: | #except UnicodeDecodeError: |
# return '' # bad UTF-8 chars in URL | # return '' # bad UTF-8 chars in URL |
# parse the URL into its components | # parse the URL into its components |
parsed = urlparse.urlsplit(url) | parsed = urlparse.urlsplit(url) |
scheme, netloc, path, query, fragment = parsed | scheme, netloc, path, query, fragment = parsed |
# ensure scheme is a letter followed by letters, digits, and '+-.' chars | # ensure scheme is a letter followed by letters, digits, and '+-.' chars |
if not re.match(r'[a-z][-+.a-z0-9]*$', scheme, flags=re.I): | if not re.match(r'[a-z][-+.a-z0-9]*$', scheme, flags=re.I): |
return '' | return '' |
scheme = str(scheme) | scheme = str(scheme) |
# ensure domain and port are valid, eg: sub.domain.<1-to-6-TLD-chars>[:port] | # ensure domain and port are valid, eg: sub.domain.<1-to-6-TLD-chars>[:port] |
match = re.match(r'(.+\.[a-z0-9]{1,6})(:\d{1,5})?$', netloc, flags=re.I) | match = re.match(r'(.+\.[a-z0-9]{1,6})(:\d{1,5})?$', netloc, flags=re.I) |
if not match: | if not match: |
return '' | return '' |
domain, port = match.groups() | domain, port = match.groups() |
netloc = domain + (port if port else '') | netloc = domain + (port if port else '') |
netloc = netloc.encode('idna') | netloc = netloc.encode('idna') |
# ensure path is valid and convert Unicode chars to %-encoded | # ensure path is valid and convert Unicode chars to %-encoded |
if not path: | if not path: |
path = '/' # eg: 'http://google.com' -> 'http://google.com/' | path = '/' # eg: 'http://google.com' -> 'http://google.com/' |
path = urllib.quote(urllib.unquote(path.encode('utf-8')), safe='/;') | path = urllib.quote(urllib.unquote(path.encode('utf-8')), safe='/;') |
# ensure query is valid | # ensure query is valid |
query = urllib.quote(urllib.unquote(query.encode('utf-8')), safe='=&?/') | query = urllib.quote(urllib.unquote(query.encode('utf-8')), safe='=&?/') |
# ensure fragment is valid | # ensure fragment is valid |
fragment = urllib.quote(urllib.unquote(fragment.encode('utf-8'))) | fragment = urllib.quote(urllib.unquote(fragment.encode('utf-8'))) |
# piece it all back together, truncating it to a maximum of 4KB | # piece it all back together, truncating it to a maximum of 4KB |
url = urlparse.urlunsplit((scheme, netloc, path, query, fragment)) | url = urlparse.urlunsplit((scheme, netloc, path, query, fragment)) |
return url[:4096] | return url[:4096] |


def fullurl(url, href):
    href = href.replace(" ", "%20")
    href = re.sub('#.*$', '', href)
    return urljoin(url, href)
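
# For example (illustrative values): a relative link with a space and a fragment
#   fullurl('http://www.example.gov.au/foi/log.html', 'docs/report 1.pdf#page=2')
# resolves to
#   'http://www.example.gov.au/foi/docs/report%201.pdf'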


#http://diveintopython.org/http_web_services/etags.html
class NotModifiedHandler(urllib2.BaseHandler):
    def http_error_304(self, req, fp, code, message, headers):
        # turn a 304 Not Modified response into a normal result object so the
        # caller can detect it and reuse its cached copy of the page
        addinfourl = urllib2.addinfourl(fp, headers, req.get_full_url())
        addinfourl.code = code
        return addinfourl


def fetchURL(docsdb, url, fieldName, agencyID, scrape_again=True):
    url = canonurl(url)
    hash = mkhash(url)
    req = urllib2.Request(url)
    print "Fetching %s (%s)" % (url, hash)
    # canonurl() returns '' for anything it cannot make sense of, so check the
    # URL is usable before going any further
    if url is None or url == "" or url.startswith("mailto") or url.startswith("javascript") or url.startswith("#"):
        print "Not a valid HTTP url"
        return (None, None, None)
    doc = docsdb.get(hash)
    if doc is None:
        doc = {'_id': hash, 'agencyID': agencyID, 'url': url, 'fieldName': fieldName}
    else:
        if (('page_scraped' in doc) and (time.time() - doc['page_scraped']) < 60*24*14*1000):