From: Alex Sadleir (maxious [at] lambdacomplex.org)
--- a/documents/agency.php
+++ b/documents/agency.php
@@ -31,6 +31,12 @@
     } else {
         $rows = $foidocsdb->get_view("app", "byAgencyID?group=true", null, false, false, true)->rows;
         if ($rows) {
+            function cmp($a, $b)
+            {
+                global $idtoname;
+                return strcmp($idtoname[$a->key], $idtoname[$b->key]);
+            }
+            usort($rows, "cmp");
             foreach ($rows as $row) {
                 echo '' . $idtoname[$row->key] . " (" . $row->value . " records)\n";
             }
--- a/documents/genericScrapers.py
+++ b/documents/genericScrapers.py
@@ -199,6 +199,17 @@
     def getRows(self, table):
         return table.find_all('tr')
 
+    def getDocHash(self, id, date, url):
+        if id.string is None:
+            print "no id, using date as hash"
+            return scrape.mkhash(
+                self.remove_control_chars(
+                    url + (''.join(date.stripped_strings))))
+        else:
+            return scrape.mkhash(
+                self.remove_control_chars(
+                    url + (''.join(id.stripped_strings))))
+
     def getDate(self, content, entry, doc):
         strdate = ''.join(content.stripped_strings).strip()
         (a, b, c) = strdate.partition("(")
@@ -240,15 +251,7 @@
                     (id, date, title,
                      description, notes) = self.getColumns(columns)
                     print self.remove_control_chars(
                         ''.join(id.stripped_strings))
-                    if id.string is None:
-                        print "no id, using date as hash"
-                        dochash = scrape.mkhash(
-                            self.remove_control_chars(
-                                url + (''.join(date.stripped_strings))))
-                    else:
-                        dochash = scrape.mkhash(
-                            self.remove_control_chars(
-                                url + (''.join(id.stripped_strings))))
+                    dochash = self.getDocHash(id, date, url)
                     doc = foidocsdb.get(dochash)
                     if doc is None:
--- a/documents/runScrapers.sh
+++ b/documents/runScrapers.sh
@@ -1,3 +1,4 @@
+#!/bin/bash
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
echo $DIR
cd $DIR
--- a/documents/scrapers/5716ce0aacfe98f7d638b7a66b7f1040.py
+++ b/documents/scrapers/5716ce0aacfe98f7d638b7a66b7f1040.py
@@ -6,6 +6,11 @@
 #http://www.doughellmann.com/PyMOTW/abc/
 class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
+    def getDocHash(self, id, date, url):
+        ''' url changes on every request so ignore it for the hash '''
+        return scrape.mkhash(
+            self.remove_control_chars(
+                ''.join(id.stripped_strings)))
     def getColumnCount(self):
         return 4
     def getColumns(self,columns):
--- a/documents/scrapers/e2a845e55bc9986e6c75c5ad2c508b8d.py
+++ b/documents/scrapers/e2a845e55bc9986e6c75c5ad2c508b8d.py
@@ -1,16 +1,54 @@
 import sys,os
 sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
 import genericScrapers
-#RSS feed not detailed
-#http://www.doughellmann.com/PyMOTW/abc/
-class ScraperImplementation(genericScrapers.GenericRSSDisclogScraper):
-    def getColumns(self,columns):
-        (id, date, title, description, notes) = columns
-        return (id, date, title, description, notes)
+import dateutil
+from dateutil.parser import *
+from datetime import *
+import scrape
+from bs4 import BeautifulSoup
+
+class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
+    def __init__(self):
+        super(ScraperImplementation, self).__init__()
+
+    def getTable(self, soup):
+        return soup.find(id='content')
+
+    def getDescription(self, content, entry, doc):
+        link = None
+        links = []
+        description = ""
+        for atag in entry.find_all('a'):
+            if atag.has_attr('href'):
+                link = scrape.fullurl(self.getURL(), atag['href'])
+                (url, mime_type, htcontent) = scrape.fetchURL(scrape.docsdb, link, "foidocuments", self.getAgencyID(), False)
+                if htcontent != None:
+                    if mime_type == "text/html" or mime_type == "application/xhtml+xml" or mime_type == "application/xml":
+                        soup = BeautifulSoup(htcontent)
+                        row = soup.find(id="foidetails")
+                        if row == None:
+                            row = soup.find(id="content").table
+                        if row == None:
+                            row = soup.find(id="content")
+                        description = ''.join(row.stripped_strings)
+                        for atag in row.find_all("a"):
+                            if atag.has_attr('href'):
+                                links.append(scrape.fullurl(link, atag['href']))
+
+        if links != []:
+            doc.update({'links': links})
+        if description != "":
+            doc.update({'description': description})
+
+    def getColumnCount(self):
+        return 3
+
+    def getColumns(self, columns):
+        (id, title, date) = columns
+        return (id, date, title, title, None)
+
 if __name__ == '__main__':
-    print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericRSSDisclogScraper)
-    print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericRSSDisclogScraper)
+    print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
+    print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
     ScraperImplementation().doScrape()
--- a/documents/template.inc.php
+++ b/documents/template.inc.php
@@ -77,7 +77,7 @@