From: Maxious
Date: Mon, 10 Dec 2012 23:34:34 +0000
Subject: pagination by docid
X-Git-Url: http://maxious.lambdacomplex.org/git/?p=disclosr.git&a=commitdiff&h=1e9da414dfbf564a4bc1882c6d43b32b90d4e51d

---

pagination by docid

Former-commit-id: 3aa6116d88acaa6e423bf1d972cda0f2a51d9f5b
---

--- a/couchdb/settee/src/classes/SetteeDatabase.class.php
+++ b/couchdb/settee/src/classes/SetteeDatabase.class.php
@@ -1,310 +1,316 @@
conn_url = $conn_url;
-  $this->dbname = $dbname;
-  $this->rest_client = SetteeRestClient::get_instance($this->conn_url);
-  }
-
-
-  /**
-  * Get UUID from CouchDB
-  *
-  * @return
-  *   CouchDB-generated UUID string
-  *
-  */
-  function gen_uuid() {
-    $ret = $this->rest_client->http_get('_uuids');
-    return $ret['decoded']->uuids[0]; // should never be empty at this point, so no checking
-  }
-
-  /**
-  * Create or update a document in the database
-  *
-  * @param $document
-  *   PHP object, a PHP associative array, or a JSON String representing the document to be saved. PHP Objects and arrays are JSON-encoded automatically.
-  *
-  *

If $document has an "_id" property set, it will be used as the document's unique id (even for a "create" operation).
-  * If "_id" is missing, CouchDB will be used to generate a UUID.
-  *
-  *

If $document has a "_rev" property (revision), document will be updated, rather than creating a new document. - * You have to provide "_rev" if you want to update an existing document, otherwise operation will be assumed to be - * one of creation and you will get a duplicate document exception from CouchDB. Also, you may not provide "_rev" but - * not provide "_id" since that is an invalid input. - * - * @param $allowRevAutoDetection - * Default: false. When true and _rev is missing from the document, save() function will auto-detect latest revision - * for a document and use it. This option is "false" by default because it involves an extra http HEAD request and - * therefore can make save() operation slightly slower if such auto-detection is not required. - * - * @return - * document object with the database id (uuid) and revision attached; - * - * @throws SetteeCreateDatabaseException - */ - function save($document, $allowRevAutoDetection = false) { - if (is_string($document)) { - $document = json_decode($document); - } - - // Allow passing of $document as an array (for syntactic simplicity and also because in JSON world it does not matter) - if(is_array($document)) { - $document = (object) $document; - } - - if (empty($document->_id) && empty($document->_rev)) { - $id = $this->gen_uuid(); - } - elseif (empty($document->_id) && !empty($document->_rev)) { - throw new SetteeWrongInputException("Error: You can not save a document with a revision provided, but missing id"); - } - else { - $id = $document->_id; - - if ($allowRevAutoDetection) { - try { - $rev = $this->get_rev($id); - } catch (SetteeRestClientException $e) { - // auto-detection may fail legitimately, if a document has never been saved before (new doc), so skipping error - } - if (!empty($rev)) { - $document->_rev = $rev; - } - } - } - - $full_uri = $this->dbname . "/" . $this->safe_urlencode($id); - $document_json = json_encode($document, JSON_NUMERIC_CHECK); - - $ret = $this->rest_client->http_put($full_uri, $document_json); - - $document->_id = $ret['decoded']->id; - $document->_rev = $ret['decoded']->rev; - - return $document; - } - - /** - * @param $doc - * @param $name - * @param $content - * Content of the attachment in a string-buffer format. This function will automatically base64-encode content for - * you, so you don't have to do it. - * @param $mime_type - * Optional. Will be auto-detected if not provided - * @return void - */ - public function add_attachment($doc, $name, $content, $mime_type = null) { - if (empty($doc->_attachments) || !is_object($doc->_attachments)) { - $doc->_attachments = new stdClass(); - } - - if (empty($mime_type)) { - $mime_type = $this->rest_client->content_mime_type($content); - } - - $doc->_attachments->$name = new stdClass(); - $doc->_attachments->$name->content_type = $mime_type; - $doc->_attachments->$name->data = base64_encode($content); - } - - /** - * @param $doc - * @param $name - * @param $file - * Full path to a file (e.g. as returned by PHP's realpath function). - * @param $mime_type - * Optional. Will be auto-detected if not provided - * @return void - */ - public function add_attachment_file($doc, $name, $file, $mime_type = null) { - $content = file_get_contents($file); - $this->add_attachment($doc, $name, $content, $mime_type); - } - - /** - * - * Retrieve a document from CouchDB - * - * @throws SetteeWrongInputException - * - * @param $id - * Unique ID (usually: UUID) of the document to be retrieved. - * @return - * database document in PHP object format. 
- */ - function get($id) { - if (empty($id)) { - throw new SetteeWrongInputException("Error: Can't retrieve a document without a uuid."); - } - - $full_uri = $this->dbname . "/" . $this->safe_urlencode($id); -$full_uri = str_replace("%3Frev%3D","?rev=",$full_uri); - $ret = $this->rest_client->http_get($full_uri); - return $ret['decoded']; - } - - /** - * - * Get the latest revision of a document with document id: $id in CouchDB. - * - * @throws SetteeWrongInputException - * - * @param $id - * Unique ID (usually: UUID) of the document to be retrieved. - * @return - * database document in PHP object format. - */ - function get_rev($id) { - if (empty($id)) { - throw new SetteeWrongInputException("Error: Can't query a document without a uuid."); - } - - $full_uri = $this->dbname . "/" . $this->safe_urlencode($id); - $headers = $this->rest_client->http_head($full_uri); - if (empty($headers['Etag'])) { - throw new SetteeRestClientException("Error: could not retrieve revision. Server unexpectedly returned empty Etag"); - } - $etag = str_replace('"', '', $headers['Etag']); - return $etag; - } - - /** - * Delete a document - * - * @param $document - * a PHP object or JSON representation of the document that has _id and _rev fields. - * - * @return void - */ - function delete($document) { - if (!is_object($document)) { - $document = json_decode($document); - } - - $full_uri = $this->dbname . "/" . $this->safe_urlencode($document->_id) . "?rev=" . $document->_rev; - $this->rest_client->http_delete($full_uri); - } - - - /*----------------- View-related functions --------------*/ - - /** - * Create a new view or update an existing one. - * - * @param $design_doc - * @param $view_name - * @param $map_src - * Source code of the map function in Javascript - * @param $reduce_src - * Source code of the reduce function in Javascript (optional) - * @return void - */ - function save_view($design_doc, $view_name, $map_src, $reduce_src = null) { - $obj = new stdClass(); - $obj->_id = "_design/" . urlencode($design_doc); - $view_name = urlencode($view_name); - $obj->views->$view_name->map = $map_src; - if (!empty($reduce_src)) { - $obj->views->$view_name->reduce = $reduce_src; - } - - // allow safe updates (even if slightly slower due to extra: rev-detection check). - return $this->save($obj, true); - } - - /** - * Create a new view or update an existing one. - * - * @param $design_doc - * @param $view_name - * @param $key - * key parameter to a view. Can be a single value or an array (for a range). If passed an array, function assumes - * that first element is startkey, second: endkey. - * @param $descending - * return results in descending order. Please don't forget that if you are using a startkey/endkey, when you change - * order you also need to swap startkey and endkey values! - * - * @return void - */ - function get_view($design_doc, $view_name, $key = null, $descending = false, $limit = false) { - $id = "_design/" . urlencode($design_doc); - $view_name = urlencode($view_name); - $id .= "/_view/$view_name"; - - $data = array(); - if (!empty($key)) { - if (is_string($key)) { - $data = "key=" . '"' . $key . '"'; - } - elseif (is_array($key)) { - list($startkey, $endkey) = $key; - $data = "startkey=" . '"' . $startkey . '"&' . "endkey=" . '"' . $endkey . 
'"'; - } - - if ($descending) { - $data .= "&descending=true"; - } - if ($limit) { - $data .= "&limit=".$limit; - } - } - - - - if (empty($id)) { - throw new SetteeWrongInputException("Error: Can't retrieve a document without a uuid."); - } - - $full_uri = $this->dbname . "/" . $this->safe_urlencode($id); -$full_uri = str_replace("%253Fgroup%253D","?group=",$full_uri); -$full_uri = str_replace("%253Flimit%253D","?limit=",$full_uri); - $ret = $this->rest_client->http_get($full_uri, $data); - return $ret['decoded']; - - } - - /** - * @param $id - * @return - * return a properly url-encoded id. - */ - private function safe_urlencode($id) { - //-- System views like _design can have "/" in their URLs. - $id = rawurlencode($id); - if (substr($id, 0, 1) == '_') { - $id = str_replace('%2F', '/', $id); - } - return $id; - } - - /** Getter for a database name */ - function get_name() { - return $this->dbname; - } + /** + * Base URL of the CouchDB REST API + */ + private $conn_url; + + /** + * HTTP REST Client instance + */ + protected $rest_client; + + /** + * Name of the database + */ + private $dbname; + + /** + * Default constructor + */ + function __construct($conn_url, $dbname) { + $this->conn_url = $conn_url; + $this->dbname = $dbname; + $this->rest_client = SetteeRestClient::get_instance($this->conn_url); + } + + /** + * Get UUID from CouchDB + * + * @return + * CouchDB-generated UUID string + * + */ + function gen_uuid() { + $ret = $this->rest_client->http_get('_uuids'); + return $ret['decoded']->uuids[0]; // should never be empty at this point, so no checking + } + + /** + * Create or update a document database + * + * @param $document + * PHP object, a PHP associative array, or a JSON String representing the document to be saved. PHP Objects and arrays are JSON-encoded automatically. + * + *

If $document has an "_id" property set, it will be used as the document's unique id (even for a "create" operation).
+  * If "_id" is missing, CouchDB will be used to generate a UUID.
+  *
+  *

If $document has a "_rev" property (revision), document will be updated, rather than creating a new document. + * You have to provide "_rev" if you want to update an existing document, otherwise operation will be assumed to be + * one of creation and you will get a duplicate document exception from CouchDB. Also, you may not provide "_rev" but + * not provide "_id" since that is an invalid input. + * + * @param $allowRevAutoDetection + * Default: false. When true and _rev is missing from the document, save() function will auto-detect latest revision + * for a document and use it. This option is "false" by default because it involves an extra http HEAD request and + * therefore can make save() operation slightly slower if such auto-detection is not required. + * + * @return + * document object with the database id (uuid) and revision attached; + * + * @throws SetteeCreateDatabaseException + */ + function save($document, $allowRevAutoDetection = false) { + if (is_string($document)) { + $document = json_decode($document); + } + + // Allow passing of $document as an array (for syntactic simplicity and also because in JSON world it does not matter) + if (is_array($document)) { + $document = (object) $document; + } + + if (empty($document->_id) && empty($document->_rev)) { + $id = $this->gen_uuid(); + } elseif (empty($document->_id) && !empty($document->_rev)) { + throw new SetteeWrongInputException("Error: You can not save a document with a revision provided, but missing id"); + } else { + $id = $document->_id; + + if ($allowRevAutoDetection) { + try { + $rev = $this->get_rev($id); + } catch (SetteeRestClientException $e) { + // auto-detection may fail legitimately, if a document has never been saved before (new doc), so skipping error + } + if (!empty($rev)) { + $document->_rev = $rev; + } + } + } + + $full_uri = $this->dbname . "/" . $this->safe_urlencode($id); + $document_json = json_encode($document, JSON_NUMERIC_CHECK); + + $ret = $this->rest_client->http_put($full_uri, $document_json); + + $document->_id = $ret['decoded']->id; + $document->_rev = $ret['decoded']->rev; + + return $document; + } + + /** + * @param $doc + * @param $name + * @param $content + * Content of the attachment in a string-buffer format. This function will automatically base64-encode content for + * you, so you don't have to do it. + * @param $mime_type + * Optional. Will be auto-detected if not provided + * @return void + */ + public function add_attachment($doc, $name, $content, $mime_type = null) { + if (empty($doc->_attachments) || !is_object($doc->_attachments)) { + $doc->_attachments = new stdClass(); + } + + if (empty($mime_type)) { + $mime_type = $this->rest_client->content_mime_type($content); + } + + $doc->_attachments->$name = new stdClass(); + $doc->_attachments->$name->content_type = $mime_type; + $doc->_attachments->$name->data = base64_encode($content); + } + + /** + * @param $doc + * @param $name + * @param $file + * Full path to a file (e.g. as returned by PHP's realpath function). + * @param $mime_type + * Optional. Will be auto-detected if not provided + * @return void + */ + public function add_attachment_file($doc, $name, $file, $mime_type = null) { + $content = file_get_contents($file); + $this->add_attachment($doc, $name, $content, $mime_type); + } + + /** + * + * Retrieve a document from CouchDB + * + * @throws SetteeWrongInputException + * + * @param $id + * Unique ID (usually: UUID) of the document to be retrieved. + * @return + * database document in PHP object format. 
+ */ + function get($id) { + if (empty($id)) { + throw new SetteeWrongInputException("Error: Can't retrieve a document without a uuid."); + } + + $full_uri = $this->dbname . "/" . $this->safe_urlencode($id); + $full_uri = str_replace("%3Frev%3D", "?rev=", $full_uri); + $ret = $this->rest_client->http_get($full_uri); + return $ret['decoded']; + } + + /** + * + * Get the latest revision of a document with document id: $id in CouchDB. + * + * @throws SetteeWrongInputException + * + * @param $id + * Unique ID (usually: UUID) of the document to be retrieved. + * @return + * database document in PHP object format. + */ + function get_rev($id) { + if (empty($id)) { + throw new SetteeWrongInputException("Error: Can't query a document without a uuid."); + } + + $full_uri = $this->dbname . "/" . $this->safe_urlencode($id); + $headers = $this->rest_client->http_head($full_uri); + if (empty($headers['Etag'])) { + throw new SetteeRestClientException("Error: could not retrieve revision. Server unexpectedly returned empty Etag"); + } + $etag = str_replace('"', '', $headers['Etag']); + return $etag; + } + + /** + * Delete a document + * + * @param $document + * a PHP object or JSON representation of the document that has _id and _rev fields. + * + * @return void + */ + function delete($document) { + if (!is_object($document)) { + $document = json_decode($document); + } + + $full_uri = $this->dbname . "/" . $this->safe_urlencode($document->_id) . "?rev=" . $document->_rev; + $this->rest_client->http_delete($full_uri); + } + + /* ----------------- View-related functions -------------- */ + + /** + * Create a new view or update an existing one. + * + * @param $design_doc + * @param $view_name + * @param $map_src + * Source code of the map function in Javascript + * @param $reduce_src + * Source code of the reduce function in Javascript (optional) + * @return void + */ + function save_view($design_doc, $view_name, $map_src, $reduce_src = null) { + $obj = new stdClass(); + $obj->_id = "_design/" . urlencode($design_doc); + $view_name = urlencode($view_name); + $obj->views->$view_name->map = $map_src; + if (!empty($reduce_src)) { + $obj->views->$view_name->reduce = $reduce_src; + } + + // allow safe updates (even if slightly slower due to extra: rev-detection check). + return $this->save($obj, true); + } + + /** + * Create a new view or update an existing one. + * + * @param $design_doc + * @param $view_name + * @param $key + * key parameter to a view. Can be a single value or an array (for a range). If passed an array, function assumes + * that first element is startkey, second: endkey. + * @param $descending + * return results in descending order. Please don't forget that if you are using a startkey/endkey, when you change + * order you also need to swap startkey and endkey values! + * + * @return void + */ + function get_view($design_doc, $view_name, $key = null, $descending = false, $limit = false, $reduce = null, $startdocid = null) { + $id = "_design/" . urlencode($design_doc); + $view_name = urlencode($view_name); + $id .= "/_view/$view_name"; + + $data = array(); + if (!empty($key)) { + if (is_string($key)) { + $data = "key=" . '"' . $key . '"'; + } elseif (is_array($key)) { + list($startkey, $endkey) = $key; + $data = "startkey=" . '"' . $startkey . '"&' . "endkey=" . '"' . $endkey . 
'"'; + } + + if ($descending) { + $data .= "&descending=true"; + } + if ($startdocid != null) { + $data .= "&startkey_docid='$startdocid'"; + } + if ($reduce != null) { + if ($reduce == true) { + $data .= "&reduce=true"; + } else { + $data .= "&reduce=false"; + } + } + if ($limit) { + $data .= "&limit=" . $limit; + } + } + + + + if (empty($id)) { + throw new SetteeWrongInputException("Error: Can't retrieve a document without a uuid."); + } + + $full_uri = $this->dbname . "/" . $this->safe_urlencode($id); + + $full_uri = str_replace("%253Fgroup%253D", "?group=", $full_uri); + $full_uri = str_replace("%253Flimit%253D", "?limit=", $full_uri); + $ret = $this->rest_client->http_get($full_uri, $data); + //$ret['decoded'] = str_replace("?k","&k",$ret['decoded']); + return $ret['decoded']; + } + + /** + * @param $id + * @return + * return a properly url-encoded id. + */ + private function safe_urlencode($id) { + //-- System views like _design can have "/" in their URLs. + $id = rawurlencode($id); + if (substr($id, 0, 1) == '_') { + $id = str_replace('%2F', '/', $id); + } + return $id; + } + + /** Getter for a database name */ + function get_name() { + return $this->dbname; + } } --- a/documents/about.php +++ b/documents/about.php @@ -1,7 +1,7 @@

About

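The "pagination by docid" in this commit's subject is carried by the new $startdocid argument to get_view() above, which is passed to CouchDB as startkey_docid. One caution: CouchDB JSON-encodes startkey/endkey but takes startkey_docid as a raw string, so the single quotes that get_view() wraps around $startdocid are sent literally and may prevent the id from matching. A minimal sketch of the same paging recipe against CouchDB's HTTP view API (Python 2, matching the scrapers below; the host, database, and view names are assumptions taken from the pages in this patch):

import json
import urllib
import urllib2

COUCH = "http://127.0.0.1:5984"  # assumed local CouchDB instance
VIEW = "/disclosr-foidocuments/_design/app/_view/byDate"

def fetch_page(startkey=None, startkey_docid=None, limit=20):
    params = {"descending": "true", "limit": limit}
    if startkey is not None:
        params["startkey"] = json.dumps(startkey)  # keys are JSON-encoded
    if startkey_docid is not None:
        params["startkey_docid"] = startkey_docid  # docids are sent raw, unquoted
        params["skip"] = 1  # don't repeat the boundary row from the previous page
    url = COUCH + VIEW + "?" + urllib.urlencode(params)
    return json.load(urllib2.urlopen(url))["rows"]

rows = fetch_page()
while rows:
    for row in rows:
        print row["id"], row["key"]
    last = rows[-1]
    rows = fetch_page(last["key"], last["id"])  # resume at the last row seen

Passing the docid alongside the key is what keeps paging stable when many documents share the same key (here, several FOI entries on the same date), which plain startkey/limit paging cannot distinguish.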
--- /dev/null +++ b/documents/agency.php @@ -1,1 +1,41 @@ +get_db('disclosr-agencies'); +$idtoname = Array(); +foreach ($agenciesdb->get_view("app", "byCanonicalName")->rows as $row) { + $idtoname[$row->id] = trim($row->value->name); +} +$foidocsdb = $server->get_db('disclosr-foidocuments'); + +include_header_documents((isset($_REQUEST['id']) ? $idtoname[$_REQUEST['id']] : 'Entries by Agency')); +$endkey = (isset($_REQUEST['end_key']) ? $_REQUEST['end_key'] : '9999-99-99'); +?> +
Read all the information released by Australian Federal Government agencies under the FOI Act in one place!
+RSS Icon All Agencies RSS Feed
+get_view("app", "byAgencyID", $_REQUEST['id'], false, false, false)->rows; + foreach ($rows as $row) { + //print_r($rows); + echo displayLogEntry($row, $idtoname); + if (!isset($startkey)) + $startkey = $row->key; + $endkey = $row->key; + } + } else { + $rows = $foidocsdb->get_view("app", "byAgencyID?group=true", null, false, false, true)->rows; + if ($rows) { + foreach ($rows as $row) { + echo '' . $idtoname[$row->key] . " (" . $row->value . " records)
\n"; + } + } + } +} catch (SetteeRestClientException $e) { + setteErrorHandler($e); +} +echo "next page "; +include_footer_documents(); +?> --- a/documents/charts.php +++ b/documents/charts.php @@ -1,6 +1,6 @@ get_db('disclosr-agencies'); @@ -15,29 +15,28 @@

Charts

Lorem ipsum.

-
+
+
+
Read all the information released by Australian Federal Government agencies under the FOI Act in one place!
+RSS Icon All Agencies RSS Feed
+get_db('disclosr-agencies'); + +$idtoname = Array(); +foreach ($agenciesdb->get_view("app", "byCanonicalName")->rows as $row) { + $idtoname[$row->id] = trim($row->value->name); +} +$foidocsdb = $server->get_db('disclosr-foidocuments'); +try { + $rows = $foidocsdb->get_view("app", "byDate", Array($endkey, '0000-00-00'), true, 20)->rows; + if ($rows) { + foreach ($rows as $key => $row) { + echo displayLogEntry($row, $idtoname); + if (!isset($startkey)) $startkey = $row->key; + $endkey = $row->key; + } + } +} catch (SetteeRestClientException $e) { + setteErrorHandler($e); +} +echo "next page "; +*/ +include_footer_documents(); +?> + --- a/documents/disclogsList.php +++ b/documents/disclogsList.php @@ -1,7 +1,7 @@ --- /dev/null +++ b/documents/disclosr-documents.nja @@ -1,1 +1,7 @@ - +{ + "venv": "", + "project-type": "Import from sources", + "name": "disclosr-documents", + "license": "GNU General Public License v3", + "description": "" +} --- a/documents/genericScrapers.py +++ b/documents/genericScrapers.py @@ -1,155 +1,281 @@ -import sys,os +import sys +import os sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../')) import scrape from bs4 import BeautifulSoup from time import mktime import feedparser import abc -import unicodedata, re +import unicodedata +import re import dateutil from dateutil.parser import * from datetime import * import codecs +import difflib + +from StringIO import StringIO + +from pdfminer.pdfparser import PDFDocument, PDFParser +from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter, process_pdf +from pdfminer.pdfdevice import PDFDevice, TagExtractor +from pdfminer.converter import TextConverter +from pdfminer.cmapdb import CMapDB +from pdfminer.layout import LAParams + + class GenericDisclogScraper(object): - __metaclass__ = abc.ABCMeta - agencyID = None - disclogURL = None - def remove_control_chars(self, input): - return "".join([i for i in input if ord(i) in range(32, 127)]) - def getAgencyID(self): - """ disclosr agency id """ - if self.agencyID == None: - self.agencyID = os.path.basename(sys.argv[0]).replace(".py","") - return self.agencyID - - def getURL(self): - """ disclog URL""" - if self.disclogURL == None: - agency = scrape.agencydb.get(self.getAgencyID()) - self.disclogURL = agency['FOIDocumentsURL'] - return self.disclogURL - - @abc.abstractmethod - def doScrape(self): - """ do the scraping """ - return - - @abc.abstractmethod - def getDescription(self, content, entry, doc): - """ get description""" - return - + __metaclass__ = abc.ABCMeta + agencyID = None + disclogURL = None + + def remove_control_chars(self, input): + return "".join([i for i in input if ord(i) in range(32, 127)]) + + def getAgencyID(self): + """ disclosr agency id """ + if self.agencyID is None: + self.agencyID = os.path.basename(sys.argv[0]).replace(".py", "") + return self.agencyID + + def getURL(self): + """ disclog URL""" + if self.disclogURL is None: + agency = scrape.agencydb.get(self.getAgencyID()) + self.disclogURL = agency['FOIDocumentsURL'] + return self.disclogURL + + @abc.abstractmethod + def doScrape(self): + """ do the scraping """ + return + +class GenericHTMLDisclogScraper(GenericDisclogScraper): + + def doScrape(self): + foidocsdb = scrape.couch['disclosr-foidocuments'] + (url, mime_type, rcontent) = scrape.fetchURL(scrape.docsdb, + self.getURL(), "foidocuments", self.getAgencyID()) + content = rcontent + dochash = scrape.mkhash(content) + doc = foidocsdb.get(dochash) + if doc is None: + print "saving " + dochash + description = 
"This log may have updated but as it was not in a table last time we viewed it, we cannot extract what has changed. Please refer to the agency's website Disclosure Log to see the most recent entries" + last_attach = scrape.getLastAttachment(scrape.docsdb, self.getURL()) + if last_attach != None: + html_diff = difflib.HtmlDiff() + description = description + "\nChanges: " + description = description + html_diff.make_table(last_attach.read().split('\n'), + content.split('\n')) + edate = date.today().strftime("%Y-%m-%d") + doc = {'_id': dochash, 'agencyID': self.getAgencyID() + , 'url': self.getURL(), 'docID': dochash, + "date": edate, "title": "Disclosure Log Updated", "description": description} + foidocsdb.save(doc) + else: + print "already saved" + +class GenericPDFDisclogScraper(GenericDisclogScraper): + + def doScrape(self): + foidocsdb = scrape.couch['disclosr-foidocuments'] + (url, mime_type, content) = scrape.fetchURL(scrape.docsdb, + self.getURL(), "foidocuments", self.getAgencyID()) + laparams = LAParams() + rsrcmgr = PDFResourceManager(caching=True) + outfp = StringIO() + device = TextConverter(rsrcmgr, outfp, codec='utf-8', + laparams=laparams) + fp = StringIO() + fp.write(content) + + process_pdf(rsrcmgr, device, fp, set(), caching=True, + check_extractable=True) + description = outfp.getvalue() + fp.close() + device.close() + outfp.close() + dochash = scrape.mkhash(description) + doc = foidocsdb.get(dochash) + if doc is None: + print "saving " + dochash + edate = date.today().strftime("%Y-%m-%d") + doc = {'_id': dochash, 'agencyID': self.getAgencyID() + , 'url': self.getURL(), 'docID': dochash, + "date": edate, "title": "Disclosure Log Updated", "description": description} + foidocsdb.save(doc) + else: + print "already saved" + + +class GenericDOCXDisclogScraper(GenericDisclogScraper): + + def doScrape(self): + foidocsdb = scrape.couch['disclosr-foidocuments'] + (url, mime_type, content) = scrape.fetchURL(scrape.docsdb + , self.getURL(), "foidocuments", self.getAgencyID()) + mydoc = zipfile.ZipFile(file) + xmlcontent = mydoc.read('word/document.xml') + document = etree.fromstring(xmlcontent) + ## Fetch all the text out of the document we just created + paratextlist = getdocumenttext(document) + # Make explicit unicode version + newparatextlist = [] + for paratext in paratextlist: + newparatextlist.append(paratext.encode("utf-8")) + ## Print our documnts test with two newlines under each paragraph + description = '\n\n'.join(newparatextlist).strip(' \t\n\r') + dochash = scrape.mkhash(description) + doc = foidocsdb.get(dochash) + + if doc is None: + print "saving " + dochash + edate = time().strftime("%Y-%m-%d") + doc = {'_id': dochash, 'agencyID': self.getAgencyID() + , 'url': self.getURL(), 'docID': dochash, + "date": edate, "title": "Disclosure Log Updated", "description": description} + foidocsdb.save(doc) + else: + print "already saved" class GenericRSSDisclogScraper(GenericDisclogScraper): - def doScrape(self): - foidocsdb = scrape.couch['disclosr-foidocuments'] - (url,mime_type,content) = scrape.fetchURL(scrape.docsdb, self.getURL(), "foidocuments", self.getAgencyID()) - feed = feedparser.parse(content) - for entry in feed.entries: - #print entry - print entry.id - hash = scrape.mkhash(entry.id) - #print hash - doc = foidocsdb.get(hash) - #print doc - if doc == None: - print "saving "+ hash - edate = datetime.fromtimestamp(mktime( entry.published_parsed)).strftime("%Y-%m-%d") - doc = {'_id': hash, 'agencyID': self.getAgencyID(), 'url': entry.link, 'docID': entry.id, - 
"date": edate,"title": entry.title} - self.getDescription(entry,entry, doc) - foidocsdb.save(doc) + def doScrape(self): + foidocsdb = scrape.couch['disclosr-foidocuments'] + (url, mime_type, content) = scrape.fetchURL(scrape.docsdb, + self.getURL(), "foidocuments", self.getAgencyID()) + feed = feedparser.parse(content) + for entry in feed.entries: + #print entry + print entry.id + dochash = scrape.mkhash(entry.id) + doc = foidocsdb.get(dochash) + #print doc + if doc is None: + print "saving " + dochash + edate = datetime.fromtimestamp( + mktime(entry.published_parsed)).strftime("%Y-%m-%d") + doc = {'_id': dochash, 'agencyID': self.getAgencyID(), + 'url': entry.link, 'docID': entry.id, + "date": edate, "title": entry.title} + self.getDescription(entry, entry, doc) + foidocsdb.save(doc) + else: + print "already saved" + + def getDescription(self, content, entry, doc): + """ get description from rss entry""" + doc.update({'description': content.summary}) + return + + +class GenericOAICDisclogScraper(GenericDisclogScraper): + __metaclass__ = abc.ABCMeta + + @abc.abstractmethod + def getColumns(self, columns): + """ rearranges columns if required """ + return + + def getColumnCount(self): + return 5 + + def getDescription(self, content, entry, doc): + """ get description from rss entry""" + descriptiontxt = "" + for string in content.stripped_strings: + descriptiontxt = descriptiontxt + " \n" + string + doc.update({'description': descriptiontxt}) + + def getTitle(self, content, entry, doc): + doc.update({'title': (''.join(content.stripped_strings))}) + + def getTable(self, soup): + return soup.table + + def getRows(self, table): + return table.find_all('tr') + + def getDate(self, content, entry, doc): + date = ''.join(content.stripped_strings).strip() + (a, b, c) = date.partition("(") + date = self.remove_control_chars(a.replace("Octber", "October")) + print date + edate = parse(date, dayfirst=True, fuzzy=True).strftime("%Y-%m-%d") + print edate + doc.update({'date': edate}) + return + + def getLinks(self, content, entry, doc): + links = [] + for atag in entry.find_all("a"): + if atag.has_key('href'): + links.append(scrape.fullurl(content, atag['href'])) + if links != []: + doc.update({'links': links}) + return + + def doScrape(self): + foidocsdb = scrape.couch['disclosr-foidocuments'] + (url, mime_type, content) = scrape.fetchURL(scrape.docsdb, + self.getURL(), "foidocuments", self.getAgencyID()) + if content is not None: + if mime_type == "text/html" or mime_type == "application/xhtml+xml" or mime_type == "application/xml": + # http://www.crummy.com/software/BeautifulSoup/documentation.html + print "parsing" + soup = BeautifulSoup(content) + table = self.getTable(soup) + for row in self.getRows(table): + columns = row.find_all('td') + if len(columns) is self.getColumnCount(): + (id, date, title, + description, notes) = self.getColumns(columns) + print self.remove_control_chars( + ''.join(id.stripped_strings)) + if id.string is None: + dochash = scrape.mkhash( + self.remove_control_chars( + url + (''.join(date.stripped_strings)))) else: - print "already saved" - def getDescription(self, content, entry, doc): - """ get description from rss entry""" - doc.update({'description': content.summary}) - return - -class GenericOAICDisclogScraper(GenericDisclogScraper): - __metaclass__ = abc.ABCMeta - @abc.abstractmethod - def getColumns(self,columns): - """ rearranges columns if required """ - return - def getColumnCount(self): - return 5 - def getDescription(self, content, entry, doc): - """ get 
description from rss entry""" - descriptiontxt = "" - for string in content.stripped_strings: - descriptiontxt = descriptiontxt + " \n" + string - doc.update({'description': descriptiontxt}) - return - def getTitle(self, content, entry, doc): - doc.update({'title': (''.join(content.stripped_strings))}) - return - def getTable(self, soup): - return soup.table - def getRows(self, table): - return table.find_all('tr') - def getDate(self, content, entry, doc): - date = ''.join(content.stripped_strings).strip() - (a,b,c) = date.partition("(") - date = self.remove_control_chars(a.replace("Octber","October")) - print date - edate = parse(date, dayfirst=True, fuzzy=True).strftime("%Y-%m-%d") - print edate - doc.update({'date': edate}) - return - def getLinks(self, content, entry, doc): - links = [] - for atag in entry.find_all("a"): - if atag.has_key('href'): - links.append(scrape.fullurl(content,atag['href'])) - if links != []: - doc.update({'links': links}) - return - - def doScrape(self): - foidocsdb = scrape.couch['disclosr-foidocuments'] - (url,mime_type,content) = scrape.fetchURL(scrape.docsdb, self.getURL(), "foidocuments", self.getAgencyID()) - if content != None: - if mime_type == "text/html" or mime_type == "application/xhtml+xml" or mime_type =="application/xml": - # http://www.crummy.com/software/BeautifulSoup/documentation.html - soup = BeautifulSoup(content) - table = self.getTable(soup) - for row in self.getRows(table): - columns = row.find_all('td') - if len(columns) == self.getColumnCount(): - (id, date, title, description, notes) = self.getColumns(columns) - print self.remove_control_chars(''.join(id.stripped_strings)) - if id.string == None: - hash = scrape.mkhash(self.remove_control_chars(url+(''.join(date.stripped_strings)))) - else: - hash = scrape.mkhash(self.remove_control_chars(url+(''.join(id.stripped_strings)))) - doc = foidocsdb.get(hash) - - if doc == None: - print "saving " +hash - doc = {'_id': hash, 'agencyID': self.getAgencyID(), 'url': self.getURL(), 'docID': (''.join(id.stripped_strings))} - self.getLinks(self.getURL(),row,doc) - self.getTitle(title,row, doc) - self.getDate(date,row, doc) - self.getDescription(description,row, doc) - if notes != None: - doc.update({ 'notes': (''.join(notes.stripped_strings))}) - badtitles = ['-','Summary of FOI Request','FOI request(in summary form)','Summary of FOI request received by the ASC', -'Summary of FOI request received by agency/minister','Description of Documents Requested','FOI request','Description of FOI Request','Summary of request','Description','Summary', + dochash = scrape.mkhash( + self.remove_control_chars( + url + (''.join(id.stripped_strings)))) + doc = foidocsdb.get(dochash) + + if doc is None: + print "saving " + dochash + doc = {'_id': dochash, + 'agencyID': self.getAgencyID(), + 'url': self.getURL(), + 'docID': (''.join(id.stripped_strings))} + self.getLinks(self.getURL(), row, doc) + self.getTitle(title, row, doc) + self.getDate(date, row, doc) + self.getDescription(description, row, doc) + if notes is not None: + doc.update({ 'notes': ( + ''.join(notes.stripped_strings))}) + badtitles = ['-','Summary of FOI Request' + , 'FOI request(in summary form)' + , 'Summary of FOI request received by the ASC', +'Summary of FOI request received by agency/minister', +'Description of Documents Requested','FOI request', +'Description of FOI Request','Summary of request','Description','Summary', 'Summary of FOIrequest received by agency/minister','Summary of FOI request received','Description of FOI Request',"FOI 
request",'Results 1 to 67 of 67'] - if doc['title'] not in badtitles and doc['description'] != '': + if doc['title'] not in badtitles\ + and doc['description'] != '': print "saving" foidocsdb.save(doc) - else: - print "already saved "+hash - - elif len(row.find_all('th')) == self.getColumnCount(): - print "header row" - - else: - print "ERROR number of columns incorrect" - print row - + else: + print "already saved " + dochash + + elif len(row.find_all('th')) is self.getColumnCount(): + print "header row" + + else: + print "ERROR number of columns incorrect" + print row + --- a/documents/index.php +++ b/documents/index.php @@ -1,12 +1,13 @@ +
Read all the information released by Australian Federal Government agencies under the FOI Act in one place!
+RSS Icon All Agencies RSS Feed
get_db('disclosr-agencies'); $idtoname = Array(); @@ -15,17 +16,20 @@ } $foidocsdb = $server->get_db('disclosr-foidocuments'); try { - $rows = $foidocsdb->get_view("app", "byDate", Array($startkey, '0000-00-00'), true, 20)->rows; + $rows = $foidocsdb->get_view("app", "byDate", Array($endkey, '0000-00-00'), true, 20,null, $enddocid)->rows; if ($rows) { foreach ($rows as $key => $row) { echo displayLogEntry($row, $idtoname); + if (!isset($startkey)) + $startkey = $row->key; $endkey = $row->key; + $enddocid = $row->value->_id; } } } catch (SetteeRestClientException $e) { setteErrorHandler($e); } -echo "next page"; +echo "next page "; include_footer_documents(); ?> --- a/documents/rss.xml.php +++ b/documents/rss.xml.php @@ -8,30 +8,39 @@ //Creating an instance of FeedWriter class. $TestFeed = new RSS2FeedWriter(); //Setting the channel elements -//Use wrapper functions for common channelelements -$TestFeed->setTitle('Last Modified - All'); -$TestFeed->setLink('http://disclosurelo.gs/rss.xml.php'); -$TestFeed->setDescription('Latest entries'); - $TestFeed->setChannelElement('language', 'en-us'); - $TestFeed->setChannelElement('pubDate', date(DATE_RSS, time())); -//Retriving informations from database +////Retriving informations from database $idtoname = Array(); $agenciesdb = $server->get_db('disclosr-agencies'); foreach ($agenciesdb->get_view("app", "byCanonicalName")->rows as $row) { $idtoname[$row->id] = trim($row->value->name); } $foidocsdb = $server->get_db('disclosr-foidocuments'); -$rows = $foidocsdb->get_view("app", "byDate", Array('9999-99-99','0000-00-00', 50), true)->rows; +if (isset($_REQUEST['id'])) { + $rows = $foidocsdb->get_view("app", "byAgencyID", $_REQUEST['id'], false, false, false)->rows; + $title = $idtoname[$_REQUEST['id']]; +} else { + $rows = $foidocsdb->get_view("app", "byDate", Array('9999-99-99', '0000-00-00', 50), true)->rows; + $title = 'All Agencies'; +} +//Use wrapper functions for common channelelements +$TestFeed->setTitle('disclosurelo.gs Newest Entries - '.$title); +$TestFeed->setLink('http://disclosurelo.gs/rss.xml.php'.(isset($_REQUEST['id'])? '?id='.$_REQUEST['id'] : '')); +$TestFeed->setDescription('disclosurelo.gs Newest Entries - '.$title); +$TestFeed->setChannelElement('language', 'en-us'); +$TestFeed->setChannelElement('pubDate', date(DATE_RSS, time())); + + //print_r($rows); foreach ($rows as $row) { //Create an empty FeedItem $newItem = $TestFeed->createNewItem(); //Add elements to the feed item $newItem->setTitle($row->value->title); - $newItem->setLink("view.php?id=".$row->value->_id); - $newItem->setDate(date("c", strtotime($row->value->date))); - $newItem->setDescription(displayLogEntry($row,$idtoname)); - $newItem->addElement('guid', $row->value->_id,array('isPermaLink'=>'true')); + $newItem->setLink("http://disclosurelo.gs/view.php?id=" . $row->value->_id); + $newItem->setDate(strtotime($row->value->date)); + $newItem->setDescription(displayLogEntry($row, $idtoname)); + $newItem->setAuthor($idtoname[$row->value->agencyID]); + $newItem->addElement('guid', "http://disclosurelo.gs/view.php?id=" . $row->value->_id, array('isPermaLink' => 'true')); //Now add the feed item $TestFeed->addItem($newItem); } --- a/documents/runScrapers.sh +++ b/documents/runScrapers.sh @@ -1,3 +1,10 @@ -for f in scrapers/*.py; do echo "Processing $f file.."; python $f; done +for f in scrapers/*.py; + do echo "Processing $f file.."; + python $f; + if [ "$?" 
-ne "0" ]; then + echo "error"; + sleep 2; + fi +done --- a/documents/scrape.py +++ b/documents/scrape.py @@ -8,186 +8,198 @@ import time import os import mimetypes -import re import urllib import urlparse def mkhash(input): - return hashlib.md5(input).hexdigest().encode("utf-8") + return hashlib.md5(input).hexdigest().encode("utf-8") def canonurl(url): - r"""Return the canonical, ASCII-encoded form of a UTF-8 encoded URL, or '' - if the URL looks invalid. - >>> canonurl('\xe2\x9e\xa1.ws') # tinyarro.ws - 'http://xn--hgi.ws/' - """ - # strip spaces at the ends and ensure it's prefixed with 'scheme://' - url = url.strip() - if not url: - return '' - if not urlparse.urlsplit(url).scheme: - url = 'http://' + url - - # turn it into Unicode - #try: - # url = unicode(url, 'utf-8') - #except UnicodeDecodeError: - # return '' # bad UTF-8 chars in URL - - # parse the URL into its components - parsed = urlparse.urlsplit(url) - scheme, netloc, path, query, fragment = parsed - - # ensure scheme is a letter followed by letters, digits, and '+-.' chars - if not re.match(r'[a-z][-+.a-z0-9]*$', scheme, flags=re.I): - return '' - scheme = str(scheme) - - # ensure domain and port are valid, eg: sub.domain.<1-to-6-TLD-chars>[:port] - match = re.match(r'(.+\.[a-z0-9]{1,6})(:\d{1,5})?$', netloc, flags=re.I) - if not match: - return '' - domain, port = match.groups() - netloc = domain + (port if port else '') - netloc = netloc.encode('idna') - - # ensure path is valid and convert Unicode chars to %-encoded - if not path: - path = '/' # eg: 'http://google.com' -> 'http://google.com/' - path = urllib.quote(urllib.unquote(path.encode('utf-8')), safe='/;') - - # ensure query is valid - query = urllib.quote(urllib.unquote(query.encode('utf-8')), safe='=&?/') - - # ensure fragment is valid - fragment = urllib.quote(urllib.unquote(fragment.encode('utf-8'))) - - # piece it all back together, truncating it to a maximum of 4KB - url = urlparse.urlunsplit((scheme, netloc, path, query, fragment)) - return url[:4096] + r"""Return the canonical, ASCII-encoded form of a UTF-8 encoded URL, or '' + if the URL looks invalid. + >>> canonurl('\xe2\x9e\xa1.ws') # tinyarro.ws + 'http://xn--hgi.ws/' + """ + # strip spaces at the ends and ensure it's prefixed with 'scheme://' + url = url.strip() + if not url: + return '' + if not urlparse.urlsplit(url).scheme: + url = 'http://' + url + + # turn it into Unicode + #try: + # url = unicode(url, 'utf-8') + #except UnicodeDecodeError: + # return '' # bad UTF-8 chars in URL + + # parse the URL into its components + parsed = urlparse.urlsplit(url) + scheme, netloc, path, query, fragment = parsed + + # ensure scheme is a letter followed by letters, digits, and '+-.' 
chars + if not re.match(r'[a-z][-+.a-z0-9]*$', scheme, flags=re.I): + return '' + scheme = str(scheme) + + # ensure domain and port are valid, eg: sub.domain.<1-to-6-TLD-chars>[:port] + match = re.match(r'(.+\.[a-z0-9]{1,6})(:\d{1,5})?$', netloc, flags=re.I) + if not match: + return '' + domain, port = match.groups() + netloc = domain + (port if port else '') + netloc = netloc.encode('idna') + + # ensure path is valid and convert Unicode chars to %-encoded + if not path: + path = '/' # eg: 'http://google.com' -> 'http://google.com/' + path = urllib.quote(urllib.unquote(path.encode('utf-8')), safe='/;') + + # ensure query is valid + query = urllib.quote(urllib.unquote(query.encode('utf-8')), safe='=&?/') + + # ensure fragment is valid + fragment = urllib.quote(urllib.unquote(fragment.encode('utf-8'))) + + # piece it all back together, truncating it to a maximum of 4KB + url = urlparse.urlunsplit((scheme, netloc, path, query, fragment)) + return url[:4096] def fullurl(url,href): - href = href.replace(" ","%20") - href = re.sub('#.*$','',href) - return urljoin(url,href) + href = href.replace(" ","%20") + href = re.sub('#.*$','',href) + return urljoin(url,href) #http://diveintopython.org/http_web_services/etags.html -class NotModifiedHandler(urllib2.BaseHandler): - def http_error_304(self, req, fp, code, message, headers): - addinfourl = urllib2.addinfourl(fp, headers, req.get_full_url()) - addinfourl.code = code - return addinfourl +class NotModifiedHandler(urllib2.BaseHandler): + def http_error_304(self, req, fp, code, message, headers): + addinfourl = urllib2.addinfourl(fp, headers, req.get_full_url()) + addinfourl.code = code + return addinfourl + +def getLastAttachment(docsdb,url): + hash = mkhash(url) + doc = docsdb.get(hash) + if doc != None: + last_attachment_fname = doc["_attachments"].keys()[-1] + last_attachment = docsdb.get_attachment(doc,last_attachment_fname) + return last_attachment + else: + return None def fetchURL(docsdb, url, fieldName, agencyID, scrape_again=True): - url = canonurl(url) - hash = mkhash(url) - req = urllib2.Request(url) - print "Fetching %s (%s)" % (url,hash) - if url.startswith("mailto") or url.startswith("javascript") or url.startswith("#") or url == None or url == "": - print "Not a valid HTTP url" - return (None,None,None) - doc = docsdb.get(hash) - if doc == None: - doc = {'_id': hash, 'agencyID': agencyID, 'url': url, 'fieldName':fieldName} - else: - if (('page_scraped' in doc) and (time.time() - doc['page_scraped']) < 60*24*14*1000): - print "Uh oh, trying to scrape URL again too soon!" 
- last_attachment_fname = doc["_attachments"].keys()[-1] - last_attachment = docsdb.get_attachment(doc,last_attachment_fname) - content = last_attachment - return (doc['url'],doc['mime_type'],content) - if scrape_again == False: - print "Not scraping this URL again as requested" - return (None,None,None) - - time.sleep(3) # wait 3 seconds to give webserver time to recover - - req.add_header("User-Agent", "Mozilla/4.0 (compatible; Prometheus webspider; owner maxious@lambdacomplex.org)") - #if there is a previous version stored in couchdb, load caching helper tags - if doc.has_key('etag'): - req.add_header("If-None-Match", doc['etag']) - if doc.has_key('last_modified'): - req.add_header("If-Modified-Since", doc['last_modified']) - - opener = urllib2.build_opener(NotModifiedHandler()) - try: - url_handle = opener.open(req) - doc['url'] = url_handle.geturl() # may have followed a redirect to a new url - headers = url_handle.info() # the addinfourls have the .info() too - doc['etag'] = headers.getheader("ETag") - doc['last_modified'] = headers.getheader("Last-Modified") - doc['date'] = headers.getheader("Date") - doc['page_scraped'] = time.time() - doc['web_server'] = headers.getheader("Server") - doc['via'] = headers.getheader("Via") - doc['powered_by'] = headers.getheader("X-Powered-By") - doc['file_size'] = headers.getheader("Content-Length") - content_type = headers.getheader("Content-Type") - if content_type != None: - doc['mime_type'] = content_type.split(";")[0] - else: - (type,encoding) = mimetypes.guess_type(url) - doc['mime_type'] = type - if hasattr(url_handle, 'code'): - if url_handle.code == 304: - print "the web page has not been modified" - return (None,None,None) - else: - content = url_handle.read() - docsdb.save(doc) - doc = docsdb.get(hash) # need to get a _rev - docsdb.put_attachment(doc, content, str(time.time())+"-"+os.path.basename(url), doc['mime_type']) - return (doc['url'], doc['mime_type'], content) - #store as attachment epoch-filename - - except urllib2.URLError as e: - error = "" - if hasattr(e, 'reason'): - error = "error %s in downloading %s" % (str(e.reason), url) - elif hasattr(e, 'code'): - error = "error %s in downloading %s" % (e.code, url) - print error - doc['error'] = error - docsdb.save(doc) - return (None,None,None) + url = canonurl(url) + hash = mkhash(url) + req = urllib2.Request(url) + print "Fetching %s (%s)" % (url,hash) + if url.startswith("mailto") or url.startswith("javascript") or url.startswith("#") or url == None or url == "": + print "Not a valid HTTP url" + return (None,None,None) + doc = docsdb.get(hash) + if doc == None: + doc = {'_id': hash, 'agencyID': agencyID, 'url': url, 'fieldName':fieldName} + else: + if (('page_scraped' in doc) and (time.time() - doc['page_scraped']) < 60*24*14*1000): + print "Uh oh, trying to scrape URL again too soon!"+hash + last_attachment_fname = doc["_attachments"].keys()[-1] + last_attachment = docsdb.get_attachment(doc,last_attachment_fname) + content = last_attachment + return (doc['url'],doc['mime_type'],content.read()) + if scrape_again == False: + print "Not scraping this URL again as requested" + return (doc['url'],doc['mime_type'],content.read()) + + req.add_header("User-Agent", "Mozilla/4.0 (compatible; Prometheus webspider; owner maxious@lambdacomplex.org)") + #if there is a previous version stored in couchdb, load caching helper tags + if doc.has_key('etag'): + req.add_header("If-None-Match", doc['etag']) + if doc.has_key('last_modified'): + req.add_header("If-Modified-Since", 
doc['last_modified']) + + opener = urllib2.build_opener(NotModifiedHandler()) + try: + url_handle = opener.open(req) + doc['url'] = url_handle.geturl() # may have followed a redirect to a new url + headers = url_handle.info() # the addinfourls have the .info() too + doc['etag'] = headers.getheader("ETag") + doc['last_modified'] = headers.getheader("Last-Modified") + doc['date'] = headers.getheader("Date") + doc['page_scraped'] = time.time() + doc['web_server'] = headers.getheader("Server") + doc['via'] = headers.getheader("Via") + doc['powered_by'] = headers.getheader("X-Powered-By") + doc['file_size'] = headers.getheader("Content-Length") + content_type = headers.getheader("Content-Type") + if content_type != None: + doc['mime_type'] = content_type.split(";")[0] + else: + (type,encoding) = mimetypes.guess_type(url) + doc['mime_type'] = type + if hasattr(url_handle, 'code'): + if url_handle.code == 304: + print "the web page has not been modified"+hash + last_attachment_fname = doc["_attachments"].keys()[-1] + last_attachment = docsdb.get_attachment(doc,last_attachment_fname) + content = last_attachment + return (doc['url'],doc['mime_type'],content.read()) + else: + print "new webpage loaded" + content = url_handle.read() + docsdb.save(doc) + doc = docsdb.get(hash) # need to get a _rev + docsdb.put_attachment(doc, content, str(time.time())+"-"+os.path.basename(url), doc['mime_type']) + return (doc['url'], doc['mime_type'], content) + #store as attachment epoch-filename + + except urllib2.URLError as e: + print "error!" + error = "" + if hasattr(e, 'reason'): + error = "error %s in downloading %s" % (str(e.reason), url) + elif hasattr(e, 'code'): + error = "error %s in downloading %s" % (e.code, url) + print error + doc['error'] = error + docsdb.save(doc) + return (None,None,None) def scrapeAndStore(docsdb, url, depth, fieldName, agencyID): - (url,mime_type,content) = fetchURL(docsdb, url, fieldName, agencyID) - badURLs = ["http://www.ausport.gov.au/supporting/funding/grants_and_scholarships/grant_funding_report"] - if content != None and depth > 0 and url != "http://www.ausport.gov.au/supporting/funding/grants_and_scholarships/grant_funding_report": - if mime_type == "text/html" or mime_type == "application/xhtml+xml" or mime_type =="application/xml": - # http://www.crummy.com/software/BeautifulSoup/documentation.html - soup = BeautifulSoup(content) - navIDs = soup.findAll(id=re.compile('nav|Nav|menu|bar|left|right|sidebar|more-links|breadcrumb|footer|header')) - for nav in navIDs: - print "Removing element", nav['id'] - nav.extract() - navClasses = soup.findAll(attrs={'class' : re.compile('nav|menu|bar|left|right|sidebar|more-links|breadcrumb|footer|header')}) - for nav in navClasses: - print "Removing element", nav['class'] - nav.extract() - links = soup.findAll('a') # soup.findAll('a', id=re.compile("^p-")) - linkurls = set([]) - for link in links: - if link.has_key("href"): - if link['href'].startswith("http"): - # lets not do external links for now - # linkurls.add(link['href']) - None - if link['href'].startswith("mailto"): - # not http - None - if link['href'].startswith("javascript"): - # not http - None - else: - # remove anchors and spaces in urls - linkurls.add(fullurl(url,link['href'])) - for linkurl in linkurls: - #print linkurl - scrapeAndStore(docsdb, linkurl, depth-1, fieldName, agencyID) + (url,mime_type,content) = fetchURL(docsdb, url, fieldName, agencyID) + badURLs = ["http://www.ausport.gov.au/supporting/funding/grants_and_scholarships/grant_funding_report"] + if 
content != None and depth > 0 and url != "http://www.ausport.gov.au/
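The caching that fetchURL() above relies on is a conditional GET: replay the ETag and Last-Modified validators stored in CouchDB and treat 304 Not Modified as "reuse the stored attachment". A minimal self-contained sketch of that pattern, using the same urllib2 machinery as scrape.py (the conditional_get() calling convention is an assumption for illustration, not part of the patch):

import urllib2

class NotModifiedHandler(urllib2.BaseHandler):
    def http_error_304(self, req, fp, code, message, headers):
        # return the 304 as a normal response object instead of raising
        addinfourl = urllib2.addinfourl(fp, headers, req.get_full_url())
        addinfourl.code = code
        return addinfourl

def conditional_get(url, etag=None, last_modified=None):
    """Return page content, or None if the server says it is unchanged."""
    req = urllib2.Request(url)
    if etag:
        req.add_header("If-None-Match", etag)
    if last_modified:
        req.add_header("If-Modified-Since", last_modified)
    opener = urllib2.build_opener(NotModifiedHandler())
    handle = opener.open(req)
    if getattr(handle, "code", None) == 304:
        return None  # caller falls back to the attachment already in CouchDB
    return handle.read()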