From: maxious Date: Fri, 10 Feb 2012 13:30:58 +0000 Subject: Handle more http edge cases in scraper X-Git-Url: http://maxious.lambdacomplex.org/git/?p=disclosr.git&a=commitdiff&h=8e2e4d8645d25db5380e3c3050ca1c79c4415da3 --- Handle more http edge cases in scraper Former-commit-id: 994d782d8883843a55bf2558f8e6a6c9ffbcebde --- --- /dev/null +++ b/admin/exportAll.csv.php @@ -1,1 +1,63 @@ +get_db('disclosr-agencies'); +$headers = Array(); + try { + $rows = $db->get_view("app", "fieldNames?group=true", null, true)->rows; + + $dataValues = Array(); + foreach ($rows as $row) { + $headers[] = $row->key; + } +} catch (SetteeRestClientException $e) { + setteErrorHandler($e); +} + +$fp = fopen('php://output', 'w'); +if ($fp && $db) { + header('Content-Type: text/csv; charset=utf-8'); + header('Content-Disposition: attachment; filename="export.' . date("c") . '.csv"'); + header('Pragma: no-cache'); + header('Expires: 0'); + fputcsv($fp, $headers); + try { + $agencies = $db->get_view("app", "byCanonicalName", null, true)->rows; + //print_r($rows); + foreach ($agencies as $agency) { + // print_r($agency); + + if ( !isset($agency->value->status)) { + $row = Array(); + $agencyArray = object_to_array($agency->value); + foreach ($headers as $fieldName) { + if (isset($agencyArray[$fieldName])) { + if (is_array($agencyArray[$fieldName])) { + $row[] = implode(";",$agencyArray[$fieldName]); + } else { + $row[] = $agencyArray[$fieldName]; + } + } else { + $row[] = ""; + } + } + + fputcsv($fp, array_values($row)); + + + } + } + } catch (SetteeRestClientException $e) { + setteErrorHandler($e); + } + + die; +} +?> + --- /dev/null +++ b/couchdb/SetteeDatabase.class.php @@ -1,1 +1,306 @@ - +conn_url = $conn_url; + $this->dbname = $dbname; + $this->rest_client = SetteeRestClient::get_instance($this->conn_url); + } + + + /** + * Get UUID from CouchDB + * + * @return + * CouchDB-generated UUID string + * + */ + function gen_uuid() { + $ret = $this->rest_client->http_get('_uuids'); + return $ret['decoded']->uuids[0]; // should never be empty at this point, so no checking + } + + /** + * Create or update a document database + * + * @param $document + * PHP object, a PHP associative array, or a JSON String representing the document to be saved. PHP Objects and arrays are JSON-encoded automatically. + * + *
If $document has an "_id" property set, it will be used as the document's unique id (even for a "create" operation). + * If "_id" is missing, CouchDB will be used to generate a UUID. + * + *
If $document has a "_rev" property (revision), document will be updated, rather than creating a new document. + * You have to provide "_rev" if you want to update an existing document, otherwise operation will be assumed to be + * one of creation and you will get a duplicate document exception from CouchDB. Also, you may not provide "_rev" but + * not provide "_id" since that is an invalid input. + * + * @param $allowRevAutoDetection + * Default: false. When true and _rev is missing from the document, save() function will auto-detect latest revision + * for a document and use it. This option is "false" by default because it involves an extra http HEAD request and + * therefore can make save() operation slightly slower if such auto-detection is not required. + * + * @return + * document object with the database id (uuid) and revision attached; + * + * @throws SetteeCreateDatabaseException + */ + function save($document, $allowRevAutoDetection = false) { + if (is_string($document)) { + $document = json_decode($document); + } + + // Allow passing of $document as an array (for syntactic simplicity and also because in JSON world it does not matter) + if(is_array($document)) { + $document = (object) $document; + } + + if (empty($document->_id) && empty($document->_rev)) { + $id = $this->gen_uuid(); + } + elseif (empty($document->_id) && !empty($document->_rev)) { + throw new SetteeWrongInputException("Error: You can not save a document with a revision provided, but missing id"); + } + else { + $id = $document->_id; + + if ($allowRevAutoDetection) { + try { + $rev = $this->get_rev($id); + } catch (SetteeRestClientException $e) { + // auto-detection may fail legitimately, if a document has never been saved before (new doc), so skipping error + } + if (!empty($rev)) { + $document->_rev = $rev; + } + } + } + + $full_uri = $this->dbname . "/" . $this->safe_urlencode($id); + $document_json = json_encode($document, JSON_NUMERIC_CHECK); + + $ret = $this->rest_client->http_put($full_uri, $document_json); + + $document->_id = $ret['decoded']->id; + $document->_rev = $ret['decoded']->rev; + + return $document; + } + + /** + * @param $doc + * @param $name + * @param $content + * Content of the attachment in a string-buffer format. This function will automatically base64-encode content for + * you, so you don't have to do it. + * @param $mime_type + * Optional. Will be auto-detected if not provided + * @return void + */ + public function add_attachment($doc, $name, $content, $mime_type = null) { + if (empty($doc->_attachments) || !is_object($doc->_attachments)) { + $doc->_attachments = new stdClass(); + } + + if (empty($mime_type)) { + $mime_type = $this->rest_client->content_mime_type($content); + } + + $doc->_attachments->$name = new stdClass(); + $doc->_attachments->$name->content_type = $mime_type; + $doc->_attachments->$name->data = base64_encode($content); + } + + /** + * @param $doc + * @param $name + * @param $file + * Full path to a file (e.g. as returned by PHP's realpath function). + * @param $mime_type + * Optional. Will be auto-detected if not provided + * @return void + */ + public function add_attachment_file($doc, $name, $file, $mime_type = null) { + $content = file_get_contents($file); + $this->add_attachment($doc, $name, $content, $mime_type); + } + + /** + * + * Retrieve a document from CouchDB + * + * @throws SetteeWrongInputException + * + * @param $id + * Unique ID (usually: UUID) of the document to be retrieved. + * @return + * database document in PHP object format. 
+ */ + function get($id) { + if (empty($id)) { + throw new SetteeWrongInputException("Error: Can't retrieve a document without a uuid."); + } + + $full_uri = $this->dbname . "/" . $this->safe_urlencode($id); +$full_uri = str_replace("%3Frev%3D","?rev=",$full_uri); + $ret = $this->rest_client->http_get($full_uri); + return $ret['decoded']; + } + + /** + * + * Get the latest revision of a document with document id: $id in CouchDB. + * + * @throws SetteeWrongInputException + * + * @param $id + * Unique ID (usually: UUID) of the document to be retrieved. + * @return + * database document in PHP object format. + */ + function get_rev($id) { + if (empty($id)) { + throw new SetteeWrongInputException("Error: Can't query a document without a uuid."); + } + + $full_uri = $this->dbname . "/" . $this->safe_urlencode($id); + $headers = $this->rest_client->http_head($full_uri); + if (empty($headers['Etag'])) { + throw new SetteeRestClientException("Error: could not retrieve revision. Server unexpectedly returned empty Etag"); + } + $etag = str_replace('"', '', $headers['Etag']); + return $etag; + } + + /** + * Delete a document + * + * @param $document + * a PHP object or JSON representation of the document that has _id and _rev fields. + * + * @return void + */ + function delete($document) { + if (!is_object($document)) { + $document = json_decode($document); + } + + $full_uri = $this->dbname . "/" . $this->safe_urlencode($document->_id) . "?rev=" . $document->_rev; + $this->rest_client->http_delete($full_uri); + } + + + /*----------------- View-related functions --------------*/ + + /** + * Create a new view or update an existing one. + * + * @param $design_doc + * @param $view_name + * @param $map_src + * Source code of the map function in Javascript + * @param $reduce_src + * Source code of the reduce function in Javascript (optional) + * @return void + */ + function save_view($design_doc, $view_name, $map_src, $reduce_src = null) { + $obj = new stdClass(); + $obj->_id = "_design/" . urlencode($design_doc); + $view_name = urlencode($view_name); + $obj->views->$view_name->map = $map_src; + if (!empty($reduce_src)) { + $obj->views->$view_name->reduce = $reduce_src; + } + + // allow safe updates (even if slightly slower due to extra: rev-detection check). + return $this->save($obj, true); + } + + /** + * Create a new view or update an existing one. + * + * @param $design_doc + * @param $view_name + * @param $key + * key parameter to a view. Can be a single value or an array (for a range). If passed an array, function assumes + * that first element is startkey, second: endkey. + * @param $descending + * return results in descending order. Please don't forget that if you are using a startkey/endkey, when you change + * order you also need to swap startkey and endkey values! + * + * @return void + */ + function get_view($design_doc, $view_name, $key = null, $descending = false) { + $id = "_design/" . urlencode($design_doc); + $view_name = urlencode($view_name); + $id .= "/_view/$view_name"; + + $data = array(); + if (!empty($key)) { + if (is_string($key)) { + $data = "key=" . '"' . $key . '"'; + } + elseif (is_array($key)) { + list($startkey, $endkey) = $key; + $data = "startkey=" . '"' . $startkey . '"&' . "endkey=" . '"' . $endkey . '"'; + } + + if ($descending) { + $data .= "&descending=true"; + } + } + + + + if (empty($id)) { + throw new SetteeWrongInputException("Error: Can't retrieve a document without a uuid."); + } + + $full_uri = $this->dbname . "/" . 
$this->safe_urlencode($id); +$full_uri = str_replace("%253Fgroup%253Dtrue","?group=true",$full_uri); + $ret = $this->rest_client->http_get($full_uri, $data); + return $ret['decoded']; + + } + + /** + * @param $id + * @return + * return a properly url-encoded id. + */ + private function safe_urlencode($id) { + //-- System views like _design can have "/" in their URLs. + $id = rawurlencode($id); + if (substr($id, 0, 1) == '_') { + $id = str_replace('%2F', '/', $id); + } + return $id; + } + + /** Getter for a database name */ + function get_name() { + return $this->dbname; + } + +} --- a/scrape.py +++ b/scrape.py @@ -7,6 +7,61 @@ from urlparse import urljoin import time import os +import mimetypes +import re +import urllib +import urlparse + +def canonurl(url): + r"""Return the canonical, ASCII-encoded form of a UTF-8 encoded URL, or '' + if the URL looks invalid. + >>> canonurl('\xe2\x9e\xa1.ws') # tinyarro.ws + 'http://xn--hgi.ws/' + """ + # strip spaces at the ends and ensure it's prefixed with 'scheme://' + url = url.strip() + if not url: + return '' + if not urlparse.urlsplit(url).scheme: + url = 'http://' + url + + # turn it into Unicode + #try: + # url = unicode(url, 'utf-8') + #except UnicodeDecodeError: + # return '' # bad UTF-8 chars in URL + + # parse the URL into its components + parsed = urlparse.urlsplit(url) + scheme, netloc, path, query, fragment = parsed + + # ensure scheme is a letter followed by letters, digits, and '+-.' chars + if not re.match(r'[a-z][-+.a-z0-9]*$', scheme, flags=re.I): + return '' + scheme = str(scheme) + + # ensure domain and port are valid, eg: sub.domain.<1-to-6-TLD-chars>[:port] + match = re.match(r'(.+\.[a-z0-9]{1,6})(:\d{1,5})?$', netloc, flags=re.I) + if not match: + return '' + domain, port = match.groups() + netloc = domain + (port if port else '') + netloc = netloc.encode('idna') + + # ensure path is valid and convert Unicode chars to %-encoded + if not path: + path = '/' # eg: 'http://google.com' -> 'http://google.com/' + path = urllib.quote(urllib.unquote(path.encode('utf-8')), safe='/;') + + # ensure query is valid + query = urllib.quote(urllib.unquote(query.encode('utf-8')), safe='=&?/') + + # ensure fragment is valid + fragment = urllib.quote(urllib.unquote(fragment.encode('utf-8'))) + + # piece it all back together, truncating it to a maximum of 4KB + url = urlparse.urlunsplit((scheme, netloc, path, query, fragment)) + return url[:4096] #http://diveintopython.org/http_web_services/etags.html class NotModifiedHandler(urllib2.BaseHandler): @@ -16,14 +71,18 @@ return addinfourl def fetchURL(docsdb, url, fieldName, agencyID, scrape_again=True): - hash = hashlib.md5(url).hexdigest() + url = canonurl(url) + hash = hashlib.md5(url).hexdigest().encode("utf-8") req = urllib2.Request(url) print "Fetching %s" % url + if url.startswith("mailto") or url.startswith("javascript") or url.startswith("#") or url == None or url == "": + print "Not a valid HTTP url" + return (None,None) doc = docsdb.get(hash) if doc == None: doc = {'_id': hash, 'agencyID': agencyID, 'url': url, 'fieldName':fieldName} else: - if (time.time() - doc['page_scraped']) < 3600: + if (('page_scraped' in doc) and (time.time() - doc['page_scraped']) < 999999): print "Uh oh, trying to scrape URL again too soon!" 
last_attachment_fname = doc["_attachments"].keys()[-1] last_attachment = docsdb.get_attachment(doc,last_attachment_fname) @@ -41,17 +100,23 @@ req.add_header("If-Modified-Since", doc['last_modified']) opener = urllib2.build_opener(NotModifiedHandler()) - url_handle = opener.open(req) - headers = url_handle.info() # the addinfourls have the .info() too - doc['etag'] = headers.getheader("ETag") - doc['last_modified'] = headers.getheader("Last-Modified") - doc['date'] = headers.getheader("Date") - doc['page_scraped'] = time.time() - doc['web_server'] = headers.getheader("Server") - doc['powered_by'] = headers.getheader("X-Powered-By") - doc['file_size'] = headers.getheader("Content-Length") - doc['mime_type'] = headers.getheader("Content-Type").split(";")[0] - if hasattr(url_handle, 'code'): + try: + url_handle = opener.open(req) + headers = url_handle.info() # the addinfourls have the .info() too + doc['etag'] = headers.getheader("ETag") + doc['last_modified'] = headers.getheader("Last-Modified") + doc['date'] = headers.getheader("Date") + doc['page_scraped'] = time.time() + doc['web_server'] = headers.getheader("Server") + doc['powered_by'] = headers.getheader("X-Powered-By") + doc['file_size'] = headers.getheader("Content-Length") + content_type = headers.getheader("Content-Type") + if content_type != None: + doc['mime_type'] = content_type.split(";")[0] + else: + (type,encoding) = mimetypes.guess_type(url) + doc['mime_type'] = type + if hasattr(url_handle, 'code'): if url_handle.code == 304: print "the web page has not been modified" return (None,None) @@ -61,10 +126,15 @@ doc = docsdb.get(hash) # need to get a _rev docsdb.put_attachment(doc, content, str(time.time())+"-"+os.path.basename(url), doc['mime_type']) return (doc['mime_type'], content) - #store as attachment epoch-filename - else: - print "error %s in downloading %s" % url_handle.code, URL - doc['error'] = "error %s in downloading %s" % url_handle.code, URL + #store as attachment epoch-filename + except urllib2.URLError as e: + error = "" + if hasattr(e, 'reason'): + error = "error %s in downloading %s" % (str(e.reason), url) + elif hasattr(e, 'code'): + error = "error %s in downloading %s" % (e.code, url) + print error + doc['error'] = error docsdb.save(doc) return (None,None) @@ -92,6 +162,12 @@ # lets not do external links for now # linkurls.add(link['href']) None + if link['href'].startswith("mailto"): + # not http + None + if link['href'].startswith("javascript"): + # not http + None else: linkurls.add(urljoin(url,link['href'].replace(" ","%20"))) for linkurl in linkurls: @@ -108,9 +184,15 @@ agency = agencydb.get(row.id) print agency['name'] for key in agency.keys(): - if key == 'website' or key.endswith('URL'): - print key - scrapeAndStore(docsdb, agency[key],agency['scrapeDepth'],key,agency['_id']) - agency['metadata']['lastscraped'] = time.time() + if key == 'website': + scrapeAndStore(docsdb, agency[key],0,key,agency['_id']) + if key.endswith('URL'): + print key + depth = 1 + if 'scrapeDepth' in agency.keys(): + depth = agency['scrapeDepth'] + scrapeAndStore(docsdb, agency[key],depth,key,agency['_id']) + + agency['metadata']['lastScraped'] = time.time() agencydb.save(agency)
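
For reference, the edge cases this commit teaches fetchURL to handle can be exercised on their own. The sketch below is illustrative only and is not part of the repository: it assumes the same Python 2 / urllib2 environment as scrape.py, and the helper name classify_and_fetch is hypothetical. It mirrors the three new guards: links with non-HTTP schemes (mailto:, javascript:, bare fragments) are skipped, a missing Content-Type header falls back to mimetypes.guess_type, and network errors are recorded instead of aborting the crawl.

# Illustrative sketch, not repository code. Python 2 / urllib2, as in scrape.py.
import mimetypes
import urllib2

def classify_and_fetch(url):  # hypothetical helper, for illustration only
    # Skip links that can never be fetched over HTTP (mirrors the new checks in fetchURL).
    if url is None or url == "" or url.startswith("mailto") \
            or url.startswith("javascript") or url.startswith("#"):
        print "Not a valid HTTP url"
        return (None, None)
    try:
        url_handle = urllib2.urlopen(urllib2.Request(url))
        headers = url_handle.info()
        # Fall back to guessing the MIME type from the URL when the server
        # sends no Content-Type header (the case the commit guards against).
        content_type = headers.getheader("Content-Type")
        if content_type is not None:
            mime_type = content_type.split(";")[0]
        else:
            (mime_type, encoding) = mimetypes.guess_type(url)
        return (mime_type, url_handle.read())
    except urllib2.URLError as e:
        # Record the failure and keep crawling, rather than letting the
        # exception kill the whole scrape run.
        if hasattr(e, 'reason'):
            print "error %s in downloading %s" % (str(e.reason), url)
        elif hasattr(e, 'code'):
            print "error %s in downloading %s" % (e.code, url)
        return (None, None)

print classify_and_fetch("mailto:someone@example.gov.au")   # prints the warning, returns (None, None)
print classify_and_fetch("http://www.australia.gov.au/")    # fetched normally, if reachable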