From: maxious
Date: Mon, 30 Jan 2012 04:30:13 +0000
Subject: Remove couchdb-lucene compiled version
X-Git-Url: https://maxious.lambdacomplex.org/git/?p=disclosr.git&a=commitdiff&h=baaaa2c5a4f9c40c69331a8788db0c00058efe96
---
Remove couchdb-lucene compiled version

Former-commit-id: 5fe11f7e1bd562a35633597a22768d929c424758
---

--- a/getAgency.php
+++ b/getAgency.php
@@ -124,7 +124,7 @@
         }
     }
 
-    $mode = "view";
+    $mode = "edit";
     if ($mode == "edit") {
         $row = addDefaultFields(object_to_array($row));
     } else {

--- a/include/couchdb.inc.php
+++ b/include/couchdb.inc.php
@@ -26,14 +26,16 @@
             emit(doc._id, doc.parentOrg);
         }
     };";
-    $obj->views->byName->map = "function(doc) {
+    $obj->views->byName->map = 'function(doc) {
+    if (typeof(doc["status"]) == "undefined" || doc["status"] != "suspended") {
         emit(doc.name, doc._id);
         for (name in doc.otherNames) {
-if (doc.otherNames[name] != '' && doc.otherNames[name] != doc.name) {
+if (doc.otherNames[name] != "" && doc.otherNames[name] != doc.name) {
             emit(doc.otherNames[name], doc._id);
         }
     }
-};";
+    }
+};';
 
     $obj->views->foiEmails->map = "function(doc) {
         emit(doc._id, doc.foiEmail);

--- a/schemas/agency.json.php
+++ b/schemas/agency.json.php
@@ -24,8 +24,10 @@
     "consultanciesURL" => Array("type" => "string", "required" => true, "x-title" => "Consultants Hired", "description" => ""),
     "legalExpenditureURL" => Array("type" => "string", "required" => true, "x-title" => "Legal Services Expenditure", "description" => "Legal Services Expenditure mandated by Legal Services Directions 2005"),
     "recordsListURL" => Array("type" => "string", "required" => true, "x-title" => "Files/Records Held", "description" => "Indexed lists of departmental and agency files, mandated by the Senate"),
-    "FOIDocumentsURL" => Array("type" => "string", "required" => true, "x-title" => "FOI Documents Released", "description" => ""),
-    "infoPublicationSchemeURL" => Array("type" => "string", "required" => true, "x-title" => "Information Publication Scheme", "description" => ""),
+    "FOIDocumentsURL" => Array("type" => "string", "required" => true, "x-title" => "FOI Documents Released", "description" => "FOI Disclosure Log URL"),
+    "FOIDocumentsRSSURL" => Array("type" => "string", "required" => false, "x-title" => "RSS Feed of FOI Documents Released", "description" => "FOI Disclosure Log in RSS format"),
+    "hasFOIPDF" => Array("type" => "string", "required" => false, "x-title" => "Has FOI Documents Released in PDF", "description" => "FOI Disclosure Log contains any PDFs"),
+    "infoPublicationSchemeURL" => Array("type" => "string", "required" => true, "x-title" => "Information Publication Scheme", "description" => ""),
     "appointmentsURL" => Array("type" => "string", "required" => true, "x-title" => "Agency Appointments/Boards", "description" => "Departmental and agency appointments and vacancies , mandated by the Senate"),
     "advertisingURL" => Array("type" => "string", "required" => true, "x-title" => "Approved Advertising Campaigns", "description" => " Agency advertising and public information projects, mandated by the Senate "),
     "hasRSS" => Array("type" => "string", "required" => true, "x-title" => "Has RSS", "description" => ""),

--- a/scrape.py
+++ b/scrape.py
@@ -3,6 +3,7 @@
 import urllib2
 from BeautifulSoup import BeautifulSoup
 import re
+import hashlib
 
 #http://diveintopython.org/http_web_services/etags.html
 class NotModifiedHandler(urllib2.BaseHandler):
@@ -11,25 +12,25 @@
         addinfourl.code = code
         return addinfourl
 
-def scrapeAndStore(URL, depth, agency):
-    URL = "http://www.google.com"
-    req = urllib2.Request(URL)
-    etag = 'y'
-    last_modified = 'y'
-    #if there is a previous version sotred in couchdb, load caching helper tags
-    if etag:
-        req.add_header("If-None-Match", etag)
-    if last_modified:
-        req.add_header("If-Modified-Since", last_modified)
+def scrapeAndStore(docsdb, url, depth, agencyID):
+    hash = hashlib.md5(url).hexdigest()
+    req = urllib2.Request(url)
+    print "Fetching %s" % url
+    doc = docsdb[hash]
+    #if there is a previous version stored in couchdb, load caching helper tags
+    if doc.has_key('etag'):
+        req.add_header("If-None-Match", doc['etag'])
+    if doc.has_key('last_modified'):
+        req.add_header("If-Modified-Since", doc['last_modified'])
 
     opener = urllib2.build_opener(NotModifiedHandler())
     url_handle = opener.open(req)
     headers = url_handle.info() # the addinfourls have the .info() too
-    etag = headers.getheader("ETag")
-    last_modified = headers.getheader("Last-Modified")
-    web_server = headers.getheader("Server")
-    file_size = headers.getheader("Content-Length")
-    mime_type = headers.getheader("Content-Type")
+    doc['etag'] = headers.getheader("ETag")
+    doc['last_modified'] = headers.getheader("Last-Modified")
+    doc['web_server'] = headers.getheader("Server")
+    doc['file_size'] = headers.getheader("Content-Length")
+    doc['mime_type'] = headers.getheader("Content-Type")
 
     if hasattr(url_handle, 'code'):
         if url_handle.code == 304:
@@ -41,13 +42,14 @@
     soup = BeautifulSoup(html)
     links = soup.findAll('a') # soup.findAll('a', id=re.compile("^p-"))
     for link in links:
-        print link['href']
-        #for each unique link
-        #if html mimetype
-        # go down X levels,
-        # diff with last stored attachment, store in document
-        #if not
-        # remember to save parentURL and title (link text that lead to document)
+        if link.has_key("href"):
+            print link['href']
+            #for each unique link
+            #if html mimetype
+            # go down X levels,
+            # diff with last stored attachment, store in document
+            #if not
+            # remember to save parentURL and title (link text that lead to document)
         #store as attachment epoch-filename
 
     else:
@@ -68,9 +70,10 @@
 
 # select database
 agencydb = couch['disclosr-agencies']
+docsdb = couch['disclosr-documents']
 
 for row in agencydb.view('app/getScrapeRequired'): #not recently scraped agencies view?
     agency = agencydb.get(row.id)
     print agency['name']
-scrapeAndStore("A",1,1)
+    scrapeAndStore(docsdb, agency['website'],1,agency['_id'])
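
For reference, a minimal self-contained sketch of the conditional-GET caching pattern this patch introduces, assuming couchdb-python and the same disclosr-documents database. The helper name fetch_with_cache, the example URL, the server address, and the fallback to a fresh document for a never-scraped URL (a plain docsdb[hash] lookup raises ResourceNotFound when no document exists yet) are illustrative assumptions, not part of the commit:

import hashlib
import urllib2

import couchdb  # couchdb-python, as used by scrape.py


def fetch_with_cache(docsdb, url):
    # hypothetical helper illustrating the pattern, not the committed scrapeAndStore
    doc_id = hashlib.md5(url).hexdigest()
    # fall back to a fresh document so first-time URLs do not raise ResourceNotFound
    doc = docsdb.get(doc_id) or {'_id': doc_id, 'url': url}

    req = urllib2.Request(url)
    # replay the validators saved from the previous fetch, if any
    if doc.get('etag'):
        req.add_header("If-None-Match", doc['etag'])
    if doc.get('last_modified'):
        req.add_header("If-Modified-Since", doc['last_modified'])

    try:
        url_handle = urllib2.urlopen(req)
    except urllib2.HTTPError, e:
        if e.code == 304:
            return doc, None  # unchanged since the last scrape
        raise

    # remember the validators so the next run can send a conditional request
    headers = url_handle.info()
    doc['etag'] = headers.getheader("ETag")
    doc['last_modified'] = headers.getheader("Last-Modified")
    doc['mime_type'] = headers.getheader("Content-Type")
    docsdb.save(doc)
    return doc, url_handle.read()


if __name__ == '__main__':
    couch = couchdb.Server('http://127.0.0.1:5984/')  # server URL is an assumption
    doc, html = fetch_with_cache(couch['disclosr-documents'], 'http://www.example.com/')
    print 'changed' if html is not None else 'not modified'

Letting stock urllib2 raise HTTPError on a 304 and catching it keeps the sketch independent of the custom NotModifiedHandler; either approach yields the same control flow.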