From: Maxious
Date: Wed, 14 Nov 2012 03:34:08 +0000
Subject: add production domain name
X-Git-Url: http://maxious.lambdacomplex.org/git/?p=disclosr.git&a=commitdiff&h=b63eff86af709f99bfd8fb854f78a5c39f34fed3
---

add production domain name

Former-commit-id: be7a82ae478776c6067cb44055f74cb1014df8fd
---
--- a/.gitmodules
+++ b/.gitmodules
@@ -28,4 +28,7 @@
 [submodule "lib/amon-php"]
 	path = lib/amon-php
 	url = https://github.com/martinrusev/amon-php.git
+[submodule "documents/lib/parsedatetime"]
+	path = documents/lib/parsedatetime
+	url = git://github.com/bear/parsedatetime.git
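
Note: the new parsedatetime submodule backs the free-text date handling introduced in documents/genericScrapers.py below, replacing dateutil.parser. The scraper calls Calendar.parseDateText(); this minimal sketch uses the better-documented parse() entry point instead, assuming the bear/parsedatetime API in which it returns a (struct_time, status) pair. Names here are illustrative only.

import parsedatetime as pdt

cal = pdt.Calendar()
# parse() accepts free-text dates such as "14 November 2012" and returns
# (struct_time, status); status is 0 when nothing could be parsed.
(dtdate, status) = cal.parse("14 November 2012")
if status:
    # fields 0-2 of the time tuple are year, month, day
    edate = str(dtdate[0]) + '-' + str(dtdate[1]) + '-' + str(dtdate[2])  # "2012-11-14"
else:
    edate = ""
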
--- a/admin/refreshDesignDoc.php
+++ b/admin/refreshDesignDoc.php
@@ -1,7 +1,177 @@
 <?php
 include_once('include/common.inc.php');
+$foidb = $server->get_db('disclosr-foidocuments');
+$obj = new stdClass();
+$obj->_id = "_design/" . urlencode("app");
+$obj->language = "javascript";
+$obj->views->all->map = "function(doc) { emit(doc._id, doc); };";
+$obj->views->byDate->map = "function(doc) { emit(doc.date, doc); };";
+
+// allow safe updates (even if slightly slower due to extra rev-detection check).
+$foidb->save($obj, true);
+
+
+function createDocumentsDesignDoc() {
+    /*
+    global $db;
+    $obj = new stdClass();
+    $obj->_id = "_design/" . urlencode("app");
+    $obj->language = "javascript";
+    $obj->views->all->map = "function(doc) { emit(doc._id, doc); };";
+    $obj->views->byABN->map = "function(doc) { emit(doc.abn, doc); };";
+    "views": {
+        "web_server": {
+            "map": "function(doc) {\n  emit(doc.web_server, 1);\n}",
+            "reduce": "function (key, values, rereduce) {\n    return sum(values);\n}"
+        },
+        "byAgency": {
+            "map": "function(doc) {\n  emit(doc.agencyID, 1);\n}",
+            "reduce": "function (key, values, rereduce) {\n    return sum(values);\n}"
+        },
+        "byURL": {
+            "map": "function(doc) {\n  emit(doc.url, doc);\n}"
+        },
+        "agency": {
+            "map": "function(doc) {\n  emit(doc.agencyID, doc);\n}"
+        },
+        "byWebServer": {
+            "map": "function(doc) {\n  emit(doc.web_server, doc);\n}"
+        },
+        "getValidationRequired": {
+            "map": "function(doc) {\nif (doc.mime_type == \"text/html\" \n&& typeof(doc.validation) == \"undefined\") {\n  emit(doc._id, doc._attachments);\n}\n}"
+        }
+    } */
+}
+
+//function createAgencyDesignDoc() {
 $db = $server->get_db('disclosr-agencies');
-createAgencyDesignDoc();
+$obj = new stdClass();
+$obj->_id = "_design/" . urlencode("app");
+$obj->language = "javascript";
+$obj->views->all->map = "function(doc) { emit(doc._id, doc); };";
+$obj->views->byABN->map = "function(doc) { emit(doc.abn, doc); };";
+$obj->views->byCanonicalName->map = "function(doc) {
+    if (doc.parentOrg || doc.orgType == 'FMA-DepartmentOfState') {
+        emit(doc.name, doc);
+    }
+};";
+$obj->views->byDeptStateName->map = "function(doc) {
+    if (doc.orgType == 'FMA-DepartmentOfState') {
+        emit(doc.name, doc._id);
+    }
+};";
+$obj->views->parentOrgs->map = "function(doc) {
+    if (doc.parentOrg) {
+        emit(doc._id, doc.parentOrg);
+    }
+};";
+$obj->views->byName->map = 'function(doc) {
+    if (typeof(doc["status"]) == "undefined" || doc["status"] != "suspended") {
+        emit(doc.name, doc._id);
+        if (typeof(doc.shortName) != "undefined" && doc.shortName != doc.name) {
+            emit(doc.shortName, doc._id);
+        }
+        for (name in doc.otherNames) {
+            if (doc.otherNames[name] != "" && doc.otherNames[name] != doc.name) {
+                emit(doc.otherNames[name], doc._id);
+            }
+        }
+        for (name in doc.foiBodies) {
+            if (doc.foiBodies[name] != "" && doc.foiBodies[name] != doc.name) {
+                emit(doc.foiBodies[name], doc._id);
+            }
+        }
+    }
+};';
+
+$obj->views->foiEmails->map = "function(doc) {
+    emit(doc._id, doc.foiEmail);
+};";
+
+$obj->views->byLastModified->map = "function(doc) { emit(doc.metadata.lastModified, doc); }";
+$obj->views->getActive->map = 'function(doc) { if (doc.status == "active") { emit(doc._id, doc); } };';
+$obj->views->getSuspended->map = 'function(doc) { if (doc.status == "suspended") { emit(doc._id, doc); } };';
+$obj->views->getScrapeRequired->map = "function(doc) {
+
+var lastScrape = Date.parse(doc.metadata.lastScraped);
+
+var today = new Date();
+
+if (!lastScrape || lastScrape + 1000 != today.getTime()) {
+    emit(doc._id, doc);
+}
+
+};";
+$obj->views->showNamesABNs->map = "function(doc) { emit(doc._id, {name: doc.name, abn: doc.abn}); };";
+$obj->views->getConflicts->map = "function(doc) {
+    if (doc._conflicts) {
+        emit(null, [doc._rev].concat(doc._conflicts));
+    }
+}";
+// http://stackoverflow.com/questions/646628/javascript-startswith
+$obj->views->score->map = 'if(!String.prototype.startsWith){
+    String.prototype.startsWith = function (str) {
+        return !this.indexOf(str);
+    }
+}
+
+function(doc) {
+    count = 0;
+    if (doc["status"] != "suspended") {
+        for(var propName in doc) {
+            if(typeof(doc[propName]) != "undefined" && doc[propName] != "") {
+                count++;
+            }
+        }
+        portfolio = doc.parentOrg;
+        if (doc.orgType == "FMA-DepartmentOfState") {
+            portfolio = doc._id;
+        }
+        if (doc.orgType == "Court-Commonwealth" || doc.orgType == "FMA-DepartmentOfParliament") {
+            portfolio = doc.orgType;
+        }
+        emit(count+doc._id, {id:doc._id, name: doc.name, score:count, orgType: doc.orgType, portfolio:portfolio});
+    }
+}';
+$obj->views->scoreHas->map = 'if(!String.prototype.startsWith){
+    String.prototype.startsWith = function (str) {
+        return !this.indexOf(str);
+    }
+}
+if(!String.prototype.endsWith){
+    String.prototype.endsWith = function(suffix) {
+        return this.indexOf(suffix, this.length - suffix.length) !== -1;
+    };
+}
+function(doc) {
+    if (typeof(doc["status"]) == "undefined" || doc["status"] != "suspended") {
+        for(var propName in doc) {
+            if(typeof(doc[propName]) != "undefined" && (propName.startsWith("has") || propName.endsWith("URL"))) {
+                emit(propName, 1);
+            }
+        }
+        emit("total", 1);
+    }
+}';
+$obj->views->scoreHas->reduce = 'function (key, values, rereduce) {
+    return sum(values);
+}';
+$obj->views->fieldNames->map = '
+function(doc) {
+    for(var propName in doc) {
+        emit(propName, doc._id);
+    }
+
+}';
+$obj->views->fieldNames->reduce = 'function (key, values, rereduce) {
+    return values.length;
+}';
+// allow safe updates (even if slightly slower due to extra rev-detection check).
+$db->save($obj, true);
+
+?>
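
Note: documents/index.php below switches from the byName view to the new byCanonicalName view, which emits the whole agency document as its value. A rough equivalent of that lookup from the Python side, sketched with the couchdb module the scrapers already use; host and database names are the ones in this patch.

import couchdb

couch = couchdb.Server('http://127.0.0.1:5984/')
agencydb = couch['disclosr-agencies']

# Mirror of the PHP get_view("app", "byCanonicalName") loop in
# documents/index.php: map each agency id to its canonical name.
idtoname = {}
for row in agencydb.view('app/byCanonicalName'):
    idtoname[row.id] = row.value['name'].strip()
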
--- a/documents/genericScrapers.py
+++ b/documents/genericScrapers.py
@@ -1,29 +1,73 @@
 import sys,os
 sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
 import scrape
+from bs4 import BeautifulSoup
+import parsedatetime as pdt
+from time import mktime
+from datetime import datetime
+import feedparser
+import abc
 
-from bs4 import BeautifulSoup
-import abc
-import dateutil.parser
+class GenericDisclogScraper(object):
+    __metaclass__ = abc.ABCMeta
+    agencyID = None
+    disclogURL = None
+    def getAgencyID(self):
+        """ disclosr agency id """
+        if self.agencyID == None:
+            self.agencyID = os.path.basename(sys.argv[0]).replace(".py","")
+        return self.agencyID
 
-class GenericOAICDisclogScraper(object):
-    __metaclass__ = abc.ABCMeta
+    def getURL(self):
+        """ disclog URL"""
+        if self.disclogURL == None:
+            agency = scrape.agencydb.get(self.getAgencyID())
+            self.disclogURL = agency['FOIDocumentsURL']
+        return self.disclogURL
+
     @abc.abstractmethod
-    def getAgencyID(self):
-        """ disclosr agency id """
+    def doScrape(self):
+        """ do the scraping """
         return
 
-    @abc.abstractmethod
-    def getURL(self):
-        """ disclog URL"""
+
+
+class GenericRSSDisclogScraper(GenericDisclogScraper):
+    def getDescription(self, entry, doc):
+        """ get description from rss entry"""
+        doc['description'] = entry.summary
         return
 
+    def doScrape(self):
+        foidocsdb = scrape.couch['disclosr-foidocuments']
+        (url,mime_type,content) = scrape.fetchURL(scrape.docsdb, self.getURL(), "foidocuments", self.getAgencyID())
+        feed = feedparser.parse(content)
+        for entry in feed.entries:
+            #print entry
+            print entry.id
+            hash = scrape.mkhash(entry.id)
+            #print hash
+            doc = foidocsdb.get(hash)
+            #print doc
+            if doc == None:
+                print "saving"
+                edate = datetime.fromtimestamp(mktime(entry.published_parsed)).strftime("%Y-%m-%d")
+                doc = {'_id': hash, 'agencyID': self.getAgencyID(), 'url': entry.link, 'docID': entry.id,
+                    "date": edate, "title": entry.title}
+                self.getDescription(entry, doc)
+                foidocsdb.save(doc)
+            else:
+                print "already saved"
+
+class GenericOAICDisclogScraper(GenericDisclogScraper):
+    __metaclass__ = abc.ABCMeta
     @abc.abstractmethod
     def getColumns(self,columns):
         """ rearranges columns if required """
         return
 
     def doScrape(self):
+        cal = pdt.Calendar()
         foidocsdb = scrape.couch['disclosr-foidocuments']
         (url,mime_type,content) = scrape.fetchURL(scrape.docsdb, self.getURL(), "foidocuments", self.getAgencyID())
         if content != None:
@@ -43,11 +87,17 @@
             doc = foidocsdb.get(hash)
             descriptiontxt = ""
             for string in description.stripped_strings:
-                descriptiontxt = descriptiontxt + string
+                descriptiontxt = descriptiontxt + " \n" + string
             if doc == None:
                 print "saving"
-                edate = dateutil.parser.parse(date.string).date().strftime("%Y-%m-%d")
+                dtresult = cal.parseDateText(date.string)
+                if len(dtresult) == 2:
+                    (dtdate,dtr) = dtresult
+                    print dtdate
+                    edate = ""+str(dtdate[0])+'-'+str(dtdate[1])+'-'+str(dtdate[2])
+                else:
+                    edate = ""
                 doc = {'_id': hash, 'agencyID': self.getAgencyID(), 'url': self.getURL(), "links": links,
                     'docID': id.string, "date": edate, "description": descriptiontxt,"title": title.string,"notes": notes.string}
                 foidocsdb.save(doc)
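
Note: with agencyID derived from the script filename and the disclog URL read from the agency record, a concrete scraper now only has to describe its table's column order. The scrapers later in this patch all reduce to this shape; the pattern below follows them directly.

import sys,os
sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
import genericScrapers

class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
    def getColumns(self,columns):
        # rearrange the scraped table columns into (id, date, description, title, notes)
        (id, date, description, title, notes) = columns
        return (id, date, description, title, notes)

if __name__ == '__main__':
    ScraperImplementation().doScrape()
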
--- a/documents/index.php
+++ b/documents/index.php
@@ -10,8 +10,8 @@
 $agenciesdb = $server->get_db('disclosr-agencies');
 
 $idtoname = Array();
-foreach ($agenciesdb->get_view("app", "byName")->rows as $row) {
-    $idtoname[$row->value] = trim($row->key);
+foreach ($agenciesdb->get_view("app", "byCanonicalName")->rows as $row) {
+    $idtoname[$row->id] = trim($row->value->name);
 }
 $foidocsdb = $server->get_db('disclosr-foidocuments');
 try {
@@ -34,3 +34,4 @@
 }
 include_footer_documents();
 ?>
+
--- /dev/null
+++ b/documents/lib/parsedatetime
--- a/documents/scrape.py
+++ b/documents/scrape.py
@@ -189,7 +189,7 @@
             scrapeAndStore(docsdb, linkurl, depth-1, fieldName, agencyID)
 
 #couch = couchdb.Server('http://192.168.1.148:5984/')
-couch = couchdb.Server('http://192.168.1.148:5984/')
+couch = couchdb.Server('http://127.0.0.1:5984/')
 # select database
 agencydb = couch['disclosr-agencies']
 docsdb = couch['disclosr-documents']
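
Note: the CouchDB address above stays hardcoded; the commit only swaps one literal for another. One alternative, not part of this patch, is to let an environment variable select the production host (COUCH_URL is a hypothetical name used only for this sketch):

import os
import couchdb

# Fall back to the local default that this patch hardcodes.
couch = couchdb.Server(os.environ.get('COUCH_URL', 'http://127.0.0.1:5984/'))
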
--- a/documents/scrapers/3cd40b1240e987cbcd3f0e67054ce259.py
+++ b/documents/scrapers/3cd40b1240e987cbcd3f0e67054ce259.py
@@ -5,12 +5,6 @@
 
 #http://www.doughellmann.com/PyMOTW/abc/
 class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
-    def getAgencyID(self):
-        return "3cd40b1240e987cbcd3f0e67054ce259"
-
-    def getURL(self):
-        return "http://www.apvma.gov.au/about/foi/disclosure/index.php"
-
     def getColumns(self,columns):
         (id, date, description, title, notes) = columns
         return (id, date, description, title, notes)
@@ -19,3 +13,4 @@
     print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
     print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
     ScraperImplementation().doScrape()
+
--- a/documents/scrapers/6847d7d95fda5ea58a7cd9a2620c673a.txt
+++ b/documents/scrapers/6847d7d95fda5ea58a7cd9a2620c673a.txt
@@ -1,1 +1,3 @@
+# multiple pages need to be scraped initially, each entry has a subpage
 http://www.ipaustralia.gov.au/about-us/freedom-of-information/foi-disclosure-log/?page=35&sessionId=3644188
+
--- a/documents/scrapers/8c9421f852c441910bf1d93a57b31d64.py
+++ b/documents/scrapers/8c9421f852c441910bf1d93a57b31d64.py
@@ -1,8 +1,17 @@
 import sys,os
 sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
-import scrape
-foidocsdb = scrape.couch['disclosr-foidocuments']
+import genericScrapers
+#RSS feed not detailed
 
-#rss feed has only one entry
-http://www.daff.gov.au/about/foi/ips/disclosure-log
+#http://www.doughellmann.com/PyMOTW/abc/
+class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
+    def getColumns(self,columns):
+        (id, date, title, description, notes) = columns
+        return (id, date, description, title, notes)
+
+if __name__ == '__main__':
+    print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
+    print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
+    ScraperImplementation().doScrape()
+
--- a/documents/scrapers/be9996f0ac58f71f23d074e82d44ead3.py
+++ b/documents/scrapers/be9996f0ac58f71f23d074e82d44ead3.py
@@ -1,11 +1,42 @@
 import sys,os
 sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
+import genericScrapers
+#RSS feed not detailed
 import scrape
-foidocsdb = scrape.couch['disclosr-foidocuments']
-
-import feedparser
-feed = feedparser.parse( "http://foi.deewr.gov.au/disclosure-log/rss")
-print feed.entries[0]
-#foreach feed.entries
+from bs4 import BeautifulSoup
+#http://www.doughellmann.com/PyMOTW/abc/
+class ScraperImplementation(genericScrapers.GenericRSSDisclogScraper):
+    def getDescription(self,entry,doc):
+        (url,mime_type,content) = scrape.fetchURL(scrape.docsdb, entry.link, "foidocuments", self.getAgencyID(), False)
+        if content != None:
+            if mime_type == "text/html" or mime_type == "application/xhtml+xml" or mime_type == "application/xml":
+                # http://www.crummy.com/software/BeautifulSoup/documentation.html
+                soup = BeautifulSoup(content)
+                links = []
+                description = ""
+                dldivs = soup.find('div',class_="download")
+                if dldivs != None:
+                    for atag in dldivs.find_all("a"):
+                        if atag.has_key('href'):
+                            links.append(scrape.fullurl(url,atag['href']))
+                nodldivs = soup.find('div',class_="incompleteNotification")
+                if nodldivs != None and nodldivs.stripped_strings != None:
+                    for text in nodldivs.stripped_strings:
+                        description = description + text
+                for row in soup.table.find_all('tr'):
+                    if row != None:
+                        description = description + "\n" + row.find('th').string + ": "
+                        for text in row.find('div').stripped_strings:
+                            description = description + text
+                if links != []:
+                    doc.update({'links': links})
+                if description != "":
+                    doc.update({'description': description})
+if __name__ == '__main__':
+    print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericRSSDisclogScraper)
+    print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericRSSDisclogScraper)
+    ScraperImplementation().doScrape()
+
--- a/documents/scrapers/c57c0bf315ce5977e730905707a2f6a3.txt
+++ b/documents/scrapers/c57c0bf315ce5977e730905707a2f6a3.txt
@@ -1,1 +1,3 @@
+# pdf
 http://www.awm.gov.au/about/AWM_Disclosure_Log.pdf
+
--- a/documents/scrapers/e2a845e55bc9986e6c75c5ad2c508b8d.py
+++ b/documents/scrapers/e2a845e55bc9986e6c75c5ad2c508b8d.py
@@ -1,1 +1,18 @@
+import sys,os
+sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
+import genericScrapers
+#RSS feed not detailed
+
+#http://www.doughellmann.com/PyMOTW/abc/
+class ScraperImplementation(genericScrapers.GenericRSSDisclogScraper):
+    def getColumns(self,columns):
+        (id, date, title, description, notes) = columns
+        return (id, date, description, title, notes)
+
+if __name__ == '__main__':
+    print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericRSSDisclogScraper)
+    print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericRSSDisclogScraper)
+    ScraperImplementation().doScrape()
+
 www.finance.gov.au/foi/disclosure-log/foi-rss.xml
+
--- a/documents/scrapers/rtk.py
+++ b/documents/scrapers/rtk.py
@@ -1,1 +1,18 @@
+import sys,os
+sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
+import genericScrapers
+#RSS feed not detailed
+
+#http://www.doughellmann.com/PyMOTW/abc/
+class ScraperImplementation(genericScrapers.GenericRSSDisclogScraper):
+    def getColumns(self,columns):
+        (id, date, title, description, notes) = columns
+        return (id, date, description, title, notes)
+
+if __name__ == '__main__':
+    print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericRSSDisclogScraper)
+    print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericRSSDisclogScraper)
+    ScraperImplementation().doScrape()
+
 http://www.righttoknow.org.au/feed/search/%20(latest_status:successful%20OR%20latest_status:partially_successful)
+
--- a/documents/template.inc.php
+++ b/documents/template.inc.php
@@ -69,7 +69,7 @@
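
Note: the trailing URLs kept in e2a845e55bc9986e6c75c5ad2c508b8d.py and rtk.py record the RSS feeds those scrapers are meant to consume; at run time GenericRSSDisclogScraper reads the actual URL from the agency record's FOIDocumentsURL field. For reference, a sketch of fetching the Right To Know feed directly with feedparser, the same library genericScrapers.py uses on fetched content:

import feedparser

feed = feedparser.parse("http://www.righttoknow.org.au/feed/search/%20(latest_status:successful%20OR%20latest_status:partially_successful)")
for entry in feed.entries:
    # entry.id seeds the document hash; published_parsed supplies the date
    print entry.id, entry.title
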