From: maxious
Date: Sat, 10 Nov 2012 09:15:21 +0000
Subject: derive agencyID and disclog url from filename
X-Git-Url: http://maxious.lambdacomplex.org/git/?p=disclosr.git&a=commitdiff&h=8ac510d67c62c859f6f548b3224996516ef1708b
---
derive agencyID and disclog url from filename

Former-commit-id: eddf90809214ee502e593c769c4bd0b0b2fafc3a
---
--- a/documents/genericScrapers.py
+++ b/documents/genericScrapers.py
@@ -3,19 +3,64 @@
 import scrape
 from bs4 import BeautifulSoup
 import parsedatetime as pdt
+from time import mktime
+from datetime import datetime
+import feedparser
 import abc
-class GenericOAICDisclogScraper(object):
-    __metaclass__ = abc.ABCMeta
+
+class GenericDisclogScraper(object):
+    __metaclass__ = abc.ABCMeta
+    agencyID = None
+    disclogURL = None
+    def getAgencyID(self):
+        """ disclosr agency id """
+        if self.agencyID == None:
+            self.agencyID = os.path.basename(sys.argv[0]).replace(".py","")
+        return self.agencyID
+
+    def getURL(self):
+        """ disclog URL"""
+        if self.disclogURL == None:
+            agency = scrape.agencydb.get(self.getAgencyID())
+            self.disclogURL = agency['FOIDocumentsURL']
+        return self.disclogURL
+
     @abc.abstractmethod
-    def getAgencyID(self):
-        """ disclosr agency id """
+    def doScrape(self):
+        """ do the scraping """
         return
-    @abc.abstractmethod
-    def getURL(self):
-        """ disclog URL"""
+
+
+class GenericRSSDisclogScraper(GenericDisclogScraper):
+    def getDescription(self, entry, doc):
+        """ get description from rss entry"""
+        doc['description'] = entry.summary
         return
+    def doScrape(self):
+        foidocsdb = scrape.couch['disclosr-foidocuments']
+        (url,mime_type,content) = scrape.fetchURL(scrape.docsdb, self.getURL(), "foidocuments", self.getAgencyID())
+        feed = feedparser.parse(content)
+        for entry in feed.entries:
+            #print entry
+            print entry.id
+            hash = scrape.mkhash(entry.id)
+            #print hash
+            doc = foidocsdb.get(hash)
+            #print doc
+            if doc == None:
+                print "saving"
+                edate = datetime.fromtimestamp(mktime( entry.published_parsed)).strftime("%Y-%m-%d")
+                doc = {'_id': hash, 'agencyID': self.getAgencyID(), 'url': entry.link, 'docID': entry.id,
+                    "date": edate,"title": entry.title}
+                self.getDescription(entry, doc)
+                foidocsdb.save(doc)
+            else:
+                print "already saved"
+
+class GenericOAICDisclogScraper(GenericDisclogScraper):
+    __metaclass__ = abc.ABCMeta
     @abc.abstractmethod
     def getColumns(self,columns):
         """ rearranges columns if required """
@@ -42,7 +87,7 @@
             doc = foidocsdb.get(hash)
             descriptiontxt = ""
             for string in description.stripped_strings:
-                descriptiontxt = descriptiontxt + string
+                descriptiontxt = descriptiontxt + " \n" + string
             if doc == None:
                 print "saving"
--- a/documents/index.php
+++ b/documents/index.php
@@ -10,8 +10,8 @@
 $agenciesdb = $server->get_db('disclosr-agencies');
 
 $idtoname = Array();
-foreach ($agenciesdb->get_view("app", "byName")->rows as $row) {
-    $idtoname[$row->value] = trim($row->key);
+foreach ($agenciesdb->get_view("app", "byCanonicalName")->rows as $row) {
+    $idtoname[$row->id] = trim($row->value->name);
 }
 $foidocsdb = $server->get_db('disclosr-foidocuments');
 try {
@@ -34,3 +34,4 @@
 }
 include_footer_documents();
 ?>
+
--- a/documents/scrapers/3cd40b1240e987cbcd3f0e67054ce259.py
+++ b/documents/scrapers/3cd40b1240e987cbcd3f0e67054ce259.py
@@ -5,12 +5,6 @@
 #http://www.doughellmann.com/PyMOTW/abc/
 class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
-    def getAgencyID(self):
-        return "3cd40b1240e987cbcd3f0e67054ce259"
-
-    def getURL(self):
-        return "http://www.apvma.gov.au/about/foi/disclosure/index.php"
-
     def getColumns(self,columns):
         (id, date, description, title, notes) = columns
         return (id, date, description, title, notes)
@@ -19,3 +13,4 @@
     print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
     print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
     ScraperImplementation().doScrape()
+
--- a/documents/scrapers/8c9421f852c441910bf1d93a57b31d64.py
+++ b/documents/scrapers/8c9421f852c441910bf1d93a57b31d64.py
@@ -5,12 +5,6 @@
 #http://www.doughellmann.com/PyMOTW/abc/
 class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
-    def getAgencyID(self):
-        return "8c9421f852c441910bf1d93a57b31d64"
-
-    def getURL(self):
-        return "http://www.daff.gov.au/about/foi/ips/disclosure-log"
-
     def getColumns(self,columns):
         (id, date, title, description, notes) = columns
         return (id, date, description, title, notes)
--- a/documents/scrapers/be9996f0ac58f71f23d074e82d44ead3.py
+++ b/documents/scrapers/be9996f0ac58f71f23d074e82d44ead3.py
@@ -1,11 +1,42 @@
 import sys,os
 sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
+import genericScrapers
+#RSS feed not detailed
 import scrape
-foidocsdb = scrape.couch['disclosr-foidocuments']
-
-import feedparser
-feed = feedparser.parse( "http://foi.deewr.gov.au/disclosure-log/rss")
-print feed.entries[0]
-#foreach feed.entries
+from bs4 import BeautifulSoup
+#http://www.doughellmann.com/PyMOTW/abc/
+class ScraperImplementation(genericScrapers.GenericRSSDisclogScraper):
+    def getDescription(self,entry,doc):
+        (url,mime_type,content) = scrape.fetchURL(scrape.docsdb, entry.link, "foidocuments", self.getAgencyID(), False)
+        if content != None:
+            if mime_type == "text/html" or mime_type == "application/xhtml+xml" or mime_type =="application/xml":
+                # http://www.crummy.com/software/BeautifulSoup/documentation.html
+                soup = BeautifulSoup(content)
+                links = []
+                description = ""
+                dldivs = soup.find('div',class_="download")
+                if dldivs != None:
+                    for atag in dldivs.find_all("a"):
+                        if atag.has_key('href'):
+                            links.append(scrape.fullurl(url,atag['href']))
+                nodldivs = soup.find('div',class_="incompleteNotification")
+                if nodldivs != None and nodldivs.stripped_strings != None:
+                    for text in nodldivs.stripped_strings:
+                        description = description + text
+                for row in soup.table.find_all('tr'):
+                    if row != None:
+                        description = description + "\n" + row.find('th').string + ": "
+                        for text in row.find('div').stripped_strings:
+                            description = description + text
+                if links != []:
+                    doc.update({'links': links})
+                if description != "":
+                    doc.update({ 'description': description})
+if __name__ == '__main__':
+    print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericRSSDisclogScraper)
+    print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericRSSDisclogScraper)
+    ScraperImplementation().doScrape()
+
+
--- a/documents/scrapers/e2a845e55bc9986e6c75c5ad2c508b8d.py
+++ b/documents/scrapers/e2a845e55bc9986e6c75c5ad2c508b8d.py
@@ -1,1 +1,18 @@
+import sys,os
+sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
+import genericScrapers
+#RSS feed not detailed
+
+#http://www.doughellmann.com/PyMOTW/abc/
+class ScraperImplementation(genericScrapers.GenericRSSDisclogScraper):
+    def getColumns(self,columns):
+        (id, date, title, description, notes) = columns
+        return (id, date, description, title, notes)
+
+if __name__ == '__main__':
+    print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericRSSDisclogScraper)
+    print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericRSSDisclogScraper)
+    ScraperImplementation().doScrape()
+
 www.finance.gov.au/foi/disclosure-log/foi-rss.xml
+
--- a/documents/scrapers/rtk.py
+++ b/documents/scrapers/rtk.py
@@ -1,1 +1,18 @@
+import sys,os
+sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
+import genericScrapers
+#RSS feed not detailed
+
+#http://www.doughellmann.com/PyMOTW/abc/
+class ScraperImplementation(genericScrapers.GenericRSSDisclogScraper):
+    def getColumns(self,columns):
+        (id, date, title, description, notes) = columns
+        return (id, date, description, title, notes)
+
+if __name__ == '__main__':
+    print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericRSSDisclogScraper)
+    print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericRSSDisclogScraper)
+    ScraperImplementation().doScrape()
+
 http://www.righttoknow.org.au/feed/search/%20(latest_status:successful%20OR%20latest_status:partially_successful)
+
--- a/documents/template.inc.php
+++ b/documents/template.inc.php
@@ -69,7 +69,7 @@
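For illustration only (not part of the commit): with this change a scraper no longer hard-codes its agency ID or disclosure-log URL. GenericDisclogScraper derives the agency ID from the scraper's own filename and then looks the URL up in the agency database. A minimal standalone sketch of that derivation, using a hypothetical in-memory dict in place of the project's CouchDB-backed scrape.agencydb (the example id and URL are the values removed from the apvma scraper above):

import os

# Hypothetical stand-in for scrape.agencydb (a CouchDB database in the real project);
# keys are disclosr agency ids, values are agency records with a FOIDocumentsURL field.
agencydb = {
    "3cd40b1240e987cbcd3f0e67054ce259": {
        "FOIDocumentsURL": "http://www.apvma.gov.au/about/foi/disclosure/index.php"
    }
}

def getAgencyID(scriptpath):
    # the real code passes sys.argv[0]; the agency id is simply the filename minus ".py"
    return os.path.basename(scriptpath).replace(".py", "")

def getURL(agencyID):
    # look the agency record up and return its disclosure log URL
    return agencydb[agencyID]['FOIDocumentsURL']

if __name__ == '__main__':
    agencyID = getAgencyID("documents/scrapers/3cd40b1240e987cbcd3f0e67054ce259.py")
    print agencyID          # 3cd40b1240e987cbcd3f0e67054ce259
    print getURL(agencyID)  # http://www.apvma.gov.au/about/foi/disclosure/index.php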