From: maxious
Date: Sat, 10 Nov 2012 02:42:42 +0000
Subject: beginnings rss scraper
X-Git-Url: http://maxious.lambdacomplex.org/git/?p=disclosr.git&a=commitdiff&h=6ae81704d2f2dce978de8da3826c30fc101f22e3
---
beginnings rss scraper

Former-commit-id: 78b7f5ee0c86281368da5eb0ed92ce1ad9cc575d
---

--- a/.gitmodules
+++ b/.gitmodules
@@ -28,4 +28,7 @@
 [submodule "lib/amon-php"]
 	path = lib/amon-php
 	url = https://github.com/martinrusev/amon-php.git
+[submodule "documents/lib/parsedatetime"]
+	path = documents/lib/parsedatetime
+	url = git://github.com/bear/parsedatetime.git

--- a/documents/genericScrapers.py
+++ b/documents/genericScrapers.py
@@ -1,10 +1,41 @@
 import sys,os
 sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
 import scrape
+from bs4 import BeautifulSoup
+import parsedatetime as pdt
+from time import mktime
+from datetime import datetime
+import feedparser
+import abc
 
-from bs4 import BeautifulSoup
-import abc
-import dateutil.parser
+class GenericRSSDisclogScraper(object):
+    __metaclass__ = abc.ABCMeta
+
+    @abc.abstractmethod
+    def getAgencyID(self):
+        """ disclosr agency id """
+        return
+
+    @abc.abstractmethod
+    def getURL(self):
+        """ disclog URL """
+        return
+
+    def doScrape(self):
+        foidocsdb = scrape.couch['disclosr-foidocuments']
+        (url, mime_type, content) = scrape.fetchURL(scrape.docsdb,
+            self.getURL(), "foidocuments", self.getAgencyID())
+        feed = feedparser.parse(content)
+        for entry in feed.entries:
+            #print entry
+            print entry.id
+            hash = scrape.mkhash(entry.link)
+            doc = foidocsdb.get(hash)
+            if doc is None:
+                print "saving"
+                edate = datetime.fromtimestamp(
+                    mktime(entry.published_parsed)).strftime("%Y-%m-%d")
+                doc = {'_id': hash, 'agencyID': self.getAgencyID(),
+                    'url': entry.link, 'docID': entry.id,
+                    "date": edate, "description": entry.summary,
+                    "title": entry.title}
+                foidocsdb.save(doc)
+            else:
+                print "already saved"
 
 class GenericOAICDisclogScraper(object):
     __metaclass__ = abc.ABCMeta
@@ -24,6 +55,7 @@
         return
 
     def doScrape(self):
+        cal = pdt.Calendar()
        foidocsdb = scrape.couch['disclosr-foidocuments']
        (url,mime_type,content) = scrape.fetchURL(scrape.docsdb, self.getURL(), "foidocuments", self.getAgencyID())
        if content != None:
@@ -43,12 +75,18 @@
            doc = foidocsdb.get(hash)
            descriptiontxt = ""
            for string in description.stripped_strings:
-                descriptiontxt = descriptiontxt + string
+                descriptiontxt = descriptiontxt + " \n" + string
            if doc == None:
                print "saving"
-                edate = dateutil.parser.parse(date.string).date().strftime("%Y-%m-%d")
+                dtresult = cal.parseDateText(date.string)
+                if len(dtresult) == 2:
+                    (dtdate, dtr) = dtresult
+                    print dtdate
+                    edate = str(dtdate[0]) + '-' + str(dtdate[1]) + '-' + str(dtdate[2])
+                else:
+                    edate = ""
                doc = {'_id': hash, 'agencyID': self.getAgencyID(), 'url': self.getURL(), "links": links, 'docID': id.string,
                    "date": edate, "description": descriptiontxt, "title": title.string, "notes": notes.string}
                foidocsdb.save(doc)
            else:

--- a/documents/index.php
+++ b/documents/index.php
@@ -10,8 +10,8 @@
 $agenciesdb = $server->get_db('disclosr-agencies');
 
 $idtoname = Array();
-foreach ($agenciesdb->get_view("app", "byName")->rows as $row) {
-    $idtoname[$row->value] = trim($row->key);
+foreach ($agenciesdb->get_view("app", "byCanonicalName")->rows as $row) {
+    $idtoname[$row->id] = trim($row->value->name);
 }
 $foidocsdb = $server->get_db('disclosr-foidocuments');
 try {
@@ -34,3 +34,4 @@
 }
 include_footer_documents();
 ?>
+
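
The genericScrapers.py hunk above turns free-text dates into ISO strings with
parsedatetime (hence the new submodule). A minimal sketch of that conversion,
assuming the library's Calendar.parse() entry point, which returns a
(time_struct, parse_status) pair; the parseDateText() call and the
len(dtresult) == 2 guard in the hunk suggest the exact return shape was still
being pinned down when this was committed:

    import parsedatetime as pdt
    from time import mktime
    from datetime import datetime

    cal = pdt.Calendar()
    # parse() returns (time_struct, parse_status); status is 0 when
    # nothing in the string was recognised as a date
    (timestruct, status) = cal.parse("10 November 2012")
    if status > 0:
        edate = datetime.fromtimestamp(mktime(timestruct)).strftime("%Y-%m-%d")
    else:
        edate = ""
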
--- /dev/null
+++ b/documents/lib/parsedatetime

--- a/documents/scrape.py
+++ b/documents/scrape.py
@@ -189,7 +189,7 @@
             scrapeAndStore(docsdb, linkurl, depth-1, fieldName, agencyID)
 
 #couch = couchdb.Server('http://192.168.1.148:5984/')
-couch = couchdb.Server('http://192.168.1.148:5984/')
+couch = couchdb.Server('http://127.0.0.1:5984/')
 # select database
 agencydb = couch['disclosr-agencies']
 docsdb = couch['disclosr-documents']

--- a/documents/scrapers/6847d7d95fda5ea58a7cd9a2620c673a.txt
+++ b/documents/scrapers/6847d7d95fda5ea58a7cd9a2620c673a.txt
@@ -1,1 +1,3 @@
+# multiple pages need to be scraped initially, each entry has a subpage
 http://www.ipaustralia.gov.au/about-us/freedom-of-information/foi-disclosure-log/?page=35&sessionId=3644188
+

--- a/documents/scrapers/8c9421f852c441910bf1d93a57b31d64.py
+++ b/documents/scrapers/8c9421f852c441910bf1d93a57b31d64.py
@@ -1,8 +1,23 @@
 import sys,os
 sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
-import scrape
-foidocsdb = scrape.couch['disclosr-foidocuments']
+import genericScrapers
+#RSS feed not detailed
 
-#rss feed has only one entry
-http://www.daff.gov.au/about/foi/ips/disclosure-log
+#http://www.doughellmann.com/PyMOTW/abc/
+class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
+    def getAgencyID(self):
+        return "8c9421f852c441910bf1d93a57b31d64"
+
+    def getURL(self):
+        return "http://www.daff.gov.au/about/foi/ips/disclosure-log"
+
+    def getColumns(self, columns):
+        (id, date, title, description, notes) = columns
+        return (id, date, description, title, notes)
+
+if __name__ == '__main__':
+    print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
+    print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
+    ScraperImplementation().doScrape()

--- a/documents/scrapers/be9996f0ac58f71f23d074e82d44ead3.py
+++ b/documents/scrapers/be9996f0ac58f71f23d074e82d44ead3.py
@@ -1,11 +1,19 @@
 import sys,os
 sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
-import scrape
-foidocsdb = scrape.couch['disclosr-foidocuments']
+import genericScrapers
+#RSS feed not detailed
 
-import feedparser
-feed = feedparser.parse( "http://foi.deewr.gov.au/disclosure-log/rss")
-print feed.entries[0]
-#foreach feed.entries
+#http://www.doughellmann.com/PyMOTW/abc/
+class ScraperImplementation(genericScrapers.GenericRSSDisclogScraper):
+    def getAgencyID(self):
+        return "be9996f0ac58f71f23d074e82d44ead3"
+
+    def getURL(self):
+        return "http://foi.deewr.gov.au/disclosure-log/rss"
+
+if __name__ == '__main__':
+    print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericRSSDisclogScraper)
+    print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericRSSDisclogScraper)
+    ScraperImplementation().doScrape()

--- a/documents/scrapers/c57c0bf315ce5977e730905707a2f6a3.txt
+++ b/documents/scrapers/c57c0bf315ce5977e730905707a2f6a3.txt
@@ -1,1 +1,3 @@
+# pdf
 http://www.awm.gov.au/about/AWM_Disclosure_Log.pdf
+
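
The 6847d7d95fda5ea58a7cd9a2620c673a.txt note above records that IP
Australia's disclosure log is paginated and that each entry sits on its own
subpage. A rough sketch of how the index pages could be walked with the
repo's existing scrape.fetchURL() helper; fetchAllPages, the page limit and
the ?page= query format are assumptions, not code from this commit:

    import scrape
    from bs4 import BeautifulSoup

    def fetchAllPages(agencyID, baseURL, maxPages=50):
        # walk ?page=1..N until a fetch comes back empty
        for page in range(1, maxPages + 1):
            (url, mime_type, content) = scrape.fetchURL(scrape.docsdb,
                baseURL + "?page=" + str(page), "foidocuments", agencyID)
            if content is None:
                break
            soup = BeautifulSoup(content)
            # each row links to a subpage holding the actual disclosure
            # details; those would be fetched and parsed here
            for link in soup.find_all("a"):
                print link.get("href")
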
"http://foi.deewr.gov.au/disclosure-log/rss" + + def getColumns(self,columns): + (id, date, title, description, notes) = columns + return (id, date, description, title, notes) + +if __name__ == '__main__': + print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericRSSDisclogScraper) + print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericRSSDisclogScraper) + ScraperImplementation().doScrape() + www.finance.gov.au/foi/disclosure-log/foi-rss.xml + --- a/documents/scrapers/rtk.py +++ b/documents/scrapers/rtk.py @@ -1,1 +1,24 @@ +import sys,os +sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../')) +import genericScrapers +#RSS feed not detailed + +#http://www.doughellmann.com/PyMOTW/abc/ +class ScraperImplementation(genericScrapers.GenericRSSDisclogScraper): + def getAgencyID(self): + return "be9996f0ac58f71f23d074e82d44ead3" + + def getURL(self): + return "http://foi.deewr.gov.au/disclosure-log/rss" + + def getColumns(self,columns): + (id, date, title, description, notes) = columns + return (id, date, description, title, notes) + +if __name__ == '__main__': + print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericRSSDisclogScraper) + print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericRSSDisclogScraper) + ScraperImplementation().doScrape() + http://www.righttoknow.org.au/feed/search/%20(latest_status:successful%20OR%20latest_status:partially_successful) + --- a/documents/template.inc.php +++ b/documents/template.inc.php @@ -127,13 +127,21 @@ } function displayLogEntry($row, $idtoname) { - echo "
--- a/documents/template.inc.php
+++ b/documents/template.inc.php
@@ -127,13 +127,21 @@
 }
 
 function displayLogEntry($row, $idtoname) {
-    echo "<div><h2>".$row->value->date.": ".$row->value->title." (".$idtoname[$row->value->agencyID].")</h2>
-<p>".$row->value->description."<br>Note: ".$row->value->notes."</p>";
+    echo "<div><h2>".$row->value->date.": ".$row->value->title." (".$idtoname[$row->value->agencyID].")</h2>
+<p>".$row->value->description;
+if (isset($row->value->notes)) {
+echo "<br>Note: ".$row->value->notes;
+}
+echo "</p>";
+
+if (isset($row->value->links)) {
+echo "<h3>Links/Documents</h3><ul>";
 foreach ($row->value->links as $link) {
     echo "<li><a href='".$link."'>".$link."</a></li>";
 }
+echo "</ul>";
+}
 echo "<small><a href='".$row->value->url."'>View original source...</a> ID: ".$row->value->docID."</small>";
 echo "</div>";
 }
+
"; } + --- a/include/couchdb.inc.php +++ b/include/couchdb.inc.php @@ -3,7 +3,18 @@ include $basePath . "schemas/schemas.inc.php"; require ($basePath . 'couchdb/settee/src/settee.php'); - +function createFOIDocumentsDesignDoc() { + /* "map": "function(doc) {\n emit(doc.web_server, 1);\n}", + "reduce": "function (key, values, rereduce) {\n return sum(values);\n}" + }, + "byAgency": { + "map": "function(doc) {\n emit(doc.agencyID, 1);\n}", + "reduce": "function (key, values, rereduce) {\n return sum(values);\n}" + }, + "byURL": { + "map": "function(doc) {\n emit(doc.url, doc);\n}" +*/ +} function createDocumentsDesignDoc() { /* "views": { "web_server": {