From: Alex Sadleir Date: Sun, 17 Nov 2013 09:30:33 +0000 Subject: scraper and sort order updates X-Git-Url: https://maxious.lambdacomplex.org/git/?p=disclosr.git&a=commitdiff&h=30003873fb353ea406ce641336b6ba3cbe9d74d8 --- scraper and sort order updates Former-commit-id: c8bfc5c3ecbee616fa6dd8bfdd147bedf4d64646 --- --- a/documents/charts.php +++ b/documents/charts.php @@ -112,7 +112,11 @@ get_view("app", "byAgencyID?group=true",null, false,false,true)->rows; - +function cmp($a, $b) +{ + return $a->value > $b->value; +} +usort($rows, "cmp"); $dataValues = Array(); $i = 0; --- a/documents/index.php +++ b/documents/index.php @@ -18,6 +18,7 @@ $idtoname[$row->id] = trim($row->value->name); } $foidocsdb = $server->get_db('disclosr-foidocuments'); +//print_r($foidocsdb); try { $rows = $foidocsdb->get_view("app", "byDate", Array($endkey, '0000-00-00'), true, 20, null, $enddocid)->rows; if ($rows) { --- a/documents/rss.xml.php +++ b/documents/rss.xml.php @@ -31,11 +31,12 @@ //print_r($rows); +$i =0; foreach ($rows as $row) { //Create an empty FeedItem $newItem = $TestFeed->createNewItem(); //Add elements to the feed item - $newItem->setTitle($row->value->title); + $newItem->setTitle(preg_replace('/[\x00-\x1F\x80-\xFF]/', '', $row->value->title)); $newItem->setLink("http://disclosurelo.gs/view.php?id=" . $row->value->_id); $newItem->setDate(strtotime($row->value->date)); $newItem->setDescription(displayLogEntry($row, $idtoname)); @@ -43,6 +44,8 @@ $newItem->addElement('guid', "http://disclosurelo.gs/view.php?id=" . $row->value->_id, array('isPermaLink' => 'true')); //Now add the feed item $TestFeed->addItem($newItem); +$i++; +if ($i > 50) break; } //OK. Everything is done. Now genarate the feed. 
$TestFeed->generateFeed(); --- a/documents/scrapers/1d404c4934f74feacd00dcb434e7c10a.py +++ b/documents/scrapers/1d404c4934f74feacd00dcb434e7c10a.py @@ -6,8 +6,8 @@ #http://www.doughellmann.com/PyMOTW/abc/ class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper): - #def getTable(self,soup): - # return soup.find(id = "cphMain_C001_Col01").table + def getTable(self,soup): + return soup.findAll('table')[1] def getColumnCount(self): return 5 def getColumns(self,columns): --- a/documents/scrapers/41a166419503bb50e410c58be54c102f.py +++ b/documents/scrapers/41a166419503bb50e410c58be54c102f.py @@ -8,7 +8,7 @@ #http://www.doughellmann.com/PyMOTW/abc/ class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper): def getTable(self,soup): - return soup.find(id= "ctl00_MSO_ContentDiv").table + return soup.find(class_ = "rgMasterTable") def getColumns(self,columns): (id, title, description, notes) = columns --- a/documents/scrapers/601aedeef4344638d635bdd761e9fdba.py +++ b/documents/scrapers/601aedeef4344638d635bdd761e9fdba.py @@ -6,8 +6,8 @@ #http://www.doughellmann.com/PyMOTW/abc/ class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper): - #def getTable(self,soup): - # return soup.find(id = "ctl00_PlaceHolderMain_intro2__ControlWrapper_CerRichHtmlField").table + def getTable(self,soup): + return soup.find(id = "main").table def getColumnCount(self): return 4 def getColumns(self,columns): --- a/documents/scrapers/8c9421f852c441910bf1d93a57b31d64.py +++ b/documents/scrapers/8c9421f852c441910bf1d93a57b31d64.py @@ -5,6 +5,8 @@ #http://www.doughellmann.com/PyMOTW/abc/ class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper): + def getTable(self,soup): + return soup.find(id = "page_content").table def getColumns(self,columns): (id, date, title, description, notes) = columns return (id, date, title, description, notes) --- a/documents/scrapers/ad033512610d8e36886ab6a795f26561.py +++ 
b/documents/scrapers/ad033512610d8e36886ab6a795f26561.py @@ -6,8 +6,8 @@ #http://www.doughellmann.com/PyMOTW/abc/ class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper): - def getTable(self,soup): - return soup.find(id = "_ctl0__ctl0_MainContentPlaceHolder_MainContentPlaceHolder_ContentSpan").findAll("table")[3] +# def getTable(self,soup): +# return soup.find(_class = "content").table def getColumnCount(self): return 5 def getColumns(self,columns): --- a/documents/scrapers/e2a845e55bc9986e6c75c5ad2c508b8d.py +++ b/documents/scrapers/e2a845e55bc9986e6c75c5ad2c508b8d.py @@ -1,16 +1,54 @@ import sys,os sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../')) import genericScrapers -#RSS feed not detailed +import dateutil +from dateutil.parser import * +from datetime import * +import scrape +from bs4 import BeautifulSoup +class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper): -#http://www.doughellmann.com/PyMOTW/abc/ -class ScraperImplementation(genericScrapers.GenericRSSDisclogScraper): - def getColumns(self,columns): - (id, date, title, description, notes) = columns - return (id, date, title, description, notes) + def __init__(self): + super(ScraperImplementation, self).__init__() + def getTable(self, soup): + return soup.find(id='content') + + def getDescription(self,content, entry,doc): + link = None + links = [] + description = "" + for atag in entry.find_all('a'): + if atag.has_attr('href'): + link = scrape.fullurl(self.getURL(), atag['href']) + (url, mime_type, htcontent) = scrape.fetchURL(scrape.docsdb, link, "foidocuments", self.getAgencyID(), False) + if htcontent != None: + if mime_type == "text/html" or mime_type == "application/xhtml+xml" or mime_type =="application/xml": + soup = BeautifulSoup(htcontent) + row = soup.find(id="foidetails") + if row == None: + row = soup.find(id="content").table + if row == None: + row = soup.find(id="content") + description = ''.join(row.stripped_strings) + for atag in 
row.find_all("a"): + if atag.has_attr('href'): + links.append(scrape.fullurl(link, atag['href'])) + + if links != []: + doc.update({'links': links}) + if description != "": + doc.update({ 'description': description}) + + def getColumnCount(self): + return 3 + + def getColumns(self, columns): + (id, title, date) = columns + return (id, date, title, title, None) + if __name__ == '__main__': - print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericRSSDisclogScraper) - print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericRSSDisclogScraper) + print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper) + print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper) ScraperImplementation().doScrape()