From: Maxious
Date: Mon, 03 Dec 2012 07:49:55 +0000
Subject: beginning of docx/pdf scrapers
X-Git-Url: https://maxious.lambdacomplex.org/git/?p=disclosr.git&a=commitdiff&h=29ff266d09edba92b2e51ee98a7b3035428553e5
---
beginning of docx/pdf scrapers

Former-commit-id: 72b18d2f2bae7cfce33fb8639ad1523c7bbcc0a3
---
--- a/admin/refreshDesignDoc.php
+++ b/admin/refreshDesignDoc.php
@@ -9,6 +9,7 @@
 $obj->language = "javascript";
 $obj->views->all->map = "function(doc) { emit(doc._id, doc); };";
 $obj->views->byDate->map = "function(doc) { emit(doc.date, doc); };";
+$obj->views->byDate->reduce = "_count";
 $obj->views->byDateMonthYear->map = "function(doc) { emit(doc.date, doc); };";
 $obj->views->byDateMonthYear->reduce = "_count";
 $obj->views->byAgencyID->map = "function(doc) { emit(doc.agencyID, doc); };";
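Note: the one-line change above gives the byDate view a _count reduce, matching
the byDateMonthYear view below it, so grouped queries can return the number of
entries per date instead of the full documents. A minimal sketch of such a
query, assuming couchdb-python (as used elsewhere in this repo) and a design
document named "app" -- the actual name is whatever refreshDesignDoc.php saves:

    import couchdb  # couchdb-python, as used by scrape.py in this repo

    couch = couchdb.Server('http://127.0.0.1:5984/')  # assumed local CouchDB
    foidocsdb = couch['disclosr-foidocuments']
    # group=True applies the _count reduce per distinct date key,
    # yielding (date, number of entries) pairs
    for row in foidocsdb.view('app/byDate', group=True):
        print row.key, row.value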
--- a/documents/about.php
+++ /dev/null
@@ -1,11 +1,1 @@
-About
-[the other deleted lines of static "About" page markup did not survive extraction]
--- a/documents/genericScrapers.py
+++ b/documents/genericScrapers.py
@@ -11,6 +11,19 @@
 from datetime import *
 import codecs
+from StringIO import StringIO
+
+from docx import *
+from lxml import etree
+import zipfile
+
+from pdfminer.pdfparser import PDFDocument, PDFParser
+from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter, process_pdf
+from pdfminer.pdfdevice import PDFDevice, TagExtractor
+from pdfminer.converter import TextConverter
+from pdfminer.cmapdb import CMapDB
+from pdfminer.layout import LAParams
+
 class GenericDisclogScraper(object):
     __metaclass__ = abc.ABCMeta
     agencyID = None
@@ -35,11 +48,78 @@
         """ do the scraping """
         return
-    @abc.abstractmethod
-    def getDescription(self, content, entry, doc):
-        """ get description"""
-        return
-
+class GenericPDFDisclogScraper(GenericDisclogScraper):
+
+    def doScrape(self):
+        foidocsdb = scrape.couch['disclosr-foidocuments']
+        (url, mime_type, content) = scrape.fetchURL(scrape.docsdb,
+            self.getURL(), "foidocuments", self.getAgencyID())
+        # run the raw PDF bytes through pdfminer's text converter
+        laparams = LAParams()
+        rsrcmgr = PDFResourceManager(caching=True)
+        outfp = StringIO()
+        device = TextConverter(rsrcmgr, outfp, codec='utf-8', laparams=laparams)
+        fp = StringIO()
+        fp.write(content)
+        fp.seek(0)
+        process_pdf(rsrcmgr, device, fp, set(), caching=True, check_extractable=True)
+        description = outfp.getvalue()
+        fp.close()
+        device.close()
+        outfp.close()
+        # key the entry on a hash of the extracted text so re-scrapes
+        # of an unchanged log are detected as already saved
+        hash = scrape.mkhash(description)
+        doc = foidocsdb.get(hash)
+        if doc is None:
+            print "saving " + hash
+            edate = date.today().strftime("%Y-%m-%d")
+            doc = {'_id': hash, 'agencyID': self.getAgencyID(),
+                'url': self.getURL(), 'docID': hash, 'date': edate,
+                'title': "Disclosure Log Updated", 'description': description}
+            foidocsdb.save(doc)
+        else:
+            print "already saved"
+
+
+class GenericDOCXDisclogScraper(GenericDisclogScraper):
+
+    def doScrape(self):
+        foidocsdb = scrape.couch['disclosr-foidocuments']
+        (url, mime_type, content) = scrape.fetchURL(scrape.docsdb,
+            self.getURL(), "foidocuments", self.getAgencyID())
+        # a .docx file is a zip archive; the body text lives in word/document.xml
+        mydoc = zipfile.ZipFile(StringIO(content))
+        xmlcontent = mydoc.read('word/document.xml')
+        document = etree.fromstring(xmlcontent)
+        # fetch all the text out of the document we just created
+        paratextlist = getdocumenttext(document)
+        # make an explicit utf-8 version
+        newparatextlist = []
+        for paratext in paratextlist:
+            newparatextlist.append(paratext.encode("utf-8"))
+        # join the paragraphs with a blank line between each
+        description = '\n\n'.join(newparatextlist)
+        hash = scrape.mkhash(description)
+        doc = foidocsdb.get(hash)
+        if doc is None:
+            print "saving " + hash
+            edate = date.today().strftime("%Y-%m-%d")
+            doc = {'_id': hash, 'agencyID': self.getAgencyID(),
+                'url': self.getURL(), 'docID': hash, 'date': edate,
+                'title': "Disclosure Log Updated", 'description': description}
+            foidocsdb.save(doc)
+        else:
+            print "already saved"
 
 class GenericRSSDisclogScraper(GenericDisclogScraper):
@@ -137,12 +217,7 @@
         self.getDescription(description, row, doc)
         if notes != None:
             doc.update({'notes': (''.join(notes.stripped_strings))})
-        badtitles = ['-','Summary of FOI Request','FOI request(in summary form)','Summary of FOI request received by the ASC',
-'Summary of FOI request received by agency/minister','Description of Documents Requested','FOI request','Description of FOI Request','Summary of request','Description','Summary',
-'Summary of FOIrequest received by agency/minister','Summary of FOI request received','Description of FOI Request',"FOI request",'Results 1 to 67 of 67']
-        if doc['title'] not in badtitles and doc['description'] != '':
-            print "saving"
-            foidocsdb.save(doc)
+        foidocsdb.save(doc)
     else:
         print "already saved " + hash
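Note: the two new classes follow the repo's existing scraper pattern, where an
agency scraper just subclasses a generic scraper and inherits doScrape(). A
minimal sketch of a PDF-based agency scraper, assuming getAgencyID() and
getURL() are overridden the way the files in documents/scrapers/ do -- the ID
and URL below are placeholders, not real agency data:

    import sys, os
    sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '..'))
    import genericScrapers

    class ScraperImplementation(genericScrapers.GenericPDFDisclogScraper):

        def getAgencyID(self):
            return "exampleAgencyID"  # placeholder, not a real agency ID

        def getURL(self):
            # placeholder URL for a disclosure log published as a PDF
            return "http://www.example.gov.au/foi/disclosure-log.pdf"

    if __name__ == '__main__':
        print 'Subclass of GenericPDFDisclogScraper'
        ScraperImplementation().doScrape()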
--- a/documents/rss.xml.php
+++ b/documents/rss.xml.php
@@ -9,9 +9,9 @@
 $TestFeed = new RSS2FeedWriter();
 //Setting the channel elements
 //Use wrapper functions for common channel elements
-$TestFeed->setTitle('Last Modified - All');
+$TestFeed->setTitle('disclosurelo.gs Newest Entries - All');
 $TestFeed->setLink('http://disclosurelo.gs/rss.xml.php');
-$TestFeed->setDescription('Latest entries');
+$TestFeed->setDescription('disclosurelo.gs Newest Entries - All Agencies');
 $TestFeed->setChannelElement('language', 'en-us');
 $TestFeed->setChannelElement('pubDate', date(DATE_RSS, time()));
 //Retrieving information from database
@@ -28,10 +28,10 @@
     $newItem = $TestFeed->createNewItem();
     //Add elements to the feed item
     $newItem->setTitle($row->value->title);
-    $newItem->setLink("view.php?id=".$row->value->_id);
-    $newItem->setDate(date("c", strtotime($row->value->date)));
+    $newItem->setLink("http://disclosurelo.gs/view.php?id=".$row->value->_id);
+    $newItem->setDate(strtotime($row->value->date));
     $newItem->setDescription(displayLogEntry($row, $idtoname));
-    $newItem->addElement('guid', $row->value->_id, array('isPermaLink'=>'true'));
+    $newItem->addElement('guid', "http://disclosurelo.gs/view.php?id=".$row->value->_id, array('isPermaLink'=>'true'));
     //Now add the feed item
     $TestFeed->addItem($newItem);
 }
--- a/documents/scrapers/227cb6eb7d2c9f8a6e846df7447d6caa.py
+++ b/documents/scrapers/227cb6eb7d2c9f8a6e846df7447d6caa.py
@@ -21,10 +21,9 @@
     for row in soup.find(class_="ms-rteTable-GreyAlternating").find_all('tr'):
         if row != None:
             rowtitle = row.find('th').string
-            if rowtitle != None:
-                description = description + "\n" + rowtitle + ": "
+            description = description + "\n" + rowtitle + ": "
             for text in row.find('td').stripped_strings:
-                description = description + text
+                description = description + text
             for atag in row.find_all("a"):
                 if atag.has_key('href'):
                     links.append(scrape.fullurl(link, atag['href']))
--- a/documents/template.inc.php
+++ b/documents/template.inc.php
@@ -89,7 +89,7 @@
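Note: both new scrapers fold a whole file into a single description string.
The PDF side is the standard 2012-era pdfminer recipe; isolated into a helper,
it might look like the sketch below, using only calls the commit itself
imports -- the function name pdf_to_text is illustrative, not part of the
commit:

    from StringIO import StringIO

    from pdfminer.pdfinterp import PDFResourceManager, process_pdf
    from pdfminer.converter import TextConverter
    from pdfminer.layout import LAParams

    def pdf_to_text(data):
        """Extract plain text from a PDF supplied as a byte string."""
        rsrcmgr = PDFResourceManager(caching=True)
        outfp = StringIO()
        device = TextConverter(rsrcmgr, outfp, codec='utf-8', laparams=LAParams())
        fp = StringIO(data)
        # an empty pagenos set means "process every page"
        process_pdf(rsrcmgr, device, fp, set(), caching=True, check_extractable=True)
        fp.close()
        device.close()
        text = outfp.getvalue()
        outfp.close()
        return text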