--- a/documents/genericScrapers.py
+++ b/documents/genericScrapers.py
@@ -2,16 +2,35 @@
 sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
 import scrape
 from bs4 import BeautifulSoup
-import parsedatetime as pdt
 from time import mktime
-from datetime import datetime
 import feedparser
 import abc
+import unicodedata, re
+import dateutil
+from dateutil.parser import *
+from datetime import *
+import codecs
+
+from StringIO import StringIO
+
+from docx import *
+from lxml import etree
+import zipfile
+
+from pdfminer.pdfparser import PDFDocument, PDFParser
+from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter, process_pdf
+from pdfminer.pdfdevice import PDFDevice, TagExtractor
+from pdfminer.converter import TextConverter
+from pdfminer.cmapdb import CMapDB
+from pdfminer.layout import LAParams
 
 class GenericDisclogScraper(object):
     __metaclass__ = abc.ABCMeta
     agencyID = None
    disclogURL = None
+    def remove_control_chars(self, input):
+        """ strip non-printable characters before hashing or printing """
+        return "".join([i for i in input if ord(i) in range(32, 127)])
     def getAgencyID(self):
         """ disclosr agency id """
         if self.agencyID == None:
@@ -30,10 +49,70 @@
         """ do the scraping """
         return
 
+class GenericPDFDisclogScraper(GenericDisclogScraper):
+
+    def doScrape(self):
+        foidocsdb = scrape.couch['disclosr-foidocuments']
+        (url, mime_type, content) = scrape.fetchURL(scrape.docsdb,
+            self.getURL(), "foidocuments", self.getAgencyID())
+        # render the PDF to plain text with pdfminer
+        laparams = LAParams()
+        rsrcmgr = PDFResourceManager(caching=True)
+        outfp = StringIO()
+        device = TextConverter(rsrcmgr, outfp, codec='utf-8', laparams=laparams)
+        fp = StringIO(content)
+        process_pdf(rsrcmgr, device, fp, set(), caching=True,
+            check_extractable=True)
+        description = outfp.getvalue()
+        fp.close()
+        device.close()
+        outfp.close()
+
+        # hash the extracted text so a changed log is saved as a new document
+        hash = scrape.mkhash(description)
+        doc = foidocsdb.get(hash)
+        if doc == None:
+            print "saving " + hash
+            edate = date.today().strftime("%Y-%m-%d")
+            doc = {'_id': hash, 'agencyID': self.getAgencyID(),
+                'url': self.getURL(), 'docID': hash, "date": edate,
+                "title": "Disclosure Log Updated", "description": description}
+            foidocsdb.save(doc)
+        else:
+            print "already saved"
+
+
+class GenericDOCXDisclogScraper(GenericDisclogScraper):
+
+    def doScrape(self):
+        foidocsdb = scrape.couch['disclosr-foidocuments']
+        (url, mime_type, content) = scrape.fetchURL(scrape.docsdb,
+            self.getURL(), "foidocuments", self.getAgencyID())
+        # a .docx file is a zip archive; the body text lives in word/document.xml
+        mydoc = zipfile.ZipFile(StringIO(content))
+        xmlcontent = mydoc.read('word/document.xml')
+        document = etree.fromstring(xmlcontent)
+        # fetch all the text out of the document
+        paratextlist = getdocumenttext(document)
+        # make an explicit unicode version
+        newparatextlist = []
+        for paratext in paratextlist:
+            newparatextlist.append(paratext.encode("utf-8"))
+        # join the paragraphs with two newlines between each
+        description = '\n\n'.join(newparatextlist)
+
+        hash = scrape.mkhash(description)
+        doc = foidocsdb.get(hash)
+        if doc == None:
+            print "saving " + hash
+            edate = date.today().strftime("%Y-%m-%d")
+            doc = {'_id': hash, 'agencyID': self.getAgencyID(),
+                'url': self.getURL(), 'docID': hash, "date": edate,
+                "title": "Disclosure Log Updated", "description": description}
+            foidocsdb.save(doc)
+        else:
+            print "already saved"
+
 class GenericRSSDisclogScraper(GenericDisclogScraper):
-    def getDescription(self, entry, doc):
-        """ get description from rss entry"""
-        doc['description'] = entry.summary
-        return
     def doScrape(self):
         foidocsdb = scrape.couch['disclosr-foidocuments']
@@ -50,14 +129,18 @@
         doc = foidocsdb.get(hash)
         #print doc
         if doc == None:
-            print "saving"
+            print "saving " + hash
             edate = datetime.fromtimestamp(mktime(entry.published_parsed)).strftime("%Y-%m-%d")
             doc = {'_id': hash, 'agencyID': self.getAgencyID(), 'url': entry.link, 'docID': entry.id,
             "date": edate,"title": entry.title}
-            self.getDescription(entry, doc)
+            self.getDescription(entry, entry, doc)
             foidocsdb.save(doc)
         else:
             print "already saved"
+    def getDescription(self, content, entry, doc):
+        """ get description from rss entry"""
+        doc.update({'description': content.summary})
+        return
 
 class GenericOAICDisclogScraper(GenericDisclogScraper):
     __metaclass__ = abc.ABCMeta
@@ -65,44 +148,74 @@
     def getColumns(self, columns):
         """ rearranges columns if required """
         return
+    def getColumnCount(self):
+        return 5
+    def getDescription(self, content, entry, doc):
+        """ get description from table cell """
+        descriptiontxt = ""
+        for string in content.stripped_strings:
+            descriptiontxt = descriptiontxt + " \n" + string
+        doc.update({'description': descriptiontxt})
+        return
+    def getTitle(self, content, entry, doc):
+        doc.update({'title': (''.join(content.stripped_strings))})
+        return
+    def getTable(self, soup):
+        return soup.table
+    def getRows(self, table):
+        return table.find_all('tr')
+    def getDate(self, content, entry, doc):
+        date = ''.join(content.stripped_strings).strip()
+        # drop any bracketed suffix and fix a known source typo before parsing
+        (a, b, c) = date.partition("(")
+        date = self.remove_control_chars(a.replace("Octber", "October"))
+        print date
+        edate = parse(date, dayfirst=True, fuzzy=True).strftime("%Y-%m-%d")
+        print edate
+        doc.update({'date': edate})
+        return
+    def getLinks(self, content, entry, doc):
+        links = []
+        for atag in entry.find_all("a"):
+            if atag.has_key('href'):
+                links.append(scrape.fullurl(content, atag['href']))
+        if links != []:
+            doc.update({'links': links})
+        return
 
     def doScrape(self):
-        cal = pdt.Calendar()
         foidocsdb = scrape.couch['disclosr-foidocuments']
         (url,mime_type,content) = scrape.fetchURL(scrape.docsdb, self.getURL(), "foidocuments", self.getAgencyID())
         if content != None:
             if mime_type == "text/html" or mime_type == "application/xhtml+xml" or mime_type =="application/xml":
             # http://www.crummy.com/software/BeautifulSoup/documentation.html
                 soup = BeautifulSoup(content)
-                for row in soup.table.find_all('tr'):
+                table = self.getTable(soup)
+                for row in self.getRows(table):
                     columns = row.find_all('td')
-                    if len(columns) == 5:
-                        (id, date, description, title, notes) = self.getColumns(columns)
-                        print id.string
-                        hash = scrape.mkhash(url+id.string)
-                        links = []
-                        for atag in row.find_all("a"):
-                            if atag.has_key('href'):
-                                links.append(scrape.fullurl(url,atag['href']))
+                    if len(columns) == self.getColumnCount():
+                        (id, date, title, description, notes) = self.getColumns(columns)
+                        print self.remove_control_chars(''.join(id.stripped_strings))
+                        # fall back to the date cell when the id cell is empty
+                        if id.string == None:
+                            hash = scrape.mkhash(self.remove_control_chars(url + (''.join(date.stripped_strings))))
+                        else:
+                            hash = scrape.mkhash(self.remove_control_chars(url + (''.join(id.stripped_strings))))
                         doc = foidocsdb.get(hash)
-                        descriptiontxt = ""
-                        for string in description.stripped_strings:
-                            descriptiontxt = descriptiontxt + " \n" + string
                         if doc == None:
-                            print "saving"
-                            dtresult = cal.parseDateText(date.string)
-                            if len(dtresult) == 2:
-                                (dtdate,dtr) = dtresult
-                                print dtdate
-                                edate = ""+str(dtdate[0])+'-'+str(dtdate[1])+'-'+str(dtdate[2])
-                            else:
-                                edate = ""
-                            doc = {'_id': hash, 'agencyID': self.getAgencyID(), 'url': self.getURL(), "links": links, 'docID': id.string,
-                            "date": edate, "description": descriptiontxt,"title": title.string,"notes": notes.string}
+                            print "saving " + hash
+                            doc = {'_id': hash, 'agencyID': self.getAgencyID(),
+                                'url': self.getURL(), 'docID': (''.join(id.stripped_strings))}
+                            self.getLinks(self.getURL(), row, doc)
+                            self.getTitle(title, row, doc)
+                            self.getDate(date, row, doc)
+                            self.getDescription(description, row, doc)
+                            if notes != None:
+                                doc.update({'notes': (''.join(notes.stripped_strings))})
                             foidocsdb.save(doc)
                         else:
-                            print "already saved"
+                            print "already saved " + hash
 
-                    elif len(row.find_all('th')) == 5:
+                    elif len(row.find_all('th')) == self.getColumnCount():
                         print "header row"
                     else:
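
Usage note: agency-specific scrapers are expected to subclass these generics and override the hook methods (getTable, getColumnCount, getColumns and friends) to match each disclosure log's layout. Below is a minimal sketch of such a subclass, assuming the base class reads the agencyID and disclogURL attributes; the class name, agency ID, URL and four-column layout are invented for illustration, not taken from this patch.

import genericScrapers

class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
    # hypothetical values: a real scraper points at an actual agency's log
    agencyID = "exampleagency"
    disclogURL = "http://www.example.gov.au/foi/disclosure-log"

    def getColumnCount(self):
        # this log has four columns rather than the default five
        return 4

    def getColumns(self, columns):
        # map the table cells onto the (id, date, title, description, notes)
        # tuple that doScrape expects; this log has no notes column, and
        # doScrape skips notes when it is None
        (id, date, title, description) = columns
        return (id, date, title, description, None)

if __name__ == '__main__':
    ScraperImplementation().doScrape()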