--- a/documents/genericScrapers.py
+++ b/documents/genericScrapers.py
@@ -1,26 +1,105 @@
 import sys,os
 sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
 import scrape
+from bs4 import BeautifulSoup
+from time import mktime
+import feedparser
+import abc
+import unicodedata, re
+import dateutil
+from dateutil.parser import *
+from datetime import *
-from bs4 import BeautifulSoup
-import abc
-import dateutil.parser
 
+class GenericDisclogScraper(object):
+        __metaclass__ = abc.ABCMeta
+        agencyID = None
+        disclogURL = None
+        def remove_control_chars(self, input):
+                return "".join([i for i in input if ord(i) in range(32, 127)])
+        def getAgencyID(self):
+                """ disclosr agency id """
+                if self.agencyID == None:
+                        self.agencyID = os.path.basename(sys.argv[0]).replace(".py","")
+                return self.agencyID
 
-class GenericOAICDisclogScraper(object):
-        __metaclass__ = abc.ABCMeta
+        def getURL(self):
+                """ disclog URL"""
+                if self.disclogURL == None:
+                        agency = scrape.agencydb.get(self.getAgencyID())
+                        self.disclogURL = agency['FOIDocumentsURL']
+                return self.disclogURL
+
         @abc.abstractmethod
-        def getAgencyID(self):
-                """ disclosr agency id """
+        def doScrape(self):
+                """ do the scraping """
                 return
 
         @abc.abstractmethod
-        def getURL(self):
-                """ disclog URL"""
+        def getDescription(self, content, entry, doc):
+                """ get description"""
                 return
+
+
+class GenericRSSDisclogScraper(GenericDisclogScraper):
+
+        def doScrape(self):
+                foidocsdb = scrape.couch['disclosr-foidocuments']
+                (url,mime_type,content) = scrape.fetchURL(scrape.docsdb, self.getURL(), "foidocuments", self.getAgencyID())
+                feed = feedparser.parse(content)
+                for entry in feed.entries:
+                        #print entry
+                        print entry.id
+                        hash = scrape.mkhash(entry.id)
+                        #print hash
+                        doc = foidocsdb.get(hash)
+                        #print doc
+                        if doc == None:
+                                print "saving "+ hash
+                                edate = datetime.fromtimestamp(mktime( entry.published_parsed)).strftime("%Y-%m-%d")
+                                doc = {'_id': hash, 'agencyID': self.getAgencyID(), 'url': entry.link, 'docID': entry.id,
+                                "date": edate,"title": entry.title}
+                                self.getDescription(entry,entry, doc)
+                                foidocsdb.save(doc)
+                        else:
+                                print "already saved"
+        def getDescription(self, content, entry, doc):
+                """ get description from rss entry"""
+                doc.update({'description': content.summary})
+                return
+
+class GenericOAICDisclogScraper(GenericDisclogScraper):
+        __metaclass__ = abc.ABCMeta
         @abc.abstractmethod
         def getColumns(self,columns):
                 """ rearranges columns if required """
+                return
+        def getColumnCount(self):
+                return 5
+        def getDescription(self, content, entry, doc):
+                """ get description from rss entry"""
+                descriptiontxt = ""
+                for string in content.stripped_strings:
+                        descriptiontxt = descriptiontxt + " \n" + string
+                doc.update({'description': descriptiontxt})
+                return
+        def getTitle(self, content, entry, doc):
+                doc.update({'title': (''.join(content.stripped_strings))})
+                return
+        def getTable(self, soup):
+                return soup.table
+        def getDate(self, content, entry, doc):
+                edate = parse(''.join(content.stripped_strings).strip(), dayfirst=True, fuzzy=True).strftime("%Y-%m-%d")
+                print edate
+                doc.update({'date': edate})
+                return
+        def getLinks(self, content, entry, doc):
+                links = []
+                for atag in entry.find_all("a"):
+                        if atag.has_key('href'):
+                                links.append(scrape.fullurl(content,atag['href']))
+                if links != []:
+                        doc.update({'links': links})
                 return
 
         def doScrape(self):
@@ -30,31 +109,32 @@
                 if mime_type == "text/html" or mime_type == "application/xhtml+xml" or mime_type =="application/xml":
                         # http://www.crummy.com/software/BeautifulSoup/documentation.html
                         soup = BeautifulSoup(content)
-                        for row in soup.table.find_all('tr'):
+                        table = self.getTable(soup)
+                        for row in table.find_all('tr'):
                                 columns = row.find_all('td')
-                                if len(columns) == 5:
+                                if len(columns) == self.getColumnCount():
                                         (id, date, description, title, notes) = self.getColumns(columns)
-                                        print id.string
-                                        hash = scrape.mkhash(url+id.string)
-                                        links = []
-                                        for atag in row.find_all("a"):
-                                                if atag.has_key('href'):
-                                                        links.append(scrape.fullurl(url,atag['href']))
+                                        print ''.join(id.stripped_strings)
+                                        if id.string == None:
+                                                hash = scrape.mkhash(self.remove_control_chars(url+(''.join(date.stripped_strings))))
+                                        else:
+                                                hash = scrape.mkhash(self.remove_control_chars(url+(''.join(id.stripped_strings))))
                                         doc = foidocsdb.get(hash)
-                                        descriptiontxt = ""
-                                        for string in description.stripped_strings:
-                                                descriptiontxt = descriptiontxt + string
 
                                         if doc == None:
-                                                print "saving"
-                                                edate = dateutil.parser.parse(date.string).date().strftime("%Y-%m-%d")
-                                                doc = {'_id': hash, 'agencyID': self.getAgencyID(), 'url': self.getURL(), "links": links, 'docID': id.string,
-                                                "date": edate, "description": descriptiontxt,"title": title.string,"notes": notes.string}
+                                                print "saving " +hash
+                                                doc = {'_id': hash, 'agencyID': self.getAgencyID(), 'url': self.getURL(), 'docID': (''.join(id.stripped_strings))}
+                                                self.getLinks(self.getURL(),row,doc)
+                                                self.getTitle(title,row, doc)
+                                                self.getDate(date,row, doc)
+                                                self.getDescription(description,row, doc)
+                                                if notes != None:
+                                                        doc.update({ 'notes': (''.join(notes.stripped_strings))})
                                                 foidocsdb.save(doc)
                                         else:
-                                                print "already saved"
+                                                print "already saved "+hash
 
-                                elif len(row.find_all('th')) == 5:
+                                elif len(row.find_all('th')) == self.getColumnCount():
                                         print "header row"
 
                                 else:
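
For an agency whose disclosure log is published as an RSS feed, a scraper script can now simply subclass GenericRSSDisclogScraper: the base class resolves the agency ID from the script filename and the feed URL from the agency record's FOIDocumentsURL, and uses each entry's summary as the description. A minimal sketch under those assumptions (the file name and subclass name below are hypothetical, not part of this change):

# hypothetical agency scraper, e.g. documents/scrapers/exampleAgency.py;
# the agency ID is derived from sys.argv[0] and the feed URL is looked up
# via scrape.agencydb, so nothing needs to be overridden here
import genericScrapers

class ScraperImplementation(genericScrapers.GenericRSSDisclogScraper):
        # doScrape() and getDescription() are inherited from the RSS base class
        pass

if __name__ == '__main__':
        ScraperImplementation().doScrape()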
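
For a disclosure log published as an HTML table, a subclass of GenericOAICDisclogScraper only has to supply the abstract getColumns() hook, mapping that agency's cell order onto the (id, date, description, title, notes) tuple that doScrape() expects; getTable(), getColumnCount(), getDate() and the other hooks can be overridden when the defaults do not fit. A minimal sketch under those assumptions (the class name, column order and CSS class below are hypothetical, not part of this change):

# hypothetical table-based agency scraper; the column order and the
# "inner-column" selector are illustrative only and must match the
# agency's actual disclosure-log page
import genericScrapers

class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
        def getColumns(self, columns):
                # this (hypothetical) agency lists title before description,
                # so reorder the cells into the tuple doScrape() expects
                (id, date, title, description, notes) = columns
                return (id, date, description, title, notes)

        def getTable(self, soup):
                # optional override of the new hook: pick a specific table
                # instead of the first one on the page
                return soup.find(class_="inner-column").table

if __name__ == '__main__':
        ScraperImplementation().doScrape()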