--- a/documents/genericScrapers.py
+++ b/documents/genericScrapers.py
@@ -19,6 +19,12 @@
     def getURL(self):
         """ disclog URL"""
         return
+
+    def getDescription(self, entry, doc):
+        """ get description from rss entry"""
+        doc['description'] = entry.summary
+        return
+
     def doScrape(self):
         foidocsdb = scrape.couch['disclosr-foidocuments']
         (url,mime_type,content) = scrape.fetchURL(scrape.docsdb, self.getURL(), "foidocuments", self.getAgencyID())
@@ -26,13 +32,16 @@
         for entry in feed.entries:
             #print entry
             print entry.id
-            hash = scrape.mkhash(entry.link)
+            hash = scrape.mkhash(entry.id)
+            #print hash
             doc = foidocsdb.get(hash)
+            #print doc
             if doc == None:
                 print "saving"
                 edate = datetime.fromtimestamp(mktime( entry.published_parsed)).strftime("%Y-%m-%d")
-                doc = {'id': hash, 'agencyID': self.getAgencyID(), 'url': entry.link, 'docID': entry.id,
-                       "date": edate, "description": entry.summary,"title": entry.title}
+                doc = {'_id': hash, 'agencyID': self.getAgencyID(), 'url': entry.link, 'docID': entry.id,
+                       "date": edate,"title": entry.title}
+                self.getDescription(entry, doc)
                 foidocsdb.save(doc)
             else:
                 print "already saved"
@@ -86,7 +95,7 @@
                 edate = ""+str(dtdate[0])+'-'+str(dtdate[1])+'-'+str(dtdate[2])
             else:
                 edate = ""
-            doc = {'id': hash, 'agencyID': self.getAgencyID(), 'url': self.getURL(), "links": links, 'docID': id.string,
+            doc = {'_id': hash, 'agencyID': self.getAgencyID(), 'url': self.getURL(), "links": links, 'docID': id.string,
                    "date": edate, "description": descriptiontxt,"title": title.string,"notes": notes.string}
             foidocsdb.save(doc)
         else:
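
The substantive change in the RSS path is that the document is now keyed and deduplicated by entry.id rather than entry.link, is stored under CouchDB's native '_id' field, and no longer hard-codes the description: the base class calls self.getDescription(entry, doc), which subclasses can override. As a minimal sketch of how a scraper might use that hook (the subclass name ExampleDisclogScraper, the base class name GenericRSSDisclogScraper, and the HTML-stripping behaviour are illustrative assumptions, not part of this patch):

    # Illustrative only: a hypothetical subclass overriding the new
    # getDescription() hook to strip markup from the feed summary
    # before the document is saved to CouchDB.
    import re
    import genericScrapers  # the module this patch modifies

    class ExampleDisclogScraper(genericScrapers.GenericRSSDisclogScraper):  # base class name assumed
        def getDescription(self, entry, doc):
            """ store the rss summary with any html tags removed """
            doc['description'] = re.sub(r'<[^>]+>', '', entry.summary)
            return

Because the base class saves whatever the hook puts in doc['description'], an override like this changes only how the description is derived, not how or where the document is stored.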