import sys, os
sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
import scrape
from bs4 import BeautifulSoup
import parsedatetime as pdt
from time import mktime
from datetime import datetime
import feedparser
import abc


class GenericRSSDisclogScraper(object):
    __metaclass__ = abc.ABCMeta

    @abc.abstractmethod
    def getAgencyID(self):
        """ disclosr agency id """
        return

    @abc.abstractmethod
    def getURL(self):
        """ disclog URL """
        return

    def getDescription(self, entry, doc):
        """ get description from rss entry """
        doc['description'] = entry.summary
        return

    def doScrape(self):
        foidocsdb = scrape.couch['disclosr-foidocuments']
        (url, mime_type, content) = scrape.fetchURL(scrape.docsdb,
            self.getURL(), "foidocuments", self.getAgencyID())
        feed = feedparser.parse(content)
        for entry in feed.entries:
            # use a hash of the feed entry id as a stable CouchDB document id
            print entry.id
            hash = scrape.mkhash(entry.id)
            doc = foidocsdb.get(hash)
            if doc is None:
                print "saving"
                edate = datetime.fromtimestamp(
                    mktime(entry.published_parsed)).strftime("%Y-%m-%d")
                doc = {'_id': hash, 'agencyID': self.getAgencyID(),
                       'url': entry.link, 'docID': entry.id,
                       "date": edate, "title": entry.title}
                self.getDescription(entry, doc)
                foidocsdb.save(doc)
            else:
                print "already saved"


class GenericOAICDisclogScraper(object):
    __metaclass__ = abc.ABCMeta

    @abc.abstractmethod
    def getAgencyID(self):
        """ disclosr agency id """
        return

    @abc.abstractmethod
    def getURL(self):
        """ disclog URL """
        return

    @abc.abstractmethod
    def getColumns(self, columns):
        """ rearranges columns if required """
        return

    def doScrape(self):
        cal = pdt.Calendar()
        foidocsdb = scrape.couch['disclosr-foidocuments']
        (url, mime_type, content) = scrape.fetchURL(scrape.docsdb,
            self.getURL(), "foidocuments", self.getAgencyID())
        if content is not None:
            if mime_type in ("text/html", "application/xhtml+xml", "application/xml"):
                # http://www.crummy.com/software/BeautifulSoup/documentation.html
                soup = BeautifulSoup(content)
                for row in soup.table.find_all('tr'):
                    columns = row.find_all('td')
                    if len(columns) == 5:
                        (id, date, description, title, notes) = self.getColumns(columns)
                        print id.string
                        hash = scrape.mkhash(url + id.string)
                        # collect any links in the row, resolved against the page URL
                        links = []
                        for atag in row.find_all("a"):
                            if atag.has_attr('href'):
                                links.append(scrape.fullurl(url, atag['href']))
                        doc = foidocsdb.get(hash)
                        # flatten the description cell into plain text
                        descriptiontxt = ""
                        for string in description.stripped_strings:
                            descriptiontxt = descriptiontxt + " \n" + string
                        if doc is None:
                            print "saving"
                            dtresult = cal.parseDateText(date.string)
                            if len(dtresult) == 2:
                                (dtdate, dtr) = dtresult
                                print dtdate
                                edate = str(dtdate[0]) + '-' + str(dtdate[1]) + '-' + str(dtdate[2])
                            else:
                                edate = ""
                            doc = {'_id': hash, 'agencyID': self.getAgencyID(),
                                   'url': self.getURL(), "links": links,
                                   'docID': id.string, "date": edate,
                                   "description": descriptiontxt,
                                   "title": title.string, "notes": notes.string}
                            foidocsdb.save(doc)
                        else:
                            print "already saved"
                    elif len(row.find_all('th')) == 5:
                        print "header row"
                    else:
                        print "ERROR number of columns incorrect"
                        print row
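

# --- Example usage (illustrative sketch, not part of the original module) -----
# A concrete scraper only has to supply an agency id, a disclosure log URL and,
# for table-based logs, the column order. The agency id, URLs and column order
# below are hypothetical placeholders rather than real disclosr records.

class ExampleRSSDisclogScraper(GenericRSSDisclogScraper):
    def getAgencyID(self):
        return "0123456789abcdef0123456789abcdef"  # hypothetical disclosr agency id

    def getURL(self):
        return "http://www.example.gov.au/foi/disclosure-log/rss"  # hypothetical feed URL


class ExampleOAICDisclogScraper(GenericOAICDisclogScraper):
    def getAgencyID(self):
        return "fedcba9876543210fedcba9876543210"  # hypothetical disclosr agency id

    def getURL(self):
        return "http://www.example.gov.au/foi/disclosure-log.html"  # hypothetical disclog page

    def getColumns(self, columns):
        # assumed source table layout: id, date, title, description, notes;
        # reordered into the (id, date, description, title, notes) tuple doScrape expects
        (id, date, title, description, notes) = columns
        return (id, date, description, title, notes)


if __name__ == '__main__':
    ExampleRSSDisclogScraper().doScrape()
    ExampleOAICDisclogScraper().doScrape()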