1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 | import sys,os sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../')) import scrape from bs4 import BeautifulSoup import parsedatetime as pdt from time import mktime from datetime import datetime import feedparser import abc class GenericDisclogScraper(object): __metaclass__ = abc.ABCMeta agencyID = None disclogURL = None def getAgencyID(self): """ disclosr agency id """ if self.agencyID == None: self.agencyID = os.path.basename(sys.argv[0]).replace(".py","") return self.agencyID def getURL(self): """ disclog URL""" if self.disclogURL == None: agency = scrape.agencydb.get(self.getAgencyID()) self.disclogURL = agency['FOIDocumentsURL'] return self.disclogURL @abc.abstractmethod def doScrape(self): """ do the scraping """ return @abc.abstractmethod def getDescription(self, content, entry, doc): """ get description""" return class GenericRSSDisclogScraper(GenericDisclogScraper): def doScrape(self): foidocsdb = scrape.couch['disclosr-foidocuments'] (url,mime_type,content) = scrape.fetchURL(scrape.docsdb, self.getURL(), "foidocuments", self.getAgencyID()) feed = feedparser.parse(content) for entry in feed.entries: #print entry print entry.id hash = scrape.mkhash(entry.id) #print hash doc = foidocsdb.get(hash) #print doc if doc == None: print "saving" edate = datetime.fromtimestamp(mktime( entry.published_parsed)).strftime("%Y-%m-%d") doc = {'_id': hash, 'agencyID': self.getAgencyID(), 'url': entry.link, 'docID': entry.id, "date": edate,"title": entry.title} self.getDescription(entry,entry, doc) foidocsdb.save(doc) else: print "already saved" def getDescription(self, content, entry, doc): """ get description from rss entry""" doc.update({'description': content.summary}) return class GenericOAICDisclogScraper(GenericDisclogScraper): __metaclass__ = abc.ABCMeta @abc.abstractmethod def getColumns(self,columns): """ rearranges columns if required """ return def getColumnCount(self): return 5 def getDescription(self, content, entry, doc): """ get description from rss entry""" descriptiontxt = "" for string in content.stripped_strings: descriptiontxt = descriptiontxt + " \n" + string doc.update({'description': descriptiontxt}) return def doScrape(self): cal = pdt.Calendar() foidocsdb = scrape.couch['disclosr-foidocuments'] (url,mime_type,content) = scrape.fetchURL(scrape.docsdb, self.getURL(), "foidocuments", self.getAgencyID()) if content != None: if mime_type == "text/html" or mime_type == "application/xhtml+xml" or mime_type =="application/xml": # http://www.crummy.com/software/BeautifulSoup/documentation.html soup = BeautifulSoup(content) for row in soup.table.find_all('tr'): columns = row.find_all('td') if len(columns) == self.getColumnCount(): (id, date, description, title, notes) = self.getColumns(columns) print id.string hash = scrape.mkhash(url+id.string) links = [] for atag in row.find_all("a"): if atag.has_key('href'): links.append(scrape.fullurl(url,atag['href'])) doc = foidocsdb.get(hash) if doc == None: print "saving" dtresult = cal.parseDateText(date.string) if len(dtresult) == 2: (dtdate,dtr) = dtresult print dtdate edate = ""+str(dtdate[0])+'-'+str(dtdate[1])+'-'+str(dtdate[2]) else: edate = datetime.strptime(date.string, "%d %B %Y").strftime("%Y-%m-%d") doc = {'_id': hash, 'agencyID': self.getAgencyID(), 'url': self.getURL(), 'docID': id.string, "date": edate,"title": title.string} self.getDescription(description,row, doc) if links != []: doc.update({'links': links}) if notes != None: doc.update({ 'notes': notes.string}) foidocsdb.save(doc) else: print "already saved "+hash elif len(row.find_all('th')) == self.getColumnCount(): print "header row" else: print "ERROR number of columns incorrect" print row |