import sys
import os
# Make the parent directory importable so the shared `scrape` helper resolves.
sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
import scrape
from bs4 import BeautifulSoup
from time import mktime
import feedparser
import abc
import unicodedata
import re
import dateutil
from dateutil.parser import *
from datetime import *


class GenericDisclogScraper(object):
    """Base class for FOI disclosure-log scrapers.

    Subclasses implement doScrape()/getDescription(); agency identity and
    the disclosure-log URL are resolved lazily from the script name and the
    agency database.
    """
    __metaclass__ = abc.ABCMeta
    # Lazily-resolved cache fields (see getAgencyID / getURL).
    agencyID = None
    disclogURL = None

    def remove_control_chars(self, input):
        """Return *input* with every char outside printable ASCII (32-126) removed."""
        return "".join(c for c in input if 32 <= ord(c) < 127)

    def getAgencyID(self):
        """disclosr agency id — derived from the running script's filename."""
        if self.agencyID is None:
            self.agencyID = os.path.basename(sys.argv[0]).replace(".py", "")
        return self.agencyID

    def getURL(self):
        """disclog URL — fetched once from the agency record and cached."""
        if self.disclogURL is None:
            agency = scrape.agencydb.get(self.getAgencyID())
            self.disclogURL = agency['FOIDocumentsURL']
        return self.disclogURL

    @abc.abstractmethod
    def doScrape(self):
        """Do the scraping: fetch the disclosure log and store documents."""
        return

    @abc.abstractmethod
    def getDescription(self, content, entry, doc):
        """Extract a description from *content* and store it on *doc*."""
        return


class GenericRSSDisclogScraper(GenericDisclogScraper):
    """Scraper for agencies that publish their disclosure log as an RSS feed."""

    def doScrape(self):
        foidocsdb = scrape.couch['disclosr-foidocuments']
        (url, mime_type, content) = scrape.fetchURL(
            scrape.docsdb, self.getURL(), "foidocuments", self.getAgencyID())
        feed = feedparser.parse(content)
        for entry in feed.entries:
            print(entry.id)
            doc_hash = scrape.mkhash(entry.id)
            doc = foidocsdb.get(doc_hash)
            if doc is None:
                print("saving " + doc_hash)
                edate = datetime.fromtimestamp(
                    mktime(entry.published_parsed)).strftime("%Y-%m-%d")
                doc = {'_id': doc_hash,
                       'agencyID': self.getAgencyID(),
                       'url': entry.link,
                       'docID': entry.id,
                       "date": edate,
                       "title": entry.title}
                # The feed entry serves as both the content and the entry arg.
                self.getDescription(entry, entry, doc)
                foidocsdb.save(doc)
            else:
                print("already saved")

    def getDescription(self, content, entry, doc):
        """get description from rss entry — the feed's summary field."""
        doc.update({'description': content.summary})
        return


class GenericOAICDisclogScraper(GenericDisclogScraper):
    """Scraper for OAIC-style HTML disclosure-log tables (one row per document)."""
    __metaclass__ = abc.ABCMeta

    @abc.abstractmethod
    def getColumns(self, columns):
        """Rearrange *columns* into (id, date, title, description, notes)."""
        return

    def getColumnCount(self):
        """Number of <td> cells a data row must have."""
        return 5

    def getDescription(self, content, entry, doc):
        """Join the cell's strings (newline-separated) into the description."""
        descriptiontxt = "".join(" \n" + s for s in content.stripped_strings)
        doc.update({'description': descriptiontxt})
        return

    def getTitle(self, content, entry, doc):
        doc.update({'title': ''.join(content.stripped_strings)})
        return

    def getTable(self, soup):
        # Default: the first <table> on the page; subclasses may override.
        return soup.table

    def getRows(self, table):
        return table.find_all('tr')

    def getDate(self, content, entry, doc):
        """Parse the date cell into ISO YYYY-MM-DD and store it on *doc*."""
        date = ''.join(content.stripped_strings).strip()
        # Drop any parenthesised trailer, e.g. "1 May 2012 (updated)".
        (date, _, _) = date.partition("(")
        # Known typo seen in source logs.
        date = date.replace("Octber", "October")
        print(date)
        edate = parse(date, dayfirst=True, fuzzy=True).strftime("%Y-%m-%d")
        print(edate)
        doc.update({'date': edate})
        return

    def getLinks(self, content, entry, doc):
        """Collect absolute URLs for every <a href> in *entry*."""
        # bs4 removed Tag.has_key; has_attr is the supported check.
        links = [scrape.fullurl(content, atag['href'])
                 for atag in entry.find_all("a") if atag.has_attr('href')]
        if links:
            doc.update({'links': links})
        return

    def doScrape(self):
        foidocsdb = scrape.couch['disclosr-foidocuments']
        (url, mime_type, content) = scrape.fetchURL(
            scrape.docsdb, self.getURL(), "foidocuments", self.getAgencyID())
        if content is None:
            return
        if mime_type not in ("text/html", "application/xhtml+xml",
                             "application/xml"):
            return
        # http://www.crummy.com/software/BeautifulSoup/documentation.html
        soup = BeautifulSoup(content)
        table = self.getTable(soup)
        for row in self.getRows(table):
            columns = row.find_all('td')
            if len(columns) == self.getColumnCount():
                (id, date, title, description, notes) = \
                    self.getColumns(columns)
                print(''.join(id.stripped_strings))
                # Key on the id cell when it has text, otherwise on the date.
                if id.string is None:
                    doc_hash = scrape.mkhash(self.remove_control_chars(
                        url + ''.join(date.stripped_strings)))
                else:
                    doc_hash = scrape.mkhash(self.remove_control_chars(
                        url + ''.join(id.stripped_strings)))
                doc = foidocsdb.get(doc_hash)
                if doc is None:
                    print("saving " + doc_hash)
                    doc = {'_id': doc_hash,
                           'agencyID': self.getAgencyID(),
                           'url': self.getURL(),
                           'docID': ''.join(id.stripped_strings)}
                    self.getLinks(self.getURL(), row, doc)
                    self.getTitle(title, row, doc)
                    self.getDate(date, row, doc)
                    self.getDescription(description, row, doc)
                    if notes is not None:
                        doc.update(
                            {'notes': ''.join(notes.stripped_strings)})
                    foidocsdb.save(doc)
                else:
                    print("already saved " + doc_hash)
            elif len(row.find_all('th')) == self.getColumnCount():
                print("header row")
            else:
                print("ERROR number of columns incorrect")
                print(row)