From: Maxious
Date: Mon, 19 Nov 2012 08:35:46 +0000
Subject: fix agd scraper
X-Git-Url: https://maxious.lambdacomplex.org/git/?p=disclosr.git&a=commitdiff&h=5fe7acc259f57c4eef599409f4e65a89c182cc81
---
fix agd scraper

Former-commit-id: 387571086ee1f51b4ec59e3de6959dfb4f78beff
---
--- a/documents/genericScrapers.py
+++ b/documents/genericScrapers.py
@@ -7,11 +7,14 @@
 from datetime import datetime
 import feedparser
 import abc
+import unicodedata, re

 class GenericDisclogScraper(object):
     __metaclass__ = abc.ABCMeta
     agencyID = None
     disclogURL = None
+    def remove_control_chars(self, input):
+        return "".join([i for i in input if ord(i) in range(32, 127)])
     def getAgencyID(self):
         """ disclosr agency id """
         if self.agencyID == None:
@@ -30,13 +33,14 @@
         """ do the scraping """
         return

+    @abc.abstractmethod
+    def getDescription(self, content, entry, doc):
+        """ get description"""
+        return
+
 class GenericRSSDisclogScraper(GenericDisclogScraper):
-    def getDescription(self, entry, doc):
-        """ get description from rss entry"""
-        doc['description'] = entry.summary
-        return

     def doScrape(self):
         foidocsdb = scrape.couch['disclosr-foidocuments']
@@ -54,10 +58,14 @@
                 edate = datetime.fromtimestamp(mktime(
                     entry.published_parsed)).strftime("%Y-%m-%d")
                 doc = {'_id': hash, 'agencyID': self.getAgencyID(),
                     'url': entry.link, 'docID': entry.id, "date": edate,"title": entry.title}
-                self.getDescription(entry, doc)
+                self.getDescription(entry,entry, doc)
                 foidocsdb.save(doc)
             else:
                 print "already saved"
+    def getDescription(self, content, entry, doc):
+        """ get description from rss entry"""
+        doc.update({'description': content.summary})
+        return

 class GenericOAICDisclogScraper(GenericDisclogScraper):
     __metaclass__ = abc.ABCMeta
@@ -65,6 +73,17 @@
     def getColumns(self,columns):
         """ rearranges columns if required """
         return
+    def getColumnCount(self):
+        return 5
+    def getDescription(self, content, entry, doc):
+        """ get description from rss entry"""
+        descriptiontxt = ""
+        for string in content.stripped_strings:
+            descriptiontxt = descriptiontxt + " \n" + string
+        doc.update({'description': descriptiontxt})
+        return
+    def getTable(self, soup):
+        return soup.table

     def doScrape(self):
         cal = pdt.Calendar()
@@ -74,20 +93,21 @@
         if mime_type == "text/html" or mime_type == "application/xhtml+xml" or mime_type =="application/xml":
             # http://www.crummy.com/software/BeautifulSoup/documentation.html
             soup = BeautifulSoup(content)
-            for row in soup.table.find_all('tr'):
+            table = self.getTable(soup)
+            for row in table.find_all('tr'):
                 columns = row.find_all('td')
-                if len(columns) == 5:
+                if len(columns) == self.getColumnCount():
                     (id, date, description, title, notes) = self.getColumns(columns)
                     print id.string
-                    hash = scrape.mkhash(url+id.string)
+                    if id.string == None:
+                        hash = scrape.mkhash(self.remove_control_chars(url+date.string))
+                    else:
+                        hash = scrape.mkhash(self.remove_control_chars(url+id.string))
                     links = []
                     for atag in row.find_all("a"):
                         if atag.has_key('href'):
                             links.append(scrape.fullurl(url,atag['href']))
                     doc = foidocsdb.get(hash)
-                    descriptiontxt = ""
-                    for string in description.stripped_strings:
-                        descriptiontxt = descriptiontxt + " \n" + string

                     if doc == None:
                         print "saving"
@@ -97,14 +117,19 @@
                             print dtdate
                             edate = ""+str(dtdate[0])+'-'+str(dtdate[1])+'-'+str(dtdate[2])
                         else:
-                            edate = ""
-                        doc = {'_id': hash, 'agencyID': self.getAgencyID(), 'url': self.getURL(), "links": links, 'docID': id.string,
-                            "date": edate, "description": descriptiontxt,"title": title.string,"notes": notes.string}
+                            edate = datetime.strptime(date.string.strip(), "%d %B %Y").strftime("%Y-%m-%d")
+                        doc = {'_id': hash, 'agencyID': self.getAgencyID(), 'url': self.getURL(), 'docID': id.string,
+                            "date": edate,"title": title.string}
+                        if links != []:
+                            doc.update({'links': links})
+                        self.getDescription(description,row, doc)
+                        if notes != None:
+                            doc.update({ 'notes': notes.string})
                         foidocsdb.save(doc)
                     else:
-                        print "already saved"
+                        print "already saved "+hash

-                elif len(row.find_all('th')) == 5:
+                elif len(row.find_all('th')) == self.getColumnCount():
                     print "header row"

                 else:
--- a/documents/scrape.py
+++ b/documents/scrape.py
@@ -82,7 +82,7 @@
     url = canonurl(url)
     hash = mkhash(url)
     req = urllib2.Request(url)
-    print "Fetching %s" % url
+    print "Fetching %s (%s)" % (url,hash)
     if url.startswith("mailto") or url.startswith("javascript") or url.startswith("#") or url == None or url == "":
         print "Not a valid HTTP url"
         return (None,None,None)
@@ -94,7 +94,8 @@
             print "Uh oh, trying to scrape URL again too soon!"
             last_attachment_fname = doc["_attachments"].keys()[-1]
             last_attachment = docsdb.get_attachment(doc,last_attachment_fname)
-            return (doc['url'],doc['mime_type'],last_attachment.read())
+            content = last_attachment
+            return (doc['url'],doc['mime_type'],content)
         if scrape_again == False:
             print "Not scraping this URL again as requested"
             return (None,None,None)
--- /dev/null
+++ b/documents/scrapers/227cb6eb7d2c9f8a6e846df7447d6caa.py
@@ -1,1 +1,48 @@
+import sys,os
+sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
+import genericScrapers
+import scrape
+from bs4 import BeautifulSoup
+#http://www.doughellmann.com/PyMOTW/abc/
+class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
+    def getDescription(self,content, entry,doc):
+        link = None
+        links = []
+        description = ""
+        for atag in entry.find_all('a'):
+            if atag.has_key('href'):
+                link = scrape.fullurl(self.getURL(),atag['href'])
+                (url,mime_type,htcontent) = scrape.fetchURL(scrape.docsdb, link, "foidocuments", self.getAgencyID(), False)
+                if htcontent != None:
+                    if mime_type == "text/html" or mime_type == "application/xhtml+xml" or mime_type =="application/xml":
+                        # http://www.crummy.com/software/BeautifulSoup/documentation.html
+                        soup = BeautifulSoup(htcontent)
+                        for row in soup.find(class_ = "ms-rteTable-GreyAlternating").find_all('tr'):
+                            if row != None:
+                                rowtitle = row.find('th').string
+                                description = description + "\n" + rowtitle + ": "
+                                for text in row.find('td').stripped_strings:
+                                    description = description + text
+                                for atag in row.find_all("a"):
+                                    if atag.has_key('href'):
+                                        links.append(scrape.fullurl(link,atag['href']))
+
+        if links != []:
+            doc.update({'links': links})
+        if description != "":
+            doc.update({ 'description': description})
+
+    def getColumnCount(self):
+        return 2
+    def getTable(self,soup):
+        return soup.find(class_ = "ms-rteTable-GreyAlternating")
+    def getColumns(self,columns):
+        (date, title) = columns
+        return (title, date, title, title, None)
+
+if __name__ == '__main__':
+    print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
+    print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
+    ScraperImplementation().doScrape()
+
--- /dev/null
+++ b/documents/scrapers/820c3df09aa62f6ee7468c73bea0e323.py
@@ -1,1 +1,18 @@
+import sys,os
+sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
+import genericScrapers
+#RSS feed not detailed
+#http://www.doughellmann.com/PyMOTW/abc/
+class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
+    def getColumnCount(self):
+        return 2
+    def getColumns(self,columns):
+        (date, title) = columns
+        return (title, date, title, title, None)
+
+if __name__ == '__main__':
+    print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
+    print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
+    ScraperImplementation().doScrape()
+
--- /dev/null
+++ b/documents/scrapers/bb96fe4065afb7e0872136dd657f9369.txt
@@ -1,1 +1,2 @@
+# does not have any disclog entries or table
+
--- a/documents/scrapers/be9996f0ac58f71f23d074e82d44ead3.py
+++ b/documents/scrapers/be9996f0ac58f71f23d074e82d44ead3.py
@@ -6,9 +6,9 @@
 from bs4 import BeautifulSoup
 #http://www.doughellmann.com/PyMOTW/abc/
 class ScraperImplementation(genericScrapers.GenericRSSDisclogScraper):
-    def getDescription(self,entry,doc):
-        (url,mime_type,content) = scrape.fetchURL(scrape.docsdb, entry.link, "foidocuments", self.getAgencyID(), False)
-        if content != None:
+    def getDescription(self,content, entry,doc):
+        (url,mime_type,htcontent) = scrape.fetchURL(scrape.docsdb, entry.link, "foidocuments", self.getAgencyID(), False)
+        if htcontent != None:
             if mime_type == "text/html" or mime_type == "application/xhtml+xml" or mime_type =="application/xml":
                 # http://www.crummy.com/software/BeautifulSoup/documentation.html
                 soup = BeautifulSoup(content)
--- /dev/null
+++ b/documents/scrapers/c43ca6780764f4e61918e8836be74420.py
@@ -1,1 +1,16 @@
+import sys,os
+sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
+import genericScrapers
+#RSS feed not detailed
+#http://www.doughellmann.com/PyMOTW/abc/
+class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
+    def getColumns(self,columns):
+        (id, date, title,description,notes) = columns
+        return (id, date, description, title, notes)
+
+if __name__ == '__main__':
+    print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
+    print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
+    ScraperImplementation().doScrape()
+
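A quick standalone sketch (not part of the patch above) of what the new remove_control_chars() helper does before the url+id string is hashed: despite its name it keeps only printable ASCII (ord 32-126), so non-ASCII characters are dropped along with control characters. The sample disclog ID below is made up for illustration.

# -*- coding: utf-8 -*-
# Same filter expression as GenericDisclogScraper.remove_control_chars
# in documents/genericScrapers.py (Python 2, as used by the scrapers).
def remove_control_chars(input):
    return "".join([i for i in input if ord(i) in range(32, 127)])

# hypothetical disclog ID containing a tab and a non-breaking space
dirty = u"FOI\t12/34\xa0(released)"
print remove_control_chars(dirty)   # -> FOI12/34(released)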