Merge branch 'master' of ssh://maxious.lambdacomplex.org/git/disclosr
Former-commit-id: 406a4f269604c98cb406ec3ddb8842a8f203ab1c
--- a/documents/genericScrapers.py
+++ b/documents/genericScrapers.py
@@ -30,13 +30,14 @@
""" do the scraping """
return
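+        # subclasses fill doc['description'] from content (a feed entry or a table cell)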
+ @abc.abstractmethod
+ def getDescription(self, content, entry, doc):
+        """ fill doc['description'] from the given content """
+ return
+
class GenericRSSDisclogScraper(GenericDisclogScraper):
- def getDescription(self, entry, doc):
- """ get description from rss entry"""
- doc['description'] = entry.summary
- return
def doScrape(self):
foidocsdb = scrape.couch['disclosr-foidocuments']
@@ -54,16 +55,29 @@
edate = datetime.fromtimestamp(mktime( entry.published_parsed)).strftime("%Y-%m-%d")
doc = {'_id': hash, 'agencyID': self.getAgencyID(), 'url': entry.link, 'docID': entry.id,
"date": edate,"title": entry.title}
- self.getDescription(entry, doc)
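+                                # for RSS logs the feed entry doubles as both the content and the entry argument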
+                                self.getDescription(entry, entry, doc)
foidocsdb.save(doc)
else:
print "already saved"
+ def getDescription(self, content, entry, doc):
+ """ get description from rss entry"""
+ doc.update({'description': content.summary})
+ return
class GenericOAICDisclogScraper(GenericDisclogScraper):
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def getColumns(self,columns):
""" rearranges columns if required """
+ return
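+        # cells per data row in the disclosure log table; override for other layouts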
+ def getColumnCount(self):
+ return 5
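+        # default: join every string in the table cell into a newline-separated description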
+ def getDescription(self, content, entry, doc):
+        """ get description from a table cell """
+ descriptiontxt = ""
+ for string in content.stripped_strings:
+ descriptiontxt = descriptiontxt + " \n" + string
+ doc.update({'description': descriptiontxt})
return
def doScrape(self):
@@ -76,7 +90,7 @@
soup = BeautifulSoup(content)
for row in soup.table.find_all('tr'):
columns = row.find_all('td')
- if len(columns) == 5:
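+                # only rows with exactly getColumnCount() cells are data rows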
+ if len(columns) == self.getColumnCount():
(id, date, description, title, notes) = self.getColumns(columns)
print id.string
hash = scrape.mkhash(url+id.string)
@@ -85,9 +99,6 @@
if atag.has_key('href'):
links.append(scrape.fullurl(url,atag['href']))
doc = foidocsdb.get(hash)
- descriptiontxt = ""
- for string in description.stripped_strings:
- descriptiontxt = descriptiontxt + " \n" + string
if doc == None:
print "saving"
@@ -97,14 +108,20 @@
print dtdate
edate = ""+str(dtdate[0])+'-'+str(dtdate[1])+'-'+str(dtdate[2])
else:
- edate = ""
- doc = {'_id': hash, 'agencyID': self.getAgencyID(), 'url': self.getURL(), "links": links, 'docID': id.string,
- "date": edate, "description": descriptiontxt,"title": title.string,"notes": notes.string}
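+                                        # normalise dates like "14 October 2012" to yyyy-mm-dd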
+ edate = datetime.strptime(date.string, "%d %B %Y").strftime("%Y-%m-%d")
+ doc = {'_id': hash, 'agencyID': self.getAgencyID(), 'url': self.getURL(), 'docID': id.string,
+ "date": edate,"title": title.string}
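+                                # let subclasses build the description, e.g. by following links in the cell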
+                                self.getDescription(description, row, doc)
+
+ if links != []:
+ doc.update({'links': links})
+ if notes != None:
+                                    doc.update({'notes': notes.string})
foidocsdb.save(doc)
else:
- print "already saved"
+ print "already saved "+hash
- elif len(row.find_all('th')) == 5:
+ elif len(row.find_all('th')) == self.getColumnCount():
print "header row"
else:
--- /dev/null
+++ b/documents/scrapers/227cb6eb7d2c9f8a6e846df7447d6caa.py
@@ -1,1 +1,48 @@
+import sys,os
+sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
+import genericScrapers
+import scrape
+from bs4 import BeautifulSoup
+#RSS feed not detailed
+#http://www.doughellmann.com/PyMOTW/abc/
+class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
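+    # build the description by fetching the page behind the last link in the cell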
+ def getDescription(self,content, entry,doc):
+ link = None
+ for atag in entry.find_all('a'):
+ if atag.has_key('href'):
+                        link = scrape.fullurl(self.getURL(), atag['href'])
+ (url,mime_type,htcontent) = scrape.fetchURL(scrape.docsdb, link, "foidocuments", self.getAgencyID(), False)
+ if htcontent != None:
+                if mime_type == "text/html" or mime_type == "application/xhtml+xml" or mime_type == "application/xml":
+ # http://www.crummy.com/software/BeautifulSoup/documentation.html
+                        soup = BeautifulSoup(htcontent)
+ links = []
+ description = ""
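+                        # document links sit in a div with class "download"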
+ dldivs = soup.find('div',class_="download")
+ if dldivs != None:
+ for atag in dldivs.find_all("a"):
+ if atag.has_key('href'):
+ links.append(scrape.fullurl(url,atag['href']))
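+                        # an "incompleteNotification" div, when present, is added to the description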
+ nodldivs = soup.find('div',class_="incompleteNotification")
+                        if nodldivs != None:
+ for text in nodldivs.stripped_strings:
+ description = description + text
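+                        # each summary-table row contributes a "heading: value" line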
+                        if soup.table != None:
+                            for row in soup.table.find_all('tr'):
+                                if row.find('th') != None and row.find('div') != None:
+                                    description = description + "\n" + row.find('th').string + ": "
+                                    for text in row.find('div').stripped_strings:
+                                        description = description + text
+ if links != []:
+ doc.update({'links': links})
+ if description != "":
+ doc.update({ 'description': description})
+ def getColumnCount(self):
+ return 2
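+    # two-column log (date, title): the title cell stands in for id and description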
+ def getColumns(self,columns):
+ (date, title) = columns
+ return (title, date, title, title, None)
+
+if __name__ == '__main__':
+ print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
+ print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
+ ScraperImplementation().doScrape()
+
--- /dev/null
+++ b/documents/scrapers/820c3df09aa62f6ee7468c73bea0e323.py
@@ -1,1 +1,18 @@
+import sys,os
+sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
+import genericScrapers
+#RSS feed not detailed
+#http://www.doughellmann.com/PyMOTW/abc/
+class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
+ def getColumnCount(self):
+ return 2
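+    # two-column log (date, title): the title cell stands in for id and description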
+ def getColumns(self,columns):
+ (date, title) = columns
+ return (title, date, title, title, None)
+
+if __name__ == '__main__':
+ print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
+ print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
+ ScraperImplementation().doScrape()
+
--- /dev/null
+++ b/documents/scrapers/bb96fe4065afb7e0872136dd657f9369.txt
@@ -1,1 +1,2 @@
+# does not have any disclog entries or table
--- a/documents/scrapers/be9996f0ac58f71f23d074e82d44ead3.py
+++ b/documents/scrapers/be9996f0ac58f71f23d074e82d44ead3.py
@@ -6,9 +6,9 @@
from bs4 import BeautifulSoup
#http://www.doughellmann.com/PyMOTW/abc/
class ScraperImplementation(genericScrapers.GenericRSSDisclogScraper):
- def getDescription(self,entry,doc):
- (url,mime_type,content) = scrape.fetchURL(scrape.docsdb, entry.link, "foidocuments", self.getAgencyID(), False)
- if content != None:
+ def getDescription(self,content, entry,doc):
+ (url,mime_type,htcontent) = scrape.fetchURL(scrape.docsdb, entry.link, "foidocuments", self.getAgencyID(), False)
+ if htcontent != None:
if mime_type == "text/html" or mime_type == "application/xhtml+xml" or mime_type =="application/xml":
# http://www.crummy.com/software/BeautifulSoup/documentation.html
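+            # parse the fetched page body rather than the feed entry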
-            soup = BeautifulSoup(content)
+            soup = BeautifulSoup(htcontent)
--- /dev/null
+++ b/documents/scrapers/c43ca6780764f4e61918e8836be74420.py
@@ -1,1 +1,16 @@
+import sys,os
+sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
+import genericScrapers
+#RSS feed not detailed
+#http://www.doughellmann.com/PyMOTW/abc/
+class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
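+    # the source table orders cells (id, date, title, description, notes); swap title and description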
+ def getColumns(self,columns):
+ (id, date, title,description,notes) = columns
+ return (id, date, description, title, notes)
+
+if __name__ == '__main__':
+ print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
+ print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
+ ScraperImplementation().doScrape()
+