begin DEEWR disclosure log scraper: add an overridable getDescription() hook to the generic RSS disclog scraper, key documents by entry.id, store them under CouchDB's _id, and scrape richer descriptions and download links from each DEEWR entry page
Former-commit-id: c17beeeab98ca8e389303cfcc75566c09aaf49cc
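
The generic RSS disclog scraper now exposes getDescription(entry, doc) as an overridable hook; by default it just copies entry.summary into the document, and agency scrapers such as the DEEWR one below can override it to pull extra detail from the linked page. A minimal sketch of how another agency scraper would plug into this hook (the class layout mirrors the DEEWR scraper; the agency ID and feed URL here are made-up placeholders, not part of this commit):

    import genericScrapers

    class ScraperImplementation(genericScrapers.GenericRSSDisclogScraper):
        def getAgencyID(self):
            # hypothetical agency hash, for illustration only
            return "0123456789abcdef0123456789abcdef"

        def getURL(self):
            # hypothetical disclosure-log RSS feed
            return "http://www.example.gov.au/disclosure-log/rss"

        # optionally override getDescription(entry, doc) here to fetch
        # entry.link and add 'description'/'links' to doc, as DEEWR does

    if __name__ == '__main__':
        ScraperImplementation().doScrape()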
--- a/documents/genericScrapers.py
+++ b/documents/genericScrapers.py
@@ -19,6 +19,12 @@
     def getURL(self):
         """ disclog URL"""
         return
+
+    def getDescription(self, entry, doc):
+        """ get description from rss entry"""
+        doc['description'] = entry.summary
+        return
+
     def doScrape(self):
         foidocsdb = scrape.couch['disclosr-foidocuments']
         (url,mime_type,content) = scrape.fetchURL(scrape.docsdb, self.getURL(), "foidocuments", self.getAgencyID())
@@ -26,13 +32,16 @@
         for entry in feed.entries:
             #print entry
             print entry.id
-            hash = scrape.mkhash(entry.link)
+            hash = scrape.mkhash(entry.id)
+            #print hash
             doc = foidocsdb.get(hash)
+            #print doc
             if doc == None:
                 print "saving"
                 edate = datetime.fromtimestamp(mktime( entry.published_parsed)).strftime("%Y-%m-%d")
-                doc = {'id': hash, 'agencyID': self.getAgencyID(), 'url': entry.link, 'docID': entry.id,
-                       "date": edate, "description": entry.summary,"title": entry.title}
+                doc = {'_id': hash, 'agencyID': self.getAgencyID(), 'url': entry.link, 'docID': entry.id,
+                       "date": edate,"title": entry.title}
+                self.getDescription(entry, doc)
                 foidocsdb.save(doc)
             else:
                 print "already saved"
@@ -86,7 +95,7 @@
edate = ""+str(dtdate[0])+'-'+str(dtdate[1])+'-'+str(dtdate[2])
else:
edate = ""
- doc = {'id': hash, 'agencyID': self.getAgencyID(), 'url': self.getURL(), "links": links, 'docID': id.string,
+ doc = {'_id': hash, 'agencyID': self.getAgencyID(), 'url': self.getURL(), "links": links, 'docID': id.string,
"date": edate, "description": descriptiontxt,"title": title.string,"notes": notes.string}
foidocsdb.save(doc)
else:
--- a/documents/scrapers/be9996f0ac58f71f23d074e82d44ead3.py
+++ b/documents/scrapers/be9996f0ac58f71f23d074e82d44ead3.py
@@ -2,7 +2,8 @@
 sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
 import genericScrapers
 #RSS feed not detailed
-
+import scrape
+from bs4 import BeautifulSoup
 #http://www.doughellmann.com/PyMOTW/abc/
 class ScraperImplementation(genericScrapers.GenericRSSDisclogScraper):
     def getAgencyID(self):
@@ -11,6 +12,34 @@
     def getURL(self):
         return "http://foi.deewr.gov.au/disclosure-log/rss"
+    def getDescription(self,entry,doc):
+        (url,mime_type,content) = scrape.fetchURL(scrape.docsdb, entry.link, "foidocuments", self.getAgencyID(), False)
+        if content != None:
+            if mime_type == "text/html" or mime_type == "application/xhtml+xml" or mime_type == "application/xml":
+                # http://www.crummy.com/software/BeautifulSoup/documentation.html
+                soup = BeautifulSoup(content)
+                links = []
+                description = ""
+                # download links for the entry live in a div with class "download"
+                dldivs = soup.find('div',class_="download")
+                if dldivs != None:
+                    for atag in dldivs.find_all("a"):
+                        if atag.has_attr('href'):
+                            links.append(scrape.fullurl(url,atag['href']))
+                # notice shown when no document is available for download
+                nodldivs = soup.find('div',class_="incompleteNotification")
+                if nodldivs != None:
+                    for text in nodldivs.stripped_strings:
+                        description = description + text
+                # metadata table: each row is a th label followed by a div of text
+                if soup.table != None:
+                    for row in soup.table.find_all('tr'):
+                        if row.find('th') != None and row.find('div') != None:
+                            description = description + "\n" + row.find('th').string + ": "
+                            for text in row.find('div').stripped_strings:
+                                description = description + text
+                if links != []:
+                    doc.update({'links': links})
+                if description != "":
+                    doc.update({'description': description})
+
+
 if __name__ == '__main__':
     print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericRSSDisclogScraper)
     print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericRSSDisclogScraper)
--- a/documents/template.inc.php
+++ b/documents/template.inc.php
@@ -69,7 +69,7 @@
       </p>
       <ul class="nav">
-        <li class="active"><a href="#">Home</a></li>
+        <li><a href="index.php">Home</a></li>
         <li><a href="disclogsList.php">List of Disclosure Logs</a></li>
         <li><a href="about.php">About</a></li>