Derive the agencyID and disclosure-log URL from the scraper's filename instead of hard-coding them in each scraper
Former-commit-id: eddf90809214ee502e593c769c4bd0b0b2fafc3a
--- a/documents/genericScrapers.py
+++ b/documents/genericScrapers.py
@@ -8,17 +8,36 @@
import feedparser
import abc
-class GenericRSSDisclogScraper(object):
+class GenericDisclogScraper(object):
__metaclass__ = abc.ABCMeta
- @abc.abstractmethod
+ agencyID = None
+ disclogURL = None
def getAgencyID(self):
""" disclosr agency id """
- return
+		if self.agencyID is None:
+ self.agencyID = os.path.basename(sys.argv[0]).replace(".py","")
+ return self.agencyID
- @abc.abstractmethod
def getURL(self):
""" disclog URL"""
- return
+		if self.disclogURL is None:
+ agency = scrape.agencydb.get(self.getAgencyID())
+ self.disclogURL = agency['FOIDocumentsURL']
+ return self.disclogURL
+
+ @abc.abstractmethod
+ def doScrape(self):
+ """ do the scraping """
+ return
+
+
+
+class GenericRSSDisclogScraper(GenericDisclogScraper):
+ def getDescription(self, entry, doc):
+ """ get description from rss entry"""
+ doc['description'] = entry.summary
+ return
+
def doScrape(self):
foidocsdb = scrape.couch['disclosr-foidocuments']
(url,mime_type,content) = scrape.fetchURL(scrape.docsdb, self.getURL(), "foidocuments", self.getAgencyID())
@@ -26,29 +45,22 @@
for entry in feed.entries:
#print entry
print entry.id
- hash = scrape.mkhash(entry.link)
+ hash = scrape.mkhash(entry.id)
+ #print hash
doc = foidocsdb.get(hash)
+ #print doc
if doc == None:
print "saving"
edate = datetime.fromtimestamp(mktime( entry.published_parsed)).strftime("%Y-%m-%d")
- doc = {'id': hash, 'agencyID': self.getAgencyID(), 'url': entry.link, 'docID': entry.id,
- "date": edate, "description": entry.summary,"title": entry.title}
+ doc = {'_id': hash, 'agencyID': self.getAgencyID(), 'url': entry.link, 'docID': entry.id,
+ "date": edate,"title": entry.title}
+ self.getDescription(entry, doc)
foidocsdb.save(doc)
else:
print "already saved"
-class GenericOAICDisclogScraper(object):
- __metaclass__ = abc.ABCMeta
- @abc.abstractmethod
- def getAgencyID(self):
- """ disclosr agency id """
- return
-
- @abc.abstractmethod
- def getURL(self):
- """ disclog URL"""
- return
-
+class GenericOAICDisclogScraper(GenericDisclogScraper):
+ __metaclass__ = abc.ABCMeta
@abc.abstractmethod
def getColumns(self,columns):
""" rearranges columns if required """
@@ -86,7 +98,7 @@
edate = ""+str(dtdate[0])+'-'+str(dtdate[1])+'-'+str(dtdate[2])
else:
edate = ""
- doc = {'id': hash, 'agencyID': self.getAgencyID(), 'url': self.getURL(), "links": links, 'docID': id.string,
+ doc = {'_id': hash, 'agencyID': self.getAgencyID(), 'url': self.getURL(), "links": links, 'docID': id.string,
"date": edate, "description": descriptiontxt,"title": title.string,"notes": notes.string}
foidocsdb.save(doc)
else:
--- a/documents/scrapers/3cd40b1240e987cbcd3f0e67054ce259.py
+++ b/documents/scrapers/3cd40b1240e987cbcd3f0e67054ce259.py
@@ -5,12 +5,6 @@
#http://www.doughellmann.com/PyMOTW/abc/
class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
- def getAgencyID(self):
- return "3cd40b1240e987cbcd3f0e67054ce259"
-
- def getURL(self):
- return "http://www.apvma.gov.au/about/foi/disclosure/index.php"
-
def getColumns(self,columns):
(id, date, description, title, notes) = columns
return (id, date, description, title, notes)
@@ -19,3 +13,4 @@
print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
ScraperImplementation().doScrape()
+
--- a/documents/scrapers/8c9421f852c441910bf1d93a57b31d64.py
+++ b/documents/scrapers/8c9421f852c441910bf1d93a57b31d64.py
@@ -5,12 +5,6 @@
#http://www.doughellmann.com/PyMOTW/abc/
class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
- def getAgencyID(self):
- return "8c9421f852c441910bf1d93a57b31d64"
-
- def getURL(self):
- return "http://www.daff.gov.au/about/foi/ips/disclosure-log"
-
def getColumns(self,columns):
(id, date, title, description, notes) = columns
return (id, date, description, title, notes)
--- a/documents/scrapers/be9996f0ac58f71f23d074e82d44ead3.py
+++ b/documents/scrapers/be9996f0ac58f71f23d074e82d44ead3.py
@@ -2,14 +2,37 @@
sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
import genericScrapers
#RSS feed not detailed
-
+import scrape
+from bs4 import BeautifulSoup
#http://www.doughellmann.com/PyMOTW/abc/
class ScraperImplementation(genericScrapers.GenericRSSDisclogScraper):
- def getAgencyID(self):
- return "be9996f0ac58f71f23d074e82d44ead3"
+ def getDescription(self,entry,doc):
+ (url,mime_type,content) = scrape.fetchURL(scrape.docsdb, entry.link, "foidocuments", self.getAgencyID(), False)
+                if content is not None:
+ if mime_type == "text/html" or mime_type == "application/xhtml+xml" or mime_type =="application/xml":
+ # http://www.crummy.com/software/BeautifulSoup/documentation.html
+ soup = BeautifulSoup(content)
+ links = []
+ description = ""
+ dldivs = soup.find('div',class_="download")
+ if dldivs != None:
+ for atag in dldivs.find_all("a"):
+ if atag.has_key('href'):
+ links.append(scrape.fullurl(url,atag['href']))
+ nodldivs = soup.find('div',class_="incompleteNotification")
+ if nodldivs != None and nodldivs.stripped_strings != None:
+ for text in nodldivs.stripped_strings:
+ description = description + text
+ for row in soup.table.find_all('tr'):
+ if row != None:
+ description = description + "\n" + row.find('th').string + ": "
+ for text in row.find('div').stripped_strings:
+ description = description + text
+ if links != []:
+ doc.update({'links': links})
+ if description != "":
+ doc.update({ 'description': description})
- def getURL(self):
- return "http://foi.deewr.gov.au/disclosure-log/rss"
if __name__ == '__main__':
print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericRSSDisclogScraper)
--- a/documents/scrapers/e2a845e55bc9986e6c75c5ad2c508b8d.py
+++ b/documents/scrapers/e2a845e55bc9986e6c75c5ad2c508b8d.py
@@ -5,12 +5,6 @@
#http://www.doughellmann.com/PyMOTW/abc/
class ScraperImplementation(genericScrapers.GenericRSSDisclogScraper):
- def getAgencyID(self):
- return "be9996f0ac58f71f23d074e82d44ead3"
-
- def getURL(self):
- return "http://foi.deewr.gov.au/disclosure-log/rss"
-
def getColumns(self,columns):
(id, date, title, description, notes) = columns
return (id, date, description, title, notes)
--- a/documents/scrapers/rtk.py
+++ b/documents/scrapers/rtk.py
@@ -5,12 +5,6 @@
#http://www.doughellmann.com/PyMOTW/abc/
class ScraperImplementation(genericScrapers.GenericRSSDisclogScraper):
- def getAgencyID(self):
- return "be9996f0ac58f71f23d074e82d44ead3"
-
- def getURL(self):
- return "http://foi.deewr.gov.au/disclosure-log/rss"
-
def getColumns(self,columns):
(id, date, title, description, notes) = columns
return (id, date, description, title, notes)
--- a/documents/template.inc.php
+++ b/documents/template.inc.php
@@ -69,7 +69,7 @@
</p>
<ul class="nav">
- <li class="active"><a href="#">Home</a></li>
+ <li><a href="index.php">Home</a></li>
<li><a href="disclogsList.php">List of Disclosure Logs</a></li>
<li><a href="about.php">About</a></li>
@@ -127,7 +127,7 @@
}
function displayLogEntry($row, $idtoname) {
- echo "<div><h2>".$row->value->date.": ".$row->value->title." (".$idtoname[$row->value->agencyID].")</h2> <p>".$row->value->description;
+ echo "<div><h2>".$row->value->date.": ".$row->value->title." (".$idtoname[$row->value->agencyID].")</h2> <p>".str_replace("\n","<br>",$row->value->description);
if (isset($row->value->notes)) {
echo " <br>Note: ".$row->value->notes;
}