more scrapers
[disclosr.git] / documents / scrapers / 1803322b27286950cab0c543168b5f21.py
blob:a/documents/scrapers/1803322b27286950cab0c543168b5f21.py -> blob:b/documents/scrapers/1803322b27286950cab0c543168b5f21.py
--- a/documents/scrapers/1803322b27286950cab0c543168b5f21.py
+++ b/documents/scrapers/1803322b27286950cab0c543168b5f21.py
@@ -1,1 +1,58 @@
+import sys,os
+sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
+import genericScrapers
+import dateutil
+from dateutil.parser import *
+from datetime import *
+import scrape
+from bs4 import BeautifulSoup
class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
    """Scraper for the DBCDE FOI disclosure log (OAIC disclog table format)."""

    def __init__(self):
        super(ScraperImplementation, self).__init__()

    def getDescription(self, content, entry, doc):
        """Follow each link in the disclog table entry, extract the detail
        page's description text and any document links, and record them.

        Adds a 'links' key (list of absolute URLs) and/or a 'description'
        key to doc, but only when something was actually found.
        """
        links = []
        description = ""
        for atag in entry.find_all('a'):
            if not atag.has_key('href'):  # Python 2 / old BeautifulSoup idiom
                continue
            link = scrape.fullurl(self.getURL(), atag['href'])
            (url, mime_type, htcontent) = scrape.fetchURL(
                scrape.docsdb, link, "foidocuments", self.getAgencyID(), False)
            if htcontent is None:
                continue
            if mime_type == "text/html" or mime_type == "application/xhtml+xml" or mime_type == "application/xml":
                soup = BeautifulSoup(htcontent)
                row = soup.find(id="content_div_148050")
                # Bugfix: soup.find() returns None when the div is absent;
                # the original dereferenced row unconditionally and would
                # raise AttributeError on such pages.
                if row is None:
                    continue
                description = ''.join(row.stripped_strings)
                # Use a distinct loop variable; the original shadowed the
                # outer 'atag'.
                for doctag in row.find_all("a"):
                    if doctag.has_key('href'):
                        links.append(scrape.fullurl(link, doctag['href']))

        if links:
            doc.update({'links': links})
        if description != "":
            doc.update({'description': description})

    def getColumnCount(self):
        """The disclog table has four columns."""
        return 4

    def getColumns(self, columns):
        """Map the 4 table columns (id, date, datepub, title) onto the
        5-tuple the generic scraper expects: (id, date, title, description,
        notes). The title doubles as the description; there is no notes
        column, and datepub is unused.
        """
        (id, date, datepub, title) = columns
        return (id, date, title, title, None)
+
+
+if __name__ == '__main__':
+    print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
+    print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
+
+    nsi = ScraperImplementation()
+    nsi.disclogURL = "http://www.dbcde.gov.au/about_us/freedom_of_information_disclosure_log/foi_list?result_146858_result_page=1"
+    nsi.doScrape()
+    nsi.disclogURL = "http://www.dbcde.gov.au/about_us/freedom_of_information_disclosure_log/foi_list?result_146858_result_page=2"
+    nsi.doScrape()
+    nsi.disclogURL = "http://www.dbcde.gov.au/about_us/freedom_of_information_disclosure_log/foi_list?result_146858_result_page=3"
+    nsi.doScrape()
+    nsi.disclogURL = "http://www.dbcde.gov.au/about_us/freedom_of_information_disclosure_log/foi_list?result_146858_result_page=4"
+    nsi.doScrape()
+    nsi.disclogURL = "http://www.dbcde.gov.au/about_us/freedom_of_information_disclosure_log/foi_list?result_146858_result_page=5"
+    nsi.doScrape()
+