# documents/scrapers/00a294de663db69062ca09aede7c0487.py (disclosr.git)
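"""Scraper for the Australian Department of Defence FOI disclosure logs.

Reads the "documents released under Freedom of Information requests"
tables at defence.gov.au/foi and records an id, date, description and
notes for each released document.
"""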
import sys
import os

# Make the shared scraper framework importable from the parent directory.
sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
import genericScrapers
from dateutil.parser import parse
 
 
class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):

    def __init__(self):
        super(ScraperImplementation, self).__init__()

    def getDate(self, content, entry, doc):
        # The release date sits in the row's <th>; anything after an
        # opening parenthesis is commentary, so keep only the text
        # before it.
        date = ''.join(entry.find('th').stripped_strings).strip()
        (date, _, _) = date.partition("(")
        # Fix a typo ("Octber") that evidently appears in the source data,
        # and strip control characters before parsing.
        date = self.remove_control_chars(date.replace("Octber", "October"))
        print date
        edate = parse(date, dayfirst=True, fuzzy=True).strftime("%Y-%m-%d")
        print edate
        doc.update({'date': edate})
        return

    def getColumnCount(self):
        return 4
 
    def getTable(self, soup):
        # The misspelling "requets" is deliberate: it has to match the
        # table's summary attribute as it appears verbatim in the
        # site's HTML, so it must not be "corrected" here.
        return soup.find(summary="List of Defence documents released under Freedom of Information requets")
 
    def getColumns(self, columns):
        # Map the table's four columns onto the five fields the base
        # class expects (id, date, title, description, notes): the date
        # slot is left None because getDate fills doc['date'] directly,
        # and the description doubles as the title.
        (id, description, access, notes) = columns
        return (id, None, description, description, notes)
 
 
if __name__ == '__main__':
    print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
    print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)

    nsi = ScraperImplementation()
    # One disclosure-log page per financial year; scrape each in turn.
    for url in [
            "http://www.defence.gov.au/foi/disclosure_log_201213.cfm",
            "http://www.defence.gov.au/foi/disclosure_log_201112.cfm",
            "http://www.defence.gov.au/foi/disclosure_log_201011.cfm",
    ]:
        nsi.disclogURL = url
        nsi.doScrape()