better date parser: switch from dateutil to parsedatetime
--- a/.gitmodules
+++ b/.gitmodules
@@ -28,4 +28,7 @@
[submodule "lib/amon-php"]
path = lib/amon-php
url = https://github.com/martinrusev/amon-php.git
+[submodule "documents/lib/parsedatetime"]
+ path = documents/lib/parsedatetime
+ url = git://github.com/bear/parsedatetime.git
--- a/documents/genericScrapers.py
+++ b/documents/genericScrapers.py
@@ -1,11 +1,9 @@
import sys,os
sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
import scrape
-
from bs4 import BeautifulSoup
+import parsedatetime as pdt
import abc
-import dateutil.parser
-
class GenericOAICDisclogScraper(object):
    __metaclass__ = abc.ABCMeta
    @abc.abstractmethod
@@ -24,6 +22,7 @@
        return
    def doScrape(self):
+        cal = pdt.Calendar()
        foidocsdb = scrape.couch['disclosr-foidocuments']
        (url,mime_type,content) = scrape.fetchURL(scrape.docsdb, self.getURL(), "foidocuments", self.getAgencyID())
        if content != None:
@@ -47,7 +46,13 @@
if doc == None:
    print "saving"
-    edate = dateutil.parser.parse(date.string).date().strftime("%Y-%m-%d")
+    (dtdate, dtr) = cal.parse(date.string)
+    print dtdate
+    if dtr != 0:
+        # zero-pad month and day so the ISO date strings sort correctly
+        edate = "%d-%02d-%02d" % (dtdate[0], dtdate[1], dtdate[2])
+    else:
+        edate = ""   # parsedatetime returns flag 0 for unparseable text
    doc = {'_id': hash, 'agencyID': self.getAgencyID(), 'url': self.getURL(), "links": links, 'docID': id.string,
           "date": edate, "description": descriptiontxt, "title": title.string, "notes": notes.string}
    foidocsdb.save(doc)
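
For reference, a minimal sketch of the parsedatetime API this commit switches to, with a made-up input string: Calendar.parse() takes free-form text and returns a pair of a nine-field time tuple and a status flag, where flag 0 means the text could not be parsed as a date or time.

import time
import parsedatetime as pdt

cal = pdt.Calendar()
(dtdate, status) = cal.parse("13 August 2012")
if status != 0:
    # fields 0-2 of the tuple are year, month, day; strftime zero-pads them
    print time.strftime("%Y-%m-%d", dtdate)   # 2012-08-13
else:
    print "not a date"

Unlike dateutil.parser, a failed parse does not raise an exception; the flag has to be checked, which is what the else branch in the diff above guards.
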
--- /dev/null
+++ b/documents/lib/parsedatetime
--- a/documents/scrape.py
+++ b/documents/scrape.py
@@ -189,7 +189,7 @@
scrapeAndStore(docsdb, linkurl, depth-1, fieldName, agencyID)
#couch = couchdb.Server('http://192.168.1.148:5984/')
-couch = couchdb.Server('http://192.168.1.148:5984/')
+couch = couchdb.Server('http://127.0.0.1:5984/')
# select database
agencydb = couch['disclosr-agencies']
docsdb = couch['disclosr-documents']
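
An aside on the hard-coded address above: a hedged sketch, not part of this commit, of reading the CouchDB URL from the environment so that local and LAN setups don't require editing scrape.py. COUCH_URL is a hypothetical variable name, not something scrape.py currently reads.

import os
import couchdb

# fall back to the localhost default this commit hard-codes
couch = couchdb.Server(os.environ.get('COUCH_URL', 'http://127.0.0.1:5984/'))
agencydb = couch['disclosr-agencies']     # raises ResourceNotFound if absent
docsdb = couch['disclosr-documents']
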
--- a/documents/scrapers/6847d7d95fda5ea58a7cd9a2620c673a.txt
+++ b/documents/scrapers/6847d7d95fda5ea58a7cd9a2620c673a.txt
@@ -1,1 +1,3 @@
+# multiple pages need to be scraped initially; each entry has its own subpage (see the sketch below)
http://www.ipaustralia.gov.au/about-us/freedom-of-information/foi-disclosure-log/?page=35&sessionId=3644188
+
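
A hedged sketch of what that note implies for a future scraper: walk the page= query parameter and collect each entry's subpage link. The page count and markup are assumptions, not verified against the live site, and the sessionId parameter is ignored here.

import urllib2
from bs4 import BeautifulSoup

base = "http://www.ipaustralia.gov.au/about-us/freedom-of-information/foi-disclosure-log/?page=%d"
for page in range(1, 36):   # the stored URL's page=35 suggests at least 35 pages
    soup = BeautifulSoup(urllib2.urlopen(base % page).read())
    # each log entry links to a subpage holding the actual details
    for link in soup.find_all("a"):
        print link.get("href")
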
--- a/documents/scrapers/8c9421f852c441910bf1d93a57b31d64.py
+++ b/documents/scrapers/8c9421f852c441910bf1d93a57b31d64.py
@@ -1,8 +1,23 @@
import sys,os
sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
-import scrape
-foidocsdb = scrape.couch['disclosr-foidocuments']
-#rss feed has only one entry
-http://www.daff.gov.au/about/foi/ips/disclosure-log
+import genericScrapers
+#RSS feed not detailed - it has only one entry
+#http://www.doughellmann.com/PyMOTW/abc/
+class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
+    def getAgencyID(self):
+        return "8c9421f852c441910bf1d93a57b31d64"
+
+    def getURL(self):
+        return "http://www.daff.gov.au/about/foi/ips/disclosure-log"
+
+    def getColumns(self, columns):
+        # the log's table lists title before description; reorder the
+        # cells into the generic (id, date, description, title, notes)
+        (id, date, title, description, notes) = columns
+        return (id, date, description, title, notes)
+
+if __name__ == '__main__':
+    print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
+    print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
+    ScraperImplementation().doScrape()
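
The __main__ checks above exercise the abc machinery linked in the PyMOTW comment; a minimal, self-contained sketch of that pattern, with illustrative class names:

import abc

class Base(object):
    __metaclass__ = abc.ABCMeta   # Python 2 metaclass syntax, as in genericScrapers.py

    @abc.abstractmethod
    def getAgencyID(self):
        """Subclasses must implement this."""

class Impl(Base):
    def getAgencyID(self):
        return "example"

print issubclass(Impl, Base)    # True
print isinstance(Impl(), Base)  # True; Base() itself would raise TypeError
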
--- a/documents/scrapers/c57c0bf315ce5977e730905707a2f6a3.txt
+++ b/documents/scrapers/c57c0bf315ce5977e730905707a2f6a3.txt
@@ -1,1 +1,3 @@
+# the disclosure log is a single PDF file
http://www.awm.gov.au/about/AWM_Disclosure_Log.pdf
+