Experimental organization support
Former-commit-id: 8d69f7e95445481fa0fff75ee0def014614268c8
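
The core of the change is the new organization handling in documents/datagov-export.py: the loader now talks to CKAN directly via ckanapi, creating the agency's organization if it is missing and then attaching the freshly registered package to it. A minimal sketch of that flow follows, assuming a reachable CKAN endpoint and a placeholder API key; keyword names (apikey vs api_key) and the shape of action results differ between ckanapi releases, so this illustrates the intent rather than the exact calls in the diff below.

    import ckanapi

    # Hypothetical endpoint and key; substitute real values.
    ckan = ckanapi.RemoteCKAN('http://data.disclosurelo.gs', apikey='my-api-key')

    org_name = 'exampleagency'    # munged agency name (hypothetical)
    pkg_name = 'example-dataset'  # munged package name (hypothetical)

    # Create the organization only if it is not already registered.
    if org_name not in ckan.action.organization_list():
        ckan.action.organization_create(name=org_name, title='Example Agency',
                                        description='Example Agency')

    # Link the package to the organization. The loader in the diff falls back to
    # rewriting the whole organization record because package_owner_org_update
    # failed against the target instance; on recent CKAN versions this single
    # call is sufficient.
    pkg = ckan.action.package_show(id=pkg_name)
    ckan.action.package_owner_org_update(id=pkg['id'], organization_id=org_name)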
--- a/documents/datagov-export.py
+++ b/documents/datagov-export.py
@@ -3,6 +3,7 @@
from ckanclient import CkanApiError
import re
import html2text # aaronsw :(
+import ckanapi
class LoaderError(Exception):
@@ -10,10 +11,12 @@
# Instantiate the CKAN client.
#ckan = ckanclient.CkanClient(base_location='http://localhost:5000/api', api_key='b47b24cd-591d-40c1-8677-d73101d56d1b')
+api_key = 'aeab80a6-0a58-4026-96a4-c07e57f9c434'
ckan = ckanclient.CkanClient(base_location='http://data.disclosurelo.gs/api',
- api_key='482a9dd2-a976-4adf-ac77-d71d92a98a52')
-#couch = couchdb.Server('http://127.0.0.1:5984/')
-couch = couchdb.Server('http://192.168.1.113:5984/')
+ api_key=api_key)
+ckandirect = ckanapi.RemoteCKAN('http://data.disclosurelo.gs', api_key=api_key)
+couch = couchdb.Server('http://127.0.0.1:5984/')
+#couch = couchdb.Server('http://192.168.1.113:5984/')
# http://code.activestate.com/recipes/578019-bytes-to-human-human-to-bytes-converter/
SYMBOLS = {
@@ -91,6 +94,7 @@
def name_munge(input_name):
return munge(input_name.replace(' ', '').replace('.', '_').replace('&', 'and'))
+ #[:100]
#return input_name.replace(' ', '').replace('.', '_').replace('&', 'and')
@@ -115,79 +119,122 @@
docsdb = couch['disclosr-documents']
if __name__ == "__main__":
+ orgs_list = []
for doc in docsdb.view('app/datasets'):
print doc.id
- if doc.value['url'] != "http://data.gov.au/data/":
+ if doc.value['url'] != "http://data.gov.au/data/" and doc.value['agencyID'] != "qld":
# Collect the package metadata.
- pkg_name = name_munge(doc.value['metadata']['DCTERMS.Title'][:100])
+            pkg_name = doc.value['url'].replace("http://data.gov.au/dataset/",'').replace('/','')[:100]
tags = []
- if len(doc.value['metadata']["Keywords / Tags"]) > 0:
- if hasattr(doc.value['metadata']["Keywords / Tags"], '__iter__'):
- tags = tags + doc.value['metadata']["Keywords / Tags"]
- else:
- tags = tags + [doc.value['metadata']["Keywords / Tags"]]
- if 'data.gov.au Category' in doc.value['metadata'].keys() and len(doc.value['metadata']['data.gov.au Category']) > 0:
- if hasattr(doc.value['metadata']['data.gov.au Category'], '__iter__'):
- tags = tags + doc.value['metadata']['data.gov.au Category']
- else:
- tags = tags + [doc.value['metadata']['data.gov.au Category']]
- tags = [re.sub('[^a-zA-Z0-9-_.]', '', tag.replace('&', 'and')).lower() for tag in tags if tag]
- print tags
- package_entity = {
- 'name': pkg_name,
- 'title': doc.value['metadata']['DCTERMS.Title'],
- 'url': doc.value['metadata']['DCTERMS.Source.URI'],
- 'tags': tags, #tags are mandatory?
- 'author': doc.value['metadata']["DCTERMS.Creator"],
- 'maintainer': doc.value['metadata']["DCTERMS.Creator"],
- 'licence_id': get_licence_id(doc.value['metadata']['DCTERMS.License']),
- 'notes': html2text.html2text(doc.value['metadata']['Description']),
- }
+ if doc.value['agencyID'] == "AGIMO":
+ if len(doc.value['metadata']["Keywords / Tags"]) > 0:
+ if hasattr(doc.value['metadata']["Keywords / Tags"], '__iter__'):
+ tags = tags + doc.value['metadata']["Keywords / Tags"]
+ else:
+ tags = tags + [doc.value['metadata']["Keywords / Tags"]]
+ if 'data.gov.au Category' in doc.value['metadata'].keys() and len(doc.value['metadata']['data.gov.au Category']) > 0:
+ if hasattr(doc.value['metadata']['data.gov.au Category'], '__iter__'):
+ tags = tags + doc.value['metadata']['data.gov.au Category']
+ else:
+ tags = tags + [doc.value['metadata']['data.gov.au Category']]
+ tags = [re.sub('[^a-zA-Z0-9-_.]', '', tag.replace('&', 'and')).lower() for tag in tags if tag]
+ #print tags
+ package_entity = {
+ 'name': pkg_name,
+ 'title': doc.value['metadata']['DCTERMS.Title'],
+ 'url': doc.value['metadata']['DCTERMS.Source.URI'],
+ 'tags': tags, #tags are mandatory?
+ 'author': doc.value['metadata']["DCTERMS.Creator"],
+ 'maintainer': doc.value['metadata']["DCTERMS.Creator"],
+ 'licence_id': get_licence_id(doc.value['metadata']['DCTERMS.License']),
+ 'notes': html2text.html2text(doc.value['metadata']['Description']),
+ }
+ if doc.value['agencyID'] == "qld":
+ package_entity = doc.value['metadata']
try:
- print package_entity
+ #print package_entity
ckan.package_register_post(package_entity)
except CkanApiError, e:
- if ckan.last_status == 409:
+ if ckan.last_message == "{\"name\": [\"That URL is already in use.\"]}":
print "package already exists"
else:
+ print ckan.last_message
raise LoaderError('Unexpected status %s checking for package under \'%s\': %r' % (
ckan.last_status, pkg_name, e.args))
-
+ pkg = ckan.package_entity_get(pkg_name)
#add to group
- group_name = name_munge(doc.value['metadata']["Agency"][:100])
+ # group_name = name_munge(doc.value['metadata']["Agency"][:100])
+ # try:
+ # print ckan.group_entity_get(group_name)
+ #
+ # # Update the group details
+ # group_entity = ckan.last_message
+ # print "group exists"
+ # if 'packages' in group_entity.keys():
+ # group_entity['packages'] = list(set(group_entity['packages'] + [pkg_name]))
+ # else:
+ # group_entity['packages'] = [pkg_name]
+ # ckan.group_entity_put(group_entity)
+ # except CkanApiError, e:
+ # if ckan.last_status == 404:
+ # print "group does not exist, creating"
+ # group_entity = {
+ # 'name': group_name,
+ # 'title': doc.value['metadata']["Agency"],
+ # 'description': doc.value['metadata']["Agency"],
+ # 'packages': [pkg_name],
+ # # 'type': "organization" # not allowed via API, use database query
+ # # update "group" set type = 'organization';
+ # }
+ # print group_entity
+ # ckan.group_register_post(group_entity)
+ # elif ckan.last_status == 409:
+ # print "group already exists"
+ # else:
+ # raise LoaderError('Unexpected status %s adding to group under \'%s\': %r' % (
+ # ckan.last_status, pkg_name, e.args))
+
+ #add to or create organization using direct API
+ org_name = name_munge(doc.value['metadata']["Agency"][:100])
+ if org_name not in orgs_list:
+ orgs_list = ckandirect.action.organization_list()['result']
+ print orgs_list
+ if org_name not in orgs_list:
+ try:
+ print "org not found, creating "+org_name
+ ckandirect.action.organization_create(name = org_name, title= doc.value['metadata']["Agency"],
+ description= doc.value['metadata']["Agency"])
+ orgs_list.append(org_name)
+ except ckanapi.ValidationError, e:
+ print e
+ raise LoaderError('Unexpected status')
+
try:
- print ckan.group_entity_get(group_name)
-
- # Update the group details
- group_entity = ckan.last_message
- print "group exists"
- if 'packages' in group_entity.keys():
- group_entity['packages'] = list(set(group_entity['packages'] + [pkg_name]))
- else:
- group_entity['packages'] = [pkg_name]
- ckan.group_entity_put(group_entity)
- except CkanApiError, e:
- if ckan.last_status == 404:
- print "group does not exist, creating"
- group_entity = {
- 'name': group_name,
- 'title': doc.value['metadata']["Agency"],
- 'description': doc.value['metadata']["Agency"],
- 'packages': [pkg_name],
- # 'type': "organization" # not allowed via API, use database query
- # update "group" set type = 'organization';
- }
- print group_entity
- ckan.group_register_post(group_entity)
- else:
- raise LoaderError('Unexpected status %s adding to group under \'%s\': %r' % (
- ckan.last_status, pkg_name, e.args))
+ print "linking package to "+org_name
+
+ #brute force
+ org = ckandirect.action.organization_show(id=org_name)
+ #print org["result"]["packages"]
+ #print pkg
+ org["result"]["packages"].append({"id":pkg['id'], "name": pkg_name})
+ #print org["result"]
+ ckandirect.call_action(action="organization_update", data_dict=org["result"])
+
+ # if only this worked
+ #print ckandirect.action.package_owner_org_update(id=pkg['id'],organization_id=org["result"]["id"])
+
+ except ckanapi.ValidationError, e:
+ print e
+ raise LoaderError('Unexpected status')
+ #except ckanapi.CKANAPIError, e:
+ # print "lol"
+
if 'Download' in doc.value['metadata'].keys():
try:
- pkg = ckan.package_entity_get(pkg_name)
+
resources = pkg.get('resources', [])
if len(resources) < len(doc.value['metadata']['Download']):
for resource in doc.value['metadata']['Download']:
--- /dev/null
+++ b/documents/datagov-merge.php
@@ -1,1 +1,26 @@
+<?php
+include_once("../include/common.inc.php");
+
+
+setlocale(LC_CTYPE, 'C');
+
+$db = $server->get_db('disclosr-documents');
+$datasets = Array();
+try {
+ $rows = $db->get_view("app", "datasets", null, true)->rows;
+
+ foreach ($rows as $row) {
+ //print_r($row);
+ if ($row->value->url != "http://data.gov.au/data/")
+ $datasets[str_replace(Array("http://data.gov.au/dataset/","/"),"",$row->value->url)] = $row->id;
+ }
+} catch (SetteeRestClientException $e) {
+ setteErrorHandler($e);
+}
+ksort($datasets);
+foreach ($datasets as $datasetname => $datasetkey) {
+ print "$datasetname => $datasetkey<br>\n";
+}
+?>
+
--- /dev/null
+++ b/documents/dataqld.py
@@ -1,1 +1,28 @@
+import sys, os
+import time
+import scrape
+from bs4 import BeautifulSoup
+from unidecode import unidecode
+import ckanclient
+
+# Instantiate the CKAN client.
+ckan = ckanclient.CkanClient(base_location='https://data.qld.gov.au/api')
+
+# Get the package list.
+package_list = ckan.package_register_get()
+for package_name in package_list:
+# Get the details of a package.
+ (url, mime_type, html) = scrape.fetchURL(scrape.docsdb,
+ "https://data.qld.gov.au/dataset/"+package_name , "data", "qld", False)
+ hash = scrape.mkhash(scrape.canonurl(url))
+ print hash
+ doc = scrape.docsdb.get(hash)
+ if "metadata" not in doc.keys() or True:
+ ckan.package_entity_get(package_name)
+ package_entity = ckan.last_message
+ doc['type'] = "dataset"
+ doc['metadata'] = package_entity
+ print package_entity
+ scrape.docsdb.save(doc)
+
--- a/documents/gazette.py
+++ b/documents/gazette.py
@@ -5,20 +5,53 @@
from unidecode import unidecode
-listurl = "http://gazettes.ag.gov.au/portal/govgazonline.nsf/publications?OpenView&Start=3960"
-(url, mime_type, listhtml) = scrape.fetchURL(scrape.docsdb,
- listurl, "gazette", "AGD")
-soup = BeautifulSoup(listhtml)
-for row in soup.find_all('tr'):
- if row.has_key('valign'):
- for col in tr.find_all('td'):
- print col.string
- #url = scrape.fullurl(listurl, atag['href'])
- #(url, mime_type, html) = scrape.fetchURL(scrape.docsdb,
- # url, "data", "AGIMO")
- #hash = scrape.mkhash(scrape.canonurl(url))
- #doc = scrape.docsdb.get(hash)
- #print doc['metadata']
- #scrape.docsdb.save(doc)
- #time.sleep(2)
+items = 3950
+items = 1
+while True:
+ print str(items) + " (" +str(items/25) +" screens to go)"
+ listurl = "http://gazettes.ag.gov.au/portal/govgazonline.nsf/publications?OpenView&Start=" + str(items)
+ (listurl, mime_type, listhtml) = scrape.fetchURL(scrape.docsdb,
+ listurl, "gazette", "AGD", False)
+ for line in listhtml.split('\n'):
+ soup = BeautifulSoup(line)
+ #print line
+ for row in soup.find_all('tr'):
+ print line
+ if row.has_key('valign'):
+ i = 0
+ date = ""
+ id = ""
+ type = ""
+ description = ""
+ name = ""
+ url = ""
+ for col in soup.find_all('td'):
+ #print ''.join(col.stripped_strings)
+ if i == 0:
+ date = ''.join(col.stripped_strings)
+ if i == 1:
+ id = ''.join(col.stripped_strings)
+ if i == 2:
+ type = ''.join(col.stripped_strings)
+ if i == 3:
+ description = ''.join(col.stripped_strings)
+ for link in col.findAll('a'):
+ if link.has_key("href"):
+ url = link['href']
+ name = ''.join(link.stripped_strings)
+ print str(items) + " (" +str(items/25) +" screens to go)"
+ print [date, id, type, description, name, url]
+ itemurl = scrape.fullurl(listurl, url)
+ (itemurl, mime_type, html) = scrape.fetchURL(scrape.docsdb,
+ itemurl, "gazette", "AGD", False)
+ hash = scrape.mkhash(scrape.canonurl(itemurl))
+ doc = scrape.docsdb.get(hash)
+                        doc['metadata'] = {"date": date, "id": id, "type": type, "description": description, "name": name, "url": url}
+ scrape.docsdb.save(doc)
+ #time.sleep(2)
+                        i = i + 1
+ items = items - 25
+ if items <= 0:
+ break
+
--- a/documents/genericScrapers.py
+++ b/documents/genericScrapers.py
@@ -72,7 +72,8 @@
edate = date.today().strftime("%Y-%m-%d")
doc = {'_id': dochash, 'agencyID': self.getAgencyID()
, 'url': self.getURL(), 'docID': dochash,
- "date": edate, "title": "Disclosure Log Updated", "description": self.remove_control_chars(description), "diff": diff}
+ "date": edate, "title": "Disclosure Log Updated",
+ "description": self.remove_control_chars(description), "diff": self.remove_control_chars(diff)}
foidocsdb.save(doc)
else:
print "already saved"
@@ -199,11 +200,16 @@
return table.find_all('tr')
def getDate(self, content, entry, doc):
- date = ''.join(content.stripped_strings).strip()
- (a, b, c) = date.partition("(")
- date = self.remove_control_chars(a.replace("Octber", "October").replace("1012","2012"))
- print date
- edate = parse(date, dayfirst=True, fuzzy=True).strftime("%Y-%m-%d")
+ strdate = ''.join(content.stripped_strings).strip()
+ (a, b, c) = strdate.partition("(")
+        strdate = self.remove_control_chars(a.replace("Octber", "October").replace("Janrurary", "January").replace("1012", "2012"))
+ print strdate
+ try:
+ edate = parse(strdate, dayfirst=True, fuzzy=True).strftime("%Y-%m-%d")
+ except ValueError:
+ print >> sys.stderr, "ERROR date invalid %s " % strdate
+ print >> sys.stderr, "ERROR date originally %s " % ''.join(content.stripped_strings).strip()
+ edate = date.today().strftime("%Y-%m-%d")
print edate
doc.update({'date': edate})
return
@@ -266,8 +272,7 @@
'Summary of FOIrequest received by agency/minister',
'Summary of FOI request received', 'Description of FOI Request',
"FOI request", 'Results 1 to 67 of 67']
- if doc['title'] not in badtitles\
- and doc['description'] != '':
+ if doc['title'] not in badtitles and 'description' in doc.keys() and doc['description'] != '':
print "saving"
foidocsdb.save(doc)
else:
@@ -277,6 +282,6 @@
print "header row"
else:
- print "ERROR number of columns incorrect"
+ print >> sys.stderr, "ERROR number of columns incorrect"
print row
--- a/documents/runScrapers.sh
+++ b/documents/runScrapers.sh
@@ -1,10 +1,22 @@
-for f in scrapers/*.py;
- do echo "Processing $f file..";
- python $f;
+DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+cd $DIR
+echo "" > /tmp/disclosr-error
+for f in scrapers/*.py; do
+ echo "Processing $f file..";
+ md5=`md5sum /tmp/disclosr-error`
+ python $f 3>&1 1>&2 2>&3 | tee --append /tmp/disclosr-error;
+ md52=`md5sum /tmp/disclosr-error`
+ if [ "$md5" != "$md52" ]; then
+ echo "^^^^^^^^^^^^^^ $f" >> /tmp/disclosr-error;
+ fi
if [ "$?" -ne "0" ]; then
echo "error";
- sleep 2;
+ sleep 1;
fi
done
+if [ -s /tmp/disclosr-error ] ; then
+ echo "emailling logs..";
+ mail -E -s "Disclosr errors" maxious@lambdacomplex.org < /tmp/disclosr-error ;
+fi
--- a/documents/scrape.py
+++ b/documents/scrape.py
@@ -7,14 +7,15 @@
from urlparse import urljoin
import time
import os
+import sys
import mimetypes
import urllib
import urlparse
import socket
#couch = couchdb.Server('http://192.168.1.148:5984/')
-couch = couchdb.Server('http://192.168.1.113:5984/')
-#couch = couchdb.Server('http://127.0.0.1:5984/')
+#couch = couchdb.Server('http://192.168.1.113:5984/')
+couch = couchdb.Server('http://127.0.0.1:5984/')
def mkhash(input):
@@ -89,7 +90,7 @@
def getLastAttachment(docsdb, url):
hash = mkhash(url)
doc = docsdb.get(hash)
- if doc != None:
+ if doc != None and "_attachments" in doc.keys():
last_attachment_fname = doc["_attachments"].keys()[-1]
last_attachment = docsdb.get_attachment(doc, last_attachment_fname)
return last_attachment
@@ -103,7 +104,7 @@
req = urllib2.Request(url)
print "Fetching %s (%s)" % (url, hash)
if url.startswith("mailto") or url.startswith("javascript") or url.startswith("#") or url == None or url == "":
- print "Not a valid HTTP url"
+ print >> sys.stderr, "Not a valid HTTP url"
return (None, None, None)
doc = docsdb.get(hash)
if doc == None:
@@ -111,10 +112,15 @@
else:
if (('page_scraped' in doc) and ((time.time() - doc['page_scraped']) < 60 * 24 * 14) or (scrape_again == False)):
print "Uh oh, trying to scrape URL again too soon!" + hash
- last_attachment_fname = doc["_attachments"].keys()[-1]
- last_attachment = docsdb.get_attachment(doc, last_attachment_fname)
- content = last_attachment
- return (doc['url'], doc['mime_type'], content.read())
+ if "_attachments" in doc.keys():
+ last_attachment_fname = doc["_attachments"].keys()[-1]
+ last_attachment = docsdb.get_attachment(doc, last_attachment_fname)
+ content = last_attachment.read()
+ mime_type = doc['mime_type']
+ else:
+ content = None
+ mime_type = None
+ return (doc['url'], mime_type, content)
req.add_header("User-Agent", "Mozilla/4.0 (compatible; Prometheus webspider; owner maxious@lambdacomplex.org)")
#if there is a previous version stored in couchdb, load caching helper tags
@@ -159,13 +165,13 @@
#store as attachment epoch-filename
except (urllib2.URLError, socket.timeout) as e:
- print "error!"
+ print >> sys.stderr,"error!"
error = ""
if hasattr(e, 'reason'):
error = "error %s in downloading %s" % (str(e.reason), url)
elif hasattr(e, 'code'):
error = "error %s in downloading %s" % (e.code, url)
- print error
+ print >> sys.stderr, error
doc['error'] = error
docsdb.save(doc)
return (None, None, None)
--- a/documents/scrapers/0049d35216493c545ef5f7f000e6b252.py
+++ b/documents/scrapers/0049d35216493c545ef5f7f000e6b252.py
@@ -42,7 +42,6 @@
'data': {'request': '', 'session': '', 'more': ''}
}
-
- amonpy.exception(data)
+ #amonpy.exception(data)
pass
--- a/documents/scrapers/227cb6eb7d2c9f8a6e846df7447d6caa.py
+++ b/documents/scrapers/227cb6eb7d2c9f8a6e846df7447d6caa.py
@@ -18,13 +18,13 @@
if mime_type == "text/html" or mime_type == "application/xhtml+xml" or mime_type =="application/xml":
# http://www.crummy.com/software/BeautifulSoup/documentation.html
soup = BeautifulSoup(htcontent)
- for row in soup.find(class_ = "ms-rteTable-GreyAlternating").find_all('tr'):
+ rowtitle = soup.find(class_ = "wc-title").find("h1").string
+ if rowtitle != None:
+ description = rowtitle + ": "
+ for row in soup.find(class_ ="wc-content").find_all('td'):
if row != None:
- rowtitle = row.find('th').string
- if rowtitle != None:
- description = description + "\n" + rowtitle + ": "
- for text in row.find('td').stripped_strings:
- description = description + text
+ for text in row.stripped_strings:
+ description = description + text + "\n"
for atag in row.find_all("a"):
if atag.has_key('href'):
links.append(scrape.fullurl(link,atag['href']))
@@ -37,7 +37,7 @@
def getColumnCount(self):
return 2
def getTable(self,soup):
- return soup.find(class_ = "ms-rteTable-GreyAlternating")
+ return soup.find(class_ = "ms-rteTable-default")
def getColumns(self,columns):
(date, title) = columns
return (title, date, title, title, None)
--- a/documents/scrapers/24bd71114d3975ed9a63ad29624c62c9.py
+++ b/documents/scrapers/24bd71114d3975ed9a63ad29624c62c9.py
@@ -7,7 +7,7 @@
#http://www.doughellmann.com/PyMOTW/abc/
class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
def getTable(self,soup):
- return soup.find(id = "inner_content")
+ return soup.find(class_="tborder")
def getColumnCount(self):
return 2
def getColumns(self,columns):
--- a/documents/scrapers/3d5871a44abbbc81ef5b3a420070755d.py
+++ b/documents/scrapers/3d5871a44abbbc81ef5b3a420070755d.py
@@ -8,40 +8,14 @@
from datetime import *
#http://www.doughellmann.com/PyMOTW/abc/
-class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
- def getTable(self,soup):
- return soup.find(class_ = "inner-column").table
- def getRows(self,table):
- return table.tbody.find_all('tr',recursive=False)
+class ScraperImplementation(genericScrapers.GenericHTMLDisclogScraper):
def getColumnCount(self):
- return 3
- def getColumns(self,columns):
- (date, title, description) = columns
- return (date, date, title, description, None)
- def getDate(self, content, entry, doc):
- i = 0
- date = ""
- for string in content.stripped_strings:
- if i ==1:
- date = string
- i = i+1
- edate = parse(date, dayfirst=True, fuzzy=True).strftime("%Y-%m-%d")
- print edate
- doc.update({'date': edate})
- return
- def getTitle(self, content, entry, doc):
- i = 0
- title = ""
- for string in content.stripped_strings:
- if i < 2:
- title = title + string
- i = i+1
- doc.update({'title': title})
- #print title
- return
+ return 0
if __name__ == '__main__':
- print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
- print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
+#http://www.csiro.au/Portals/About-CSIRO/How-we-work/Governance/FOI-Request-Disclosure-Log-2012-13.aspx
+#http://www.csiro.au/Portals/About-CSIRO/How-we-work/Governance/FOI-Request-Disclosure-Log-2011-12.aspx
+ print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericHTMLDisclogScraper)
+ print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericHTMLDisclogScraper)
ScraperImplementation().doScrape()
--- a/documents/scrapers/6fa04af95fbe7de96daa2c7560e0aad3.py
+++ b/documents/scrapers/6fa04af95fbe7de96daa2c7560e0aad3.py
@@ -6,8 +6,6 @@
#http://www.doughellmann.com/PyMOTW/abc/
class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
- def getTable(self,soup):
- return soup.find(id = "content_div_50269").table
def getColumns(self,columns):
(id, date, title, description, notes) = columns
return (id, date, title, description, notes)
--- a/documents/scrapers/7c6adc1d41cf029bf1a0959e5156477a.py
+++ b/documents/scrapers/7c6adc1d41cf029bf1a0959e5156477a.py
@@ -21,11 +21,15 @@
d.make_links_absolute(base_url = self.getURL())
for table in d('table').items():
title= table('thead').text()
- print title
+ print self.remove_control_chars(title)
(idate,descA,descB,link,deldate,notes) = table('tbody tr').map(lambda i, e: pq(e).children().eq(1).text())
links = table('a').map(lambda i, e: pq(e).attr('href'))
description = descA+" "+descB
- edate = parse(idate[:12], dayfirst=True, fuzzy=True).strftime("%Y-%m-%d")
+ try:
+ edate = parse(idate[:12], dayfirst=True, fuzzy=True).strftime("%Y-%m-%d")
+ except ValueError:
+ edate = date.today().strftime("%Y-%m-%d")
+ pass
print edate
dochash = scrape.mkhash(self.remove_control_chars(title))
doc = foidocsdb.get(dochash)
--- a/documents/scrapers/8e874a2fde8aa0ccdc6d14573d766540.py
+++ b/documents/scrapers/8e874a2fde8aa0ccdc6d14573d766540.py
@@ -18,10 +18,10 @@