From: Alex Sadleir
Date: Thu, 11 Apr 2013 12:22:35 +0000
Subject: datagov fixes
X-Git-Url: https://maxious.lambdacomplex.org/git/?p=disclosr.git&a=commitdiff&h=c64b3fe20debb60737f29c859d2bcf41ef0f70a7

---
datagov fixes: point CouchDB back at localhost, special-case qld datasets in
the CKAN export, add a data.qld.gov.au metadata scraper, paginate the gazette
scraper, treat an already-existing CKAN group (HTTP 409) as success, correct
more scraped date typos, and email scraper errors.

Former-commit-id: ed3ba96db4beeb126f802a3168476e27f298aeb8
---

--- a/documents/datagov-export.py
+++ b/documents/datagov-export.py
@@ -12,8 +12,8 @@
 #ckan = ckanclient.CkanClient(base_location='http://localhost:5000/api', api_key='b47b24cd-591d-40c1-8677-d73101d56d1b')
 ckan = ckanclient.CkanClient(base_location='http://data.disclosurelo.gs/api', api_key='482a9dd2-a976-4adf-ac77-d71d92a98a52')
-#couch = couchdb.Server('http://127.0.0.1:5984/')
-couch = couchdb.Server('http://192.168.1.113:5984/')
+couch = couchdb.Server('http://127.0.0.1:5984/')
+#couch = couchdb.Server('http://192.168.1.113:5984/')

 # http://code.activestate.com/recipes/578019-bytes-to-human-human-to-bytes-converter/
 SYMBOLS = {
@@ -91,6 +91,7 @@

 def name_munge(input_name):
     return munge(input_name.replace(' ', '').replace('.', '_').replace('&', 'and'))
+    #[:100]
     #return input_name.replace(' ', '').replace('.', '_').replace('&', 'and')
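The name_munge hunk above trims a dataset title into a CKAN-safe package name; munge() itself is defined elsewhere in datagov-export.py and is not shown in this patch, and the new #[:100] comment records CKAN's 100-character cap on package names. A hypothetical standalone sketch, assuming munge() lowercases and strips the characters CKAN rejects:

    import re

    def name_munge_sketch(input_name):
        # collapse spaces, dots and ampersands the way name_munge does,
        # then keep only lowercase alphanumerics, '-' and '_'
        # (hypothetical: munge()'s real body is elsewhere in this file)
        name = input_name.replace(' ', '').replace('.', '_').replace('&', 'and')
        name = re.sub('[^a-z0-9-_]', '', name.lower())
        return name[:100]  # CKAN caps package names at 100 characters

    print name_munge_sketch("Health & Hospitals Reform")  # healthandhospitalsreform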
@@ -117,32 +118,35 @@
 if __name__ == "__main__":
     for doc in docsdb.view('app/datasets'):
         print doc.id
-        if doc.value['url'] != "http://data.gov.au/data/":
+        if doc.value['url'] != "http://data.gov.au/data/" and doc.value['agencyID'] != "qld":
             # Collect the package metadata.
-            pkg_name = doc.value['url'].replace("http://data.gov.au/dataset/",'').replace('/','');
+            pkg_name = doc.value['url'].replace("http://data.gov.au/dataset/",'').replace('/','');
             tags = []
-            if len(doc.value['metadata']["Keywords / Tags"]) > 0:
-                if hasattr(doc.value['metadata']["Keywords / Tags"], '__iter__'):
-                    tags = tags + doc.value['metadata']["Keywords / Tags"]
-                else:
-                    tags = tags + [doc.value['metadata']["Keywords / Tags"]]
-            if 'data.gov.au Category' in doc.value['metadata'].keys() and len(doc.value['metadata']['data.gov.au Category']) > 0:
-                if hasattr(doc.value['metadata']['data.gov.au Category'], '__iter__'):
-                    tags = tags + doc.value['metadata']['data.gov.au Category']
-                else:
-                    tags = tags + [doc.value['metadata']['data.gov.au Category']]
-            tags = [re.sub('[^a-zA-Z0-9-_.]', '', tag.replace('&', 'and')).lower() for tag in tags if tag]
-            print tags
-            package_entity = {
-                'name': pkg_name,
-                'title': doc.value['metadata']['DCTERMS.Title'],
-                'url': doc.value['metadata']['DCTERMS.Source.URI'],
-                'tags': tags, #tags are mandatory?
-                'author': doc.value['metadata']["DCTERMS.Creator"],
-                'maintainer': doc.value['metadata']["DCTERMS.Creator"],
-                'licence_id': get_licence_id(doc.value['metadata']['DCTERMS.License']),
-                'notes': html2text.html2text(doc.value['metadata']['Description']),
-            }
+            if doc.value['agencyID'] == "AGIMO":
+                if len(doc.value['metadata']["Keywords / Tags"]) > 0:
+                    if hasattr(doc.value['metadata']["Keywords / Tags"], '__iter__'):
+                        tags = tags + doc.value['metadata']["Keywords / Tags"]
+                    else:
+                        tags = tags + [doc.value['metadata']["Keywords / Tags"]]
+                if 'data.gov.au Category' in doc.value['metadata'].keys() and len(doc.value['metadata']['data.gov.au Category']) > 0:
+                    if hasattr(doc.value['metadata']['data.gov.au Category'], '__iter__'):
+                        tags = tags + doc.value['metadata']['data.gov.au Category']
+                    else:
+                        tags = tags + [doc.value['metadata']['data.gov.au Category']]
+                tags = [re.sub('[^a-zA-Z0-9-_.]', '', tag.replace('&', 'and')).lower() for tag in tags if tag]
+                print tags
+                package_entity = {
+                    'name': pkg_name,
+                    'title': doc.value['metadata']['DCTERMS.Title'],
+                    'url': doc.value['metadata']['DCTERMS.Source.URI'],
+                    'tags': tags, #tags are mandatory?
+                    'author': doc.value['metadata']["DCTERMS.Creator"],
+                    'maintainer': doc.value['metadata']["DCTERMS.Creator"],
+                    'licence_id': get_licence_id(doc.value['metadata']['DCTERMS.License']),
+                    'notes': html2text.html2text(doc.value['metadata']['Description']),
+                }
+            if doc.value['agencyID'] == "qld":
+                package_entity = doc.value['metadata']
             try:
                 print package_entity
@@ -182,6 +186,8 @@
                 }
                 print group_entity
                 ckan.group_register_post(group_entity)
+            elif ckan.last_status == 409:
+                print "group already exists"
             else:
                 raise LoaderError('Unexpected status %s adding to group under \'%s\': %r' % (
                     ckan.last_status, pkg_name, e.args))
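The AGIMO branch above copes with "Keywords / Tags" arriving as either a single string or a list: in Python 2 a str has no __iter__, so hasattr(value, '__iter__') separates the two cases before the tags are lower-cased and stripped of characters CKAN rejects. A minimal standalone sketch of that pattern:

    import re

    def as_tag_list(value):
        # Python 2 idiom: lists/tuples have __iter__, str does not,
        # so this tells one keyword apart from a list of keywords
        if hasattr(value, '__iter__'):
            return list(value)
        return [value]

    def clean_tags(values):
        tags = []
        for value in values:
            tags = tags + as_tag_list(value)
        # same cleanup as the export script: '&' -> 'and', drop odd characters
        return [re.sub('[^a-zA-Z0-9-_.]', '', tag.replace('&', 'and')).lower() for tag in tags if tag]

    print clean_tags(["Health & Ageing", ["budget", "GP Super Clinics"]])
    # ['healthandageing', 'budget', 'gpsuperclinics']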
--- /dev/null
+++ b/documents/dataqld.py
@@ -1,1 +1,28 @@
+import sys, os
+import time
+import scrape
+from bs4 import BeautifulSoup
+from unidecode import unidecode
+import ckanclient
+
+# Instantiate the CKAN client.
+ckan = ckanclient.CkanClient(base_location='https://data.qld.gov.au/api')
+
+# Get the package list.
+package_list = ckan.package_register_get()
+for package_name in package_list:
+    # Get the details of a package.
+    (url, mime_type, html) = scrape.fetchURL(scrape.docsdb,
+        "https://data.qld.gov.au/dataset/" + package_name, "data", "qld", False)
+    hash = scrape.mkhash(scrape.canonurl(url))
+    print hash
+    doc = scrape.docsdb.get(hash)
+    if "metadata" not in doc.keys() or True:  # 'or True' forces a refresh on every run
+        ckan.package_entity_get(package_name)
+        package_entity = ckan.last_message
+        doc['type'] = "dataset"
+        doc['metadata'] = package_entity
+        print package_entity
+        scrape.docsdb.save(doc)
+
--- a/documents/gazette.py
+++ b/documents/gazette.py
@@ -5,20 +5,53 @@
 from unidecode import unidecode

-listurl = "http://gazettes.ag.gov.au/portal/govgazonline.nsf/publications?OpenView&Start=3960"
-(url, mime_type, listhtml) = scrape.fetchURL(scrape.docsdb,
-    listurl, "gazette", "AGD")
-soup = BeautifulSoup(listhtml)
-for row in soup.find_all('tr'):
-    if row.has_key('valign'):
-        for col in tr.find_all('td'):
-            print col.string
-        #url = scrape.fullurl(listurl, atag['href'])
-        #(url, mime_type, html) = scrape.fetchURL(scrape.docsdb,
-        #    url, "data", "AGIMO")
-        #hash = scrape.mkhash(scrape.canonurl(url))
-        #doc = scrape.docsdb.get(hash)
-        #print doc['metadata']
-        #scrape.docsdb.save(doc)
-        #time.sleep(2)
+#items = 3950
+items = 1
+while True:
+    print str(items) + " (" + str(items/25) + " screens to go)"
+    listurl = "http://gazettes.ag.gov.au/portal/govgazonline.nsf/publications?OpenView&Start=" + str(items)
+    (listurl, mime_type, listhtml) = scrape.fetchURL(scrape.docsdb,
+        listurl, "gazette", "AGD", False)
+    for line in listhtml.split('\n'):
+        soup = BeautifulSoup(line)
+        #print line
+        for row in soup.find_all('tr'):
+            print line
+            if row.has_key('valign'):
+                i = 0
+                date = ""
+                id = ""
+                type = ""
+                description = ""
+                name = ""
+                url = ""
+                for col in row.find_all('td'):
+                    #print ''.join(col.stripped_strings)
+                    if i == 0:
+                        date = ''.join(col.stripped_strings)
+                    if i == 1:
+                        id = ''.join(col.stripped_strings)
+                    if i == 2:
+                        type = ''.join(col.stripped_strings)
+                    if i == 3:
+                        description = ''.join(col.stripped_strings)
+                        for link in col.findAll('a'):
+                            if link.has_key("href"):
+                                url = link['href']
+                                name = ''.join(link.stripped_strings)
+                                print str(items) + " (" + str(items/25) + " screens to go)"
+                                print [date, id, type, description, name, url]
+                                itemurl = scrape.fullurl(listurl, url)
+                                (itemurl, mime_type, html) = scrape.fetchURL(scrape.docsdb,
+                                    itemurl, "gazette", "AGD", False)
+                                hash = scrape.mkhash(scrape.canonurl(itemurl))
+                                doc = scrape.docsdb.get(hash)
+                                doc['metadata'] = {"date": date, "id": id, "type": type, "description": description, "name": name, "url": url}
+                                scrape.docsdb.save(doc)
+                                #time.sleep(2)
+                    i = i + 1
+    items = items - 25
+    if items <= 0:
+        break

--- a/documents/genericScrapers.py
+++ b/documents/genericScrapers.py
@@ -201,7 +201,7 @@
     def getDate(self, content, entry, doc):
         date = ''.join(content.stripped_strings).strip()
         (a, b, c) = date.partition("(")
-        date = self.remove_control_chars(a.replace("Octber", "October").replace("1012","2012"))
+        date = self.remove_control_chars(a.replace("Octber", "October").replace("1012","2012").replace("Janurary","January"))
         print date
         edate = parse(date, dayfirst=True, fuzzy=True).strftime("%Y-%m-%d")
         print edate
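The getDate() change above patches known misspellings in agency pages ('Octber', 'Janurary', a '1012' year) before handing the text to dateutil, which does the real parsing with dayfirst=True, fuzzy=True. The same normalise-then-parse step as a standalone sketch:

    from dateutil.parser import parse

    # typo corrections seen in scraped pages; extend as new ones appear
    DATE_FIXES = {"Octber": "October", "Janurary": "January", "1012": "2012"}

    def clean_date(text):
        for bad, good in DATE_FIXES.items():
            text = text.replace(bad, good)
        # dayfirst/fuzzy matches how getDate calls dateutil
        return parse(text, dayfirst=True, fuzzy=True).strftime("%Y-%m-%d")

    print clean_date("3 Octber 1012")  # 2012-10-03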
-ne "0" ]; then echo "error"; - sleep 2; + sleep 2; fi done +if [ -s /tmp/disclosr-error ] ; then + mail -E -s "Disclosr errors" maxious@lambdacomplex.org < /tmp/disclosr-error ; +fi --- a/documents/scrape.py +++ b/documents/scrape.py @@ -13,8 +13,8 @@ import socket #couch = couchdb.Server('http://192.168.1.148:5984/') -couch = couchdb.Server('http://192.168.1.113:5984/') -#couch = couchdb.Server('http://127.0.0.1:5984/') +#couch = couchdb.Server('http://192.168.1.113:5984/') +couch = couchdb.Server('http://127.0.0.1:5984/') def mkhash(input):