From: Alex Sadleir
Date: Thu, 11 Apr 2013 12:22:35 +0000
Subject: datagov fixes
X-Git-Url: http://maxious.lambdacomplex.org/git/?p=disclosr.git&a=commitdiff&h=c64b3fe20debb60737f29c859d2bcf41ef0f70a7
---
datagov fixes

Former-commit-id: ed3ba96db4beeb126f802a3168476e27f298aeb8
---
--- a/documents/datagov-export.py
+++ b/documents/datagov-export.py
@@ -2,14 +2,19 @@
 import couchdb
 from ckanclient import CkanApiError
 import re
+import html2text # aaronsw :(
+
 
 class LoaderError(Exception):
     pass
 
 # Instantiate the CKAN client.
-ckan = ckanclient.CkanClient(base_location='http://localhost:5000/api',
-                             api_key='b47b24cd-591d-40c1-8677-d73101d56d1b')
-# (use your own api_key from http://thedatahub.org/user/me )
+#ckan = ckanclient.CkanClient(base_location='http://localhost:5000/api', api_key='b47b24cd-591d-40c1-8677-d73101d56d1b')
+ckan = ckanclient.CkanClient(base_location='http://data.disclosurelo.gs/api',
+                             api_key='482a9dd2-a976-4adf-ac77-d71d92a98a52')
+couch = couchdb.Server('http://127.0.0.1:5984/')
+#couch = couchdb.Server('http://192.168.1.113:5984/')
+
 # http://code.activestate.com/recipes/578019-bytes-to-human-human-to-bytes-converter/
 SYMBOLS = {
     'customary': ('B', 'KB', 'MB', 'GB', 'T', 'P', 'E', 'Z', 'Y'),
@@ -86,51 +91,113 @@
 def name_munge(input_name):
     return munge(input_name.replace(' ', '').replace('.', '_').replace('&', 'and'))
+    #[:100]
     #return input_name.replace(' ', '').replace('.', '_').replace('&', 'and')
 
-couch = couchdb.Server('http://127.0.0.1:5984/')
+
+def get_licence_id(licencename):
+    map = {
+        "Creative Commons - Attribution-Share Alike 2.0 Australia (CC-SA)\nThe downloadable version of the database is licensed under CC-BY-SA Creative Commons Attribution Share Alike and contains only the database fields that are released under that license. These fields are object title, object number, object description as well as temporal, spatial and dimension details. It also contains a persistent URL for each record.": 'cc-by-sa',
+        "CreativeCommonsAttributionNonCommercial30AustraliaCCBYNC30": 'cc-nc',
+        'Otherpleasespecify': 'notspecified',
+        '': 'notspecified',
+        "Publicly available data": 'notspecified',
+        "CreativeCommonsAttributionNoDerivativeWorks30AustraliaCCBYND30": "other-closed",
+        "CreativeCommonsAttributionNonCommercialNoDerivs30AustraliaCCBYNCND30": "other-closed",
+        'CreativeCommonsAttribution30AustraliaCCBY30': 'cc-by',
+        "Creative Commons - Attribution 2.5 Australia (CC-BY)": 'cc-by',
+        'CreativeCommonsAttributionCCBY25': 'cc-by',
+        "PublicDomain": 'other-pd',
+    }
+    if licencename not in map.keys():
+        raise Exception(licencename + " not found");
+    return map[licencename];
+
 docsdb = couch['disclosr-documents']
 
 if __name__ == "__main__":
     for doc in docsdb.view('app/datasets'):
         print doc.id
-        if doc.value['url'] != "http://data.gov.au/data/":
+        if doc.value['url'] != "http://data.gov.au/data/" and doc.value['agencyID'] != "qld":
             # Collect the package metadata.
-            pkg_name = name_munge(doc.value['metadata']['DCTERMS.Title'][:100])
-            tags = doc.value['metadata']["Keywords / Tags"]
-            if not hasattr(tags, '__iter__'):
-                tags = [tags]
-            [re.sub('[^a-zA-Z0-9-_]', '', tag).lower() for tag in tags]
-            package_entity = {
-                'name': pkg_name,
-                'title': doc.value['metadata']['DCTERMS.Title'],
-                'url': doc.value['metadata']['DCTERMS.Source.URI'],
-                'tags': tags,
-                'author': doc.value['metadata']["DCTERMS.Creator"],
-                'maintainer': doc.value['metadata']["DCTERMS.Creator"],
-                'licence_id': doc.value['metadata']['DCTERMS.License'], #todo licence id mapping
-                'notes': doc.value['metadata']['Description'],
-            }
+            pkg_name = doc.value['url'].replace("http://data.gov.au/dataset/",'').replace('/','');
+            tags = []
+            if doc.value['agencyID'] == "AGIMO":
+                if len(doc.value['metadata']["Keywords / Tags"]) > 0:
+                    if hasattr(doc.value['metadata']["Keywords / Tags"], '__iter__'):
+                        tags = tags + doc.value['metadata']["Keywords / Tags"]
+                    else:
+                        tags = tags + [doc.value['metadata']["Keywords / Tags"]]
+                if 'data.gov.au Category' in doc.value['metadata'].keys() and len(doc.value['metadata']['data.gov.au Category']) > 0:
+                    if hasattr(doc.value['metadata']['data.gov.au Category'], '__iter__'):
+                        tags = tags + doc.value['metadata']['data.gov.au Category']
+                    else:
+                        tags = tags + [doc.value['metadata']['data.gov.au Category']]
+                tags = [re.sub('[^a-zA-Z0-9-_.]', '', tag.replace('&', 'and')).lower() for tag in tags if tag]
+                print tags
+                package_entity = {
+                    'name': pkg_name,
+                    'title': doc.value['metadata']['DCTERMS.Title'],
+                    'url': doc.value['metadata']['DCTERMS.Source.URI'],
+                    'tags': tags, #tags are mandatory?
+                    'author': doc.value['metadata']["DCTERMS.Creator"],
+                    'maintainer': doc.value['metadata']["DCTERMS.Creator"],
+                    'licence_id': get_licence_id(doc.value['metadata']['DCTERMS.License']),
+                    'notes': html2text.html2text(doc.value['metadata']['Description']),
+                }
+            if doc.value['agencyID'] == "qld":
+                package_entity = doc.value['metadata']
+
             try:
-                #print doc.id
+                print package_entity
                 ckan.package_register_post(package_entity)
             except CkanApiError, e:
                 if ckan.last_status == 409:
-                    print "already exists"
+                    print "package already exists"
                 else:
                     raise LoaderError('Unexpected status %s checking for package under \'%s\': %r' % (
                         ckan.last_status, pkg_name, e.args))
-            print package_entity
-            #todo add to organisation (author/creator/maintainer)
-            #if 'data.gov.au Category' in doc.value['metadata'].keys(): #todo add to group
+
+            #add to group
+
+            group_name = name_munge(doc.value['metadata']["Agency"][:100])
+            try:
+                print ckan.group_entity_get(group_name)
+
+                # Update the group details
+                group_entity = ckan.last_message
+                print "group exists"
+                if 'packages' in group_entity.keys():
+                    group_entity['packages'] = list(set(group_entity['packages'] + [pkg_name]))
+                else:
+                    group_entity['packages'] = [pkg_name]
+                ckan.group_entity_put(group_entity)
+            except CkanApiError, e:
+                if ckan.last_status == 404:
+                    print "group does not exist, creating"
+                    group_entity = {
+                        'name': group_name,
+                        'title': doc.value['metadata']["Agency"],
+                        'description': doc.value['metadata']["Agency"],
+                        'packages': [pkg_name],
+                        # 'type': "organization" # not allowed via API, use database query
+                        # update "group" set type = 'organization';
+                    }
+                    print group_entity
+                    ckan.group_register_post(group_entity)
+                elif ckan.last_status == 409:
+                    print "group already exists"
+                else:
+                    raise LoaderError('Unexpected status %s adding to group under \'%s\': %r' % (
+                        ckan.last_status, pkg_name, e.args))
             if 'Download' in doc.value['metadata'].keys():
                 try:
                     pkg = ckan.package_entity_get(pkg_name)
                     resources = pkg.get('resources', [])
                     if len(resources) < len(doc.value['metadata']['Download']):
                         for resource in doc.value['metadata']['Download']:
-                            #print resource
+
                             # http://docs.ckan.org/en/ckan-1.7/domain-model-resource.html
                             # (KML/KMZ) / (Shapefile) /(Other)
                             format = "plain"
@@ -141,6 +208,7 @@
                             name = resource['href']
                             if 'name' in resource.keys():
                                 name = resource['name']
+                            print resource
                             ckan.add_package_resource(pkg_name, resource['href'], name=name, resource_type='data',
                                                       format=format,
                                                       size=human2bytes(resource['size'].replace(',', '')))
                     else:
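
The group handling added above follows a get-then-create-or-update pattern against the pre-2.0 ckanclient API. A minimal sketch of that pattern in isolation, using only the ckanclient calls already present in this patch; the API key, group name and package name below are placeholder examples, and add_package_to_group is an illustrative helper name, not part of the patch:

import ckanclient
from ckanclient import CkanApiError

def add_package_to_group(ckan, group_name, pkg_name):
    # Fetch the group; if it exists, merge the package into its package list,
    # otherwise register a new group containing just this package.
    try:
        ckan.group_entity_get(group_name)
        group_entity = ckan.last_message
        group_entity['packages'] = list(set(group_entity.get('packages', []) + [pkg_name]))
        ckan.group_entity_put(group_entity)
    except CkanApiError:
        if ckan.last_status == 404:
            ckan.group_register_post({'name': group_name,
                                      'title': group_name,
                                      'description': group_name,
                                      'packages': [pkg_name]})
        elif ckan.last_status != 409:
            raise

ckan = ckanclient.CkanClient(base_location='http://data.disclosurelo.gs/api', api_key='placeholder-key')
add_package_to_group(ckan, 'exampleagency', 'example-dataset')

Treating a 409 as a no-op mirrors the patch's handling of groups and packages that already exist.
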
\n"; +} +?> + --- a/documents/datagov.py +++ b/documents/datagov.py @@ -13,7 +13,7 @@ if atag.has_key('href'): url = scrape.fullurl(listurl, atag['href']) (url, mime_type, html) = scrape.fetchURL(scrape.docsdb, - url, "data", "AGIMO") + url, "data", "AGIMO", False) hash = scrape.mkhash(scrape.canonurl(url)) doc = scrape.docsdb.get(hash) if "metadata" not in doc.keys() or True: --- /dev/null +++ b/documents/dataqld.py @@ -1,1 +1,28 @@ +import sys, os +import time +import scrape +from bs4 import BeautifulSoup +from unidecode import unidecode +import ckanclient + +# Instantiate the CKAN client. +ckan = ckanclient.CkanClient(base_location='https://data.qld.gov.au/api') + +# Get the package list. +package_list = ckan.package_register_get() +for package_name in package_list: +# Get the details of a package. + (url, mime_type, html) = scrape.fetchURL(scrape.docsdb, + "https://data.qld.gov.au/dataset/"+package_name , "data", "qld", False) + hash = scrape.mkhash(scrape.canonurl(url)) + print hash + doc = scrape.docsdb.get(hash) + if "metadata" not in doc.keys() or True: + ckan.package_entity_get(package_name) + package_entity = ckan.last_message + doc['type'] = "dataset" + doc['metadata'] = package_entity + print package_entity + scrape.docsdb.save(doc) + --- a/documents/gazette.py +++ b/documents/gazette.py @@ -5,20 +5,53 @@ from unidecode import unidecode -listurl = "http://gazettes.ag.gov.au/portal/govgazonline.nsf/publications?OpenView&Start=3960" -(url, mime_type, listhtml) = scrape.fetchURL(scrape.docsdb, - listurl, "gazette", "AGD") -soup = BeautifulSoup(listhtml) -for row in soup.find_all('tr'): - if row.has_key('valign'): - for col in tr.find_all('td'): - print col.string - #url = scrape.fullurl(listurl, atag['href']) - #(url, mime_type, html) = scrape.fetchURL(scrape.docsdb, - # url, "data", "AGIMO") - #hash = scrape.mkhash(scrape.canonurl(url)) - #doc = scrape.docsdb.get(hash) - #print doc['metadata'] - #scrape.docsdb.save(doc) - #time.sleep(2) +items = 3950 +items = 1 +while True: + print str(items) + " (" +str(items/25) +" screens to go)" + listurl = "http://gazettes.ag.gov.au/portal/govgazonline.nsf/publications?OpenView&Start=" + str(items) + (listurl, mime_type, listhtml) = scrape.fetchURL(scrape.docsdb, + listurl, "gazette", "AGD", False) + for line in listhtml.split('\n'): + soup = BeautifulSoup(line) + #print line + for row in soup.find_all('tr'): + print line + if row.has_key('valign'): + i = 0 + date = "" + id = "" + type = "" + description = "" + name = "" + url = "" + for col in soup.find_all('td'): + #print ''.join(col.stripped_strings) + if i == 0: + date = ''.join(col.stripped_strings) + if i == 1: + id = ''.join(col.stripped_strings) + if i == 2: + type = ''.join(col.stripped_strings) + if i == 3: + description = ''.join(col.stripped_strings) + for link in col.findAll('a'): + if link.has_key("href"): + url = link['href'] + name = ''.join(link.stripped_strings) + print str(items) + " (" +str(items/25) +" screens to go)" + print [date, id, type, description, name, url] + itemurl = scrape.fullurl(listurl, url) + (itemurl, mime_type, html) = scrape.fetchURL(scrape.docsdb, + itemurl, "gazette", "AGD", False) + hash = scrape.mkhash(scrape.canonurl(itemurl)) + doc = scrape.docsdb.get(hash) + doc['metadata'] = {"date": date, "date": id, "type":type, "description":description,"name": name,"url": url} + scrape.docsdb.save(doc) + #time.sleep(2) + i = i + 1; + items = items - 25 + if items <= 0: + break + --- a/documents/genericScrapers.py +++ b/documents/genericScrapers.py @@ -201,7 
--- a/documents/genericScrapers.py
+++ b/documents/genericScrapers.py
@@ -201,7 +201,7 @@
     def getDate(self, content, entry, doc):
         date = ''.join(content.stripped_strings).strip()
         (a, b, c) = date.partition("(")
-        date = self.remove_control_chars(a.replace("Octber", "October").replace("1012","2012"))
+        date = self.remove_control_chars(a.replace("Octber", "October").replace("1012","2012").replace("Janurary","January"))
         print date
         edate = parse(date, dayfirst=True, fuzzy=True).strftime("%Y-%m-%d")
         print edate
--- a/documents/runScrapers.sh
+++ b/documents/runScrapers.sh
@@ -1,10 +1,14 @@
-for f in scrapers/*.py;
-	do echo "Processing $f file..";
-	python $f;
+rm /tmp/disclosr-error
+for f in scrapers/*.py; do
+	echo "Processing $f file..";
+	python $f 2>/tmp/disclosr-error;
 	if [ "$?" -ne "0" ]; then
 		echo "error";
-		sleep 2;
+		sleep 2;
 	fi
 done
+if [ -s /tmp/disclosr-error ] ; then
+	mail -E -s "Disclosr errors" maxious@lambdacomplex.org < /tmp/disclosr-error ;
+fi
--- a/documents/scrape.py
+++ b/documents/scrape.py
@@ -12,6 +12,11 @@
 import urlparse
 import socket
 
+#couch = couchdb.Server('http://192.168.1.148:5984/')
+#couch = couchdb.Server('http://192.168.1.113:5984/')
+couch = couchdb.Server('http://127.0.0.1:5984/')
+
+
 def mkhash(input):
     return hashlib.md5(input).hexdigest().encode("utf-8")
 
@@ -104,14 +109,11 @@
     if doc == None:
         doc = {'_id': hash, 'agencyID': agencyID, 'url': url, 'fieldName': fieldName, 'type': 'website'}
     else:
-        if (('page_scraped' in doc) and (time.time() - doc['page_scraped']) < 60 * 24 * 14):
+        if (('page_scraped' in doc) and ((time.time() - doc['page_scraped']) < 60 * 24 * 14) or (scrape_again == False)):
             print "Uh oh, trying to scrape URL again too soon!" + hash
             last_attachment_fname = doc["_attachments"].keys()[-1]
             last_attachment = docsdb.get_attachment(doc, last_attachment_fname)
             content = last_attachment
-            return (doc['url'], doc['mime_type'], content.read())
-        if scrape_again == False:
-            print "Not scraping this URL again as requested"
             return (doc['url'], doc['mime_type'], content.read())
 
     req.add_header("User-Agent", "Mozilla/4.0 (compatible; Prometheus webspider; owner maxious@lambdacomplex.org)")
@@ -207,9 +209,6 @@
         #print linkurl
         scrapeAndStore(docsdb, linkurl, depth - 1, fieldName, agencyID)
 
-#couch = couchdb.Server('http://192.168.1.148:5984/')
-#couch = couchdb.Server('http://192.168.1.113:5984/')
-couch = couchdb.Server('http://127.0.0.1:5984/')
 # select database
 agencydb = couch['disclosr-agencies']
 docsdb = couch['disclosr-documents']
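
For reference, datagov-export.py sizes each resource with human2bytes from the ActiveState recipe linked near the top of the patch, after stripping commas from the reported size. A condensed sketch of that conversion, covering only the 'customary' symbols shown in the diff; this is not the recipe verbatim:

SYMBOLS = {
    'customary': ('B', 'KB', 'MB', 'GB', 'T', 'P', 'E', 'Z', 'Y'),
}

def human2bytes(s):
    # "5.2 MB" -> 5452595, "1 GB" -> 1073741824; plain numbers fall through as bytes.
    s = s.strip()
    for i, symbol in reversed(list(enumerate(SYMBOLS['customary'][1:], 1))):
        if s.upper().endswith(symbol):
            return int(float(s[:-len(symbol)]) * (1 << (i * 10)))
    return int(float(s.rstrip('Bb ')))

print human2bytes("5.2 MB")  # 5452595
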