load datasets into scraper cache then into CKAN FileStore
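
Adds a datasetGroups CouchDB view and documents/datagov-export-groups.py to
rebuild CKAN groups from data.gov.au category tags, uploads cached copies of
dataset resources into the CKAN FileStore, adds a resource link-health report
(documents/datagov-resourcereport.py) and a data.qld.gov.au dataset scraper,
and routes scraper errors to stderr so runScrapers.sh can email them.
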
Former-commit-id: ef39f297007c1ad1e7edee2c2819723b076ae3f4
--- a/admin/refreshDesignDoc.php
+++ b/admin/refreshDesignDoc.php
@@ -42,6 +42,7 @@
$obj->views->byWebServer->map = "function(doc) {\n emit(doc.web_server, doc);\n}";
$obj->views->datasets->map = "function(doc) {\nif (doc.fieldName == \"data\") {\n emit(doc._id, doc);\n}\n}";
+$obj->views->datasetGroups->map = "function(doc) {\nif (doc.fieldName == \"data\") {\n doc.metadata[\"data.gov.au Category\"] && doc.metadata[\"data.gov.au Category\"].forEach(function(tag) {\n emit(tag, doc.url); \n });\n}\n}";
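+// datasetGroups: emits one row per "data.gov.au Category" tag, keyed by tag with the dataset URL as the value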
$obj->views->getValidationRequired->map = "function(doc) {\nif (doc.mime_type == \"text/html\" \n&& typeof(doc.validation) == \"undefined\") {\n emit(doc._id, doc._attachments);\n}\n}";
$docdb->save($obj, true);
--- /dev/null
+++ b/disclosr.iml
@@ -1,1 +1,16 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<module type="WEB_MODULE" version="4">
+ <component name="FacetManager">
+ <facet type="Python" name="Python">
+ <configuration sdkName="" />
+ </facet>
+ </component>
+ <component name="NewModuleRootManager" inherit-compiler-output="true">
+ <exclude-output />
+ <content url="file://$MODULE_DIR$" />
+ <orderEntry type="inheritedJdk" />
+ <orderEntry type="sourceFolder" forTests="false" />
+ </component>
+</module>
+
--- /dev/null
+++ b/documents/datagov-export-groups.py
@@ -1,1 +1,81 @@
+import ckanclient
+import couchdb
+from ckanclient import CkanApiError
+import re
+
+class LoaderError(Exception):
+ pass
+
+# Instantiate the CKAN client.
+#ckan = ckanclient.CkanClient(base_location='http://localhost:5000/api', api_key='b47b24cd-591d-40c1-8677-d73101d56d1b')
+api_key = 'ff34526e-f794-4068-8235-fcbba38cd8bc'
+ckan = ckanclient.CkanClient(base_location='http://data.disclosurelo.gs/api',
+ api_key=api_key)
+couch = couchdb.Server('http://127.0.0.1:5984/')
+#couch = couchdb.Server('http://192.168.1.113:5984/')
+
+# https://github.com/okfn/ckanext-importlib
+def munge(name):
+ # convert spaces to underscores
+ name = re.sub(' ', '_', name).lower()
+ # convert symbols to dashes
+ name = re.sub('[:]', '_-', name).lower()
+ name = re.sub('[/]', '-', name).lower()
+ # take out not-allowed characters
+ name = re.sub('[^a-zA-Z0-9-_]', '', name).lower()
+ # remove double underscores
+ name = re.sub('__', '_', name).lower()
+ return name
+
+
+def name_munge(input_name):
+ return munge(input_name.replace(' ', '').replace('.', '_').replace('&', 'and'))
+
+
+docsdb = couch['disclosr-documents']
+
+if __name__ == "__main__":
+ groups = {}
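+    # build a map of group name -> dataset package names from the datasetGroups view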
+ for doc in docsdb.view('app/datasetGroups'):
+ group_name = doc.key
+ if group_name != "Not specified":
+ pkg_name = filter(lambda x: x in '0123456789abcdefghijklmnopqrstuvwxyz-_',
+                              doc.value.replace("http://data.gov.au/dataset/", '').replace('/', '')[:100])
+ if group_name in groups.keys():
+ groups[group_name] = list(set(groups[group_name] + [pkg_name]))
+ else:
+ groups[group_name] = [pkg_name]
+
+ # add dataset to group(s)
+ for group_name in groups.keys():
+ if group_name != "Not specified":
+ group_url = name_munge(group_name[:100])
+ print group_name
+ print groups[group_name]
+ try:
+ # Update the group details
+ group_entity = ckan.group_entity_get(group_url)
+ print "group "+group_name+" exists"
+ if 'packages' in group_entity.keys():
+ group_entity['packages'] = list(set(group_entity['packages'] + groups[group_name]))
+ else:
+ group_entity['packages'] = groups[group_name]
+ ckan.group_entity_put(group_entity)
+ except CkanApiError, e:
+ if ckan.last_status == 404:
+ print "group "+group_name+" does not exist, creating"
+ group_entity = {
+ 'name': group_url,
+ 'title': group_name,
+ 'description': group_name,
+ 'packages': groups[group_name]
+ }
+ #print group_entity
+ ckan.group_register_post(group_entity)
+ elif ckan.last_status == 409:
+ print "group already exists"
+ else:
+                    raise LoaderError('Unexpected status %s adding to group under \'%s\': %r' % (
+                        ckan.last_status, group_name, e.args))
+
--- a/documents/datagov-export.py
+++ b/documents/datagov-export.py
@@ -1,19 +1,92 @@
+# coding=utf-8
import ckanclient
import couchdb
from ckanclient import CkanApiError
import re
import html2text # aaronsw :(
+import ckanapi # https://github.com/open-data/ckanapi
+import scrape
+import datetime, os, hashlib
class LoaderError(Exception):
pass
+
+def add_package_resource_cachedurl(ckan, package_name, url, name, format, size, **kwargs):
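+    # fetch the resource through the scrape.py cache, upload it to the CKAN FileStore
+    # via the storage API form upload, then append it as a resource on the named package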
+ # fileupload
+ ts = datetime.datetime.isoformat(datetime.datetime.now()).replace(':', '').split('.')[0]
+
+ file_key = os.path.join(ts, name)
+
+ auth_dict = ckan.storage_auth_get('/form/' + file_key, {})
+
+ fields = [(kv['name'].encode('ascii'), kv['value'].encode('ascii'))
+ for kv in auth_dict['fields']]
+ (url, mime_type, content) = scrape.fetchURL(scrape.docsdb,
+ url, "dataset_resource", "AGIMO", False)
+
+ files = [('file', os.path.basename(file_key), content)]
+
+ errcode, body = ckan._post_multipart(auth_dict['action'].encode('ascii'), fields, files)
+
+ if errcode == 200:
+ file_metadata = ckan.storage_metadata_get(file_key)
+ (url, msg) = file_metadata['_location'], ''
+ else:
+ (url, msg) = '', body
+ # fileupload done
+
+ if url == '':
+ raise CkanApiError(msg)
+    m = hashlib.sha1(content)
+    #todo mime-type detection based on content
+ r = dict(name=name,
+ mimetype=mime_type,
+ hash=m.hexdigest(), size=size, url=url)
+
+ r.update(kwargs)
+    if 'name' not in r: r['name'] = url
+
+ p = ckan.package_entity_get(package_name)
+ p['resources'].append(r)
+ return ckan.package_entity_put(p)
+
+
# Instantiate the CKAN client.
-#ckan = ckanclient.CkanClient(base_location='http://localhost:5000/api', api_key='b47b24cd-591d-40c1-8677-d73101d56d1b')
-ckan = ckanclient.CkanClient(base_location='http://data.disclosurelo.gs/api',
- api_key='482a9dd2-a976-4adf-ac77-d71d92a98a52')
-#couch = couchdb.Server('http://127.0.0.1:5984/')
-couch = couchdb.Server('http://192.168.1.113:5984/')
+api_key = 'ff34526e-f794-4068-8235-fcbba38cd8bc'
+server = 'data.disclosurelo.gs'
+
+ckan = ckanclient.CkanClient(base_location='http://' + server + '/api',
+ api_key=api_key)
+ckandirect = ckanapi.RemoteCKAN('http://' + server, api_key=api_key)
+couch = couchdb.Server('http://127.0.0.1:5984/')
+#couch = couchdb.Server('http://192.168.1.113:5984/')
+
+import urllib
+import urlparse
+
+
+def url_fix(s, charset='utf-8'):
+ """Sometimes you get an URL by a user that just isn't a real
+ URL because it contains unsafe characters like ' ' and so on. This
+ function can fix some of the problems in a similar way browsers
+ handle data entered by the user:
+
+ >>> url_fix(u'http://de.wikipedia.org/wiki/Elf (Begriffsklärung)')
+ 'http://de.wikipedia.org/wiki/Elf%20%28Begriffskl%C3%A4rung%29'
+
+ :param charset: The target charset for the URL if the url was
+ given as unicode string.
+ """
+ if isinstance(s, unicode):
+ s = s.encode(charset, 'ignore')
+ if not urlparse.urlparse(s).scheme:
+ s = "http://" + s
+ scheme, netloc, path, qs, anchor = urlparse.urlsplit(s)
+ path = urllib.quote(path, '/%')
+ qs = urllib.quote_plus(qs, ':&=')
+ return urlparse.urlunsplit((scheme, netloc, path, qs, anchor))
# http://code.activestate.com/recipes/578019-bytes-to-human-human-to-bytes-converter/
SYMBOLS = {
@@ -24,6 +97,7 @@
'iec_ext': ('byte', 'kibi', 'mebi', 'gibi', 'tebi', 'pebi', 'exbi',
'zebi', 'yobi'),
}
+
def human2bytes(s):
"""
@@ -53,6 +127,9 @@
...
ValueError: can't interpret '12 foo'
"""
+    if s is None:
+        return 0
+    s = s.replace(',', '')
init = s
num = ""
while s and s[0:1].isdigit() or s[0:1] == '.':
@@ -91,7 +168,6 @@
def name_munge(input_name):
return munge(input_name.replace(' ', '').replace('.', '_').replace('&', 'and'))
- #return input_name.replace(' ', '').replace('.', '_').replace('&', 'and')
def get_licence_id(licencename):
@@ -112,105 +188,142 @@
raise Exception(licencename + " not found");
return map[licencename];
+
+gooddata = ["afl-in-victoria", "annual-budget-initiatives-by-suburb-brisbane-city-council"]
+#athletics-in-victoria-gfyl,bicycle-racks-mosman-municipal-council,boat-ramps-brisbane-city-council,brisbane-access-ratings-database,bus-stops-brisbane-city-council,cemeteries-brisbane-city-council,cfa-locations,citycycle-stations-brisbane-city-council,community-gardens-brisbane-city-council,community-halls-brisbane-city-council,cooking-classes-gfyl,court-locations-victoria,customer-service-centres-brisbane-city-council,dance-in-victoria-gfyl,disability-activity-gfyl,dog-parks-brisbane-city-council,ferry-terminals-brisbane-city-council,fishing-club-in-victoria-gfyl,fitness-centres-in-victoria-gfyl,gardens-reserves-gfyl,golf-courses-brisbane-city-council,gymnastics-in-victoria-gfyl,historic-cemeteries-brisbane-city-council,ice-skating-centres-gfyl,immunisation-clinics-brisbane-city-council,libraries-brisbane-city-council,licenced-venues-victoria,lifesaving-locations-victoria,loading-zones-brisbane-city-council,major-projects-victoria,markets-in-victoria,martial-arts-in-victoria-gfyl,melbourne-water-use-by-postcode,members-of-parliament-both-houses-nsw,members-of-the-legislative-assembly-nsw,members-of-the-legislative-council-nsw,mfb-locations-vic,ministers-of-the-nsw-parliament,mosman-local-government-area,mosman-rider-route,mosman-wwii-honour-roll,neighbourhood-houses-gfyl,news-feeds-mosman-municipal-council,off-street-car-parks-mosman-municipal-council,orienteering-clubs-gfyl,parking-meter-areas-brisbane-city-council,parks-and-reserves-mosman-municipal-council,parks-brisbane-city-council,personal-training-gfyl,picnic-areas-brisbane-city-council,playgrounds-brisbane-city-council,playgrounds-mosman-municipal-council,police-region-crime-statistics-victoria,police-service-area-crime-statistics-victoria,pony-clubs-in-victoria-gfyl,prison-locations-victoria,public-amenities-maintained-by-mosman-council,public-art-brisbane-city-council,public-internet-locations-vic,public-toilets-brisbane-city-council,racecourse-locations-victoria,recent-development-applications-mosman-municipal-council,recreation-groups-gfyl,recreational-fishing-spots,regional-business-centres-brisbane-city-council,reports-of-swooping-birds-mosman-municipal-council,restricted-parking-areas-brisbane-city-council,rollerskating-centres-in-victoria-gfyl,sailing-clubs-gfyl,school-locations-victoria,shadow-ministers-of-the-nsw-parliament,skate-parks-gfyl,sporting-clubs-and-organisations-gfyl,stakeboard-parks-brisbane-city-council,state-bodies-gfyl,street-names-brisbane-city-council,suburbs-and-adjoining-suburbs-brisbane-city-council,swimming-pools-brisbane-city-council,swimming-pools-gfyl,tennis-courts-brisbane-city-council,top-40-book-club-reads-brisbane-city-council,tracks-and-trails-gfyl,triathlon-clubs-gfyl,urban-water-restrictions-victoria,veterinary-services-in-mosman,victorian-microbreweries,volunteering-centres-services-and-groups-victoria,walking-groups-gfyl,ward-offices-brisbane-city-council,waste-collection-days-brisbane-city-council,waste-transfer-stations-brisbane-city-council,water-consumption-in-melbourne,water-sports-in-victoria-gfyl,wifi-hot-spots-brisbane-city-council,yoga-pilates-and-tai-chi-in-victoria-gfyl,2809cycling-in-new-south-wales-what-the-data-tells-us2809-and-related-data,act-barbecue-bbq-locations,act-tafe-locations,ausindustry-locations,austender-contract-notice-export,austender-contract-notice-export,austender-contract-notice-export,austender-contract-notice-export,austender-contract-notice-export,austender-contract-notice-export,austender-contract-notice-export,austender-contract-notice-export,
+#australian-gas-light-company-maps,australian-gas-light-company-maps,australian-ports,australian-public-service-statistical-bulletin-2011-12,australian-public-service-statistical-bulletin-snapshot-at-december-31-2011,australian-public-service-statistical-bulletin-tables-0910,austrics-timetable-set,capital-works-call-tender-schedule,collection-item-usage-state-library-of-victoria,country-and-commodity-trade-data-spreadsheet,country-and-commodity-trade-data-spreadsheet-2,country-by-level-of-processing-trade-data-spreadsheet,crime-incident-type-and-frequency-by-capital-city-and-nationally,csiro-locations,data-from-the-oaic-public-sector-information-survey-2012,data-from-the-oaic-public-sector-information-survey-2012,data-from-the-oaic-public-sector-information-survey-2012,department-of-finance-and-deregulation-office-locations,digitised-maps,diisr-division-locations-excluding-ausindustry-enterprise-connect-and-nmi,diisr-locations,diisr-portfolio-agency-locations-excluding-csiro,distance-to-legal-service-providers-from-disadvantaged-suburbs,enterprise-connect-locations,fire-insurance-maps-sydney-block-plans-1919-1940,fire-insurance-maps-sydney-block-plans-1919-1940,first-fleet-collection,first-fleet-collection,first-fleet-maps,first-fleet-maps,freedom-of-information-annual-estimated-costs-and-staff-time-statistical-data-2011-12,freedom-of-information-quarterly-request-and-review-statistical-data-2011-12,freedom-of-information-requests-estimated-costs-and-charges-collected-1982-83-to-2011-12,higher-education-course-completions,higher-education-enrolments,historical-australian-government-contract-data,historical-australian-government-contract-data,historical-australian-government-contract-data,historical-australian-government-contract-data,historical-australian-government-contract-data,historical-australian-government-contract-data,historical-australian-government-contract-data,historical-australian-government-contract-data,historical-australian-government-contract-data,historical-australian-government-contract-data,historical-australian-government-contract-data,historical-australian-government-contract-data,historical-australian-government-contract-data,historical-australian-government-contract-data,historical-australian-government-contract-data,journey-planner-data-nt,library-catalogue-search-terms-state-library-of-victoria,location-of-act-schools,location-of-centrelink-offices,location-of-european-wasps-nests,location-of-lawyers-and-legal-service-providers-by-town,location-of-legal-assistance-service-providers,location-of-medicare-offices,location-of-medicare-offices,maps-of-the-southern-hemisphere-16th-18th-centuries,maps-of-the-southern-hemisphere-16th-18th-centuries,music-queensland,national-measurement-institute-locations,new-south-wales-officers-and-men-of-the-australian-imperial-force-a-i-f-and-the-australian-naval-for,new-south-wales-officers-and-men-of-the-australian-imperial-force-a-i-f-and-the-australian-naval-for,photographs-of-nsw-life-pre-1955,photographs-of-nsw-life-pre-1955,photographs-of-sydney-before-1885,photographs-of-sydney-before-1885,picture-queensland,plgr-28093-playgrounds-act,police-station-locations,queensland-public-libraries,rare-printed-books,rare-printed-books,real-estate-maps,regional-australia-funding-projects,sa-memory-state-library-of-south-australia,search-engine-terms-state-library-of-victoria,south-australian-photographs-state-library-of-south-australia,south-australian-sheet-music-state-library-of-south-australia,sydney-bond-store-maps-1894,
+#sydney-bond-store-maps-1894,sydney-maps-1917,sydney-maps-1917,tafe-institute-locations-victoria,tafe-sa-campus-locations,tolt-public-toilets-act,victorian-public-library-branches-state-library-of-victoria,western-australia-public-library-network,world-war-one-photographs-by-frank-hurley,world-war-one-photographs-by-frank-hurley,citycat-timetables-brisbane-city-council,cityferry-timetables-brisbane-city-council,cost-of-salinity-to-local-infrastructure-1996-97-summary-of-component-costs-of-salinity-by-reporting,cost-of-salinity-to-local-infrastructure-1996-97-summary-of-component-costs-of-salinity-by-reporting,downstream-cost-calculator-model-and-data-for-199697-or-2001-prices,economics-of-australian-soil-conditions-199697-limiting-factor-or-relative-yield-min-of-ry_salt2000-,geographical-names-register-gnr-of-nsw,victorian-dryland-salinity-assessment-2000-d01cac_ramsar_final-xls,victorian-dryland-salinity-assessment-2000-d02cac_fauna_final-xls,victorian-dryland-salinity-assessment-2000-d03cac_fauna_dist_final-xls,victorian-dryland-salinity-assessment-2000-dc04cac_hydrol_final-xls,victorian-dryland-salinity-assessment-2000-dc05cac_wetland_final-xls,victorian-dryland-salinity-assessment-2000-dc06cac_util_final-xls,victorian-dryland-salinity-assessment-2000-dc07cac_road_final-xls,victorian-dryland-salinity-assessment-2000-dc08cac_towns_final-xls,victorian-dryland-salinity-assessment-2000-dc09cac_flora_final-xls,victorian-dryland-salinity-assessment-2000-dc10cac_flora_dist_final-xls,victorian-dryland-salinity-assessment-2000-dc12cac_infrastructure-xls,victorian-dryland-salinity-assessment-2000-dc13cac_natural_envt-xls,victorian-dryland-salinity-assessment-2000-dc14cac_agriculture-xls,victorian-dryland-salinity-assessment-2000-dc16cac_agric_cost-xls,victorian-dryland-salinity-assessment-2000-dc17cac_shallow_wt-xls,victorian-dryland-salinity-assessment-2000-dc18cac_agric_cost_time-xls,victorian-dryland-salinity-assessment-2000-dc21cac_water_resources_new-xls,victorian-dryland-salinity-assessment-2000-dc22cac_risk-xls,licensed-broadcasting-transmitter-data,nsw-crime-data,recorded-crime-dataset-nsw,crime-statistics-in-nsw-by-month,2001-02-to-2007-08-local-government-survey-victoria,2009-green-light-report,annual-statistical-reports-fire-brigades-nsw-200304,annual-statistical-reports-fire-brigades-nsw-200405,annual-statistical-reports-fire-brigades-nsw-200506,annual-statistical-reports-fire-brigades-nsw-200607,arts-on-the-map,assets-and-liabilities-of-australian-located-operations,assets-of-australian-located-operations,assets-of-australian-located-operations-by-country,assets-of-financial-institutions,back-issues-of-monthly-banking-statistics,banks-assets,banks-consolidated-group-capital,banks-consolidated-group-impaired-assets,banks-consolidated-group-off-balance-sheet-business,banks-liabilities,building-societies-selected-assets-and-liabilities,byteback2842-locations-vic,cash-management-trusts,city-of-melbourne-street-furniture-database,community-services-nsw,consolidated-exposures-immediate-and-ultimate-risk-basis,consolidated-exposures-immediate-risk-basis-foreign-claims-by-country,consolidated-exposures-immediate-risk-basis-international-claims-by-country,consolidated-exposures-ultimate-risk-basis,consolidated-exposures-ultimate-risk-basis-foreign-claims-by-country,cosolidated-exposures-immediate-risk-basis,credit-unions-selected-assets-and-liabilities,daily-net-foreign-exchange-transactions,detox-your-home,education-national-assessment-program-literacy-and-numeracy-nsw,employment-data-by-nsw-regions,
+#excise-beer-clearance-data-updated-each-month-beer-clearance-summary-data,finance-companies-and-general-financiers-selected-assets-and-liabilities,foreign-exchange-transactions-and-holdings-of-official-reserve-assets,half-yearly-life-insurance-bulletin-december-2010,health-behaviours-in-nsw,international-liabilities-by-country-of-the-australian-located-operations-of-banks-and-rfcs,liabilities-and-assets-monthly,liabilities-and-assets-weekly,liabilities-of-australian-located-operations,life-insurance-offices-statutory-funds,managed-funds,monetary-policy-changes,money-market-corporations-selected-assets-and-liabilities,monthly-airport-traffic-data-for-top-ten-airports-january-1985-to-december-2008,monthly-banking-statistics-april-2011,monthly-banking-statistics-june-2011,monthly-banking-statistics-may-2011,open-market-operations-2009-to-current,projected-households-vic-rvic-msd-2006-2056,projected-population-by-age-and-sex-vic-rvic-msd-2006-2056,public-unit-trust,quarterly-bank-performance-statistics,quarterly-general-insurance-performance-statistics-march-2011,quarterly-superannuation-performance-march-2011,recorded-crime-dataset-nsw,residential-land-bulletin,resourcesmart-retailers,resourcesmart-retailers-vic,road-fatalities-nsw,securitisation-vehicles,selected-asset-and-liabilities-of-the-private-non-financial-sectors,seperannuation-funds-outside-life-offices,solar-report-vic,towns-in-time-victoria,vif2008-projected-population-by-5-year-age-groups-and-sex-sla-lga-ssd-sd-2006-2026,vif2008-projected-population-totals-and-components-vic-rvic-msd-2006-2056,vif2008-projected-population-totals-sla-lga-ssd-sd-2006-2026,arts-festivals-victoria,arts-organisations-victoria,arts-spaces-and-places-victoria,ausgrid-average-electricity-use,collecting-institutions-victoria,indigenous-arts-organisations-victoria,latest-coastal-weather-observations-for-coolangatta-qld,top-10-fiction-books-brisbane-city-council];
+
+
docsdb = couch['disclosr-documents']
if __name__ == "__main__":
+ orgs_list = []
+ orgs_ids = {}
for doc in docsdb.view('app/datasets'):
+ print " --- "
print doc.id
- if doc.value['url'] != "http://data.gov.au/data/":
+
+ if doc.value['url'] != "http://data.gov.au/data/" and doc.value['agencyID'] != "qld":
+
+
# Collect the package metadata.
- pkg_name = name_munge(doc.value['metadata']['DCTERMS.Title'][:100])
- tags = []
- if len(doc.value['metadata']["Keywords / Tags"]) > 0:
- if hasattr(doc.value['metadata']["Keywords / Tags"], '__iter__'):
- tags = tags + doc.value['metadata']["Keywords / Tags"]
- else:
- tags = tags + [doc.value['metadata']["Keywords / Tags"]]
- if 'data.gov.au Category' in doc.value['metadata'].keys() and len(doc.value['metadata']['data.gov.au Category']) > 0:
- if hasattr(doc.value['metadata']['data.gov.au Category'], '__iter__'):
- tags = tags + doc.value['metadata']['data.gov.au Category']
- else:
- tags = tags + [doc.value['metadata']['data.gov.au Category']]
- tags = [re.sub('[^a-zA-Z0-9-_.]', '', tag.replace('&', 'and')).lower() for tag in tags if tag]
- print tags
- package_entity = {
- 'name': pkg_name,
- 'title': doc.value['metadata']['DCTERMS.Title'],
- 'url': doc.value['metadata']['DCTERMS.Source.URI'],
- 'tags': tags, #tags are mandatory?
- 'author': doc.value['metadata']["DCTERMS.Creator"],
- 'maintainer': doc.value['metadata']["DCTERMS.Creator"],
- 'licence_id': get_licence_id(doc.value['metadata']['DCTERMS.License']),
- 'notes': html2text.html2text(doc.value['metadata']['Description']),
- }
-
- try:
- print package_entity
- ckan.package_register_post(package_entity)
- except CkanApiError, e:
- if ckan.last_status == 409:
- print "package already exists"
- else:
- raise LoaderError('Unexpected status %s checking for package under \'%s\': %r' % (
- ckan.last_status, pkg_name, e.args))
-
-
- #add to group
-
- group_name = name_munge(doc.value['metadata']["Agency"][:100])
- try:
- print ckan.group_entity_get(group_name)
-
- # Update the group details
- group_entity = ckan.last_message
- print "group exists"
- if 'packages' in group_entity.keys():
- group_entity['packages'] = list(set(group_entity['packages'] + [pkg_name]))
- else:
- group_entity['packages'] = [pkg_name]
- ckan.group_entity_put(group_entity)
- except CkanApiError, e:
- if ckan.last_status == 404:
- print "group does not exist, creating"
- group_entity = {
- 'name': group_name,
- 'title': doc.value['metadata']["Agency"],
- 'description': doc.value['metadata']["Agency"],
- 'packages': [pkg_name],
- # 'type': "organization" # not allowed via API, use database query
- # update "group" set type = 'organization';
- }
- print group_entity
- ckan.group_register_post(group_entity)
- else:
- raise LoaderError('Unexpected status %s adding to group under \'%s\': %r' % (
- ckan.last_status, pkg_name, e.args))
- if 'Download' in doc.value['metadata'].keys():
+ pkg_name = filter(lambda x: x in '0123456789abcdefghijklmnopqrstuvwxyz-_',
+                              doc.value['url'].replace("http://data.gov.au/dataset/", '').replace('/', '')[:100])
+ print pkg_name
+ if pkg_name in gooddata:
+
+ #add to or create organization using direct API
+ agency = doc.value['metadata']["Agency"]
+ if agency == "APS":
+ agency = "Australian Public Service Commission"
+ if agency == "Shared Services, Treasury Directorate":
+ agency = "Shared Services Procurement, Treasury Directorate"
+ if agency == "Treasury - Shared Services":
+ agency = "Shared Services Procurement, Treasury Directorate"
+ if agency == "Territory and Municipal Services (TAMS)":
+ agency = "Territory and Municipal Services Directorate"
+ if agency == "State Library of NSW":
+ agency = "State Library of New South Wales"
+ org_name = name_munge(agency[:100])
+ if org_name not in orgs_list:
+ orgs_list = ckandirect.action.organization_list()['result']
+ #print orgs_list
+ if org_name not in orgs_list:
+ try:
+ print "org not found, creating " + org_name
+ ckandirect.action.organization_create(name=org_name, title=agency,
+ description=agency)
+ orgs_list.append(org_name)
+ except ckanapi.ValidationError, e:
+ print e
+ raise LoaderError('Unexpected status')
+ else:
+ print "org found, adding dataset to " + org_name
+
+ # cache org names -> id mapping
+ if org_name not in orgs_ids:
+ org = ckandirect.action.organization_show(id=org_name)
+ orgs_ids[org_name] = org["result"]["id"]
+ org_id = orgs_ids[org_name]
+ print "org id is " + org_id
+ tags = []
+ creator = doc.value['metadata']["DCTERMS.Creator"]
+ if doc.value['agencyID'] == "AGIMO":
+ if len(doc.value['metadata']["Keywords / Tags"]) > 0:
+ if hasattr(doc.value['metadata']["Keywords / Tags"], '__iter__'):
+ tags = tags + doc.value['metadata']["Keywords / Tags"]
+ else:
+ tags = tags + [doc.value['metadata']["Keywords / Tags"]]
+
+ tags = [re.sub('[^a-zA-Z0-9-_.]', '', tag.replace('&', 'and')).lower() for tag in tags if tag]
+ #print tags
+ extras = []
+
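+                    # pass the remaining metadata fields through as CKAN package extras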
+ for extra_key in doc.value['metadata'].keys():
+ if extra_key not in ["Description", "Content-Language", "DCTERMS.Description",
+ "Keywords / Tags",
+ "data.gov.au Category", "Download", "Permalink", "DCTERMS.Identifier"]:
+                        if doc.value['metadata'][extra_key] is not None and doc.value['metadata'][extra_key] != "":
+ extras.append([extra_key, doc.value['metadata'][extra_key]])
+
+ package_entity = {
+ 'name': pkg_name,
+ 'title': doc.value['metadata']['DCTERMS.Title'],
+ 'url': doc.value['metadata']['DCTERMS.Source.URI'],
+ 'tags': tags, #tags are mandatory?
+ 'author': creator,
+ 'maintainer': creator,
+ 'licence_id': get_licence_id(doc.value['metadata']['DCTERMS.License']),
+ 'notes': html2text.html2text(doc.value['metadata']['Description']),
+ 'owner_org': org_id,
+ 'extras': extras
+ }
+
try:
- pkg = ckan.package_entity_get(pkg_name)
- resources = pkg.get('resources', [])
- if len(resources) < len(doc.value['metadata']['Download']):
- for resource in doc.value['metadata']['Download']:
-
- # http://docs.ckan.org/en/ckan-1.7/domain-model-resource.html
- # (KML/KMZ) / (Shapefile) /(Other)
- format = "plain"
- if resource['format'] == '(XML)':
- format = 'xml'
- if resource['format'] == '(CSV/XLS)':
- format = 'csv'
- name = resource['href']
- if 'name' in resource.keys():
- name = resource['name']
- print resource
- ckan.add_package_resource(pkg_name, resource['href'], name=name, resource_type='data',
- format=format, size=human2bytes(resource['size'].replace(',', '')))
+ #print package_entity
+ ckan.package_register_post(package_entity)
+ except CkanApiError, e:
+ if ckan.last_message == "{\"name\": [\"That URL is already in use.\"]}":
+ print "package already exists"
else:
- print "resources already exist"
- except CkanApiError, e:
- if ckan.last_status == 404:
- print "parent dataset does not exist"
- else:
+ print ckan.last_message
raise LoaderError('Unexpected status %s checking for package under \'%s\': %r' % (
ckan.last_status, pkg_name, e.args))
-
+ pkg = ckan.package_entity_get(pkg_name)
+
+
+ # add resources (downloadable data files)
+ if 'Download' in doc.value['metadata'].keys():
+ try:
+
+ resources = pkg.get('resources', [])
+ if len(resources) < len(doc.value['metadata']['Download']):
+ for resource in doc.value['metadata']['Download']:
+
+ # http://docs.ckan.org/en/ckan-1.7/domain-model-resource.html
+ # (KML/KMZ) / (Shapefile) /(Other)
+ format = "plain"
+ if resource['format'] == '(XML)':
+ format = 'xml'
+ if resource['format'] == '(CSV/XLS)':
+ format = 'csv'
+ if resource['format'] == '(Shapefile)':
+ format = 'shp'
+ if resource['format'] == '(KML/KMZ)':
+ format = 'kml'
+ name = resource['href']
+ if 'name' in resource.keys():
+ name = resource['name']
+ print resource
+ add_package_resource_cachedurl(ckan, pkg_name, url_fix(resource['href']), name,
+ format,
+ human2bytes(resource.get('size', '0B')),
+ resource_type='data')
+ else:
+ print "resources already exist"
+ except CkanApiError, e:
+ if ckan.last_status == 404:
+ print "parent dataset does not exist"
+ else:
+ raise LoaderError('Unexpected status %s checking for package under \'%s\': %r' % (
+ ckan.last_status, pkg_name, e.args))
+
--- /dev/null
+++ b/documents/datagov-merge.php
@@ -1,1 +1,26 @@
+<?php
+include_once("../include/common.inc.php");
+
+
+setlocale(LC_CTYPE, 'C');
+
+$db = $server->get_db('disclosr-documents');
+$datasets = Array();
+try {
+ $rows = $db->get_view("app", "datasets", null, true)->rows;
+
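+    // map each dataset's URL slug to its CouchDB document id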
+ foreach ($rows as $row) {
+ //print_r($row);
+ if ($row->value->url != "http://data.gov.au/data/")
+ $datasets[str_replace(Array("http://data.gov.au/dataset/","/"),"",$row->value->url)] = $row->id;
+ }
+} catch (SetteeRestClientException $e) {
+ setteErrorHandler($e);
+}
+ksort($datasets);
+foreach ($datasets as $datasetname => $datasetkey) {
+ print "$datasetname => $datasetkey<br>\n";
+}
+?>
+
--- /dev/null
+++ b/documents/datagov-resourcereport.py
@@ -1,1 +1,81 @@
+import couchdb
+couch = couchdb.Server('http://127.0.0.1:5984/')
+#couch = couchdb.Server('http://192.168.1.113:5984/')
+import urllib
+import urlparse
+import httplib2
+import httplib
+import csv
+
+
+def url_fix(s, charset='utf-8'):
+ """Sometimes you get an URL by a user that just isn't a real
+ URL because it contains unsafe characters like ' ' and so on. This
+ function can fix some of the problems in a similar way browsers
+ handle data entered by the user:
+
+ :param charset: The target charset for the URL if the url was
+ given as unicode string.
+ """
+ if isinstance(s, unicode):
+ s = s.encode(charset, 'ignore')
+ if not urlparse.urlparse(s).scheme:
+ s = "http://"+s
+ scheme, netloc, path, qs, anchor = urlparse.urlsplit(s)
+ path = urllib.quote(path, '/%')
+ qs = urllib.quote_plus(qs, ':&=')
+ return urlparse.urlunsplit((scheme, netloc, path, qs, anchor))
+
+# http://code.activestate.com/recipes/578019-bytes-to-human-human-to-bytes-converter/
+SYMBOLS = {
+ 'customary': ('B', 'KB', 'MB', 'GB', 'T', 'P', 'E', 'Z', 'Y'),
+ 'customary_ext': ('byte', 'kilo', 'mega', 'giga', 'tera', 'peta', 'exa',
+ 'zetta', 'iotta'),
+ 'iec': ('Bi', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi', 'Yi'),
+ 'iec_ext': ('byte', 'kibi', 'mebi', 'gibi', 'tebi', 'pebi', 'exbi',
+ 'zebi', 'yobi'),
+}
+
+
+docsdb = couch['disclosr-documents']
+out = csv.writer(open("output.csv","w"), delimiter=',',quoting=csv.QUOTE_ALL)
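+# output columns: package name, fixed URL, resource name, format, HTTP status, content type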
+if __name__ == "__main__":
+ for doc in docsdb.view('app/datasets'):
+ if doc.value['url'] != "http://data.gov.au/data/" and doc.value['agencyID'] != "qld":
+ # Collect the package metadata.
+ pkg_name = filter(lambda x: x in '0123456789abcdefghijklmnopqrstuvwxyz-_',
+                          doc.value['url'].replace("http://data.gov.au/dataset/", '').replace('/', '')[:100])
+ if 'Download' in doc.value['metadata'].keys() and len(doc.value['metadata']['Download']) > 0:
+ for resource in doc.value['metadata']['Download']:
+ # http://docs.ckan.org/en/ckan-1.7/domain-model-resource.html
+ # (KML/KMZ) / (Shapefile) /(Other)
+ format = "plain"
+ if resource['format'] == '(XML)':
+ format = 'xml'
+ if resource['format'] == '(CSV/XLS)':
+ format = 'csv'
+ if resource['format'] == '(Shapefile)':
+ format = 'shp'
+ if resource['format'] == '(KML/KMZ)':
+ format = 'kml'
+ name = resource['href']
+ if 'name' in resource.keys():
+ name = resource['name']
+ if resource['href'].startswith("ftp"):
+ out.writerow([pkg_name, url_fix(resource['href']), name,format, "ftp", ""])
+ else:
+ try:
+ h = httplib2.Http(disable_ssl_certificate_validation=True)
+ resp = h.request(url_fix(resource['href']), 'HEAD')
+ content_type = resp[0]['content-type'] if 'content-type' in resp[0].keys() else ""
+ out.writerow([pkg_name.encode('ascii', 'ignore'), url_fix(resource['href']).encode('ascii', 'ignore'), name.encode('ascii', 'ignore'),format, resp[0]['status'], content_type])
+ except httplib2.ServerNotFoundError:
+ out.writerow([pkg_name.encode('ascii', 'ignore'), url_fix(resource['href']).encode('ascii', 'ignore'), name.encode('ascii', 'ignore'),format, "500","badurl"])
+ except httplib.InvalidURL:
+ out.writerow([pkg_name.encode('ascii', 'ignore'), url_fix(resource['href']).encode('ascii', 'ignore'), name.encode('ascii', 'ignore'),format, "500","badurl"])
+ except httplib2.RelativeURIError:
+ out.writerow([pkg_name.encode('ascii', 'ignore'), url_fix(resource['href']).encode('ascii', 'ignore'), name.encode('ascii', 'ignore'),format, "500","badurl"])
+ else:
+ out.writerow([pkg_name.encode('ascii', 'ignore')])
+
--- a/documents/datagov.py
+++ b/documents/datagov.py
@@ -39,7 +39,9 @@
link = item.find("a")
format = item.find(property="dc:format")
linkobj = {"href":link['href'].replace("/bye?","").strip(),
- "format": format.string.strip(), "size": format.next_sibling.string.strip()}
+ "format": format.string.strip()}
+        if format.next_sibling.string is not None:
+ linkobj["size"] = format.next_sibling.string.strip()
if link.string != None:
linkobj["name"] = link.string.strip()
doc['metadata'][last_title].append(linkobj)
--- /dev/null
+++ b/documents/dataqld.py
@@ -1,1 +1,28 @@
+import sys, os
+import time
+import scrape
+from bs4 import BeautifulSoup
+from unidecode import unidecode
+import ckanclient
+
+# Instantiate the CKAN client.
+ckan = ckanclient.CkanClient(base_location='https://data.qld.gov.au/api')
+
+# Get the package list.
+package_list = ckan.package_register_get()
+for package_name in package_list:
+# Get the details of a package.
+ (url, mime_type, html) = scrape.fetchURL(scrape.docsdb,
+ "https://data.qld.gov.au/dataset/"+package_name , "data", "qld", False)
+ hash = scrape.mkhash(scrape.canonurl(url))
+ print hash
+ doc = scrape.docsdb.get(hash)
+ if "metadata" not in doc.keys() or True:
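+        # note: the "or True" forces package metadata to be re-fetched on every run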
+ ckan.package_entity_get(package_name)
+ package_entity = ckan.last_message
+ doc['type'] = "dataset"
+ doc['metadata'] = package_entity
+ print package_entity
+ scrape.docsdb.save(doc)
+
--- a/documents/gazette.py
+++ b/documents/gazette.py
@@ -5,20 +5,53 @@
from unidecode import unidecode
-listurl = "http://gazettes.ag.gov.au/portal/govgazonline.nsf/publications?OpenView&Start=3960"
-(url, mime_type, listhtml) = scrape.fetchURL(scrape.docsdb,
- listurl, "gazette", "AGD")
-soup = BeautifulSoup(listhtml)
-for row in soup.find_all('tr'):
- if row.has_key('valign'):
- for col in tr.find_all('td'):
- print col.string
- #url = scrape.fullurl(listurl, atag['href'])
- #(url, mime_type, html) = scrape.fetchURL(scrape.docsdb,
- # url, "data", "AGIMO")
- #hash = scrape.mkhash(scrape.canonurl(url))
- #doc = scrape.docsdb.get(hash)
- #print doc['metadata']
- #scrape.docsdb.save(doc)
- #time.sleep(2)
+items = 3950
+items = 1
+while True:
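+    # page backwards through the gazette list view, 25 rows per screen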
+ print str(items) + " (" +str(items/25) +" screens to go)"
+ listurl = "http://gazettes.ag.gov.au/portal/govgazonline.nsf/publications?OpenView&Start=" + str(items)
+ (listurl, mime_type, listhtml) = scrape.fetchURL(scrape.docsdb,
+ listurl, "gazette", "AGD", False)
+ for line in listhtml.split('\n'):
+ soup = BeautifulSoup(line)
+ #print line
+ for row in soup.find_all('tr'):
+ print line
+ if row.has_key('valign'):
+ i = 0
+ date = ""
+ id = ""
+ type = ""
+ description = ""
+ name = ""
+ url = ""
+ for col in soup.find_all('td'):
+ #print ''.join(col.stripped_strings)
+ if i == 0:
+ date = ''.join(col.stripped_strings)
+ if i == 1:
+ id = ''.join(col.stripped_strings)
+ if i == 2:
+ type = ''.join(col.stripped_strings)
+ if i == 3:
+ description = ''.join(col.stripped_strings)
+ for link in col.findAll('a'):
+ if link.has_key("href"):
+ url = link['href']
+ name = ''.join(link.stripped_strings)
+ print str(items) + " (" +str(items/25) +" screens to go)"
+ print [date, id, type, description, name, url]
+ itemurl = scrape.fullurl(listurl, url)
+ (itemurl, mime_type, html) = scrape.fetchURL(scrape.docsdb,
+ itemurl, "gazette", "AGD", False)
+ hash = scrape.mkhash(scrape.canonurl(itemurl))
+ doc = scrape.docsdb.get(hash)
+                    doc['metadata'] = {"date": date, "id": id, "type": type, "description": description, "name": name, "url": url}
+ scrape.docsdb.save(doc)
+ #time.sleep(2)
+                    i = i + 1
+ items = items - 25
+ if items <= 0:
+ break
+
--- a/documents/genericScrapers.py
+++ b/documents/genericScrapers.py
@@ -72,7 +72,8 @@
edate = date.today().strftime("%Y-%m-%d")
doc = {'_id': dochash, 'agencyID': self.getAgencyID()
, 'url': self.getURL(), 'docID': dochash,
- "date": edate, "title": "Disclosure Log Updated", "description": self.remove_control_chars(description), "diff": diff}
+ "date": edate, "title": "Disclosure Log Updated",
+ "description": self.remove_control_chars(description), "diff": self.remove_control_chars(diff)}
foidocsdb.save(doc)
else:
print "already saved"
@@ -199,11 +200,16 @@
return table.find_all('tr')
def getDate(self, content, entry, doc):
- date = ''.join(content.stripped_strings).strip()
- (a, b, c) = date.partition("(")
- date = self.remove_control_chars(a.replace("Octber", "October").replace("1012","2012"))
- print date
- edate = parse(date, dayfirst=True, fuzzy=True).strftime("%Y-%m-%d")
+ strdate = ''.join(content.stripped_strings).strip()
+ (a, b, c) = strdate.partition("(")
+        strdate = self.remove_control_chars(a.replace("Octber", "October").replace("Janrurary", "January").replace("1012", "2012"))
+        print strdate
+        try:
+            edate = parse(strdate, dayfirst=True, fuzzy=True).strftime("%Y-%m-%d")
+        except ValueError:
+            print >> sys.stderr, "ERROR date invalid %s " % strdate
+            print >> sys.stderr, "ERROR date originally %s " % ''.join(content.stripped_strings).strip()
+            edate = date.today().strftime("%Y-%m-%d")
print edate
doc.update({'date': edate})
return
@@ -266,8 +272,7 @@
'Summary of FOIrequest received by agency/minister',
'Summary of FOI request received', 'Description of FOI Request',
"FOI request", 'Results 1 to 67 of 67']
- if doc['title'] not in badtitles\
- and doc['description'] != '':
+ if doc['title'] not in badtitles and 'description' in doc.keys() and doc['description'] != '':
print "saving"
foidocsdb.save(doc)
else:
@@ -277,6 +282,6 @@
print "header row"
else:
- print "ERROR number of columns incorrect"
+ print >> sys.stderr, "ERROR number of columns incorrect"
print row
--- a/documents/runScrapers.sh
+++ b/documents/runScrapers.sh
@@ -1,10 +1,22 @@
-for f in scrapers/*.py;
- do echo "Processing $f file..";
- python $f;
+DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+cd $DIR
+echo "" > /tmp/disclosr-error
+for f in scrapers/*.py; do
+ echo "Processing $f file..";
+ md5=`md5sum /tmp/disclosr-error`
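+  # swap stdout and stderr (3>&1 1>&2 2>&3) so tee appends only scraper errors to the log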
+ python $f 3>&1 1>&2 2>&3 | tee --append /tmp/disclosr-error;
+ md52=`md5sum /tmp/disclosr-error`
+ if [ "$md5" != "$md52" ]; then
+ echo "^^^^^^^^^^^^^^ $f" >> /tmp/disclosr-error;
+ fi
if [ "$?" -ne "0" ]; then
echo "error";
- sleep 2;
+ sleep 1;
fi
done
+if [ -s /tmp/disclosr-error ] ; then
+ echo "emailling logs..";
+ mail -E -s "Disclosr errors" maxious@lambdacomplex.org < /tmp/disclosr-error ;
+fi
--- a/documents/scrape.py
+++ b/documents/scrape.py
@@ -7,14 +7,15 @@
from urlparse import urljoin
import time
import os
+import sys
import mimetypes
import urllib
import urlparse
import socket
#couch = couchdb.Server('http://192.168.1.148:5984/')
-couch = couchdb.Server('http://192.168.1.113:5984/')
-#couch = couchdb.Server('http://127.0.0.1:5984/')
+#couch = couchdb.Server('http://192.168.1.113:5984/')
+couch = couchdb.Server('http://127.0.0.1:5984/')
def mkhash(input):
@@ -89,7 +90,7 @@
def getLastAttachment(docsdb, url):
hash = mkhash(url)
doc = docsdb.get(hash)
- if doc != None:
+    if doc is not None and "_attachments" in doc.keys():
last_attachment_fname = doc["_attachments"].keys()[-1]
last_attachment = docsdb.get_attachment(doc, last_attachment_fname)
return last_attachment
@@ -103,7 +104,7 @@
req = urllib2.Request(url)
print "Fetching %s (%s)" % (url, hash)
if url.startswith("mailto") or url.startswith("javascript") or url.startswith("#") or url == None or url == "":
- print "Not a valid HTTP url"
+ print >> sys.stderr, "Not a valid HTTP url"
return (None, None, None)
doc = docsdb.get(hash)
if doc == None:
@@ -111,10 +112,15 @@
else:
if (('page_scraped' in doc) and ((time.time() - doc['page_scraped']) < 60 * 24 * 14) or (scrape_again == False)):
print "Uh oh, trying to scrape URL again too soon!" + hash
- last_attachment_fname = doc["_attachments"].keys()[-1]
- last_attachment = docsdb.get_attachment(doc, last_attachment_fname)
- content = last_attachment
- return (doc['url'], doc['mime_type'], content.read())
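+            # reuse the cached attachment if present; otherwise return no content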
+ if "_attachments" in doc.keys():
+ last_attachment_fname = doc["_attachments"].keys()[-1]
+ last_attachment = docsdb.get_attachment(doc, last_attachment_fname)
+ content = last_attachment.read()
+ mime_type = doc['mime_type']
+ else:
+ content = None
+ mime_type = None
+ return (doc['url'], mime_type, content)
req.add_header("User-Agent", "Mozilla/4.0 (compatible; Prometheus webspider; owner maxious@lambdacomplex.org)")
#if there is a previous version stored in couchdb, load caching helper tags
@@ -159,13 +165,13 @@
#store as attachment epoch-filename
except (urllib2.URLError, socket.timeout) as e:
- print "error!"
+ print >> sys.stderr,"error!"
error = ""
if hasattr(e, 'reason'):
error = "error %s in downloading %s" % (str(e.reason), url)
elif hasattr(e, 'code'):
error = "error %s in downloading %s" % (e.code, url)
- print error
+ print >> sys.stderr, error
doc['error'] = error
docsdb.save(doc)
return (None, None, None)
--- a/documents/scrapers/0049d35216493c545ef5f7f000e6b252.py
+++ b/documents/scrapers/0049d35216493c545ef5f7f000e6b252.py
@@ -42,7 +42,6 @@
'data': {'request': '', 'session': '', 'more': ''}
}
-
- amonpy.exception(data)
+ #amonpy.exception(data)
pass
--- a/documents/scrapers/227cb6eb7d2c9f8a6e846df7447d6caa.py
+++ b/documents/scrapers/227cb6eb7d2c9f8a6e846df7447d6caa.py
@@ -18,13 +18,13 @@
if mime_type == "text/html" or mime_type == "application/xhtml+xml" or mime_type =="application/xml":
# http://www.crummy.com/software/BeautifulSoup/documentation.html
soup = BeautifulSoup(htcontent)
- for row in soup.find(class_ = "ms-rteTable-GreyAlternating").find_all('tr'):
+ rowtitle = soup.find(class_ = "wc-title").find("h1").string
+            if rowtitle is not None:
+ description = rowtitle + ": "
+ for row in soup.find(class_ ="wc-content").find_all('td'):
if row != None:
- rowtitle = row.find('th').string
- if rowtitle != None:
- description = description + "\n" + rowtitle + ": "
- for text in row.find('td').stripped_strings:
- description = description + text
+ for text in row.stripped_strings:
+ description = description + text + "\n"
for atag in row.find_all("a"):
if atag.has_key('href'):
links.append(scrape.fullurl(link,atag['href']))
@@ -37,7 +37,7 @@
def getColumnCount(self):
return 2
def getTable(self,soup):
- return soup.find(class_ = "ms-rteTable-GreyAlternating")
+ return soup.find(class_ = "ms-rteTable-default")
def getColumns(self,columns):
(date, title) = columns
return (title, date, title, title, None)
--- a/documents/scrapers/24bd71114d3975ed9a63ad29624c62c9.py
+++ b/documents/scrapers/24bd71114d3975ed9a63ad29624c62c9.py
@@ -7,7 +7,7 @@
#http://www.doughellmann.com/PyMOTW/abc/
class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
def getTable(self,soup):
- return soup.find(id = "inner_content")
+ return soup.find(class_="tborder")
def getColumnCount(self):
return 2
def getColumns(self,columns):
--- a/documents/scrapers/3d5871a44abbbc81ef5b3a420070755d.py
+++ b/documents/scrapers/3d5871a44abbbc81ef5b3a420070755d.py
@@ -8,40 +8,14 @@
from datetime import *
#http://www.doughellmann.com/PyMOTW/abc/
-class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
- def getTable(self,soup):
- return soup.find(class_ = "inner-column").table
- def getRows(self,table):
- return table.tbody.find_all('tr',recursive=False)
+class ScraperImplementation(genericScrapers.GenericHTMLDisclogScraper):
def getColumnCount(self):
- return 3
- def getColumns(self,columns):
- (date, title, description) = columns
- return (date, date, title, description, None)
- def getDate(self, content, entry, doc):
- i = 0
- date = ""
- for string in content.stripped_strings:
- if i ==1:
- date = string
- i = i+1
- edate = parse(date, dayfirst=True, fuzzy=True).strftime("%Y-%m-%d")
- print edate
- doc.update({'date': edate})
- return
- def getTitle(self, content, entry, doc):
- i = 0
- title = ""
- for string in content.stripped_strings:
- if i < 2:
- title = title + string
- i = i+1
- doc.update({'title': title})
- #print title
- return
+ return 0
if __name__ == '__main__':
- print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
- print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
+#http://www.csiro.au/Portals/About-CSIRO/How-we-work/Governance/FOI-Request-Disclosure-Log-2012-13.aspx
+#http://www.csiro.au/Portals/About-CSIRO/How-we-work/Governance/FOI-Request-Disclosure-Log-2011-12.aspx
+ print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericHTMLDisclogScraper)
+ print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericHTMLDisclogScraper)
ScraperImplementation().doScrape()
--- a/documents/scrapers/6fa04af95fbe7de96daa2c7560e0aad3.py
+++ b/documents/scrapers/6fa04af95fbe7de96daa2c7560e0aad3.py
@@ -6,8 +6,6 @@
#http://www.doughellmann.com/PyMOTW/abc/
class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
- def getTable(self,soup):
- return soup.find(id = "content_div_50269").table
def getColumns(self,columns):
(id, date, title, description, notes) = columns
return (id, date, title, description, notes)
--- a/documents/scrapers/7c6adc1d41cf029bf1a0959e5156477a.py
+++ b/documents/scrapers/7c6adc1d41cf029bf1a0959e5156477a.py
@@ -21,11 +21,15 @@
d.make_links_absolute(base_url = self.getURL())
for table in d('table').items():
title= table('thead').text()
- print title
+ print self.remove_control_chars(title)
(idate,descA,descB,link,deldate,notes) = table('tbody tr').map(lambda i, e: pq(e).children().eq(1).text())
links = table('a').map(lambda i, e: pq(e).attr('href'))
description = descA+" "+descB
- edate = parse(idate[:12], dayfirst=True, fuzzy=True).strftime("%Y-%m-%d")
+ try:
+ edate = parse(idate[:12], dayfirst=True, fuzzy=True).strftime("%Y-%m-%d")
+ except ValueError:
+ edate = date.today().strftime("%Y-%m-%d")
+ pass
print edate
dochash = scrape.mkhash(self.remove_control_chars(title))
doc = foidocsdb.get(dochash)
--- a/documents/scrapers/8e874a2fde8aa0ccdc6d14573d766540.py
+++ b/documents/scrapers/8e874a2fde8aa0ccdc6d14573d766540.py
@@ -18,10 +18,10 @@
if mime_type == "text/html" or mime_type == "application/xhtml+xml" or mime_type =="application/xml":
# http://www.crummy.com/software/BeautifulSoup/documentation.html
soup = BeautifulSoup(htcontent)
- for text in soup.find(id="divFullWidthColumn").stripped_strings:
+ for text in soup.find(class_ = "mainContent").stripped_strings:
description = description + text.encode('ascii', 'ignore')
- for atag in soup.find(id="divFullWidthColumn").find_all("a"):
+ for atag in soup.find(id="SortingTable").find_all("a"):
if atag.has_key('href'):
links.append(scrape.fullurl(link,atag['href']))
--- a/documents/scrapers/a687a9eaab9e10e9e118d3fd7cf0e13a.py
+++ b/documents/scrapers/a687a9eaab9e10e9e118d3fd7cf0e13a.py
@@ -7,11 +7,11 @@
#http://www.doughellmann.com/PyMOTW/abc/
class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
def getTable(self,soup):
- return soup.find(id="ctl00_ContentPlaceHolderMainNoAjax_EdtrTD1494_2").table
+ return soup.find(id="int-content").table
def getColumnCount(self):
- return 4
+ return 3
def getColumns(self,columns):
- (blank,id, title,date) = columns
+ (id, title,date) = columns
return (id, date, title, title, None)
if __name__ == '__main__':
--- /dev/null
+++ b/documents/scrapers/b0ca7fddcd1c965787daea47f2d32e0a.py
@@ -1,1 +1,17 @@
+import sys,os
+sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
+import genericScrapers
+import scrape
+from bs4 import BeautifulSoup
+#http://www.doughellmann.com/PyMOTW/abc/
+class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
+ def getColumns(self,columns):
+ (id, date, title, description, notes) = columns
+ return (id, date, title, description, notes)
+
+if __name__ == '__main__':
+ print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
+ print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
+ ScraperImplementation().doScrape()
+
--- a/documents/scrapers/dfd7414bb0c21a0076ab559901ae0588.py
+++ b/documents/scrapers/dfd7414bb0c21a0076ab559901ae0588.py
@@ -10,7 +10,7 @@
(id, date, title, description, notes) = columns
return (id, date, title, description, notes)
def getTable(self,soup):
- return soup.find(class_ = "content")
+ return soup.find(class_ = "simpletable")
if __name__ == '__main__':
print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
--- a/documents/scrapers/f2ab2908d8ee56ed8d995ef4187e75e6.py
+++ b/documents/scrapers/f2ab2908d8ee56ed8d995ef4187e75e6.py
@@ -10,7 +10,7 @@
(id, date, title, description, notes) = columns
return (id, date, title, description, notes)
def getTable(self,soup):
- return soup.find(id = "content").table
+ return soup.find("table")
if __name__ == '__main__':
print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)