From: Alex Sadleir
Date: Thu, 11 Apr 2013 12:24:20 +0000
Subject: Merge branch 'master' of ssh://apples.lambdacomplex.org/git/disclosr
X-Git-Url: http://maxious.lambdacomplex.org/git/?p=disclosr.git&a=commitdiff&h=5948d2a9216855b7a214dd4f5fcb82ed6af548d3
---
Merge branch 'master' of ssh://apples.lambdacomplex.org/git/disclosr

Conflicts:
	documents/genericScrapers.py
	documents/runScrapers.sh

Former-commit-id: a6f8697ed080934b51ab7b63a3d4428ff5ccdb2b
---
--- a/documents/datagov-export.py
+++ b/documents/datagov-export.py
@@ -2,14 +2,80 @@
 import couchdb
 from ckanclient import CkanApiError
 import re
+import html2text # aaronsw :(
+
+
 class LoaderError(Exception):
     pass
+
+# Instantiate the CKAN client.
+#ckan = ckanclient.CkanClient(base_location='http://localhost:5000/api', api_key='b47b24cd-591d-40c1-8677-d73101d56d1b')
+ckan = ckanclient.CkanClient(base_location='http://data.disclosurelo.gs/api',
+                             api_key='482a9dd2-a976-4adf-ac77-d71d92a98a52')
+couch = couchdb.Server('http://127.0.0.1:5984/')
+#couch = couchdb.Server('http://192.168.1.113:5984/')
+
+# http://code.activestate.com/recipes/578019-bytes-to-human-human-to-bytes-converter/
+SYMBOLS = {
+    'customary': ('B', 'KB', 'MB', 'GB', 'T', 'P', 'E', 'Z', 'Y'),
+    'customary_ext': ('byte', 'kilo', 'mega', 'giga', 'tera', 'peta', 'exa',
+                      'zetta', 'iotta'),
+    'iec': ('Bi', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi', 'Yi'),
+    'iec_ext': ('byte', 'kibi', 'mebi', 'gibi', 'tebi', 'pebi', 'exbi',
+                'zebi', 'yobi'),
+}
+
+def human2bytes(s):
+    """
+    Attempts to guess the string format based on default symbols
+    set and return the corresponding bytes as an integer.
+    When unable to recognize the format ValueError is raised.
+
+    >>> human2bytes('0 B')
+    0
+    >>> human2bytes('1 K')
+    1024
+    >>> human2bytes('1 M')
+    1048576
+    >>> human2bytes('1 Gi')
+    1073741824
+    >>> human2bytes('1 tera')
+    1099511627776
+
+    >>> human2bytes('0.5kilo')
+    512
+    >>> human2bytes('0.1 byte')
+    0
+    >>> human2bytes('1 k')  # k is an alias for K
+    1024
+    >>> human2bytes('12 foo')
+    Traceback (most recent call last):
+        ...
+    ValueError: can't interpret '12 foo'
+    """
+    init = s
+    num = ""
+    while s and s[0:1].isdigit() or s[0:1] == '.':
+        num += s[0]
+        s = s[1:]
+    num = float(num)
+    letter = s.strip()
+    for name, sset in SYMBOLS.items():
+        if letter in sset:
+            break
+    else:
+        if letter == 'k':
+            # treat 'k' as an alias for 'K' as per: http://goo.gl/kTQMs
+            sset = SYMBOLS['customary']
+            letter = letter.upper()
+        else:
+            raise ValueError("can't interpret %r" % init)
+    prefix = {sset[0]: 1}
+    for i, s in enumerate(sset[1:]):
+        prefix[s] = 1 << (i + 1) * 10
+    return int(num * prefix[letter])
+
 # https://github.com/okfn/ckanext-importlib
-# Instantiate the CKAN client.
-ckan = ckanclient.CkanClient(base_location='http://localhost:5000/api',
-                             api_key='b47b24cd-591d-40c1-8677-d73101d56d1b')
-# (use your own api_key from http://thedatahub.org/user/me )
-
 def munge(name):
     # convert spaces to underscores
     name = re.sub(' ', '_', name).lower()
@@ -21,38 +87,136 @@
     # remove double underscores
     name = re.sub('__', '_', name).lower()
     return name
+
+
 def name_munge(input_name):
     return munge(input_name.replace(' ', '').replace('.', '_').replace('&', 'and'))
+    #[:100]
     #return input_name.replace(' ', '').replace('.', '_').replace('&', 'and')
-couch = couchdb.Server('http://127.0.0.1:5984/')
+
+
+def get_licence_id(licencename):
+    map = {
+        "Creative Commons - Attribution-Share Alike 2.0 Australia (CC-SA)\nThe downloadable version of the database is licensed under CC-BY-SA Creative Commons Attribution Share Alike and contains only the database fields that are released under that license. These fields are object title, object number, object description as well as temporal, spatial and dimension details. It also contains a persistent URL for each record.": 'cc-by-sa',
+        "CreativeCommonsAttributionNonCommercial30AustraliaCCBYNC30": 'cc-nc',
+        'Otherpleasespecify': 'notspecified',
+        '': 'notspecified',
+        "Publicly available data": 'notspecified',
+        "CreativeCommonsAttributionNoDerivativeWorks30AustraliaCCBYND30": "other-closed",
+        "CreativeCommonsAttributionNonCommercialNoDerivs30AustraliaCCBYNCND30": "other-closed",
+        'CreativeCommonsAttribution30AustraliaCCBY30': 'cc-by',
+        "Creative Commons - Attribution 2.5 Australia (CC-BY)": 'cc-by',
+        'CreativeCommonsAttributionCCBY25': 'cc-by',
+        "PublicDomain": 'other-pd',
+    }
+    if licencename not in map.keys():
+        raise Exception(licencename + " not found");
+    return map[licencename];
+
 docsdb = couch['disclosr-documents']

 if __name__ == "__main__":
     for doc in docsdb.view('app/datasets'):
         print doc.id
-        if doc.value['url'] != "http://data.gov.au/data/":
+        if doc.value['url'] != "http://data.gov.au/data/" and doc.value['agencyID'] != "qld":
             # Collect the package metadata.
-            pkg_name = name_munge(doc.value['metadata']['DCTERMS.Title'][:100])
-            package_entity = {
-                'name': pkg_name,
-                'title': doc.value['metadata']['DCTERMS.Title'],
-                'url': doc.value['metadata']['DCTERMS.Source.URI'],
-                'tags': doc.value['metadata']["Keywords / Tags"], #todo must be alphanumeric characters or symbols
-
-                'author': doc.value['metadata']["DCTERMS.Creator"],
-                'maintainer': doc.value['metadata']["DCTERMS.Creator"],
-                'licence_id': doc.value['metadata']['DCTERMS.License'],
-                'notes': doc.value['metadata']['Description'],
+            pkg_name = doc.value['url'].replace("http://data.gov.au/dataset/",'').replace('/','');
+            tags = []
+            if doc.value['agencyID'] == "AGIMO":
+                if len(doc.value['metadata']["Keywords / Tags"]) > 0:
+                    if hasattr(doc.value['metadata']["Keywords / Tags"], '__iter__'):
+                        tags = tags + doc.value['metadata']["Keywords / Tags"]
+                    else:
+                        tags = tags + [doc.value['metadata']["Keywords / Tags"]]
+                if 'data.gov.au Category' in doc.value['metadata'].keys() and len(doc.value['metadata']['data.gov.au Category']) > 0:
+                    if hasattr(doc.value['metadata']['data.gov.au Category'], '__iter__'):
+                        tags = tags + doc.value['metadata']['data.gov.au Category']
+                    else:
+                        tags = tags + [doc.value['metadata']['data.gov.au Category']]
+                tags = [re.sub('[^a-zA-Z0-9-_.]', '', tag.replace('&', 'and')).lower() for tag in tags if tag]
+                print tags
+                package_entity = {
+                    'name': pkg_name,
+                    'title': doc.value['metadata']['DCTERMS.Title'],
+                    'url': doc.value['metadata']['DCTERMS.Source.URI'],
+                    'tags': tags, #tags are mandatory?
+                    'author': doc.value['metadata']["DCTERMS.Creator"],
+                    'maintainer': doc.value['metadata']["DCTERMS.Creator"],
+                    'licence_id': get_licence_id(doc.value['metadata']['DCTERMS.License']),
+                    'notes': html2text.html2text(doc.value['metadata']['Description']),
                 }
+            if doc.value['agencyID'] == "qld":
+                package_entity = doc.value['metadata']
+
             try:
+                print package_entity
                 ckan.package_register_post(package_entity)
             except CkanApiError, e:
                 if ckan.last_status == 409:
-                    print "already exists"
+                    print "package already exists"
                 else:
-                    raise LoaderError('Unexpected status %s checking for package under \'%s\': %r' % (ckan.last_status, doc.id, e.args))
-
-            print package_entity
-            ckan.add_package_resource(pkg_name, 'http://example.org/', name='Foo', resource_type='data', format='csv')
-
+                    raise LoaderError('Unexpected status %s checking for package under \'%s\': %r' % (
+                        ckan.last_status, pkg_name, e.args))
+
+
+            #add to group
+
+            group_name = name_munge(doc.value['metadata']["Agency"][:100])
+            try:
+                print ckan.group_entity_get(group_name)
+
+                # Update the group details
+                group_entity = ckan.last_message
+                print "group exists"
+                if 'packages' in group_entity.keys():
+                    group_entity['packages'] = list(set(group_entity['packages'] + [pkg_name]))
+                else:
+                    group_entity['packages'] = [pkg_name]
+                ckan.group_entity_put(group_entity)
+            except CkanApiError, e:
+                if ckan.last_status == 404:
+                    print "group does not exist, creating"
+                    group_entity = {
+                        'name': group_name,
+                        'title': doc.value['metadata']["Agency"],
+                        'description': doc.value['metadata']["Agency"],
+                        'packages': [pkg_name],
+                        # 'type': "organization" # not allowed via API, use database query
+                        # update "group" set type = 'organization';
+                    }
+                    print group_entity
+                    ckan.group_register_post(group_entity)
+                elif ckan.last_status == 409:
+                    print "group already exists"
+                else:
+                    raise LoaderError('Unexpected status %s adding to group under \'%s\': %r' % (
+                        ckan.last_status, pkg_name, e.args))
+            if 'Download' in doc.value['metadata'].keys():
+                try:
+                    pkg = ckan.package_entity_get(pkg_name)
+                    resources = pkg.get('resources', [])
+                    if len(resources) < len(doc.value['metadata']['Download']):
+                        for resource in doc.value['metadata']['Download']:
+
+                            # http://docs.ckan.org/en/ckan-1.7/domain-model-resource.html
+                            # (KML/KMZ) / (Shapefile) /(Other)
+                            format = "plain"
+                            if resource['format'] == '(XML)':
+                                format = 'xml'
+                            if resource['format'] == '(CSV/XLS)':
+                                format = 'csv'
+                            name = resource['href']
+                            if 'name' in resource.keys():
+                                name = resource['name']
+                            print resource
+                            ckan.add_package_resource(pkg_name, resource['href'], name=name, resource_type='data',
+                                                      format=format, size=human2bytes(resource['size'].replace(',', '')))
+                    else:
+                        print "resources already exist"
+                except CkanApiError, e:
+                    if ckan.last_status == 404:
+                        print "parent dataset does not exist"
+                    else:
+                        raise LoaderError('Unexpected status %s checking for package under \'%s\': %r' % (
+                            ckan.last_status, pkg_name, e.args))
+
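The exporter above registers each dataset with ckanclient and treats an HTTP 409 from CKAN as "package already exists". Condensed to its core, the pattern looks like the sketch below; the base_location and api_key here are placeholders rather than the live values used above, and only ckanclient calls that already appear in datagov-export.py are used.

    import ckanclient
    from ckanclient import CkanApiError

    # Placeholder endpoint and key -- substitute a real CKAN API root and api_key.
    ckan = ckanclient.CkanClient(base_location='http://ckan.example.org/api',
                                 api_key='REPLACE-ME')

    def register_package(package_entity):
        # Create the package; a 409 means it is already registered, so just move on.
        try:
            ckan.package_register_post(package_entity)
        except CkanApiError, e:
            if ckan.last_status == 409:
                print "package already exists:", package_entity['name']
            else:
                raise
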
--- /dev/null
+++ b/documents/datagov-merge.php
@@ -1,1 +1,26 @@
+get_db('disclosr-documents');
+$datasets = Array();
+try {
+    $rows = $db->get_view("app", "datasets", null, true)->rows;
+
+    foreach ($rows as $row) {
+        //print_r($row);
+        if ($row->value->url != "http://data.gov.au/data/")
+            $datasets[str_replace(Array("http://data.gov.au/dataset/","/"),"",$row->value->url)] = $row->id;
+    }
+} catch (SetteeRestClientException $e) {
+    setteErrorHandler($e);
+}
+ksort($datasets);
+foreach ($datasets as $datasetname => $datasetkey) {
+    print "$datasetname => $datasetkey\n";
+}
+?>
+
--- a/documents/datagov.py
+++ b/documents/datagov.py
@@ -13,7 +13,7 @@
             if atag.has_key('href'):
                 url = scrape.fullurl(listurl, atag['href'])
                 (url, mime_type, html) = scrape.fetchURL(scrape.docsdb,
-                    url, "data", "AGIMO")
+                    url, "data", "AGIMO", False)
                 hash = scrape.mkhash(scrape.canonurl(url))
                 doc = scrape.docsdb.get(hash)
                 if "metadata" not in doc.keys() or True:
--- /dev/null
+++ b/documents/dataqld.py
@@ -1,1 +1,28 @@
+import sys, os
+import time
+import scrape
+from bs4 import BeautifulSoup
+from unidecode import unidecode
+import ckanclient
+
+# Instantiate the CKAN client.
+ckan = ckanclient.CkanClient(base_location='https://data.qld.gov.au/api')
+
+# Get the package list.
+package_list = ckan.package_register_get()
+for package_name in package_list:
+# Get the details of a package.
+    (url, mime_type, html) = scrape.fetchURL(scrape.docsdb,
+        "https://data.qld.gov.au/dataset/"+package_name , "data", "qld", False)
+    hash = scrape.mkhash(scrape.canonurl(url))
+    print hash
+    doc = scrape.docsdb.get(hash)
+    if "metadata" not in doc.keys() or True:
+        ckan.package_entity_get(package_name)
+        package_entity = ckan.last_message
+        doc['type'] = "dataset"
+        doc['metadata'] = package_entity
+        print package_entity
+        scrape.docsdb.save(doc)
+
--- a/documents/gazette.py
+++ b/documents/gazette.py
@@ -5,20 +5,53 @@
 from unidecode import unidecode

-listurl = "http://gazettes.ag.gov.au/portal/govgazonline.nsf/publications?OpenView&Start=3960"
-(url, mime_type, listhtml) = scrape.fetchURL(scrape.docsdb,
-    listurl, "gazette", "AGD")
-soup = BeautifulSoup(listhtml)
-for row in soup.find_all('tr'):
-    if row.has_key('valign'):
-        for col in tr.find_all('td'):
-            print col.string
-        #url = scrape.fullurl(listurl, atag['href'])
-        #(url, mime_type, html) = scrape.fetchURL(scrape.docsdb,
-        #    url, "data", "AGIMO")
-        #hash = scrape.mkhash(scrape.canonurl(url))
-        #doc = scrape.docsdb.get(hash)
-        #print doc['metadata']
-        #scrape.docsdb.save(doc)
-        #time.sleep(2)
+items = 3950
+items = 1
+while True:
+    print str(items) + " (" +str(items/25) +" screens to go)"
+    listurl = "http://gazettes.ag.gov.au/portal/govgazonline.nsf/publications?OpenView&Start=" + str(items)
+    (listurl, mime_type, listhtml) = scrape.fetchURL(scrape.docsdb,
+        listurl, "gazette", "AGD", False)
+    for line in listhtml.split('\n'):
+        soup = BeautifulSoup(line)
+        #print line
+        for row in soup.find_all('tr'):
+            print line
+            if row.has_key('valign'):
+                i = 0
+                date = ""
+                id = ""
+                type = ""
+                description = ""
+                name = ""
+                url = ""
+                for col in soup.find_all('td'):
+                    #print ''.join(col.stripped_strings)
+                    if i == 0:
+                        date = ''.join(col.stripped_strings)
+                    if i == 1:
+                        id = ''.join(col.stripped_strings)
+                    if i == 2:
+                        type = ''.join(col.stripped_strings)
+                    if i == 3:
+                        description = ''.join(col.stripped_strings)
+                        for link in col.findAll('a'):
+                            if link.has_key("href"):
+                                url = link['href']
+                                name = ''.join(link.stripped_strings)
+                                print str(items) + " (" +str(items/25) +" screens to go)"
+                                print [date, id, type, description, name, url]
+                                itemurl = scrape.fullurl(listurl, url)
+                                (itemurl, mime_type, html) = scrape.fetchURL(scrape.docsdb,
+                                    itemurl, "gazette", "AGD", False)
+                                hash = scrape.mkhash(scrape.canonurl(itemurl))
+                                doc = scrape.docsdb.get(hash)
+                                doc['metadata'] = {"date": date, "id": id, "type":type, "description":description,"name": name,"url": url}
+                                scrape.docsdb.save(doc)
+                                #time.sleep(2)
+                    i = i + 1;
+    items = items - 25
+    if items <= 0:
+        break
+
--- a/documents/genericScrapers.py
+++ b/documents/genericScrapers.py
@@ -72,7 +72,8 @@
             edate = date.today().strftime("%Y-%m-%d")
             doc = {'_id': dochash, 'agencyID': self.getAgencyID()
                 , 'url': self.getURL(), 'docID': dochash,
-                "date": edate, "title": "Disclosure Log Updated", "description": self.remove_control_chars(description), "diff": diff}
+                "date": edate, "title": "Disclosure Log Updated",
+                "description": self.remove_control_chars(description), "diff": self.remove_control_chars(diff)}
             foidocsdb.save(doc)
         else:
             print "already saved"
@@ -199,11 +200,16 @@
         return table.find_all('tr')

     def getDate(self, content, entry, doc):
-        date = ''.join(content.stripped_strings).strip()
-        (a, b, c) = date.partition("(")
-        date = self.remove_control_chars(a.replace("Octber", "October").replace("1012","2012"))
-        print date
-        edate = parse(date, dayfirst=True, fuzzy=True).strftime("%Y-%m-%d")
+        strdate = ''.join(content.stripped_strings).strip()
+        (a, b, c) = strdate.partition("(")
+        strdate = self.remove_control_chars(a.replace("Octber", "October").replace("1012","2012").replace("Janrurary", "January").replace("1012","2012"))
+        print strdate
+        try:
+            edate = parse(strdate, dayfirst=True, fuzzy=True).strftime("%Y-%m-%d")
+        except ValueError:
+            print >> sys.stderr, "ERROR date invalid %s " % strdate
+            print >> sys.stderr, "ERROR date originally %s " % ''.join(content.stripped_strings).strip()
+            edate = date.today().strftime("%Y-%m-%d")
         print edate
         doc.update({'date': edate})
         return
@@ -266,8 +272,7 @@
                             'Summary of FOIrequest received by agency/minister',
                             'Summary of FOI request received', 'Description of FOI Request',
                             "FOI request", 'Results 1 to 67 of 67']
-                        if doc['title'] not in badtitles\
-                        and doc['description'] != '':
+                        if doc['title'] not in badtitles and 'description' in doc.keys() and doc['description'] != '':
                             print "saving"
                             foidocsdb.save(doc)
                         else:
@@ -277,6 +282,6 @@
                     print "header row"

                 else:
-                    print "ERROR number of columns incorrect"
+                    print >> sys.stderr, "ERROR number of columns incorrect"
                     print row
--- a/documents/runScrapers.sh
+++ b/documents/runScrapers.sh
@@ -1,10 +1,22 @@
-for f in scrapers/*.py;
-    do echo "Processing $f file..";
-    python $f;
+DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+cd $DIR
+echo "" > /tmp/disclosr-error
+for f in scrapers/*.py; do
+    echo "Processing $f file..";
+    md5=`md5sum /tmp/disclosr-error`
+    python $f 3>&1 1>&2 2>&3 | tee --append /tmp/disclosr-error;
+    md52=`md5sum /tmp/disclosr-error`
+    if [ "$md5" != "$md52" ]; then
+        echo "^^^^^^^^^^^^^^ $f" >> /tmp/disclosr-error;
+    fi
    if [ "$?" -ne "0" ]; then
        echo "error";
-        sleep 2;
+        sleep 1;
    fi
 done
+if [ -s /tmp/disclosr-error ] ; then
+    echo "emailling logs..";
+    mail -E -s "Disclosr errors" maxious@lambdacomplex.org < /tmp/disclosr-error ;
+fi
error = "" if hasattr(e, 'reason'): error = "error %s in downloading %s" % (str(e.reason), url) elif hasattr(e, 'code'): error = "error %s in downloading %s" % (e.code, url) - print error + print >> sys.stderr, error doc['error'] = error docsdb.save(doc) return (None, None, None) @@ -207,9 +215,6 @@ #print linkurl scrapeAndStore(docsdb, linkurl, depth - 1, fieldName, agencyID) -#couch = couchdb.Server('http://192.168.1.148:5984/') -#couch = couchdb.Server('http://192.168.1.113:5984/') -couch = couchdb.Server('http://127.0.0.1:5984/') # select database agencydb = couch['disclosr-agencies'] docsdb = couch['disclosr-documents'] --- a/documents/scrapers/0049d35216493c545ef5f7f000e6b252.py +++ b/documents/scrapers/0049d35216493c545ef5f7f000e6b252.py @@ -42,7 +42,6 @@ 'data': {'request': '', 'session': '', 'more': ''} } - - amonpy.exception(data) + #amonpy.exception(data) pass --- a/documents/scrapers/227cb6eb7d2c9f8a6e846df7447d6caa.py +++ b/documents/scrapers/227cb6eb7d2c9f8a6e846df7447d6caa.py @@ -18,13 +18,13 @@ if mime_type == "text/html" or mime_type == "application/xhtml+xml" or mime_type =="application/xml": # http://www.crummy.com/software/BeautifulSoup/documentation.html soup = BeautifulSoup(htcontent) - for row in soup.find(class_ = "ms-rteTable-GreyAlternating").find_all('tr'): + rowtitle = soup.find(class_ = "wc-title").find("h1").string + if rowtitle != None: + description = rowtitle + ": " + for row in soup.find(class_ ="wc-content").find_all('td'): if row != None: - rowtitle = row.find('th').string - if rowtitle != None: - description = description + "\n" + rowtitle + ": " - for text in row.find('td').stripped_strings: - description = description + text + for text in row.stripped_strings: + description = description + text + "\n" for atag in row.find_all("a"): if atag.has_key('href'): links.append(scrape.fullurl(link,atag['href'])) @@ -37,7 +37,7 @@ def getColumnCount(self): return 2 def getTable(self,soup): - return soup.find(class_ = "ms-rteTable-GreyAlternating") + return soup.find(class_ = "ms-rteTable-default") def getColumns(self,columns): (date, title) = columns return (title, date, title, title, None) --- a/documents/scrapers/24bd71114d3975ed9a63ad29624c62c9.py +++ b/documents/scrapers/24bd71114d3975ed9a63ad29624c62c9.py @@ -7,7 +7,7 @@ #http://www.doughellmann.com/PyMOTW/abc/ class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper): def getTable(self,soup): - return soup.find(id = "inner_content") + return soup.find(class_="tborder") def getColumnCount(self): return 2 def getColumns(self,columns): --- a/documents/scrapers/3d5871a44abbbc81ef5b3a420070755d.py +++ b/documents/scrapers/3d5871a44abbbc81ef5b3a420070755d.py @@ -8,40 +8,14 @@ from datetime import * #http://www.doughellmann.com/PyMOTW/abc/ -class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper): - def getTable(self,soup): - return soup.find(class_ = "inner-column").table - def getRows(self,table): - return table.tbody.find_all('tr',recursive=False) +class ScraperImplementation(genericScrapers.GenericHTMLDisclogScraper): def getColumnCount(self): - return 3 - def getColumns(self,columns): - (date, title, description) = columns - return (date, date, title, description, None) - def getDate(self, content, entry, doc): - i = 0 - date = "" - for string in content.stripped_strings: - if i ==1: - date = string - i = i+1 - edate = parse(date, dayfirst=True, fuzzy=True).strftime("%Y-%m-%d") - print edate - doc.update({'date': edate}) - return - def getTitle(self, content, entry, doc): - i = 0 
- title = "" - for string in content.stripped_strings: - if i < 2: - title = title + string - i = i+1 - doc.update({'title': title}) - #print title - return + return 0 if __name__ == '__main__': - print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper) - print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper) +#http://www.csiro.au/Portals/About-CSIRO/How-we-work/Governance/FOI-Request-Disclosure-Log-2012-13.aspx +#http://www.csiro.au/Portals/About-CSIRO/How-we-work/Governance/FOI-Request-Disclosure-Log-2011-12.aspx + print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericHTMLDisclogScraper) + print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericHTMLDisclogScraper) ScraperImplementation().doScrape() --- a/documents/scrapers/6fa04af95fbe7de96daa2c7560e0aad3.py +++ b/documents/scrapers/6fa04af95fbe7de96daa2c7560e0aad3.py @@ -6,8 +6,6 @@ #http://www.doughellmann.com/PyMOTW/abc/ class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper): - def getTable(self,soup): - return soup.find(id = "content_div_50269").table def getColumns(self,columns): (id, date, title, description, notes) = columns return (id, date, title, description, notes) --- a/documents/scrapers/7c6adc1d41cf029bf1a0959e5156477a.py +++ b/documents/scrapers/7c6adc1d41cf029bf1a0959e5156477a.py @@ -21,11 +21,15 @@ d.make_links_absolute(base_url = self.getURL()) for table in d('table').items(): title= table('thead').text() - print title + print self.remove_control_chars(title) (idate,descA,descB,link,deldate,notes) = table('tbody tr').map(lambda i, e: pq(e).children().eq(1).text()) links = table('a').map(lambda i, e: pq(e).attr('href')) description = descA+" "+descB - edate = parse(idate[:12], dayfirst=True, fuzzy=True).strftime("%Y-%m-%d") + try: + edate = parse(idate[:12], dayfirst=True, fuzzy=True).strftime("%Y-%m-%d") + except ValueError: + edate = date.today().strftime("%Y-%m-%d") + pass print edate dochash = scrape.mkhash(self.remove_control_chars(title)) doc = foidocsdb.get(dochash) --- a/documents/scrapers/8e874a2fde8aa0ccdc6d14573d766540.py +++ b/documents/scrapers/8e874a2fde8aa0ccdc6d14573d766540.py @@ -18,10 +18,10 @@ if mime_type == "text/html" or mime_type == "application/xhtml+xml" or mime_type =="application/xml": # http://www.crummy.com/software/BeautifulSoup/documentation.html soup = BeautifulSoup(htcontent) - for text in soup.find(id="divFullWidthColumn").stripped_strings: + for text in soup.find(class_ = "mainContent").stripped_strings: description = description + text.encode('ascii', 'ignore') - for atag in soup.find(id="divFullWidthColumn").find_all("a"): + for atag in soup.find(id="SortingTable").find_all("a"): if atag.has_key('href'): links.append(scrape.fullurl(link,atag['href'])) --- a/documents/scrapers/a687a9eaab9e10e9e118d3fd7cf0e13a.py +++ b/documents/scrapers/a687a9eaab9e10e9e118d3fd7cf0e13a.py @@ -7,11 +7,11 @@ #http://www.doughellmann.com/PyMOTW/abc/ class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper): def getTable(self,soup): - return soup.find(id="ctl00_ContentPlaceHolderMainNoAjax_EdtrTD1494_2").table + return soup.find(id="int-content").table def getColumnCount(self): - return 4 + return 3 def getColumns(self,columns): - (blank,id, title,date) = columns + (id, title,date) = columns return (id, date, title, title, None) if __name__ == '__main__': --- /dev/null +++ b/documents/scrapers/b0ca7fddcd1c965787daea47f2d32e0a.py @@ -1,1 +1,17 @@ +import 
sys,os +sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../')) +import genericScrapers +import scrape +from bs4 import BeautifulSoup +#http://www.doughellmann.com/PyMOTW/abc/ +class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper): + def getColumns(self,columns): + (id, date, title, description, notes) = columns + return (id, date, title, description, notes) + +if __name__ == '__main__': + print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper) + print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper) + ScraperImplementation().doScrape() + --- a/documents/scrapers/dfd7414bb0c21a0076ab559901ae0588.py +++ b/documents/scrapers/dfd7414bb0c21a0076ab559901ae0588.py @@ -10,7 +10,7 @@ (id, date, title, description, notes) = columns return (id, date, title, description, notes) def getTable(self,soup): - return soup.find(class_ = "content") + return soup.find(class_ = "simpletable") if __name__ == '__main__': print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper) --- a/documents/scrapers/f2ab2908d8ee56ed8d995ef4187e75e6.py +++ b/documents/scrapers/f2ab2908d8ee56ed8d995ef4187e75e6.py @@ -10,7 +10,7 @@ (id, date, title, description, notes) = columns return (id, date, title, description, notes) def getTable(self,soup): - return soup.find(id = "content").table + return soup.find("table") if __name__ == '__main__': print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
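Beyond package registration, datagov-export.py upserts each agency as a CKAN group: fetch the group, merge the package into its package list if it exists, or create it when the API returns 404. Reduced to a sketch (the ckan client is constructed as in the first sketch above; only ckanclient calls already used in the exporter appear here, and the group metadata values are placeholders):

    from ckanclient import CkanApiError

    def add_package_to_group(ckan, group_name, group_title, pkg_name):
        try:
            ckan.group_entity_get(group_name)
            group_entity = ckan.last_message
            # Deduplicate while appending the new package name.
            group_entity['packages'] = list(set(group_entity.get('packages', []) + [pkg_name]))
            ckan.group_entity_put(group_entity)
        except CkanApiError, e:
            if ckan.last_status == 404:
                ckan.group_register_post({'name': group_name,
                                          'title': group_title,
                                          'description': group_title,
                                          'packages': [pkg_name]})
            elif ckan.last_status == 409:
                print "group already exists"
            else:
                raise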