cleanse dataset group/org names for the datagov export
Former-commit-id: 864798958ce17be8aab36096ea9ed641e6415467
--- a/admin/refreshDesignDoc.php
+++ b/admin/refreshDesignDoc.php
@@ -42,6 +42,7 @@
$obj->views->byWebServer->map = "function(doc) {\n emit(doc.web_server, doc);\n}";
$obj->views->datasets->map = "function(doc) {\nif (doc.fieldName == \"data\") {\n emit(doc._id, doc);\n}\n}";
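+// datasetGroups view: emit each "data.gov.au Category" tag as key with the dataset URL as value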
+$obj->views->datasetGroups->map = "function(doc) {\nif (doc.fieldName == \"data\") {\n doc.metadata[\"data.gov.au Category\"] && doc.metadata[\"data.gov.au Category\"].forEach(function(tag) {\n emit(tag, doc.url); \n });\n}\n}";
$obj->views->getValidationRequired->map = "function(doc) {\nif (doc.mime_type == \"text/html\" \n&& typeof(doc.validation) == \"undefined\") {\n emit(doc._id, doc._attachments);\n}\n}";
$docdb->save($obj, true);
--- /dev/null
+++ b/disclosr.iml
@@ -1,1 +1,16 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<module type="WEB_MODULE" version="4">
+ <component name="FacetManager">
+ <facet type="Python" name="Python">
+ <configuration sdkName="" />
+ </facet>
+ </component>
+ <component name="NewModuleRootManager" inherit-compiler-output="true">
+ <exclude-output />
+ <content url="file://$MODULE_DIR$" />
+ <orderEntry type="inheritedJdk" />
+ <orderEntry type="sourceFolder" forTests="false" />
+ </component>
+</module>
+
--- /dev/null
+++ b/documents/datagov-export-groups.py
@@ -1,1 +1,81 @@
+import ckanclient
+import couchdb
+from ckanclient import CkanApiError
+import re
+
+class LoaderError(Exception):
+ pass
+
+# Instantiate the CKAN client.
+#ckan = ckanclient.CkanClient(base_location='http://localhost:5000/api', api_key='b47b24cd-591d-40c1-8677-d73101d56d1b')
+api_key = 'b3ab75e4-afbb-465b-a09d-8171c8c69a7a'
+ckan = ckanclient.CkanClient(base_location='http://data.disclosurelo.gs/api',
+ api_key=api_key)
+couch = couchdb.Server('http://127.0.0.1:5984/')
+#couch = couchdb.Server('http://192.168.1.113:5984/')
+
+# https://github.com/okfn/ckanext-importlib
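+# make a string safe for use as a CKAN name: lowercase, spaces/symbols converted, disallowed characters stripped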
+def munge(name):
+ # convert spaces to underscores
+ name = re.sub(' ', '_', name).lower()
+ # convert symbols to dashes
+ name = re.sub('[:]', '_-', name).lower()
+ name = re.sub('[/]', '-', name).lower()
+ # take out not-allowed characters
+ name = re.sub('[^a-zA-Z0-9-_]', '', name).lower()
+ # remove double underscores
+ name = re.sub('__', '_', name).lower()
+ return name
+
+
+def name_munge(input_name):
+ return munge(input_name.replace(' ', '').replace('.', '_').replace('&', 'and'))
+
+
+docsdb = couch['disclosr-documents']
+
+if __name__ == "__main__":
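+    # group dataset package names by their data.gov.au category, read from the app/datasetGroups view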
+ groups = {}
+ for doc in docsdb.view('app/datasetGroups'):
+ group_name = doc.key
+ if group_name != "Not specified":
+ pkg_name = filter(lambda x: x in '0123456789abcdefghijklmnopqrstuvwxyz-_',
+ doc.value.replace("http://data.gov.au/dataset/", '').replace('/', '')[:100]);
+ if group_name in groups.keys():
+ groups[group_name] = list(set(groups[group_name] + [pkg_name]))
+ else:
+ groups[group_name] = [pkg_name]
+
+ # add dataset to group(s)
+ for group_name in groups.keys():
+ if group_name != "Not specified":
+ group_url = name_munge(group_name[:100])
+ print group_name
+ print groups[group_name]
+ try:
+ # Update the group details
+ group_entity = ckan.group_entity_get(group_url)
+ print "group "+group_name+" exists"
+ if 'packages' in group_entity.keys():
+ group_entity['packages'] = list(set(group_entity['packages'] + groups[group_name]))
+ else:
+ group_entity['packages'] = groups[group_name]
+ ckan.group_entity_put(group_entity)
+ except CkanApiError, e:
+ if ckan.last_status == 404:
+ print "group "+group_name+" does not exist, creating"
+ group_entity = {
+ 'name': group_url,
+ 'title': group_name,
+ 'description': group_name,
+ 'packages': groups[group_name]
+ }
+ #print group_entity
+ ckan.group_register_post(group_entity)
+ elif ckan.last_status == 409:
+ print "group already exists"
+ else:
+ raise LoaderError('Unexpected status %s adding to group under \'%s\': %r' % (
+                        ckan.last_status, group_name, e.args))
+
--- a/documents/datagov-export.py
+++ b/documents/datagov-export.py
@@ -2,14 +2,22 @@
import couchdb
from ckanclient import CkanApiError
import re
+import html2text # aaronsw :(
+import ckanapi # https://github.com/open-data/ckanapi
+
class LoaderError(Exception):
pass
# Instantiate the CKAN client.
-ckan = ckanclient.CkanClient(base_location='http://localhost:5000/api',
- api_key='b47b24cd-591d-40c1-8677-d73101d56d1b')
-# (use your own api_key from http://thedatahub.org/user/me )
+#ckan = ckanclient.CkanClient(base_location='http://localhost:5000/api', api_key='b47b24cd-591d-40c1-8677-d73101d56d1b')
+api_key = 'b3ab75e4-afbb-465b-a09d-8171c8c69a7a'
+ckan = ckanclient.CkanClient(base_location='http://data.disclosurelo.gs/api',
+ api_key=api_key)
+ckandirect = ckanapi.RemoteCKAN('http://data.disclosurelo.gs', api_key=api_key)
+couch = couchdb.Server('http://127.0.0.1:5984/')
+#couch = couchdb.Server('http://192.168.1.113:5984/')
+
# http://code.activestate.com/recipes/578019-bytes-to-human-human-to-bytes-converter/
SYMBOLS = {
'customary': ('B', 'KB', 'MB', 'GB', 'T', 'P', 'E', 'Z', 'Y'),
@@ -19,6 +27,7 @@
'iec_ext': ('byte', 'kibi', 'mebi', 'gibi', 'tebi', 'pebi', 'exbi',
'zebi', 'yobi'),
}
+
def human2bytes(s):
"""
@@ -86,51 +95,123 @@
def name_munge(input_name):
return munge(input_name.replace(' ', '').replace('.', '_').replace('&', 'and'))
- #return input_name.replace(' ', '').replace('.', '_').replace('&', 'and')
-
-couch = couchdb.Server('http://127.0.0.1:5984/')
+
+
+def get_licence_id(licencename):
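+    # map the free-text licence names found in data.gov.au metadata onto CKAN licence ids (raises on unknown values)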
+ map = {
+ "Creative Commons - Attribution-Share Alike 2.0 Australia (CC-SA)\nThe downloadable version of the database is licensed under CC-BY-SA Creative Commons Attribution Share Alike and contains only the database fields that are released under that license. These fields are object title, object number, object description as well as temporal, spatial and dimension details. It also contains a persistent URL for each record.": 'cc-by-sa',
+ "CreativeCommonsAttributionNonCommercial30AustraliaCCBYNC30": 'cc-nc',
+ 'Otherpleasespecify': 'notspecified',
+ '': 'notspecified',
+ "Publicly available data": 'notspecified',
+ "CreativeCommonsAttributionNoDerivativeWorks30AustraliaCCBYND30": "other-closed",
+ "CreativeCommonsAttributionNonCommercialNoDerivs30AustraliaCCBYNCND30": "other-closed",
+ 'CreativeCommonsAttribution30AustraliaCCBY30': 'cc-by',
+ "Creative Commons - Attribution 2.5 Australia (CC-BY)": 'cc-by',
+ 'CreativeCommonsAttributionCCBY25': 'cc-by',
+ "PublicDomain": 'other-pd',
+ }
+    if licencename not in map.keys():
+        raise Exception(licencename + " not found")
+    return map[licencename]
+
+
docsdb = couch['disclosr-documents']
if __name__ == "__main__":
+ orgs_list = []
+ orgs_ids = {}
for doc in docsdb.view('app/datasets'):
+ print " --- "
print doc.id
- if doc.value['url'] != "http://data.gov.au/data/":
+
+ if doc.value['url'] != "http://data.gov.au/data/" and doc.value['agencyID'] != "qld":
+
+
# Collect the package metadata.
- pkg_name = name_munge(doc.value['metadata']['DCTERMS.Title'][:100])
- tags = doc.value['metadata']["Keywords / Tags"]
- if not hasattr(tags, '__iter__'):
- tags = [tags]
- [re.sub('[^a-zA-Z0-9-_]', '', tag).lower() for tag in tags]
- package_entity = {
- 'name': pkg_name,
- 'title': doc.value['metadata']['DCTERMS.Title'],
- 'url': doc.value['metadata']['DCTERMS.Source.URI'],
- 'tags': tags,
- 'author': doc.value['metadata']["DCTERMS.Creator"],
- 'maintainer': doc.value['metadata']["DCTERMS.Creator"],
- 'licence_id': doc.value['metadata']['DCTERMS.License'], #todo licence id mapping
- 'notes': doc.value['metadata']['Description'],
- }
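+            # derive the CKAN package name from the data.gov.au URL slug (CKAN-safe characters only) rather than the title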
+ pkg_name = filter(lambda x: x in '0123456789abcdefghijklmnopqrstuvwxyz-_',
+ doc.value['url'].replace("http://data.gov.au/dataset/", '').replace('/', '')[:100]);
+ print pkg_name
+ #add to or create organization using direct API
+ agency = doc.value['metadata']["Agency"]
+ if agency == "APS":
+ agency = "Australian Public Service Commission"
+ if agency == "Shared Services, Treasury Directorate":
+ agency = "Shared Services Procurement, Treasury Directorate"
+ if agency == "Treasury - Shared Services":
+ agency = "Shared Services Procurement, Treasury Directorate"
+ if agency == "Territory and Municipal Services (TAMS)":
+ agency = "Territory and Municipal Services Directorate"
+ if agency == "State Library of NSW":
+ agency = "State Library of New South Wales"
+ org_name = name_munge(agency[:100])
+ if org_name not in orgs_list:
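+                # the local cache may be stale, so re-fetch the organisation list from CKAN before deciding to create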
+ orgs_list = ckandirect.action.organization_list()['result']
+ #print orgs_list
+ if org_name not in orgs_list:
+ try:
+ print "org not found, creating " + org_name
+ ckandirect.action.organization_create(name=org_name, title=agency,
+ description=agency)
+ orgs_list.append(org_name)
+ except ckanapi.ValidationError, e:
+ print e
+ raise LoaderError('Unexpected status')
+ else:
+ print "org found, adding dataset to " + org_name
+
+ # cache org names -> id mapping
+ if org_name not in orgs_ids:
+ org = ckandirect.action.organization_show(id=org_name)
+ orgs_ids[org_name] = org["result"]["id"]
+ org_id = orgs_ids[org_name]
+ print "org id is "+org_id
+ tags = []
+ creator = doc.value['metadata']["DCTERMS.Creator"]
+ if doc.value['agencyID'] == "AGIMO":
+ if len(doc.value['metadata']["Keywords / Tags"]) > 0:
+ if hasattr(doc.value['metadata']["Keywords / Tags"], '__iter__'):
+ tags = tags + doc.value['metadata']["Keywords / Tags"]
+ else:
+ tags = tags + [doc.value['metadata']["Keywords / Tags"]]
+
+ tags = [re.sub('[^a-zA-Z0-9-_.]', '', tag.replace('&', 'and')).lower() for tag in tags if tag]
+ #print tags
+ package_entity = {
+ 'name': pkg_name,
+ 'title': doc.value['metadata']['DCTERMS.Title'],
+ 'url': doc.value['metadata']['DCTERMS.Source.URI'],
+ 'tags': tags, #tags are mandatory?
+ 'author': creator,
+ 'maintainer': creator,
+ 'licence_id': get_licence_id(doc.value['metadata']['DCTERMS.License']),
+ 'notes': html2text.html2text(doc.value['metadata']['Description']),
+ 'owner_org': org_id
+                #todo add missing key values like jurisdiction
+ }
+
+
try:
- #print doc.id
+ #print package_entity
ckan.package_register_post(package_entity)
except CkanApiError, e:
- if ckan.last_status == 409:
- print "already exists"
+ if ckan.last_message == "{\"name\": [\"That URL is already in use.\"]}":
+ print "package already exists"
else:
+ print ckan.last_message
raise LoaderError('Unexpected status %s checking for package under \'%s\': %r' % (
ckan.last_status, pkg_name, e.args))
-
- print package_entity
- #todo add to organisation (author/creator/maintainer)
- #if 'data.gov.au Category' in doc.value['metadata'].keys(): #todo add to group
+ pkg = ckan.package_entity_get(pkg_name)
+
+
+ # add resources (downloadable data files)
if 'Download' in doc.value['metadata'].keys():
try:
- pkg = ckan.package_entity_get(pkg_name)
+
resources = pkg.get('resources', [])
if len(resources) < len(doc.value['metadata']['Download']):
for resource in doc.value['metadata']['Download']:
- #print resource
+
# http://docs.ckan.org/en/ckan-1.7/domain-model-resource.html
# (KML/KMZ) / (Shapefile) /(Other)
format = "plain"
@@ -141,8 +222,10 @@
name = resource['href']
if 'name' in resource.keys():
name = resource['name']
+ print resource
ckan.add_package_resource(pkg_name, resource['href'], name=name, resource_type='data',
- format=format, size=human2bytes(resource['size'].replace(',', '')))
+ format=format,
+ size=human2bytes(resource['size'].replace(',', '')))
else:
print "resources already exist"
except CkanApiError, e:
--- /dev/null
+++ b/documents/datagov-merge.php
@@ -1,1 +1,26 @@
+<?php
+include_once("../include/common.inc.php");
+
+
+setlocale(LC_CTYPE, 'C');
+
+$db = $server->get_db('disclosr-documents');
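+// map data.gov.au dataset slugs to their CouchDB document ids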
+$datasets = Array();
+try {
+ $rows = $db->get_view("app", "datasets", null, true)->rows;
+
+ foreach ($rows as $row) {
+ //print_r($row);
+ if ($row->value->url != "http://data.gov.au/data/")
+ $datasets[str_replace(Array("http://data.gov.au/dataset/","/"),"",$row->value->url)] = $row->id;
+ }
+} catch (SetteeRestClientException $e) {
+ setteErrorHandler($e);
+}
+ksort($datasets);
+foreach ($datasets as $datasetname => $datasetkey) {
+ print "$datasetname => $datasetkey<br>\n";
+}
+?>
+
--- a/documents/datagov.py
+++ b/documents/datagov.py
@@ -13,7 +13,7 @@
if atag.has_key('href'):
url = scrape.fullurl(listurl, atag['href'])
(url, mime_type, html) = scrape.fetchURL(scrape.docsdb,
- url, "data", "AGIMO")
+ url, "data", "AGIMO", False)
hash = scrape.mkhash(scrape.canonurl(url))
doc = scrape.docsdb.get(hash)
if "metadata" not in doc.keys() or True:
--- /dev/null
+++ b/documents/dataqld.py
@@ -1,1 +1,28 @@
+import sys, os
+import time
+import scrape
+from bs4 import BeautifulSoup
+from unidecode import unidecode
+import ckanclient
+
+# Instantiate the CKAN client.
+ckan = ckanclient.CkanClient(base_location='https://data.qld.gov.au/api')
+
+# Get the package list.
+package_list = ckan.package_register_get()
+for package_name in package_list:
+# Get the details of a package.
+ (url, mime_type, html) = scrape.fetchURL(scrape.docsdb,
+ "https://data.qld.gov.au/dataset/"+package_name , "data", "qld", False)
+ hash = scrape.mkhash(scrape.canonurl(url))
+ print hash
+ doc = scrape.docsdb.get(hash)
+ if "metadata" not in doc.keys() or True:
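+        # "or True" forces a refresh every run: store the full CKAN package record as this document's metadata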
+ ckan.package_entity_get(package_name)
+ package_entity = ckan.last_message
+ doc['type'] = "dataset"
+ doc['metadata'] = package_entity
+ print package_entity
+ scrape.docsdb.save(doc)
+
--- a/documents/gazette.py
+++ b/documents/gazette.py
@@ -5,20 +5,53 @@
from unidecode import unidecode
-listurl = "http://gazettes.ag.gov.au/portal/govgazonline.nsf/publications?OpenView&Start=3960"
-(url, mime_type, listhtml) = scrape.fetchURL(scrape.docsdb,
- listurl, "gazette", "AGD")
-soup = BeautifulSoup(listhtml)
-for row in soup.find_all('tr'):
- if row.has_key('valign'):
- for col in tr.find_all('td'):
- print col.string
- #url = scrape.fullurl(listurl, atag['href'])
- #(url, mime_type, html) = scrape.fetchURL(scrape.docsdb,
- # url, "data", "AGIMO")
- #hash = scrape.mkhash(scrape.canonurl(url))
- #doc = scrape.docsdb.get(hash)
- #print doc['metadata']
- #scrape.docsdb.save(doc)
- #time.sleep(2)
+items = 3950
+items = 1
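+# page through the gazette list view, 25 rows per screen, counting the start offset down until it reaches zero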
+while True:
+ print str(items) + " (" +str(items/25) +" screens to go)"
+ listurl = "http://gazettes.ag.gov.au/portal/govgazonline.nsf/publications?OpenView&Start=" + str(items)
+ (listurl, mime_type, listhtml) = scrape.fetchURL(scrape.docsdb,
+ listurl, "gazette", "AGD", False)
+ for line in listhtml.split('\n'):
+ soup = BeautifulSoup(line)
+ #print line
+ for row in soup.find_all('tr'):
+ print line
+ if row.has_key('valign'):
+ i = 0
+ date = ""
+ id = ""
+ type = ""
+ description = ""
+ name = ""
+ url = ""
+ for col in soup.find_all('td'):
+ #print ''.join(col.stripped_strings)
+ if i == 0:
+ date = ''.join(col.stripped_strings)
+ if i == 1:
+ id = ''.join(col.stripped_strings)
+ if i == 2:
+ type = ''.join(col.stripped_strings)
+ if i == 3:
+ description = ''.join(col.stripped_strings)
+ for link in col.findAll('a'):
+ if link.has_key("href"):
+ url = link['href']
+ name = ''.join(link.stripped_strings)
+ print str(items) + " (" +str(items/25) +" screens to go)"
+ print [date, id, type, description, name, url]
+ itemurl = scrape.fullurl(listurl, url)
+ (itemurl, mime_type, html) = scrape.fetchURL(scrape.docsdb,
+ itemurl, "gazette", "AGD", False)
+ hash = scrape.mkhash(scrape.canonurl(itemurl))
+ doc = scrape.docsdb.get(hash)
+                        doc['metadata'] = {"date": date, "id": id, "type": type, "description": description, "name": name, "url": url}
+ scrape.docsdb.save(doc)
+ #time.sleep(2)
+ i = i + 1;
+ items = items - 25
+ if items <= 0:
+ break
+
--- a/documents/genericScrapers.py
+++ b/documents/genericScrapers.py
@@ -72,7 +72,8 @@
edate = date.today().strftime("%Y-%m-%d")
doc = {'_id': dochash, 'agencyID': self.getAgencyID()
, 'url': self.getURL(), 'docID': dochash,
- "date": edate, "title": "Disclosure Log Updated", "description": self.remove_control_chars(description), "diff": diff}
+ "date": edate, "title": "Disclosure Log Updated",
+ "description": self.remove_control_chars(description), "diff": self.remove_control_chars(diff)}
foidocsdb.save(doc)
else:
print "already saved"
@@ -199,11 +200,16 @@
return table.find_all('tr')
def getDate(self, content, entry, doc):
- date = ''.join(content.stripped_strings).strip()
- (a, b, c) = date.partition("(")
- date = self.remove_control_chars(a.replace("Octber", "October").replace("1012","2012"))
- print date
- edate = parse(date, dayfirst=True, fuzzy=True).strftime("%Y-%m-%d")
+ strdate = ''.join(content.stripped_strings).strip()
+ (a, b, c) = strdate.partition("(")
+        strdate = self.remove_control_chars(a.replace("Octber", "October").replace("Janrurary", "January").replace("1012", "2012"))
+ print strdate
+ try:
+ edate = parse(strdate, dayfirst=True, fuzzy=True).strftime("%Y-%m-%d")
+ except ValueError:
+ print >> sys.stderr, "ERROR date invalid %s " % strdate
+ print >> sys.stderr, "ERROR date originally %s " % ''.join(content.stripped_strings).strip()
+ edate = date.today().strftime("%Y-%m-%d")
print edate
doc.update({'date': edate})
return
@@ -266,8 +272,7 @@
'Summary of FOIrequest received by agency/minister',
'Summary of FOI request received', 'Description of FOI Request',
"FOI request", 'Results 1 to 67 of 67']
- if doc['title'] not in badtitles\
- and doc['description'] != '':
+ if doc['title'] not in badtitles and 'description' in doc.keys() and doc['description'] != '':
print "saving"
foidocsdb.save(doc)
else:
@@ -277,6 +282,6 @@
print "header row"
else:
- print "ERROR number of columns incorrect"
+ print >> sys.stderr, "ERROR number of columns incorrect"
print row
--- a/documents/runScrapers.sh
+++ b/documents/runScrapers.sh
@@ -1,10 +1,22 @@
-for f in scrapers/*.py;
- do echo "Processing $f file..";
- python $f;
+DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+cd $DIR
+echo "" > /tmp/disclosr-error
+for f in scrapers/*.py; do
+ echo "Processing $f file..";
+ md5=`md5sum /tmp/disclosr-error`
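+  # swap stdout and stderr so only the scraper's stderr is piped through tee into the error log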
+ python $f 3>&1 1>&2 2>&3 | tee --append /tmp/disclosr-error;
+ md52=`md5sum /tmp/disclosr-error`
+ if [ "$md5" != "$md52" ]; then
+ echo "^^^^^^^^^^^^^^ $f" >> /tmp/disclosr-error;
+ fi
if [ "$?" -ne "0" ]; then
echo "error";
- sleep 2;
+ sleep 1;
fi
done
+if [ -s /tmp/disclosr-error ] ; then
+	echo "emailing logs..";
+ mail -E -s "Disclosr errors" maxious@lambdacomplex.org < /tmp/disclosr-error ;
+fi
--- a/documents/scrape.py
+++ b/documents/scrape.py
@@ -7,10 +7,16 @@
from urlparse import urljoin
import time
import os
+import sys
import mimetypes
import urllib
import urlparse
import socket
+
+#couch = couchdb.Server('http://192.168.1.148:5984/')
+#couch = couchdb.Server('http://192.168.1.113:5984/')
+couch = couchdb.Server('http://127.0.0.1:5984/')
+
def mkhash(input):
return hashlib.md5(input).hexdigest().encode("utf-8")
@@ -84,7 +90,7 @@
def getLastAttachment(docsdb, url):
hash = mkhash(url)
doc = docsdb.get(hash)
- if doc != None:
+ if doc != None and "_attachments" in doc.keys():
last_attachment_fname = doc["_attachments"].keys()[-1]
last_attachment = docsdb.get_attachment(doc, last_attachment_fname)
return last_attachment
@@ -98,21 +104,23 @@
req = urllib2.Request(url)
print "Fetching %s (%s)" % (url, hash)
if url.startswith("mailto") or url.startswith("javascript") or url.startswith("#") or url == None or url == "":
- print "Not a valid HTTP url"
+ print >> sys.stderr, "Not a valid HTTP url"
return (None, None, None)
doc = docsdb.get(hash)
if doc == None:
doc = {'_id': hash, 'agencyID': agencyID, 'url': url, 'fieldName': fieldName, 'type': 'website'}
else:
- if (('page_scraped' in doc) and (time.time() - doc['page_scraped']) < 60 * 24 * 14):
+ if (('page_scraped' in doc) and ((time.time() - doc['page_scraped']) < 60 * 24 * 14) or (scrape_again == False)):
print "Uh oh, trying to scrape URL again too soon!" + hash
- last_attachment_fname = doc["_attachments"].keys()[-1]
- last_attachment = docsdb.get_attachment(doc, last_attachment_fname)
- content = last_attachment
- return (doc['url'], doc['mime_type'], content.read())
- if scrape_again == False:
- print "Not scraping this URL again as requested"
- return (doc['url'], doc['mime_type'], content.read())
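+            # return the cached attachment (if any) from CouchDB instead of re-fetching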
+ if "_attachments" in doc.keys():
+ last_attachment_fname = doc["_attachments"].keys()[-1]
+ last_attachment = docsdb.get_attachment(doc, last_attachment_fname)
+ content = last_attachment.read()
+ mime_type = doc['mime_type']
+ else:
+ content = None
+ mime_type = None
+ return (doc['url'], mime_type, content)
req.add_header("User-Agent", "Mozilla/4.0 (compatible; Prometheus webspider; owner maxious@lambdacomplex.org)")
#if there is a previous version stored in couchdb, load caching helper tags
@@ -157,13 +165,13 @@
#store as attachment epoch-filename
except (urllib2.URLError, socket.timeout) as e:
- print "error!"
+ print >> sys.stderr,"error!"
error = ""
if hasattr(e, 'reason'):
error = "error %s in downloading %s" % (str(e.reason), url)
elif hasattr(e, 'code'):
error = "error %s in downloading %s" % (e.code, url)
- print error
+ print >> sys.stderr, error
doc['error'] = error
docsdb.save(doc)
return (None, None, None)
@@ -207,9 +215,6 @@
#print linkurl
scrapeAndStore(docsdb, linkurl, depth - 1, fieldName, agencyID)
-#couch = couchdb.Server('http://192.168.1.148:5984/')
-#couch = couchdb.Server('http://192.168.1.113:5984/')
-couch = couchdb.Server('http://127.0.0.1:5984/')
# select database
agencydb = couch['disclosr-agencies']
docsdb = couch['disclosr-documents']
--- a/documents/scrapers/0049d35216493c545ef5f7f000e6b252.py
+++ b/documents/scrapers/0049d35216493c545ef5f7f000e6b252.py
@@ -42,7 +42,6 @@
'data': {'request': '', 'session': '', 'more': ''}
}
-
- amonpy.exception(data)
+ #amonpy.exception(data)
pass
--- a/documents/scrapers/227cb6eb7d2c9f8a6e846df7447d6caa.py
+++ b/documents/scrapers/227cb6eb7d2c9f8a6e846df7447d6caa.py
@@ -18,13 +18,13 @@
if mime_type == "text/html" or mime_type == "application/xhtml+xml" or mime_type =="application/xml":
# http://www.crummy.com/software/BeautifulSoup/documentation.html
soup = BeautifulSoup(htcontent)
- for row in soup.find(class_ = "ms-rteTable-GreyAlternating").find_all('tr'):
+ rowtitle = soup.find(class_ = "wc-title").find("h1").string
+ if rowtitle != None:
+ description = rowtitle + ": "
+ for row in soup.find(class_ ="wc-content").find_all('td'):
if row != None:
- rowtitle = row.find('th').string
- if rowtitle != None:
- description = description + "\n" + rowtitle + ": "
- for text in row.find('td').stripped_strings:
- description = description + text
+ for text in row.stripped_strings:
+ description = description + text + "\n"
for atag in row.find_all("a"):
if atag.has_key('href'):
links.append(scrape.fullurl(link,atag['href']))
@@ -37,7 +37,7 @@
def getColumnCount(self):
return 2
def getTable(self,soup):
- return