handle invalid input in file size and description markup for datagov import


Former-commit-id: 6c34fd9f8a95d5075ab5fb36f9ee2721736dcaec
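Notes: the resource handling below folds in two fixes: the CKAN resource format is now guessed from the mime type returned by scrape.fetchURL (falling back to hints in the URL), and the licence mapped by get_license_id is passed through to CKAN; the scrape.py change also stops a zero-byte cached attachment from being reused. A minimal standalone sketch of the format guessing, with illustrative names only (guess_format and the *_MIME_TYPES lists are not part of the patch):

    # Sketch of the mime-type -> CKAN format mapping used in the patch.
    XLS_MIME_TYPES = [
        "application/vnd.ms-excel", "application/msexcel", "application/x-msexcel",
        "application/x-ms-excel", "application/x-excel", "application/x-dos_ms_excel",
        "application/xls", "application/x-xls",
    ]
    XLSX_MIME_TYPES = [
        "application/xlsx", "application/x-xlsx",
        "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
    ]

    def guess_format(url, mime_type, default_format):
        # The mime type reported by the fetch wins; the URL is only a fallback.
        if mime_type in XLS_MIME_TYPES:
            return "xls"
        if mime_type in XLSX_MIME_TYPES:
            return "xlsx"
        for ext in ("xlsx", "xls", "pdf"):  # check "xlsx" first; "xls" is a substring of it
            if ext in url:
                return ext
        return default_format

The patch itself checks the URL first and lets the later mime-type tests overwrite the format, so the two orderings agree for the usual single-hint URLs.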

--- a/documents/datagov-export.py
+++ b/documents/datagov-export.py
@@ -13,9 +13,20 @@
     pass
 
 import tempfile
-def add_package_resource_cachedurl(ckan, package_name, url, name, format, size, **kwargs):
+def add_package_resource_cachedurl(ckan, package_name, url, name, format, license_id, size, **kwargs):
+    if "xls" in url:
+	format = "xls"
+    if "pdf" in url:
+	format = "pdf"
+    if "xlsx" in url:
+	format = "xlsx"
     (returned_url, mime_type, content) = scrape.fetchURL(scrape.docsdb,
                                                 url, "dataset_resource", "AGIMO", False)
+    if mime_type in ["application/vnd.ms-excel","application/msexcel","application/x-msexcel","application/x-ms-excel","application/x-excel","application/x-dos_ms_excel","application/xls","application/x-xls"]:
+	format = "xls"
+    if mime_type in ["application/xlsx","application/x-xlsx","application/vnd.openxmlformats-officedocument.spreadsheetml.sheet"]:
+	format = "xlsx"
+
     if content != None:
 	    tf = tempfile.NamedTemporaryFile(delete=False)
 	    tfName = os.path.abspath(tf.name)
@@ -23,12 +34,12 @@
 	    tf.seek(0)
 	    tf.write(content)
 	    tf.flush()
-	    ckan.add_package_resource (package_name, tfName, name=name)
+	    ckan.add_package_resource (package_name, tfName, name=name, format=format, license_id=license_id)
     else:
 	print "fetch error"
-	ckan.add_package_resource(package_name, url, name=name, resource_type='data',
+	return ckan.add_package_resource(package_name, url, name=name, resource_type='data',
                                                       format=format,
-                                                      size=size)
+                                                      size=size, mimetype=mime_type, license_id=license_id)
 
 # Instantiate the CKAN client.
 api_key = 'ff34526e-f794-4068-8235-fcbba38cd8bc'
@@ -147,15 +158,15 @@
     return munge(input_name.replace(' ', '').replace('.', '_').replace('&', 'and'))
 
 
-def get_licence_id(licencename):
+def get_license_id(licencename):
     map = {
         "Creative Commons - Attribution-Share Alike 2.0 Australia (CC-SA)\nThe downloadable version of the database is licensed under CC-BY-SA Creative Commons Attribution Share Alike and contains only the database fields that are released under that license. These fields are object title, object number, object description as well as temporal, spatial and dimension details. It also contains a persistent URL for each record.": 'cc-by-sa',
         "CreativeCommonsAttributionNonCommercial30AustraliaCCBYNC30": 'cc-nc',
         'Otherpleasespecify': 'notspecified',
         '': 'notspecified',
         "Publicly available data": 'notspecified',
-        "CreativeCommonsAttributionNoDerivativeWorks30AustraliaCCBYND30": "other-closed",
-        "CreativeCommonsAttributionNonCommercialNoDerivs30AustraliaCCBYNCND30": "other-closed",
+        "CreativeCommonsAttributionNoDerivativeWorks30AustraliaCCBYND30": "cc-by-nd",
+        "CreativeCommonsAttributionNonCommercialNoDerivs30AustraliaCCBYNCND30": "cc-nc-nd",
         'CreativeCommonsAttribution30AustraliaCCBY30': 'cc-by',
         "Creative Commons - Attribution 2.5 Australia (CC-BY)": 'cc-by',
         'CreativeCommonsAttributionCCBY25': 'cc-by',
@@ -191,6 +202,8 @@
                 agency = doc.value['metadata']["Agency"]
                 if agency == "APS":
                     agency = "Australian Public Service Commission"
+                if agency == "Department of Broadband, Communications and the Digital Ecomomy":
+                    agency = "Department of Broadband, Communications and the Digital Economy"
                 if agency == "Shared Services, Treasury Directorate":
                     agency = "Shared Services Procurement, Treasury Directorate"
                 if agency == "Treasury - Shared Services":
@@ -248,8 +261,8 @@
                         'tags': tags, #tags are mandatory?
                         'author': creator,
                         'maintainer': creator,
-                        'licence_id': get_licence_id(doc.value['metadata']['DCTERMS.License']),
-                        'notes': html2text.html2text(doc.value['metadata']['Description']),
+                        'license_id': get_license_id(doc.value['metadata']['DCTERMS.License']),
+                        'notes': html2text.html2text(doc.value['metadata']['Description']).replace('AC/a!a','-').replace('AC/a!aC/',"'").replace("AC/a!E",":").replace("A "," "),
                         'owner_org': org_id,
                         'extras': extras,
                         'private': (pkg_name not in goodcsvdata and pkg_name not in goodotherdata)
@@ -292,9 +305,8 @@
                                     name = resource['name']
                                 print resource
                                 add_package_resource_cachedurl(ckan, pkg_name, url_fix(resource['href']), name,
-                                                          format,
-                                                          human2bytes(resource.get('size', '0B')),
-                                                          resource_type='data')
+                                                          format, get_license_id(doc.value['metadata']['DCTERMS.License']),
+                                                          human2bytes(resource.get('size', '0B')))
                         else:
                             print "resources already exist"
                     except CkanApiError, e:

--- a/documents/scrape.py
+++ b/documents/scrape.py
@@ -112,7 +112,7 @@
     else:
         if (('page_scraped' in doc) and ((time.time() - doc['page_scraped']) < 60 * 24 * 14) or (scrape_again == False)):
             print "Uh oh, trying to scrape URL again too soon!" + hash
-	    if "_attachments" in doc.keys():
+	    if (not doc.has_key('file_size') or doc["file_size"] != "0") and "_attachments" in doc.keys():
 	            last_attachment_fname = doc["_attachments"].keys()[-1]
 	            last_attachment = docsdb.get_attachment(doc, last_attachment_fname)
         	    content = last_attachment.read()