import ckanclient
import couchdb
from ckanclient import CkanApiError
import re
import html2text # aaronsw :(


class LoaderError(Exception):
    pass

# Instantiate the CKAN client.
#ckan = ckanclient.CkanClient(base_location='http://localhost:5000/api', api_key='b47b24cd-591d-40c1-8677-d73101d56d1b')
ckan = ckanclient.CkanClient(base_location='http://data.disclosurelo.gs/api',
                             api_key='482a9dd2-a976-4adf-ac77-d71d92a98a52')
couch = couchdb.Server('http://127.0.0.1:5984/')
#couch = couchdb.Server('http://192.168.1.113:5984/')
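
# This loader copies dataset metadata already harvested into the
# disclosr-documents CouchDB into a CKAN instance: one package per dataset,
# one group per agency, plus any listed download resources.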
# http://code.activestate.com/recipes/578019-bytes-to-human-human-to-bytes-converter/
SYMBOLS = {
    'customary': ('B', 'KB', 'MB', 'GB', 'T', 'P', 'E', 'Z', 'Y'),
    'customary_ext': ('byte', 'kilo', 'mega', 'giga', 'tera', 'peta', 'exa',
                      'zetta', 'iotta'),
    'iec': ('Bi', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi', 'Yi'),
    'iec_ext': ('byte', 'kibi', 'mebi', 'gibi', 'tebi', 'pebi', 'exbi',
                'zebi', 'yobi'),
}


def human2bytes(s):
    """
    Attempts to guess the string format based on default symbols
    set and return the corresponding bytes as an integer.
    When unable to recognize the format ValueError is raised.

    >>> human2bytes('0 B')
    0
    >>> human2bytes('1 KB')
    1024
    >>> human2bytes('1 MB')
    1048576
    >>> human2bytes('1 Gi')
    1073741824
    >>> human2bytes('1 tera')
    1099511627776
    >>> human2bytes('0.5kilo')
    512
    >>> human2bytes('0.1 byte')
    0
    >>> human2bytes('1 k') # k is an alias for KB here
    1024
    >>> human2bytes('12 foo')
    Traceback (most recent call last):
        ...
    ValueError: can't interpret '12 foo'
    """
    init = s
    num = ""
    while s and s[0:1].isdigit() or s[0:1] == '.':
        num += s[0]
        s = s[1:]
    num = float(num)
    letter = s.strip()
    for name, sset in SYMBOLS.items():
        if letter in sset:
            break
    else:
        if letter == 'k':
            # treat 'k' as an alias ('K' in the original recipe, 'KB' in our
            # customary tuple above) as per: http://goo.gl/kTQMs
            sset = SYMBOLS['customary']
            letter = 'KB'
        else:
            raise ValueError("can't interpret %r" % init)
    prefix = {sset[0]: 1}
    for i, s in enumerate(sset[1:]):
        prefix[s] = 1 << (i + 1) * 10
    return int(num * prefix[letter])
# https://github.com/okfn/ckanext-importlib
def munge(name):
    # convert spaces to underscores
    name = re.sub(' ', '_', name).lower()
    # convert symbols to dashes
    name = re.sub('[:]', '_-', name).lower()
    name = re.sub('[/]', '-', name).lower()
    # take out not-allowed characters
    name = re.sub('[^a-zA-Z0-9-_]', '', name).lower()
    # remove double underscores
    name = re.sub('__', '_', name).lower()
    return name


def name_munge(input_name):
    return munge(input_name.replace(' ', '').replace('.', '_').replace('&', 'and'))
    #return input_name.replace(' ', '').replace('.', '_').replace('&', 'and')
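
# Illustrative (hypothetical input): name_munge("Dept. of Foo & Bar")
# munges to "dept_offooandbar".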

def get_licence_id(licencename):
    map = {
        "Creative Commons - Attribution-Share Alike 2.0 Australia (CC-SA)\nThe downloadable version of the database is licensed under CC-BY-SA Creative Commons Attribution Share Alike and contains only the database fields that are released under that license. These fields are object title, object number, object description as well as temporal, spatial and dimension details. It also contains a persistent URL for each record.": 'cc-by-sa',
        "CreativeCommonsAttributionNonCommercial30AustraliaCCBYNC30": 'cc-nc',
        'Otherpleasespecify': 'notspecified',
        '': 'notspecified',
        "Publicly available data": 'notspecified',
        "CreativeCommonsAttributionNoDerivativeWorks30AustraliaCCBYND30": "other-closed",
        "CreativeCommonsAttributionNonCommercialNoDerivs30AustraliaCCBYNCND30": "other-closed",
        'CreativeCommonsAttribution30AustraliaCCBY30': 'cc-by',
        "Creative Commons - Attribution 2.5 Australia (CC-BY)": 'cc-by',
        'CreativeCommonsAttributionCCBY25': 'cc-by',
        "PublicDomain": 'other-pd',
    }
    if licencename not in map.keys():
        raise Exception(licencename + " not found")
    return map[licencename]
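
# e.g. get_licence_id('PublicDomain') -> 'other-pd'; an unmapped licence
# name raises an Exception so new licences get noticed rather than dropped.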

docsdb = couch['disclosr-documents']

if __name__ == "__main__":
    for doc in docsdb.view('app/datasets'):
        print doc.id
        if doc.value['url'] != "http://data.gov.au/data/" and doc.value['agencyID'] != "qld":
            # Collect the package metadata.
            pkg_name = doc.value['url'].replace("http://data.gov.au/dataset/", '').replace('/', '')
            tags = []
            if doc.value['agencyID'] == "AGIMO":
                if len(doc.value['metadata']["Keywords / Tags"]) > 0:
                    if hasattr(doc.value['metadata']["Keywords / Tags"], '__iter__'):
                        tags = tags + doc.value['metadata']["Keywords / Tags"]
                    else:
                        tags = tags + [doc.value['metadata']["Keywords / Tags"]]
                if 'data.gov.au Category' in doc.value['metadata'].keys() and len(doc.value['metadata']['data.gov.au Category']) > 0:
                    if hasattr(doc.value['metadata']['data.gov.au Category'], '__iter__'):
                        tags = tags + doc.value['metadata']['data.gov.au Category']
                    else:
                        tags = tags + [doc.value['metadata']['data.gov.au Category']]
                tags = [re.sub('[^a-zA-Z0-9-_.]', '', tag.replace('&', 'and')).lower() for tag in tags if tag]
                print tags
                package_entity = {
                    'name': pkg_name,
                    'title': doc.value['metadata']['DCTERMS.Title'],
                    'url': doc.value['metadata']['DCTERMS.Source.URI'],
                    'tags': tags,  # tags are mandatory?
                    'author': doc.value['metadata']["DCTERMS.Creator"],
                    'maintainer': doc.value['metadata']["DCTERMS.Creator"],
                    'licence_id': get_licence_id(doc.value['metadata']['DCTERMS.License']),
                    'notes': html2text.html2text(doc.value['metadata']['Description']),
                }
            try:
                print package_entity
                ckan.package_register_post(package_entity)
            except CkanApiError, e:
                if ckan.last_status == 409:
                    print "package already exists"
                else:
                    raise LoaderError('Unexpected status %s checking for package under \'%s\': %r' % (
                        ckan.last_status, pkg_name, e.args))
            #add to group
            group_name = name_munge(doc.value['metadata']["Agency"][:100])
            try:
                print ckan.group_entity_get(group_name)
                # Update the group details
                group_entity = ckan.last_message
                print "group exists"
                if 'packages' in group_entity.keys():
                    group_entity['packages'] = list(set(group_entity['packages'] + [pkg_name]))
                else:
                    group_entity['packages'] = [pkg_name]
                ckan.group_entity_put(group_entity)
            except CkanApiError, e:
                if ckan.last_status == 404:
                    print "group does not exist, creating"
                    group_entity = {
                        'name': group_name,
                        'title': doc.value['metadata']["Agency"],
                        'description': doc.value['metadata']["Agency"],
                        'packages': [pkg_name],
                        # 'type': "organization" # not allowed via API, use database query
                        # update "group" set type = 'organization';
                    }
                    print group_entity
                    ckan.group_register_post(group_entity)
                elif ckan.last_status == 409:
                    print "group already exists"
                else:
                    raise LoaderError('Unexpected status %s adding to group under \'%s\': %r' % (
                        ckan.last_status, pkg_name, e.args))
            if 'Download' in doc.value['metadata'].keys():
                try:
                    pkg = ckan.package_entity_get(pkg_name)
                    resources = pkg.get('resources', [])
                    if len(resources) < len(doc.value['metadata']['Download']):
                        for resource in doc.value['metadata']['Download']:
                            # http://docs.ckan.org/en/ckan-1.7/domain-model-resource.html
                            # (KML/KMZ) / (Shapefile) / (Other)
                            format = "plain"
                            if resource['format'] == '(XML)':
                                format = 'xml'
                            if resource['format'] == '(CSV/XLS)':
                                format = 'csv'
                            name = resource['href']
                            if 'name' in resource.keys():
                                name = resource['name']
                            print resource
                            ckan.add_package_resource(pkg_name, resource['href'], name=name, resource_type='data',
                                                      format=format, size=human2bytes(resource['size'].replace(',', '')))
                    else:
                        print "resources already exist"
                except CkanApiError, e:
                    if ckan.last_status == 404:
                        print "parent dataset does not exist"
                    else:
                        raise LoaderError('Unexpected status %s checking for package under \'%s\': %r' % (
                            ckan.last_status, pkg_name, e.args))
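

# Fetch the package list from the data.qld.gov.au CKAN API and cache each
# package's metadata into the disclosr-documents CouchDB via scrape.fetchURL.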
import sys, os
import time
import scrape
from bs4 import BeautifulSoup
from unidecode import unidecode
import ckanclient

# Instantiate the CKAN client.
ckan = ckanclient.CkanClient(base_location='https://data.qld.gov.au/api')

# Get the package list.
package_list = ckan.package_register_get()
for package_name in package_list:
    # Get the details of a package.
    (url, mime_type, html) = scrape.fetchURL(scrape.docsdb,
        "https://data.qld.gov.au/dataset/" + package_name, "data", "qld", False)
    hash = scrape.mkhash(scrape.canonurl(url))
    print hash
    doc = scrape.docsdb.get(hash)
    if "metadata" not in doc.keys() or True:  # "or True" forces a refresh every run
        ckan.package_entity_get(package_name)
        package_entity = ckan.last_message
        doc['type'] = "dataset"
        doc['metadata'] = package_entity
        print package_entity
        scrape.docsdb.save(doc)
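

# Walk the AGD govgazonline.nsf gazette listing, 25 entries per screen, and
# store each notice's date/id/type/description/name/url metadata in CouchDB.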
import sys, os
import time
import scrape
from bs4 import BeautifulSoup
from unidecode import unidecode

items = 3950
#items = 1
while True:
    print str(items) + " (" + str(items / 25) + " screens to go)"
    listurl = "http://gazettes.ag.gov.au/portal/govgazonline.nsf/publications?OpenView&Start=" + str(items)
    (listurl, mime_type, listhtml) = scrape.fetchURL(scrape.docsdb,
                                                     listurl, "gazette", "AGD", False)
    for line in listhtml.split('\n'):
        soup = BeautifulSoup(line)
        #print line
        for row in soup.find_all('tr'):
            print line
            if row.has_key('valign'):
                i = 0
                date = ""
                id = ""
                type = ""
                description = ""
                name = ""
                url = ""
                for col in soup.find_all('td'):
                    #print ''.join(col.stripped_strings)
                    if i == 0:
                        date = ''.join(col.stripped_strings)
                    if i == 1:
                        id = ''.join(col.stripped_strings)
                    if i == 2:
                        type = ''.join(col.stripped_strings)
                    if i == 3:
                        description = ''.join(col.stripped_strings)
                    for link in col.findAll('a'):
                        if link.has_key("href"):
                            url = link['href']
                            name = ''.join(link.stripped_strings)
                            print str(items) + " (" + str(items / 25) + " screens to go)"
                            print [date, id, type, description, name, url]
                            itemurl = scrape.fullurl(listurl, url)
                            (itemurl, mime_type, html) = scrape.fetchURL(scrape.docsdb,
                                                                         itemurl, "gazette", "AGD", False)
                            hash = scrape.mkhash(scrape.canonurl(itemurl))
                            doc = scrape.docsdb.get(hash)
                            doc['metadata'] = {"date": date, "id": id, "type": type,
                                               "description": description, "name": name, "url": url}
                            scrape.docsdb.save(doc)
                            #time.sleep(2)
                    i = i + 1
    items = items - 25
    if items <= 0:
        break
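

# Reusable disclosure-log scraper base classes (imported by the per-agency
# scrapers below as the genericScrapers module): HTML-diff, PDF, DOCX, RSS
# and table-based OAIC-style logs.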
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
import scrape
from bs4 import BeautifulSoup
from time import mktime
import feedparser
import abc
import unicodedata
import re
import dateutil
from dateutil.parser import *
from datetime import *
import codecs
import difflib
import zipfile  # assumed: needed by GenericDOCXDisclogScraper below
from StringIO import StringIO
from lxml import etree  # assumed: used to parse word/document.xml below
from docx import getdocumenttext  # assumed: helper from the legacy docx.py module
from pdfminer.pdfparser import PDFDocument, PDFParser
from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter, process_pdf
from pdfminer.pdfdevice import PDFDevice, TagExtractor
from pdfminer.converter import TextConverter
from pdfminer.cmapdb import CMapDB
from pdfminer.layout import LAParams


class GenericDisclogScraper(object):
    __metaclass__ = abc.ABCMeta
    agencyID = None
    disclogURL = None

    def remove_control_chars(self, input):
        return "".join([i for i in input if ord(i) in range(32, 127)])

    def getAgencyID(self):
        """ disclosr agency id """
        if self.agencyID is None:
            self.agencyID = os.path.basename(sys.argv[0]).replace(".py", "")
        return self.agencyID

    def getURL(self):
        """ disclog URL"""
        if self.disclogURL is None:
            agency = scrape.agencydb.get(self.getAgencyID())
            self.disclogURL = agency['FOIDocumentsURL']
        return self.disclogURL

    @abc.abstractmethod
    def doScrape(self):
        """ do the scraping """
        return
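

# A minimal sketch of a concrete scraper built on these bases (hypothetical
# agency id; assumes a disclosr-agencies record with a FOIDocumentsURL):
#
#   class ExampleAgencyScraper(GenericHTMLDisclogScraper):
#       agencyID = "exampleagency"
#
#   ExampleAgencyScraper().doScrape()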


class GenericHTMLDisclogScraper(GenericDisclogScraper):

    def doScrape(self):
        foidocsdb = scrape.couch['disclosr-foidocuments']
        (url, mime_type, rcontent) = scrape.fetchURL(scrape.docsdb,
                                                     self.getURL(), "foidocuments", self.getAgencyID())
        content = rcontent
        dochash = scrape.mkhash(content)
        doc = foidocsdb.get(dochash)
        if doc is None:
            print "saving " + dochash
            description = "This log may have updated but as it was not in a table last time we viewed it, we cannot extract what has changed. Please refer to the agency's website Disclosure Log to see the most recent entries"
            last_attach = scrape.getLastAttachment(scrape.docsdb, self.getURL())
            diff = ""  # no previous attachment means no diff to record
            if last_attach != None:
                html_diff = difflib.HtmlDiff()
                diff = html_diff.make_table(last_attach.read().split('\n'),
                                            content.split('\n'))
            edate = date.today().strftime("%Y-%m-%d")
            doc = {'_id': dochash, 'agencyID': self.getAgencyID()
                , 'url': self.getURL(), 'docID': dochash,
                   "date": edate, "title": "Disclosure Log Updated",
                   "description": self.remove_control_chars(description), "diff": self.remove_control_chars(diff)}
            foidocsdb.save(doc)
        else:
            print "already saved"


class GenericPDFDisclogScraper(GenericDisclogScraper):

    def doScrape(self):
        foidocsdb = scrape.couch['disclosr-foidocuments']
        (url, mime_type, content) = scrape.fetchURL(scrape.docsdb,
                                                    self.getURL(), "foidocuments", self.getAgencyID())
        laparams = LAParams()
        rsrcmgr = PDFResourceManager(caching=True)
        outfp = StringIO()
        device = TextConverter(rsrcmgr, outfp, codec='utf-8',
                               laparams=laparams)
        fp = StringIO()
        fp.write(content)
        process_pdf(rsrcmgr, device, fp, set(), caching=True,
                    check_extractable=True)
        description = outfp.getvalue()
        fp.close()
        device.close()
        outfp.close()
        dochash = scrape.mkhash(description)
        doc = foidocsdb.get(dochash)
        if doc is None:
            print "saving " + dochash
            edate = date.today().strftime("%Y-%m-%d")
            doc = {'_id': dochash, 'agencyID': self.getAgencyID()
                , 'url': self.getURL(), 'docID': dochash,
                   "date": edate, "title": "Disclosure Log Updated", "description": self.remove_control_chars(description)}
            foidocsdb.save(doc)
        else:
            print "already saved"


class GenericDOCXDisclogScraper(GenericDisclogScraper):

    def doScrape(self):
        foidocsdb = scrape.couch['disclosr-foidocuments']
        (url, mime_type, content) = scrape.fetchURL(scrape.docsdb
            , self.getURL(), "foidocuments", self.getAgencyID())
        # open the fetched bytes as a zip (a .docx is a zip archive)
        mydoc = zipfile.ZipFile(StringIO(content))
        xmlcontent = mydoc.read('word/document.xml')
        document = etree.fromstring(xmlcontent)
        ## Fetch all the text out of the document we just created
        paratextlist = getdocumenttext(document)
        # Make explicit unicode version
        newparatextlist = []
        for paratext in paratextlist:
            newparatextlist.append(paratext.encode("utf-8"))
        ## Join our document's text with two newlines under each paragraph
        description = '\n\n'.join(newparatextlist).strip(' \t\n\r')
        dochash = scrape.mkhash(description)
        doc = foidocsdb.get(dochash)
        if doc is None:
            print "saving " + dochash
            edate = date.today().strftime("%Y-%m-%d")
            doc = {'_id': dochash, 'agencyID': self.getAgencyID()
                , 'url': self.getURL(), 'docID': dochash,
                   "date": edate, "title": "Disclosure Log Updated", "description": description}
            foidocsdb.save(doc)
        else:
            print "already saved"


class GenericRSSDisclogScraper(GenericDisclogScraper):

    def doScrape(self):
        foidocsdb = scrape.couch['disclosr-foidocuments']
        (url, mime_type, content) = scrape.fetchURL(scrape.docsdb,
                                                    self.getURL(), "foidocuments", self.getAgencyID())
        feed = feedparser.parse(content)
        for entry in feed.entries:
            #print entry
            print entry.id
            dochash = scrape.mkhash(entry.id)
            doc = foidocsdb.get(dochash)
            #print doc
            if doc is None:
                print "saving " + dochash
                edate = datetime.fromtimestamp(
                    mktime(entry.published_parsed)).strftime("%Y-%m-%d")
                doc = {'_id': dochash, 'agencyID': self.getAgencyID(),
                       'url': entry.link, 'docID': entry.id,
                       "date": edate, "title": entry.title}
                self.getDescription(entry, entry, doc)
                foidocsdb.save(doc)
            else:
                print "already saved"

    def getDescription(self, content, entry, doc):
        """ get description from rss entry"""
        doc.update({'description': content.summary})
        return


class GenericOAICDisclogScraper(GenericDisclogScraper):
    __metaclass__ = abc.ABCMeta

    @abc.abstractmethod
    def getColumns(self, columns):
        """ rearranges columns if required """
        return

    def getColumnCount(self):
        return 5

    def getDescription(self, content, entry, doc):
        """ get description from a table cell """
        descriptiontxt = ""
        for string in content.stripped_strings:
            descriptiontxt = descriptiontxt + " \n" + string
        doc.update({'description': descriptiontxt})

    def getTitle(self, content, entry, doc):
        doc.update({'title': (''.join(content.stripped_strings))})

    def getTable(self, soup):
        return soup.table

    def getRows(self, table):
        return table.find_all('tr')

    def getDate(self, content, entry, doc):
        strdate = ''.join(content.stripped_strings).strip()
        (a, b, c) = strdate.partition("(")
        strdate = self.remove_control_chars(a.replace("Octber", "October").replace("Janrurary", "January").replace("1012", "2012"))
        print strdate
        try:
            edate = parse(strdate, dayfirst=True, fuzzy=True).strftime("%Y-%m-%d")
        except ValueError:
            print >> sys.stderr, "ERROR date invalid %s " % strdate
            print >> sys.stderr, "ERROR date originally %s " % ''.join(content.stripped_strings).strip()
            edate = date.today().strftime("%Y-%m-%d")
        print edate
        doc.update({'date': edate})
        return

    def getLinks(self, content, entry, doc):
        links = []
        for atag in entry.find_all("a"):
            if atag.has_key('href'):
                links.append(scrape.fullurl(content, atag['href']))
        if links != []:
            doc.update({'links': links})
        return

    def doScrape(self):
        foidocsdb = scrape.couch['disclosr-foidocuments']
        (url, mime_type, content) = scrape.fetchURL(scrape.docsdb,
                                                    self.getURL(), "foidocuments", self.getAgencyID())
        if content is not None:
            if mime_type == "text/html" or mime_type == "application/xhtml+xml" or mime_type == "application/xml":
                # http://www.crummy.com/software/BeautifulSoup/documentation.html
                print "parsing"
                soup = BeautifulSoup(content)
                table = self.getTable(soup)
                for row in self.getRows(table):
                    columns = row.find_all('td')
                    if len(columns) == self.getColumnCount():
                        (id, date, title,
                         description, notes) = self.getColumns(columns)
                        print self.remove_control_chars(
                            ''.join(id.stripped_strings))
                        if id.string is None:
                            dochash = scrape.mkhash(
                                self.remove_control_chars(
                                    url + (''.join(date.stripped_strings))))
                        else:
                            dochash = scrape.mkhash(
                                self.remove_control_chars(
                                    url + (''.join(id.stripped_strings))))
                        doc = foidocsdb.get(dochash)
                        if doc is None:
                            print "saving " + dochash
                            doc = {'_id': dochash,
                                   'agencyID': self.getAgencyID(),
                                   'url': self.getURL(),
                                   'docID': (''.join(id.stripped_strings))}
                            self.getLinks(self.getURL(), row, doc)
                            self.getTitle(title, row, doc)
                            self.getDate(date, row, doc)
                            self.getDescription(description, row, doc)
                            if notes is not None:
                                doc.update({'notes': (
                                    ''.join(notes.stripped_strings))})
                            badtitles = ['-', 'Summary of FOI Request'
                                , 'FOI request(in summary form)'
                                , 'Summary of FOI request received by the ASC',
                                         'Summary of FOI request received by agency/minister',
                                         'Description of Documents Requested', 'FOI request',
                                         'Description of FOI Request', 'Summary of request', 'Description', 'Summary',
                                         'Summary of FOIrequest received by agency/minister',
                                         'Summary of FOI request received', 'Description of FOI Request',
                                         "FOI request", 'Results 1 to 67 of 67']
                            if doc['title'] not in badtitles and 'description' in doc.keys() and doc['description'] != '':
                                print "saving"
                                foidocsdb.save(doc)
                        else:
                            print "already saved " + dochash
                    elif len(row.find_all('th')) == self.getColumnCount():
                        print "header row"
                    else:
                        print >> sys.stderr, "ERROR number of columns incorrect"
                        print row
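

# Runner script (bash): execute every scraper in scrapers/, capture anything
# new on stderr into /tmp/disclosr-error, and email the combined log if it is
# non-empty.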
#!/bin/bash
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
cd $DIR
echo "" > /tmp/disclosr-error
for f in scrapers/*.py; do
    echo "Processing $f file..";
    md5=`md5sum /tmp/disclosr-error`
    python $f 3>&1 1>&2 2>&3 | tee --append /tmp/disclosr-error;
    md52=`md5sum /tmp/disclosr-error`
    if [ "$md5" != "$md52" ]; then
        echo "^^^^^^^^^^^^^^ $f" >> /tmp/disclosr-error;
    fi
    # nb: $? here is the status of the preceding test, not of python,
    # whose exit status is lost through the pipe above anyway
    if [ "$?" -ne "0" ]; then
        echo "error";
        sleep 1;
    fi
done
if [ -s /tmp/disclosr-error ] ; then
    echo "emailing logs..";
    mail -E -s "Disclosr errors" maxious@lambdacomplex.org < /tmp/disclosr-error ;
fi
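

# The scrape module used by the scrapers above: URL canonicalisation, cached
# fetching with ETag/Last-Modified revalidation into CouchDB attachments, and
# a simple same-site spider.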
#http://packages.python.org/CouchDB/client.html
import couchdb
import urllib2
from BeautifulSoup import BeautifulSoup
import re
import hashlib
from urlparse import urljoin
import time
import os
import sys
import mimetypes
import urllib
import urlparse
import socket

#couch = couchdb.Server('http://192.168.1.148:5984/')
#couch = couchdb.Server('http://192.168.1.113:5984/')
couch = couchdb.Server('http://127.0.0.1:5984/')


def mkhash(input):
    return hashlib.md5(input).hexdigest().encode("utf-8")


def canonurl(url):
    r"""Return the canonical, ASCII-encoded form of a UTF-8 encoded URL, or ''
    if the URL looks invalid.

    >>> canonurl('\xe2\x9e\xa1.ws')  # tinyarro.ws
    'http://xn--hgi.ws/'
    """
    # strip spaces at the ends and ensure it's prefixed with 'scheme://'
    url = url.strip()
    if not url:
        return ''
    if not urlparse.urlsplit(url).scheme:
        url = 'http://' + url
    # turn it into Unicode
    #try:
    #    url = unicode(url, 'utf-8')
    #except UnicodeDecodeError:
    #    return ''  # bad UTF-8 chars in URL
    # parse the URL into its components
    parsed = urlparse.urlsplit(url)
    scheme, netloc, path, query, fragment = parsed
    # ensure scheme is a letter followed by letters, digits, and '+-.' chars
    if not re.match(r'[a-z][-+.a-z0-9]*$', scheme, flags=re.I):
        return ''
    scheme = str(scheme)
    # ensure domain and port are valid, eg: sub.domain.<1-to-6-TLD-chars>[:port]
    match = re.match(r'(.+\.[a-z0-9]{1,6})(:\d{1,5})?$', netloc, flags=re.I)
    if not match:
        return ''
    domain, port = match.groups()
    netloc = domain + (port if port else '')
    netloc = netloc.encode('idna')
    # ensure path is valid and convert Unicode chars to %-encoded
    if not path:
        path = '/'  # eg: 'http://google.com' -> 'http://google.com/'
    path = urllib.quote(urllib.unquote(path.encode('utf-8')), safe='/;')
    # ensure query is valid
    query = urllib.quote(urllib.unquote(query.encode('utf-8')), safe='=&?/')
    # ensure fragment is valid
    fragment = urllib.quote(urllib.unquote(fragment.encode('utf-8')))
    # piece it all back together, truncating it to a maximum of 4KB
    url = urlparse.urlunsplit((scheme, netloc, path, query, fragment))
    return url[:4096]


def fullurl(url, href):
    href = href.replace(" ", "%20")
    href = re.sub('#.*$', '', href)
    return urljoin(url, href)
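
# e.g. (hypothetical URLs) fullurl("http://example.gov.au/foi/", "report 1.pdf#page=2")
#   -> "http://example.gov.au/foi/report%201.pdf"
# spaces are %-encoded and fragments dropped before joining.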

#http://diveintopython.org/http_web_services/etags.html
class NotModifiedHandler(urllib2.BaseHandler):
    def http_error_304(self, req, fp, code, message, headers):
        addinfourl = urllib2.addinfourl(fp, headers, req.get_full_url())
        addinfourl.code = code
        return addinfourl


def getLastAttachment(docsdb, url):
    hash = mkhash(url)
    doc = docsdb.get(hash)
    if doc != None and "_attachments" in doc.keys():
        last_attachment_fname = doc["_attachments"].keys()[-1]
        last_attachment = docsdb.get_attachment(doc, last_attachment_fname)
        return last_attachment
    else:
        return None


def fetchURL(docsdb, url, fieldName, agencyID, scrape_again=True):
    url = canonurl(url)
    hash = mkhash(url)
    req = urllib2.Request(url)
    print "Fetching %s (%s)" % (url, hash)
    if url.startswith("mailto") or url.startswith("javascript") or url.startswith("#") or url == None or url == "":
        print >> sys.stderr, "Not a valid HTTP url"
        return (None, None, None)
    doc = docsdb.get(hash)
    if doc == None:
        doc = {'_id': hash, 'agencyID': agencyID, 'url': url, 'fieldName': fieldName, 'type': 'website'}
    else:
        # nb: 60 * 24 * 14 is compared against seconds, i.e. about 5.6 hours,
        # not the 14 days the factors suggest
        if (('page_scraped' in doc) and ((time.time() - doc['page_scraped']) < 60 * 24 * 14) or (scrape_again == False)):
            print "Uh oh, trying to scrape URL again too soon!" + hash
            if "_attachments" in doc.keys():
                last_attachment_fname = doc["_attachments"].keys()[-1]
                last_attachment = docsdb.get_attachment(doc, last_attachment_fname)
                content = last_attachment.read()
                mime_type = doc['mime_type']
            else:
                content = None
                mime_type = None
            return (doc['url'], mime_type, content)
    req.add_header("User-Agent", "Mozilla/4.0 (compatible; Prometheus webspider; owner maxious@lambdacomplex.org)")
    #if there is a previous version stored in couchdb, load caching helper tags
    if doc.has_key('etag'):
        req.add_header("If-None-Match", doc['etag'])
    if doc.has_key('last_modified'):
        req.add_header("If-Modified-Since", doc['last_modified'])
    opener = urllib2.build_opener(NotModifiedHandler())
    try:
        url_handle = opener.open(req, None, 20)
        doc['url'] = url_handle.geturl()  # may have followed a redirect to a new url
        headers = url_handle.info()  # the addinfourls have the .info() too
        doc['etag'] = headers.getheader("ETag")
        doc['last_modified'] = headers.getheader("Last-Modified")
        doc['date'] = headers.getheader("Date")
        doc['page_scraped'] = time.time()
        doc['web_server'] = headers.getheader("Server")
        doc['via'] = headers.getheader("Via")
        doc['powered_by'] = headers.getheader("X-Powered-By")
        doc['file_size'] = headers.getheader("Content-Length")
        content_type = headers.getheader("Content-Type")
        if content_type != None:
            doc['mime_type'] = content_type.split(";")[0]
        else:
            (type, encoding) = mimetypes.guess_type(url)
            doc['mime_type'] = type
        if hasattr(url_handle, 'code'):
            if url_handle.code == 304:
                print "the web page has not been modified" + hash
                last_attachment_fname = doc["_attachments"].keys()[-1]
                last_attachment = docsdb.get_attachment(doc, last_attachment_fname)
                content = last_attachment
                return (doc['url'], doc['mime_type'], content.read())
            else:
                print "new webpage loaded"
                content = url_handle.read()
                docsdb.save(doc)
                doc = docsdb.get(hash)  # need to get a _rev
                #store as attachment epoch-filename
                docsdb.put_attachment(doc, content, str(time.time()) + "-" + os.path.basename(url), doc['mime_type'])
                return (doc['url'], doc['mime_type'], content)
    except (urllib2.URLError, socket.timeout) as e:
        print >> sys.stderr, "error!"
        error = ""
        if hasattr(e, 'reason'):
            error = "error %s in downloading %s" % (str(e.reason), url)
        elif hasattr(e, 'code'):
            error = "error %s in downloading %s" % (e.code, url)
        print >> sys.stderr, error
        doc['error'] = error
        docsdb.save(doc)
        return (None, None, None)
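
# Typical call (hypothetical values): fetchURL returns (url, mime_type, content),
# serving the cached CouchDB attachment when the page was fetched recently or
# scrape_again is False:
#   (url, mime_type, content) = fetchURL(docsdb, "http://example.gov.au/foi.html",
#                                        "foidocuments", "exampleagency")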


def scrapeAndStore(docsdb, url, depth, fieldName, agencyID):
    (url, mime_type, content) = fetchURL(docsdb, url, fieldName, agencyID)
    badURLs = ["http://www.ausport.gov.au/supporting/funding/grants_and_scholarships/grant_funding_report"]
    if content != None and depth > 0 and url not in badURLs:
        if mime_type == "text/html" or mime_type == "application/xhtml+xml" or mime_type == "application/xml":
            # http://www.crummy.com/software/BeautifulSoup/documentation.html
            soup = BeautifulSoup(content)
            navIDs = soup.findAll(
                id=re.compile('nav|Nav|menu|bar|left|right|sidebar|more-links|breadcrumb|footer|header'))
            for nav in navIDs:
                print "Removing element", nav['id']
                nav.extract()
            navClasses = soup.findAll(
                attrs={'class': re.compile('nav|menu|bar|left|right|sidebar|more-links|breadcrumb|footer|header')})
            for nav in navClasses:
                print "Removing element", nav['class']
                nav.extract()
            links = soup.findAll('a')  # soup.findAll('a', id=re.compile("^p-"))
            linkurls = set([])
            for link in links:
                if link.has_key("href"):
                    if link['href'].startswith("http"):
                        # lets not do external links for now
                        # linkurls.add(link['href'])
                        None
                    elif link['href'].startswith("mailto"):
                        # not http
                        None
                    elif link['href'].startswith("javascript"):
                        # not http
                        None
                    else:
                        # remove anchors and spaces in urls
                        linkurls.add(fullurl(url, link['href']))
            for linkurl in linkurls:
                #print linkurl
                scrapeAndStore(docsdb, linkurl, depth - 1, fieldName, agencyID)

# select database
agencydb = couch['disclosr-agencies']
docsdb = couch['disclosr-documents']

if __name__ == "__main__":
    for row in agencydb.view('app/all'):  # not recently scraped agencies view?
        agency = agencydb.get(row.id)
        print agency['name']
        for key in agency.keys():
            if key == "FOIDocumentsURL" and "status" not in agency.keys() and False:
                scrapeAndStore(docsdb, agency[key], 0, key, agency['_id'])
            if key == 'website' and True:
                scrapeAndStore(docsdb, agency[key], 0, key, agency['_id'])
                if "metadata" not in agency.keys():
                    agency['metadata'] = {}
                agency['metadata']['lastScraped'] = time.time()
            if key.endswith('URL') and False:
                print key
                depth = 1
                if 'scrapeDepth' in agency.keys():
                    depth = agency['scrapeDepth']
                scrapeAndStore(docsdb, agency[key], depth, key, agency['_id'])
        agencydb.save(agency)
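

# A per-agency scraper for a PDF disclosure log; failures are also reported
# to an Amon monitoring instance when amonpy is installed and configured.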
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
import genericScrapers
import traceback
try:
    import amonpy
    amonpy.config.address = 'http://amon_instance:port'
    amonpy.config.secret_key = 'the secret key from /etc/amon.conf'
    amon_available = True
except ImportError:
    amon_available = False

class ScraperImplementation(genericScrapers.GenericPDFDisclogScraper):
    def __init__(self):
        super(ScraperImplementation, self).__init__()

if __name__ == '__main__':
    print 'Subclass:', issubclass(ScraperImplementation,
        genericScrapers.GenericPDFDisclogScraper)
    print 'Instance:', isinstance(ScraperImplementation(),
        genericScrapers.GenericPDFDisclogScraper)
    try:
        ScraperImplementation().doScrape()
    except Exception, err:
        sys.stderr.write('ERROR: %s\n' % str(err))
        print "Error Reason: ", err.__doc__
        print "Exception: ", err.__class__
        print traceback.format_exc()
        if amon_available:
            data = {
                'exception_class': err.__class__.__name__,
                'url': '',
                'backtrace': traceback.format_exc().splitlines(),
                'environment': '',
                # 'data' is a free-form, recursive dictionary: add request
                # information, session variables or anything else relevant
                # to your specific case; the keys below are only an example
                'data': {'request': '', 'session': '', 'more': ''}
            }
            amonpy.exception(data)
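# Hedged refactoring sketch: the Amon reporting above could be shared by other
# scrapers as a helper. amonpy.exception() is the only Amon call taken from
# this file; the helper itself and its signature are illustrative only.
def report_exception(err, url=''):
    if not amon_available:
        return
    amonpy.exception({
        'exception_class': err.__class__.__name__,
        'url': url,
        'backtrace': traceback.format_exc().splitlines(),
        'data': {}
    })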
import sys,os
sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
import genericScrapers
import scrape
from bs4 import BeautifulSoup
#http://www.doughellmann.com/PyMOTW/abc/
class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
    def getDescription(self, content, entry, doc):
        link = None
        links = []
        description = ""
        for atag in entry.find_all('a'):
            if atag.has_attr('href'):
                link = scrape.fullurl(self.getURL(), atag['href'])
                (url, mime_type, htcontent) = scrape.fetchURL(scrape.docsdb, link, "foidocuments", self.getAgencyID(), False)
                if htcontent is not None:
                    if mime_type in ("text/html", "application/xhtml+xml", "application/xml"):
                        # http://www.crummy.com/software/BeautifulSoup/documentation.html
                        soup = BeautifulSoup(htcontent)
                        rowtitle = soup.find(class_="wc-title").find("h1").string
                        if rowtitle is not None:
                            description = rowtitle + ": "
                        for row in soup.find(class_="wc-content").find_all('td'):
                            if row is not None:
                                for text in row.stripped_strings:
                                    description = description + text + "\n"
                                for atag in row.find_all("a"):
                                    if atag.has_attr('href'):
                                        links.append(scrape.fullurl(link, atag['href']))
        if links != []:
            doc.update({'links': links})
        if description != "":
            doc.update({'description': description})

    def getColumnCount(self):
        return 2

    def getTable(self, soup):
        return soup.find(class_="ms-rteTable-default")

    def getColumns(self, columns):
        (date, title) = columns
        return (title, date, title, title, None)

if __name__ == '__main__':
    print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
    print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
    ScraperImplementation().doScrape()
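# A minimal, self-contained sketch of the extraction pattern getDescription()
# uses above: find a container by CSS class, then walk stripped_strings and
# anchor tags. The HTML sample here is invented for illustration.
from bs4 import BeautifulSoup
_html = '<div class="wc-content"><p>Decision <a href="/doc.pdf">PDF</a></p></div>'
_soup = BeautifulSoup(_html, "html.parser")
for _text in _soup.find(class_="wc-content").stripped_strings:
    print _text  # prints "Decision" then "PDF"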
import sys,os
sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
import genericScrapers
import scrape
from bs4 import BeautifulSoup
#http://www.doughellmann.com/PyMOTW/abc/
class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
    def getTable(self, soup):
        return soup.find(class_="tborder")
    def getColumnCount(self):
        return 2
    def getColumns(self, columns):
        (date, title) = columns
        return (date, date, title, title, None)
if __name__ == '__main__':
    print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
    print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
    ScraperImplementation().doScrape()
import sys,os
sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
import genericScrapers
import scrape
from bs4 import BeautifulSoup
import dateutil
from dateutil.parser import *
from datetime import *
#http://www.doughellmann.com/PyMOTW/abc/
class ScraperImplementation(genericScrapers.GenericHTMLDisclogScraper):
    def getColumnCount(self):
        return 0
if __name__ == '__main__':
    #http://www.csiro.au/Portals/About-CSIRO/How-we-work/Governance/FOI-Request-Disclosure-Log-2012-13.aspx
    #http://www.csiro.au/Portals/About-CSIRO/How-we-work/Governance/FOI-Request-Disclosure-Log-2011-12.aspx
    print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericHTMLDisclogScraper)
    print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericHTMLDisclogScraper)
    ScraperImplementation().doScrape()
import sys,os
sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
import genericScrapers
import scrape
from bs4 import BeautifulSoup
#http://www.doughellmann.com/PyMOTW/abc/
class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
    def getColumns(self, columns):
        (id, date, title, description, notes) = columns
        return (id, date, title, description, notes)
if __name__ == '__main__':
    print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
    print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
    ScraperImplementation().doScrape()
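# Note on the getColumns() contract used by these OAIC scrapers: the return
# value is consumed positionally as (id, date, title, description, notes),
# which is why two-column disclogs above reuse title for id and description.
# Hedged sketch with invented sample values:
sample_row = ("FOI 12/01", "3 May 2012", "Brief to Minister", "Documents released", None)
(sample_id, sample_date, sample_title, sample_description, sample_notes) = sample_row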
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
import genericScrapers
import scrape
from datetime import date
from pyquery import PyQuery as pq
from lxml import etree
import urllib
import dateutil
from dateutil.parser import *

class ACMADisclogScraper(genericScrapers.GenericDisclogScraper):
    def doScrape(self):
        foidocsdb = scrape.couch['disclosr-foidocuments']
        (url, mime_type, content) = scrape.fetchURL(scrape.docsdb,
            self.getURL(), "foidocuments", self.getAgencyID())
        d = pq(content)
        d.make_links_absolute(base_url=self.getURL())
        for table in d('table').items():
            title = table('thead').text()
            print self.remove_control_chars(title)
            (idate, descA, descB, link, deldate, notes) = table('tbody tr').map(lambda i, e: pq(e).children().eq(1).text())
            links = table('a').map(lambda i, e: pq(e).attr('href'))
            description = descA + " " + descB
            try:
                edate = parse(idate[:12], dayfirst=True, fuzzy=True).strftime("%Y-%m-%d")
            except ValueError:
                # fall back to today's date if the cell cannot be parsed
                edate = date.today().strftime("%Y-%m-%d")
            print edate
            dochash = scrape.mkhash(self.remove_control_chars(title))
            doc = foidocsdb.get(dochash)
            if doc is None:
                print "saving " + dochash
                doc = {'_id': dochash, 'agencyID': self.getAgencyID(),
                       'url': self.getURL(), 'docID': dochash,
                       "links": links,
                       "date": edate, "notes": notes, "title": title, "description": description}
                #print doc
                foidocsdb.save(doc)
            else:
                print "already saved"

if __name__ == '__main__':
    print 'Subclass:', issubclass(ACMADisclogScraper,
        genericScrapers.GenericDisclogScraper)
    print 'Instance:', isinstance(ACMADisclogScraper(),
        genericScrapers.GenericDisclogScraper)
    ACMADisclogScraper().doScrape()
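# Hedged sketch of why parse(..., dayfirst=True, fuzzy=True) is used above:
# fuzzy=True ignores surrounding words and dayfirst=True reads 03/04 as
# 3 April. Standalone demo; the sample strings are invented.
from dateutil.parser import parse
print parse("Published 3 May 2012", fuzzy=True).strftime("%Y-%m-%d")  # 2012-05-03
print parse("03/04/2013", dayfirst=True).strftime("%Y-%m-%d")  # 2013-04-03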
import sys,os
sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
import genericScrapers
import scrape
from bs4 import BeautifulSoup
import codecs
#http://www.doughellmann.com/PyMOTW/abc/
class NewScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
    def getDescription(self, content, entry, doc):
        link = None
        links = []
        description = ""
        for atag in entry.find_all('a'):
            if atag.has_attr('href'):
                link = scrape.fullurl(self.getURL(), atag['href'])
                (url, mime_type, htcontent) = scrape.fetchURL(scrape.docsdb, link, "foidocuments", self.getAgencyID(), False)
                if htcontent is not None:
                    if mime_type in ("text/html", "application/xhtml+xml", "application/xml"):
                        # http://www.crummy.com/software/BeautifulSoup/documentation.html
                        soup = BeautifulSoup(htcontent)
                        for text in soup.find(class_="mainContent").stripped_strings:
                            description = description + text.encode('ascii', 'ignore')
                        for atag in soup.find(id="SortingTable").find_all("a"):
                            if atag.has_attr('href'):
                                links.append(scrape.fullurl(link, atag['href']))
        if links != []:
            doc.update({'links': links})
        if description != "":
            doc.update({'description': description})
    def getColumnCount(self):
        return 2
    def getTable(self, soup):
        return soup.find(id="TwoColumnSorting")
    def getColumns(self, columns):
        (title, date) = columns
        return (title, date, title, title, None)

class OldScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
    def getDescription(self, content, entry, doc):
        link = None
        links = []
        description = ""
        for atag in entry.find_all('a'):
            if atag.has_attr('href'):
                link = scrape.fullurl(self.getURL(), atag['href'])
                (url, mime_type, htcontent) = scrape.fetchURL(scrape.docsdb, link, "foidocuments", self.getAgencyID(), False)
                if htcontent is not None:
                    if mime_type in ("text/html", "application/xhtml+xml", "application/xml"):
                        # http://www.crummy.com/software/BeautifulSoup/documentation.html
                        soup = BeautifulSoup(htcontent)
                        for text in soup.find(id="content-item").stripped_strings:
                            description = description + text + " \n"
                        for atag in soup.find(id="content-item").find_all("a"):
                            if atag.has_attr('href'):
                                links.append(scrape.fullurl(link, atag['href']))
        if links != []:
            doc.update({'links': links})
        if description != "":
            doc.update({'description': description})
    def getColumnCount(self):
        return 2
    def getTable(self, soup):
        return soup.find(class_="doc-list")
    def getColumns(self, columns):
        (date, title) = columns
        return (title, date, title, title, None)

if __name__ == '__main__':
    print 'Subclass:', issubclass(NewScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
    print 'Instance:', isinstance(NewScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
    NewScraperImplementation().doScrape()
    print 'Subclass:', issubclass(OldScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
    print 'Instance:', isinstance(OldScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
    osi = OldScraperImplementation()
    # point the old scraper at the archived Treasury disclosure log
    osi.disclogURL = "http://archive.treasury.gov.au/content/foi_publications.asp?year=-1&abstract=0&classification=&=&titl=Disclosure+Log+-+Documents+Released+Under+FOI"
    osi.doScrape()
import sys,os
sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
import genericScrapers
import scrape
from bs4 import BeautifulSoup
#http://www.doughellmann.com/PyMOTW/abc/
class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
    def getTable(self, soup):
        return soup.find(id="int-content").table
    def getColumnCount(self):
        return 3
    def getColumns(self, columns):
        (id, title, date) = columns
        return (id, date, title, title, None)
if __name__ == '__main__':
    print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
    print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
    ScraperImplementation().doScrape()
import sys,os
sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
import genericScrapers
import scrape
from bs4 import BeautifulSoup
#http://www.doughellmann.com/PyMOTW/abc/
class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
    def getColumns(self, columns):
        (id, date, title, description, notes) = columns
        return (id, date, title, description, notes)
    def getTable(self, soup):
        return soup.find(class_="simpletable")
if __name__ == '__main__':
    print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
    print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
    ScraperImplementation().doScrape()
import sys,os
sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
import genericScrapers
import scrape
from bs4 import BeautifulSoup
#http://www.doughellmann.com/PyMOTW/abc/
class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):
    def getColumns(self, columns):
        (id, date, title, description, notes) = columns
        return (id, date, title, description, notes)
    def getTable(self, soup):
        return soup.find("table")
if __name__ == '__main__':
    print 'Subclass:', issubclass(ScraperImplementation, genericScrapers.GenericOAICDisclogScraper)
    print 'Instance:', isinstance(ScraperImplementation(), genericScrapers.GenericOAICDisclogScraper)
    ScraperImplementation().doScrape()
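# Caveat worth remembering for the getTable() above: soup.find("table")
# returns only the first <table> in document order, so it breaks if the page
# gains a layout table before the disclosure log. Hedged demo on invented HTML:
from bs4 import BeautifulSoup
_page = BeautifulSoup('<table id="nav"></table><table id="log"></table>', "html.parser")
print _page.find("table").get('id')  # prints "nav", not "log"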