{
    "venv": "",
    "project-type": "Import from sources",
    "name": "disclosr-documents",
    "license": "GNU General Public License v3",
    "description": ""
}
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
import scrape
from bs4 import BeautifulSoup
from time import mktime
import feedparser
import abc
import unicodedata
import re
import dateutil
from dateutil.parser import *
from datetime import *
import codecs
from StringIO import StringIO
from docx import *
from lxml import etree
import zipfile
from pdfminer.pdfparser import PDFDocument, PDFParser
from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter, process_pdf
from pdfminer.pdfdevice import PDFDevice, TagExtractor
from pdfminer.converter import TextConverter
from pdfminer.cmapdb import CMapDB
from pdfminer.layout import LAParams
class GenericDisclogScraper(object):
    __metaclass__ = abc.ABCMeta
    agencyID = None
    disclogURL = None

    def remove_control_chars(self, input):
        return "".join([i for i in input if ord(i) in range(32, 127)])

    def getAgencyID(self):
        """ disclosr agency id, derived from this script's filename """
        if self.agencyID is None:
            self.agencyID = os.path.basename(sys.argv[0]).replace(".py", "")
        return self.agencyID

    def getURL(self):
        """ disclog URL, looked up from the agency record """
        if self.disclogURL is None:
            agency = scrape.agencydb.get(self.getAgencyID())
            self.disclogURL = agency['FOIDocumentsURL']
        return self.disclogURL

    @abc.abstractmethod
    def doScrape(self):
        """ do the scraping """
        return
class GenericPDFDisclogScraper(GenericDisclogScraper):

    def doScrape(self):
        foidocsdb = scrape.couch['disclosr-foidocuments']
        (url, mime_type, content) = scrape.fetchURL(scrape.docsdb,
            self.getURL(), "foidocuments", self.getAgencyID())
        laparams = LAParams()
        rsrcmgr = PDFResourceManager(caching=True)
        outfp = StringIO()
        device = TextConverter(rsrcmgr, outfp, codec='utf-8',
            laparams=laparams)
        fp = StringIO(content)  # content is the raw PDF body as a string
        process_pdf(rsrcmgr, device, fp, set(), caching=True,
            check_extractable=True)
        description = outfp.getvalue()
        fp.close()
        device.close()
        outfp.close()
        dochash = scrape.mkhash(description)
        doc = foidocsdb.get(dochash)
        if doc is None:
            print "saving " + dochash
            edate = date.today().strftime("%Y-%m-%d")
            doc = {'_id': dochash, 'agencyID': self.getAgencyID(),
                'url': self.getURL(), 'docID': dochash,
                "date": edate, "title": "Disclosure Log Updated",
                "description": description}
            foidocsdb.save(doc)
        else:
            print "already saved"
class GenericDOCXDisclogScraper(GenericDisclogScraper):

    def doScrape(self):
        foidocsdb = scrape.couch['disclosr-foidocuments']
        (url, mime_type, content) = scrape.fetchURL(scrape.docsdb,
            self.getURL(), "foidocuments", self.getAgencyID())
        mydoc = zipfile.ZipFile(StringIO(content))  # .docx files are zip archives
        xmlcontent = mydoc.read('word/document.xml')
        document = etree.fromstring(xmlcontent)
        ## Fetch all the text out of the document we just created
        paratextlist = getdocumenttext(document)
        # Make explicit unicode version
        newparatextlist = []
        for paratext in paratextlist:
            newparatextlist.append(paratext.encode("utf-8"))
        ## Join our document's text with two newlines under each paragraph
        description = '\n\n'.join(newparatextlist).strip(' \t\n\r')
        dochash = scrape.mkhash(description)
        doc = foidocsdb.get(dochash)
        if doc is None:
            print "saving " + dochash
            edate = date.today().strftime("%Y-%m-%d")
            doc = {'_id': dochash, 'agencyID': self.getAgencyID(),
                'url': self.getURL(), 'docID': dochash,
                "date": edate, "title": "Disclosure Log Updated",
                "description": description}
            foidocsdb.save(doc)
        else:
            print "already saved"
class GenericRSSDisclogScraper(GenericDisclogScraper):

    def doScrape(self):
        foidocsdb = scrape.couch['disclosr-foidocuments']
        (url, mime_type, content) = scrape.fetchURL(scrape.docsdb,
            self.getURL(), "foidocuments", self.getAgencyID())
        feed = feedparser.parse(content)
        for entry in feed.entries:
            #print entry
            print entry.id
            dochash = scrape.mkhash(entry.id)
            doc = foidocsdb.get(dochash)
            #print doc
            if doc is None:
                print "saving " + dochash
                edate = datetime.fromtimestamp(
                    mktime(entry.published_parsed)).strftime("%Y-%m-%d")
                doc = {'_id': dochash, 'agencyID': self.getAgencyID(),
                    'url': entry.link, 'docID': entry.id,
                    "date": edate, "title": entry.title}
                self.getDescription(entry, entry, doc)
                foidocsdb.save(doc)
            else:
                print "already saved"

    def getDescription(self, content, entry, doc):
        """ get description from rss entry """
        doc.update({'description': content.summary})
        return
class GenericOAICDisclogScraper(GenericDisclogScraper):
    __metaclass__ = abc.ABCMeta

    @abc.abstractmethod
    def getColumns(self, columns):
        """ rearranges columns if required """
        return

    def getColumnCount(self):
        return 5

    def getDescription(self, content, entry, doc):
        """ get description from a table cell """
        descriptiontxt = ""
        for string in content.stripped_strings:
            descriptiontxt = descriptiontxt + " \n" + string
        doc.update({'description': descriptiontxt})

    def getTitle(self, content, entry, doc):
        doc.update({'title': (''.join(content.stripped_strings))})

    def getTable(self, soup):
        return soup.table

    def getRows(self, table):
        return table.find_all('tr')

    def getDate(self, content, entry, doc):
        date = ''.join(content.stripped_strings).strip()
        (a, b, c) = date.partition("(")
        date = self.remove_control_chars(a.replace("Octber", "October"))
        print date
        edate = parse(date, dayfirst=True, fuzzy=True).strftime("%Y-%m-%d")
        print edate
        doc.update({'date': edate})
        return

    def getLinks(self, content, entry, doc):
        links = []
        for atag in entry.find_all("a"):
            if atag.has_key('href'):
                links.append(scrape.fullurl(content, atag['href']))
        if links != []:
            doc.update({'links': links})
        return

    def doScrape(self):
        foidocsdb = scrape.couch['disclosr-foidocuments']
        (url, mime_type, content) = scrape.fetchURL(scrape.docsdb,
            self.getURL(), "foidocuments", self.getAgencyID())
        if content is not None:
            if mime_type == "text/html" \
                    or mime_type == "application/xhtml+xml" \
                    or mime_type == "application/xml":
                # http://www.crummy.com/software/BeautifulSoup/documentation.html
                soup = BeautifulSoup(content)
                table = self.getTable(soup)
                for row in self.getRows(table):
                    columns = row.find_all('td')
                    if len(columns) == self.getColumnCount():
                        (id, date, title,
                            description, notes) = self.getColumns(columns)
                        print self.remove_control_chars(
                            ''.join(id.stripped_strings))
                        if id.string is None:
                            dochash = scrape.mkhash(
                                self.remove_control_chars(
                                    url + (''.join(date.stripped_strings))))
                        else:
                            dochash = scrape.mkhash(
                                self.remove_control_chars(
                                    url + (''.join(id.stripped_strings))))
                        doc = foidocsdb.get(dochash)
                        if doc is None:
                            print "saving " + dochash
                            doc = {'_id': dochash,
                                'agencyID': self.getAgencyID(),
                                'url': self.getURL(),
                                'docID': (''.join(id.stripped_strings))}
                            self.getLinks(self.getURL(), row, doc)
                            self.getTitle(title, row, doc)
                            self.getDate(date, row, doc)
                            self.getDescription(description, row, doc)
                            if notes is not None:
                                doc.update({'notes': (
                                    ''.join(notes.stripped_strings))})
                            badtitles = ['-', 'Summary of FOI Request',
                                'FOI request(in summary form)',
                                'Summary of FOI request received by the ASC',
                                'Summary of FOI request received by agency/minister',
                                'Description of Documents Requested', 'FOI request',
                                'Description of FOI Request', 'Summary of request',
                                'Description', 'Summary',
                                'Summary of FOIrequest received by agency/minister',
                                'Summary of FOI request received',
                                'Description of FOI Request', "FOI request",
                                'Results 1 to 67 of 67']
                            if doc['title'] not in badtitles \
                                    and doc['description'] != '':
                                print "saving"
                                foidocsdb.save(doc)
                        else:
                            print "already saved " + dochash
                    elif len(row.find_all('th')) == self.getColumnCount():
                        print "header row"
                    else:
                        print "ERROR number of columns incorrect"
                        print row
#http://packages.python.org/CouchDB/client.html
import couchdb
import urllib2
from BeautifulSoup import BeautifulSoup
import re
import hashlib
from urlparse import urljoin
import time
import os
import mimetypes
import urllib
import urlparse
def mkhash(input):
    return hashlib.md5(input).hexdigest().encode("utf-8")
def canonurl(url): | def canonurl(url): |
r"""Return the canonical, ASCII-encoded form of a UTF-8 encoded URL, or '' | r"""Return the canonical, ASCII-encoded form of a UTF-8 encoded URL, or '' |
if the URL looks invalid. | if the URL looks invalid. |
>>> canonurl('\xe2\x9e\xa1.ws') # tinyarro.ws | >>> canonurl('\xe2\x9e\xa1.ws') # tinyarro.ws |
'http://xn--hgi.ws/' | 'http://xn--hgi.ws/' |
""" | """ |
# strip spaces at the ends and ensure it's prefixed with 'scheme://' | # strip spaces at the ends and ensure it's prefixed with 'scheme://' |
url = url.strip() | url = url.strip() |
if not url: | if not url: |
return '' | return '' |
if not urlparse.urlsplit(url).scheme: | if not urlparse.urlsplit(url).scheme: |
url = 'http://' + url | url = 'http://' + url |
# turn it into Unicode | # turn it into Unicode |
#try: | #try: |
# url = unicode(url, 'utf-8') | # url = unicode(url, 'utf-8') |
#except UnicodeDecodeError: | #except UnicodeDecodeError: |
# return '' # bad UTF-8 chars in URL | # return '' # bad UTF-8 chars in URL |
# parse the URL into its components | # parse the URL into its components |
parsed = urlparse.urlsplit(url) | parsed = urlparse.urlsplit(url) |
scheme, netloc, path, query, fragment = parsed | scheme, netloc, path, query, fragment = parsed |
# ensure scheme is a letter followed by letters, digits, and '+-.' chars | # ensure scheme is a letter followed by letters, digits, and '+-.' chars |
if not re.match(r'[a-z][-+.a-z0-9]*$', scheme, flags=re.I): | if not re.match(r'[a-z][-+.a-z0-9]*$', scheme, flags=re.I): |
return '' | return '' |
scheme = str(scheme) | scheme = str(scheme) |
# ensure domain and port are valid, eg: sub.domain.<1-to-6-TLD-chars>[:port] | # ensure domain and port are valid, eg: sub.domain.<1-to-6-TLD-chars>[:port] |
match = re.match(r'(.+\.[a-z0-9]{1,6})(:\d{1,5})?$', netloc, flags=re.I) | match = re.match(r'(.+\.[a-z0-9]{1,6})(:\d{1,5})?$', netloc, flags=re.I) |
if not match: | if not match: |
return '' | return '' |
domain, port = match.groups() | domain, port = match.groups() |
netloc = domain + (port if port else '') | netloc = domain + (port if port else '') |
netloc = netloc.encode('idna') | netloc = netloc.encode('idna') |
# ensure path is valid and convert Unicode chars to %-encoded | # ensure path is valid and convert Unicode chars to %-encoded |
if not path: | if not path: |
path = '/' # eg: 'http://google.com' -> 'http://google.com/' | path = '/' # eg: 'http://google.com' -> 'http://google.com/' |
path = urllib.quote(urllib.unquote(path.encode('utf-8')), safe='/;') | path = urllib.quote(urllib.unquote(path.encode('utf-8')), safe='/;') |
# ensure query is valid | # ensure query is valid |
query = urllib.quote(urllib.unquote(query.encode('utf-8')), safe='=&?/') | query = urllib.quote(urllib.unquote(query.encode('utf-8')), safe='=&?/') |
# ensure fragment is valid | # ensure fragment is valid |
fragment = urllib.quote(urllib.unquote(fragment.encode('utf-8'))) | fragment = urllib.quote(urllib.unquote(fragment.encode('utf-8'))) |
# piece it all back together, truncating it to a maximum of 4KB | # piece it all back together, truncating it to a maximum of 4KB |
url = urlparse.urlunsplit((scheme, netloc, path, query, fragment)) | url = urlparse.urlunsplit((scheme, netloc, path, query, fragment)) |
return url[:4096] | return url[:4096] |
def fullurl(url, href):
    href = href.replace(" ", "%20")
    href = re.sub('#.*$', '', href)
    return urljoin(url, href)
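
# Example (illustrative values only, not from the repo's data): spaces are
# %-encoded and the fragment stripped before resolving against the page URL, so
#   fullurl("http://example.gov.au/foi/", "log 2012.pdf#top")
# yields "http://example.gov.au/foi/log%202012.pdf".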
#http://diveintopython.org/http_web_services/etags.html
class NotModifiedHandler(urllib2.BaseHandler):

    def http_error_304(self, req, fp, code, message, headers):
        addinfourl = urllib2.addinfourl(fp, headers, req.get_full_url())
        addinfourl.code = code
        return addinfourl
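
# Usage sketch for fetchURL below (the URL and agency ID are hypothetical):
#   (url, mime_type, content) = fetchURL(docsdb,
#       "http://www.example.gov.au/foi/log.html", "foidocuments", "exampleagency")
# returns the post-redirect URL, the MIME type and the body as a string, or
# (None, None, None) on error; repeat fetches inside the cool-down period and
# HTTP 304 responses are served from the CouchDB attachment cache.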
def fetchURL(docsdb, url, fieldName, agencyID, scrape_again=True):
    url = canonurl(url)
    hash = mkhash(url)
    print "Fetching %s (%s)" % (url, hash)
    if url is None or url == "" or url.startswith("mailto") \
            or url.startswith("javascript") or url.startswith("#"):
        print "Not a valid HTTP url"
        return (None, None, None)
    req = urllib2.Request(url)
    doc = docsdb.get(hash)
    if doc is None:
        doc = {'_id': hash, 'agencyID': agencyID, 'url': url, 'fieldName': fieldName}
    else:
        if ('page_scraped' in doc) and (time.time() - doc['page_scraped']) < 60 * 60 * 24 * 14:
            # scraped within the last 14 days: serve the cached copy
            print "Uh oh, trying to scrape URL again too soon! " + hash
            last_attachment_fname = doc["_attachments"].keys()[-1]
            last_attachment = docsdb.get_attachment(doc, last_attachment_fname)
            content = last_attachment.read()
            return (doc['url'], doc['mime_type'], content)
        if scrape_again == False:
            print "Not scraping this URL again as requested"
            return (None, None, None)
    req.add_header("User-Agent", "Mozilla/4.0 (compatible; Prometheus webspider; owner maxious@lambdacomplex.org)")
    # if there is a previous version stored in couchdb, load caching helper tags
    if doc.has_key('etag'):
        req.add_header("If-None-Match", doc['etag'])
    if doc.has_key('last_modified'):
        req.add_header("If-Modified-Since", doc['last_modified'])
    opener = urllib2.build_opener(NotModifiedHandler())
    try:
        url_handle = opener.open(req)
        doc['url'] = url_handle.geturl()  # may have followed a redirect to a new url
        headers = url_handle.info()  # the addinfourls have the .info() too
        doc['etag'] = headers.getheader("ETag")
        doc['last_modified'] = headers.getheader("Last-Modified")
        doc['date'] = headers.getheader("Date")
        doc['page_scraped'] = time.time()
        doc['web_server'] = headers.getheader("Server")
        doc['via'] = headers.getheader("Via")
        doc['powered_by'] = headers.getheader("X-Powered-By")
        doc['file_size'] = headers.getheader("Content-Length")
        content_type = headers.getheader("Content-Type")
        if content_type is not None:
            doc['mime_type'] = content_type.split(";")[0]
        else:
            (type, encoding) = mimetypes.guess_type(url)
            doc['mime_type'] = type
        if hasattr(url_handle, 'code') and url_handle.code == 304:
            print "the web page has not been modified " + hash
            last_attachment_fname = doc["_attachments"].keys()[-1]
            last_attachment = docsdb.get_attachment(doc, last_attachment_fname)
            content = last_attachment.read()
            return (doc['url'], doc['mime_type'], content)
        else:
            print "new webpage loaded"
            content = url_handle.read()
            docsdb.save(doc)
            doc = docsdb.get(hash)  # need to get a _rev
            # store as attachment epoch-filename
            docsdb.put_attachment(doc, content, str(time.time()) + "-" + os.path.basename(url), doc['mime_type'])
            return (doc['url'], doc['mime_type'], content)
    except urllib2.URLError as e:
        print "error!"
        error = ""
        if hasattr(e, 'reason'):
            error = "error %s in downloading %s" % (str(e.reason), url)
        elif hasattr(e, 'code'):
            error = "error %s in downloading %s" % (e.code, url)
        print error
        doc['error'] = error
        docsdb.save(doc)
        return (None, None, None)
def scrapeAndStore(docsdb, url, depth, fieldName, agencyID):
    (url, mime_type, content) = fetchURL(docsdb, url, fieldName, agencyID)
    badURLs = ["http://www.ausport.gov.au/supporting/funding/grants_and_scholarships/grant_funding_report"]
    if content != None and depth > 0 and url not in badURLs:
        if mime_type == "text/html" or mime_type == "application/xhtml+xml" or mime_type == "application/xml":
            # http://www.crummy.com/software/BeautifulSoup/documentation.html
            soup = BeautifulSoup(content)
            navIDs = soup.findAll(id=re.compile('nav|Nav|menu|bar|left|right|sidebar|more-links|breadcrumb|footer|header'))
            for nav in navIDs:
                print "Removing element", nav['id']
                nav.extract()
            navClasses = soup.findAll(attrs={'class': re.compile('nav|menu|bar|left|right|sidebar|more-links|breadcrumb|footer|header')})
            for nav in navClasses:
                print "Removing element", nav['class']
                nav.extract()
            links = soup.findAll('a')  # soup.findAll('a', id=re.compile("^p-"))
            linkurls = set([])
            for link in links:
                if link.has_key("href"):
                    if link['href'].startswith("http"):
                        # lets not do external links for now
                        # linkurls.add(link['href'])
                        pass
                    elif link['href'].startswith("mailto"):
                        # not http
                        pass
                    elif link['href'].startswith("javascript"):
                        # not http
                        pass
                    else:
                        # remove anchors and spaces in urls
                        linkurls.add(fullurl(url, link['href']))
            for linkurl in linkurls:
                #print linkurl
                scrapeAndStore(docsdb, linkurl, depth - 1, fieldName, agencyID)
#couch = couchdb.Server('http://192.168.1.148:5984/')
couch = couchdb.Server('http://127.0.0.1:5984/')
# select database
agencydb = couch['disclosr-agencies']
docsdb = couch['disclosr-documents']

if __name__ == "__main__":
    for row in agencydb.view('app/getScrapeRequired'):  # not recently scraped agencies view?
        agency = agencydb.get(row.id)
        print agency['name']
        for key in agency.keys():
            if key == "FOIDocumentsURL" and "status" not in agency.keys():
                scrapeAndStore(docsdb, agency[key], 0, key, agency['_id'])
            if key == 'website' and False:
                scrapeAndStore(docsdb, agency[key], 0, key, agency['_id'])
                agency['metadata']['lastScraped'] = time.time()
            if key.endswith('URL') and False:
                print key
                depth = 1
                if 'scrapeDepth' in agency.keys():
                    depth = agency['scrapeDepth']
                scrapeAndStore(docsdb, agency[key], depth, key, agency['_id'])
        agencydb.save(agency)
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
import genericScrapers


class ScraperImplementation(genericScrapers.GenericPDFDisclogScraper):

    def __init__(self):
        super(ScraperImplementation, self).__init__()

if __name__ == '__main__':
    print 'Subclass:', issubclass(ScraperImplementation,
        genericScrapers.GenericPDFDisclogScraper)
    print 'Instance:', isinstance(ScraperImplementation(),
        genericScrapers.GenericPDFDisclogScraper)
    ScraperImplementation().doScrape()
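# A minimal sketch of the table-driven variant (for a hypothetical agency, not
# one of the repo's real scrapers): GenericOAICDisclogScraper subclasses
# implement getColumns() to map the agency's five table cells onto
# (id, date, title, description, notes); the cell order assumed here is an
# example only.
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__) or '.', '../'))
import genericScrapers


class ScraperImplementation(genericScrapers.GenericOAICDisclogScraper):

    def getColumns(self, columns):
        # this hypothetical agency publishes its log as
        # reference / date / title / description / notes
        (id, date, title, description, notes) = columns
        return (id, date, title, description, notes)

if __name__ == '__main__':
    print 'Subclass:', issubclass(ScraperImplementation,
        genericScrapers.GenericOAICDisclogScraper)
    print 'Instance:', isinstance(ScraperImplementation(),
        genericScrapers.GenericOAICDisclogScraper)
    ScraperImplementation().doScrape()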
http://www.awm.gov.au/about/AWM_Disclosure_Log.pdf
<?php

function include_header_documents($title) {
    header('X-UA-Compatible: IE=edge,chrome=1');
    ?>
    <!doctype html>
    <!-- paulirish.com/2008/conditional-stylesheets-vs-css-hacks-answer-neither/ -->
    <!--[if lt IE 7]> <html class="no-js lt-ie9 lt-ie8 lt-ie7" lang="en"> <![endif]-->
    <!--[if IE 7]>    <html class="no-js lt-ie9 lt-ie8" lang="en"> <![endif]-->
    <!--[if IE 8]>    <html class="no-js lt-ie9" lang="en"> <![endif]-->
    <!-- Consider adding a manifest.appcache: h5bp.com/d/Offline -->
    <!--[if gt IE 8]><!--> <html class="no-js" lang="en"> <!--<![endif]-->
    <head>
        <meta charset="utf-8">
        <title>Australian Disclosure Logs<?php if ($title != "") echo " - $title"; ?></title>
        <meta name="description" content="">
        <!-- Mobile viewport optimized: h5bp.com/viewport -->
        <meta name="viewport" content="width=device-width">
        <link rel="alternate" type="application/rss+xml" title="Latest Disclosure Log Entries" href="rss.xml.php" />
        <!-- Place favicon.ico and apple-touch-icon.png in the root directory: mathiasbynens.be/notes/touch-icons -->
        <meta name="google-site-verification" content="jkknX5g2FCpQvrW030b1Nq2hyoa6mb3EDiA7kCoHNj8" />
        <!-- Le styles -->
        <link href="css/bootstrap.min.css" rel="stylesheet">
        <style type="text/css">
            body {
                padding-top: 60px;
                padding-bottom: 40px;
            }
            .sidebar-nav {
                padding: 9px 0;
            }
        </style>
        <link href="css/bootstrap-responsive.min.css" rel="stylesheet">
        <!-- HTML5 shim, for IE6-8 support of HTML5 elements -->
        <!--[if lt IE 9]>
        <script src="http://html5shim.googlecode.com/svn/trunk/html5.js"></script>
        <![endif]-->
        <!-- More ideas for your <head> here: h5bp.com/d/head-Tips -->
        <!-- All JavaScript at the bottom, except this Modernizr build.
             Modernizr enables HTML5 elements & feature detects for optimal performance.
             Create your own custom Modernizr build: www.modernizr.com/download/
        <script src="js/libs/modernizr-2.5.3.min.js"></script>-->
        <script src="js/jquery.js"></script>
        <script type="text/javascript" src="js/flotr2.min.js"></script>
    </head>
    <body>
        <div class="navbar navbar-inverse navbar-fixed-top">
            <div class="navbar-inner">
                <div class="container-fluid">
                    <a class="btn btn-navbar" data-toggle="collapse" data-target=".nav-collapse">
                        <span class="icon-bar"></span>
                        <span class="icon-bar"></span>
                        <span class="icon-bar"></span>
                    </a>
                    <a class="brand" href="#">Australian Disclosure Logs</a>
                    <div class="nav-collapse collapse">
                        <p class="navbar-text pull-right">
                            Check out our subsites on:
                            <a href="http://orgs.disclosurelo.gs">Government Agencies</a>
                            • <a href="http://lobbyists.disclosurelo.gs">Political Lobbyists</a>
                            • <a href="http://contracts.disclosurelo.gs">Government Contracts and Spending</a>
                        </p>
                        <ul class="nav">
                            <li><a href="index.php">Home</a></li>
                            <li><a href="disclogsList.php">List of Disclosure Logs</a></li>
                            <li><a href="about.php">About</a></li>
                        </ul>
                    </div><!--/.nav-collapse -->
                </div>
            </div>
        </div>
        <div class="container">
            <?php
        }
        function include_footer_documents() {
            ?>
        </div> <!-- /container -->
        <hr>
        <footer>
            <p>Not affiliated with or endorsed by any government agency.</p>
        </footer>
        <script type="text/javascript">
            var _gaq = _gaq || [];
            _gaq.push(['_setAccount', 'UA-12341040-4']);
            _gaq.push(['_setDomainName', 'disclosurelo.gs']);
            _gaq.push(['_setAllowLinker', true]);
            _gaq.push(['_trackPageview']);
            (function() {
                var ga = document.createElement('script');
                ga.type = 'text/javascript';
                ga.async = true;
                ga.src = ('https:' == document.location.protocol ? 'https://ssl' : 'http://www') + '.google-analytics.com/ga.js';
                var s = document.getElementsByTagName('script')[0];
                s.parentNode.insertBefore(ga, s);
            })();
        </script>
        <!-- Le javascript
        ================================================== -->
        <!-- Placed at the end of the document so the pages load faster -->
        <!--
        <script src="js/bootstrap-transition.js"></script>
        <script src="js/bootstrap-alert.js"></script>
        <script src="js/bootstrap-modal.js"></script>
        <script src="js/bootstrap-dropdown.js"></script>
        <script src="js/bootstrap-scrollspy.js"></script>
        <script src="js/bootstrap-tab.js"></script>
        <script src="js/bootstrap-tooltip.js"></script>
        <script src="js/bootstrap-popover.js"></script>
        <script src="js/bootstrap-button.js"></script>
        <script src="js/bootstrap-collapse.js"></script>
        <script src="js/bootstrap-carousel.js"></script>
        <script src="js/bootstrap-typeahead.js"></script>-->
    </body>
</html>
<?php
}
function truncate($string, $length, $stopanywhere = false) {
    // truncates a string to a certain char length, stopping on a word if not specified otherwise
    if (strlen($string) > $length) {
        // limit hit!
        $string = substr($string, 0, ($length - 3));
        if ($stopanywhere) {
            // stop anywhere
            $string .= '...';
        } else {
            // stop on a word
            $string = substr($string, 0, strrpos($string, ' ')) . '...';
        }
    }
    return $string;
}
function displayLogEntry($row, $idtoname) {
    $result = "";
    $result .= '<div itemscope itemtype="http://schema.org/Article">';
    $result .= '<h2><span itemprop="datePublished">' . $row->value->date . "</span>: <span itemprop='name headline'>" . truncate($row->value->title, 120) . "</span>";
    $result .= ' (<span itemprop="author publisher creator">' . $idtoname[$row->value->agencyID] . '</span>)</h2>';
    $result .= "<p itemprop='description articleBody text'>Title: " . $row->value->title . "<br/>";
    if (isset($row->value->description)) {
        $result .= str_replace("\n", "<br>", preg_replace("/(^[\r\n]*|[\r\n]+)[\s\t]*[\r\n]+/", "", trim($row->value->description)));
    }
    if (isset($row->value->notes)) {
        $result .= " <br>Note: " . $row->value->notes;
    }
    $result .= "</p>";
    if (isset($row->value->links)) {
        $result .= '<h3>Links/Documents</h3><ul itemprop="associatedMedia">';
        foreach ($row->value->links as $link) {
            $result .= '<li itemscope itemtype="http://schema.org/MediaObject"><a href="' . $link . '" itemprop="url contentURL">' . urlencode($link) . "</a></li>";
        }
        $result .= "</ul>";
    }
    $result .= "<small><a itemprop='url' href='" . $row->value->url . "'>View original source...</a> ID: " . strip_tags($row->value->docID) . "</small>";
    $result .= "</div>";
    return $result;
}