def perform(self, node, inputs, output_storage):
"""
Calculate ROC AUC score.
Parameters
----------
node : Apply instance
Symbolic inputs and outputs.
inputs : list
Sequence of inputs.
output_storage : list
List of mutable 1-element lists.
"""
if roc_auc_score is None:
raise RuntimeError("Could not import roc_auc_score from sklearn.")
y_true, y_score = inputs
try:
roc_auc = roc_auc_score(y_true, y_score)
except ValueError:
roc_auc = np.nan
output_storage[0][0] = theano._asarray(roc_auc, dtype=config.floatX)
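# For context: a perform() like the one above normally lives on a theano Op.
# The sketch below shows a plausible wrapper; the class name and the scalar
# floatX output are assumptions, not taken from the file above.
import theano
from theano import config, gof, tensor
try:
    from sklearn.metrics import roc_auc_score
except ImportError:
    roc_auc_score = None

class RocAucScore(gof.Op):
    """Scalar ROC AUC of y_score against the binary labels y_true (sketch)."""
    def make_node(self, y_true, y_score):
        y_true = tensor.as_tensor_variable(y_true)
        y_score = tensor.as_tensor_variable(y_score)
        output = [tensor.scalar(dtype=config.floatX)]
        return gof.Apply(self, [y_true, y_score], output)
    # perform() as defined above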
def local_mean_subtraction(input, kernel_size=5):
input_shape = (input.shape[0], 1, input.shape[1], input.shape[2])
input = input.reshape(input_shape).astype(floatX)
X = T.tensor4(dtype=floatX)
filter_shape = (1, 1, kernel_size, kernel_size)
filters = mean_filter(kernel_size).reshape(filter_shape)
filters = shared(_asarray(filters, dtype=floatX), borrow=True)
mean = conv2d(input=X,
filters=filters,
input_shape=input.shape,
filter_shape=filter_shape,
border_mode='half')
new_X = X - mean
f = function([X], new_X)
return f(input)
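# mean_filter() is not defined in this file. A plausible implementation,
# assuming it returns a normalized (kernel_size x kernel_size) averaging kernel:
import numpy

def mean_filter(kernel_size):
    # uniform kernel whose entries sum to 1, so conv2d computes a local mean
    s = kernel_size ** 2
    return numpy.ones((kernel_size, kernel_size), dtype='float64') / s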
def perform(self, node, inp, outs):
x, axes = inp
max, max_idx = outs
if axes is None:
axes = tuple(range(x.ndim))
else:
axes = tuple(int(ax) for ax in axes)
max[0] = theano._asarray(numpy.max(x, axes),
dtype=node.outputs[0].dtype)
# NumPy's argmax does not support multiple axes, so work around it below.
keep_axes = numpy.array([i for i in range(x.ndim) if i not in axes],
dtype='int64')
# Not-reduced axes in front
transposed_x = numpy.transpose(x, numpy.concatenate((keep_axes, axes)))
kept_shape = transposed_x.shape[:len(keep_axes)]
reduced_shape = transposed_x.shape[len(keep_axes):]
new_shape = kept_shape + (numpy.prod(reduced_shape),)
reshaped_x = transposed_x.reshape(new_shape)
max_idx[0] = theano._asarray(numpy.argmax(reshaped_x, axis=-1),
dtype='int64')
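# The same multi-axis argmax work-around, illustrated with plain NumPy on a
# small hypothetical array: move the kept axes to the front, flatten the
# reduced axes, and take argmax over the last (flattened) axis.
import numpy
x = numpy.random.rand(2, 3, 4)
axes = (1, 2)
keep_axes = numpy.array([i for i in range(x.ndim) if i not in axes], dtype='int64')
transposed_x = numpy.transpose(x, numpy.concatenate((keep_axes, axes)))
reshaped_x = transposed_x.reshape(transposed_x.shape[:len(keep_axes)] + (-1,))
flat_idx = numpy.argmax(reshaped_x, axis=-1)  # shape (2,), indexes the 3*4 reduced positions
assert numpy.allclose(reshaped_x.max(axis=-1), x.max(axis=axes))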
def mul_calculate(num, denum, aslist=False, out_type=None):
if not num and not denum:
# Empty product: return 1 in the smallest possible dtype.
if aslist:
return []
else:
return numpy.int8(1)
# Make sure we do not accidentally upcast data types.
if out_type is None:
out_dtype = scalar.upcast(*[v.dtype for v in (num + denum)])
else:
out_dtype = out_type.dtype
one = theano._asarray(1, dtype=out_dtype)
v = reduce(numpy.multiply, num, one) / reduce(numpy.multiply, denum, one)
if aslist:
if numpy.all(v == 1):
return []
else:
return [v]
return v
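# Hypothetical usage of mul_calculate: fold constant factors of a product or
# quotient into a single value; the result stays in the upcast of the input dtypes.
import numpy
num = [numpy.float32(2.0), numpy.float32(3.0)]    # factors in the numerator
denum = [numpy.float32(4.0)]                      # factors in the denominator
print(mul_calculate(num, denum))                  # 1.5, kept in float32
print(mul_calculate([], [], aslist=True))         # [] -- the identity factor is omitted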
def c_code(self, node, name, inp, out, sub):
x, = inp
z, = out
# These constants were obtained by looking at the output of
# python commands like:
# for i in xrange(750):
# print i, repr(numpy.log1p(numpy.exp(theano._asarray([i,-i], dtype=dt))))
# the boundary checks prevent us from generating inf
# float16 limits: -17.0, 6.0
# We use the float32 limits for float16 for now as the
# computation will happen in float32 anyway.
if (node.inputs[0].type == scalar.float32 or
node.inputs[0].type == scalar.float16):
return """%(z)s = %(x)s < -103.0f ? 0.0 : %(x)s > 14.0f ? %(x)s : log1p(exp(%(x)s));""" % locals()
elif node.inputs[0].type == scalar.float64:
return """%(z)s = %(x)s < -745.0 ? 0.0 : %(x)s > 16.0 ? %(x)s : log1p(exp(%(x)s));""" % locals()
else:
raise NotImplementedError('only floating point is implemented')
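# A rough Python rendering of the float32 branch generated above -- a sketch of
# the numerics only, not the actual C code emitted by c_code():
import numpy

def softplus_float32(x):
    if x < -103.0:
        return 0.0                        # exp(x) underflows; log1p(exp(x)) ~= 0
    elif x > 14.0:
        return x                          # log1p(exp(x)) ~= x; avoids overflow in exp
    else:
        return numpy.log1p(numpy.exp(x))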
def test_givens(self):
x = shared(0)
assign = pfunc([], x, givens={x: 3})
assert assign() == 3
assert x.get_value(borrow=True) == 0
y = tensor.ivector()
f = pfunc([y], (y * x), givens={x: 6})
assert numpy.all(f([1, 1, 1]) == [6, 6, 6])
assert x.get_value() == 0
z = tensor.ivector()
c = z * y
f = pfunc([y], (c + 7),
givens={z: theano._asarray([4, 4, 4], dtype='int32')})
assert numpy.all(f([1, 1, 1]) == [11, 11, 11])
assert x.get_value() == 0
def test_elemwise_fusion():
""" Test the the GpuElemwise fusion work correctly"""
shape = (3, 4)
a = cuda.shared_constructor(theano._asarray(numpy.random.rand(*shape),
dtype='float32'), 'a')
b = tensor.fmatrix()
c = tensor.fmatrix()
f = pfunc([b, c], [a + b + c], mode=mode_with_gpu)
topo = f.maker.fgraph.toposort()
for i, node in enumerate(topo):
print(i, node, file=sys.stdout)
assert len(topo) == 4
assert isinstance(topo[2].op.scalar_op, theano.scalar.basic.Composite)
# let debugmode catch errors
f(theano._asarray(numpy.random.rand(*shape), dtype='float32'),
theano._asarray(numpy.random.rand(*shape), dtype='float32'))
def run_conv_nnet2_classif(use_gpu, seed, isize, ksize, bsize,
n_train=10,
check_isfinite=True,
verbose=0,
version=-1):
"""Run the train function returned by build_conv_nnet2_classif on one device.
"""
utt.seed_rng(seed) # Seeds numpy.random with seed
train, params, x_shape, y_shape, mode = build_conv_nnet2_classif(
use_gpu=use_gpu,
isize=isize,
ksize=ksize,
n_batch=bsize,
verbose=verbose,
version=version,
check_isfinite=check_isfinite)
xval = my_rand(*x_shape)
yval = my_rand(*y_shape)
lr = theano._asarray(0.01, dtype='float32')
rvals = my_zeros(n_train)
for i in xrange(n_train):
rvals[i] = train(xval, yval, lr)[0]
def test_elemwise0():
a = tcn.shared_constructor(theano._asarray(numpy.random.rand(4, 4),
dtype='float32'), 'a')
b = tensor.fmatrix()
f = pfunc([b], [], updates=[(a, a + b)], mode=mode_with_gpu)
# check that we work inplace.
assert (list(
f.maker.fgraph.toposort()[1].op.destroy_map.items()) == [
(0, [0])])
a0 = a.get_value() * 1.0
f(numpy.ones((4, 4), dtype='float32'))
assert numpy.all(a0 + 1.0 == a.get_value())
def test_elemwise3():
""" Several kinds of elemwise expressions with dimension
permutations and broadcasting"""
shape = (3, 4, 5, 6)
a = tcn.shared_constructor(theano._asarray(numpy.random.rand(*shape),
dtype='float32'), 'a')
b = tensor.fvector()
new_val = (a + b).dimshuffle([2, 0, 3, 1])
new_val *= tensor.exp(1 + b ** a).dimshuffle([2, 0, 3, 1])
f = pfunc([b], [], updates=[(a, new_val)], mode=mode_with_gpu)
has_elemwise = False
for i, node in enumerate(f.maker.fgraph.toposort()):
has_elemwise = has_elemwise or isinstance(node.op, tensor.Elemwise)
assert not has_elemwise
# let debugmode catch errors
f(theano._asarray(numpy.random.rand(6), dtype='float32'))
def test_elemwise4():
""" Test that two vectors can be broadcast to form an outer
product (by performing rank-1 matrix update"""
shape = (3, 4)
a = tcn.shared_constructor(theano._asarray(numpy.random.rand(*shape),
dtype='float32'), 'a')
b = tensor.fvector()
c = tensor.fvector()
f = pfunc([b, c], [],
updates=[(a, (a + b.dimshuffle('x', 0) * c.dimshuffle(0, 'x')))],
mode=mode_with_gpu)
has_elemwise = False
for i, node in enumerate(f.maker.fgraph.toposort()):
has_elemwise = has_elemwise or isinstance(node.op, tensor.Elemwise)
assert not has_elemwise
# let debugmode catch errors
f(theano._asarray(numpy.random.rand(4), dtype='float32'),
theano._asarray(numpy.random.rand(3), dtype='float32'))
def test_elemwise_comparaison_cast():
"""
Test that an elemwise comparison followed by a cast to float32 is
pushed to the GPU.
"""
a = tensor.fmatrix()
b = tensor.fmatrix()
av = theano._asarray(numpy.random.rand(4, 4), dtype='float32')
bv = numpy.ones((4, 4), dtype='float32')
for g, ans in [(tensor.lt, av < bv), (tensor.gt, av > bv),
(tensor.le, av <= bv), (tensor.ge, av >= bv)]:
f = pfunc([a, b], tensor.cast(g(a, b), 'float32'), mode=mode_with_gpu)
out = f(av, bv)
assert numpy.all(out == ans)
assert any([isinstance(node.op, cuda.GpuElemwise)
for node in f.maker.fgraph.toposort()])
def speed_elemwise_collapse2():
""" used to test the speed up of the generalised collapse of
ccontiguous dims"""
shape = (30, 40, 50, 600)
a = cuda_ndarray.CudaNdarray(theano._asarray(numpy.random.rand(*shape),
dtype='float32'))
a = theano._asarray(numpy.random.rand(*shape), dtype='float32')
a2 = tcn.shared_constructor(a, 'a')
a3 = a2[:, :, :, ::2]
b = tcn.CudaNdarrayType((False, False, False, False))()
c = a3 + b * tensor.exp(1 + b ** a3)
f = pfunc([b], [c], mode=mode_with_gpu)
v = theano._asarray(numpy.random.rand(*shape), dtype='float32')
v = v[:, :, :, ::2]
v = cuda_ndarray.CudaNdarray(v)
t0 = time.time()
for i in range(100):
# let debugmode catch errors
f(v)
print('100 calls took %.3f s' % (time.time() - t0))
def test_elemwise_collapse():
""" Test when all inputs have one(and the same) broadcastable dimension """
shape = (4, 5, 60)
a = cuda_ndarray.CudaNdarray(theano._asarray(numpy.random.rand(*shape),
dtype='float32'))
a = theano._asarray(numpy.random.rand(*shape), dtype='float32')
a2 = tcn.shared_constructor(a, 'a')
a3 = a2.dimshuffle(0, 'x', 1, 2)
b = tcn.CudaNdarrayType((False, True, False, False))()
c = a3 + b
f = pfunc([b], [c], mode=mode_with_gpu)
v = theano._asarray(numpy.random.rand(shape[0], 1, *shape[1:]),
dtype='float32')
v = cuda_ndarray.CudaNdarray(v)
# let debugmode catch errors
out = f(v)[0]
assert numpy.allclose(out, a.reshape(shape[0], 1, *shape[1:]) + v)
# print "Expected collapse of all dimensions"
def test_elemwise_collapse2():
""" Test when only one inputs have one broadcastable dimension """
shape = (4, 5, 9)
a = cuda_ndarray.CudaNdarray(theano._asarray(numpy.random.rand(*shape),
dtype='float32'))
a = theano._asarray(numpy.random.rand(*shape), dtype='float32')
a2 = tcn.shared_constructor(a, 'a')
a3 = a2.dimshuffle(0, 'x', 1, 2)
b = tcn.CudaNdarrayType((False, False, False, False))()
c = a3 + b
f = pfunc([b], [c], mode=mode_with_gpu)
v = theano._asarray(numpy.random.rand(shape[0], 5, *shape[1:]),
dtype='float32')
v = cuda_ndarray.CudaNdarray(v)
# let debugmode catch errors
out = f(v)[0]
assert numpy.allclose(out, a.reshape(shape[0], 1, *shape[1:]) + v)
# print "Expected collapse to 3 dimensions"
def test_elemwise_collapse3():
""" Test when only one inputs have two broadcastable dimension at each ends """
shape = (4, 5)
a = cuda_ndarray.CudaNdarray(theano._asarray(numpy.random.rand(*shape),
dtype='float32'))
a = theano._asarray(numpy.random.rand(*shape),
dtype='float32')
a2 = tcn.shared_constructor(a, 'a')
a3 = a2.dimshuffle('x', 0, 1, 'x')
b = tcn.CudaNdarrayType((False, False, False, False))()
c = (a3 + b)
f = pfunc([b], [c], mode=mode_with_gpu)
v = theano._asarray(numpy.random.rand(5, shape[0], shape[1], 4),
dtype='float32')
v = cuda_ndarray.CudaNdarray(v)
# let debugmode catch errors
out = f(v)[0]
assert numpy.allclose(out, a.reshape(1, shape[0], shape[1], 1) + v)
# print "Expected collapse to 3 dimensions"
def test_elemwise_collapse4():
""" Test when only one inputs have two broadcastable dimension at
each ends and we add a scalar"""
shape = (4, 5)
a = cuda_ndarray.CudaNdarray(theano._asarray(numpy.random.rand(*shape),
dtype='float32'))
a = theano._asarray(numpy.random.rand(*shape), dtype='float32')
a2 = tcn.shared_constructor(a, 'a')
a3 = a2.dimshuffle('x', 0, 1, 'x')
b = tcn.CudaNdarrayType((False, False, False, False))()
c = (a3 + b + 2)
f = pfunc([b], [c], mode=mode_with_gpu)
v = theano._asarray(numpy.random.rand(5, shape[0], shape[1], 4),
dtype='float32')
v = cuda_ndarray.CudaNdarray(v)
# let debugmode catch errors
out = f(v)[0]
assert numpy.allclose(out, a.reshape(1, shape[0], shape[1], 1) + v + 2)
# print "Expected collapse to 3 dimensions"
def test_elemwise_collapse6():
""" Test when all inputs have two broadcastable dimension at the
beginning"""
shape = (4, 5)
a = cuda_ndarray.CudaNdarray(theano._asarray(numpy.random.rand(*shape),
dtype='float32'))
a = theano._asarray(numpy.random.rand(*shape), dtype='float32')
a2 = tcn.shared_constructor(a, 'a')
a3 = a2.dimshuffle('x', 'x', 0, 1)
b = tcn.CudaNdarrayType((True, True, False, False))()
f = pfunc([b], [a3 + b], mode=mode_with_gpu)
v = theano._asarray(numpy.random.rand(1, 1, shape[0], shape[1]),
dtype='float32')
v = cuda_ndarray.CudaNdarray(v)
# let debugmode catch errors
out = f(v)[0]
assert numpy.allclose(out, a.reshape(1, 1, shape[0], shape[1]) + v)
# print "Expected collapse to c contiguous"
def test_elemwise_collapse7(atol=1e-6):
""" Test when one input have one broadcastable dimension and the
other is a scalar"""
shape = (5, 4, 1)
a = cuda_ndarray.CudaNdarray(theano._asarray(numpy.random.rand(*shape),
dtype='float32'))
a = theano._asarray(numpy.random.rand(*shape), dtype='float32')
a2 = tcn.shared_constructor(a.copy(), 'a')
a3 = a2.dimshuffle(0, 'x', 1, 2)
f = pfunc([], [a3 + 2], mode=mode_with_gpu)
# let debugmode catch errors
out = f()[0]
ans = (a + 2).reshape(shape[0], 1, shape[1], shape[2])
assert numpy.allclose(out, ans, atol=atol)
# print "Expected collapse to c contiguous"
def test_host_to_device():
# print >>sys.stdout, 'starting test_host_to_dev'
for shape in ((), (3,), (2, 3), (3, 4, 5, 6)):
a = theano._asarray(numpy.random.rand(*shape), dtype='float32')
b = cuda_ndarray.CudaNdarray(a)
c = numpy.asarray(b)
assert numpy.all(a == c)
# test with float32 dtype
d = numpy.asarray(b, dtype='float32')
assert numpy.all(a == d)
# test with a non-float32 dtype
try:
numpy.asarray(b, dtype='int8')
assert False
except TypeError:
pass
def test_copy():
# print >>sys.stdout, 'starting test_copy'
shape = (500, 499)
a = theano._asarray(numpy.random.rand(*shape), dtype='float32')
# print >>sys.stdout, '.. creating device object'
b = cuda_ndarray.CudaNdarray(a)
# print >>sys.stdout, '.. copy'
c = copy.copy(b)
# print >>sys.stdout, '.. deepcopy'
d = copy.deepcopy(b)
# print >>sys.stdout, '.. comparisons'
assert numpy.allclose(a, numpy.asarray(b))
assert numpy.allclose(a, numpy.asarray(c))
assert numpy.allclose(a, numpy.asarray(d))
b += b
assert numpy.allclose(a + a, numpy.asarray(b))
assert numpy.allclose(a + a, numpy.asarray(c))
assert numpy.allclose(a, numpy.asarray(d))
def test_getshape():
shapelist = [
((1, 2, 3), (1, 2, 3)),
((1,), (1,)),
((1, 2, 3), (3, 2, 1)),
((1, 2, 3), (6,)),
((1, 2, 3, 2), (6, 2)),
((2, 3, 2), (6, 2))
]
def subtest(shape):
a = theano._asarray(numpy.random.rand(*shape), dtype='float32')
b = cuda_ndarray.CudaNdarray(a)
assert b.shape == a.shape
for shape_1, shape_2 in shapelist:
subtest(shape_1)
subtest(shape_2)
def test_stride_manipulation():
a = theano._asarray([[0, 1, 2], [3, 4, 5]], dtype='float32')
b = cuda_ndarray.CudaNdarray(a)
v = b.view()
v._dev_data += 0
c = numpy.asarray(v)
assert numpy.all(a == c)
sizeof_float = 4
offset = 0
b_strides = b._strides
for i in xrange(len(b.shape)):
offset += (b.shape[i] - 1) * b_strides[i]
v._set_stride(i, -b_strides[i])
v._dev_data += offset * sizeof_float
c = numpy.asarray(v)
assert numpy.all(c == [[5, 4, 3], [2, 1, 0]])
def test_setitem_matrixscalar0():
a = theano._asarray([[0, 1, 2], [3, 4, 5]], dtype='float32')
_a = cuda_ndarray.CudaNdarray(a)
b = theano._asarray(8, dtype='float32')
_b = cuda_ndarray.CudaNdarray(b)
# set an element to 8
_a[1, 1] = _b
a[1, 1] = b
assert numpy.allclose(a, numpy.asarray(_a))
# test direct transfer from numpy
_a[1, 1] = theano._asarray(888, dtype='float32')
a[1, 1] = theano._asarray(888, dtype='float32')
assert numpy.allclose(a, numpy.asarray(_a))
# broadcast a 0
_a[1, 1] = 0
_a[0:2] = 0
_a[1:] = 0
def test_setitem_matrixvector1():
a = theano._asarray([[0, 1, 2], [3, 4, 5]], dtype='float32')
_a = cuda_ndarray.CudaNdarray(a)
b = theano._asarray([8, 9], dtype='float32')
_b = cuda_ndarray.CudaNdarray(b)
# set second column to 8,9
_a[:, 1] = _b
a[:, 1] = b
assert numpy.allclose(a, numpy.asarray(_a))
# test direct transfer from numpy
_a[:, 1] = b * 100
a[:, 1] = b * 100
assert numpy.allclose(a, numpy.asarray(_a))
row = theano._asarray([777, 888, 999], dtype='float32')
_a[1, :] = row
a[1, :] = row
assert numpy.allclose(a, numpy.asarray(_a))
def test_setitem_matrix_tensor3():
a = numpy.arange(27)
a.resize((3, 3, 3))
a = theano._asarray(a, dtype='float32')
_a = cuda_ndarray.CudaNdarray(a)
b = theano._asarray([7, 8, 9], dtype='float32')
_b = cuda_ndarray.CudaNdarray(b)
# set middle row through cube to 7,8,9
_a[:, 1, 1] = _b
a[:, 1, 1] = b
assert numpy.allclose(a, numpy.asarray(_a))
# test direct transfer from numpy
_a[:, 1, 1] = b * 100
a[:, 1, 1] = b * 100
assert numpy.allclose(a, numpy.asarray(_a))
row = theano._asarray([777, 888, 999], dtype='float32')
_a[1, 1, :] = row
a[1, 1, :] = row
assert numpy.allclose(a, numpy.asarray(_a))
def test_setitem_matrix_bad_ndim():
a = numpy.arange(27)
a.resize((3, 3, 3))
a = theano._asarray(a, dtype='float32')
_a = cuda_ndarray.CudaNdarray(a)
b = theano._asarray([7, 8], dtype='float32')
_b = cuda_ndarray.CudaNdarray(b)
try:
# attempt to assign the ndarray b with setitem
_a[:, :, 1] = _b
assert False
except ValueError:
# print e
assert True
# test direct transfer from numpy
try:
# attempt to assign the ndarray b with setitem
_a[1, :, :] = b
assert False
except ValueError:
# print e
assert True
def test_setitem_matrix_bad_type():
a = numpy.arange(27)
a.resize((3, 3, 3))
a = theano._asarray(a, dtype='float32')
_a = cuda_ndarray.CudaNdarray(a)
b = theano._asarray([7, 8], dtype='float64')
# test direct transfer from numpy
try:
# attempt to assign the ndarray b with setitem
_a[1, :, :] = b
assert False
except TypeError:
# print e
assert True
def test_setitem_assign_to_slice():
a = numpy.arange(27)
a.resize((3, 3, 3))
a = theano._asarray(a, dtype='float32')
_a = cuda_ndarray.CudaNdarray(a)
b = theano._asarray([7, 8, 9], dtype='float32')
_b = cuda_ndarray.CudaNdarray(b)
# first get a slice of a
_c = _a[:, :, 1]
# set middle row through cube to 7,8,9
# (this corresponds to middle row of matrix _c)
_c[:, 1] = _b
a[:, :, 1][:, 1] = b
assert numpy.allclose(a, numpy.asarray(_a))
# test direct transfer from numpy
_d = _a[1, :, :]
_d[1, :] = b * 10
a[1, :, :][1, :] = b * 10
assert numpy.allclose(a, numpy.asarray(_a))
def sharedX_value(value, name=None, borrow=None, dtype=None):
"""Share a single value after transforming it to floatX type.
value: the value to share
name: variable name (str)
borrow: boolean passed to theano.shared
dtype: the type of the value when shared. default: theano.config.floatX
"""
if dtype is None:
dtype = theano.config.floatX
return theano.shared(
theano._asarray(value, dtype=dtype), name=name, borrow=borrow)
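# Hypothetical usage: keep a learning rate as a floatX shared variable that can
# be read and updated from compiled functions.
lr = sharedX_value(0.01, name='lr')
print(lr.get_value(), lr.dtype)   # 0.01, stored as theano.config.floatX (e.g. 'float32')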