def test_join_matrixV(self):
    """variable join axis"""
    v = numpy.array([[.1, .2, .3], [.4, .5, .6]], dtype=self.floatX)
    a = self.shared(v)
    b = as_tensor_variable(v)
    ax = lscalar()
    s = join(ax, a, b)

    f = inplace_func([ax], [s], mode=self.mode)
    topo = f.maker.fgraph.toposort()
    assert [True for node in topo
            if isinstance(node.op, type(self.join_op))]

    want = numpy.array([[.1, .2, .3], [.4, .5, .6],
                        [.1, .2, .3], [.4, .5, .6]])
    got = f(0)
    assert numpy.allclose(got, want)

    want = numpy.array([[.1, .2, .3, .1, .2, .3],
                        [.4, .5, .6, .4, .5, .6]])
    got = f(1)
    assert numpy.allclose(got, want)

    utt.verify_grad(lambda a, b: join(0, a, b), [v, 2 * v], mode=self.mode)
    utt.verify_grad(lambda a, b: join(1, a, b), [v, 2 * v], mode=self.mode)
def test_join_matrixV_negative_axis(self):
    """variable join negative axis"""
    v = numpy.array([[.1, .2, .3], [.4, .5, .6]], dtype=self.floatX)
    a = self.shared(v)
    b = as_tensor_variable(v)
    ax = lscalar()
    s = join(ax, a, b)

    f = inplace_func([ax], [s], mode=self.mode)
    topo = f.maker.fgraph.toposort()
    assert [True for node in topo
            if isinstance(node.op, type(self.join_op))]

    want = numpy.array([[.1, .2, .3, .1, .2, .3],
                        [.4, .5, .6, .4, .5, .6]])
    got = f(-1)
    assert numpy.allclose(got, want)

    want = numpy.array([[.1, .2, .3], [.4, .5, .6],
                        [.1, .2, .3], [.4, .5, .6]])
    got = f(-2)
    assert numpy.allclose(got, want)

    self.assertRaises(IndexError, f, -3)
def test1(self):
    s = scal.constant(56)
    t = as_tensor_variable(s)
    self.assertTrue(t.owner.op is tensor_from_scalar)
    self.assertTrue(t.type.broadcastable == (), t.type.broadcastable)
    self.assertTrue(t.type.ndim == 0, t.type.ndim)
    self.assertTrue(t.type.dtype == s.type.dtype)

    v = eval_outputs([t])
    self.assertTrue(v == 56, v)
    self.assertTrue(isinstance(v, numpy.ndarray))
    self.assertTrue(v.shape == (), v.shape)

    g = grad(t, s)
    self.assertTrue(eval_outputs([g]) == 0.)
def test2(self):
    s = scal.constant(56.)
    t = as_tensor_variable(s)
    self.assertTrue(t.owner.op is tensor_from_scalar)
    self.assertTrue(t.type.broadcastable == (), t.type.broadcastable)
    self.assertTrue(t.type.ndim == 0, t.type.ndim)
    self.assertTrue(t.type.dtype == s.type.dtype)

    v = eval_outputs([t])
    self.assertTrue(v == 56., v)
    self.assertTrue(isinstance(v, numpy.ndarray))
    self.assertTrue(v.shape == (), v.shape)

    g = grad(t, s)
    self.assertTrue(eval_outputs([g]) == 1.)
def test_shape_Constant_tensor(self):
    """
    Tests convolution where the {image,filter}_shape is a Constant tensor.
    """
    as_t = T.as_tensor_variable
    self.validate((as_t(3), as_t(2), as_t(7), as_t(5)), (5, 2, 2, 3),
                  'valid')
    self.validate(as_t([3, 2, 7, 5]), (5, 2, 2, 3), 'valid')
    self.validate(as_t((3, 2, 7, 5)), (5, 2, 2, 3), 'valid')
    self.validate((3, 2, 7, 5), (as_t(5), as_t(2), as_t(2), as_t(3)),
                  'valid')
    self.validate((3, 2, 7, 5), as_t([5, 2, 2, 3]), 'valid')
    self.validate((3, 2, 7, 5), as_t((5, 2, 2, 3)), 'valid')
    self.validate(as_t([3, 2, 7, 5]), as_t([5, 2, 2, 3]), 'full')
def test_shape_Constant_tensor(self):
    """
    Tests correlation where the {image,filter}_shape is a Constant tensor.
    """
    as_t = T.as_tensor_variable
    border_modes = ['valid', 'full', 'half', (1, 1, 1), (2, 1, 1),
                    (1, 2, 1), (1, 1, 2), (3, 3, 3), 1]

    for border_mode in border_modes:
        self.validate((as_t(3), as_t(2), as_t(7), as_t(5), as_t(5)),
                      (5, 2, 2, 3, 3), border_mode)
        self.validate(as_t([3, 2, 7, 5, 5]), (5, 2, 2, 3, 3), border_mode)
        self.validate(as_t((3, 2, 7, 5, 5)), (5, 2, 2, 3, 3), border_mode)
        self.validate((3, 2, 7, 5, 5), (as_t(5), as_t(2), as_t(2),
                                        as_t(3), as_t(3)), 'valid')
        self.validate((3, 2, 7, 5, 5), as_t([5, 2, 2, 3, 3]), border_mode)
        self.validate(as_t([3, 2, 7, 5, 5]), as_t([5, 2, 2, 3, 3]),
                      border_mode)
def test_shape_Constant_tensor(self):
    """
    Tests correlation where the {image,filter}_shape is a Constant tensor.
    """
    as_t = T.as_tensor_variable
    border_modes = ['valid', 'full', 'half', (1, 1), (2, 1), (1, 2),
                    (3, 3), 1]

    for border_mode in border_modes:
        self.validate((as_t(3), as_t(2), as_t(7), as_t(5)),
                      (5, 2, 2, 3), border_mode)
        self.validate(as_t([3, 2, 7, 5]), (5, 2, 2, 3), border_mode)
        self.validate(as_t((3, 2, 7, 5)), (5, 2, 2, 3), border_mode)
        self.validate((3, 2, 7, 5), (as_t(5), as_t(2), as_t(2),
                                     as_t(3)), 'valid')
        self.validate((3, 2, 7, 5), as_t([5, 2, 2, 3]), border_mode)
        self.validate(as_t([3, 2, 7, 5]), as_t([5, 2, 2, 3]), border_mode)
def test_neibs_bad_shape(self):
    shape = (2, 3, 10, 10)
    for dtype in self.dtypes:
        images = shared(numpy.arange(
            numpy.prod(shape), dtype=dtype
        ).reshape(shape))

        for neib_shape in [(3, 2), (2, 3)]:
            neib_shape = T.as_tensor_variable(neib_shape)
            f = function([], images2neibs(images, neib_shape),
                         mode=self.mode)
            self.assertRaises(TypeError, f)

            # Test that mode='ignore_borders' works in this case.
            f = function([],
                         images2neibs(images, neib_shape,
                                      mode='ignore_borders'),
                         mode=self.mode)
            assert self.op in [type(node.op)
                               for node in f.maker.fgraph.toposort()]
            f()
def conv2d(input,
           filters,
           input_shape=None,
           filter_shape=None,
           border_mode='valid',
           subsample=(1, 1),
           filter_flip=True,
           filter_dilation=(1, 1)):
    """Build the symbolic graph for convolving a mini-batch of a stack
    of 2D inputs with a set of 2D filters. The implementation is modelled
    after Convolutional Neural Networks (CNN).

    Refer to :func:`nnet.conv2d <theano.tensor.nnet.conv2d>` for more
    detailed documentation.
    """
    input = as_tensor_variable(input)
    filters = as_tensor_variable(filters)
    conv_op = AbstractConv2d(imshp=input_shape,
                             kshp=filter_shape,
                             border_mode=border_mode,
                             subsample=subsample,
                             filter_flip=filter_flip,
                             filter_dilation=filter_dilation)
    return conv_op(input, filters)
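A minimal usage sketch (not part of the original listing), assuming the usual theano.tensor.nnet entry point is available; the tensor names x and w are illustrative:

# Hedged sketch: compile and run a small convolution graph.
import numpy
import theano
import theano.tensor as T
from theano.tensor.nnet import conv2d  # public entry point; assumed import

x = T.tensor4('x')  # (batch, in_channels, rows, cols)
w = T.tensor4('w')  # (out_channels, in_channels, filter_rows, filter_cols)
y = conv2d(x, w, border_mode='valid', subsample=(1, 1))
f = theano.function([x, w], y)

xv = numpy.random.rand(2, 3, 8, 8).astype(theano.config.floatX)
wv = numpy.random.rand(4, 3, 3, 3).astype(theano.config.floatX)
print(f(xv, wv).shape)  # (2, 4, 6, 6) with 'valid' mode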
def make_node(self, img, kern):
    # Make sure both inputs are Variables with the same Type
    if not isinstance(img, theano.Variable):
        img = as_tensor_variable(img)
    if not isinstance(kern, theano.Variable):
        kern = as_tensor_variable(kern)
    ktype = img.type.clone(dtype=kern.dtype,
                           broadcastable=kern.broadcastable)
    kern = ktype.filter_variable(kern)

    if img.type.ndim != 2 + self.convdim:
        raise TypeError('img must be %dD tensor' % (2 + self.convdim))
    if kern.type.ndim != 2 + self.convdim:
        raise TypeError('kern must be %dD tensor' % (2 + self.convdim))

    broadcastable = [img.broadcastable[0],
                     kern.broadcastable[0]] + ([False] * self.convdim)
    output = img.type.clone(broadcastable=broadcastable)()
    return Apply(self, [img, kern], [output])
def make_node(self, img, topgrad, shape):
    # Make sure both inputs are Variables with the same Type
    if not isinstance(img, theano.Variable):
        img = as_tensor_variable(img)
    if not isinstance(topgrad, theano.Variable):
        topgrad = as_tensor_variable(topgrad)
    gtype = img.type.clone(dtype=topgrad.dtype,
                           broadcastable=topgrad.broadcastable)
    topgrad = gtype.filter_variable(topgrad)

    if img.type.ndim != 2 + self.convdim:
        raise TypeError('img must be %dD tensor' % (2 + self.convdim))
    if topgrad.type.ndim != 2 + self.convdim:
        raise TypeError('topgrad must be %dD tensor' % (2 + self.convdim))

    shape = as_tensor_variable(shape)
    broadcastable = [topgrad.broadcastable[1],
                     img.broadcastable[1]] + ([False] * self.convdim)
    output = img.type.clone(broadcastable=broadcastable)()
    return Apply(self, [img, topgrad, shape], [output])
def make_node(self, kern, topgrad, shape):
    # Make sure both inputs are Variables with the same Type
    if not isinstance(kern, theano.Variable):
        kern = as_tensor_variable(kern)
    if not isinstance(topgrad, theano.Variable):
        topgrad = as_tensor_variable(topgrad)
    gtype = kern.type.clone(dtype=topgrad.dtype,
                            broadcastable=topgrad.broadcastable)
    topgrad = gtype.filter_variable(topgrad)

    if kern.type.ndim != 2 + self.convdim:
        raise TypeError('kern must be %dD tensor' % (2 + self.convdim))
    if topgrad.type.ndim != 2 + self.convdim:
        raise TypeError('topgrad must be %dD tensor' % (2 + self.convdim))

    shape = as_tensor_variable(shape)
    broadcastable = [topgrad.type.broadcastable[0],
                     kern.type.broadcastable[1]] + ([False] * self.convdim)
    output = kern.type.clone(broadcastable=broadcastable)()
    return Apply(self, [kern, topgrad, shape], [output])
def make_node(self, img, topgrad, shape=None):
    img = as_tensor_variable(img)
    topgrad = as_tensor_variable(topgrad)
    img, topgrad = self.as_common_dtype(img, topgrad)
    if img.type.ndim != 5:
        raise TypeError('img must be 5D tensor')
    if topgrad.type.ndim != 5:
        raise TypeError('topgrad must be 5D tensor')

    if self.subsample != (1, 1, 1) or self.border_mode == "half":
        if shape is None:
            raise ValueError('shape must be given if subsample != (1, 1, 1)'
                             ' or border_mode == "half"')
        height_width_depth = [as_tensor_variable(shape[0]).astype('int64'),
                              as_tensor_variable(shape[1]).astype('int64'),
                              as_tensor_variable(shape[2]).astype('int64')]
    else:
        height_width_depth = []

    broadcastable = [topgrad.type.broadcastable[1],
                     img.type.broadcastable[1],
                     False, False, False]
    dtype = img.type.dtype
    return Apply(self, [img, topgrad] + height_width_depth,
                 [TensorType(dtype, broadcastable)()])
def make_node(self, kern, topgrad, shape=None):
    kern = as_tensor_variable(kern)
    topgrad = as_tensor_variable(topgrad)
    kern, topgrad = self.as_common_dtype(kern, topgrad)
    if kern.type.ndim != 5:
        raise TypeError('kern must be 5D tensor')
    if topgrad.type.ndim != 5:
        raise TypeError('topgrad must be 5D tensor')

    if self.subsample != (1, 1, 1) and shape is None:
        raise ValueError('shape must be given if subsample != (1, 1, 1)')
    if self.subsample != (1, 1, 1):
        height_width_depth = [as_tensor_variable(shape[0]).astype('int64'),
                              as_tensor_variable(shape[1]).astype('int64'),
                              as_tensor_variable(shape[2]).astype('int64')]
    else:
        height_width_depth = []

    broadcastable = [topgrad.type.broadcastable[0],
                     kern.type.broadcastable[1],
                     False, False, False]
    dtype = kern.type.dtype
    return Apply(self, [kern, topgrad] + height_width_depth,
                 [TensorType(dtype, broadcastable)()])
def make_node(self, img, topgrad, shape=None):
    img = as_tensor_variable(img)
    topgrad = as_tensor_variable(topgrad)
    img, topgrad = self.as_common_dtype(img, topgrad)
    if img.type.ndim != 4:
        raise TypeError('img must be 4D tensor')
    if topgrad.type.ndim != 4:
        raise TypeError('topgrad must be 4D tensor')

    if self.subsample != (1, 1) or self.border_mode == "half":
        if shape is None:
            raise ValueError('shape must be given if subsample != (1, 1)'
                             ' or border_mode == "half"')
        height_width = [as_tensor_variable(shape[0]).astype('int64'),
                        as_tensor_variable(shape[1]).astype('int64')]
    else:
        height_width = []

    broadcastable = [topgrad.type.broadcastable[1],
                     img.type.broadcastable[1],
                     False, False]
    dtype = img.type.dtype
    return Apply(self, [img, topgrad] + height_width,
                 [TensorType(dtype, broadcastable)()])
def uniform(random_state, size=None, low=0.0, high=1.0, ndim=None, dtype=None):
    """
    Sample from a uniform distribution between low and high.

    If the size argument is ambiguous on the number of dimensions, ndim
    may be a plain integer to supplement the missing information.

    If size is None, the output shape will be determined by the shapes
    of low and high.

    If dtype is not specified, it will be inferred from the dtype of
    low and high, but will be at least as precise as floatX.
    """
    low = tensor.as_tensor_variable(low)
    high = tensor.as_tensor_variable(high)
    if dtype is None:
        dtype = tensor.scal.upcast(theano.config.floatX, low.dtype, high.dtype)
    ndim, size, bcast = _infer_ndim_bcast(ndim, size, low, high)
    op = RandomFunction('uniform',
                        tensor.TensorType(dtype=dtype, broadcastable=bcast))
    return op(random_state, size, low, high)
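These helpers take an explicit random_state variable; in everyday code they are usually reached through RandomStreams, which manages that state internally. A minimal sketch, assuming the standard theano.tensor.shared_randomstreams module:

# Hedged sketch: RandomStreams wraps helpers like uniform() and supplies
# the random_state argument from an internally managed shared variable.
import theano
from theano.tensor.shared_randomstreams import RandomStreams

srng = RandomStreams(seed=1234)
u = srng.uniform(size=(2, 3), low=0.0, high=1.0)  # symbolic (2, 3) draw
f = theano.function([], u)
print(f())  # fresh floats in [0, 1) on every call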
def normal(random_state, size=None, avg=0.0, std=1.0, ndim=None, dtype=None):
    """
    Sample from a normal distribution centered on avg with
    the specified standard deviation (std).

    If the size argument is ambiguous on the number of dimensions, ndim
    may be a plain integer to supplement the missing information.

    If size is None, the output shape will be determined by the shapes
    of avg and std.

    If dtype is not specified, it will be inferred from the dtype of
    avg and std, but will be at least as precise as floatX.
    """
    avg = tensor.as_tensor_variable(avg)
    std = tensor.as_tensor_variable(std)
    if dtype is None:
        dtype = tensor.scal.upcast(theano.config.floatX, avg.dtype, std.dtype)
    ndim, size, bcast = _infer_ndim_bcast(ndim, size, avg, std)
    op = RandomFunction('normal',
                        tensor.TensorType(dtype=dtype, broadcastable=bcast))
    return op(random_state, size, avg, std)
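The same RandomStreams pattern applies to normal(); a minimal sketch, with setup repeated from the uniform example above:

# Hedged sketch: normal draws through RandomStreams.
import theano
from theano.tensor.shared_randomstreams import RandomStreams

srng = RandomStreams(seed=42)
n = srng.normal(size=(4,), avg=0.0, std=2.0)  # symbolic N(0, 2**2) samples
print(theano.function([], n)())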
def random_integers(random_state, size=None, low=0, high=1, ndim=None,
                    dtype='int64'):
    """
    Sample a random integer between low and high, both inclusive.

    If the size argument is ambiguous on the number of dimensions, ndim
    may be a plain integer to supplement the missing information.

    If size is None, the output shape will be determined by the shapes
    of low and high.
    """
    low = tensor.as_tensor_variable(low)
    high = tensor.as_tensor_variable(high)
    ndim, size, bcast = _infer_ndim_bcast(ndim, size, low, high)
    op = RandomFunction(random_integers_helper,
                        tensor.TensorType(dtype=dtype, broadcastable=bcast))
    return op(random_state, size, low, high)
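And likewise for random_integers(), keeping in mind that both bounds are inclusive:

# Hedged sketch: integer draws through RandomStreams; low and high inclusive.
import theano
from theano.tensor.shared_randomstreams import RandomStreams

srng = RandomStreams(seed=7)
r = srng.random_integers(size=(5,), low=0, high=9)  # values in {0, ..., 9}
print(theano.function([], r)())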
# Source: roc_auc.py, project deep-mil-for-whole-mammogram-classification (author: wentaozhu)
def make_node(self, y_true, y_score):
    """
    Calculate ROC AUC score.

    Parameters
    ----------
    y_true : tensor_like
        Target class labels.
    y_score : tensor_like
        Predicted class labels or probabilities for the positive class.
    """
    y_true = T.as_tensor_variable(y_true)
    y_score = T.as_tensor_variable(y_score)
    output = [T.vector(name=self.name, dtype=config.floatX)]
    return gof.Apply(self, [y_true, y_score], output)
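The enclosing class is not shown in this listing. Below is a hedged sketch of how a complete op built around this make_node might look and be used; RocAucScoreOp is a hypothetical name, and the perform() body (delegating to sklearn.metrics.roc_auc_score) is an assumption rather than the project's actual implementation.

# Hypothetical wrapper, for illustration only: the class name and perform()
# body are assumptions; only make_node mirrors the listing above.
import numpy
import theano
import theano.tensor as T
from theano import gof, config
from sklearn.metrics import roc_auc_score

class RocAucScoreOp(gof.Op):
    name = 'roc_auc'

    def make_node(self, y_true, y_score):
        y_true = T.as_tensor_variable(y_true)
        y_score = T.as_tensor_variable(y_score)
        output = [T.vector(name=self.name, dtype=config.floatX)]
        return gof.Apply(self, [y_true, y_score], output)

    def perform(self, node, inputs, output_storage):
        y_true, y_score = inputs
        # Wrap the scalar score in a length-1 vector to match the output type.
        score = roc_auc_score(y_true, y_score)
        output_storage[0][0] = numpy.asarray([score], dtype=config.floatX)

# Usage sketch: build a symbolic AUC node and compile it.
yt = T.ivector('y_true')
ys = T.fvector('y_score')
auc = RocAucScoreOp()(yt, ys)
f = theano.function([yt, ys], auc)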