def placeholder(shape=None, ndim=None, dtype=_FLOATX, sparse=False, name=None):
    '''Instantiate an input data placeholder variable.
    '''
    if shape is None and ndim is None:
        raise Exception('Specify either a shape or ndim value.')
    if shape is not None:
        ndim = len(shape)
    else:
        shape = tuple([None for _ in range(ndim)])
    broadcast = (False,) * ndim
    if sparse:
        _assert_sparse_module()
        x = th_sparse_module.csr_matrix(name=name, dtype=dtype)
    else:
        x = T.TensorType(dtype, broadcast)(name)
    x._keras_shape = shape
    x._uses_learning_phase = False
    return x
def placeholder(shape=None, ndim=None, dtype=None, sparse=False, name=None):
    """Instantiate an input data placeholder variable.
    """
    if dtype is None:
        dtype = floatx()
    if shape is None and ndim is None:
        raise ValueError('Specify either a shape or ndim value.')
    if shape is not None:
        ndim = len(shape)
    else:
        shape = tuple([None for _ in range(ndim)])
    broadcast = (False,) * ndim
    if sparse:
        _assert_sparse_module()
        x = th_sparse_module.csr_matrix(name=name, dtype=dtype)
    else:
        x = T.TensorType(dtype, broadcast)(name)
    x._keras_shape = shape
    x._uses_learning_phase = False
    return x
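The placeholder() above (from a Keras-style Theano backend) returns an ordinary Theano symbolic variable, so the same object can be built with TensorType directly. A minimal usage sketch, assuming only that Theano is installed; the variable names are illustrative:

import theano.tensor as T

# Roughly what placeholder(shape=(None, None), dtype='float32') produces:
# a 2-D symbolic input with no broadcastable dimensions.
x = T.TensorType('float32', (False, False))('x')
y = x * 2  # graphs are built on top of the placeholder as usual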
def __init__(self):
    metric_names = ['Loss', 'L2', 'Accuracy']
    super(Fr3dNetTrainer, self).__init__(metric_names)

    tensor5 = T.TensorType(theano.config.floatX, (False,) * 5)
    input_var = tensor5('inputs')
    target_var = T.ivector('targets')

    logging.info("Defining network")
    net = fr3dnet.define_network(input_var)
    self.network = net
    train_fn, val_fn, l_r = fr3dnet.define_updates(net, input_var, target_var)
    self.train_fn = train_fn
    self.val_fn = val_fn
    self.l_r = l_r
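The trainer builds a 5-D input type by hand because Theano only ships predefined constructors up to 4-D (tensor4). A minimal sketch of the same idea, assuming Theano is available:

import theano
import theano.tensor as T

# 5-D float tensor type, e.g. (batch, channel, x, y, z) for volumetric data.
tensor5 = T.TensorType(theano.config.floatX, (False,) * 5)
inputs = tensor5('inputs')
assert inputs.ndim == 5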
def placeholder(shape=None, ndim=None, dtype=None, sparse=False, name=None):
    """Instantiate an input data placeholder variable.
    """
    if dtype is None:
        dtype = floatx()
    if shape is None and ndim is None:
        raise ValueError('Specify either a shape or ndim value.')
    if shape is not None:
        ndim = len(shape)
    else:
        shape = tuple([None for _ in range(ndim)])
    name = _prepare_name(name, 'placeholder')
    broadcast = (False,) * ndim
    if sparse:
        _assert_sparse_module()
        x = th_sparse_module.csr_matrix(name=name, dtype=dtype)
    else:
        x = T.TensorType(dtype, broadcast)(name)
    x._keras_shape = shape
    x._uses_learning_phase = False
    return x
def test_op_sd(self):
    for format in sparse.sparse_formats:
        for dtype in sparse.all_dtypes:
            variable, data = sparse_random_inputs(format,
                                                  shape=(10, 10),
                                                  out_dtype=dtype,
                                                  n=2,
                                                  p=0.1)

            variable[1] = tensor.TensorType(dtype=dtype,
                                            broadcastable=(False, False))()
            data[1] = data[1].toarray()

            f = theano.function(variable, self.op(*variable))

            tested = f(*data)
            expected = numpy.dot(data[0].toarray(), data[1])

            assert tested.format == format
            assert tested.dtype == expected.dtype
            tested = tested.toarray()
            utt.assert_allclose(tested, expected)
def _is_sparse_variable(x):
    """
    Returns
    -------
    boolean
        True iff x is a L{SparseVariable} (and not a L{tensor.TensorType},
        for instance).

    """
    if not isinstance(x, gof.Variable):
        raise NotImplementedError("this function should only be called on "
                                  "*variables* (of type sparse.SparseType "
                                  "or tensor.TensorType, for instance), not ",
                                  x)
    return isinstance(x.type, SparseType)
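_is_sparse_variable() only inspects the variable's type. A hedged sketch of the distinction it draws, using Theano's public constructors (theano.sparse requires SciPy):

import theano.tensor as T
import theano.sparse

dense = T.matrix('dense')              # dense.type is a TensorType
sp = theano.sparse.csr_matrix('sp')    # sp.type is a SparseType
# _is_sparse_variable(dense) -> False; _is_sparse_variable(sp) -> True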
def test_infer_shape(self):
    for s_left, s_right in [((5, 6), (5, 6)),
                            ((5, 6), (5, 1)),
                            ((5, 6), (1, 6)),
                            ((5, 1), (5, 6)),
                            ((1, 6), (5, 6)),
                            ((2, 3, 4, 5), (2, 3, 4, 5)),
                            ((2, 3, 4, 5), (2, 3, 1, 5)),
                            ((2, 3, 4, 5), (1, 3, 4, 5)),
                            ((2, 1, 4, 5), (2, 3, 4, 5)),
                            ((2, 3, 4, 1), (2, 3, 4, 5))]:
        dtype = theano.config.floatX
        t_left = TensorType(dtype, [(entry == 1) for entry in s_left])()
        t_right = TensorType(dtype, [(entry == 1) for entry in s_right])()
        t_left_val = numpy.zeros(s_left, dtype=dtype)
        t_right_val = numpy.zeros(s_right, dtype=dtype)
        self._compile_and_check([t_left, t_right],
                                [Elemwise(scalar.add)(t_left, t_right)],
                                [t_left_val, t_right_val], Elemwise)
def test_recursive_lift(self):
    v = T.vector(dtype="float64")
    m = T.matrix(dtype="float64")
    out = ((v + 42) * (m + 84)).T
    g = FunctionGraph([v, m], [out])
    init_str_g = ("[InplaceDimShuffle{1,0}(Elemwise{mul,no_inplace}"
                  "(InplaceDimShuffle{x,0}(Elemwise{add,no_inplace}"
                  "(<TensorType(float64, vector)>, "
                  "InplaceDimShuffle{x}(TensorConstant{42}))), "
                  "Elemwise{add,no_inplace}"
                  "(<TensorType(float64, matrix)>, "
                  "InplaceDimShuffle{x,x}(TensorConstant{84}))))]")
    self.assertTrue(str(g) == init_str_g)
    new_out = local_dimshuffle_lift.transform(g.outputs[0].owner)[0]
    new_g = FunctionGraph(g.inputs, [new_out])
    opt_str_g = ("[Elemwise{mul,no_inplace}(Elemwise{add,no_inplace}"
                 "(InplaceDimShuffle{0,x}(<TensorType(float64, vector)>), "
                 "InplaceDimShuffle{x,x}(TensorConstant{42})), "
                 "Elemwise{add,no_inplace}(InplaceDimShuffle{1,0}"
                 "(<TensorType(float64, matrix)>), "
                 "InplaceDimShuffle{x,x}(TensorConstant{84})))]")
    self.assertTrue(str(new_g) == opt_str_g)
    # Check stacktrace was copied over correctly after opt was applied
    self.assertTrue(check_stack_trace(new_g, ops_to_check='all'))
def test_eq(self):
    x = T.dmatrix()
    y = T.dmatrix()
    f = theano.function([x, y], T.eq(x, y), mode=self.mode)
    vx = numpy.random.rand(5, 4)
    vy = numpy.random.rand(5, 4)
    f(vx, vy)
    topo = f.maker.fgraph.toposort()
    assert len(topo) == 1
    assert isinstance(topo[0].op, T.Elemwise)
    assert isinstance(topo[0].op.scalar_op, theano.scalar.EQ)
    f2 = theano.function([x], T.eq(x, x), mode=self.mode)
    assert numpy.all(f2(vx) == numpy.ones((5, 4)))
    topo2 = f2.maker.fgraph.toposort()
    # Expected graph:
    # Shape_i{1}(<TensorType(float64, matrix)>),
    # Shape_i{0}(<TensorType(float64, matrix)>),
    # Alloc([[1]], Shape_i{0}.0, Shape_i{1}.0)
    assert len(topo2) == 3
    assert isinstance(topo2[-1].op, T.Alloc)
def test_local_reduce_broadcast_some_0(self):
    for fct in [tensor.sum, tensor.all, tensor.any, tensor.prod,
                tensor.max, tensor.min]:
        x = T.TensorType('int64', (True, False, True))()
        f = theano.function([x], [fct(x, axis=[0, 1])], mode=self.mode)

        order = f.maker.fgraph.toposort()

        assert 1 == sum([isinstance(node.op, T.CAReduce)
                         for node in order])

        node = [node for node in order
                if isinstance(node.op, tensor.CAReduce)][0]

        op = node.op
        assert isinstance(op, T.CAReduce)
        # -- the leading broadcastable dimension has been dropped
        # by the local_reduce_broadcastable optimization;
        # now summation is over the original x's dimension 1.
        assert node.inputs[0].ndim == 2, node
        assert op.axis == (0,), op.axis
def test_local_log_sum_exp1():
    # Tests if optimization is applied by checking the presence of the maximum
    x = tensor3('x')
    check_max_log_sum_exp(x, axis=(0,), dimshuffle_op=None)
    check_max_log_sum_exp(x, axis=(1,), dimshuffle_op=None)
    check_max_log_sum_exp(x, axis=(2,), dimshuffle_op=None)
    check_max_log_sum_exp(x, axis=(0, 1), dimshuffle_op=None)
    check_max_log_sum_exp(x, axis=(0, 1, 2), dimshuffle_op=None)

    # If a transpose is applied to the sum
    transpose_op = DimShuffle((False, False), (1, 0))
    check_max_log_sum_exp(x, axis=2, dimshuffle_op=transpose_op)

    # If the sum is performed with keepdims=True
    x = TensorType(dtype='floatX', broadcastable=(False, True, False))('x')
    sum_keepdims_op = x.sum(axis=(0, 1), keepdims=True).owner.op
    check_max_log_sum_exp(x, axis=(0, 1), dimshuffle_op=sum_keepdims_op)
def test_inc_adv_subtensor_w_2vec(self):
    if inplace_increment is None:
        raise inplace_increment_missing

    subt = self.m[self.ix1, self.ix12]
    a = inc_subtensor(subt, subt)

    typ = tensor.TensorType(self.m.type.dtype, self.ix2.type.broadcastable)
    assert a.type == typ, (a.type, typ)

    f = theano.function([self.m, self.ix1, self.ix12], a,
                        allow_input_downcast=True)
    aval = f([[.4, .9, .1],
              [5, 6, 7],
              [.5, .3, .15]],
             [1, 2, 1],
             [0, 1, 0])
    assert numpy.allclose(aval,
                          [[.4, .9, .1],
                           [5 * 3, 6, 7],
                           [.5, .3 * 2, .15]]), aval
def test_tensor_values_eq_approx():
    # test that inf, -inf and nan are equal to themselves
    a = numpy.asarray([-numpy.inf, -1, 0, 1, numpy.inf, numpy.nan])
    assert TensorType.values_eq_approx(a, a)

    # test that inf and -inf are not equal to each other
    b = numpy.asarray([numpy.inf, -1, 0, 1, numpy.inf, numpy.nan])
    assert not TensorType.values_eq_approx(a, b)
    b = numpy.asarray([-numpy.inf, -1, 0, 1, -numpy.inf, numpy.nan])
    assert not TensorType.values_eq_approx(a, b)

    # test allow_remove_inf
    b = numpy.asarray([numpy.inf, -1, 0, 1, 5, numpy.nan])
    assert TensorType.values_eq_approx(a, b, allow_remove_inf=True)
    b = numpy.asarray([numpy.inf, -1, 0, 1, 5, 6])
    assert not TensorType.values_eq_approx(a, b, allow_remove_inf=True)

    # test allow_remove_nan
    b = numpy.asarray([numpy.inf, -1, 0, 1, 5, numpy.nan])
    assert not TensorType.values_eq_approx(a, b, allow_remove_nan=False)
    b = numpy.asarray([-numpy.inf, -1, 0, 1, numpy.inf, 6])
    assert not TensorType.values_eq_approx(a, b, allow_remove_nan=False)
def test_flatten_broadcastable():
    # Ensure that the broadcastable pattern of the output is coherent with
    # that of the input
    inp = TensorType('float64', (False, False, False, False))()
    out = flatten(inp, outdim=2)
    assert out.broadcastable == (False, False)

    inp = TensorType('float64', (False, False, False, True))()
    out = flatten(inp, outdim=2)
    assert out.broadcastable == (False, False)

    inp = TensorType('float64', (False, True, False, True))()
    out = flatten(inp, outdim=2)
    assert out.broadcastable == (False, False)

    inp = TensorType('float64', (False, True, True, True))()
    out = flatten(inp, outdim=2)
    assert out.broadcastable == (False, True)

    inp = TensorType('float64', (True, False, True, True))()
    out = flatten(inp, outdim=3)
    assert out.broadcastable == (True, False, True)
def local_abstractconv_gemm(node):
    if theano.config.cxx == "" or not theano.config.blas.ldflags:
        return
    if not isinstance(node.op, AbstractConv2d):
        return None
    img, kern = node.inputs
    if not isinstance(img.type, TensorType) or \
            not isinstance(kern.type, TensorType):
        return None

    # need to flip the kernel if necessary
    if node.op.filter_flip:
        kern = kern[:, :, ::-1, ::-1]
    rval = CorrMM(border_mode=node.op.border_mode,
                  subsample=node.op.subsample,
                  filter_dilation=node.op.filter_dilation)(img, kern)
    copy_stack_trace(node.outputs[0], rval)

    return [rval]
def local_abstractconv3d_gemm(node):
    if theano.config.cxx == "" or not theano.config.blas.ldflags:
        return
    if not isinstance(node.op, AbstractConv3d):
        return None
    img, kern = node.inputs
    if not isinstance(img.type, TensorType) or \
            not isinstance(kern.type, TensorType):
        return None

    # need to flip the kernel if necessary
    if node.op.filter_flip:
        kern = kern[:, :, ::-1, ::-1, ::-1]
    rval = Corr3dMM(border_mode=node.op.border_mode,
                    subsample=node.op.subsample,
                    filter_dilation=node.op.filter_dilation)(img, kern)
    copy_stack_trace(node.outputs[0], rval)

    return [rval]
def local_abstractconv3d_gradweight_gemm(node):
    if theano.config.cxx == "" or not theano.config.blas.ldflags:
        return
    if not isinstance(node.op, AbstractConv3d_gradWeights):
        return None
    img, topgrad, shape = node.inputs
    if not isinstance(img.type, TensorType) or \
            not isinstance(topgrad.type, TensorType):
        return None

    rval = Corr3dMM_gradWeights(border_mode=node.op.border_mode,
                                subsample=node.op.subsample,
                                filter_dilation=node.op.filter_dilation)(img, topgrad,
                                                                         shape)
    copy_stack_trace(node.outputs[0], rval)

    # need to flip the kernel if necessary
    if node.op.filter_flip:
        rval = rval[:, :, ::-1, ::-1, ::-1]
    rval = theano.tensor.patternbroadcast(rval, node.outputs[0].broadcastable)
    copy_stack_trace(node.outputs[0], rval)

    return [rval]
def local_abstractconv_gradinputs_gemm(node):
    if theano.config.cxx == "" or not theano.config.blas.ldflags:
        return
    if not isinstance(node.op, AbstractConv2d_gradInputs):
        return None
    kern, topgrad, shape = node.inputs
    if not isinstance(kern.type, TensorType) or \
            not isinstance(topgrad.type, TensorType):
        return None

    # need to flip the kernel if necessary
    if node.op.filter_flip:
        kern = kern[:, :, ::-1, ::-1]
    rval = CorrMM_gradInputs(border_mode=node.op.border_mode,
                             subsample=node.op.subsample,
                             filter_dilation=node.op.filter_dilation)(kern, topgrad,
                                                                      shape)
    copy_stack_trace(node.outputs[0], rval)

    return [rval]
def local_abstractconv3d_gradinputs_gemm(node):
    if theano.config.cxx == "" or not theano.config.blas.ldflags:
        return
    if not isinstance(node.op, AbstractConv3d_gradInputs):
        return None
    kern, topgrad, shape = node.inputs
    if not isinstance(kern.type, TensorType) or \
            not isinstance(topgrad.type, TensorType):
        return None

    # need to flip the kernel if necessary
    if node.op.filter_flip:
        kern = kern[:, :, ::-1, ::-1, ::-1]
    rval = Corr3dMM_gradInputs(border_mode=node.op.border_mode,
                               subsample=node.op.subsample,
                               filter_dilation=node.op.filter_dilation)(kern, topgrad,
                                                                        shape)
    copy_stack_trace(node.outputs[0], rval)

    return [rval]
def local_conv2d_cpu(node):
    if not isinstance(node.op, AbstractConv2d):
        return None

    img, kern = node.inputs
    if ((not isinstance(img.type, TensorType) or
         not isinstance(kern.type, TensorType))):
        return None
    if node.op.border_mode not in ['full', 'valid']:
        return None
    if not node.op.filter_flip:
        # Not tested yet
        return None

    rval = conv2d(img, kern,
                  node.op.imshp, node.op.kshp,
                  border_mode=node.op.border_mode,
                  subsample=node.op.subsample)

    copy_stack_trace(node.outputs[0], rval)
    return [rval]
def make_node(self, img, topgrad, shape=None):
    img = as_tensor_variable(img)
    topgrad = as_tensor_variable(topgrad)
    img, topgrad = self.as_common_dtype(img, topgrad)
    if img.type.ndim != 5:
        raise TypeError('img must be 5D tensor')
    if topgrad.type.ndim != 5:
        raise TypeError('topgrad must be 5D tensor')
    if self.subsample != (1, 1, 1) or self.border_mode == "half":
        if shape is None:
            raise ValueError('shape must be given if subsample != (1, 1, 1)'
                             ' or border_mode == "half"')
        height_width_depth = [as_tensor_variable(shape[0]).astype('int64'),
                              as_tensor_variable(shape[1]).astype('int64'),
                              as_tensor_variable(shape[2]).astype('int64')]
    else:
        height_width_depth = []

    broadcastable = [topgrad.type.broadcastable[1], img.type.broadcastable[1],
                     False, False, False]
    dtype = img.type.dtype
    return Apply(self, [img, topgrad] + height_width_depth,
                 [TensorType(dtype, broadcastable)()])
def make_node(self, kern, topgrad, shape=None):
    kern = as_tensor_variable(kern)
    topgrad = as_tensor_variable(topgrad)
    kern, topgrad = self.as_common_dtype(kern, topgrad)
    if kern.type.ndim != 5:
        raise TypeError('kern must be 5D tensor')
    if topgrad.type.ndim != 5:
        raise TypeError('topgrad must be 5D tensor')
    if self.subsample != (1, 1, 1) and shape is None:
        raise ValueError('shape must be given if subsample != (1, 1, 1)')
    if self.subsample != (1, 1, 1):
        height_width_depth = [as_tensor_variable(shape[0]).astype('int64'),
                              as_tensor_variable(shape[1]).astype('int64'),
                              as_tensor_variable(shape[2]).astype('int64')]
    else:
        height_width_depth = []

    broadcastable = [topgrad.type.broadcastable[0], kern.type.broadcastable[1],
                     False, False, False]
    dtype = kern.type.dtype
    return Apply(self, [kern, topgrad] + height_width_depth,
                 [TensorType(dtype, broadcastable)()])
def make_node(self, kern, topgrad, shape=None):
    kern = as_tensor_variable(kern)
    topgrad = as_tensor_variable(topgrad)
    kern, topgrad = self.as_common_dtype(kern, topgrad)
    if kern.type.ndim != 4:
        raise TypeError('kern must be 4D tensor')
    if topgrad.type.ndim != 4:
        raise TypeError('topgrad must be 4D tensor')
    if self.subsample != (1, 1) and shape is None:
        raise ValueError('shape must be given if subsample != (1, 1)')
    if self.subsample != (1, 1):
        height_width = [as_tensor_variable(shape[0]).astype('int64'),
                        as_tensor_variable(shape[1]).astype('int64')]
    else:
        height_width = []

    broadcastable = [topgrad.type.broadcastable[0], kern.type.broadcastable[1],
                     False, False]
    dtype = kern.type.dtype
    return Apply(self, [kern, topgrad] + height_width,
                 [TensorType(dtype, broadcastable)()])
def uniform(random_state, size=None, low=0.0, high=1.0, ndim=None, dtype=None):
    """
    Sample from a uniform distribution between low and high.

    If the size argument is ambiguous on the number of dimensions, ndim
    may be a plain integer to supplement the missing information.

    If size is None, the output shape will be determined by the shapes
    of low and high.

    If dtype is not specified, it will be inferred from the dtype of
    low and high, but will be at least as precise as floatX.

    """
    low = tensor.as_tensor_variable(low)
    high = tensor.as_tensor_variable(high)
    if dtype is None:
        dtype = tensor.scal.upcast(theano.config.floatX, low.dtype, high.dtype)
    ndim, size, bcast = _infer_ndim_bcast(ndim, size, low, high)
    op = RandomFunction('uniform',
                        tensor.TensorType(dtype=dtype, broadcastable=bcast))
    return op(random_state, size, low, high)
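uniform() above is the low-level constructor that wraps a RandomFunction op around a freshly built TensorType; user code usually reaches it (and normal() below, which follows the same pattern) through RandomStreams. A minimal usage sketch, assuming Theano is installed:

import theano
from theano.tensor.shared_randomstreams import RandomStreams

srng = RandomStreams(seed=42)
u = srng.uniform(size=(2, 3), low=0.0, high=1.0)  # symbolic draw
f = theano.function([], u)
print(f())  # a fresh 2x3 sample on every call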
def normal(random_state, size=None, avg=0.0, std=1.0, ndim=None, dtype=None):
    """
    Sample from a normal distribution centered on avg with
    the specified standard deviation (std).

    If the size argument is ambiguous on the number of dimensions, ndim
    may be a plain integer to supplement the missing information.

    If size is None, the output shape will be determined by the shapes
    of avg and std.

    If dtype is not specified, it will be inferred from the dtype of
    avg and std, but will be at least as precise as floatX.

    """
    avg = tensor.as_tensor_variable(avg)
    std = tensor.as_tensor_variable(std)
    if dtype is None:
        dtype = tensor.scal.upcast(theano.config.floatX, avg.dtype, std.dtype)
    ndim, size, bcast = _infer_ndim_bcast(ndim, size, avg, std)
    op = RandomFunction('normal',
                        tensor.TensorType(dtype=dtype, broadcastable=bcast))
    return op(random_state, size, avg, std)
def random_integers(random_state, size=None, low=0, high=1, ndim=None,
                    dtype='int64'):
    """
    Sample a random integer between low and high, both inclusive.

    If the size argument is ambiguous on the number of dimensions, ndim
    may be a plain integer to supplement the missing information.

    If size is None, the output shape will be determined by the shapes
    of low and high.

    """
    low = tensor.as_tensor_variable(low)
    high = tensor.as_tensor_variable(high)
    ndim, size, bcast = _infer_ndim_bcast(ndim, size, low, high)
    op = RandomFunction(random_integers_helper,
                        tensor.TensorType(dtype=dtype, broadcastable=bcast))
    return op(random_state, size, low, high)
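random_integers() is reachable the same way through RandomStreams; note that both bounds are inclusive. A hedged sketch, assuming Theano is available:

from theano.tensor.shared_randomstreams import RandomStreams

srng = RandomStreams(seed=1)
r = srng.random_integers(size=(5,), low=0, high=9)  # values drawn from {0, ..., 9}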
def test_ctors(self):
    if theano.configdefaults.python_int_bitwidth() == 32:
        assert shared(7).type == theano.tensor.iscalar, shared(7).type
    else:
        assert shared(7).type == theano.tensor.lscalar, shared(7).type
    assert shared(7.0).type == theano.tensor.dscalar
    assert shared(numpy.float32(7)).type == theano.tensor.fscalar

    # test tensor constructor
    b = shared(numpy.zeros((5, 5), dtype='int32'))
    assert b.type == TensorType('int32', broadcastable=[False, False])

    b = shared(numpy.random.rand(4, 5))
    assert b.type == TensorType('float64', broadcastable=[False, False])

    b = shared(numpy.random.rand(5, 1, 2))
    assert b.type == TensorType('float64', broadcastable=[False, False, False])

    assert shared([]).type == generic

    def badfunc():
        shared(7, bad_kw=False)
    self.assertRaises(TypeError, badfunc)
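test_ctors exercises how shared() infers a TensorType (dtype and broadcastable pattern) from the value it wraps. A minimal sketch of that inference, assuming Theano and NumPy are importable:

import numpy
from theano import shared

w = shared(numpy.zeros((5, 5)))   # float64 ndarray with 2 dimensions
print(w.type)                     # -> TensorType(float64, matrix)
b = shared(7)                     # Python int -> iscalar or lscalar, depending on platform int width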