def test_append_inplace(self):
    mySymbolicMatricesList = TypedListType(T.TensorType(
        theano.config.floatX, (False, False)))()
    mySymbolicMatrix = T.matrix()
    z = Append()(mySymbolicMatricesList, mySymbolicMatrix)
    m = theano.compile.mode.get_default_mode().including("typed_list_inplace_opt")
    f = theano.function([In(mySymbolicMatricesList, borrow=True,
                            mutable=True),
                         In(mySymbolicMatrix, borrow=True,
                            mutable=True)],
                        z, accept_inplace=True, mode=m)
    self.assertTrue(f.maker.fgraph.toposort()[0].op.inplace)
    x = rand_ranged_matrix(-1000, 1000, [100, 101])
    y = rand_ranged_matrix(-1000, 1000, [100, 101])
    self.assertTrue(numpy.array_equal(f([x], y), [x, y]))
def test_extend_inplace(self):
    mySymbolicMatricesList1 = TypedListType(T.TensorType(
        theano.config.floatX, (False, False)))()
    mySymbolicMatricesList2 = TypedListType(T.TensorType(
        theano.config.floatX, (False, False)))()
    z = Extend()(mySymbolicMatricesList1, mySymbolicMatricesList2)
    m = theano.compile.mode.get_default_mode().including("typed_list_inplace_opt")
    f = theano.function([In(mySymbolicMatricesList1, borrow=True,
                            mutable=True),
                         mySymbolicMatricesList2],
                        z, mode=m)
    self.assertTrue(f.maker.fgraph.toposort()[0].op.inplace)
    x = rand_ranged_matrix(-1000, 1000, [100, 101])
    y = rand_ranged_matrix(-1000, 1000, [100, 101])
    self.assertTrue(numpy.array_equal(f([x], [y]), [x, y]))
def test_insert_inplace(self):
    mySymbolicMatricesList = TypedListType(T.TensorType(
        theano.config.floatX, (False, False)))()
    mySymbolicIndex = T.scalar(dtype='int64')
    mySymbolicMatrix = T.matrix()
    z = Insert()(mySymbolicMatricesList, mySymbolicIndex, mySymbolicMatrix)
    m = theano.compile.mode.get_default_mode().including("typed_list_inplace_opt")
    f = theano.function([In(mySymbolicMatricesList, borrow=True,
                            mutable=True),
                         mySymbolicIndex, mySymbolicMatrix],
                        z, accept_inplace=True, mode=m)
    self.assertTrue(f.maker.fgraph.toposort()[0].op.inplace)
    x = rand_ranged_matrix(-1000, 1000, [100, 101])
    y = rand_ranged_matrix(-1000, 1000, [100, 101])
    self.assertTrue(numpy.array_equal(
        f([x], numpy.asarray(1, dtype='int64'), y), [x, y]))
def test_remove_inplace(self):
    mySymbolicMatricesList = TypedListType(T.TensorType(
        theano.config.floatX, (False, False)))()
    mySymbolicMatrix = T.matrix()
    z = Remove()(mySymbolicMatricesList, mySymbolicMatrix)
    m = theano.compile.mode.get_default_mode().including("typed_list_inplace_opt")
    f = theano.function([In(mySymbolicMatricesList, borrow=True,
                            mutable=True),
                         In(mySymbolicMatrix, borrow=True,
                            mutable=True)],
                        z, accept_inplace=True, mode=m)
    self.assertTrue(f.maker.fgraph.toposort()[0].op.inplace)
    x = rand_ranged_matrix(-1000, 1000, [100, 101])
    y = rand_ranged_matrix(-1000, 1000, [100, 101])
    self.assertTrue(numpy.array_equal(f([x, y], y), [x]))
def test_type_equality(self):
    """
    Typed list types should be equal only
    when they contain the same Theano
    element type.
    """
    # list of matrices
    myType1 = TypedListType(T.TensorType(theano.config.floatX,
                                         (False, False)))
    # list of matrices
    myType2 = TypedListType(T.TensorType(theano.config.floatX,
                                         (False, False)))
    # list of scalars
    myType3 = TypedListType(T.TensorType(theano.config.floatX,
                                         ()))
    self.assertTrue(myType2 == myType1)
    self.assertFalse(myType3 == myType1)
def test_sanity_check_slice(self):
    mySymbolicMatricesList = TypedListType(T.TensorType(
        theano.config.floatX, (False, False)))()
    mySymbolicSlice = SliceType()()
    z = GetItem()(mySymbolicMatricesList, mySymbolicSlice)
    self.assertFalse(isinstance(z, T.TensorVariable))
    f = theano.function([mySymbolicMatricesList, mySymbolicSlice], z)
    x = rand_ranged_matrix(-1000, 1000, [100, 101])
    self.assertTrue(numpy.array_equal(f([x], slice(0, 1, 1)), [x]))
def test_sanity_check_single(self):
    mySymbolicMatricesList = TypedListType(T.TensorType(
        theano.config.floatX, (False, False)))()
    mySymbolicScalar = T.scalar(dtype='int64')
    z = GetItem()(mySymbolicMatricesList, mySymbolicScalar)
    f = theano.function([mySymbolicMatricesList, mySymbolicScalar], z)
    x = rand_ranged_matrix(-1000, 1000, [100, 101])
    self.assertTrue(numpy.array_equal(
        f([x], numpy.asarray(0, dtype='int64')), x))
def test_constant_input(self):
    mySymbolicMatricesList = TypedListType(T.TensorType(
        theano.config.floatX, (False, False)))()
    z = GetItem()(mySymbolicMatricesList, 0)
    f = theano.function([mySymbolicMatricesList], z)
    x = rand_ranged_matrix(-1000, 1000, [100, 101])
    self.assertTrue(numpy.array_equal(f([x]), x))
    z = GetItem()(mySymbolicMatricesList, slice(0, 1, 1))
    f = theano.function([mySymbolicMatricesList], z)
    self.assertTrue(numpy.array_equal(f([x]), [x]))
def test_sanity_check(self):
    mySymbolicMatricesList1 = TypedListType(T.TensorType(
        theano.config.floatX, (False, False)))()
    mySymbolicMatricesList2 = TypedListType(T.TensorType(
        theano.config.floatX, (False, False)))()
    z = Extend()(mySymbolicMatricesList1, mySymbolicMatricesList2)
    f = theano.function([mySymbolicMatricesList1, mySymbolicMatricesList2], z)
    x = rand_ranged_matrix(-1000, 1000, [100, 101])
    y = rand_ranged_matrix(-1000, 1000, [100, 101])
    self.assertTrue(numpy.array_equal(f([x], [y]), [x, y]))
def test_interface(self):
    mySymbolicMatricesList1 = TypedListType(T.TensorType(
        theano.config.floatX, (False, False)))()
    mySymbolicMatricesList2 = TypedListType(T.TensorType(
        theano.config.floatX, (False, False)))()
    z = mySymbolicMatricesList1.extend(mySymbolicMatricesList2)
    f = theano.function([mySymbolicMatricesList1, mySymbolicMatricesList2], z)
    x = rand_ranged_matrix(-1000, 1000, [100, 101])
    y = rand_ranged_matrix(-1000, 1000, [100, 101])
    self.assertTrue(numpy.array_equal(f([x], [y]), [x, y]))
def test_inplace(self):
    mySymbolicMatricesList = TypedListType(T.TensorType(
        theano.config.floatX, (False, False)))()
    myMatrix = T.matrix()
    myScalar = T.scalar(dtype='int64')
    z = Insert(True)(mySymbolicMatricesList, myScalar, myMatrix)
    f = theano.function([mySymbolicMatricesList, myScalar, myMatrix], z,
                        accept_inplace=True)
    x = rand_ranged_matrix(-1000, 1000, [100, 101])
    y = rand_ranged_matrix(-1000, 1000, [100, 101])
    self.assertTrue(numpy.array_equal(
        f([x], numpy.asarray(1, dtype='int64'), y), [x, y]))
def test_sanity_check(self):
    mySymbolicMatricesList = TypedListType(T.TensorType(
        theano.config.floatX, (False, False)))()
    myMatrix = T.matrix()
    myScalar = T.scalar(dtype='int64')
    z = Insert()(mySymbolicMatricesList, myScalar, myMatrix)
    f = theano.function([mySymbolicMatricesList, myScalar, myMatrix], z)
    x = rand_ranged_matrix(-1000, 1000, [100, 101])
    y = rand_ranged_matrix(-1000, 1000, [100, 101])
    self.assertTrue(numpy.array_equal(
        f([x], numpy.asarray(1, dtype='int64'), y), [x, y]))
def test_interface(self):
    mySymbolicMatricesList = TypedListType(T.TensorType(
        theano.config.floatX, (False, False)))()
    myMatrix = T.matrix()
    myScalar = T.scalar(dtype='int64')
    z = mySymbolicMatricesList.insert(myScalar, myMatrix)
    f = theano.function([mySymbolicMatricesList, myScalar, myMatrix], z)
    x = rand_ranged_matrix(-1000, 1000, [100, 101])
    y = rand_ranged_matrix(-1000, 1000, [100, 101])
    self.assertTrue(numpy.array_equal(
        f([x], numpy.asarray(1, dtype='int64'), y), [x, y]))
def test_non_tensor_type(self):
    mySymbolicNestedMatricesList = TypedListType(T.TensorType(
        theano.config.floatX, (False, False)), 1)()
    mySymbolicMatricesList = TypedListType(T.TensorType(
        theano.config.floatX, (False, False)))()
    z = Index()(mySymbolicNestedMatricesList, mySymbolicMatricesList)
    f = theano.function([mySymbolicNestedMatricesList,
                         mySymbolicMatricesList], z)
    x = rand_ranged_matrix(-1000, 1000, [100, 101])
    y = rand_ranged_matrix(-1000, 1000, [100, 101])
    self.assertTrue(f([[x, y], [x, y, y]], [x, y]) == 0)
def test_non_tensor_type(self):
    mySymbolicNestedMatricesList = TypedListType(T.TensorType(
        theano.config.floatX, (False, False)), 1)()
    mySymbolicMatricesList = TypedListType(T.TensorType(
        theano.config.floatX, (False, False)))()
    z = Count()(mySymbolicNestedMatricesList, mySymbolicMatricesList)
    f = theano.function([mySymbolicNestedMatricesList,
                         mySymbolicMatricesList], z)
    x = rand_ranged_matrix(-1000, 1000, [100, 101])
    y = rand_ranged_matrix(-1000, 1000, [100, 101])
    self.assertTrue(f([[x, y], [x, y, y]], [x, y]) == 1)
def test_cdata():
    if not theano.config.cxx:
        raise SkipTest("G++ not available, so we need to skip this test.")
    i = TensorType('float32', (False,))()
    c = ProdOp()(i)
    i2 = GetOp()(c)
    mode = None
    if theano.config.mode == "FAST_COMPILE":
        mode = "FAST_RUN"
    # This should be a passthrough function for vectors
    f = theano.function([i], i2, mode=mode)
    v = numpy.random.randn(9).astype('float32')
    v2 = f(v)
    assert (v2 == v).all()
def test_maxpool():
    """TODO: test the gpu version!!!"""
    for d0, d1, r_true, r_false in [(4, 4,
                                     [[[[5, 7], [13, 15]]]],
                                     [[[[5, 7], [13, 15]]]]),
                                    (5, 5,
                                     [[[[6, 8], [16, 18], [21, 23]]]],
                                     [[[[6, 8, 9], [16, 18, 19], [21, 23, 24]]]])]:
        for border, ret in [(True, r_true), (False, r_false)]:
            ret = numpy.array(ret)
            a = tcn.blas.Pool((2, 2), border)
            dmatrix4 = tensor.TensorType("float32", (False, False, False, False))
            b = dmatrix4()
            f = pfunc([b], [a(b)], mode=mode_with_gpu)
            bval = numpy.arange(0, d0 * d1).reshape(1, 1, d0, d1)
            r = f(bval)[0]
            # print bval, bval.shape, border
            # print r, r.shape
            assert (ret == r).all()
def local_gpu_extract_diagonal(node):
    """
    extract_diagonal(host_from_gpu()) -> host_from_gpu(extract_diagonal)
    gpu_from_host(extract_diagonal) -> extract_diagonal(gpu_from_host)
    """
    if (isinstance(node.op, nlinalg.ExtractDiag) and
            isinstance(node.inputs[0].type,
                       theano.tensor.TensorType)):
        inp = node.inputs[0]
        if inp.owner and isinstance(inp.owner.op, HostFromGpu):
            return [host_from_gpu(nlinalg.extract_diag(
                as_cuda_ndarray_variable(inp)))]
    if isinstance(node.op, GpuFromHost):
        host_input = node.inputs[0]
        if (host_input.owner and
                isinstance(host_input.owner.op, nlinalg.ExtractDiag) and
                isinstance(host_input.owner.inputs[0].type,
                           theano.tensor.TensorType)):
            diag_node = host_input.owner
            return [nlinalg.extract_diag(
                as_cuda_ndarray_variable(diag_node.inputs[0]))]
    return False
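
A local rewrite like the one above only takes effect once it is attached to Theano's optimizer database. The snippet below is a rough sketch of the usual registration pattern; the tags passed to register_opt and the list of tracked ops are assumptions and may differ between Theano versions.

# Sketch only: registering the rewrite above; tags and tracked ops are assumed.
from theano.gof import local_optimizer
from theano.sandbox.cuda.opt import register_opt
from theano.sandbox.cuda.basic_ops import GpuFromHost
from theano.tensor import nlinalg

@register_opt()                                       # add the rewrite to the GPU optimizer database
@local_optimizer([nlinalg.ExtractDiag, GpuFromHost])  # only visit nodes applying these ops
def local_gpu_extract_diagonal(node):
    # body identical to the function shown above
    ...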
def build_encoder_network(num_inputs, num_hidden):
    input = T.TensorType('float32', [None] * 3)('input')
    B, L = input.shape[0:2]
    l_in = InputLayer((None, max_seq_len, num_inputs))
    l_mask = InputLayer(shape=(None, max_seq_len))
    l_enc = MyLSTMLayer(l_in, num_hidden, mask_input=l_mask,
                        grad_clipping=grad_clip,
                        nonlinearity=lasagne.nonlinearities.rectify,
                        only_return_final=True)
    params = lasagne.layers.get_all_params(l_enc)
    hid_out, _ = lasagne.layers.get_output(l_enc, {l_in: input})
    tvars = [input, l_mask.input_var]
    return hid_out, tvars, theano.function(tvars, hid_out), params
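
build_encoder_network depends on globals from its original project (max_seq_len, grad_clip, MyLSTMLayer). Assuming those are defined, a call would look roughly like the hypothetical sketch below; the sizes and data are made up for illustration.

# Hypothetical usage of build_encoder_network; all sizes are illustrative.
import numpy as np

hid_out, tvars, encode_fn, params = build_encoder_network(num_inputs=40, num_hidden=128)
batch = np.random.rand(8, max_seq_len, 40).astype('float32')  # (batch, time, features)
mask = np.ones((8, max_seq_len), dtype='float32')             # every timestep is valid
codes = encode_fn(batch, mask)                                # final hidden state of each sequence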
def placeholder(shape=None, ndim=None, dtype=_FLOATX, name=None):
    '''Instantiate an input data placeholder variable.
    '''
    if shape is None and ndim is None:
        raise Exception('Specify either a shape or ndim value.')
    if shape is not None:
        ndim = len(shape)
    broadcast = (False,) * ndim
    # ====== add a unique ID prefix to the variable name ====== #
    global _PLACEHOLDER_ID
    name_prefix = 'ID.%02d.' % _PLACEHOLDER_ID
    _PLACEHOLDER_ID += 1
    if name is None:
        name = ''
    name = name_prefix + name
    placeholder = T.TensorType(dtype, broadcast)(name)
    # store the predefined shape of the placeholder
    _PLACEHOLDER_SHAPE[name] = \
        [None for _ in range(ndim)] if shape is None else shape
    return placeholder
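
Because placeholder only needs shape or ndim to build the broadcast pattern, either argument is enough. Two hypothetical calls, assuming the module-level _FLOATX, _PLACEHOLDER_ID and _PLACEHOLDER_SHAPE are set up as in the original project:

# Hypothetical usage of the placeholder helper above.
x = placeholder(shape=(None, 784), name='features')  # 2-D floatX variable, name prefixed with 'ID.xx.'
m = placeholder(ndim=3, dtype='int32', name='mask')   # 3-D int32 variable, shape stored as [None, None, None]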