def test_local_merge_abs():
x, y, z = T.matrices('xyz')
x_val = numpy.random.rand(5, 5).astype(config.floatX)
y_val = numpy.random.rand(5, 5).astype(config.floatX)
z_val = numpy.random.rand(5, 5).astype(config.floatX)
mode = theano.config.mode
if mode == "FAST_COMPILE":
mode = "FAST_RUN"
mode = theano.compile.mode.get_mode(mode).excluding(
"local_elemwise_fusion")
f = theano.function([y, z], (abs(y * z * -2)), mode=mode)
f(y_val, z_val)
assert isinstance(f.maker.fgraph.toposort()[1].op.scalar_op, scal.Abs)
assert len(f.maker.fgraph.toposort()) == 2
f = theano.function([x, y], abs(x / y), mode=mode)
f(x_val, y_val)
assert isinstance(f.maker.fgraph.toposort()[1].op.scalar_op, scal.Abs)
assert len(f.maker.fgraph.toposort()) == 2
def test_gpu_fusion(self):
shp = (5, 5)
# We need the optimisation enabled; debug mode does this.
if theano.config.mode == "FAST_COMPILE":
mode = theano.compile.mode.get_mode("FAST_RUN").including(
'local_elemwise_fusion', 'composite_elemwise_fusion',
'canonicalize', 'gpu')
else:
mode = theano.compile.mode.get_default_mode().including(
'local_elemwise_fusion', 'composite_elemwise_fusion',
'canonicalize', 'gpu')
import theano.sandbox.cuda as cuda
if not cuda.cuda_available:
raise SkipTest("cuda not available")
self.do(mode, cuda.float32_shared_constructor, shp, gpu=True)
def test_gpu_fusion_Xd(self):
# We need the optimisation enabled; debug mode does this.
if theano.config.mode == "FAST_COMPILE":
mode = theano.compile.mode.get_mode("FAST_RUN").including(
'local_elemwise_fusion', 'composite_elemwise_fusion',
'canonicalize', 'gpu')
else:
mode = theano.compile.mode.get_default_mode().including(
'local_elemwise_fusion', 'composite_elemwise_fusion',
'canonicalize', 'gpu')
import theano.sandbox.cuda as cuda
if not cuda.cuda_available:
raise SkipTest("cuda not available")
sizes = cuda.opt.get_device_type_sizes()
if sizes['int_size'] == 4:
shp = (5, 5, 5, 5)
else:
shp = (5, 5, 5)
self.do(mode, cuda.float32_shared_constructor, shp, gpu=True)
def test_inequality_with_self(self):
x = T.scalar('x', dtype=config.floatX)
mode = theano.compile.get_default_mode().including('local_useless_elemwise_comparison')
f = theano.function([x], T.lt(x, x), mode=mode)
self.assert_eqs_const(f, 0)
f = theano.function([x], T.le(x, x), mode=mode)
self.assert_eqs_const(f, 1)
f = theano.function([x], T.gt(x, x), mode=mode)
self.assert_eqs_const(f, 0)
f = theano.function([x], T.ge(x, x), mode=mode)
self.assert_eqs_const(f, 1)
f = theano.function([x], T.minimum(x, x), mode=mode)
self.assert_identity(f)
f = theano.function([x], T.maximum(x, x), mode=mode)
self.assert_identity(f)
def test_and(self):
mode = theano.compile.get_default_mode().including('canonicalize')
x = T.scalar('x', dtype='int8')
for zero, one in [(numpy.int8(0), numpy.int8(1)), (0, 1)]:
f = theano.function([x], T.and_(x, zero), mode=mode)
self.assert_eqs_const(f, 0)
f = theano.function([x], T.and_(zero, x), mode=mode)
self.assert_eqs_const(f, 0)
f = theano.function([x], T.and_(x, one), mode=mode)
if f.outputs[0].variable.dtype == x.dtype:
self.assert_identity(f)
f = theano.function([x], T.and_(one, x), mode=mode)
if f.outputs[0].variable.dtype == x.dtype:
self.assert_identity(f)
def test0(self):
x = shared(self.rng.randn(3, 7))
a = tensor.alloc(x, 6, 7)
# It is a bad idea to have tensor.alloc return x directly,
# because the shape mismatch cannot be caught.
assert a.owner and isinstance(a.owner.op, tensor.Alloc)
f = function([], a, mode=mode_opt)
# The optimization should then be applied, and remove Alloc
assert ([node.op for node in f.maker.fgraph.toposort()]
== [deep_copy_op])
# In DebugMode, the shape mismatch should be detected
if isinstance(mode_opt, compile.DebugMode):
self.assertRaises(ValueError, f)
# No need to check_stack_trace as the optimization
# local_canonicalize_alloc only removes nodes.
def test_local_remove_useless_assert2(self):
# Remove assert conditions that are always true.
mode = theano.config.mode
if mode == 'FAST_COMPILE':
mode = 'FAST_RUN'
mode = compile.mode.get_mode(mode)
x = T.scalar()
y = T.scalar()
f = theano.function([x, y], theano.tensor.opt.assert_op(x, y, 1),
mode=mode)
assert f(1, 1) == 1
assert f(5, 1) == 5
topo = f.maker.fgraph.toposort()
assert len(topo) == 2
assert len(topo[0].inputs) == 2
assert topo[1].op == deep_copy_op
def test_local_remove_useless_assert3(self):
# Don't remove assert conditions that are always false.
mode = theano.config.mode
if mode == 'FAST_COMPILE':
mode = 'FAST_RUN'
mode = compile.mode.get_mode(mode)
x = T.scalar()
y = T.scalar()
f = theano.function([x, y], theano.tensor.opt.assert_op(x, y, 0),
mode=mode)
self.assertRaises(AssertionError, f, 1, 0)
topo = f.maker.fgraph.toposort()
assert len(topo) == 2
assert len(topo[0].inputs) == 3
assert topo[1].op == deep_copy_op
def test_constant_folding():
""" Test that constant folding get registered at fast_compile
An error removed that registration during the registration.
"""
x = tensor.dvector()
mode = theano.compile.get_mode("FAST_COMPILE").excluding("fusion")
f = theano.function([x], [x * 2, x + x], mode=mode)
topo = f.maker.fgraph.toposort()
assert len(topo) == 2
# Test that we do not crash when constant-folding an elemwise scalar,
# as it should not generate C code.
x = tensor.constant(3)
assert x.ndim == 0
mode = theano.compile.get_mode("FAST_COMPILE").excluding("fusion")
f = theano.function([], [x * 2, x + x], mode=mode)
topo = f.maker.fgraph.toposort()
assert len(topo) == 2
assert all([isinstance(n.op, DeepCopyOp) for n in topo])
def setUp(self):
# condition values
self.condm = numpy.asarray([[0.1, 0, 1, -1],
[0., 0., 0., 0.],
[1, 1, 1, 1]])
self.condv = numpy.asarray([0.1, 0, 1, -1])
self.conds = [0.1, 0, 1, -1]
# x values
self.xm = numpy.ones((3, 4))
self.xv = numpy.ones((4,))
self.xs = 1.
# expected results
self.resm = (
    [numpy.asarray([[1, 0, 1, 0], [0, 0, 0, 0], [1, 1, 1, 1]])] * 3 +
    [numpy.asarray([[1, 0, 1, 0], [1, 0, 1, 0], [1, 0, 1, 0]])] +
    2 * [numpy.asarray([[1, 0, 1, 0]])] +
    [[numpy.ones((3, 4)), numpy.zeros((3, 4)),
      numpy.ones((3, 4)), numpy.zeros((3, 4))]] +
    [[numpy.ones((4,)), numpy.zeros((4,)),
      numpy.ones((4,)), numpy.zeros((4,))]] +
    [[numpy.asarray(1.0), numpy.asarray(0.0),
      numpy.asarray(1.0), numpy.asarray(0.0)]])
self.mode = theano.compile.mode.get_default_mode().including(
'canonicalize', 'fast_run').excluding('gpu', 'fusion')
self.mode = copy.copy(self.mode)
self.mode.check_isfinite = False
def speed_local_log_erfc(self):
val = numpy.random.rand(int(1e6))  # rand() requires an integer size
x = T.vector()
mode = theano.compile.mode.get_mode("FAST_RUN")
f1 = theano.function([x], T.log(T.erfc(x)),
                     mode=mode.excluding("local_log_erfc"))
f2 = theano.function([x], T.log(T.erfc(x)), mode=mode)
print(f1.maker.fgraph.toposort())
print(f2.maker.fgraph.toposort())
t0 = time.time()
f1(val)
t1 = time.time()
f2(val)
t2 = time.time()
print(t1 - t0, t2 - t1)
def test_local_useless_split():
x = tensor.matrix('x')
splits = tensor.ivector('splits')
opt = tensor.split(x, splits, n_splits=1)
nonopt = tensor.split(x, splits, n_splits=3)
mode = compile.get_default_mode().including("local_useless_split")
f_opt = theano.function([x, splits], opt, mode=mode)
f_nonopt = theano.function([x, splits], nonopt, mode=mode)
f_opt(numpy.random.rand(4, 4).astype(config.floatX), [4])
f_nonopt(numpy.random.rand(4, 4).astype(config.floatX), [1, 2, 1])
graph_opt = f_opt.maker.fgraph.toposort()
graph_nonopt = f_nonopt.maker.fgraph.toposort()
assert isinstance(graph_opt[-1].op, DeepCopyOp)
assert len(graph_nonopt) == 1
assert isinstance(graph_nonopt[0].op, tensor.Split)
assert check_stack_trace(f_opt, ops_to_check=[Assert])
assert check_stack_trace(f_nonopt, ops_to_check='all')
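# A minimal standalone NumPy sketch (not part of the original tests): splitting an
# array into a single piece is a no-op, which is the situation that the
# local_useless_split optimization is meant to remove from the graph.
import numpy as np

a = np.random.rand(4, 4)
pieces = np.array_split(a, 1, axis=0)  # one split -> the whole array back
assert len(pieces) == 1 and np.array_equal(pieces[0], a)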
def test_local_sumsqr2dot():
G = matrix('G')
W = matrix('W')
y = T.sqr(W.dimshuffle('x', 0, 1) * G.dimshuffle(0, 'x', 1)).sum(axis=(1, 2))
MODE = theano.compile.get_default_mode().including('local_sumsqr2dot')
f = function([W, G], y, mode=MODE)
w_val = numpy.random.rand(4, 3).astype(config.floatX)
g_val = numpy.random.rand(5, 3).astype(config.floatX)
f_val = f(w_val, g_val)
f_test = numpy.dot(numpy.square(g_val), numpy.square(w_val).sum(axis=0))
utt.assert_allclose(f_val, f_test)
assert any(isinstance(n.op, (tensor.basic.Dot, tensor.blas.Dot22,
tensor.blas.Gemv, tensor.blas_c.CGemv))
for n in f.maker.fgraph.toposort())
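# A minimal standalone NumPy sketch (not part of the original tests) of the algebraic
# identity that local_sumsqr2dot exploits: the broadcasted square-and-sum
#   sum_{j,k} (W[j, k] * G[i, k])**2
# equals dot(G**2, (W**2).sum(axis=0))[i], i.e. a single matrix-vector product.
import numpy as np

W = np.random.rand(4, 3)
G = np.random.rand(5, 3)
lhs = np.square(W[None, :, :] * G[:, None, :]).sum(axis=(1, 2))
rhs = np.dot(np.square(G), np.square(W).sum(axis=0))
assert np.allclose(lhs, rhs)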
def __init__(self, name,
shared=tensor._shared,
sub=tensor.AdvancedSubtensor,
inc_sub=tensor.AdvancedIncSubtensor,
mode=None,
dtype=theano.config.floatX,
ignore_topo=DeepCopyOp):
self.shared = shared
self.sub = sub
self.inc_sub = inc_sub
if mode is None:
mode = theano.compile.mode.get_default_mode()
self.mode = mode
self.dtype = dtype
self.ignore_topo = ignore_topo
super(TestAdvancedSubtensor, self).__init__(name)
def handle_shared_float32(tf):
"""
Set the default shared type for float32 tensors to CudaNdarrayType.
This function is intended to be called from use(gpu_index), not directly.
"""
if tf:
theano.compile.shared_constructor(float32_shared_constructor)
else:
theano.compile.shared_constructor(float32_shared_constructor, True)
assert (float32_shared_constructor not in
theano.compile.shared.constructors)
# We can't test the driver during import here, as that would cause a circular
# import dependency. So we also test it in the file theano/__init__.py.
def _dnn_check_compile():
preambule = """
#include <stdio.h>
#include <cudnn.h>
#include <cudnn_helper.h>
"""
# No need for the context in here since we won't execute that code
body = """
cudnnHandle_t _handle = NULL;
cudnnStatus_t err;
if ((err = cudnnCreate(&_handle)) != CUDNN_STATUS_SUCCESS) {
fprintf(stderr, "could not create cuDNN handle: %s",
cudnnGetErrorString(err));
return 1;
}
"""
params = ["-l", "cudnn", "-I" + os.path.dirname(__file__)]
if config.dnn.include_path:
params.append("-I" + config.dnn.include_path)
if config.dnn.library_path:
params.append("-L" + config.dnn.library_path)
# Do not run the test program here: it would run on the
# default GPU, not the one selected by the user. If mixed
# GPUs are installed, or the GPUs are configured in
# exclusive mode, this causes bad detection.
avail, out, err = GCC_compiler.try_flags(
params, preambule=preambule, body=body,
try_run=False, output=True)
if not avail:
return False, ("cannot compile with cuDNN. "
"We got this error:\n" + str(err))
return True, None
def test_filter_float():
theano.compile.shared_constructor(gpuarray_shared_constructor)
try:
s = theano.shared(numpy.array(0.0, dtype='float32'),
target=test_ctx_name)
theano.function([], updates=[(s, 0.0)])
finally:
del theano.compile.sharedvalue.shared.constructors[-1]
def test_abs_mul_div(self):
"""
test that if we have
4 * x / abs(2*x) it get simplifier during canonicalisation.
"""
x = T.dscalar()
a = T.abs_(x)
if theano.config.mode == 'FAST_COMPILE':
mode = theano.compile.mode.get_mode('FAST_RUN').excluding(
"local_elemwise_fusion")
else:
mode = theano.compile.mode.get_default_mode().excluding(
"local_elemwise_fusion")
f = theano.function([x], [(4 * x) / abs(2 * x)], mode=mode)
print(f.maker.fgraph.toposort())
print()
f(.1)
f(-1)
# Some stabilization optimizations make the output finite instead of NaN.
# DebugMode will raise an error when it sees NaN.
if not isinstance(mode, theano.compile.debugmode.DebugMode):
assert numpy.isfinite(f(0))
assert len(f.maker.fgraph.toposort()) == 2
assert f.maker.fgraph.toposort()[0].op == T.sgn
f = theano.function([x], [(4 * x) / abs(x / 2)], mode=mode)
print(f.maker.fgraph.toposort())
print()
f(.1)
f(-1)
# Some stabilization optimizations make the output finite instead of NaN.
# DebugMode will raise an error when it sees NaN.
if not isinstance(mode, theano.compile.debugmode.DebugMode):
assert numpy.isfinite(f(0))
assert len(f.maker.fgraph.toposort()) == 2
assert f.maker.fgraph.toposort()[0].op == T.sgn
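# A minimal standalone NumPy sketch (not part of the original tests) of the identity
# the canonicalization above is expected to reach: for x != 0,
#   (4 * x) / abs(2 * x) == 2 * sign(x)
import numpy as np

x = np.array([0.1, -1.0, 3.5])
assert np.allclose((4 * x) / np.abs(2 * x), 2 * np.sign(x))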
def test_elemwise_fusion_4d(self):
shp = (3, 3, 3, 3)
mode = copy.copy(compile.mode.get_default_mode())
# We need the fusion optimisations and canonicalize enabled;
# canonicalize is needed to merge multiplication/addition by a constant.
mode._optimizer = mode._optimizer.including(
'local_elemwise_fusion', 'composite_elemwise_fusion',
'canonicalize')
self.do(mode, shared, shp)
def speed_fusion(self, shared_fn=shared, gpu=False, s=None):
"""
param type s: a slice object
param s: a slice to apply to the case to execute. If None, exec all case.
"""
shp = (3000, 3000)
shp = (1000, 1000)
nb_repeat = 50
# linker=gof.CLinker
# linker=gof.OpWiseCLinker
mode1 = copy.copy(compile.get_default_mode())
mode1._optimizer = mode1._optimizer.including('local_elemwise_fusion')
# TODO: CLinker is much faster... but uses too much memory.
# Possible cause: there is no deletion of intermediate values when we don't keep the fct.
# More plausible cause: we keep a link to the output data?
# Follow-up: CLinker does the same... so the second cause?
mode2 = copy.copy(compile.get_default_mode())
mode2._optimizer = mode2._optimizer.excluding('local_elemwise_fusion')
print("test with linker", str(mode1.linker))
times1 = self.do(mode1, shared_fn, shp, gpu=gpu, nb_repeat=nb_repeat,
assert_len_topo=False, slice=s)
times2 = self.do(mode2, shared_fn, shp, gpu=gpu, nb_repeat=nb_repeat,
assert_len_topo=False, slice=s)
print("times1 with local_elemwise_fusion")
print(times1, times1.min(), times1.max(), times1.sum())
print("times2 without local_elemwise_fusion")
print(times2, times2.min(), times2.max(), times2.sum())
d = times2 / times1
print("times2/times1")
print(d)
print("min", d.min(), "argmin", d.argmin(), "max", d.max(), \
"mean", d.mean(), "std", d.std())
def test_fusion_inplace(self):
mode = copy.copy(compile.mode.get_default_mode())
# We need the fusion optimisations and canonicalize enabled;
# canonicalize is needed to merge multiplication/addition by a constant.
mode._optimizer = mode._optimizer.including(
'local_elemwise_fusion', 'composite_elemwise_fusion',
'canonicalize', 'inplace')
x, y, z = dmatrices('xyz')
f = theano.function([x, y, z], tensor.dot(x, y) + x + y + z, mode=mode)
topo = f.maker.fgraph.toposort()
assert len(topo) == 2
assert f.maker.fgraph.toposort()[-1].op.inplace_pattern
f(numpy.random.random((5, 5)), numpy.random.random((5, 5)),
numpy.random.random((5, 5)))
def test_log_add():
m = theano.config.mode
if m == 'FAST_COMPILE':
m = 'FAST_RUN'
m = compile.mode.get_mode(m)
m = m.excluding('fusion')
m = copy.copy(m)
# No need to put them back as we have a new object
m.check_isfinite = False
# check some basic cases
x = dvector()
y = dvector()
f = function([x, y], T.log(T.exp(x) + T.exp(y)), mode=m)
f([10000], [10000]) # causes overflow if handled incorrectly
assert numpy.isfinite(f([10000], [10000]))
utt.assert_allclose(f([10000], [10000]), 10000 + numpy.log1p(1))
# Test that it gives the same result when it doesn't overflow.
f([10], [10])  # doesn't cause overflow
utt.assert_allclose(f([10], [10]), 10 + numpy.log1p(1))
# Test that it also works with more than two args (this currently fails).
x = dvector()
y = dvector()
f = function([x, y], T.log(T.exp(x) + T.exp(y) + T.exp(x - y) + T.exp(
x + y)), mode=m)
try:
f([10000], [10000]) # causes overflow if handled incorrectly
utt.assert_allclose(f([10000], [10000]), 20000)
except utt.WrongValue:
raise SkipTest("log(add(exp)) is not stabilized when adding "
"more than 2 elements, see #623")
# TODO: test that the optimization works in the presence of broadcasting.
# TODO: (write and) test that the optimization works with Sum in addition to working with Add.
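# A minimal standalone NumPy sketch (not part of the original tests) of the
# log-sum-exp stabilization that the log(add(exp(...))) optimization relies on:
#   log(exp(a) + exp(b)) == m + log(exp(a - m) + exp(b - m)),  with m = max(a, b),
# which stays finite even when exp(a) or exp(b) would overflow on its own.
import numpy as np

def stable_log_add_exp(a, b):
    m = np.maximum(a, b)
    return m + np.log(np.exp(a - m) + np.exp(b - m))

with np.errstate(over='ignore'):
    naive = np.log(np.exp(10000.0) + np.exp(10000.0))  # overflows to inf
assert np.isinf(naive)
assert np.isclose(stable_log_add_exp(10000.0, 10000.0), 10000.0 + np.log1p(1.0))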
def test_stack_trace(self):
x, y, z = tensor.lscalars('xyz')
v = make_vector(x, y, z)
mode = theano.compile.mode.get_default_mode().including(
"local_subtensor_make_vector")
# list of subtensor cases, where local_subtensor_make_vector
# inserts a new MakeVector node
v_subtensors = [v[:2], v[::2], v[[0, 2]]]
for v_subtensor in v_subtensors:
f = function([x, y, z], v_subtensor, mode=mode)
self.assertTrue(check_stack_trace(f, ops_to_check='all'))
def setUp(self):
utt.seed_rng()
mode = theano.compile.mode.get_default_mode()
self.mode = mode.including("local_adv_sub1_adv_inc_sub1").excluding("fusion")
self.mode_no_assert = self.mode.including("local_remove_all_assert")
def setUp(self):
mode = theano.compile.mode.get_default_mode()
self.mode = mode.including("local_incsubtensor_of_zeros",
"local_setsubtensor_of_constants",
"local_0_dot_x")
def test_local_IncSubtensor_serialize():
d = numpy.random.normal(0, 0.01, size=(100, 100))
d = d.astype(theano.config.floatX)
W = theano.shared(d, name='W')
i = T.vector('i', dtype='int64')
j = T.vector('j', dtype='int64')
t = T.scalar('t')
if theano.tensor.subtensor.inplace_increment:
y = (W[i] + W[j] + W[1] + W[i, j]).sum()
else:
y = (W[i] + W[j] + W[1]).sum()
cost = T.sqr(t - y)
dW = theano.grad(cost, W)
mode = theano.compile.mode.get_default_mode().excluding('fusion')
mode = mode.including("local_IncSubtensor_serialize")
f = theano.function([i, j, t], updates=[(W, W - 0.01 * dW)], mode=mode)
topo = f.maker.fgraph.toposort()
adds = [n for n in topo if isinstance(n.op, T.Elemwise) and
isinstance(n.op.scalar_op, theano.scalar.Add)]
for a in adds:
assert not any([inp.owner and
isinstance(inp.owner.op,
(tensor.IncSubtensor,
tensor.AdvancedIncSubtensor,
tensor.AdvancedIncSubtensor1))
for inp in a.inputs])
# Now test that the stack trace is copied over properly
# when we return the gradients. We need to use the same mode as before.
f = theano.function([i, j, t], dW, mode=mode)
assert check_stack_trace(f, ops_to_check=[
tensor.IncSubtensor, tensor.AdvancedIncSubtensor,
tensor.AdvancedIncSubtensor1])
def test_local_subtensor_of_dot():
m1 = theano.tensor.matrix()
m2 = theano.tensor.matrix()
d1 = numpy.arange(6).reshape((3, 2)).astype(config.floatX)
d2 = numpy.arange(8).reshape((2, 4)).astype(config.floatX) + 10
mode = compile.get_default_mode().including("local_subtensor_of_dot")
def test_equality(a, b):
return a.shape == b.shape and numpy.allclose(a, b)
# [cst]
f = theano.function([m1, m2], theano.dot(m1, m2)[1], mode=mode)
topo = f.maker.fgraph.toposort()
assert test_equality(f(d1, d2), numpy.dot(d1, d2)[1])
# A DimShuffle happens in FAST_COMPILE.
assert isinstance(topo[-1].op, (T.blas_c.CGemv, T.blas.Gemv, T.DimShuffle))
# slice
f = theano.function([m1, m2], theano.dot(m1, m2)[1:2], mode=mode)
topo = f.maker.fgraph.toposort()
assert test_equality(f(d1, d2), numpy.dot(d1, d2)[1:2])
assert isinstance(topo[-1].op, T.blas.Dot22)
m1 = theano.tensor.tensor3()
m2 = theano.tensor.tensor3()
idx = theano.tensor.iscalar()
d1 = numpy.arange(30).reshape(2, 5, 3).astype(config.floatX)
d2 = numpy.arange(72).reshape(4, 3, 6).astype(config.floatX) + 100
f = theano.function([m1, m2, idx], theano.dot(m1, m2)[idx, 1:4, :, idx:], mode=mode)
assert test_equality(f(d1, d2, 1), numpy.dot(d1, d2)[1, 1:4, :, 1:])
# Check that the stack trace is copied over properly; we need to use the same mode as before.
assert check_stack_trace(f, ops_to_check='last')
f = theano.function([m1, m2, idx], theano.dot(m1, m2)[1:4, :, idx:, idx], mode=mode)
assert test_equality(f(d1, d2, 1), numpy.dot(d1, d2)[1:4, :, 1:, 1])
# Now test that the stack trace is copied over properly,
# if we return the gradients. We need to use same mode as before.
assert check_stack_trace(f, ops_to_check='last')
def test_shape_inequality_with_self(self):
x = T.vector('x', dtype=config.floatX)
mode = theano.compile.get_default_mode().including('local_useless_elemwise_comparison',
'local_shape_to_shape_i',
'local_track_shape_i',
'local_subtensor_make_vector')
f = theano.function([x], T.lt(x.shape[0], 0), mode=mode)
self.assert_eqs_const(f, 0)
f = theano.function([x], T.ge(x.shape[0], 0), mode=mode)
self.assert_eqs_const(f, 1)
f = theano.function([x], T.maximum(x.shape[0], 0), mode=mode)
topo = f.maker.fgraph.toposort()
assert len(topo) == 1
assert isinstance(topo[0].op, Shape_i), topo[0].op
x_val = numpy.ones(100, dtype=config.floatX)
assert f(x_val) == x_val.shape[0]
f = theano.function([x], T.maximum(0, x.shape[0]), mode=mode)
topo = f.maker.fgraph.toposort()
assert len(topo) == 1
assert isinstance(topo[0].op, Shape_i), topo[0].op
x_val = numpy.ones(100, dtype=config.floatX)
assert f(x_val) == x_val.shape[0]
f = theano.function([x], T.minimum(x.shape[0], 0), mode=mode)
self.assert_eqs_const(f, 0)
assert f(x_val) == 0
f = theano.function([x], T.minimum(0, x.shape[0]), mode=mode)
self.assert_eqs_const(f, 0)
assert f(x_val) == 0
f = theano.function([x], T.minimum([0, 0], x.shape[0]), mode=mode)
# This case isn't optimized.
# self.assert_eqs_const(f, 0)
utt.assert_allclose(f(x_val), [0, 0])
def test_shape_add_inequality(self):
x = T.vector('x', dtype=config.floatX)
mode = theano.compile.get_default_mode().including('local_useless_elemwise_comparison',
'local_shape_to_shape_i',
'local_track_shape_i',
'local_subtensor_make_vector')
y = T.vector('y', dtype=config.floatX)
f = theano.function([x, y], T.lt(x.shape[0]+y.shape[0], 0), mode=mode)
self.assert_eqs_const(f, 0)
f = theano.function([x, y], T.ge(x.shape[0]+y.shape[0], 0), mode=mode)
self.assert_eqs_const(f, 1)
def test_xor(self):
mode = theano.compile.get_default_mode().including('canonicalize')
x = T.scalar('x', dtype='int8')
f = theano.function([x], T.xor(x, x), mode=mode)
self.assert_eqs_const(f, 0)