def make_node(self, N, M, k):
    N = as_tensor_variable(N)
    M = as_tensor_variable(M)
    k = as_tensor_variable(k)
    return gof.Apply(
        self,
        [N, M, k],
        [TensorType(dtype=self.dtype, broadcastable=(False, False))()])
def make_node(self, n, m, k):
    n = as_tensor_variable(n)
    m = as_tensor_variable(m)
    k = as_tensor_variable(k)
    assert n.ndim == 0
    assert m.ndim == 0
    assert k.ndim == 0
    return gof.Apply(
        self,
        [n, m, k],
        [TensorType(dtype=self.dtype, broadcastable=(False, False))()])
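# Usage sketch (not from this file): a node like the one above is what backs
# theano.tensor.eye; the shape and diagonal offset below are illustrative.
import theano.tensor as T
eye_m = T.eye(3, 4, 0)   # symbolic 3x4 matrix with ones on the main diagonal
print(eye_m.eval())      # constant graph, so it can be evaluated directly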
def make_node(self, value, *shape):
    v = as_tensor_variable(value)
    sh, bcast = alloc_validate_shape(shape)
    if v.ndim > len(sh):
        raise TypeError("The Alloc value to use has more dimensions"
                        " than the specified dimensions",
                        v.ndim, len(sh))
    otype = TensorType(dtype=v.dtype, broadcastable=bcast)
    return gof.Apply(self, [v] + sh, [otype()])
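# Usage sketch (values made up): Alloc is normally reached through
# theano.tensor.alloc, which broadcasts a value into a given symbolic shape.
import theano.tensor as T
z = T.alloc(0.0, 3, 5)    # symbolic 3x5 matrix filled with 0.0
print(z.eval().shape)     # (3, 5)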
def make_node(self, x, shp):
    x = as_tensor_variable(x)
    shp_orig = shp
    shp = as_tensor_variable(shp, ndim=1)
    if not (shp.dtype.startswith('int') or
            (isinstance(shp, TensorConstant) and shp.data.size == 0)):
        # It raises an error if shp is not of integer type,
        # except when shp is constant and empty
        # (in this case, shp.dtype does not matter anymore).
        raise TypeError("Shape must be integers", shp, shp.dtype)
    assert shp.ndim == 1
    if isinstance(shp, TensorConstant):
        bcast = [s == 1 for s in shp.data]
        return gof.Apply(self, [x, shp], [tensor(x.type.dtype, bcast)])
    else:
        bcasts = [False] * self.ndim
        shp_list = shp_orig
        if hasattr(shp_orig, "ndim") and shp_orig.ndim == 0:
            shp_list = [shp_orig]
        for index in xrange(self.ndim):
            y = shp_list[index]
            y = as_tensor_variable(y)
            # Try to see if we can infer that y has a constant value of 1.
            # If so, that dimension should be broadcastable.
            try:
                bcasts[index] = (
                    hasattr(y, 'get_scalar_constant_value') and
                    y.get_scalar_constant_value() == 1)
            except NotScalarConstantError:
                pass
        return gof.Apply(self, [x, shp], [tensor(x.type.dtype, bcasts)])
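# Usage sketch, assuming the usual theano.tensor.reshape entry point: with a
# constant shape the op can infer broadcastable dimensions as above, while a
# symbolic shape falls back to the non-broadcastable pattern.
import theano.tensor as T
x = T.matrix('x')
y = T.reshape(x, (6,))        # constant shape: broadcastable pattern inferred
s = T.lvector('s')
z = T.reshape(x, s, ndim=1)   # symbolic shape: assumed non-broadcastable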
def make_node(self, x, reps):
    warnings.warn((
        "Tile op is deprecated, use tile function instead."), stacklevel=3)
    x = as_tensor_variable(x)
    reps = as_tensor_variable(reps)
    return gof.Apply(self, [x, reps], [tensor(x.type.dtype, [False] *
                                              self.ndim)])
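# The warning above points at the tile function; a minimal sketch of it
# (the repetition counts are illustrative).
import theano.tensor as T
x = T.matrix('x')
y = T.tile(x, (2, 3))   # repeat x 2 times along rows, 3 times along columns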
def make_node(self, value, *conds):
    if not isinstance(value, Variable):
        value = T.as_tensor_variable(value)
    cond = [T.as_tensor_variable(c) for c in conds]
    assert numpy.all([c.type.ndim == 0 for c in cond])
    return gof.Apply(self, [value] + cond, [value.type()])
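# Usage sketch of the Assert op built above; the import path and the message
# text are assumptions based on how older Theano exposes it.
import theano.tensor as T
from theano.tensor.opt import Assert
assert_positive = Assert('input must have a positive sum')
x = T.vector('x')
y = assert_positive(x, T.gt(x.sum(), 0))  # x passes through only if the condition holds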
def local_reshape_chain(op):
    @gof.local_optimizer([op])
    def f(node):
        """
        Reshape(Reshape(shape1), shape2) -> Reshape(shape2)
        """
        if not opt.check_chain(node, op, op):
            return False
        # TODO: this can permit a failing program to run by eliminating
        # the lower reshape.
        rval = node.op(node.inputs[0].owner.inputs[0], node.inputs[1])
        # It might happen that the desired output of this node has a
        # broadcastable pattern that does not match that of 'rval'. This
        # happens when, originally, we were able to figure out that one of
        # the dimensions of the reshape is one, but some other
        # transformation replaced the shape by one for which this cannot
        # be guessed. We should try to figure out why we lost the
        # information about this constant value... but in the meantime,
        # better not to apply this optimization.
        if rval.broadcastable == node.outputs[0].broadcastable:
            return [rval]
        else:
            return False
    return f
def c_compile_args(self):
    ret = []
    if self.use_blas():
        ret = blas.ldflags(libs=False, flags=True)
    if (theano.gof.cmodule.gcc_version() in ['4.3.0'] and
            self.kshp == (1, 1)):
        ret += ['-O2']
    # Add the -fopenmp flags
    ret += super(ConvOp, self).c_compile_args()
    return ret
def __init__(self, **kwargs):
    gof.Op.__init__(self, **kwargs)
def make_node(self, path):
    if isinstance(path, str):
        path = Constant(Generic(), path)
    return gof.Apply(self, [path],
                     [tensor(self.dtype,
                             broadcastable=self.broadcastable)])
def make_node(self):
    return gof.Apply(self, [],
                     [theano.Variable(Generic()),
                      tensor(self.dtype,
                             broadcastable=self.broadcastable)])
def make_node(self, data):
    return gof.Apply(self, [data],
                     [theano.Variable(Generic()), data.type()])
def make_node(self, request, data):
    return gof.Apply(self, [request, data],
                     [theano.Variable(Generic())])
def validate(self, fgraph):
    if not hasattr(fgraph, 'destroyers'):
        return True
    for r in self.protected + list(fgraph.outputs):
        if fgraph.destroyers(r):
            raise gof.InconsistencyError("Trying to destroy a protected "
                                         "Variable.", r)
def free(self):
    """
    When allow_gc = False, clear the Variables in storage_map.
    """
    # If self.fn has no allow_gc attribute, getattr returns True and we do
    # nothing. If allow_gc exists and is False, clear the storage of every
    # non-constant Variable so its memory can be released.
    if not getattr(self.fn, 'allow_gc', True):
        for key in self.fn.storage_map:
            if not isinstance(key, theano.gof.Constant):
                self.fn.storage_map[key][0] = None
        for node in self.nodes_with_inner_function:
            ops_with_inner_function[node.op].free()
def wrap_out(output):
    if isinstance(output, SymbolicOutput):
        return output
    elif isinstance(output, gof.Variable):
        return SymbolicOutput(output)
    else:
        raise TypeError("Unknown output type: %s (%s)" % (type(output),
                                                          output))
def _check_unused_inputs(self, inputs, outputs, on_unused_input):
    if on_unused_input is None:
        on_unused_input = theano.config.on_unused_input
    if on_unused_input == 'ignore':
        return
    # There should be two categories of variables in inputs:
    #  - variables that have to be provided (used_inputs)
    #  - shared variables that will be updated
    used_inputs = gof.graph.ancestors(
        ([o.variable for o in outputs] +
         [i.update for i in inputs if getattr(i, 'update', False)]),
        blockers=[i.variable for i in inputs])

    msg = ("theano.function was asked to create a function computing "
           "outputs given certain inputs, but the provided input "
           "variable at index %i is not part of the computational graph "
           "needed to compute the outputs: %s.\n%s")
    warn_msg = ("To make this warning into an error, you can pass the "
                "parameter on_unused_input='raise' to theano.function. "
                "To disable it completely, use on_unused_input='ignore'.")
    err_msg = ("To make this error into a warning, you can pass the "
               "parameter on_unused_input='warn' to theano.function. "
               "To disable it completely, use on_unused_input='ignore'.")

    for i in inputs:
        if ((i.variable not in used_inputs) and (i.update is None)):
            if on_unused_input == 'warn':
                warnings.warn(msg % (inputs.index(i), i.variable,
                                     warn_msg), stacklevel=6)
            elif on_unused_input == 'raise':
                raise UnusedInputError(msg % (inputs.index(i),
                                              i.variable, err_msg))
            else:
                raise ValueError("Invalid value for keyword "
                                 "on_unused_input of theano.function: "
                                 "'%s'.\nValid values are 'raise', "
                                 "'warn', and 'ignore'." % on_unused_input)
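# Sketch of the three modes checked above, via the theano.function keyword
# (variable names are arbitrary): 'warn' emits msg as a warning, 'raise'
# turns it into UnusedInputError, and 'ignore' silences the check entirely.
import theano
import theano.tensor as T
x = T.scalar('x')
unused = T.scalar('unused')
f = theano.function([x, unused], 2 * x, on_unused_input='warn')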
def PatternOptimizer(p1, p2, ign=True):
    return gof.OpKeyOptimizer(gof.PatternSub(p1, p2), ignore_newtrees=ign)
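# Hedged sketch of calling this helper; the nested-tuple pattern syntax with
# string wildcards follows gof.PatternSub, and the double-negation rule below
# is only an illustration.
import theano.tensor as T
double_neg_opt = PatternOptimizer((T.neg, (T.neg, 'x')), 'x')  # neg(neg(x)) -> x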
def __init__(self, maker, schedule=None):
    super(gof.LocalLinker, self).__init__()
    self.fgraph = None
    self.maker = maker
    if schedule:
        self.schedule = schedule
def __init__(self, inputs, outputs, **kwargs):
    if not isinstance(outputs, list):
        raise TypeError('outputs must be list', outputs)
    for i in inputs + outputs:
        if not isinstance(i, gof.Variable):
            raise TypeError(
                'inputs and outputs must be Variable instances', i)
    if 'updates' in kwargs or 'givens' in kwargs:
        raise TypeError('updates and givens are not allowed in kwargs')

    # To support shared variables correctly, the inner function should not
    # see them; otherwise there is a problem with the gradient.
    self.shared_inputs = [var for var in gof.graph.inputs(outputs)
                          if isinstance(var, SharedVariable)]
    shared_vars = [var.type() for var in self.shared_inputs]
    new = rebuild_collect_shared(outputs, inputs=inputs + shared_vars,
                                 replace=dict(izip(self.shared_inputs,
                                                   shared_vars)),
                                 copy_inputs_over=False)
    (new_inputs, new_outputs,
     [clone_d, update_d, update_expr, shared_inputs]) = new
    assert len(new_inputs) == len(inputs) + len(self.shared_inputs)
    assert len(new_outputs) == len(outputs)
    assert not update_d
    assert not update_expr
    assert not shared_inputs
    self.new_inputs = new_inputs
    self.new_outputs = new_outputs
    self.inputs = inputs
    self.outputs = outputs
    self.kwargs = kwargs
    self.input_types = [input.type for input in inputs]
    self.output_types = [output.type for output in outputs]
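# Minimal sketch of building and applying such an OpFromGraph (the inner
# graph below is made up); shared variables used inside the outputs would be
# collected into self.shared_inputs exactly as in the constructor above.
import theano
import theano.tensor as T
a, b = T.scalars('a', 'b')
op = theano.OpFromGraph([a, b], [a * b + a])
x, y = T.scalars('x', 'y')
f = theano.function([x, y], op(x, y))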