def conv2d_gru(inputs, n_output_channels, is_training, reuse, filter_size=3, padding="SAME", dilation=1, name='conv2d_gru', outputs_collections=None, **kwargs):
    """Adds a 2-D convolutional GRU layer.

    Runs three gate convolutions ("reset", "gate", "candidate") over the
    spatial input and blends the input with the candidate state using the
    standard GRU update rule.

    Args:
        inputs: a 4-D `Tensor` `[batch_size, in_height, in_width, depth]`.
        n_output_channels: int, number of output channels of each gate conv.
        is_training: bool, training or testing mode (forwarded to `conv2d_v2`).
        reuse: whether the layer and its variables should be reused; to reuse,
            the layer scope (`name`) must be given.
        filter_size: an int or list/tuple of 2 positive ints, the spatial
            dimensions of the filters.
        padding: one of `"VALID"` or `"SAME"` (forwarded to `conv2d_v2`).
        dilation: a positive int32, the dilation rate for the convolutions.
        name: optional name/scope for the variable scope.
        outputs_collections: the collections to which the outputs are added.
        **kwargs: extra keyword arguments forwarded verbatim to `conv2d_v2`
            (e.g. activation, batch_norm, w_init, b_init-related options —
            see `conv2d_v2` for the full list).

    Returns:
        The 4-D `Tensor` result of the gated blend, same leading shape as the
        gate convolutions produce, e.g. `[batch, new_height, new_width,
        n_output_channels]`.
    """
    def _gate_conv(x, gate_name, bias_start, pad):
        # One conv per GRU gate; `bias_start` is forwarded as `b_init`
        # (presumably a scalar bias-initializer value — matches original call).
        return conv2d_v2(x, n_output_channels, is_training, reuse,
                         filter_size=filter_size, padding=pad,
                         b_init=bias_start, dilation=dilation,
                         name=gate_name, **kwargs)

    with tf.variable_scope(name, reuse=reuse):
        # Reset and update gates are squashed with a saturating sigmoid;
        # bias_start=1.0 biases both gates toward pass-through at init.
        reset_gate = saturating_sigmoid(_gate_conv(inputs, "reset", 1.0, padding))
        update_gate = saturating_sigmoid(_gate_conv(inputs, "gate", 1.0, padding))
        # Candidate state sees the reset-modulated input.
        candidate_state = tf.tanh(
            _gate_conv(reset_gate * inputs, "candidate", 0.0, padding))
        # Standard GRU blend: keep `update_gate` of the input, mix in the rest.
        blended = update_gate * inputs + (1 - update_gate) * candidate_state
        return _collect_named_outputs(outputs_collections, name, blended)
# NOTE(review): removed non-code web-scrape residue that followed this
# function ("评论列表" / "文章目录" — "comment list" / "table of contents");
# those bare lines were a Python syntax error.