def _get_layer(self, layer_input, size_last_layer, size_current_layer):
    """
    Build one fully-connected layer, optionally batch-normalizing its input
    first (controlled by the `batch_norm` flag).

    @param layer_input: tensor fed into the layer.
    @param size_last_layer: width of the previous layer (or of the network
        input) — the number of rows of the weight matrix.
    @param size_current_layer: width of this layer — the number of columns
        of the weight matrix and the length of the bias vector.
    @return: activation_func(layer_input @ W + b); when `self.batch_norm`
        is set, `layer_input` is batch-normalized before the matmul.
    """
    weight = tf.Variable(tf.random_normal([size_last_layer, size_current_layer]))
    bias = tf.Variable(tf.random_normal([size_current_layer]))
    if self.batch_norm:
        # NOTE(review): the scope name is derived only from the layer sizes,
        # so two layers with identical dimensions would collide on the same
        # 'bn…' variable scope — confirm layer sizes are unique per network,
        # or thread a unique layer index into the scope.
        layer_input = tf.contrib.layers.batch_norm(
            layer_input,
            center=True, scale=True,
            is_training=self.is_training,
            scope='bn{}-{}'.format(size_last_layer, size_current_layer))
    return self.activation_func(tf.add(tf.matmul(layer_input, weight), bias))