def conv(self, name, filter_size, stride, num_output, stddev, bias, layer_name=None, input=None, reuse=False, relu=True):
    """Append a 2-D convolution layer (VALID padding) to the network.

    Creates (or reuses) the layer's weight and bias variables under a
    variable scope named ``name``, registers them in ``self.weights`` /
    ``self.biases``, stores the resulting op in ``self.layers`` and
    updates ``self.last_layer``.

    Args:
        name: Variable-scope name; also the layer key unless
            ``layer_name`` overrides it.
        filter_size: Spatial size of the square convolution kernel.
        stride: Spatial stride (applied to both height and width).
        num_output: Number of output channels (filters).
        stddev: Std-dev of the random-normal weight initializer.
        bias: Scalar initial value broadcast to every bias element.
        layer_name: Optional key for ``self.layers`` distinct from the
            variable-scope name (useful with ``reuse=True``).
        input: Input tensor; defaults to ``self.last_layer``.
            NOTE(review): assumed NHWC — channel count is read from
            ``input.shape[3]``.
        reuse: If True, reuse existing variables in scope ``name``.
        relu: If True, apply ReLU after the bias add.

    Returns:
        ``self``, to allow fluent chaining of layer-builder calls.
    """
    if input is None:
        input = self.last_layer
    with tf.variable_scope(name) as scope:
        if reuse:
            scope.reuse_variables()
        weight = tf.get_variable(
            'weights', dtype=tf.float32,
            initializer=tf.random_normal(
                [filter_size, filter_size, int(input.shape[3]), num_output],
                stddev=stddev))
        # Use a distinct local name so the scalar `bias` argument is not
        # clobbered by the variable (the original shadowed it).
        bias_var = tf.get_variable(
            'biases', dtype=tf.float32,
            initializer=np.ones(num_output, dtype=np.float32) * bias)
        self.weights[name] = weight
        self.biases[name] = bias_var
        if layer_name is not None:
            name = layer_name
        # Build the conv + bias-add once; the ReLU flag only wraps it.
        out = tf.nn.bias_add(
            tf.nn.conv2d(input, weight, [1, stride, stride, 1], "VALID"),
            bias_var)
        if relu:
            out = tf.nn.relu(out)
        self.layers[name] = out
        self.last_layer = out
    return self
# (Removed scraped blog-page navigation text — "评论列表" / "文章目录" —
#  which was not Python and would break the file.)