import tensorflow as tf


def lrelu(x, leak=0.2, name='lrelu'):
    # Leaky-ReLU helper (assumed; the post uses lrelu but defines it elsewhere).
    with tf.variable_scope(name):
        return tf.maximum(x, leak * x)


def conv2d(input_, output_shape, is_train,
           k=5, s=2, stddev=0.01,
           name='conv2d', with_w=False):
    k_h = k_w = k  # square kernel
    s_h = s_w = s  # same stride in height and width
    with tf.variable_scope(name):
        # Kernel layout: [height, width, in_channels, out_channels].
        weights = tf.get_variable(
            'weights',
            [k_h, k_w, input_.get_shape()[-1], output_shape[-1]],
            initializer=tf.truncated_normal_initializer(stddev=stddev))
        conv = tf.nn.conv2d(input_, weights, strides=[1, s_h, s_w, 1], padding='SAME')
        biases = tf.get_variable('biases', [output_shape[-1]],
                                 initializer=tf.constant_initializer(0.0))
        conv = tf.nn.bias_add(conv, biases)
        # Batch normalization followed by leaky-ReLU activation.
        bn = tf.contrib.layers.batch_norm(conv, center=True, scale=True, decay=0.9,
                                          is_training=is_train, updates_collections=None)
        out = lrelu(bn, name='lrelu')
        if with_w:
            return out, weights, biases
        else:
            return out
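

# A minimal usage sketch, assuming TF 1.x graph mode. The input shape,
# channel count, and scope name below are illustrative, not from the post.
images = tf.placeholder(tf.float32, [None, 64, 64, 3], name='images')
is_train = tf.placeholder(tf.bool, name='is_train')
# Downsamples 64x64x3 feature maps to 32x32x64 (stride 2, 'SAME' padding).
h1 = conv2d(images, output_shape=[None, 32, 32, 64], is_train=is_train,
            k=5, s=2, name='conv1')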