import tensorflow as tf


def lrelu(x, n, leak=0.2):
    # Leaky ReLU helper; the post calls it but its definition is not shown,
    # so this is the conventional DCGAN form with slope 0.2.
    return tf.maximum(x, leak * x, name=n)


def discriminator(input, is_train, reuse=False):
    c2, c4, c8 = 16, 32, 64  # channel counts of the three conv blocks
    with tf.variable_scope('dis') as scope:
        if reuse:
            scope.reuse_variables()
        # Conv block 1: stride 2 halves the spatial size -> 16*16*16 (for a 32*32 input)
        conv1 = tf.layers.conv2d(input, c2, kernel_size=[4, 4], strides=[2, 2], padding="SAME",
                                 kernel_initializer=tf.truncated_normal_initializer(stddev=0.02),
                                 name='conv1')
        act1 = lrelu(conv1, n='act1')
        # Conv block 2: -> 8*8*32, with batch norm before the activation
        conv2 = tf.layers.conv2d(act1, c4, kernel_size=[4, 4], strides=[2, 2], padding="SAME",
                                 kernel_initializer=tf.truncated_normal_initializer(stddev=0.02),
                                 name='conv2')
        bn2 = tf.layers.batch_normalization(conv2, training=is_train, name='bn2')
        act2 = lrelu(bn2, n='act2')
        # Conv block 3: -> 4*4*64
        conv3 = tf.layers.conv2d(act2, c8, kernel_size=[4, 4], strides=[2, 2], padding="SAME",
                                 kernel_initializer=tf.truncated_normal_initializer(stddev=0.02),
                                 name='conv3')
        bn3 = tf.layers.batch_normalization(conv3, training=is_train, name='bn3')
        act3 = lrelu(bn3, n='act3')
        # Flatten 4*4*64 = 1024 features, then a single-unit linear layer
        shape = act3.get_shape().as_list()
        dim = shape[1] * shape[2] * shape[3]
        fc1 = tf.reshape(act3, shape=[-1, dim], name='fc1')
        w1 = tf.get_variable('w1', shape=[fc1.shape[1], 1], dtype=tf.float32,
                             initializer=tf.truncated_normal_initializer(stddev=0.02))
        b1 = tf.get_variable('b1', shape=[1], dtype=tf.float32,
                             initializer=tf.constant_initializer(0.0))
        # Sigmoid probability that the input image is real
        output = tf.nn.sigmoid(tf.add(tf.matmul(fc1, w1), b1, name='output'))
        return output
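For context, here is a minimal sketch of how this discriminator might be wired into a TF 1.x graph. The 32*32*3 input shape, the placeholders, and the fake_image tensor are illustrative assumptions, not part of the original post:

# Hypothetical usage sketch (TF 1.x); adjust shapes to your dataset.
real_image = tf.placeholder(tf.float32, shape=[None, 32, 32, 3], name='real_image')
is_train = tf.placeholder(tf.bool, name='is_train')

d_real = discriminator(real_image, is_train)               # first call creates the 'dis' variables
# d_fake = discriminator(fake_image, is_train, reuse=True) # later calls must pass reuse=True

# tf.layers.batch_normalization registers moving-average update ops that must
# run with the train step, e.g.:
# update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
# with tf.control_dependencies(update_ops):
#     d_train = tf.train.AdamOptimizer(2e-4).minimize(d_loss)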