def discriminator(img, name, target):
    """GAN discriminator: 3 strided convolutions followed by a linear head.

    Expects `img` to contain fake and real images concatenated along the
    batch axis — fake samples first (`[:FLAGS.batch_size]`), real samples
    second (`[FLAGS.batch_size:]`) — for 2 * batch_size rows in total.

    Args:
        img: Input image batch tensor (fake ++ real along axis 0).
        name: Variable scope name, enabling variable sharing/reuse.
        target: Unused here; kept for interface compatibility with callers.

    Returns:
        Tuple (f, logit, d_loss_true, d_loss_fake) where `f` is the total
        discriminator loss, `logit` the raw per-sample logits, and the two
        loss terms are the real/fake sigmoid cross-entropy means.
    """
    size = 64  # base channel count; doubled at each downsampling stage
    with tf.variable_scope(name):
        # Three stride-2 convs: each halves spatial size and widens channels.
        img = ly.conv2d(img, num_outputs=size, kernel_size=3,
                        stride=2, activation_fn=lrelu, normalizer_fn=ly.batch_norm)
        img = ly.conv2d(img, num_outputs=size * 2, kernel_size=3,
                        stride=2, activation_fn=lrelu, normalizer_fn=ly.batch_norm)
        img = ly.conv2d(img, num_outputs=size * 4, kernel_size=3,
                        stride=2, activation_fn=lrelu, normalizer_fn=ly.batch_norm)
        # Flatten to (2 * batch, features) for the linear head.
        # NOTE(review): global `batch_size` is used here while the slicing
        # below uses FLAGS.batch_size — confirm the two always agree.
        img = tf.reshape(img, (2 * batch_size, -1))
        weights = slim.model_variable('weights',
                                      shape=[img.get_shape().as_list()[-1], 1],
                                      initializer=ly.xavier_initializer())
        bias = slim.model_variable('bias', shape=(1,),
                                   initializer=tf.zeros_initializer)
        logit = fully_connected(img, weights, bias)
        # Fake samples occupy the first half of the batch, real the second.
        fake_logit = logit[:FLAGS.batch_size]
        true_logit = logit[FLAGS.batch_size:]
        # Fix: TF >= 1.0 requires keyword arguments for this op; the old
        # positional form (logits, labels) raises a ValueError (and the
        # positional order changed across versions, silently swapping
        # labels/logits on some releases).
        d_loss_fake = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
            logits=fake_logit, labels=tf.zeros_like(fake_logit)))
        d_loss_true = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
            logits=true_logit, labels=tf.ones_like(true_logit)))
        # Both terms are already scalar means; a further reduce_mean was a
        # no-op, so the total loss is simply their sum.
        f = d_loss_fake + d_loss_true
        return f, logit, d_loss_true, d_loss_fake