def _recon_loss_using_disc_encoder(
self, opts, reconstructed_training, encoded_training,
real_points, is_training_ph, keep_prob_ph):
"""Build an additional loss using the encoder as discriminator."""
reconstructed_reencoded_sg = self.encoder(
opts, tf.stop_gradient(reconstructed_training),
is_training=is_training_ph, keep_prob=keep_prob_ph, reuse=True)
if opts['e_is_random']:
reconstructed_reencoded_sg = reconstructed_reencoded_sg[0]
reconstructed_reencoded = self.encoder(
opts, reconstructed_training, is_training=is_training_ph,
keep_prob=keep_prob_ph, reuse=True)
if opts['e_is_random']:
reconstructed_reencoded = reconstructed_reencoded[0]
    # The next line makes the forward value equal to reconstructed_reencoded,
    # while in the backward pass the encoder-variable gradients of the two
    # terms cancel: this re-encoding pass does NOT change the encoder, and the
    # gradient flows only into the reconstruction itself.
    crazy_hack = reconstructed_reencoded - reconstructed_reencoded_sg + \
        tf.stop_gradient(reconstructed_reencoded_sg)
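    # Codes of the (gradient-stopped) real points; these act as the "real"
    # samples for the adversarial term below.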
encoded_training_sg = self.encoder(
opts, tf.stop_gradient(real_points),
is_training=is_training_ph, keep_prob=keep_prob_ph, reuse=True)
if opts['e_is_random']:
encoded_training_sg = encoded_training_sg[0]
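    # A shared linear layer on top of the codes plays the role of a
    # discriminator: it should assign label 1 to codes of real points and
    # label 0 to codes of reconstructions.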
adv_fake_layer = ops.linear(opts, reconstructed_reencoded_sg, 1, scope='adv_layer')
adv_true_layer = ops.linear(opts, encoded_training_sg, 1, scope='adv_layer', reuse=True)
adv_fake = tf.nn.sigmoid_cross_entropy_with_logits(
logits=adv_fake_layer, labels=tf.zeros_like(adv_fake_layer))
adv_true = tf.nn.sigmoid_cross_entropy_with_logits(
logits=adv_true_layer, labels=tf.ones_like(adv_true_layer))
adv_fake = tf.reduce_mean(adv_fake)
adv_true = tf.reduce_mean(adv_true)
adv_c_loss = adv_fake + adv_true
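    # Embedding loss: distance between the re-encoded reconstruction and the
    # code of the corresponding real point; thanks to crazy_hack its gradient
    # bypasses the re-encoding pass through the encoder and flows only into
    # the reconstruction.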
emb_c = tf.reduce_sum(tf.square(crazy_hack - tf.stop_gradient(encoded_training)), 1)
emb_c_loss = tf.reduce_mean(tf.sqrt(emb_c + 1e-5))
    # Normalize the loss so that its scale does not depend on how good the
    # encoder-discriminator currently is: the forward value is always 1, and
    # the gradient is the original gradient divided by the current loss value.
    emb_c_loss = emb_c_loss / tf.stop_gradient(emb_c_loss)
return adv_c_loss, emb_c_loss
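
# Standalone sketch (not part of the original model) of the stop-gradient trick
# used for crazy_hack above: the forward value equals the re-encoded tensor, yet
# the encoder weights receive zero gradient, so only the path into the decoder
# output is trained. `w_enc` and `dec_out` are hypothetical scalar stand-ins for
# an encoder weight and a decoder output; TF 1.x graph mode is assumed, matching
# the rest of this code.
def _demo_stop_gradient_trick():
    import tensorflow as tf  # local import so the sketch is self-contained

    w_enc = tf.Variable(2.0)    # stand-in for an encoder weight
    dec_out = tf.Variable(3.0)  # stand-in for a decoder output
    enc = w_enc * dec_out                           # encoder applied to the decoder output
    enc_sg = w_enc * tf.stop_gradient(dec_out)      # same value, gradient into dec_out blocked
    hack = enc - enc_sg + tf.stop_gradient(enc_sg)  # forward value equals enc
    loss = tf.square(hack - 1.0)
    grads = tf.gradients(loss, [w_enc, dec_out])
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        # Prints [0.0, 20.0]: the encoder weight gets no gradient, while the
        # decoder output still receives a non-zero gradient.
        print(sess.run(grads))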