Source file: pot.py (Python)
Project: adagan — author: tolstikhin (project source code excerpt)
Page stats: 25 reads, 0 bookmarks, 0 likes, 0 comments
def _recon_loss_using_disc_encoder(
            self, opts, reconstructed_training, encoded_training,
            real_points, is_training_ph, keep_prob_ph):
        """Build an additional reconstruction loss that reuses the encoder
        as a discriminator.

        Returns a pair ``(adv_c_loss, emb_c_loss)``:
          * ``adv_c_loss`` — adversarial loss of a linear classifier
            ('adv_layer') separating codes of reconstructions from codes
            of real points (encoder inputs are stop-gradient'ed, so this
            loss trains the classifier, not the encoder/generator).
          * ``emb_c_loss`` — distance in code space between re-encoded
            reconstructions and the original codes, self-normalized so
            its magnitude does not depend on how good the encoder is.
        """
        def _encode(points):
            # Re-encode `points` with the already-built encoder
            # (reuse=True). When the encoder is stochastic it returns a
            # tuple; keep only the mean/sample component.
            code = self.encoder(
                opts, points, is_training=is_training_ph,
                keep_prob=keep_prob_ph, reuse=True)
            if opts['e_is_random']:
                code = code[0]
            return code

        recon_code_sg = _encode(tf.stop_gradient(reconstructed_training))
        recon_code = _encode(reconstructed_training)
        # Straight-through estimator: the forward value equals
        # recon_code, while on the backward pass the encoder is treated
        # as a constant (gradients flow to the generator only).
        straight_through = recon_code - recon_code_sg +\
            tf.stop_gradient(recon_code_sg)
        real_code_sg = _encode(tf.stop_gradient(real_points))

        # Linear discriminator on top of the codes; first call creates
        # the 'adv_layer' variables, second call reuses them.
        logits_fake = ops.linear(opts, recon_code_sg, 1, scope='adv_layer')
        logits_real = ops.linear(opts, real_code_sg, 1, scope='adv_layer', reuse=True)
        loss_fake = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(
                logits=logits_fake, labels=tf.zeros_like(logits_fake)))
        loss_real = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(
                logits=logits_real, labels=tf.ones_like(logits_real)))
        adv_c_loss = loss_fake + loss_real

        # Euclidean distance (with a small epsilon inside the sqrt for
        # numerical stability) between the straight-through codes and
        # the fixed original codes.
        sq_dist = tf.reduce_sum(
            tf.square(straight_through - tf.stop_gradient(encoded_training)), 1)
        emb_c_loss = tf.reduce_mean(tf.sqrt(sq_dist + 1e-5))
        # Normalize the loss so that it does not depend on how good the
        # discriminator is.
        emb_c_loss = emb_c_loss / tf.stop_gradient(emb_c_loss)
        return adv_c_loss, emb_c_loss
评论列表
文章目录


问题


面经


文章

微信
公众号

扫码关注公众号