import numpy as np
import tensorflow as tf

import ops


def discriminator(self, opts, input_, prefix='DISCRIMINATOR', reuse=False):
    """Discriminator for the GAN objective.

    A stack of fully connected ReLU layers mapping latent codes to a
    single real-valued logit.
    """
    num_units = opts['d_num_filters']
    num_layers = opts['d_num_layers']
    nowozin_trick = opts['gan_p_trick']
    # No convolutions, as the GAN happens in the latent space.
    with tf.variable_scope(prefix, reuse=reuse):
        hi = input_
        for i in range(num_layers):
            hi = ops.linear(opts, hi, num_units, scope='h%d_lin' % (i + 1))
            hi = tf.nn.relu(hi)
        hi = ops.linear(opts, hi, 1, scope='final_lin')
        if nowozin_trick:
            # We are doing GAN between our model Qz and the true Pz.
            # We know the analytical form of the true Pz.
            # The optimal discriminator for D_JS(Pz, Qz) is given by:
            #   Dopt(x) = log dPz(x) - log dQz(x)
            # And we know dPz(x) exactly, so add log dPz(x) explicitly
            # to the discriminator and let it learn only the remaining
            # dQz(x) term. This appeared in the AVB paper.
            # For Pz = N(0, sigma2_p * I) in d dimensions:
            #   log dPz(x) = -||x||^2 / (2 * sigma2_p)
            #                - d/2 * log(2 * pi) - d/2 * log(sigma2_p)
            assert opts['latent_space_distr'] == 'normal'
            d = opts['latent_space_dim']
            sigma2_p = float(opts['pot_pz_std']) ** 2
            normsq = tf.reduce_sum(tf.square(input_), 1)
            hi = hi - normsq / 2. / sigma2_p \
                - 0.5 * d * np.log(2. * np.pi) \
                - 0.5 * d * np.log(sigma2_p)
    return hi
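

# --- Sanity check for the Nowozin term (illustrative only) -----------------
# A quick numerical check, a sketch assuming scipy is available, that the
# closed-form expression added under `nowozin_trick` above equals the
# log-density of Pz = N(0, sigma2_p * I). The values of d, sigma2_p, and the
# helper name `_check_log_pz` are arbitrary stand-ins, not part of the
# original code.
def _check_log_pz(d=8, sigma2_p=4., seed=0):
    from scipy.stats import multivariate_normal
    rng = np.random.RandomState(seed)
    x = rng.randn(d)
    closed_form = (- np.sum(x ** 2) / 2. / sigma2_p
                   - 0.5 * d * np.log(2. * np.pi)
                   - 0.5 * d * np.log(sigma2_p))
    reference = multivariate_normal(
        mean=np.zeros(d), cov=sigma2_p * np.eye(d)).logpdf(x)
    assert np.allclose(closed_form, reference)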
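

# --- Usage sketch (illustrative only) ---------------------------------------
# A minimal, self-contained example of how a latent-space discriminator like
# the one above is typically plugged into the adversarial Qz-vs-Pz loss. The
# opts values, `toy_discriminator`, and tf.layers.dense (standing in for the
# repository's ops.linear) are hypothetical, not the project's actual API.
def toy_discriminator(opts, input_, reuse=False):
    with tf.variable_scope('TOY_DISCRIMINATOR', reuse=reuse):
        hi = input_
        for i in range(opts['d_num_layers']):
            hi = tf.layers.dense(hi, opts['d_num_filters'],
                                 activation=tf.nn.relu,
                                 name='h%d_lin' % (i + 1))
        hi = tf.layers.dense(hi, 1, name='final_lin')
    return hi


if __name__ == '__main__':
    toy_opts = {'d_num_filters': 64, 'd_num_layers': 3,
                'latent_space_dim': 8, 'pot_pz_std': 1.}
    dim = toy_opts['latent_space_dim']
    # Samples from the prior Pz and a placeholder for encoded Qz samples.
    pz_samples = tf.random_normal([100, dim], stddev=toy_opts['pot_pz_std'])
    qz_samples = tf.placeholder(tf.float32, [None, dim])

    d_pz = toy_discriminator(toy_opts, pz_samples)
    d_qz = toy_discriminator(toy_opts, qz_samples, reuse=True)

    # Standard GAN discriminator loss: push Pz logits towards 1, Qz towards 0.
    d_loss = tf.reduce_mean(
        tf.nn.sigmoid_cross_entropy_with_logits(
            labels=tf.ones_like(d_pz), logits=d_pz)
        + tf.nn.sigmoid_cross_entropy_with_logits(
            labels=tf.zeros_like(d_qz), logits=d_qz))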