def discriminator_anderson_test(self, opts, input_):
    """Deterministic discriminator using the Anderson Darling test.

    Builds a differentiable Anderson-Darling goodness-of-fit statistic per
    latent dimension, measuring how far each column of ``input_`` is from a
    zero-mean Gaussian with std ``opts['pot_pz_std']``, and returns the mean
    of the squared per-dimension statistics as a scalar penalty.

    Args:
        opts: Options dict; this method reads ``opts['pot_pz_std']`` (cast to
            float, the std of the target Gaussian prior) and forwards ``opts``
            to ``self.get_batch_size``.
        input_: 2-D tensor, assumed shape ``(batch_size, z_dim)`` given the
            axis-0 reductions and the ``[1, 0]`` transpose — TODO confirm at
            call sites.

    Returns:
        Scalar tensor: mean over z_dim of the squared A-D statistics.
    """
    # A-D test says to normalize data before computing the statistic
    # Because true mean and variance are known, we are supposed to use
    # the population parameters for that, but wiki says it's better to
    # still use the sample estimates while normalizing
    means = tf.reduce_mean(input_, 0)
    input_ = input_ - means  # Broadcasting: center each dimension.
    # 1e-5 keeps the sqrt/division stable for near-constant dimensions.
    stds = tf.sqrt(1e-5 + tf.reduce_mean(tf.square(input_), 0))
    input_= input_ / stds
    # top_k can only sort on the last dimension and we want to sort the
    # first one (batch_size), so work on the (z_dim, batch_size) transpose.
    batch_size = self.get_batch_size(opts, input_)
    transposed = tf.transpose(input_, perm=[1, 0])
    values, indices = tf.nn.top_k(transposed, k=tf.cast(batch_size, tf.int32))
    # top_k sorts descending; reverse along the batch axis to obtain
    # ascending order statistics, as the A-D formula expects.
    values = tf.reverse(values, [1])
    # NOTE(review): the data was just normalized to (approximately) unit
    # variance, yet the reference CDF uses std pot_pz_std — confirm this is
    # intentional when pot_pz_std != 1.
    normal_dist = tf.contrib.distributions.Normal(0., float(opts['pot_pz_std']))
    normal_cdf = normal_dist.cdf(values)
    # ln_normal_cdf is of shape (z_dim, batch_size)
    ln_normal_cdf = tf.log(normal_cdf)
    ln_one_normal_cdf = tf.log(1.0 - normal_cdf)
    # A-D weights over the sorted sample (1-based index i, n = batch_size):
    # w1[i] = 2i - 1, w2[i] = 2(n - i) + 1.
    w1 = 2 * tf.range(1, batch_size + 1, 1, dtype="float") - 1
    w2 = 2 * tf.range(batch_size - 1, -1, -1, dtype="float") + 1
    # A^2 = -n - (1/n) * sum_i [(2i-1) ln F(x_i) + (2(n-i)+1) ln(1-F(x_i))],
    # computed per latent dimension (reduction over axis 1).
    stat = -batch_size - tf.reduce_sum(w1 * ln_normal_cdf + \
        w2 * ln_one_normal_cdf, 1) / batch_size
    # stat is of shape (z_dim); collapse to a single scalar penalty.
    stat = tf.reduce_mean(tf.square(stat))
    return stat