def eval_reg(log_sigma2, W):
    """Approximate the negative KL divergence for variational dropout.

    Implements the polynomial approximation of -KL(q || p) from eqn 14 of
    Molchanov et al., "Variational Dropout Sparsifies Deep Neural Networks"
    (ICML 2017). The returned value is the regularization term added to the
    loss (see eqn 3).

    Args:
        log_sigma2: Tensor of log noise variances, same shape as `W`.
        W: Tensor of weight means.

    Returns:
        Scalar tensor: minus the sum over all weights of the per-weight
        KL approximation.
    """
    # Polynomial-fit constants from eqn 14 of the paper.
    k1, k2, k3 = 0.63576, 1.8732, 1.48695
    C = -k1
    # log_alpha = log(sigma^2) - log(W^2). The 1e-8 epsilon guards against
    # log(0) -> -inf (and NaN gradients) when a weight is exactly zero,
    # matching the authors' reference implementation.
    # `clip` is defined elsewhere in this file; presumably it bounds
    # log_alpha to a finite range for numerical stability — confirm there.
    log_alpha = clip(log_sigma2 - tf.log(W**2 + 1e-8))
    # Per-weight approximation of -KL; log1p(exp(-x)) is the softplus of
    # -log_alpha, computed via tf.log1p for accuracy at small values.
    mdkl = (k1 * tf.nn.sigmoid(k2 + k3 * log_alpha)
            - 0.5 * tf.log1p(tf.exp(-log_alpha)) + C)
    return -tf.reduce_sum(mdkl)
# NOTE(review): the two lines below ("评论列表" = "comment list",
# "文章目录" = "article table of contents") were blog-page navigation
# residue accidentally pasted in with the code; commented out because a
# bare identifier here would raise NameError at import time.
# 评论列表
# 文章目录