import tensorflow as tf


def kl_gaussian(mean_, logsigma,
                prior_mean=0., prior_logsigma=0.,
                regularizer_scale=1.):
    ''' KL-divergence between two Gaussians.

    Useful for Variational AutoEncoders. Use this as an activation regularizer.

    Parameters
    ----------
    mean_, logsigma: parameters of the input distribution
    prior_mean, prior_logsigma: parameters of the desired (prior) distribution
        (note the log on logsigma: these are exponentiated inside, i.e. they
        are log-variances)
    regularizer_scale: rescales the regularization cost. Keep this at 1 for
        most cases.

    Note
    ----
    Original implementation from seya:
    https://github.com/Philip-Bachman/ICML-2015/blob/master/LogPDFs.py
    Copyright (c) Philip Bachman
    '''
    # Closed-form KL between diagonal Gaussians parameterized by mean and
    # log-variance:
    #   KL(q || p) = 0.5 * (log var_p - log var_q
    #                       + (var_q + (mean_q - mean_p)^2) / var_p - 1)
    gauss_klds = 0.5 * (prior_logsigma - logsigma +
                        ((tf.exp(logsigma) + tf.square(mean_ - prior_mean))
                         / tf.exp(prior_logsigma)) - 1.0)
    # Average over all elements and rescale to get a scalar regularization cost.
    return regularizer_scale * tf.reduce_mean(gauss_klds)
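# A minimal usage sketch, assuming TF 2.x eager execution. z_mean and
# z_logvar are hypothetical stand-ins for encoder outputs: the means and
# log-variances of a diagonal Gaussian posterior q(z|x), regularized toward
# the default N(0, 1) prior.
batch_size, latent_dim = 32, 16
z_mean = tf.random.normal([batch_size, latent_dim])    # posterior means
z_logvar = tf.random.normal([batch_size, latent_dim])  # posterior log-variances

# In a VAE, add this KL term to the reconstruction loss.
kl_loss = kl_gaussian(z_mean, z_logvar)
print(float(kl_loss))  # scalar regularization cost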