import math
import chainer.functions as F

def gaussian_kl_divergence_keepbatch(self, mean, ln_var):
    # KL(N(mean, var) || N(0, I)), summed over latent dimensions,
    # one value per batch element.
    var = F.exp(ln_var)
    kld = F.sum(mean * mean + var - ln_var - 1, axis=1) * 0.5
    return kld
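A minimal usage sketch (assuming Chainer is installed; the method is exercised here as free-standing code): when the posterior equals the standard normal prior (mean = 0, ln_var = 0), the KL term is exactly zero.

import numpy as np
import chainer.functions as F

# Hypothetical check: KL(q || N(0, I)) per batch row, for q = N(0, I).
mean = np.zeros((2, 4), dtype=np.float32)
ln_var = np.zeros((2, 4), dtype=np.float32)
var = F.exp(ln_var)
kld = F.sum(mean ** 2 + var - ln_var - 1, axis=1) * 0.5
print(kld.data)  # [0. 0.] -- zero KL when q matches the prior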
def log_pz(self, z, mean, ln_var, test=False):
    if self.type_pz == "gaussianmarg":
        # Closed form of the expectation E_q[log p(z)]:
        # \int q(z) log p(z) dz = -(J/2) log(2*pi) - (1/2) * sum_{j=1}^{J} (mu_j^2 + var_j)
        # See Appendix B of "Auto-Encoding Variational Bayes" (http://arxiv.org/abs/1312.6114)
        log_pz = -0.5 * (math.log(2.0 * math.pi) + mean * mean + F.exp(ln_var))
    elif self.type_pz == "gaussian":
        # Log-density of a standard normal prior evaluated at the sample z.
        log_pz = -0.5 * math.log(2.0 * math.pi) - 0.5 * z ** 2
    else:
        raise ValueError("unknown type_pz: {}".format(self.type_pz))
    return F.sum(log_pz, axis=1)
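A hedged sanity check in plain NumPy (the parameter values are illustrative): the "gaussianmarg" branch is E_q[log p(z)] in closed form, so a Monte Carlo average of the "gaussian" branch over samples z ~ q should approach it.

import numpy as np

mu, lv = 0.7, np.log(0.5)          # example posterior mean and log-variance
closed = -0.5 * (np.log(2 * np.pi) + mu ** 2 + np.exp(lv))
z = np.random.normal(mu, np.exp(0.5 * lv), size=100000)
mc = np.mean(-0.5 * np.log(2 * np.pi) - 0.5 * z ** 2)
print(closed, mc)  # the two estimates should agree to about 2 decimal places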
# this will not be used
def gaussian_nll_keepbatch(self, x, mean, ln_var):
    # Negative log-likelihood of x under N(mean, var), summed over features,
    # one value per batch element.
    x_prec = F.exp(-ln_var)  # precision = 1 / var
    x_diff = x - mean
    x_power = x_diff ** 2 * x_prec * 0.5
    return F.sum((math.log(2.0 * math.pi) + ln_var) * 0.5 + x_power, axis=1)
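As a sketch (assuming SciPy is available), the per-element term can be cross-checked against scipy.stats.norm.logpdf, of which it is exactly the negative:

import math
import numpy as np
from scipy.stats import norm

x = np.array([0.3, -1.2])
mean, ln_var = 0.0, math.log(2.0)  # illustrative parameters, var = 2
x_power = (x - mean) ** 2 * math.exp(-ln_var) * 0.5
nll = (math.log(2.0 * math.pi) + ln_var) * 0.5 + x_power
print(nll, -norm.logpdf(x, loc=mean, scale=math.sqrt(2.0)))  # should match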
From predictive_autoencoder.py in the project Multitask-and-Transfer-Learning (author: AI-ON):
def normalize_2d(x):
    # Spatial softmax over the first batch element: exponentiate, then divide
    # each channel map by its total so every 160x210 map sums to 1.
    exp = F.exp(x[0])
    sums = F.sum(F.sum(exp, axis=-1), axis=-1)
    expanded = F.expand_dims(F.expand_dims(sums, axis=-1), axis=-1)
    denominator = F.tile(expanded, (1, 160, 210))
    return exp / denominator
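A usage sketch (assumes Chainer; the 160x210 spatial shape follows the hard-coded tile above): after normalization each channel map sums to 1.

import numpy as np
import chainer.functions as F

x = np.random.randn(1, 3, 160, 210).astype(np.float32)
y = normalize_2d(x)
print(F.sum(F.sum(y, axis=-1), axis=-1).data)  # roughly [1. 1. 1.]

Note that, like any softmax written this way, F.exp can overflow for large activations; a common variant subtracts the per-map maximum before exponentiating, which leaves the result unchanged.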