def create_objectives(self, deterministic=False):
"""ELBO objective with the analytic expectation trick"""
    # load network input
    X = self.inputs[0]
    # load network outputs
    if self.model == 'bernoulli':
        q_mu, q_logsigma, sample, _ \
            = lasagne.layers.get_output(self.network[2:], deterministic=deterministic)
    elif self.model in ('gaussian', 'svhn'):
        p_mu, p_logsigma, q_mu, q_logsigma, _, _ \
            = lasagne.layers.get_output(self.network, deterministic=deterministic)
    # first ELBO term: the *negative* KL divergence between q(z|x) and the
    # standard normal prior, in closed form; the min(., 50) clip keeps the
    # exp() from overflowing for large log-sigmas
    kl_div = 0.5 * T.sum(1 + 2 * q_logsigma - T.sqr(q_mu)
                         - T.exp(2 * T.minimum(q_logsigma, 50)), axis=1).mean()
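    # For a diagonal Gaussian posterior q(z|x) = N(mu, sigma^2 I) and prior
    # p(z) = N(0, I), the standard closed form is
    #   -KL(q || p) = 0.5 * sum_j (1 + log sigma_j^2 - mu_j^2 - sigma_j^2),
    # which is exactly the expression above with q_logsigma = log sigma.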
    # second term: log-likelihood of the data under the model
    if self.model == 'bernoulli':
        logpxz = -lasagne.objectives.binary_crossentropy(sample, X.flatten(2)).sum(axis=1).mean()
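        # note: the negated binary cross-entropy is exactly the Bernoulli
        # log-likelihood, sum_i [x_i * log(p_i) + (1 - x_i) * log(1 - p_i)]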
    elif self.model in ('gaussian', 'svhn'):
        # equivalent hand-written form of log_normal2, kept for reference:
        #   def log_lik(x, mu, log_sig):
        #       return T.sum(-(np.float32(0.5 * np.log(2 * np.pi)) + log_sig)
        #                    - 0.5 * T.sqr(x - mu) / T.exp(2 * log_sig), axis=1)
        #   logpxz = log_lik(X.flatten(2), p_mu, p_logsigma).mean()
        logpxz = log_normal2(X.flatten(2), p_mu, p_logsigma).sum(axis=1).mean()
    # minimize the negative ELBO
    loss = -1 * (logpxz + kl_div)
    # we don't use a separate accuracy metric right now; the second return
    # value is the (positive) KL term instead
    return loss, -kl_div
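
# --- A standalone sanity check (illustrative sketch, not part of the original
# class): the closed-form negative KL above should agree with a Monte Carlo
# estimate of E_q[log p(z) - log q(z)]. Plain NumPy; all names below are
# assumptions, not part of the original code.
import numpy as np

def neg_kl_closed_form(q_mu, q_logsigma):
    # -KL(N(mu, sigma^2 I) || N(0, I)), summed over latent dimensions
    return 0.5 * np.sum(1 + 2 * q_logsigma - q_mu ** 2 - np.exp(2 * q_logsigma), axis=1)

def neg_kl_monte_carlo(q_mu, q_logsigma, n_samples=200000, seed=0):
    # estimate E_q[log p(z) - log q(z)] by sampling z ~ q = N(mu, sigma^2 I)
    rng = np.random.default_rng(seed)
    sigma = np.exp(q_logsigma)
    z = q_mu + sigma * rng.standard_normal((n_samples,) + q_mu.shape)
    log_q = -0.5 * np.log(2 * np.pi) - q_logsigma - 0.5 * ((z - q_mu) / sigma) ** 2
    log_p = -0.5 * np.log(2 * np.pi) - 0.5 * z ** 2
    return (log_p - log_q).sum(axis=-1).mean(axis=0)

# e.g. for a batch of 4 posteriors over an 8-dimensional latent space:
#   mu, ls = np.random.randn(4, 8), 0.1 * np.random.randn(4, 8)
#   neg_kl_closed_form(mu, ls) and neg_kl_monte_carlo(mu, ls) agree to ~1e-2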