# Requires TF1-era imports: `import tensorflow as tf` and
# `from tensorflow.contrib import slim` (for the summarize_* helpers below).
def build_elbo(self, n_samples, training=False):
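    """Build the Monte Carlo ELBO estimate over `n_samples` latent samples.

    Per sample, elbo = log p(x, z) - log q(z | x). The training graph exposes
    it as a loss; the evaluation graph derives log-likelihood estimates from it.
    """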
    cfg = self.config
    reuse = training  # reuse variables when building the training copy of the graph
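    # Draw z ~ q(z | x) and evaluate log q(z | x) and log p(x, z) at the samples.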
    z = self.variational.sample(self.data, n_samples=n_samples, reuse=reuse)
    log_q_z = self.variational.log_prob(z, reuse=reuse)
    self.log_q_z = log_q_z
    log_p_x_z = self.model.log_prob(self.data, z, reuse=reuse)
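    # Deterministic annealing: temper the log q term by an annealed magnitude,
    # clipped below at 1 so its weight never falls under the standard ELBO's.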
    if cfg['optim/deterministic_annealing'] and training:
        self.build_magnitude()
        tf.summary.scalar('c/magnitude', self.magnitude)
        magnitude = tf.maximum(1., self.magnitude)
        elbo = log_p_x_z - magnitude * log_q_z
    else:
        elbo = log_p_x_z - log_q_z
    if training:
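        # Training graph: keep the per-sample ELBO as the surrogate loss and
        # track the variance of the estimator across the sample dimension.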
        self.elbo_loss = elbo
        _, variance = tf.nn.moments(elbo, [0])
        self.elbo_variance = tf.reduce_mean(variance)
        self.log_q_z_loss = log_q_z
        self.variational.build_entropy(z)
        self.q_z_sample = z
        slim.summarize_collection('variational')
        slim.summarize_collection('model')
        slim.summarize_activations('variational')
        slim.summarize_activations('model')
    else:
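        # Evaluation graph: importance-sampled estimate of the marginal likelihood,
        # log p_hat(x) = logsumexp_s [log p(x, z_s) - log q(z_s | x)] - log S,
        # with S = cfg['q/n_samples_stats'] samples.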
        self.elbo = elbo
        self.log_q_z = log_q_z
        self.log_p_x_hat = (tf.reduce_logsumexp(elbo, [0], keep_dims=True) -
                            tf.log(float(cfg['q/n_samples_stats'])))
        tf.summary.scalar('o/log_p_x_hat', tf.reduce_mean(self.log_p_x_hat))
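    # Average over the sample axis, then sum over the remaining (batch) axes.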
    def sum_mean(x):
        return tf.reduce_sum(tf.reduce_mean(x, 0))

    self.elbo_sum = sum_mean(elbo)
    self.q_entropy = -sum_mean(log_q_z)
    self.E_log_lik = sum_mean(log_p_x_z)
    tf.summary.scalar('o/elbo_sum', self.elbo_sum)
    tf.summary.scalar('o/elbo_mean', self.elbo_sum / cfg['batch_size'])
    tf.summary.scalar('o/E_log_q_z', -self.q_entropy)
    tf.summary.scalar('o/E_log_p_x_z', self.E_log_lik)
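
# A hypothetical usage sketch (the surrounding object and the 'q/n_samples'
# config key are assumptions based on this snippet, not confirmed by it).
# Since reuse=training, the evaluation graph is built first so that the
# training graph can reuse its variables:
#   model.build_elbo(n_samples=cfg['q/n_samples_stats'])            # evaluation graph
#   model.build_elbo(n_samples=cfg['q/n_samples'], training=True)   # training graph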