def loss(self, x, y):
    """Build the VAE training losses (graph-mode TensorFlow).

    Encodes `x` to a Gaussian posterior, draws a reparameterized sample,
    decodes it conditioned on `y`, and returns the two ELBO terms.

    Args:
        x: input tensor, shape [s, b, c]  (per the original note; verify against caller)
        y: conditioning labels, shape [s, b]

    Returns:
        dict of scalar loss tensors:
            'L_x': negative expected log-likelihood E[log p(x | z, y)]
                   (reconstruction term, to be minimized)
            'L_z': KL divergence D_KL(q(z|x) || N(0, I))
    """
    # Posterior parameters (mean, log-variance), reparameterized sample, reconstruction.
    z_mu, z_lv = self._encode(x, is_training=self.is_training)
    z = GaussianSampleLayer(z_mu, z_lv)
    xh = self._decode(z, y, is_training=self.is_training)

    with tf.name_scope('loss'):
        with tf.name_scope('E_log_p_x_zy'):
            # Gaussian log-density of x under mean xh with log-variance 0
            # (i.e. unit-variance likelihood); negated so it is a loss.
            L_x = -1.0 * tf.reduce_mean(
                GaussianLogDensity(x, xh, tf.zeros_like(x)),
            )
        with tf.name_scope('D_KL_z'):
            # KL(q(z|x) || p(z)) with a standard-normal prior
            # (zero mean, zero log-variance).
            L_z = tf.reduce_mean(
                GaussianKLD(
                    z_mu, z_lv,
                    tf.zeros_like(z_mu), tf.zeros_like(z_lv),
                )
            )
        loss = {
            'L_x': L_x,
            'L_z': L_z,
        }

        # NOTE(review): summaries assumed to live inside the 'loss' name scope
        # (tags become 'loss/L_x', 'loss/L_z') — indentation was lost in the
        # original paste; confirm against the upstream repository.
        tf.summary.scalar('L_x', L_x)
        tf.summary.scalar('L_z', L_z)

    return loss
# NOTE(review): web-page navigation residue from the source this was copied
# from, preserved as a comment so the file parses: "评论列表" (comment list),
# "文章目录" (article table of contents).