convvae.py source code

Language: Python

Project: deep-learning-models    Author: kuleshov
# module-level imports assumed by this snippet (create_objectives is a
# method of the project's VAE model class; log_normal2 is a Gaussian
# log-density helper defined elsewhere in the project)
import numpy as np
import theano.tensor as T
import lasagne

def create_objectives(self, deterministic=False):
    """ELBO objective with the analytic expectation trick"""
    # load network input
    X = self.inputs[0]

    # load network output
    if self.model == 'bernoulli':
      q_mu, q_logsigma, sample, _ \
          = lasagne.layers.get_output(self.network[2:], deterministic=deterministic)
    elif self.model in ('gaussian', 'svhn'):
      p_mu, p_logsigma, q_mu, q_logsigma, _, _ \
          = lasagne.layers.get_output(self.network, deterministic=deterministic)

    # first term of the ELBO: the (negative) KL divergence between the
    # posterior q(z|x) and the standard normal prior, in closed form;
    # the exponent is clipped at 50 to avoid overflow in T.exp
    kl_div = 0.5 * T.sum(1 + 2*q_logsigma - T.sqr(q_mu)
                         - T.exp(2 * T.minimum(q_logsigma, 50)), axis=1).mean()

    # second term: log-likelihood of the data under the model
    if self.model == 'bernoulli':
      # the Bernoulli log-likelihood is the negative binary cross-entropy
      logpxz = -lasagne.objectives.binary_crossentropy(sample, X.flatten(2)).sum(axis=1).mean()
    elif self.model in ('gaussian', 'svhn'):
      # def log_lik(x, mu, log_sig):
      #     return T.sum(-(np.float32(0.5 * np.log(2 * np.pi)) + log_sig)
      #                   - 0.5 * T.sqr(x - mu) / T.exp(2 * log_sig), axis=1)
      # logpxz = log_lik(X.flatten(2), p_mu, p_logsigma).mean()
      logpxz = log_normal2(X.flatten(2), p_mu, p_logsigma).sum(axis=1).mean()

    # the loss is the negative ELBO
    loss = -1 * (logpxz + kl_div)

    # we don't use the separate accuracy metric right now,
    # so return the actual (non-negative) KL divergence in its place
    return loss, -kl_div
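
For reference, the "analytic expectation trick" mentioned in the docstring is the closed-form KL divergence between the diagonal Gaussian posterior q(z|x) = N(mu, sigma^2) and the standard normal prior p(z) = N(0, I):

D_{KL}\big(q(z \mid x)\,\|\,p(z)\big) = -\frac{1}{2}\sum_j \left(1 + 2\log\sigma_j - \mu_j^2 - \sigma_j^2\right)

so the kl_div variable above holds the negative of this quantity, and loss = -(logpxz + kl_div) is the negative ELBO.

As a quick sanity check of that closed form, here is a standalone NumPy sketch (not part of the project; mu, logsigma, and the sample count are arbitrary) comparing it against a Monte Carlo estimate of the same KL:

import numpy as np

rng = np.random.default_rng(0)
mu, logsigma = 0.7, -0.3                # arbitrary posterior parameters
sigma = np.exp(logsigma)

# closed form: -0.5 * (1 + 2*log(sigma) - mu^2 - sigma^2)
kl_closed = -0.5 * (1 + 2 * logsigma - mu**2 - sigma**2)

# Monte Carlo estimate of E_q[log q(z) - log p(z)] with z ~ N(mu, sigma^2)
z = rng.normal(mu, sigma, size=1_000_000)
log_q = -0.5 * np.log(2 * np.pi) - logsigma - 0.5 * ((z - mu) / sigma) ** 2
log_p = -0.5 * np.log(2 * np.pi) - 0.5 * z ** 2
kl_mc = np.mean(log_q - log_p)

print(kl_closed, kl_mc)                 # the two values should agree closely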