Example source code for Python's log()

vae_m2.py (project: variational-autoencoder, author: musyoku)
def log_pz(self, z, mean, ln_var, test=False):
        if self.type_pz == "gaussianmarg":
            # \int q(z)logp(z)dz = -(J/2)*log2pi - (1/2)*sum_{j=1}^{J} (mu^2 + var)
            # See Appendix B [Auto-Encoding Variational Bayes](http://arxiv.org/abs/1312.6114)
            log_pz = -0.5 * (math.log(2.0 * math.pi) + mean * mean + F.exp(ln_var))
        elif self.type_pz == "gaussian":
            log_pz = -0.5 * math.log(2.0 * math.pi) - 0.5 * z ** 2
        return F.sum(log_pz, axis=1)

    # this will not be used
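
The "gaussianmarg" branch relies on the closed form from Appendix B of the VAE paper. As a quick sanity check (standalone NumPy, not part of the original class), the following sketch compares that closed form for E_q[log p(z)] against a Monte Carlo estimate:

import numpy as np

# Monte Carlo check of the closed form used in the "gaussianmarg" branch:
#   E_{q(z|x)}[log p(z)] = -(1/2) * sum_j (log 2*pi + mu_j^2 + sigma_j^2)
# for q = N(mu, diag(sigma^2)) and a standard normal prior p(z) = N(0, I).
rng = np.random.default_rng(0)
J = 4
mu = rng.normal(size=J)
ln_var = rng.normal(scale=0.5, size=J)

closed_form = -0.5 * np.sum(np.log(2.0 * np.pi) + mu ** 2 + np.exp(ln_var))

z = mu + np.exp(0.5 * ln_var) * rng.normal(size=(200000, J))  # samples from q
log_pz = -0.5 * np.sum(np.log(2.0 * np.pi) + z ** 2, axis=1)  # log N(z; 0, I)
print(closed_form, log_pz.mean())  # the two values should agree closely
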
vae_m2.py (project: variational-autoencoder, author: musyoku)
def log_qz_xy(self, z, mean, ln_var, test=False):
        if self.type_qz == "gaussianmarg":
            # \int q(z)logq(z)dz = -(J/2)*log2pi - (1/2)*sum_{j=1}^{J} (1 + logvar)
            # See Appendix B [Auto-Encoding Variational Bayes](http://arxiv.org/abs/1312.6114)
            log_qz_xy = -0.5 * F.sum((math.log(2.0 * math.pi) + 1 + ln_var), axis=1)
        elif self.type_qz == "gaussian":
            log_qz_xy = -self.gaussian_nll_keepbatch(z, mean, ln_var)
        return log_qz_xy
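
The "gaussianmarg" branch here is the closed-form negative entropy of a diagonal Gaussian. A minimal standalone NumPy sketch (again, not the author's code) checking it against sampling:

import numpy as np

# Monte Carlo check of the negative-entropy identity used above:
#   E_{q(z|x)}[log q(z|x)] = -(1/2) * sum_j (log 2*pi + 1 + log sigma_j^2)
rng = np.random.default_rng(1)
J = 4
mu = rng.normal(size=J)
ln_var = rng.normal(scale=0.5, size=J)

closed_form = -0.5 * np.sum(np.log(2.0 * np.pi) + 1.0 + ln_var)

z = mu + np.exp(0.5 * ln_var) * rng.normal(size=(200000, J))
# log density of the diagonal Gaussian q evaluated at its own samples
log_qz = -0.5 * np.sum(np.log(2.0 * np.pi) + ln_var + (z - mu) ** 2 / np.exp(ln_var), axis=1)
print(closed_form, log_qz.mean())  # should agree closely
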
vae_m2.py (project: variational-autoencoder, author: musyoku)
def forward_one_step(self, x, y, test=False, apply_f=True):
        f = activations[self.activation_function]

        if self.apply_batchnorm_to_input:
            if self.batchnorm_before_activation:
                merged_input = f(self.batchnorm_merge(self.layer_merge_x(x) + self.layer_merge_y(y), test=test))
            else:
                merged_input = f(self.layer_merge_x(self.batchnorm_merge(x, test=test)) + self.layer_merge_y(y))
        else:
            merged_input = f(self.layer_merge_x(x) + self.layer_merge_y(y))

        chain = [merged_input]

        # Hidden layers: optionally apply batch normalization before or after
        # each linear layer, then the activation, then (optionally) dropout.
        for i in range(self.n_layers):
            u = chain[-1]
            if self.batchnorm_before_activation:
                u = getattr(self, "layer_%d" % i)(u)
            if self.apply_batchnorm:
                u = getattr(self, "batchnorm_%d" % i)(u, test=test)
            if not self.batchnorm_before_activation:
                u = getattr(self, "layer_%d" % i)(u)
            output = f(u)
            if self.apply_dropout:
                output = F.dropout(output, train=not test)
            chain.append(output)

        # Output layer: the network produces the Gaussian parameters of
        # q(z|x, y): the mean and ln_var = log(sigma^2).
        u = chain[-1]
        mean = self.layer_output_mean(u)
        ln_var = self.layer_output_var(u)

        return mean, ln_var
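
forward_one_step returns the parameters of the approximate posterior; downstream these are typically combined with the reparameterization trick, z = mean + exp(ln_var / 2) * eps with eps ~ N(0, I) (in Chainer, F.gaussian(mean, ln_var) computes exactly this). A minimal standalone NumPy sketch of that sampling step:

import numpy as np

# Reparameterization trick: z = mean + sigma * eps, eps ~ N(0, I),
# so the sample stays differentiable w.r.t. mean and ln_var.
def sample_z(mean, ln_var, rng):
    eps = rng.standard_normal(mean.shape)
    return mean + np.exp(0.5 * ln_var) * eps

rng = np.random.default_rng(0)
mean = np.zeros((8, 2))    # stand-ins for the values returned by forward_one_step
ln_var = np.zeros((8, 2))
z = sample_z(mean, ln_var, rng)  # one latent sample per row of the batch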

