pot.py source code

Python

Project: adagan    Author: tolstikhin
def began_encoder(self, opts, input_, is_training=False, reuse=False, keep_prob=1.):
        num_units = opts['e_num_filters']
        assert num_units == opts['g_num_filters'], 'BEGAN requires same number of filters in encoder and decoder'
        num_layers = opts['e_num_layers']
        layer_x = ops.conv2d(opts, input_, num_units, scope='h_first_conv')
        for i in xrange(num_layers):
            if i % 3 < 2:
                if i != num_layers - 2:
                    ii = i - (i / 3)
                    scale = (ii + 1 - ii / 2)
                else:
                    ii = i - (i / 3)
                    scale = (ii - (ii - 1) / 2)
                layer_x = ops.conv2d(opts, layer_x, num_units * scale, d_h=1, d_w=1, scope='h%d_conv' % i)
                layer_x = tf.nn.elu(layer_x)
            else:
                if i != num_layers - 1:
                    layer_x = ops.downsample(layer_x, scope='h%d_maxpool' % i, reuse=reuse)
        # Tensor should be [N, 8, 8, filters] right now

        if opts['e_is_random']:
            latent_mean = ops.linear(
                opts, layer_x, opts['latent_space_dim'], scope='hlast_lin')
            log_latent_sigmas = ops.linear(
                opts, layer_x, opts['latent_space_dim'], scope='hlast_lin_sigma')
            return latent_mean, log_latent_sigmas
        else:
            return ops.linear(opts, layer_x, opts['latent_space_dim'], scope='hlast_lin')
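The index arithmetic that decides each layer's filter multiplier is easy to misread, so the standalone snippet below replays it for inspection. It is not part of pot.py: `began_encoder_schedule` is a hypothetical helper, the `num_layers` value is just an illustration, and `//` is used to mirror the integer division that Python 2's `/` performs in the original loop.

    def began_encoder_schedule(num_layers):
        """Replay the layer schedule of began_encoder for inspection.

        Returns a list of (layer_index, kind, filter_scale) tuples, where kind is
        'conv' or 'downsample' and filter_scale multiplies opts['e_num_filters'].
        """
        schedule = []
        for i in range(num_layers):
            if i % 3 < 2:
                # Same arithmetic as the encoder loop, with explicit floor division.
                ii = i - i // 3
                if i != num_layers - 2:
                    scale = ii + 1 - ii // 2
                else:
                    scale = ii - (ii - 1) // 2
                schedule.append((i, 'conv', scale))
            elif i != num_layers - 1:
                # Every third layer downsamples, except the very last one.
                schedule.append((i, 'downsample', None))
        return schedule


    if __name__ == '__main__':
        # Example with 6 encoder layers (opts['e_num_layers'] is configuration-
        # dependent; 6 is only an illustration).
        for entry in began_encoder_schedule(6):
            print(entry)

Printing the schedule makes the structure visible: two stride-1 convolutions, then a downsampling step, repeated until the feature map reaches the expected [N, 8, 8, filters] shape noted in the comment above.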