Example source code for the Python function `model_variable()`

Source file: unrolled_gan.py — project: unrolled-GAN — author: Zardinality — views: 29, favorites: 0, likes: 0, comments: 0
def generator(z):
    """Map a latent batch `z` to 32x32 single-channel images in [-1, 1].

    The fully-connected layer and the final output bias are built by hand
    (via slim.model_variable) rather than with tf.contrib.layers, because
    TensorFlow cannot take higher-order derivatives of bias_add — which
    unrolled-GAN training needs.  The same applies to the output conv bias.
    """
    fc_w = slim.model_variable(
        'fn_weights', shape=(FLAGS.z_dim, 4 * 4 * 512),
        initializer=ly.xavier_initializer())
    fc_b = slim.model_variable(
        'fn_bias', shape=(4 * 4 * 512, ), initializer=tf.zeros_initializer)
    net = tf.nn.relu(ly.batch_norm(fully_connected(z, fc_w, fc_b)))
    net = tf.reshape(net, (-1, 4, 4, 512))
    # Three stride-2 upsampling stages: 4x4 -> 8x8 -> 16x16 -> 32x32.
    for depth in (256, 128, 64):
        net = ly.conv2d_transpose(net, depth, 3, stride=2,
                                  activation_fn=tf.nn.relu,
                                  normalizer_fn=ly.batch_norm,
                                  padding='SAME')
    # Final 1-channel projection; the layer's own bias is disabled
    # (biases_initializer=None) and added manually below instead.
    net = ly.conv2d_transpose(net, 1, 3, stride=1, activation_fn=None,
                              padding='SAME', biases_initializer=None)
    out_b = slim.model_variable('bias', shape=(
        1, ), initializer=tf.zeros_initializer)
    return tf.nn.tanh(net + out_b)
Source file: generate_from_ckpt.py — project: unrolled-GAN — author: Zardinality — views: 23, favorites: 0, likes: 0, comments: 0
def generator(z):
    """Map a latent batch `z` to 32x32 single-channel images in [-1, 1].

    Variable names ('fn_weights', 'fn_bias', 'bias') match the training
    graph so a checkpoint saved during training can be restored here —
    TODO confirm against the training script's scoping.
    """
    fc_w = slim.model_variable(
        'fn_weights', shape=(FLAGS.z_dim, 4 * 4 * 512),
        initializer=ly.xavier_initializer())
    fc_b = slim.model_variable(
        'fn_bias', shape=(4 * 4 * 512, ), initializer=tf.zeros_initializer)
    net = tf.nn.relu(fully_connected(z, fc_w, fc_b))
    net = tf.reshape(net, (-1, 4, 4, 512))
    # All deconv kernels share the same N(0, 0.02) initialization (DCGAN-style).
    w_init = tf.random_normal_initializer(0, 0.02)
    # Three stride-2 upsampling stages: 4x4 -> 8x8 -> 16x16 -> 32x32.
    for depth in (256, 128, 64):
        net = ly.conv2d_transpose(net, depth, 3, stride=2,
                                  activation_fn=tf.nn.relu,
                                  normalizer_fn=ly.batch_norm,
                                  padding='SAME',
                                  weights_initializer=w_init)
    # Final 1-channel projection; its bias is added manually below.
    net = ly.conv2d_transpose(net, 1, 3, stride=1, activation_fn=None,
                              padding='SAME', weights_initializer=w_init,
                              biases_initializer=None)
    out_b = slim.model_variable('bias', shape=(
        1, ), initializer=tf.zeros_initializer)
    return tf.nn.tanh(net + out_b)
Source file: unrolled_gan.py — project: unrolled-GAN — author: Zardinality — views: 26, favorites: 0, likes: 0, comments: 0
def discriminator(img, name, target):
    """Score a stacked batch of fake and real images and compute the D loss.

    Assumes `img` holds FLAGS.batch_size generated images followed by
    FLAGS.batch_size real images along axis 0 (the slicing below depends
    on this ordering).  `target` is unused but kept for caller
    compatibility.

    Args:
        img: image batch of shape (2 * FLAGS.batch_size, H, W, C).
        name: variable scope name for this discriminator's parameters.
        target: unused; retained for interface compatibility.

    Returns:
        f: scalar discriminator loss (sum of fake and real terms).
        logit: raw logits for the full stacked batch.
        d_loss_true: mean cross-entropy on the real half.
        d_loss_fake: mean cross-entropy on the fake half.
    """
    size = 64
    with tf.variable_scope(name):
        # Three stride-2 conv stages (64 -> 128 -> 256 channels) with
        # batch norm and leaky ReLU.
        for depth in (size, size * 2, size * 4):
            img = ly.conv2d(img, num_outputs=depth, kernel_size=3,
                            stride=2, activation_fn=lrelu,
                            normalizer_fn=ly.batch_norm)
        # Flatten.  Use FLAGS.batch_size for consistency with the slicing
        # below (the original mixed a bare `batch_size` global here).
        img = tf.reshape(img, (2 * FLAGS.batch_size, -1))
        weights = slim.model_variable(
            'weights', shape=[img.get_shape().as_list()[-1], 1],
            initializer=ly.xavier_initializer())
        bias = slim.model_variable(
            'bias', shape=(1,), initializer=tf.zeros_initializer)
        logit = fully_connected(img, weights, bias)
        fake_logit = logit[:FLAGS.batch_size]
        true_logit = logit[FLAGS.batch_size:]
        # TensorFlow >= 1.0 requires keyword arguments here; the old
        # positional (logits, labels) call raises an error.
        d_loss_fake = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
            logits=fake_logit, labels=tf.zeros_like(fake_logit)))
        d_loss_true = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
            logits=true_logit, labels=tf.ones_like(true_logit)))
        # Both terms are already scalars; reduce_mean of their sum is
        # kept from the original for graph compatibility.
        f = tf.reduce_mean(d_loss_fake + d_loss_true)

    return f, logit, d_loss_true, d_loss_fake


Questions


Interview experiences


Articles

WeChat
Official account

Scan the QR code to follow the official account