Python batch_norm() usage examples (source code)
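All of the snippets below revolve around `tf.contrib.layers.batch_norm` (TensorFlow 1.x). As a reference point, here is a minimal, hedged sketch of the usual call pattern, including the `is_training` switch and the moving-average update ops the layer registers; the placeholder shapes are illustrative assumptions, not taken from any snippet below.

import tensorflow as tf
from tensorflow.contrib import layers

# Minimal sketch (TF 1.x); `x` and `is_training` are assumed placeholders.
x = tf.placeholder(tf.float32, [None, 64])
is_training = tf.placeholder(tf.bool, [])

h = layers.fully_connected(x, 128, activation_fn=None)
h = layers.batch_norm(h, center=True, scale=True, is_training=is_training)
h = tf.nn.relu(h)

# batch_norm registers moving-mean/variance updates in GraphKeys.UPDATE_OPS
# (unless updates_collections=None); they must run with the train op:
loss = tf.reduce_mean(tf.square(h))
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
    train_op = tf.train.AdamOptimizer(1e-3).minimize(loss)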

train_cifar_feature_matching_tf.py (project: Semi_Supervised_GAN, author: ChunyuanLI)
def generator(input_latent):
    # input_latent = Input(batch_shape=noise_dim, dtype=im_dtype)
    with tf.variable_scope('Net_Gen') as scope:
        weight_decay = 0.0001
        xx = layers.fully_connected(input_latent, num_outputs=4*4*512, activation_fn=None)
        xx = layers.batch_norm(xx)
        xx = tf.nn.relu(xx)
        xx = tf.reshape(xx, (batch_size, 4,4,512))
        xx = layers.conv2d_transpose(xx, 256, kernel_size=(5,5), stride=(2, 2), padding='SAME', activation_fn=None)
        xx = layers.batch_norm(xx)
        xx = tf.nn.relu(xx)
        xx = layers.conv2d_transpose(xx, 128, kernel_size=(5,5), stride=(2, 2), padding='SAME', activation_fn=None)
        xx = layers.batch_norm(xx)
        xx = tf.nn.relu(xx)    
        xx = layers.conv2d_transpose(xx, 3, kernel_size=(5,5), stride=(2, 2),  padding='SAME', activation_fn=None)
        xx = layers.batch_norm(xx)
        gen_dat = tf.nn.tanh(xx)

    return gen_dat     

# specify discriminative model
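Before moving on, a hedged sketch of how a generator like the one above is typically wired up; `batch_size` and the noise dimension are module-level constants in the original file, so the concrete numbers here are assumptions.

# Illustrative wiring only; the sizes are assumed, not from the file above.
batch_size, noise_dim = 100, 100
input_latent = tf.random_uniform([batch_size, noise_dim], minval=-1.0, maxval=1.0)
gen_dat = generator(input_latent)   # -> (batch_size, 32, 32, 3), values in [-1, 1]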
train_cifar_feature_matching_ali_tf.py (project: Semi_Supervised_GAN, author: ChunyuanLI)
def generator(input_latent):
    # input_latent = Input(batch_shape=noise_dim, dtype=im_dtype)
    with tf.variable_scope('Net_Gen') as scope:
        xx = layers.fully_connected(input_latent, num_outputs=4*4*512, activation_fn=None)
        xx = layers.batch_norm(xx)
        xx = tf.nn.relu(xx)
        xx = tf.reshape(xx, (batch_size, 4,4,512))
        xx = layers.conv2d_transpose(xx, 256, kernel_size=(5,5), stride=(2, 2), padding='SAME', activation_fn=None)
        xx = layers.batch_norm(xx)
        xx = tf.nn.relu(xx)
        xx = layers.conv2d_transpose(xx, 128, kernel_size=(5,5), stride=(2, 2), padding='SAME', activation_fn=None)
        xx = layers.batch_norm(xx)
        xx = tf.nn.relu(xx)    
        xx = layers.conv2d_transpose(xx, 3, kernel_size=(5,5), stride=(2, 2),  padding='SAME', activation_fn=None)
        xx = layers.batch_norm(xx)
        gen_dat = tf.nn.tanh(xx)

    return gen_dat
train_cifar_feature_matching_ali_tf.py (project: Semi_Supervised_GAN, author: ChunyuanLI)
def inference(input_img):
    # input_latent = Input(batch_shape=noise_dim, dtype=im_dtype)
    with tf.variable_scope('Net_Inf') as scope:
        xx = layers.convolution2d(input_img, 128, kernel_size=(5,5), stride=(2, 2), padding='SAME', activation_fn=None)
        xx = layers.batch_norm(xx)
        xx = tf.nn.relu(xx)
        xx = layers.convolution2d(xx, 256, kernel_size=(5,5), stride=(2, 2), padding='SAME', activation_fn=None)
        xx = layers.batch_norm(xx)
        xx = tf.nn.relu(xx)    
        xx = layers.convolution2d(xx, 512, kernel_size=(5,5), stride=(2, 2),  padding='SAME', activation_fn=None)
        xx = layers.batch_norm(xx)
        xx = tf.nn.relu(xx)  
        xx = layers.flatten(xx)
        xx = layers.fully_connected(xx, num_outputs=latent_size, activation_fn=None)
        xx = layers.batch_norm(xx)
        inf_latent = tf.nn.tanh(xx)
    return inf_latent

# specify discriminative model
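In the ALI variant, the generator and this inference network are trained as a pair; a hedged sketch of the joint wiring (the placeholder shape is an assumption, and `batch_size`/`latent_size` are module-level values in the original file):

# Illustrative only.
images = tf.placeholder(tf.float32, [batch_size, 32, 32, 3])
z_prior = tf.random_uniform([batch_size, latent_size], -1.0, 1.0)
x_fake = generator(z_prior)        # decoder: z -> x
z_inferred = inference(images)     # encoder: x -> z
# An ALI discriminator then scores (x, z) pairs from both directions.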
basic_resnet.py (project: various_residual_networks, author: yuhui-lin)
def BN_ReLU(self, net):
        """Batch Normalization and ReLU."""
        # 'gamma' (scale) is omitted because the next layer is ReLU,
        # which makes a learned scale redundant.
        net = batch_norm(net,
                         center=True,
                         scale=False,
                         activation_fn=tf.nn.relu)
        self._activation_summary(net)
        return net

def conv2d(self, net, num_ker, ker_size, stride):
        # 1D-convolution: the kernel and stride span only the first
        # spatial dimension.
        net = convolution2d(
            net,
            num_outputs=num_ker,
            kernel_size=[ker_size, 1],
            stride=[stride, 1],
            padding='SAME',
            activation_fn=None,
            normalizer_fn=None,
            weights_initializer=variance_scaling_initializer(),
            weights_regularizer=l2_regularizer(self.weight_decay),
            biases_initializer=tf.zeros_initializer)
        return net
train_mnist_feature_matching_ali_tf.py (project: Semi_Supervised_GAN, author: ChunyuanLI)
def generator(input_latent):
    # input_latent = Input(batch_shape=noise_dim, dtype=im_dtype)
    with tf.variable_scope('Net_Gen') as scope:
        xx = layers.fully_connected(input_latent, num_outputs=500, activation_fn=None)
        xx = layers.batch_norm(xx)
        xx = tf.nn.relu(xx)
        xx = layers.fully_connected(xx, num_outputs=500, activation_fn=None)
        xx = layers.batch_norm(xx)
        xx = tf.nn.relu(xx)
        xx = layers.fully_connected(xx, num_outputs=28**2, activation_fn=None)
        xx = layers.batch_norm(xx)
        gen_dat = tf.nn.sigmoid(xx)

    return gen_dat  

# specify inference model
train_mnist_feature_matching_tf.py (project: Semi_Supervised_GAN, author: ChunyuanLI)
def generator(input_latent):
    # input_latent = Input(batch_shape=noise_dim, dtype=im_dtype)
    with tf.variable_scope('Net_Gen') as scope:
        xx = layers.fully_connected(input_latent, num_outputs=500, activation_fn=None)
        xx = layers.batch_norm(xx)
        xx = tf.nn.relu(xx)
        xx = layers.fully_connected(xx, num_outputs=500, activation_fn=None)
        xx = layers.batch_norm(xx)
        xx = tf.nn.relu(xx)
        xx = layers.fully_connected(xx, num_outputs=28**2, activation_fn=None)
        xx = layers.batch_norm(xx)
        gen_dat = tf.nn.sigmoid(xx)

    return gen_dat

# specify discriminative model
bn_gru.py (project: website-fingerprinting, author: AxelGoetz)
def __call__(self, inputs, state, scope=None):
    """Gated recurrent unit (GRU) with nunits cells."""
    with _checked_scope(self, scope or "gru_cell"):
      with vs.variable_scope("gates"):  # Reset gate and update gate.
        # We start with bias of 1.0 to not reset and not update.
        value = sigmoid(_linear(
          [inputs, state], 2 * self._num_units, True, 1.0))
        r, u = array_ops.split(
            value=value,
            num_or_size_splits=2,
            axis=1)
      with vs.variable_scope("candidate"):
        res = self._activation(_linear([inputs, r * state],
                                     self._num_units, True))

        if self._batch_norm:
          c = batch_norm(res,
                         center=True, scale=True,
                         is_training=self._is_training,
                         scope='bn1')
        else:
          c = res

      new_h = u * state + (1 - u) * c
    return new_h, new_h
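The class wrapping this `__call__` is not shown; assuming a constructor along the lines of `BNGRUCell(num_units, is_training, batch_norm)` (a hypothetical signature), usage would look like:

# Hypothetical usage; the constructor signature and `inputs` are assumed.
cell = BNGRUCell(num_units=128, is_training=is_training, batch_norm=True)
outputs, final_state = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32)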
variational_dropout.py (project: zhusuan, author: thu-ml)
def var_dropout(observed, x, n, net_size, n_particles, is_training):
    with zs.BayesianNet(observed=observed) as model:
        h = x
        normalizer_params = {'is_training': is_training,
                             'updates_collections': None}
        for i, [n_in, n_out] in enumerate(zip(net_size[:-1], net_size[1:])):
            eps_mean = tf.ones([n, n_in])
            eps = zs.Normal(
                'layer' + str(i) + '/eps', eps_mean, std=1.,
                n_samples=n_particles, group_ndims=1)
            h = layers.fully_connected(
                h * eps, n_out, normalizer_fn=layers.batch_norm,
                normalizer_params=normalizer_params)
            if i < len(net_size) - 2:
                h = tf.nn.relu(h)
        y = zs.OnehotCategorical('y', h)
    return model, h
vae_conv.py (project: zhusuan, author: thu-ml)
def q_net(x, n_xl, n_z, n_particles, is_training):
    with zs.BayesianNet() as variational:
        normalizer_params = {'is_training': is_training,
                             'updates_collections': None}
        lz_x = tf.reshape(tf.to_float(x), [-1, n_xl, n_xl, 1])
        lz_x = layers.conv2d(
            lz_x, 32, kernel_size=5, stride=2,
            normalizer_fn=layers.batch_norm,
            normalizer_params=normalizer_params)
        lz_x = layers.conv2d(
            lz_x, 64, kernel_size=5, stride=2,
            normalizer_fn=layers.batch_norm,
            normalizer_params=normalizer_params)
        lz_x = layers.conv2d(
            lz_x, 128, kernel_size=5, padding='VALID',
            normalizer_fn=layers.batch_norm,
            normalizer_params=normalizer_params)
        lz_x = layers.dropout(lz_x, keep_prob=0.9, is_training=is_training)
        lz_x = tf.reshape(lz_x, [-1, 128 * 3 * 3])
        lz_mean = layers.fully_connected(lz_x, n_z, activation_fn=None)
        lz_logstd = layers.fully_connected(lz_x, n_z, activation_fn=None)
        z = zs.Normal('z', lz_mean, logstd=lz_logstd, n_samples=n_particles,
                      group_ndims=1)
    return variational
gait_nn.py (project: gait-recognition, author: marian-margeta)
def get_arg_scope(is_training):
        weight_decay_l2 = 0.1
        batch_norm_decay = 0.999
        batch_norm_epsilon = 0.0001

        with slim.arg_scope([slim.conv2d, slim.fully_connected, layers.separable_convolution2d],
                            weights_regularizer = slim.l2_regularizer(weight_decay_l2),
                            biases_regularizer = slim.l2_regularizer(weight_decay_l2),
                            weights_initializer = layers.variance_scaling_initializer(),
                            ):
            batch_norm_params = {
                'decay': batch_norm_decay,
                'epsilon': batch_norm_epsilon
            }
            with slim.arg_scope([slim.batch_norm, slim.dropout],
                                is_training = is_training):
                with slim.arg_scope([slim.batch_norm],
                                    **batch_norm_params):
                    with slim.arg_scope([slim.conv2d, layers.separable_convolution2d, layers.fully_connected],
                                        activation_fn = tf.nn.elu,
                                        normalizer_fn = slim.batch_norm,
                                        normalizer_params = batch_norm_params) as scope:
                        return scope
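A hedged usage sketch: the function returns a filled-in arg scope, which the caller re-enters when building the network (`images` is an assumed input tensor).

# Hedged usage sketch.
with slim.arg_scope(get_arg_scope(is_training=True)):
    net = slim.conv2d(images, 32, [3, 3], scope='conv1')  # gets elu + batch_norm
    net = slim.dropout(net, 0.5, scope='drop1')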
convnet.py (project: deep_unsupervised_posets, author: asanakoy)
def conv_relu(self, input_tensor, kernel_size, kernels_num, stride, batch_norm=True,
                  group=1, name=None):
        with tf.variable_scope(name) as scope:
            assert int(input_tensor.get_shape()[3]) % group == 0
            num_input_channels = int(input_tensor.get_shape()[3]) // group  # integer division: channels per group
            w, b = self.get_conv_weights(kernel_size, num_input_channels, kernels_num)
            conv = Convnet.conv(input_tensor, w, b, stride, padding="SAME", group=group)
            if batch_norm:
                conv = tf.cond(self.is_phase_train,
                               lambda: tflayers.batch_norm(conv,
                                                           decay=self.batch_norm_decay,
                                                           is_training=True,
                                                           trainable=True,
                                                           reuse=None,
                                                           scope=scope),
                               lambda: tflayers.batch_norm(conv,
                                                           decay=self.batch_norm_decay,
                                                           is_training=False,
                                                           trainable=True,
                                                           reuse=True,
                                                           scope=scope))
            conv = tf.nn.relu(conv, name=name)
        return conv
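The `tf.cond` idiom above builds two `batch_norm` calls in the same variable scope: the training branch creates the variables (`reuse=None`) and the inference branch reuses them (`reuse=True`), so both branches share parameters while differing only in how batch statistics are handled. A minimal standalone sketch of the same pattern (names are illustrative, not from the original file):

# Minimal sketch of the cond/reuse pattern; not the project's code.
def bn_train_or_test(x, is_phase_train, decay=0.9, scope_name='bn'):
    with tf.variable_scope(scope_name) as sc:
        return tf.cond(
            is_phase_train,
            lambda: tflayers.batch_norm(x, decay=decay, is_training=True,
                                        reuse=None, scope=sc),
            lambda: tflayers.batch_norm(x, decay=decay, is_training=False,
                                        reuse=True, scope=sc))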
ops.py (project: NAF-tensorflow, author: carpedm20)
def fc(layer, output_size, is_training, 
       weight_init, weight_reg=None, activation_fn=None, 
       use_batch_norm=False, scope='fc'):
  if use_batch_norm:
    batch_norm_args = {
      'normalizer_fn': batch_norm,
      'normalizer_params': {
        'is_training': is_training,
      }
    }
  else:
    batch_norm_args = {}

  with tf.variable_scope(scope):
    return fully_connected(
      layer,
      num_outputs=output_size,
      activation_fn=activation_fn,
      weights_initializer=weight_init,
      weights_regularizer=weight_reg,
      biases_initializer=tf.constant_initializer(0.0),
      scope=scope,
      **batch_norm_args
    )
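A hedged usage sketch; `inputs` and the initializer choice are assumptions, not from the file above.

# Hedged usage sketch.
h = fc(inputs, 200, is_training,
       weight_init=tf.contrib.layers.variance_scaling_initializer(),
       activation_fn=tf.nn.relu, use_batch_norm=True, scope='fc1')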
model.py (project: ste-GAN-ography2, author: bin2415)
def discriminator_stego_nn(self, img, batch_size, name):
        eve_input = self.image_processing_layer(img)
        eve_conv1 = convolution2d(eve_input, 64, kernel_size = [5, 5], stride = [2,2],
        activation_fn= tf.nn.relu, normalizer_fn = BatchNorm, scope = 'eve/' + name + '/conv1')

        eve_conv2 = convolution2d(eve_conv1, 64 * 2, kernel_size = [5, 5], stride = [2,2],
        activation_fn= tf.nn.relu, normalizer_fn = BatchNorm, scope = 'eve/' + name + '/conv2')

        eve_conv3 = convolution2d(eve_conv2, 64 * 4,kernel_size = [5, 5], stride = [2,2],
        activation_fn= tf.nn.relu, normalizer_fn = BatchNorm, scope = 'eve/' + name + '/conv3')

        eve_conv4 = convolution2d(eve_conv3, 64* 8, kernel_size = [5, 5], stride = [2,2],
        activation_fn= tf.nn.relu, normalizer_fn = BatchNorm, scope = 'eve/' + name + '/conv4')

        eve_conv4 = tf.reshape(eve_conv4, [batch_size, -1])

        #eve_fc = fully_connected(eve_conv4, 1, activation_fn = tf.nn.sigmoid, normalizer_fn = BatchNorm,
        #weights_initializer=tf.random_normal_initializer(stddev=1.0))
        eve_fc = fully_connected(eve_conv4, 1, normalizer_fn = BatchNorm, 
        weights_initializer=tf.random_normal_initializer(stddev=1.0), scope = 'eve' + name + '/final_fc')
        return eve_fc
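`BatchNorm` is not defined in this snippet; presumably it is an alias for the contrib batch-norm layer, e.g.:

# Assumed alias; the original file's actual import is not shown.
from tensorflow.contrib.layers import batch_norm as BatchNorm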
cluttered.py (project: information-dropout, author: ucla-vision)
def conv(self, inputs, num_outputs, activations, normalizer_fn = batch_norm, kernel_size=3, stride=1, scope=None):
        '''Creates a convolutional layer with default arguments'''
        if activations == 'relu':
            activation_fn = tf.nn.relu
        elif activations == 'softplus':
            activation_fn = tf.nn.softplus
        else:
            raise ValueError("Invalid activation function.")
        return conv2d( inputs = inputs,
            num_outputs = num_outputs,
            kernel_size = kernel_size,
            stride = stride,
            padding = 'SAME',
            activation_fn = activation_fn,
            normalizer_fn = normalizer_fn,  # use the caller-supplied normalizer
            scope=scope )
cifar.py (project: information-dropout, author: ucla-vision)
def conv(self, inputs, num_outputs, activations, normalizer_fn = batch_norm, kernel_size=3, stride=1, scope=None):
        '''Creates a convolutional layer with default arguments'''
        if activations == 'relu':
            activation_fn = tf.nn.relu
        elif activations == 'softplus':
            activation_fn = tf.nn.softplus
        else:
            raise ValueError("Invalid activation function.")
        return conv2d( inputs = inputs,
            num_outputs = num_outputs,
            kernel_size = kernel_size,
            stride = stride,
            padding = 'SAME',
            activation_fn = activation_fn,
            normalizer_fn = normalizer_fn,
            normalizer_params = {'is_training' : self.is_training, 'updates_collections': None, 'decay': 0.9},
            scope=scope )
ops.py (project: Mendelssohn, author: diggerdu)
def conv2d(input_, o_dim, k_size, st, name='conv2d'):
    '''
    with tf.variable_scope(name):
        init = ly.xavier_initializer_conv2d()
        output = ly.conv2d(input_, num_outputs=o_dim, kernel_size=k_size, stride=st,\
                activation_fn=tf.nn.relu, normalizer_fn=ly.batch_norm, padding='SAME',\
                weights_initializer=init)
        return output
    '''
    with tf.variable_scope(name):
        init = ly.xavier_initializer_conv2d()
        fil = tf.get_variable('co_f', k_size+\
                [ten_sh(input_)[-1], o_dim],initializer=init)
        co = tf.nn.conv2d(input_, fil, strides=[1]+st+[1], \
                padding='SAME')
        bia = tf.get_variable('co_b', [o_dim])
        co = tf.nn.bias_add(co, bia)
        return co
bn_ops.py (project: Mendelssohn, author: diggerdu)
def conv2d(input_, o_dim, k_size, st, name='conv2d'):
    with tf.variable_scope(name):
        init = ly.xavier_initializer_conv2d()
        output = ly.conv2d(input_, num_outputs=o_dim, kernel_size=k_size, stride=st,\
                activation_fn=tf.nn.relu, normalizer_fn=ly.batch_norm, padding='SAME',\
                weights_initializer=init)
        return output
    '''
    with tf.variable_scope(name):
        init = ly.xavier_initializer_conv2d()
        fil = tf.get_variable('co_f', k_size+\
                [ten_sh(input_)[-1], o_dim],initializer=init)
        co = tf.nn.conv2d(input_, fil, strides=[1]+st+[1], \
                padding='SAME')
        bia = tf.get_variable('co_b', [o_dim])
        co = tf.nn.bias_add(co, bia)
        return co
    '''
nets.py (project: GAN_Theories, author: YadiraF)
def __call__(self, x, reuse=False):
        with tf.variable_scope(self.name) as scope:
            if reuse:
                scope.reuse_variables()
            size = 64
            d = tcl.conv2d(x, num_outputs=size, kernel_size=3, # bzx64x64x3 -> bzx32x32x64
                        stride=2, activation_fn=lrelu, normalizer_fn=tcl.batch_norm, padding='SAME', weights_initializer=tf.random_normal_initializer(0, 0.02))
            d = tcl.conv2d(d, num_outputs=size * 2, kernel_size=3, # 16x16x128
                        stride=2, activation_fn=lrelu, normalizer_fn=tcl.batch_norm, padding='SAME', weights_initializer=tf.random_normal_initializer(0, 0.02))
            d = tcl.conv2d(d, num_outputs=size * 4, kernel_size=3, # 8x8x256
                        stride=2, activation_fn=lrelu, normalizer_fn=tcl.batch_norm, padding='SAME', weights_initializer=tf.random_normal_initializer(0, 0.02))
            d = tcl.conv2d(d, num_outputs=size * 8, kernel_size=3, # 4x4x512
                        stride=2, activation_fn=lrelu, normalizer_fn=tcl.batch_norm, padding='SAME', weights_initializer=tf.random_normal_initializer(0, 0.02))

            d = tcl.fully_connected(tcl.flatten(d), 256, activation_fn=lrelu, weights_initializer=tf.random_normal_initializer(0, 0.02))
            d = tcl.fully_connected(d, 1, activation_fn=None, weights_initializer=tf.random_normal_initializer(0, 0.02))

            return d
nets.py (project: GAN_Theories, author: YadiraF)
def __call__(self, x, reuse=False):
        with tf.variable_scope(self.name) as scope:
            if reuse:
                scope.reuse_variables()
            size = 64
            d = tcl.conv2d(x, num_outputs=size, kernel_size=3, # bzx64x64x3 -> bzx32x32x64
                        stride=2, activation_fn=lrelu, normalizer_fn=tcl.batch_norm, padding='SAME', weights_initializer=tf.random_normal_initializer(0, 0.02))
            d = tcl.conv2d(d, num_outputs=size * 2, kernel_size=3, # 16x16x128
                        stride=2, activation_fn=lrelu, normalizer_fn=tcl.batch_norm, padding='SAME', weights_initializer=tf.random_normal_initializer(0, 0.02))
            d = tcl.conv2d(d, num_outputs=size * 4, kernel_size=3, # 8x8x256
                        stride=2, activation_fn=lrelu, normalizer_fn=tcl.batch_norm, padding='SAME', weights_initializer=tf.random_normal_initializer(0, 0.02))
            d = tcl.conv2d(d, num_outputs=size * 8, kernel_size=3, # 4x4x512
                        stride=2, activation_fn=lrelu, normalizer_fn=tcl.batch_norm, padding='SAME', weights_initializer=tf.random_normal_initializer(0, 0.02))

            d = tcl.fully_connected(tcl.flatten(d), 256, activation_fn=lrelu, weights_initializer=tf.random_normal_initializer(0, 0.02))
            mu = tcl.fully_connected(d, 100, activation_fn=None, weights_initializer=tf.random_normal_initializer(0, 0.02))
            sigma = tcl.fully_connected(d, 100, activation_fn=None, weights_initializer=tf.random_normal_initializer(0, 0.02))

            return mu, sigma
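Downstream, the two heads are typically consumed with the reparameterization trick; a hedged sketch, assuming `sigma` is a log-standard-deviation (the snippet does not pin its semantics down):

# Illustrative only; treating sigma as log-std is an assumption.
mu, sigma = encoder(x)              # `encoder` is an instance of the class above
eps = tf.random_normal(tf.shape(mu))
z = mu + tf.exp(sigma) * eps        # reparameterized latent sample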
model.py (project: generating_people, author: classner)
def batchnorm(input, orig_graph, is_training):
    return tfl.batch_norm(
        input,
        decay=0.9,
        scale=True,
        epsilon=1E-5,
        activation_fn=None,
        param_initializers={
            'beta': get_val_or_initializer(orig_graph,
                                           tf.constant_initializer(0.),
                                           'BatchNorm/beta'),
            'gamma': get_val_or_initializer(orig_graph,
                                            tf.random_normal_initializer(1.0,
                                                                         0.02),
                                            'BatchNorm/gamma'),
            'moving_mean': get_val_or_initializer(orig_graph,
                                                  tf.constant_initializer(0.),
                                                  'BatchNorm/moving_mean'),
            'moving_variance': get_val_or_initializer(orig_graph,
                                                      tf.ones_initializer(),
                                                      'BatchNorm/moving_variance')
        },
        is_training=is_training,
        fused=True,  # new implementation with a fused kernel => speedup.
    )
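`get_val_or_initializer` is a project helper that is not shown; judging from its call sites, it plausibly returns the value captured from a previous graph when one exists, and the supplied fallback initializer otherwise. A hypothetical reconstruction:

# Hypothetical reconstruction, not the project's actual code.
def get_val_or_initializer(orig_graph, fallback_init, var_name):
    if orig_graph is not None and var_name in orig_graph:
        return tf.constant_initializer(orig_graph[var_name])
    return fallback_init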
unrolled_gan.py (project: unrolled-GAN, author: Zardinality)
def generator(z):
    # TensorFlow cannot yet take higher-order derivatives through bias_add,
    # so a hand-rolled FC is used here instead of the layer from
    # tensorflow.contrib.layers; the final convolution below avoids
    # bias_add for the same reason.
    weights = slim.model_variable(
        'fn_weights', shape=(FLAGS.z_dim, 4 * 4 * 512), initializer=ly.xavier_initializer())
    bias = slim.model_variable(
        'fn_bias', shape=(4 * 4 * 512, ), initializer=tf.zeros_initializer)
    train = tf.nn.relu(ly.batch_norm(fully_connected(z, weights, bias)))
    train = tf.reshape(train, (-1, 4, 4, 512))
    train = ly.conv2d_transpose(train, 256, 3, stride=2,
                                activation_fn=tf.nn.relu, normalizer_fn=ly.batch_norm, padding='SAME')
    train = ly.conv2d_transpose(train, 128, 3, stride=2,
                                activation_fn=tf.nn.relu, normalizer_fn=ly.batch_norm, padding='SAME')
    train = ly.conv2d_transpose(train, 64, 3, stride=2,
                                activation_fn=tf.nn.relu, normalizer_fn=ly.batch_norm, padding='SAME')
    train = ly.conv2d_transpose(train, 1, 3, stride=1,
                                activation_fn=None, padding='SAME', biases_initializer=None)
    bias = slim.model_variable('bias', shape=(
        1, ), initializer=tf.zeros_initializer)
    train += bias
    train = tf.nn.tanh(train)
    return train
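`fully_connected(z, weights, bias)` here is a project-local helper, not the contrib layer; given the comment about `bias_add`, a plausible hedged sketch is:

# Hypothetical sketch of the project-local helper; the real definition
# is not shown above. A plain add deliberately avoids tf.nn.bias_add.
def fully_connected(x, weights, bias):
    return tf.matmul(x, weights) + bias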
dcgan_like.py (project: unrolled-GAN, author: Zardinality)
def generator(z, label):
    z = tf.concat(1, [z,label])
    train = ly.fully_connected(
        z, 1024, activation_fn=tf.nn.relu, normalizer_fn=ly.batch_norm)
    train = tf.concat(1, [train, label])
    train = ly.fully_connected(
        train, 4*4*512, activation_fn=tf.nn.relu, normalizer_fn=ly.batch_norm)
    train = tf.reshape(train, (-1, 4, 4, 512))
    yb = tf.ones([FLAGS.batch_size, 4, 4, 10])*tf.reshape(label, [FLAGS.batch_size, 1, 1, 10]) 
    train = tf.concat(3, [train, yb])
    train = ly.conv2d_transpose(train, 256, 3, stride=2,
                                activation_fn=tf.nn.relu, normalizer_fn=ly.batch_norm, padding='SAME', weights_initializer=tf.random_normal_initializer(0, 0.02))
    train = ly.conv2d_transpose(train, 128, 3, stride=2,
                                activation_fn=tf.nn.relu, normalizer_fn=ly.batch_norm, padding='SAME', weights_initializer=tf.random_normal_initializer(0, 0.02))
    train = ly.conv2d_transpose(train, 64, 3, stride=2,
                                activation_fn=tf.nn.relu, normalizer_fn=ly.batch_norm, padding='SAME', weights_initializer=tf.random_normal_initializer(0, 0.02))
    train = ly.conv2d_transpose(train, 1, 3, stride=1,
                                activation_fn=tf.nn.tanh, padding='SAME', weights_initializer=tf.random_normal_initializer(0, 0.02))
    return train
dcgan_like_sample.py (project: unrolled-GAN, author: Zardinality)
def generator(z, label):
    z = tf.concat(1, [z,label])
    train = ly.fully_connected(
        z, 1024, activation_fn=tf.nn.relu, normalizer_fn=ly.batch_norm)
    train = tf.concat(1, [train, label])
    train = ly.fully_connected(
        train, 4*4*512, activation_fn=tf.nn.relu, normalizer_fn=ly.batch_norm)
    train = tf.reshape(train, (-1, 4, 4, 512))
    yb = tf.ones([FLAGS.batch_size, 4, 4, 10])*tf.reshape(label, [FLAGS.batch_size, 1, 1, 10]) 
    train = tf.concat(3, [train, yb])
    train = ly.conv2d_transpose(train, 256, 3, stride=2,
                                activation_fn=tf.nn.relu, normalizer_fn=ly.batch_norm, padding='SAME', weights_initializer=tf.random_normal_initializer(0, 0.02))
    train = ly.conv2d_transpose(train, 128, 3, stride=2,
                                activation_fn=tf.nn.relu, normalizer_fn=ly.batch_norm, padding='SAME', weights_initializer=tf.random_normal_initializer(0, 0.02))
    train = ly.conv2d_transpose(train, 64, 3, stride=2,
                                activation_fn=tf.nn.relu, normalizer_fn=ly.batch_norm, padding='SAME', weights_initializer=tf.random_normal_initializer(0, 0.02))
    train = ly.conv2d_transpose(train, 1, 3, stride=1,
                                activation_fn=tf.nn.tanh, padding='SAME', weights_initializer=tf.random_normal_initializer(0, 0.02))
    return train
build_resnet.py (project: tensorflow-litterbox, author: rwightman)
def resnet_arg_scope(
        weight_decay=0.0001,
        batch_norm_decay=0.997,
        batch_norm_epsilon=1e-5,
        batch_norm_scale=True,
):
    batch_norm_params = {
        'decay': batch_norm_decay,
        'epsilon': batch_norm_epsilon,
        'scale': batch_norm_scale,
    }
    l2_regularizer = layers.l2_regularizer(weight_decay)

    arg_scope_layers = arg_scope(
        [layers.conv2d, my_layers.preact_conv2d, layers.fully_connected],
        weights_initializer=layers.variance_scaling_initializer(),
        weights_regularizer=l2_regularizer,
        activation_fn=tf.nn.relu)
    arg_scope_conv = arg_scope(
        [layers.conv2d, my_layers.preact_conv2d],
        normalizer_fn=layers.batch_norm,
        normalizer_params=batch_norm_params)
    with arg_scope_layers, arg_scope_conv as arg_sc:
        return arg_sc
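A hedged usage sketch (`images` is an assumed input tensor):

# Hedged usage sketch.
with arg_scope(resnet_arg_scope(weight_decay=1e-4)):
    net = layers.conv2d(images, 64, [7, 7], stride=2, scope='conv1')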
dcgan_.py (project: DCGAN-WGAN-TF, author: lovecambi)
def generator(self, z, Cc=128, f_h=5, f_w=5):
        with tf.variable_scope("g_deconv0",reuse=None):
            deconv0 = deconv2d(z, [self.batch_size, 4, 4, 8*Cc], 4, 4, 1, 1, bias=not self.Bn, padding='VALID')
            deconv0 = tf.nn.relu(tcl.batch_norm(deconv0)) if self.Bn else tf.nn.relu(deconv0)
        with tf.variable_scope("g_deconv1",reuse=None):
            deconv1 = deconv2d(deconv0, [self.batch_size, 8, 8, 4*Cc], f_h, f_w, 2, 2, bias=not self.Bn, padding='SAME')
            deconv1 = tf.nn.relu(tcl.batch_norm(deconv1)) if self.Bn else tf.nn.relu(deconv1)
        with tf.variable_scope("g_deconv2",reuse=None):
            deconv2 = deconv2d(deconv1, [self.batch_size, 16, 16, 2*Cc], f_h, f_w, 2, 2, bias=not self.Bn, padding='SAME')
            deconv2 = tf.nn.relu(tcl.batch_norm(deconv2)) if self.Bn else tf.nn.relu(deconv2)
        with tf.variable_scope("g_deconv3",reuse=None):
            deconv3 = deconv2d(deconv2, [self.batch_size, 32, 32, Cc], f_h, f_w, 2, 2, bias=not self.Bn, padding='SAME')
            deconv3 = tf.nn.relu(tcl.batch_norm(deconv3)) if self.Bn else tf.nn.relu(deconv3)
        with tf.variable_scope("g_deconv4",reuse=None):
            deconv4 = deconv2d(deconv3, [self.batch_size, 64, 64, self.C], f_h, f_w, 2, 2, bias=not self.Bn, padding='SAME')
        return tf.tanh(deconv4)
dcgan_.py (project: DCGAN-WGAN-TF, author: lovecambi)
def discriminator(self, x, Cc=128, f_h=5, f_w=5):
        with tf.variable_scope("d_conv1",reuse=self.DO_SHARE):
            conv1 = conv2d(x, self.C, Cc, f_h, f_w, 2, 2, bias=not self.Bn, padding='SAME') # H/2 x W/2
            conv1 = lrelu(conv1)
        with tf.variable_scope("d_conv2",reuse=self.DO_SHARE):
            conv2 = conv2d(conv1, Cc, 2*Cc, f_h, f_w, 2, 2, bias=not self.Bn, padding='SAME') # H/4 x W/4
            conv2 = lrelu(tcl.batch_norm(conv2)) if self.Bn else lrelu(conv2)
        with tf.variable_scope("d_conv3",reuse=self.DO_SHARE):
            conv3 = conv2d(conv2, 2*Cc, 4*Cc, f_h, f_w, 2, 2, bias=not self.Bn, padding='SAME') # H/8 x W/8
            conv3 = lrelu(tcl.batch_norm(conv3)) if self.Bn else lrelu(conv3)
        with tf.variable_scope("d_conv4",reuse=self.DO_SHARE):
            conv4 = conv2d(conv3, 4*Cc, 8*Cc, f_h, f_w, 2, 2, bias=not self.Bn, padding='SAME') # H/16 x W/16
            conv4 = lrelu(tcl.batch_norm(conv4)) if self.Bn else lrelu(conv4)
        with tf.variable_scope("d_conv5",reuse=self.DO_SHARE):
            conv5 = conv2d(conv4, 8*Cc, 1, 4, 4, 1, 1, bias=not self.Bn, padding='VALID') # 1 x 1
        return tf.reshape(conv5, [-1, 1])
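`lrelu` (used here and in the GAN_Theories snippets above) is another project helper; its conventional definition, given as an assumption:

# Assumed definition of the leaky-ReLU helper.
def lrelu(x, leak=0.2):
    return tf.maximum(x, leak * x)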

