Python batch_norm() usage examples (source code)
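
The snippets below are collected from several open-source TensorFlow 1.x projects and use the contrib layers API. As a rough guide (the exact aliases vary from file to file; tcl, layers, and tflayers all refer to tensorflow.contrib.layers), they assume imports along these lines:

# Common TF 1.x imports assumed by the snippets on this page; aliases vary per file.
import numpy as np
import tensorflow as tf
import tensorflow.contrib.layers as tcl
import tensorflow.contrib.slim as slim
from tensorflow.contrib.layers import batch_norm
from tensorflow.contrib.framework import arg_scope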

Source: train_tf.py (project: DeepWorks, author: daigo0927)
def __call__(self, inputs, reuse = True):
        with tf.variable_scope(self.name) as vs:
            if reuse:
                vs.reuse_variables()

            x = tcl.conv2d(inputs,
                           num_outputs = 64,
                           kernel_size = (4, 4),
                           stride = (1, 1),
                           padding = 'SAME')
            x = tcl.batch_norm(x)
            x = tf.nn.relu(x)
            x = tcl.max_pool2d(x, (2, 2), (2, 2), 'SAME')
            x = tcl.conv2d(x,
                           num_outputs = 128,
                           kernel_size = (4, 4),
                           stride = (1, 1),
                           padding = 'SAME')
            x = tcl.batch_norm(x)
            x = tf.nn.relu(x)
            x = tcl.max_pool2d(x, (2, 2), (2, 2), 'SAME')
            x = tcl.flatten(x)
            logits = tcl.fully_connected(x, num_outputs = self.num_output)

            return logits
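
One gotcha worth noting: reuse defaults to True, but vs.reuse_variables() only works once the variables already exist, so the first call on a fresh graph has to pass reuse=False. A minimal sketch (model and images are hypothetical names):

# Hypothetical usage: the first call must create the variables.
logits = model(images, reuse=False)   # builds the variables
logits2 = model(images2)              # reuse=True (default) shares them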
Source: model.py (project: DeepWorks, author: daigo0927)
def _bn_relu_conv(filters, kernel_size = (3, 3), stride = (1, 1)):
    def f(inputs):
        x = tcl.batch_norm(inputs)
        x = tf.nn.relu(x)
        x = tcl.conv2d(x,
                       num_outputs = filters,
                       kernel_size = kernel_size,
                       stride = stride,
                       padding = 'SAME')
        return x
    return f
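
Since _bn_relu_conv returns a closure, a layer is built in two steps: bind the hyper-parameters, then apply the result to a tensor. A minimal sketch (the inputs placeholder is a hypothetical example):

# Hypothetical usage of the builder pattern above.
inputs = tf.placeholder(tf.float32, [None, 32, 32, 3])
x = _bn_relu_conv(filters=64)(inputs)             # BN -> ReLU -> 3x3 conv
x = _bn_relu_conv(filters=64, stride=(2, 2))(x)   # same block with downsampling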
Source: model.py (project: DeepWorks, author: daigo0927)
def __call__(self, inputs, reuse = True):
        with tf.variable_scope(self.name) as vs:
            if reuse:
                vs.reuse_variables()

            conv1 = tcl.conv2d(inputs,
                               num_outputs = 64,
                               kernel_size = (7, 7),
                               stride = (2, 2),
                               padding = 'SAME')
            conv1 = tcl.batch_norm(conv1)
            conv1 = tf.nn.relu(conv1)
            conv1 = tcl.max_pool2d(conv1,
                                   kernel_size = (3, 3),
                                   stride = (2, 2),
                                   padding = 'SAME')

            x = conv1
            filters = 64
            first_layer = True
            for i, r in enumerate(self.repetitions):
                x = _residual_block(self.block_fn,
                                    filters = filters,
                                    repetition = r,
                                    is_first_layer = first_layer)(x)
                filters *= 2
                if first_layer:
                    first_layer = False

            _, h, w, ch = x.shape.as_list()
            outputs = tcl.avg_pool2d(x,
                                     kernel_size = (h, w),
                                     stride = (1, 1))
            outputs = tcl.flatten(outputs)
            logits = tcl.fully_connected(outputs, num_outputs = self.num_output,
                                         activation_fn = None)
            return logits
Source: model.py (project: DeepWorks, author: daigo0927)
def _conv_bn_relu(filters, kernel_size = (3, 3), stride = (1, 1)):
    def f(inputs):
        x = tcl.conv2d(inputs,
                       num_outputs = filters,
                       kernel_size = kernel_size,
                       stride = stride,
                       padding = 'SAME')
        x = tcl.batch_norm(x)
        x = tf.nn.relu(x)
        return x
    return f
Source: model.py (project: DeepWorks, author: daigo0927)
def __init__(self,
                 output_ch,  # number of output channels; spatial size matches the inputs
                 block_fn = 'origin',
                 name = 'unet'):
        self.output_ch = output_ch
        self.name = name

        assert block_fn in ['batch_norm', 'origin'], 'choose \'batch_norm\' or \'origin\''
        if block_fn == 'batch_norm':
            self.block_fn = _conv_bn_relu
        elif block_fn == 'origin':
            self.block_fn = _conv_relu
Source: dg_mnist.py (project: deligan, author: val-iisc)
def discriminator(image, Reuse=False):
    # df_dim, phase_train and batchsize are globals defined elsewhere in the file.
    with tf.variable_scope('disc', reuse=Reuse):
        image = tf.reshape(image, [-1, 28, 28, 1])
        h0 = lrelu(conv(image, 5, 5, 1, df_dim, stridex=2, stridey=2, name='d_h0_conv'))
        h1 = lrelu(batch_norm(conv(h0, 5, 5, df_dim, df_dim*2, stridex=2, stridey=2, name='d_h1_conv'),
                              decay=0.9, scale=True, updates_collections=None,
                              is_training=phase_train, reuse=Reuse, scope='d_bn1'))
        h2 = lrelu(batch_norm(conv(h1, 3, 3, df_dim*2, df_dim*4, stridex=2, stridey=2, name='d_h2_conv'),
                              decay=0.9, scale=True, updates_collections=None,
                              is_training=phase_train, reuse=Reuse, scope='d_bn2'))
        h3 = tf.nn.max_pool(h2, ksize=[1, 4, 4, 1], strides=[1, 1, 1, 1], padding='VALID')
        h6 = tf.reshape(h2, [-1, 4*4*df_dim*4])  # note: h6 is computed but never used below
        h7 = Minibatch_Discriminator(h3, num_kernels=df_dim*4, name='d_MD')
        h8 = dense(tf.reshape(h7, [batchsize, -1]), df_dim*4*2, 1, scope='d_h8_lin')
        return tf.nn.sigmoid(h8), h8
Source: train_mnist_feature_matching_ali_tf.py (project: Semi_Supervised_GAN, author: ChunyuanLI)
def inference(input_img):
    with tf.variable_scope('Net_Inf') as scope:
        xx = layers.fully_connected(input_img, num_outputs=500, activation_fn=None)
        xx = layers.batch_norm(xx)
        xx = tf.nn.relu(xx)
        xx = layers.fully_connected(xx, num_outputs=500, activation_fn=None)
        xx = layers.batch_norm(xx)
        xx = tf.nn.relu(xx)
        xx = layers.fully_connected(xx, num_outputs=latent_size, activation_fn=None)
        xx = layers.batch_norm(xx)
        inf_latent = tf.nn.tanh(xx)
    return inf_latent

# specify discriminative model
Source: train_mnist_feature_matching_tf.py (project: Semi_Supervised_GAN, author: ChunyuanLI)
def discriminator(input_img):
    with tf.variable_scope('Net_Dis') as scope:
        xx = layers.fully_connected(input_img, num_outputs=1000, activation_fn=None)
        xx = layers.batch_norm(xx)
        xx = tf.nn.relu(xx)
        xx = tf.nn.dropout(xx, 0.5)
        xx = layers.fully_connected(xx, num_outputs=500, activation_fn=None)
        xx = layers.batch_norm(xx)
        xx = tf.nn.relu(xx)
        xx = tf.nn.dropout(xx, 0.5)
        xx = layers.fully_connected(xx, num_outputs=250, activation_fn=None)
        xx = layers.batch_norm(xx)
        xx = tf.nn.relu(xx)
        xx = tf.nn.dropout(xx, 0.5)
        xx = layers.fully_connected(xx, num_outputs=250, activation_fn=None)
        xx = layers.batch_norm(xx)
        xx = tf.nn.relu(xx)
        xx = tf.nn.dropout(xx, 0.5)
        xx0 = layers.fully_connected(xx, num_outputs=250, activation_fn=None)
        xx = layers.batch_norm(xx0)
        xx = tf.nn.relu(xx)
        logits = layers.fully_connected(xx, label_size, activation_fn=None)

    return logits, xx0

Source: residual_encoder.py (project: Automatic-Image-Colorization, author: Armour)
def batch_normal_new(input_data, scope, training_flag):
        """
        Batch normalization using the built-in batch_norm function.
        :param input_data: the input data
        :param scope: variable scope
        :param training_flag: boolean tensor indicating whether the model is training
        :return: normalized data
        """
        return tf.cond(training_flag,
                       lambda: batch_norm(input_data, decay=0.9999, is_training=True, center=True, scale=True,
                                          updates_collections=None, scope=scope),
                       lambda: batch_norm(input_data, decay=0.9999, is_training=False, center=True, scale=True,
                                          updates_collections=None, scope=scope, reuse=True),
                       name='batch_normalization')
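
Because the train/inference switch is built into the graph with tf.cond, training_flag must be a boolean tensor rather than a Python bool. A minimal usage sketch (is_training and conv_out are hypothetical names):

# Hypothetical usage: the flag is fed at run time through a boolean placeholder.
is_training = tf.placeholder(tf.bool, name='is_training')
normalized = batch_normal_new(conv_out, scope='bn1', training_flag=is_training)
# feed_dict={is_training: True} while training, {is_training: False} at inference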
Source: Densenet_MNIST.py (project: Densenet-Tensorflow, author: taki0112)
def Batch_Normalization(x, training, scope):
    with arg_scope([batch_norm],
                   scope=scope,
                   updates_collections=None,
                   decay=0.9,
                   center=True,
                   scale=True,
                   zero_debias_moving_mean=True):
        return tf.cond(training,
                       lambda: batch_norm(inputs=x, is_training=training, reuse=None),
                       lambda: batch_norm(inputs=x, is_training=training, reuse=True))
Source: Densenet_Cifar10.py (project: Densenet-Tensorflow, author: taki0112)
def Batch_Normalization(x, training, scope):
    with arg_scope([batch_norm],
                   scope=scope,
                   updates_collections=None,
                   decay=0.9,
                   center=True,
                   scale=True,
                   zero_debias_moving_mean=True):
        return tf.cond(training,
                       lambda: batch_norm(inputs=x, is_training=training, reuse=None),
                       lambda: batch_norm(inputs=x, is_training=training, reuse=True))
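
This helper appears verbatim in both Densenet files. The reuse=None / reuse=True pair makes the two tf.cond branches share the same variables under scope, and as before, training must be a boolean tensor. A minimal sketch (training_flag is a hypothetical placeholder):

# Hypothetical usage of the Densenet helper above.
training_flag = tf.placeholder(tf.bool, name='training_flag')
x = Batch_Normalization(x, training=training_flag, scope='bn0')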
Source: bn_gru.py (project: website-fingerprinting, author: AxelGoetz)
def __init__(self, num_units, input_size=None, activation=tanh, is_training=True, batch_norm=True):
    self._is_training = is_training
    self._batch_norm = batch_norm

    super().__init__(num_units, input_size, activation)
Source: vae_conv.py (project: zhusuan, author: thu-ml)
def vae_conv(observed, n, n_x, n_z, n_particles, is_training):
    with zs.BayesianNet(observed=observed) as model:
        normalizer_params = {'is_training': is_training,
                             'updates_collections': None}
        z_mean = tf.zeros([n, n_z])
        z = zs.Normal('z', z_mean, std=1., n_samples=n_particles,
                      group_ndims=1)
        lx_z = tf.reshape(z, [-1, 1, 1, n_z])
        lx_z = layers.conv2d_transpose(
            lx_z, 128, kernel_size=3, padding='VALID',
            normalizer_fn=layers.batch_norm,
            normalizer_params=normalizer_params)
        lx_z = layers.conv2d_transpose(
            lx_z, 64, kernel_size=5, padding='VALID',
            normalizer_fn=layers.batch_norm,
            normalizer_params=normalizer_params)
        lx_z = layers.conv2d_transpose(
            lx_z, 32, kernel_size=5, stride=2,
            normalizer_fn=layers.batch_norm,
            normalizer_params=normalizer_params)
        lx_z = layers.conv2d_transpose(
            lx_z, 1, kernel_size=5, stride=2,
            activation_fn=None)
        x_logits = tf.reshape(lx_z, [n_particles, n, -1])
        x = zs.Bernoulli('x', x_logits, group_ndims=1)
    return model
Source: gait_nn.py (project: gait-recognition, author: marian-margeta)
def residual_block(net, ch = 256, ch_inner = 128, scope = None, reuse = None, stride = 1):
        """
        Bottleneck v2
        """

        with slim.arg_scope([layers.convolution2d],
                            activation_fn = None,
                            normalizer_fn = None):
            with tf.variable_scope(scope, 'ResidualBlock', reuse = reuse):
                in_net = net

                if stride > 1:
                    net = layers.convolution2d(net, ch, kernel_size = 1, stride = stride)

                in_net = layers.batch_norm(in_net)
                in_net = tf.nn.relu(in_net)
                in_net = layers.convolution2d(in_net, ch_inner, 1)

                in_net = layers.batch_norm(in_net)
                in_net = tf.nn.relu(in_net)
                in_net = layers.convolution2d(in_net, ch_inner, 3, stride = stride)

                in_net = layers.batch_norm(in_net)
                in_net = tf.nn.relu(in_net)
                in_net = layers.convolution2d(in_net, ch, 1, activation_fn = None)

                net = tf.nn.relu(in_net + net)

        return net
Source: pretrained_models.py (project: DocumentSegmentation, author: SeguinBe)
def resnet_v1_50_fn(input_tensor: tf.Tensor, is_training=False, blocks=4, weight_decay=0.0001, renorm=True):
    # returns (net, intermediate_layers)
    with slim.arg_scope(nets.resnet_v1.resnet_arg_scope(weight_decay=weight_decay, batch_norm_decay=0.999)), \
         slim.arg_scope([layers.batch_norm], renorm_decay=0.95, renorm=renorm):
        input_tensor = mean_substraction(input_tensor)
        assert 0 < blocks <= 4
        blocks_list = [
              nets.resnet_v1.resnet_v1_block('block1', base_depth=64, num_units=3, stride=2),
              nets.resnet_v1.resnet_v1_block('block2', base_depth=128, num_units=4, stride=2),
              nets.resnet_v1.resnet_v1_block('block3', base_depth=256, num_units=6, stride=2),
              nets.resnet_v1.resnet_v1_block('block4', base_depth=512, num_units=3, stride=1),
        ]
        net, endpoints = nets.resnet_v1.resnet_v1(input_tensor,
                                                  blocks=blocks_list[:blocks],
                                                  num_classes=None,
                                                  is_training=is_training,
                                                  global_pool=False,
                                                  output_stride=None,
                                                  include_root_block=True,
                                                  reuse=None,
                                                  scope='resnet_v1_50')

        desired_endpoints = ['resnet_augmented/resnet_v1_50/conv1',
                             'resnet_v1_50/block1/unit_2/bottleneck_v1',
                             'resnet_v1_50/block2/unit_3/bottleneck_v1',
                             'resnet_v1_50/block3/unit_5/bottleneck_v1',
                             'resnet_v1_50/block4/unit_2/bottleneck_v1'
                             ]

        intermediate_layers = list()
        for d in desired_endpoints:
            intermediate_layers.append(endpoints[d])

        return net, intermediate_layers
Source: resnn.py (project: web_page_classification, author: yuhui-lin)
def BN_ReLU(self, net):
        # Batch normalization followed by ReLU.
        # 'scale' (gamma) is disabled because the next op is ReLU.
        net = batch_norm(net,
                         center=True,
                         scale=False,
                         activation_fn=tf.nn.relu)
        # log an activation summary
        self._activation_summary(net)
        return net
Source: convnet.py (project: deep_unsupervised_posets, author: asanakoy)
def fc_relu(self, input_tensor, num_outputs, relu=False, batch_norm=False, weight_std=0.005,
                bias_init_value=0.1, name=None):
        if batch_norm and not relu:
            raise ValueError('Cannot use batch normalization without a following ReLU')
        with tf.variable_scope(name) as scope:
            num_inputs = int(np.prod(input_tensor.get_shape()[1:]))
            w, b = self.get_fc_weights(num_inputs, num_outputs,
                                       weight_std=weight_std,
                                       bias_init_value=bias_init_value)

            fc_relu = None
            input_tensor_reshaped = tf.reshape(input_tensor, [-1, num_inputs])
            fc = tf.add(tf.matmul(input_tensor_reshaped, w), b, name='fc' if relu or batch_norm else name)
            if batch_norm:
                fc = tf.cond(self.is_phase_train,
                             lambda: tflayers.batch_norm(fc,
                                                         decay=self.batch_norm_decay,
                                                         is_training=True,
                                                         trainable=True,
                                                         reuse=None,
                                                         scope=scope),
                             lambda: tflayers.batch_norm(fc,
                                                         decay=self.batch_norm_decay,
                                                         is_training=False,
                                                         trainable=True,
                                                         reuse=True,
                                                         scope=scope))
            if relu:
                fc_relu = tf.nn.relu(fc, name=name)
        return fc, fc_relu
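
A usage sketch for the method above: it returns both the pre-activation and post-ReLU tensors, and fc_relu stays None when relu=False (pool5 is a hypothetical input tensor):

# Hypothetical usage inside the same class.
fc6, fc6_relu = self.fc_relu(pool5, num_outputs=4096, relu=True,
                             batch_norm=True, name='fc6')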
Source: ops.py (project: Img2Img-Translation-Tensorflow, author: lovecambi)
def batch_norm(x, train_mode=True, epsilon=1e-5, momentum=0.9, name="bn"):
    # Note: the enclosing variable_scope(name) combined with scope=name
    # places the variables under a nested "name/name" scope.
    with tf.variable_scope(name):
        return tcl.batch_norm(x,
                              decay=momentum,
                              updates_collections=None,
                              epsilon=epsilon,
                              scale=True,
                              is_training=train_mode,
                              trainable=True,
                              scope=name)
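
A minimal sketch of calling this wrapper (conv_out is a hypothetical tensor). Note that train_mode is a plain Python bool fixed at graph-construction time, so training and inference need separate calls built with the appropriate flag:

# Hypothetical usage: train_mode is baked in when the graph is built.
h = batch_norm(conv_out, train_mode=True, name='g_bn1')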
Source: predictron.py (project: predictron, author: zhongwen)
def iter_func(self, state):
    sc = predictron_arg_scope()

    with tf.variable_scope('value'):
      value_net = slim.fully_connected(slim.flatten(state), 32, scope='fc0')
      value_net = layers.batch_norm(value_net, activation_fn=tf.nn.relu, scope='fc0/preact')
      value_net = slim.fully_connected(value_net, self.maze_size, activation_fn=None, scope='fc1')

    with slim.arg_scope(sc):
      net = slim.conv2d(state, 32, [3, 3], scope='conv1')
      net = layers.batch_norm(net, activation_fn=tf.nn.relu, scope='conv1/preact')
      net_flatten = slim.flatten(net, scope='conv1/flatten')

      with tf.variable_scope('reward'):
        reward_net = slim.fully_connected(net_flatten, 32, scope='fc0')
        reward_net = layers.batch_norm(reward_net, activation_fn=tf.nn.relu, scope='fc0/preact')
        reward_net = slim.fully_connected(reward_net, self.maze_size, activation_fn=None, scope='fc1')

      with tf.variable_scope('gamma'):
        gamma_net = slim.fully_connected(net_flatten, 32, scope='fc0')
        gamma_net = layers.batch_norm(gamma_net, activation_fn=tf.nn.relu, scope='fc0/preact')
        gamma_net = slim.fully_connected(gamma_net, self.maze_size, activation_fn=tf.nn.sigmoid, scope='fc1')

      with tf.variable_scope('lambda'):
        lambda_net = slim.fully_connected(net_flatten, 32, scope='fc0')
        lambda_net = layers.batch_norm(lambda_net, activation_fn=tf.nn.relu, scope='fc0/preact')
        lambda_net = slim.fully_connected(lambda_net, self.maze_size, activation_fn=tf.nn.sigmoid, scope='fc1')

      net = slim.conv2d(net, 32, [3, 3], scope='conv2')
      net = layers.batch_norm(net, activation_fn=tf.nn.relu, scope='conv2/preact')

      net = slim.conv2d(net, 32, [3, 3], scope='conv3')
      net = layers.batch_norm(net, activation_fn=tf.nn.relu, scope='conv3/preact')
    return net, reward_net, gamma_net, lambda_net, value_net
Source: utils.py (project: learning-tensorflow, author: Salon-sai)
def __init__(self, epsilon=1e-5, momentum=0.9, name="batch_norm"):
        # the variable_scope only records the name; no variables are created here
        with tf.variable_scope(name):
            self.epsilon = epsilon
            self.momentum = momentum
            self.name = name

