Python batch_norm() example source code

conv_vae.py (project: vae-flow, author: andymiller)
def generative_network(z, zdim):
  """Generative network to parameterize generative model. It takes
  latent variables as input and outputs the likelihood parameters.
  logits = neural_network(z)

  Args:
    z: input tensor of latent variables
    zdim: latent variable dimension
  """
  with slim.arg_scope([slim.conv2d_transpose],
                      activation_fn=tf.nn.elu,
                      normalizer_fn=slim.batch_norm,
                      normalizer_params={'scale': True}):
    net = tf.reshape(z, [N_MINIBATCH, 1, 1, zdim])
    net = slim.conv2d_transpose(net, 128, 3, padding='VALID')
    net = slim.conv2d_transpose(net, 64, 5, padding='VALID')
    net = slim.conv2d_transpose(net, 32, 5, stride=2)
    net = slim.conv2d_transpose(net, 1, 5, stride=2, activation_fn=None)
    net = slim.flatten(net)
    # net = tf.nn.sigmoid(net)  # left disabled; logits are returned instead
    return net
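
A minimal invocation sketch (N_MINIBATCH is a module-level constant in the original file; the sizes below are illustrative assumptions):

import tensorflow as tf
import tensorflow.contrib.slim as slim

N_MINIBATCH = 64                            # assumed batch size; defined elsewhere in conv_vae.py
zdim = 10                                   # illustrative latent dimension
z = tf.random_normal([N_MINIBATCH, zdim])   # sampled latent codes
logits = generative_network(z, zdim)        # (N_MINIBATCH, 784) logits for 28x28 outputs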
model.py (project: domain-transfer-network, author: yunjey)
def content_extractor(self, images, reuse=False):
        # images: (batch, 32, 32, 3) or (batch, 32, 32, 1)

        if images.get_shape()[3] == 1:
            # For mnist dataset, replicate the gray scale image 3 times.
            images = tf.image.grayscale_to_rgb(images)

        with tf.variable_scope('content_extractor', reuse=reuse):
            with slim.arg_scope([slim.conv2d], padding='SAME', activation_fn=None,
                                 stride=2,  weights_initializer=tf.contrib.layers.xavier_initializer()):
                with slim.arg_scope([slim.batch_norm], decay=0.95, center=True, scale=True, 
                                    activation_fn=tf.nn.relu, is_training=(self.mode=='train' or self.mode=='pretrain')):

                    net = slim.conv2d(images, 64, [3, 3], scope='conv1')   # (batch_size, 16, 16, 64)
                    net = slim.batch_norm(net, scope='bn1')
                    net = slim.conv2d(net, 128, [3, 3], scope='conv2')     # (batch_size, 8, 8, 128)
                    net = slim.batch_norm(net, scope='bn2')
                    net = slim.conv2d(net, 256, [3, 3], scope='conv3')     # (batch_size, 4, 4, 256)
                    net = slim.batch_norm(net, scope='bn3')
                    net = slim.conv2d(net, 128, [4, 4], padding='VALID', scope='conv4')   # (batch_size, 1, 1, 128)
                    net = slim.batch_norm(net, activation_fn=tf.nn.tanh, scope='bn4')
                    if self.mode == 'pretrain':
                        net = slim.conv2d(net, 10, [1, 1], padding='VALID', scope='out')
                        net = slim.flatten(net)
                    return net
model.py (project: domain-transfer-network, author: yunjey)
def generator(self, inputs, reuse=False):
        # inputs: (batch, 1, 1, 128)
        with tf.variable_scope('generator', reuse=reuse):
            with slim.arg_scope([slim.conv2d_transpose], padding='SAME', activation_fn=None,           
                                 stride=2, weights_initializer=tf.contrib.layers.xavier_initializer()):
                with slim.arg_scope([slim.batch_norm], decay=0.95, center=True, scale=True, 
                                     activation_fn=tf.nn.relu, is_training=(self.mode=='train')):

                    net = slim.conv2d_transpose(inputs, 512, [4, 4], padding='VALID', scope='conv_transpose1')   # (batch_size, 4, 4, 512)
                    net = slim.batch_norm(net, scope='bn1')
                    net = slim.conv2d_transpose(net, 256, [3, 3], scope='conv_transpose2')  # (batch_size, 8, 8, 256)
                    net = slim.batch_norm(net, scope='bn2')
                    net = slim.conv2d_transpose(net, 128, [3, 3], scope='conv_transpose3')  # (batch_size, 16, 16, 128)
                    net = slim.batch_norm(net, scope='bn3')
                    net = slim.conv2d_transpose(net, 1, [3, 3], activation_fn=tf.nn.tanh, scope='conv_transpose4')   # (batch_size, 32, 32, 1)
                    return net
model.py (project: domain-transfer-network, author: yunjey)
def discriminator(self, images, reuse=False):
        # images: (batch, 32, 32, 1)
        with tf.variable_scope('discriminator', reuse=reuse):
            with slim.arg_scope([slim.conv2d], padding='SAME', activation_fn=None,
                                 stride=2,  weights_initializer=tf.contrib.layers.xavier_initializer()):
                with slim.arg_scope([slim.batch_norm], decay=0.95, center=True, scale=True, 
                                    activation_fn=tf.nn.relu, is_training=(self.mode=='train')):

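                    # Note: activation_fn=tf.nn.relu below overrides the arg_scope default (None), so ReLU is applied after conv1 and again after bn1.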
                    net = slim.conv2d(images, 128, [3, 3], activation_fn=tf.nn.relu, scope='conv1')   # (batch_size, 16, 16, 128)
                    net = slim.batch_norm(net, scope='bn1')
                    net = slim.conv2d(net, 256, [3, 3], scope='conv2')   # (batch_size, 8, 8, 256)
                    net = slim.batch_norm(net, scope='bn2')
                    net = slim.conv2d(net, 512, [3, 3], scope='conv3')   # (batch_size, 4, 4, 512)
                    net = slim.batch_norm(net, scope='bn3')
                    net = slim.conv2d(net, 1, [4, 4], padding='VALID', scope='conv4')   # (batch_size, 1, 1, 1)
                    net = slim.flatten(net)
                    return net
network.py (project: BDD_Driving_Model, author: gy20073)
def batch_normalization(self, input, name, scale_offset=True, relu=False, is_training=False):
        with tf.variable_scope(name) as scope:
            norm_params = {'decay':0.999, 'scale':scale_offset, 'epsilon':0.001, 'is_training':is_training,
                           'activation_fn':tf.nn.relu if relu else None}            
            if hasattr(self, 'data_dict'):
                param_inits={'moving_mean':self.get_saved_value('mean'),
                             'moving_variance':self.get_saved_value('variance')}
                if scale_offset:
                    param_inits['beta']=self.get_saved_value('offset')
                    param_inits['gamma']=self.get_saved_value('scale')

                shape = [input.get_shape()[-1]]
                for key in param_inits:
                    param_inits[key] = np.reshape(param_inits[key], shape)
                norm_params['param_initializers'] = param_inits
            # TODO: there might be a bug if reusing is enabled.
            return slim.batch_norm(input, **norm_params)
InceptionResnetV2.py (project: RFCN-tensorflow, author: xdever)
def __init__(self, name, inputs, trainFrom = None, reuse=False, weightDecay=0.00004, batchNormDecay=0.9997, batchNormEpsilon=0.001, freezeBatchNorm=False):
        self.name = name
        self.inputs = inputs
        self.trainFrom = trainFrom

        with slim.arg_scope([slim.conv2d, slim.fully_connected],
                weights_regularizer=None,
                biases_regularizer=None):

            batch_norm_params = {
                'decay': batchNormDecay,
                'epsilon': batchNormEpsilon,
            }
            # Set activation_fn and parameters for batch_norm.
            with slim.arg_scope([slim.conv2d],
                    activation_fn=tf.nn.relu,
                    normalizer_fn=slim.batch_norm,
                    normalizer_params=batch_norm_params) as scope:

                self.endPoints, self.scope, self.scopeList = InceptionResnetV2.define(inputs, weightDecay = weightDecay, trainFrom=trainFrom, scope=name, reuse = reuse, freezeBatchNorm = freezeBatchNorm)
encoders.py (project: LifelongVAE, author: jramapuram)
def _get_normalizer(is_training, use_bn, use_ln):
    '''
    Helper to get normalizer function and params
    '''
    batch_norm_params = {'is_training': is_training,
                         'decay': 0.999, 'center': True,
                         'scale': True, 'updates_collections': None}
    layer_norm_params = {'center': True, 'scale': True}

    if use_ln:
        print('using layer norm')
        normalizer_fn = slim.layer_norm
        normalizer_params = layer_norm_params
    elif use_bn:
        print('using batch norm')
        normalizer_fn = slim.batch_norm
        normalizer_params = batch_norm_params
    else:
        print('not using any layer normalization scheme')
        normalizer_fn = None
        normalizer_params = None

    return [normalizer_fn, normalizer_params]
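
A sketch of how the returned pair plugs into a slim layer (the input tensor `inputs` and the layer width are illustrative assumptions):

normalizer_fn, normalizer_params = _get_normalizer(is_training=True,
                                                   use_bn=True, use_ln=False)
net = slim.fully_connected(inputs, 512,
                           normalizer_fn=normalizer_fn,
                           normalizer_params=normalizer_params)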
layers.py (project: vessel-classification, author: GlobalFishingWatch)
def misconception_with_bypass(input,
                              window_size,
                              stride,
                              depth,
                              is_training,
                              scope=None):
    with tf.name_scope(scope):
        with slim.arg_scope(
            [slim.conv2d],
                padding='SAME',
                activation_fn=tf.nn.relu,
                normalizer_fn=slim.batch_norm,
                normalizer_params={'is_training': is_training}):
            residual = misconception_layer(input, window_size, stride, depth,
                                           is_training, scope)

            if stride > 1:
                input = slim.avg_pool2d(
                    input, [1, stride], stride=[1, stride], padding='SAME')

            input = zero_pad_features(input, depth)

            return input + residual
pyramid_network.py (project: TFMaskRCNN, author: hillox)
def _extra_conv_arg_scope_with_bn(weight_decay=0.00001,
                                  activation_fn=None,
                                  batch_norm_decay=0.997,
                                  batch_norm_epsilon=1e-5,
                                  batch_norm_scale=True):

  batch_norm_params = {
      'decay': batch_norm_decay,
      'epsilon': batch_norm_epsilon,
      'scale': batch_norm_scale,
      'updates_collections': tf.GraphKeys.UPDATE_OPS_EXTRA,
  }

  with slim.arg_scope(
      [slim.conv2d],
      weights_regularizer=slim.l2_regularizer(weight_decay),
      weights_initializer=slim.variance_scaling_initializer(),
      activation_fn=tf.nn.relu,
      normalizer_fn=slim.batch_norm,
      normalizer_params=batch_norm_params):
    with slim.arg_scope([slim.batch_norm], **batch_norm_params):
      with slim.arg_scope([slim.max_pool2d], padding='SAME') as arg_sc:
        return arg_sc
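
The returned scope is meant to be re-entered by the caller; a hedged usage sketch (the input tensor `images` is an assumption):

with slim.arg_scope(_extra_conv_arg_scope_with_bn(weight_decay=1e-5)):
  net = slim.conv2d(images, 256, [3, 3], scope='fpn_conv')  # inherits the BN, L2 and initializer defaults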
fc1024.py (project: triplet-reid, author: VisualComputingInstitute)
def head(endpoints, embedding_dim, is_training):
    endpoints['head_output'] = slim.fully_connected(
        endpoints['model_output'], 1024, normalizer_fn=slim.batch_norm,
        normalizer_params={
            'decay': 0.9,
            'epsilon': 1e-5,
            'scale': True,
            'is_training': is_training,
            'updates_collections': tf.GraphKeys.UPDATE_OPS,
        })

    endpoints['emb'] = endpoints['emb_raw'] = slim.fully_connected(
        endpoints['head_output'], embedding_dim, activation_fn=None,
        weights_initializer=tf.orthogonal_initializer(), scope='emb')

    return endpoints
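
An illustrative call, assuming a backbone network has already populated endpoints['model_output']:

endpoints = {'model_output': backbone_features}   # pooled backbone features (assumed to exist)
endpoints = head(endpoints, embedding_dim=128, is_training=True)
embeddings = endpoints['emb']                     # shape (batch, 128)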
fc1024_normalize.py (project: triplet-reid, author: VisualComputingInstitute)
def head(endpoints, embedding_dim, is_training):
    endpoints['head_output'] = slim.fully_connected(
        endpoints['model_output'], 1024, normalizer_fn=slim.batch_norm,
        normalizer_params={
            'decay': 0.9,
            'epsilon': 1e-5,
            'scale': True,
            'is_training': is_training,
            'updates_collections': tf.GraphKeys.UPDATE_OPS,
        })

    endpoints['emb_raw'] = slim.fully_connected(
        endpoints['head_output'], embedding_dim, activation_fn=None,
        weights_initializer=tf.orthogonal_initializer(), scope='emb')
    endpoints['emb'] = tf.nn.l2_normalize(endpoints['emb_raw'], -1)

    return endpoints
frame_level_models.py (project: youtube-8m, author: wangheda)
def __init__(self, epsilon=1e-5, momentum=0.99, name="batch_norm"):
        with tf.variable_scope(name):
            self.epsilon = epsilon
            self.momentum = momentum

            self.ema = tf.train.ExponentialMovingAverage(decay=self.momentum)
            self.name = name
frame_level_models.py (project: youtube-8m, author: wangheda)
def cnn(self,
            model_input,
            l2_penalty=1e-8,
            num_filters = [1024, 1024, 1024],
            filter_sizes = [1,2,3],
            sub_scope="",
            **unused_params):
        max_frames = model_input.get_shape().as_list()[1]
        num_features = model_input.get_shape().as_list()[2]

        shift_inputs = []
        for i in range(max(filter_sizes)):
            if i == 0:
                shift_inputs.append(model_input)
            else:
                shift_inputs.append(tf.pad(model_input, paddings=[[0,0],[i,0],[0,0]])[:,:max_frames,:])

        cnn_outputs = []
        for nf, fs in zip(num_filters, filter_sizes):
            sub_input = tf.concat(shift_inputs[:fs], axis=2)
            sub_filter = tf.get_variable(sub_scope+"cnn-filter-len%d"%fs,
                                         shape=[num_features*fs, nf], dtype=tf.float32,
                                         initializer=tf.truncated_normal_initializer(mean=0.0, stddev=0.1),
                                         regularizer=tf.contrib.layers.l2_regularizer(l2_penalty))
            cnn_outputs.append(tf.einsum("ijk,kl->ijl", sub_input, sub_filter))

        cnn_output = tf.concat(cnn_outputs, axis=2)
        cnn_output = slim.batch_norm(
            cnn_output,
            center=True,
            scale=True,
            is_training=FLAGS.train,
            scope=sub_scope+"cluster_bn")
        return cnn_output, max_frames
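
Because slim.batch_norm defaults to updates_collections=tf.GraphKeys.UPDATE_OPS, the moving averages above are only maintained if those update ops run together with the train op; the standard TF1 pattern, sketched with an assumed optimizer and loss:

update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
    train_op = optimizer.minimize(loss)   # optimizer/loss assumed defined elsewhere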
frame_level_models.py (project: youtube-8m, author: wangheda)
def cnn(self,
            model_input,
            l2_penalty=1e-8,
            num_filters = [1024, 1024, 1024],
            filter_sizes = [1,2,3],
            sub_scope="",
            **unused_params):
        max_frames = model_input.get_shape().as_list()[1]
        num_features = model_input.get_shape().as_list()[2]

        shift_inputs = []
        for i in range(max(filter_sizes)):
            if i == 0:
                shift_inputs.append(model_input)
            else:
                shift_inputs.append(tf.pad(model_input, paddings=[[0,0],[i,0],[0,0]])[:,:max_frames,:])

        cnn_outputs = []
        for nf, fs in zip(num_filters, filter_sizes):
            sub_input = tf.concat(shift_inputs[:fs], axis=2)
            sub_filter = tf.get_variable(sub_scope+"cnn-filter-len%d"%fs,
                                         shape=[num_features*fs, nf], dtype=tf.float32,
                                         initializer=tf.truncated_normal_initializer(mean=0.0, stddev=0.1),
                                         regularizer=tf.contrib.layers.l2_regularizer(l2_penalty))
            sub_bias = tf.get_variable(sub_scope+"cnn-bias-len%d"%fs,
                                         shape=[nf], dtype=tf.float32,
                                         initializer=tf.truncated_normal_initializer(mean=0.0, stddev=0.1),
                                         regularizer=tf.contrib.layers.l2_regularizer(l2_penalty))
            cnn_outputs.append(tf.einsum("ijk,kl->ijl", sub_input, sub_filter) + sub_bias)

        cnn_output = tf.concat(cnn_outputs, axis=2)
        cnn_output = slim.batch_norm(
            cnn_output,
            center=True,
            scale=True,
            is_training=FLAGS.train,
            scope=sub_scope+"cluster_bn")
        return cnn_output, max_frames
multiscale_cnn_lstm_model.py (project: youtube-8m, author: wangheda)
def cnn(self, 
          model_input, 
          l2_penalty=1e-8, 
          num_filters = [1024, 1024, 1024],
          filter_sizes = [1,2,3], 
          sub_scope="",
          **unused_params):
    max_frames = model_input.get_shape().as_list()[1]
    num_features = model_input.get_shape().as_list()[2]

    shift_inputs = []
    for i in range(max(filter_sizes)):
      if i == 0:
        shift_inputs.append(model_input)
      else:
        shift_inputs.append(tf.pad(model_input, paddings=[[0,0],[i,0],[0,0]])[:,:max_frames,:])

    cnn_outputs = []
    for nf, fs in zip(num_filters, filter_sizes):
      sub_input = tf.concat(shift_inputs[:fs], axis=2)
      sub_filter = tf.get_variable(sub_scope+"cnn-filter-len%d"%fs, 
                       shape=[num_features*fs, nf], dtype=tf.float32, 
                       initializer=tf.truncated_normal_initializer(mean=0.0, stddev=0.1), 
                       regularizer=tf.contrib.layers.l2_regularizer(l2_penalty))
      cnn_outputs.append(tf.einsum("ijk,kl->ijl", sub_input, sub_filter))

    cnn_output = tf.concat(cnn_outputs, axis=2)
    cnn_output = slim.batch_norm(
        cnn_output,
        center=True,
        scale=True,
        is_training=FLAGS.is_training,
        scope=sub_scope+"cluster_bn")
    return cnn_output
mobilenet_v1.py (project: HandDetection, author: YunqiuXu)
def mobilenet_v1_arg_scope(is_training=True,
                           stddev=0.09):
  batch_norm_params = {
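      # Note: BN is deliberately frozen ('is_training' and 'trainable' stay False); the is_training argument only toggles whether the conv weights below are trainable.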
      'is_training': False,
      'center': True,
      'scale': True,
      'decay': 0.9997,
      'epsilon': 0.001,
      'trainable': False,
  }

  # Set weight_decay for weights in Conv and DepthSepConv layers.
  weights_init = tf.truncated_normal_initializer(stddev=stddev)
  regularizer = tf.contrib.layers.l2_regularizer(cfg.MOBILENET.WEIGHT_DECAY)
  if cfg.MOBILENET.REGU_DEPTH:
    depthwise_regularizer = regularizer
  else:
    depthwise_regularizer = None

  with slim.arg_scope([slim.conv2d, slim.separable_conv2d],
                      trainable=is_training,
                      weights_initializer=weights_init,
                      activation_fn=tf.nn.relu6, 
                      normalizer_fn=slim.batch_norm,
                      padding='SAME'):
    with slim.arg_scope([slim.batch_norm], **batch_norm_params):
      with slim.arg_scope([slim.conv2d], weights_regularizer=regularizer):
        with slim.arg_scope([slim.separable_conv2d],
                            weights_regularizer=depthwise_regularizer) as sc:
          return sc
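
Typical use of the returned scope (the input tensor `images` is an assumption for illustration):

with slim.arg_scope(mobilenet_v1_arg_scope(is_training=True)):
  net = slim.conv2d(images, 32, [3, 3], stride=2, scope='conv1')  # picks up the frozen-BN and regularizer defaults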

