Python arg_scope() usage examples

vae.py (project: TensorFlow-ADGM, author: dancsalo)
def create_architecture(self, mode, tag=None):

        training = mode == 'TRAIN'
        testing = mode == 'TEST'

        assert tag is not None

        # handle most of the regularizers here
        weights_regularizer = tf.contrib.layers.l2_regularizer(cfg.TRAIN.WEIGHT_DECAY)
        biases_regularizer = weights_regularizer

        # list as many types of layers as possible, even if they are not used now
        with arg_scope([slim.conv2d, slim.conv2d_in_plane,
                        slim.conv2d_transpose, slim.separable_conv2d, slim.fully_connected],
                       weights_regularizer=weights_regularizer,
                       biases_regularizer=biases_regularizer,
                       biases_initializer=tf.constant_initializer(0.0)):
            self.build_network()

        elbo = self.add_losses()
        self._summary_op = tf.summary.merge_all()
        return elbo
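
Both create_architecture snippets in this listing rely on the same mechanism: every layer type named in the arg_scope list inherits the keyword defaults. A minimal self-contained sketch (assuming TensorFlow 1.x with slim = tf.contrib.slim; the placeholder shape and the 1e-4 decay standing in for cfg.TRAIN.WEIGHT_DECAY are illustrative, not from the project):

import tensorflow as tf
slim = tf.contrib.slim  # TF 1.x only

images = tf.placeholder(tf.float32, [None, 28, 28, 1])
weights_regularizer = tf.contrib.layers.l2_regularizer(1e-4)  # stand-in for cfg.TRAIN.WEIGHT_DECAY

with slim.arg_scope([slim.conv2d, slim.fully_connected],
                    weights_regularizer=weights_regularizer,
                    biases_regularizer=weights_regularizer,
                    biases_initializer=tf.constant_initializer(0.0)):
    # every conv2d / fully_connected created here inherits the defaults above
    net = slim.conv2d(images, 32, [3, 3], scope='conv1')
    net = slim.flatten(net)
    logits = slim.fully_connected(net, 10, activation_fn=None, scope='logits')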
utils.py (project: CEVAE, author: AMLab-Amsterdam)
def fc_net(inp, layers, out_layers, scope, lamba=1e-3, activation=tf.nn.relu, reuse=None,
           weights_initializer=initializers.xavier_initializer(uniform=False)):
    with slim.arg_scope([slim.fully_connected],
                        activation_fn=activation,
                        normalizer_fn=None,
                        weights_initializer=weights_initializer,
                        reuse=reuse,
                        weights_regularizer=slim.l2_regularizer(lamba)):

        if layers:
            h = slim.stack(inp, slim.fully_connected, layers, scope=scope)
            if not out_layers:
                return h
        else:
            h = inp
        outputs = []
        for i, (outdim, activation) in enumerate(out_layers):
            o1 = slim.fully_connected(h, outdim, activation_fn=activation, scope=scope + '_{}'.format(i + 1))
            outputs.append(o1)
        return outputs if len(outputs) > 1 else outputs[0]
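
A hypothetical call showing the two return shapes (a single tensor when out_layers is empty, a list with one tensor per output head otherwise); the layer widths are invented:

import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 25])

# Hidden stack only (empty out_layers): returns a single tensor.
h = fc_net(x, layers=[200, 200], out_layers=[], scope='rep')

# Two heads with different activations: returns a list [mu, sigma].
mu, sigma = fc_net(h, layers=[100],
                   out_layers=[(1, None), (1, tf.nn.softplus)],
                   scope='post')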
network_base.py (project: tf-openpose, author: ildoonet)
def separable_conv(self, inputs, k_h, k_w, c_o, stride, name, relu=True):
        with slim.arg_scope([slim.batch_norm], fused=common.batchnorm_fused):
            output = slim.separable_convolution2d(inputs,
                                                  num_outputs=None,
                                                  stride=stride,
                                                  trainable=self.trainable,
                                                  depth_multiplier=1.0,
                                                  kernel_size=[k_h, k_w],
                                                  activation_fn=None,
                                                  weights_initializer=tf.contrib.layers.xavier_initializer(),
                                                  # weights_initializer=tf.truncated_normal_initializer(stddev=0.09),
                                                  weights_regularizer=tf.contrib.layers.l2_regularizer(0.00004),
                                                  biases_initializer=None,
                                                  padding=DEFAULT_PADDING,
                                                  scope=name + '_depthwise')

            output = slim.convolution2d(output,
                                        c_o,
                                        stride=1,
                                        kernel_size=[1, 1],
                                        activation_fn=tf.nn.relu if relu else None,
                                        weights_initializer=tf.contrib.layers.xavier_initializer(),
                                        # weights_initializer=tf.truncated_normal_initializer(stddev=0.09),
                                        biases_initializer=slim.init_ops.zeros_initializer(),
                                        normalizer_fn=slim.batch_norm,
                                        trainable=self.trainable,
                                        weights_regularizer=tf.contrib.layers.l2_regularizer(common.regularizer_dsconv),
                                        # weights_regularizer=None,
                                        scope=name + '_pointwise')

        return output
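
As a standalone sketch of the same depthwise-then-pointwise pattern (hypothetical shapes and channel counts; passing num_outputs=None to slim.separable_conv2d performs only the depthwise step, mirroring the code above):

import tensorflow as tf
slim = tf.contrib.slim

inputs = tf.placeholder(tf.float32, [None, 56, 56, 128])
with slim.arg_scope([slim.batch_norm], fused=True, is_training=False):
    # depthwise 3x3: one filter per input channel, no channel mixing
    depthwise = slim.separable_conv2d(inputs, num_outputs=None,
                                      kernel_size=[3, 3], depth_multiplier=1,
                                      activation_fn=None, scope='dw')
    # pointwise 1x1: mixes channels, batch-normalized
    pointwise = slim.conv2d(depthwise, 256, [1, 1],
                            normalizer_fn=slim.batch_norm, scope='pw')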
resnet_v1.py (project: HandDetection, author: YunqiuXu)
def resnet_arg_scope(is_training=True,
                     batch_norm_decay=0.997,
                     batch_norm_epsilon=1e-5,
                     batch_norm_scale=True):
  batch_norm_params = {
    'is_training': False,
    'decay': batch_norm_decay,
    'epsilon': batch_norm_epsilon,
    'scale': batch_norm_scale,
    'trainable': False,
    'updates_collections': tf.GraphKeys.UPDATE_OPS
  }

  with arg_scope(
      [slim.conv2d],
      weights_regularizer=slim.l2_regularizer(cfg.TRAIN.WEIGHT_DECAY),
      weights_initializer=slim.variance_scaling_initializer(),
      trainable=is_training,
      activation_fn=tf.nn.relu,
      normalizer_fn=slim.batch_norm,
      normalizer_params=batch_norm_params):
    with arg_scope([slim.batch_norm], **batch_norm_params) as arg_sc:
      return arg_sc
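
The returned scope is typically re-entered later with slim.arg_scope, so conv layers built elsewhere pick up these defaults (note the batch-norm statistics stay frozen regardless of is_training). A hedged sketch, assuming resnet_arg_scope and its cfg dependency are importable:

import tensorflow as tf
slim = tf.contrib.slim

images = tf.placeholder(tf.float32, [None, 224, 224, 3])
with slim.arg_scope(resnet_arg_scope(is_training=True)):
    # conv layers created here pick up the regularizer, initializer,
    # and frozen batch-norm settings defined above
    net = slim.conv2d(images, 64, [7, 7], stride=2, scope='conv1')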
mobilenet_v1.py (project: HandDetection, author: YunqiuXu)
def _image_to_head(self, is_training, reuse=False):
    # Base bottleneck
    assert (0 <= cfg.MOBILENET.FIXED_LAYERS <= 12)
    net_conv = self._image
    if cfg.MOBILENET.FIXED_LAYERS > 0:
      with slim.arg_scope(mobilenet_v1_arg_scope(is_training=False)):
        net_conv = mobilenet_v1_base(net_conv,
                                      _CONV_DEFS[:cfg.MOBILENET.FIXED_LAYERS],
                                      starting_layer=0,
                                      depth_multiplier=self._depth_multiplier,
                                      reuse=reuse,
                                      scope=self._scope)
    if cfg.MOBILENET.FIXED_LAYERS < 12:
      with slim.arg_scope(mobilenet_v1_arg_scope(is_training=is_training)):
        net_conv = mobilenet_v1_base(net_conv,
                                      _CONV_DEFS[cfg.MOBILENET.FIXED_LAYERS:12],
                                      starting_layer=cfg.MOBILENET.FIXED_LAYERS,
                                      depth_multiplier=self._depth_multiplier,
                                      reuse=reuse,
                                      scope=self._scope)

    self._act_summaries.append(net_conv)
    self._layers['head'] = net_conv

    return net_conv
cpm_body_slim.py (project: convolutional-pose-machines-tensorflow, author: timctho)
def _middle_conv(self, stage):
        with tf.variable_scope('stage_' + str(stage)):
            self.current_featuremap = tf.concat([self.stage_heatmap[stage-2],
                                                 self.sub_stage_img_feature,
                                                 self.center_map],
                                                axis=3)
            with slim.arg_scope([slim.conv2d],
                                padding='SAME',
                                activation_fn=tf.nn.relu,
                                weights_initializer=tf.contrib.layers.xavier_initializer()):
                mid_net = slim.conv2d(self.current_featuremap, 128, [7, 7], scope='mid_conv1')
                mid_net = slim.conv2d(mid_net, 128, [7, 7], scope='mid_conv2')
                mid_net = slim.conv2d(mid_net, 128, [7, 7], scope='mid_conv3')
                mid_net = slim.conv2d(mid_net, 128, [7, 7], scope='mid_conv4')
                mid_net = slim.conv2d(mid_net, 128, [7, 7], scope='mid_conv5')
                mid_net = slim.conv2d(mid_net, 128, [1, 1], scope='mid_conv6')
                self.current_heatmap = slim.conv2d(mid_net, self.joints, [1, 1],
                                                   scope='mid_conv7')
                self.stage_heatmap.append(self.current_heatmap)
cpm_hand_slim.py (project: convolutional-pose-machines-tensorflow, author: timctho)
def _middle_conv(self, stage):
        with tf.variable_scope('stage_' + str(stage)):
            self.current_featuremap = tf.concat([self.stage_heatmap[stage-2],
                                                 self.sub_stage_img_feature,
                                                 # self.center_map,
                                                 ],
                                                axis=3)
            with slim.arg_scope([slim.conv2d],
                                padding='SAME',
                                activation_fn=tf.nn.relu,
                                weights_initializer=tf.contrib.layers.xavier_initializer()):
                mid_net = slim.conv2d(self.current_featuremap, 128, [7, 7], scope='mid_conv1')
                mid_net = slim.conv2d(mid_net, 128, [7, 7], scope='mid_conv2')
                mid_net = slim.conv2d(mid_net, 128, [7, 7], scope='mid_conv3')
                mid_net = slim.conv2d(mid_net, 128, [7, 7], scope='mid_conv4')
                mid_net = slim.conv2d(mid_net, 128, [7, 7], scope='mid_conv5')
                mid_net = slim.conv2d(mid_net, 128, [1, 1], scope='mid_conv6')
                self.current_heatmap = slim.conv2d(mid_net, self.joints, [1, 1],
                                                   scope='mid_conv7')
                self.stage_heatmap.append(self.current_heatmap)
model_cifar10.py (project: easy-tensorflow, author: khanhptnk)
def arg_scope(self):
    """Configure the neural network's layers."""
    batch_norm_params = {
      "is_training" : self.is_training,
      "decay" : 0.9997,
      "epsilon" : 0.001,
      "variables_collections" : {
        "beta" : None,
        "gamma" : None,
        "moving_mean" : ["moving_vars"],
        "moving_variance" : ["moving_vars"]
      }
    }

    with slim.arg_scope([slim.conv2d, slim.fully_connected],
                        weights_initializer=tf.truncated_normal_initializer(
                          stddev=self._hparams.init_stddev),
                        weights_regularizer=slim.l2_regularizer(
                          self._hparams.regularize_constant),
                        activation_fn=tf.nn.relu,
                        normalizer_fn=slim.batch_norm,
                        normalizer_params=batch_norm_params) as sc:
      return sc
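
Presumably this method is consumed like any other scope factory. A sketch (model stands for a hypothetical instance of this class, and the layers below are illustrative):

import tensorflow as tf
slim = tf.contrib.slim

images = tf.placeholder(tf.float32, [None, 32, 32, 3])
with slim.arg_scope(model.arg_scope()):
    net = slim.conv2d(images, 64, [5, 5], scope='conv1')
    net = slim.flatten(slim.max_pool2d(net, [2, 2], scope='pool1'))
    # final layer opts out of the scoped batch norm and ReLU
    logits = slim.fully_connected(net, 10, activation_fn=None,
                                  normalizer_fn=None, scope='logits')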
adversary.py (project: taskcv-2017-public, author: VisionLearningGroup)
def adversarial_discriminator(net, layers, scope='adversary', leaky=False):
    if leaky:
        activation_fn = tflearn.activations.leaky_relu
    else:
        activation_fn = tf.nn.relu
    with ExitStack() as stack:
        stack.enter_context(tf.variable_scope(scope))
        stack.enter_context(
            slim.arg_scope(
                [slim.fully_connected],
                activation_fn=activation_fn,
                weights_regularizer=slim.l2_regularizer(2.5e-5)))
        for dim in layers:
            net = slim.fully_connected(net, dim)
        net = slim.fully_connected(net, 2, activation_fn=None)
    return net
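
ExitStack here simply flattens what would otherwise be two nested with blocks. A hypothetical call (the feature width and hidden sizes are invented):

import tensorflow as tf

features = tf.placeholder(tf.float32, [None, 256])
domain_logits = adversarial_discriminator(features, layers=[500, 500],
                                          scope='adversary', leaky=True)
# domain_logits has shape [None, 2]: one logit per domain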
vgg_16_fcn8s.py (project: taskcv-2017-public, author: VisionLearningGroup)
def vgg_arg_scope(weight_decay=0.0005):
  """Defines the VGG arg scope.

  Args:
    weight_decay: The l2 regularization coefficient.

  Returns:
    An arg_scope.
  """
  with slim.arg_scope([slim.conv2d, slim.fully_connected],
                      activation_fn=tf.nn.relu,
                      weights_regularizer=slim.l2_regularizer(weight_decay),
                      biases_initializer=tf.zeros_initializer()):
      with slim.arg_scope([slim.conv2d], padding='SAME') as arg_sc:
          with slim.arg_scope([slim.max_pool2d], padding='SAME') as arg_sc:
              return arg_sc
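
Note that nested slim.arg_scope calls accumulate, so the innermost arg_sc carries all three levels despite the repeated variable name. Hypothetical usage:

import tensorflow as tf
slim = tf.contrib.slim

images = tf.placeholder(tf.float32, [None, 224, 224, 3])
with slim.arg_scope(vgg_arg_scope(weight_decay=0.0005)):
    net = slim.conv2d(images, 64, [3, 3], scope='conv1_1')  # ReLU + SAME
    net = slim.max_pool2d(net, [2, 2], scope='pool1')       # SAME from scope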
fpn.py (project: canshi, author: hungsing92)
def _extra_conv_arg_scope_with_bn(weight_decay=0.00001,
                                  activation_fn=None,
                                  batch_norm_decay=0.997,
                                  batch_norm_epsilon=1e-5,
                                  batch_norm_scale=True):

  batch_norm_params = {
      'decay': batch_norm_decay,
      'epsilon': batch_norm_epsilon,
      'scale': batch_norm_scale,
      'updates_collections': tf.GraphKeys.UPDATE_OPS,
  }

  with slim.arg_scope(
      [slim.conv2d],
      weights_regularizer=slim.l2_regularizer(weight_decay),
      weights_initializer=slim.variance_scaling_initializer(),
      activation_fn=tf.nn.relu,
      normalizer_fn=slim.batch_norm,
      normalizer_params=batch_norm_params):
    with slim.arg_scope([slim.batch_norm], **batch_norm_params):
      with slim.arg_scope([slim.max_pool2d], padding='SAME') as arg_sc:
        return arg_sc
fpn.py (project: canshi, author: hungsing92)
def _extra_conv_arg_scope(weight_decay=0.00001, activation_fn=None, normalizer_fn=None):

  with slim.arg_scope(
      [slim.conv2d, slim.conv2d_transpose],
      padding='SAME',
      weights_regularizer=slim.l2_regularizer(weight_decay),
      weights_initializer=tf.truncated_normal_initializer(stddev=0.001),
      activation_fn=activation_fn,
      normalizer_fn=normalizer_fn,) as arg_sc:
    with slim.arg_scope(
        [slim.fully_connected],
        weights_regularizer=slim.l2_regularizer(weight_decay),
        weights_initializer=tf.truncated_normal_initializer(stddev=0.001),
        activation_fn=activation_fn,
        normalizer_fn=normalizer_fn) as arg_sc:
      return arg_sc
text_classification_train.py (project: kaggle_redefining_cancer_treatment, author: jorgemf)
def model(self, input_text_begin, input_text_end, gene, variation, expected_labels, batch_size,
              vocabulary_size=VOCABULARY_SIZE, embeddings_size=EMBEDDINGS_SIZE, output_classes=9):
        # embeddings
        embeddings = _load_embeddings(vocabulary_size, embeddings_size)
        # model
        with slim.arg_scope(self.text_classification_model.model_arg_scope()):
            outputs = self.text_classification_model.model(input_text_begin, input_text_end,
                                                           gene, variation, output_classes,
                                                           embeddings=embeddings,
                                                           batch_size=batch_size,
                                                           training=False)
        # loss
        targets = self.text_classification_model.targets(expected_labels, output_classes)
        loss = self.text_classification_model.loss(targets, outputs)
        self.accumulated_loss = tf.Variable(0.0, dtype=tf.float32, name='accumulated_loss',
                                            trainable=False)
        self.accumulated_loss = tf.assign_add(self.accumulated_loss, loss)
        step = tf.Variable(0, dtype=tf.int32, name='eval_step', trainable=False)
        step_increase = tf.assign_add(step, 1)
        self.loss = self.accumulated_loss / tf.cast(step_increase, dtype=tf.float32)
        tf.summary.scalar('loss', self.loss)
        # metrics
        self.metrics = metrics.single_label(outputs['prediction'], targets, moving_average=False)
        return None
text_classification_train.py (project: kaggle_redefining_cancer_treatment, author: jorgemf)
def model(self, input_text_begin, input_text_end, gene, variation, batch_size,
              vocabulary_size=VOCABULARY_SIZE, embeddings_size=EMBEDDINGS_SIZE, output_classes=9):
        # embeddings
        embeddings = _load_embeddings(vocabulary_size, embeddings_size)
        # global step
        self.global_step = training_util.get_or_create_global_step()
        self.global_step = tf.assign_add(self.global_step, 1)
        # model
        with tf.control_dependencies([self.global_step]):
            with slim.arg_scope(self.text_classification_model.model_arg_scope()):
                self.outputs = self.text_classification_model.model(input_text_begin, input_text_end,
                                                                    gene, variation, output_classes,
                                                                    embeddings=embeddings,
                                                                    batch_size=batch_size,
                                                                    training=False)
        # restore only the trainable variables
        self.saver = tf.train.Saver(var_list=tf_variables.trainable_variables())
        return self.outputs
adgm.py (project: TensorFlow-ADGM, author: dancsalo)
def create_architecture(self, mode, tag=None):

        training = mode == 'TRAIN'
        testing = mode == 'TEST'

        assert tag is not None

        # handle most of the regularizers here
        weights_regularizer = tf.contrib.layers.l2_regularizer(cfg.TRAIN.WEIGHT_DECAY)
        biases_regularizer = weights_regularizer

        # list as many types of layers as possible, even if they are not used now
        with arg_scope([slim.conv2d, slim.conv2d_in_plane,
                        slim.conv2d_transpose, slim.separable_conv2d, slim.fully_connected],
                       weights_regularizer=weights_regularizer,
                       biases_regularizer=biases_regularizer,
                       biases_initializer=tf.constant_initializer(0.0)):
            self.build_network()

        elbo = self.add_losses()
        self._summary_op = tf.summary.merge_all()
        return elbo
squeezenet_model.py (project: tensorflow_face, author: ZhihengCV)
def squeezenet(inputs,
               num_classes=1000,
               is_training=True,
               keep_prob=0.5,
               spatial_squeeze=True,
               scope='squeeze'):
    """
    squeezenetv1.1
    """
    with tf.name_scope(scope, 'squeeze', [inputs]) as sc:
        end_points_collection = sc + '_end_points'
        # Collect outputs for conv2d, fully_connected and max_pool2d.
        with slim.arg_scope([slim.conv2d, slim.max_pool2d,
                             slim.avg_pool2d, fire_module],
                            outputs_collections=end_points_collection):
            nets = squeezenet_inference(inputs, is_training, keep_prob)
            nets = slim.conv2d(nets, num_classes, [1, 1],
                               activation_fn=None,
                               normalizer_fn=None,
                               scope='logits')
            end_points = slim.utils.convert_collection_to_dict(end_points_collection)
            if spatial_squeeze:
                nets = tf.squeeze(nets, [1, 2], name='logits/squeezed')
            return nets, end_points
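
The collected end points can then be looked up by name, which is the main reason for the outputs_collections argument. A sketch, assuming squeezenet_inference and fire_module from the same file are in scope:

import tensorflow as tf

images = tf.placeholder(tf.float32, [None, 224, 224, 3])
logits, end_points = squeezenet(images, num_classes=1000, is_training=False)
# inspect every intermediate activation captured by the collection
for name, tensor in sorted(end_points.items()):
    print(name, tensor.get_shape().as_list())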
monodepth_model.py (project: supic, author: Hirico)
def build_model(self):
        with slim.arg_scope([slim.conv2d, slim.conv2d_transpose], activation_fn=tf.nn.elu):
            with tf.variable_scope('model', reuse=self.reuse_variables):

                self.left_pyramid  = self.scale_pyramid(self.left,  4)
                if self.mode == 'train':
                    self.right_pyramid = self.scale_pyramid(self.right, 4)

                if self.params.do_stereo:
                    self.model_input = tf.concat([self.left, self.right], 3)
                else:
                    self.model_input = self.left

                #build model
                if self.params.encoder == 'vgg':
                    self.build_vgg()
                elif self.params.encoder == 'resnet50':
                    self.build_resnet50()
                else:
                    return None
pascal2007_segmentation_example.py (project: antgo, author: jianzfb)
def gcn_block(inputs,
              num_class,
              kernel_size,
              scope=None):
  with tf.variable_scope(scope, 'gcn_block', [inputs]):
    with slim.arg_scope([slim.conv2d],
                        padding='SAME',
                        activation_fn=None,
                        normalizer_fn=None,
                        normalizer_params=None,
                        weights_initializer=tf.contrib.layers.xavier_initializer(),
                        weights_regularizer=tf.contrib.layers.l2_regularizer(0.0001),
                        biases_initializer=tf.zeros_initializer(),
                        biases_regularizer=tf.contrib.layers.l2_regularizer(0.0002)):
      left_conv1 = slim.conv2d(inputs, num_class, [kernel_size, 1])
      left_conv2 = slim.conv2d(left_conv1, num_class, [1, kernel_size])

      right_conv1 = slim.conv2d(inputs, num_class, [1, kernel_size])
      right_conv2 = slim.conv2d(right_conv1, num_class, [kernel_size, 1])

      result_sum = tf.add(left_conv2, right_conv2, name='gcn_module')
      return result_sum
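
A hypothetical call. The two factorized 1-D convolutions approximate a large square kernel at much lower cost (2k weights per channel pair instead of k^2 for kernel size k); the class count and feature shape below are invented:

import tensorflow as tf

feature_map = tf.placeholder(tf.float32, [None, 32, 32, 512])
# kernel_size=15 gives a 15x15 receptive field from two 1-D convolutions
gcn_out = gcn_block(feature_map, num_class=21, kernel_size=15, scope='gcn_1')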
pascal2007_segmentation_example.py (project: antgo, author: jianzfb)
def gcn_br(inputs, scope):
  with tf.variable_scope(scope, 'gcn_br', [inputs]):
    with slim.arg_scope([slim.conv2d],
                        padding='SAME',
                        activation_fn=tf.nn.relu,
                        normalizer_fn=None,
                        normalizer_params=None,
                        weights_initializer=tf.contrib.layers.xavier_initializer(),
                        weights_regularizer=tf.contrib.layers.l2_regularizer(0.0001),
                        biases_initializer=tf.zeros_initializer(),
                        biases_regularizer=tf.contrib.layers.l2_regularizer(0.0002)):
      num_class = inputs.get_shape()[3]
      conv = slim.conv2d(inputs, num_class, [3, 3])
      conv = slim.conv2d(conv, num_class, [3, 3], activation_fn=None)
      result_sum = tf.add(inputs, conv, name='fcn_br')
      return result_sum
inception_resnet_v2.py (project: faceNet_RealTime, author: jack55436001)
def inference(images, keep_probability, phase_train=True, 
              bottleneck_layer_size=128, weight_decay=0.0, reuse=None):
    batch_norm_params = {
        # Decay for the moving averages.
        'decay': 0.995,
        # epsilon to prevent 0s in variance.
        'epsilon': 0.001,
        # force in-place updates of mean and variance estimates
        'updates_collections': None,
        # Moving averages ends up in the trainable variables collection
        'variables_collections': [ tf.GraphKeys.TRAINABLE_VARIABLES ],
    }
    with slim.arg_scope([slim.conv2d, slim.fully_connected],
                        weights_initializer=tf.truncated_normal_initializer(stddev=0.1),
                        weights_regularizer=slim.l2_regularizer(weight_decay),
                        normalizer_fn=slim.batch_norm,
                        normalizer_params=batch_norm_params):
        return inception_resnet_v2(images, is_training=phase_train,
              dropout_keep_prob=keep_probability, bottleneck_layer_size=bottleneck_layer_size, reuse=reuse)
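
Presumably used to produce face embeddings. A hedged sketch that assumes the facenet-style inception_resnet_v2, which returns a (net, end_points) tuple:

import tensorflow as tf

images = tf.placeholder(tf.float32, [None, 160, 160, 3])
prelogits, _ = inference(images, keep_probability=0.8, phase_train=False,
                         bottleneck_layer_size=128, weight_decay=5e-4)
# l2-normalize the bottleneck to get unit-length embeddings
embeddings = tf.nn.l2_normalize(prelogits, 1, name='embeddings')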
inception_resnet_v1.py (project: faceNet_RealTime, author: jack55436001)
def inference(images, keep_probability, phase_train=True, 
              bottleneck_layer_size=128, weight_decay=0.0, reuse=None):
    batch_norm_params = {
        # Decay for the moving averages.
        'decay': 0.995,
        # epsilon to prevent 0s in variance.
        'epsilon': 0.001,
        # force in-place updates of mean and variance estimates
        'updates_collections': None,
        # Moving averages ends up in the trainable variables collection
        'variables_collections': [ tf.GraphKeys.TRAINABLE_VARIABLES ],
    }

    with slim.arg_scope([slim.conv2d, slim.fully_connected],
                        weights_initializer=tf.truncated_normal_initializer(stddev=0.1),
                        weights_regularizer=slim.l2_regularizer(weight_decay),
                        normalizer_fn=slim.batch_norm,
                        normalizer_params=batch_norm_params):
        return inception_resnet_v1(images, is_training=phase_train,
              dropout_keep_prob=keep_probability, bottleneck_layer_size=bottleneck_layer_size, reuse=reuse)
dfc_vae_resnet.py (project: faceNet_RealTime, author: jack55436001)
def encoder(self, images, is_training):
        activation_fn = leaky_relu  # tf.nn.relu
        weight_decay = 0.0
        with tf.variable_scope('encoder'):
            with slim.arg_scope([slim.batch_norm],
                                is_training=is_training):
                with slim.arg_scope([slim.conv2d, slim.fully_connected],
                                    weights_initializer=tf.truncated_normal_initializer(stddev=0.1),
                                    weights_regularizer=slim.l2_regularizer(weight_decay),
                                    normalizer_fn=slim.batch_norm,
                                    normalizer_params=self.batch_norm_params):
                    net = images

                    net = slim.conv2d(net, 32, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_1a')
                    net = slim.repeat(net, 3, conv2d_block, 0.1, 32, [4, 4], 1, activation_fn=activation_fn, scope='Conv2d_1b')

                    net = slim.conv2d(net, 64, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_2a')
                    net = slim.repeat(net, 3, conv2d_block, 0.1, 64, [4, 4], 1, activation_fn=activation_fn, scope='Conv2d_2b')

                    net = slim.conv2d(net, 128, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_3a')
                    net = slim.repeat(net, 3, conv2d_block, 0.1, 128, [4, 4], 1, activation_fn=activation_fn, scope='Conv2d_3b')

                    net = slim.conv2d(net, 256, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_4a')
                    net = slim.repeat(net, 3, conv2d_block, 0.1, 256, [4, 4], 1, activation_fn=activation_fn, scope='Conv2d_4b')

                    net = slim.flatten(net)
                    fc1 = slim.fully_connected(net, self.latent_variable_dim, activation_fn=None, normalizer_fn=None, scope='Fc_1')
                    fc2 = slim.fully_connected(net, self.latent_variable_dim, activation_fn=None, normalizer_fn=None, scope='Fc_2')
        return fc1, fc2
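
The two heads are presumably the mean and log-variance of the latent Gaussian. A hedged sketch of the usual reparameterization step, assuming vae is an instance of this class and images is an input batch:

import tensorflow as tf

images = tf.placeholder(tf.float32, [None, 64, 64, 3])
mean, log_variance = vae.encoder(images, is_training=True)
epsilon = tf.random_normal(tf.shape(mean))
# reparameterization trick: z = mu + sigma * eps
latent = mean + tf.exp(0.5 * log_variance) * epsilon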
dfc_vae.py (project: faceNet_RealTime, author: jack55436001)
def encoder(self, images, is_training):
        activation_fn = leaky_relu  # tf.nn.relu
        weight_decay = 0.0
        with tf.variable_scope('encoder'):
            with slim.arg_scope([slim.batch_norm],
                                is_training=is_training):
                with slim.arg_scope([slim.conv2d, slim.fully_connected],
                                    weights_initializer=tf.truncated_normal_initializer(stddev=0.1),
                                    weights_regularizer=slim.l2_regularizer(weight_decay),
                                    normalizer_fn=slim.batch_norm,
                                    normalizer_params=self.batch_norm_params):
                    net = slim.conv2d(images, 32, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_1')
                    net = slim.conv2d(net, 64, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_2')
                    net = slim.conv2d(net, 128, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_3')
                    net = slim.conv2d(net, 256, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_4')
                    net = slim.flatten(net)
                    fc1 = slim.fully_connected(net, self.latent_variable_dim, activation_fn=None, normalizer_fn=None, scope='Fc_1')
                    fc2 = slim.fully_connected(net, self.latent_variable_dim, activation_fn=None, normalizer_fn=None, scope='Fc_2')
        return fc1, fc2
dfc_vae_large.py (project: faceNet_RealTime, author: jack55436001)
def encoder(self, images, is_training):
        activation_fn = leaky_relu  # tf.nn.relu
        weight_decay = 0.0
        with tf.variable_scope('encoder'):
            with slim.arg_scope([slim.batch_norm],
                                is_training=is_training):
                with slim.arg_scope([slim.conv2d, slim.fully_connected],
                                    weights_initializer=tf.truncated_normal_initializer(stddev=0.1),
                                    weights_regularizer=slim.l2_regularizer(weight_decay),
                                    normalizer_fn=slim.batch_norm,
                                    normalizer_params=self.batch_norm_params):
                    net = slim.conv2d(images, 32, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_1')
                    net = slim.conv2d(net, 64, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_2')
                    net = slim.conv2d(net, 128, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_3')
                    net = slim.conv2d(net, 256, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_4')
                    net = slim.conv2d(net, 512, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_5')
                    net = slim.flatten(net)
                    fc1 = slim.fully_connected(net, self.latent_variable_dim, activation_fn=None, normalizer_fn=None, scope='Fc_1')
                    fc2 = slim.fully_connected(net, self.latent_variable_dim, activation_fn=None, normalizer_fn=None, scope='Fc_2')
        return fc1, fc2
inception_resnet_v2_original.py (project: tf_face, author: ZhijianChan)
def inference(inputs, keep_prob,
              bottleneck_size=128,
              phase_train=True,
              weight_decay=0.0,
              reuse=None):
    batch_norm_params = {
        'decay': 0.995,
        'epsilon': 0.001,
        'updates_collections': None,
        # 'scale': True,  # [test1]
        'variables_collections': [tf.GraphKeys.TRAINABLE_VARIABLES]}  # [test2: removed from 'trainable_variables']
    with slim.arg_scope(
            [slim.conv2d, slim.fully_connected],
            weights_initializer=tf.truncated_normal_initializer(stddev=0.1),
            weights_regularizer=slim.l2_regularizer(weight_decay),
            biases_regularizer=slim.l2_regularizer(weight_decay),  # [test4: add weight_decay to biases]):
            normalizer_fn=slim.batch_norm,
            normalizer_params=batch_norm_params):
        return inception_resnet_v2(
            inputs,
            is_training=phase_train,
            keep_prob=keep_prob,
            bottleneck_size=bottleneck_size,
            reuse=reuse)
inception_resnet_v2_modified.py (project: tf_face, author: ZhijianChan)
def inference(inputs, keep_prob,
              bottleneck_size=128,
              phase_train=True,
              weight_decay=0.0,
              reuse=None):
    batch_norm_params = {
        'decay': 0.995,
        'epsilon': 0.001,
        'updates_collections': None,
        # 'scale': True}  # [test1: add 'gamma']
        'variables_collections': [tf.GraphKeys.TRAINABLE_VARIABLES]}  # [test2: removed from 'trainable_variables']
    with slim.arg_scope(
            [slim.conv2d, slim.fully_connected],
            weights_initializer=tf.truncated_normal_initializer(stddev=0.1),
            weights_regularizer=slim.l2_regularizer(weight_decay),
            biases_regularizer=slim.l2_regularizer(weight_decay),
            normalizer_fn=slim.batch_norm,
            normalizer_params=batch_norm_params):  # [test4: add weight_decay to biases]):
        return inception_resnet_v2(
            inputs,
            is_training=phase_train,
            keep_prob=keep_prob,
            bottleneck_size=bottleneck_size,
            reuse=reuse)
inception_resnet_v1.py (project: tf_face, author: ZhijianChan)
def inference(inputs, keep_prob,
              bottleneck_size=128,
              phase_train=True,
              weight_decay=0.0,
              reuse=None):
    batch_norm_params = {
        'decay': 0.995,
        'epsilon': 0.001,
        'updates_collections': None,
        # 'scale': True,  # [test1]
        'variables_collections': [tf.GraphKeys.TRAINABLE_VARIABLES]}  # [test2]
    with slim.arg_scope(
            [slim.conv2d, slim.fully_connected],
            weights_initializer=tf.truncated_normal_initializer(stddev=0.1),
            weights_regularizer=slim.l2_regularizer(weight_decay),
            normalizer_fn=slim.batch_norm,
            normalizer_params=batch_norm_params):
        return inception_resnet_v1(inputs, is_training=phase_train, keep_prob=keep_prob,
                                   bottleneck_size=bottleneck_size, reuse=reuse)
pyramid_network.py (project: Master-R-CNN, author: Mark110)
def _extra_conv_arg_scope_with_bn(weight_decay=0.00001,
                                  activation_fn=None,
                                  batch_norm_decay=0.997,
                                  batch_norm_epsilon=1e-5,
                                  batch_norm_scale=True):

  batch_norm_params = {
      'decay': batch_norm_decay,
      'epsilon': batch_norm_epsilon,
      'scale': batch_norm_scale,
      'updates_collections': tf.GraphKeys.UPDATE_OPS_EXTRA,
  }

  with slim.arg_scope(
      [slim.conv2d],
      weights_regularizer=slim.l2_regularizer(weight_decay),
      weights_initializer=slim.variance_scaling_initializer(),
      activation_fn=tf.nn.relu,
      normalizer_fn=slim.batch_norm,
      normalizer_params=batch_norm_params):
    with slim.arg_scope([slim.batch_norm], **batch_norm_params):
      with slim.arg_scope([slim.max_pool2d], padding='SAME') as arg_sc:
        return arg_sc
pyramid_network.py (project: Master-R-CNN, author: Mark110)
def _extra_conv_arg_scope(weight_decay=0.00001, activation_fn=None, normalizer_fn=None):

  with slim.arg_scope(
      [slim.conv2d, slim.conv2d_transpose],
      padding='SAME',
      weights_regularizer=slim.l2_regularizer(weight_decay),
      weights_initializer=tf.truncated_normal_initializer(stddev=0.001),
      activation_fn=activation_fn,
      normalizer_fn=normalizer_fn,) as arg_sc:
    with slim.arg_scope(
        [slim.fully_connected],
        weights_regularizer=slim.l2_regularizer(weight_decay),
        weights_initializer=tf.truncated_normal_initializer(stddev=0.001),
        activation_fn=activation_fn,
        normalizer_fn=normalizer_fn) as arg_sc:
      return arg_sc
resnet_v1.py (project: tf-faster-rcnn, author: endernewton)
def resnet_arg_scope(is_training=True,
                     batch_norm_decay=0.997,
                     batch_norm_epsilon=1e-5,
                     batch_norm_scale=True):
  batch_norm_params = {
    'is_training': False,
    'decay': batch_norm_decay,
    'epsilon': batch_norm_epsilon,
    'scale': batch_norm_scale,
    'trainable': False,
    'updates_collections': tf.GraphKeys.UPDATE_OPS
  }

  with arg_scope(
      [slim.conv2d],
      weights_regularizer=slim.l2_regularizer(cfg.TRAIN.WEIGHT_DECAY),
      weights_initializer=slim.variance_scaling_initializer(),
      trainable=is_training,
      activation_fn=tf.nn.relu,
      normalizer_fn=slim.batch_norm,
      normalizer_params=batch_norm_params):
    with arg_scope([slim.batch_norm], **batch_norm_params) as arg_sc:
      return arg_sc
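
One property worth keeping in mind across all of these examples: arg_scope supplies defaults only, so any keyword passed explicitly at the call site wins. A minimal sketch:

import tensorflow as tf
slim = tf.contrib.slim

inputs = tf.placeholder(tf.float32, [None, 32, 32, 3])
with slim.arg_scope([slim.conv2d], activation_fn=tf.nn.relu, padding='SAME'):
    a = slim.conv2d(inputs, 16, [3, 3], scope='a')      # inherits ReLU, SAME
    b = slim.conv2d(a, 16, [3, 3], activation_fn=None,  # per-call overrides
                    padding='VALID', scope='b')         # always win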

