Example source code for Python's arg_scope()
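
All of the snippets below use TF-Slim's slim.arg_scope, which stores default keyword arguments for a list of layer functions so that shared settings (initializers, regularizers, padding, activations) are declared once instead of repeated on every call. A minimal, self-contained sketch of the mechanism (TensorFlow 1.x assumed; the layer sizes are illustrative and not taken from any project below):

import tensorflow as tf

slim = tf.contrib.slim

def simple_net(images):
    # Every conv2d inside this scope inherits the shared defaults.
    with slim.arg_scope([slim.conv2d],
                        padding='SAME',
                        activation_fn=tf.nn.relu,
                        weights_regularizer=slim.l2_regularizer(5e-4)):
        net = slim.conv2d(images, 32, [3, 3], scope='conv1')
        net = slim.conv2d(net, 64, [3, 3], scope='conv2')
        # A per-call keyword still overrides the scoped default.
        return slim.conv2d(net, 10, [1, 1], activation_fn=None, scope='logits')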

mobilenet_v1.py (project: tf-faster-rcnn, author: endernewton)
def _image_to_head(self, is_training, reuse=None):
    # Base bottleneck
    assert (0 <= cfg.MOBILENET.FIXED_LAYERS <= 12)
    net_conv = self._image
    if cfg.MOBILENET.FIXED_LAYERS > 0:
      with slim.arg_scope(mobilenet_v1_arg_scope(is_training=False)):
        net_conv = mobilenet_v1_base(net_conv,
                                      _CONV_DEFS[:cfg.MOBILENET.FIXED_LAYERS],
                                      starting_layer=0,
                                      depth_multiplier=self._depth_multiplier,
                                      reuse=reuse,
                                      scope=self._scope)
    if cfg.MOBILENET.FIXED_LAYERS < 12:
      with slim.arg_scope(mobilenet_v1_arg_scope(is_training=is_training)):
        net_conv = mobilenet_v1_base(net_conv,
                                      _CONV_DEFS[cfg.MOBILENET.FIXED_LAYERS:12],
                                      starting_layer=cfg.MOBILENET.FIXED_LAYERS,
                                      depth_multiplier=self._depth_multiplier,
                                      reuse=reuse,
                                      scope=self._scope)

    self._act_summaries.append(net_conv)
    self._layers['head'] = net_conv

    return net_conv
ssd.py (project: SSD_tensorflow_VOC, author: LevinJ)
def __arg_scope(self, weight_decay=0.0005, data_format='NHWC'):
        """Defines the VGG arg scope.

        Args:
          weight_decay: The l2 regularization coefficient.

        Returns:
          An arg_scope.
        """
        with slim.arg_scope([slim.conv2d, slim.fully_connected],
                            activation_fn=tf.nn.relu,
                            weights_regularizer=slim.l2_regularizer(weight_decay),
                            weights_initializer=tf.contrib.layers.xavier_initializer(),
                            biases_initializer=tf.zeros_initializer()):
            with slim.arg_scope([slim.conv2d, slim.max_pool2d],
                                padding='SAME',
                                data_format=data_format):
                with slim.arg_scope([custom_layers.pad2d,
                                     custom_layers.l2_normalization,
                                     custom_layers.channel_to_last],
                                    data_format=data_format) as sc:
                    return sc
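
The scope captured with "as sc" is a plain dict of defaults, and slim.arg_scope accepts such a captured scope in place of a list of ops, so callers can re-enter it later. A hedged usage sketch from inside the same class (build_net and images are hypothetical names, not from the project):

# Hypothetical caller, re-entering the captured scope:
with slim.arg_scope(self.__arg_scope(weight_decay=0.0005, data_format='NHWC')):
    net = self.build_net(images)  # build_net: hypothetical model-building method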
Generator.py (project: learning-tensorflow, author: Salon-sai)
def build_model(input, image_size=64):
    with slim.arg_scope([slim.conv2d_transpose], kernel_size=[5, 5], stride=2,
                        activation_fn=None):
        net = linear(input, 2 * image_size * image_size, 'generator/linear_1')  # 2 * 64 * 64 = 8192 = 2^13 units when image_size=64
        net = tf.reshape(net, [-1, image_size // 16, image_size // 16, 512], name='generator/reshape_2')
        net = BatchNorm(net, name="batch_norm_3")
        net = tf.nn.relu(net)

        # slim layers take `scope`, not `name`; passing `name=` raises a TypeError.
        net = slim.conv2d_transpose(inputs=net, num_outputs=256, padding="SAME", scope="generator/deconv_4")
        net = BatchNorm(net, name="batch_norm_5")
        net = tf.nn.relu(net)

        net = slim.conv2d_transpose(inputs=net, num_outputs=128, padding="SAME", scope="generator/deconv_6")
        net = BatchNorm(net, name="batch_norm_7")
        net = tf.nn.relu(net)

        net = slim.conv2d_transpose(inputs=net, num_outputs=64, padding="SAME", scope="generator/deconv_8")
        net = BatchNorm(net, name="batch_norm_9")
        net = tf.nn.relu(net)

        net = slim.conv2d_transpose(inputs=net, num_outputs=3, padding="SAME", scope="generator/deconv_10")
        net = tf.nn.tanh(net)
    return net
began_model.py (project: Awesome-GANs, author: kozistr)
def encoder(self, x, embedding, reuse=None):
        with tf.variable_scope("encoder", reuse=reuse):
            with slim.arg_scope([slim.conv2d],
                                stride=1, activation_fn=tf.nn.elu, padding="SAME",
                                weights_initializer=tf.contrib.layers.variance_scaling_initializer(),
                                weights_regularizer=slim.l2_regularizer(5e-4),
                                biases_initializer=tf.zeros_initializer()):
                x = slim.conv2d(x, embedding, 3)

                for i in range(self.conv_repeat_num):
                    channel_num = embedding * (i + 1)
                    x = slim.repeat(x, 2, slim.conv2d, channel_num, 3)
                    if i < self.conv_repeat_num - 1:
                        # Is strided convolution a better sub-sampling method
                        # than max or average pooling?
                        # x = slim.conv2d(x, channel_num, kernel_size=3, stride=2)  # sub-sampling
                        x = slim.avg_pool2d(x, kernel_size=2, stride=2)
                        # x = slim.max_pool2d(x, 3, 2)

                x = tf.reshape(x, [-1, np.prod([8, 8, channel_num])])
        return x
began_model.py (project: Awesome-GANs, author: kozistr)
def decoder(self, z, embedding, reuse=None):
        with tf.variable_scope("decoder", reuse=reuse):
            with slim.arg_scope([slim.conv2d, slim.fully_connected],
                                weights_initializer=tf.contrib.layers.variance_scaling_initializer(),
                                weights_regularizer=slim.l2_regularizer(5e-4),
                                biases_initializer=tf.zeros_initializer()):
                with slim.arg_scope([slim.conv2d], padding="SAME",
                                    activation_fn=tf.nn.elu, stride=1):
                    x = slim.fully_connected(z, 8 * 8 * embedding, activation_fn=None)
                    x = tf.reshape(x, [-1, 8, 8, embedding])

                    for i in range(self.conv_repeat_num):
                        x = slim.repeat(x, 2, slim.conv2d, embedding, 3)
                        if i < self.conv_repeat_num - 1:
                            x = resize_nn(x, 2)  # NN up-sampling

                    x = slim.conv2d(x, 3, 3, activation_fn=None)
        return x
discogan_model.py (project: Awesome-GANs, author: kozistr)
def discriminator(self, x, name, reuse=None):
        with tf.variable_scope(name, reuse=reuse):
            with slim.arg_scope([slim.conv2d, slim.fully_connected],
                                weights_initializer=tf.contrib.layers.variance_scaling_initializer(),
                                weights_regularizer=slim.l2_regularizer(2e-4)):
                with slim.arg_scope([slim.conv2d], padding="SAME", stride=2, kernel_size=4):
                    net = slim.conv2d(x, self.df_dim)
                    net = lrelu(net)

                    mul = 2
                    for bn in self.d_bn:
                        net = slim.conv2d(net, self.df_dim * mul)
                        net = bn(net)
                        net = lrelu(net)
                        mul *= 2

                net = tf.reshape(net, shape=[-1, 2*2*512])
                net = slim.fully_connected(net, 512, activation_fn=lrelu, normalizer_fn=slim.batch_norm)
                net = slim.fully_connected(net, 1, activation_fn=tf.nn.sigmoid)
            return net  # return prob
network_in_network.py (project: Deep_Learning_In_Action, author: SunnyMarkLiu)
def mlp_conv(self, x, kernel_size, stride, num_filters, micro_layer_size, name):
        """
        multi layer perceptron convolution.

        :param num_filters: number of micro_net filter
        :param micro_layer_size: [hidden_layer]
        :return:
        """
        with tf.variable_scope(name, values=[x]):
            # first convolution
            net = slim.conv2d(inputs=x, num_outputs=num_filters, kernel_size=[kernel_size, kernel_size],
                              stride=stride, scope='first_conv', padding='SAME')
            # cccp layer
            with slim.arg_scope([slim.conv2d], kernel_size=[1, 1], stride=1,
                                padding='VALID', activation_fn=tf.nn.relu):
                for hidden_i, hidden_size in enumerate(micro_layer_size):
                    net = slim.conv2d(net, hidden_size, scope='hidden_' + str(hidden_i))
        return net
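
For reference, a hypothetical call that builds one NIN block with this helper (the input tensor and layer sizes are illustrative, not from the project):

# 5x5 conv with 192 filters, followed by two 1x1 cccp layers.
net = self.mlp_conv(images, kernel_size=5, stride=1, num_filters=192,
                    micro_layer_size=[160, 96], name='nin_block1')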
deep_latent_gaussian_model.py (project: proximity_vi, author: altosaar)
def likelihood(self, z, reuse=False):
    """Build likelihood p(x | z_0). """
    cfg = self.config
    n_samples = z.get_shape().as_list()[0]
    with util.get_or_create_scope('model', reuse=reuse):
      n_out = int(np.prod(cfg['train_data/shape']))
      net = z
      with slim.arg_scope(
          [slim.fully_connected],
          activation_fn=util.get_activation(cfg['p_net/activation']),
          outputs_collections=[tf.GraphKeys.ACTIVATIONS],
          variables_collections=['model'],
          weights_initializer=layers.variance_scaling_initializer(
              factor=np.square(cfg['p_net/init_w_stddev']))):
        for i in range(cfg['p_net/n_layers']):
          net = slim.fully_connected(
              net, cfg['p_net/hidden_size'], scope='fc%d' % i)
        logits = slim.fully_connected(
            net, n_out, activation_fn=None, scope='fc_lik')
    logits = tf.reshape(
        logits, [n_samples, cfg['batch_size']] + cfg['train_data/shape'])
    return dist.Bernoulli(logits=logits, validate_args=False)
simple_yolo.py (project: num-seq-recognizer, author: gmlove)
def cnn_layers(inputs, scope, end_points_collection, dropout_keep_prob=0.8, is_training=True):
  with slim.arg_scope([slim.conv2d, slim.fully_connected, slim.max_pool2d],
                      outputs_collections=[end_points_collection]):
    with slim.arg_scope([slim.conv2d],
                        normalizer_fn=slim.batch_norm, normalizer_params={'is_training': is_training},
                        activation_fn=leaky_relu):
      net = slim.conv2d(inputs, 32, [3, 3], scope='conv1')
      net = slim.max_pool2d(net, [2, 2], 2, scope='pool1')
      net = slim.conv2d(net, 64, [3, 3], scope='conv2')
      net = slim.max_pool2d(net, [2, 2], 2, scope='pool2')
      net = slim.conv2d(net, 128, [3, 3], scope='conv3')
      net = slim.conv2d(net, 64, [1, 1], scope='conv4')
      box_net = net = slim.conv2d(net, 128, [3, 3], scope='conv5')

      net = slim.max_pool2d(net, [2, 2], 2, scope='pool5')
      net = slim.conv2d(net, 256, [3, 3], scope='conv6')
      net = slim.conv2d(net, 128, [1, 1], scope='conv7')
      net = slim.conv2d(net, 256, [3, 3], scope='conv8')

      box_net = _reorg(box_net, 2)
      net = tf.concat([box_net, net], 3)
      net = slim.conv2d(net, 256, [3, 3], scope='conv9')
      net = slim.conv2d(net, 75, [1, 1], activation_fn=None, scope='conv10')

  return net, end_points_collection
iclr_mnr.py (project: num-seq-recognizer, author: gmlove)
def cnn_layers(inputs, scope, end_points_collection, dropout_keep_prob=0.8, is_training=True):
  with slim.arg_scope([slim.conv2d, slim.fully_connected, slim.max_pool2d],
                      outputs_collections=[end_points_collection]):
    net = slim.conv2d(inputs, 48, [5, 5], scope='conv1')
    net = slim.max_pool2d(net, [2, 2], 2, scope='pool1')
    net = slim.conv2d(net, 64, [5, 5], scope='conv2')
    net = slim.max_pool2d(net, [2, 2], 2, scope='pool2')
    net = slim.conv2d(net, 128, [5, 5], scope='conv3')
    net = slim.max_pool2d(net, [2, 2], 2, scope='pool3')
    net = slim.conv2d(net, 160, [5, 5], scope='conv4')
    net = slim.conv2d(net, 192, [5, 5], scope='conv5')
    net = slim.conv2d(net, 192, [5, 5], scope='conv6')
    net = slim.conv2d(net, 192, [5, 5], scope='conv7')
    net = slim.flatten(net)

    # By removing the fc layer, we get a much smaller model with almost the same performance.
    # net = slim.fully_connected(net, 3072, scope='fc8')

  return net, end_points_collection
iclr_mnr.py (project: num-seq-recognizer, author: gmlove)
def fc_layers(net,
              scope,
              end_points_collection,
              num_classes=10,
              is_training=True,
              dropout_keep_prob=0.5,
              name_prefix=None):

  def full_scope_name(scope_name):
    return scope_name if name_prefix is None else '%s_%s' % (name_prefix, scope_name)

  with slim.arg_scope([slim.fully_connected, slim.dropout],
                      outputs_collections=[end_points_collection]):
    net = slim.fully_connected(net, num_classes, activation_fn=None,
                               scope=full_scope_name('fc9'))

  return net, end_points_collection
lenet_v2.py (project: num-seq-recognizer, author: gmlove)
def cnn_layers(inputs, scope, end_points_collection, dropout_keep_prob=0.8, is_training=True):
  with slim.arg_scope([slim.conv2d, slim.fully_connected, slim.max_pool2d],
                      outputs_collections=[end_points_collection]):
    net = slim.conv2d(inputs, 32, [5, 5], scope='conv1')
    net = slim.max_pool2d(net, [2, 2], 2, scope='pool1')
    net = slim.conv2d(net, 64, [5, 5], scope='conv2')
    net = slim.max_pool2d(net, [2, 2], 2, scope='pool2')
    net = slim.conv2d(net, 64, [5, 5], scope='conv3')
    net = slim.max_pool2d(net, [2, 2], 2, scope='pool3')
    net = slim.conv2d(net, 64, [5, 5], scope='conv4')
    net = slim.conv2d(net, 64, [5, 5], scope='conv5')
    net = slim.conv2d(net, 64, [5, 5], scope='conv6')
    net = slim.conv2d(net, 64, [5, 5], scope='conv7')
    net = slim.flatten(net)

    net = slim.fully_connected(net, 256, scope='fc3')

  return net, end_points_collection
lenet_v1.py (project: num-seq-recognizer, author: gmlove)
def cnn_layers(inputs, scope, end_points_collection, dropout_keep_prob=0.8, is_training=True):
  with slim.arg_scope([slim.conv2d, slim.fully_connected, slim.max_pool2d],
                      outputs_collections=[end_points_collection]):
    net = slim.conv2d(inputs, 32, [5, 5], scope='conv1')
    net = slim.max_pool2d(net, [2, 2], 2, scope='pool1')
    net = slim.conv2d(net, 64, [5, 5], scope='conv2')
    net = slim.max_pool2d(net, [2, 2], 2, scope='pool2')
    net = slim.conv2d(net, 64, [5, 5], scope='conv3')
    net = slim.max_pool2d(net, [2, 2], 2, scope='pool3')
    net = slim.conv2d(net, 64, [5, 5], scope='conv4')
    net = slim.conv2d(net, 64, [5, 5], scope='conv5')
    net = slim.conv2d(net, 64, [5, 5], scope='conv6')
    net = slim.conv2d(net, 64, [5, 5], scope='conv7')
    net = slim.flatten(net)

    net = slim.fully_connected(net, 128, scope='fc3')

  return net, end_points_collection
lenet_v1.py (project: num-seq-recognizer, author: gmlove)
def fc_layers(net,
              scope,
              end_points_collection,
              num_classes=10,
              is_training=True,
              dropout_keep_prob=0.5,
              name_prefix=None):

  def full_scope_name(scope_name):
    return scope_name if name_prefix is None else '%s_%s' % (name_prefix, scope_name)

  with slim.arg_scope([slim.fully_connected, slim.dropout],
                      outputs_collections=[end_points_collection]):
    '''
    with dropout accuracy: 0.68, data: 4.2M
    without dropout accuracy: 0.71, data: 4.2M
    '''
    # net = slim.dropout(net, dropout_keep_prob, is_training=is_training,
    #                    scope=full_scope_name('dropout3'))
    net = slim.fully_connected(net, num_classes, activation_fn=None,
                               scope=full_scope_name('fc4'))

  return net, end_points_collection
lenet.py (project: num-seq-recognizer, author: gmlove)
def fc_layers(net,
              scope,
              end_points_collection,
              num_classes=10,
              is_training=True,
              dropout_keep_prob=0.5,
              name_prefix=None):

  def full_scope_name(scope_name):
    return scope_name if name_prefix is None else '%s_%s' % (name_prefix, scope_name)

  with slim.arg_scope([slim.fully_connected, slim.dropout],
                      outputs_collections=[end_points_collection]):
    net = slim.dropout(net, dropout_keep_prob, is_training=is_training,
                       scope=full_scope_name('dropout3'))
    net = slim.fully_connected(net, num_classes, activation_fn=None,
                               scope=full_scope_name('fc4'))

  return net, end_points_collection
alexnet.py (project: num-seq-recognizer, author: gmlove)
def fc_layers(net,
              scope,
              end_points_collection,
              num_classes=1000,
              is_training=True,
              dropout_keep_prob=0.5,
              spatial_squeeze=True,
              name_prefix=None):
  full_scope_name = lambda scope_name: scope_name if name_prefix is None else '%s_%s' % (name_prefix, scope_name)
  # Use conv2d instead of fully_connected layers.
  with slim.arg_scope([slim.conv2d],
                      weights_initializer=trunc_normal(0.005),
                      biases_initializer=tf.constant_initializer(0.1),
                      outputs_collections=[end_points_collection]):
    net = slim.conv2d(net, num_classes, [1, 1],
                      activation_fn=None,
                      normalizer_fn=None,
                      biases_initializer=tf.zeros_initializer(),
                      scope=full_scope_name('fc8'))

  if spatial_squeeze:
    net = tf.squeeze(net, [1, 2], name=full_scope_name('fc8/squeezed'))
    ops.add_to_collection(end_points_collection, net)
  return net, end_points_collection
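
Note that the explicit biases_initializer=tf.zeros_initializer() on the fc8 call overrides the tf.constant_initializer(0.1) default set by the enclosing arg_scope: per-call keyword arguments always take precedence over scoped defaults. A minimal demonstration of that precedence (toy shapes, illustrative only):

import tensorflow as tf

slim = tf.contrib.slim

x = tf.zeros([1, 8, 8, 3])
with slim.arg_scope([slim.conv2d], activation_fn=tf.nn.relu):
    a = slim.conv2d(x, 4, [3, 3], scope='scoped_default')                 # ReLU from the arg_scope
    b = slim.conv2d(x, 4, [3, 3], activation_fn=None, scope='override')   # linear output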
inception_resnet_v2.py (project: facenet, author: davidsandberg)
def inference(images, keep_probability, phase_train=True, 
              bottleneck_layer_size=128, weight_decay=0.0, reuse=None):
    batch_norm_params = {
        # Decay for the moving averages.
        'decay': 0.995,
        # Small epsilon to avoid dividing by a zero variance.
        'epsilon': 0.001,
        # Force in-place updates of the mean and variance estimates.
        'updates_collections': None,
        # Put the moving averages in the trainable-variables collection.
        'variables_collections': [tf.GraphKeys.TRAINABLE_VARIABLES],
    }
    with slim.arg_scope([slim.conv2d, slim.fully_connected],
                        weights_initializer=tf.truncated_normal_initializer(stddev=0.1),
                        weights_regularizer=slim.l2_regularizer(weight_decay),
                        normalizer_fn=slim.batch_norm,
                        normalizer_params=batch_norm_params):
        return inception_resnet_v2(images, is_training=phase_train,
              dropout_keep_prob=keep_probability, bottleneck_layer_size=bottleneck_layer_size, reuse=reuse)
inception_resnet_v1.py (project: facenet, author: davidsandberg)
def inference(images, keep_probability, phase_train=True, 
              bottleneck_layer_size=128, weight_decay=0.0, reuse=None):
    batch_norm_params = {
        # Decay for the moving averages.
        'decay': 0.995,
        # Small epsilon to avoid dividing by a zero variance.
        'epsilon': 0.001,
        # Force in-place updates of the mean and variance estimates.
        'updates_collections': None,
        # Put the moving averages in the trainable-variables collection.
        'variables_collections': [tf.GraphKeys.TRAINABLE_VARIABLES],
    }

    with slim.arg_scope([slim.conv2d, slim.fully_connected],
                        weights_initializer=tf.truncated_normal_initializer(stddev=0.1),
                        weights_regularizer=slim.l2_regularizer(weight_decay),
                        normalizer_fn=slim.batch_norm,
                        normalizer_params=batch_norm_params):
        return inception_resnet_v1(images, is_training=phase_train,
              dropout_keep_prob=keep_probability, bottleneck_layer_size=bottleneck_layer_size, reuse=reuse)
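
Setting 'updates_collections': None above makes slim.batch_norm update its moving mean and variance in place as part of the forward pass, which simplifies training at some throughput cost. With the default collection you must wire the update ops into the train op yourself, roughly like this (a sketch; total_loss and the learning rate are assumptions):

import tensorflow as tf

# total_loss is assumed to be defined elsewhere in the training script.
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
    train_op = tf.train.AdamOptimizer(1e-3).minimize(total_loss)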
dfc_vae.py (project: facenet, author: davidsandberg)
def encoder(self, images, is_training):
        activation_fn = leaky_relu  # tf.nn.relu
        weight_decay = 0.0
        with tf.variable_scope('encoder'):
            with slim.arg_scope([slim.batch_norm],
                                is_training=is_training):
                with slim.arg_scope([slim.conv2d, slim.fully_connected],
                                    weights_initializer=tf.truncated_normal_initializer(stddev=0.1),
                                    weights_regularizer=slim.l2_regularizer(weight_decay),
                                    normalizer_fn=slim.batch_norm,
                                    normalizer_params=self.batch_norm_params):
                    net = slim.conv2d(images, 32, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_1')
                    net = slim.conv2d(net, 64, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_2')
                    net = slim.conv2d(net, 128, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_3')
                    net = slim.conv2d(net, 256, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_4')
                    net = slim.flatten(net)
                    fc1 = slim.fully_connected(net, self.latent_variable_dim, activation_fn=None, normalizer_fn=None, scope='Fc_1')
                    fc2 = slim.fully_connected(net, self.latent_variable_dim, activation_fn=None, normalizer_fn=None, scope='Fc_2')
        return fc1, fc2
dfc_vae_large.py (project: facenet, author: davidsandberg)
def encoder(self, images, is_training):
        activation_fn = leaky_relu  # tf.nn.relu
        weight_decay = 0.0
        with tf.variable_scope('encoder'):
            with slim.arg_scope([slim.batch_norm],
                                is_training=is_training):
                with slim.arg_scope([slim.conv2d, slim.fully_connected],
                                    weights_initializer=tf.truncated_normal_initializer(stddev=0.1),
                                    weights_regularizer=slim.l2_regularizer(weight_decay),
                                    normalizer_fn=slim.batch_norm,
                                    normalizer_params=self.batch_norm_params):
                    net = slim.conv2d(images, 32, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_1')
                    net = slim.conv2d(net, 64, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_2')
                    net = slim.conv2d(net, 128, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_3')
                    net = slim.conv2d(net, 256, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_4')
                    net = slim.conv2d(net, 512, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_5')
                    net = slim.flatten(net)
                    fc1 = slim.fully_connected(net, self.latent_variable_dim, activation_fn=None, normalizer_fn=None, scope='Fc_1')
                    fc2 = slim.fully_connected(net, self.latent_variable_dim, activation_fn=None, normalizer_fn=None, scope='Fc_2')
        return fc1, fc2

