Python conv2d() example source code

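All snippets below use TF-Slim's conv2d wrapper. For reference, here is a minimal sketch of the call signature, assuming TensorFlow 1.x with tf.contrib.slim available; the input shape is an arbitrary example:

import tensorflow as tf
import tensorflow.contrib.slim as slim

# slim.conv2d(inputs, num_outputs, kernel_size, ...) creates the weights,
# adds a bias, and applies ReLU by default; stride and padding are keyword
# arguments, exactly as in the examples below.
images = tf.placeholder(tf.float32, [None, 28, 28, 3])
net = slim.conv2d(images, 32, [3, 3], stride=2, padding='VALID', scope='demo')
# VALID padding with stride 2: (28 - 3) // 2 + 1 = 13, so net is (?, 13, 13, 32)
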
Source: inception_resnet_v1.py (project: faceNet_RealTime, author: jack55436001)
def reduction_b(net):
    with tf.variable_scope('Branch_0'):
        tower_conv = slim.conv2d(net, 256, 1, scope='Conv2d_0a_1x1')
        tower_conv_1 = slim.conv2d(tower_conv, 384, 3, stride=2,
                                   padding='VALID', scope='Conv2d_1a_3x3')
    with tf.variable_scope('Branch_1'):
        tower_conv1 = slim.conv2d(net, 256, 1, scope='Conv2d_0a_1x1')
        tower_conv1_1 = slim.conv2d(tower_conv1, 256, 3, stride=2,
                                    padding='VALID', scope='Conv2d_1a_3x3')
    with tf.variable_scope('Branch_2'):
        tower_conv2 = slim.conv2d(net, 256, 1, scope='Conv2d_0a_1x1')
        tower_conv2_1 = slim.conv2d(tower_conv2, 256, 3,
                                    scope='Conv2d_0b_3x3')
        tower_conv2_2 = slim.conv2d(tower_conv2_1, 256, 3, stride=2,
                                    padding='VALID', scope='Conv2d_1a_3x3')
    with tf.variable_scope('Branch_3'):
        tower_pool = slim.max_pool2d(net, 3, stride=2, padding='VALID',
                                     scope='MaxPool_1a_3x3')
    net = tf.concat([tower_conv_1, tower_conv1_1,
                     tower_conv2_2, tower_pool], 3)
    return net
Source: inception_resnet_v1.py (project: faceNet_RealTime, author: jack55436001)
def inference(images, keep_probability, phase_train=True, 
              bottleneck_layer_size=128, weight_decay=0.0, reuse=None):
    batch_norm_params = {
        # Decay for the moving averages.
        'decay': 0.995,
        # epsilon to prevent 0s in variance.
        'epsilon': 0.001,
        # force in-place updates of mean and variance estimates
        'updates_collections': None,
        # Moving averages end up in the trainable variables collection
        'variables_collections': [ tf.GraphKeys.TRAINABLE_VARIABLES ],
    }

    with slim.arg_scope([slim.conv2d, slim.fully_connected],
                        weights_initializer=tf.truncated_normal_initializer(stddev=0.1),
                        weights_regularizer=slim.l2_regularizer(weight_decay),
                        normalizer_fn=slim.batch_norm,
                        normalizer_params=batch_norm_params):
        return inception_resnet_v1(images, is_training=phase_train,
                                   dropout_keep_prob=keep_probability,
                                   bottleneck_layer_size=bottleneck_layer_size,
                                   reuse=reuse)
Source: dfc_vae_resnet.py (project: faceNet_RealTime, author: jack55436001)
def encoder(self, images, is_training):
        activation_fn = leaky_relu  # tf.nn.relu
        weight_decay = 0.0
        with tf.variable_scope('encoder'):
            with slim.arg_scope([slim.batch_norm],
                                is_training=is_training):
                with slim.arg_scope([slim.conv2d, slim.fully_connected],
                                    weights_initializer=tf.truncated_normal_initializer(stddev=0.1),
                                    weights_regularizer=slim.l2_regularizer(weight_decay),
                                    normalizer_fn=slim.batch_norm,
                                    normalizer_params=self.batch_norm_params):
                    net = images

                    net = slim.conv2d(net, 32, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_1a')
                    net = slim.repeat(net, 3, conv2d_block, 0.1, 32, [4, 4], 1, activation_fn=activation_fn, scope='Conv2d_1b')

                    net = slim.conv2d(net, 64, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_2a')
                    net = slim.repeat(net, 3, conv2d_block, 0.1, 64, [4, 4], 1, activation_fn=activation_fn, scope='Conv2d_2b')

                    net = slim.conv2d(net, 128, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_3a')
                    net = slim.repeat(net, 3, conv2d_block, 0.1, 128, [4, 4], 1, activation_fn=activation_fn, scope='Conv2d_3b')

                    net = slim.conv2d(net, 256, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_4a')
                    net = slim.repeat(net, 3, conv2d_block, 0.1, 256, [4, 4], 1, activation_fn=activation_fn, scope='Conv2d_4b')

                    net = slim.flatten(net)
                    fc1 = slim.fully_connected(net, self.latent_variable_dim, activation_fn=None, normalizer_fn=None, scope='Fc_1')
                    fc2 = slim.fully_connected(net, self.latent_variable_dim, activation_fn=None, normalizer_fn=None, scope='Fc_2')
        return fc1, fc2
Source: dfc_vae_large.py (project: faceNet_RealTime, author: jack55436001)
def encoder(self, images, is_training):
        activation_fn = leaky_relu  # tf.nn.relu
        weight_decay = 0.0
        with tf.variable_scope('encoder'):
            with slim.arg_scope([slim.batch_norm],
                                is_training=is_training):
                with slim.arg_scope([slim.conv2d, slim.fully_connected],
                                    weights_initializer=tf.truncated_normal_initializer(stddev=0.1),
                                    weights_regularizer=slim.l2_regularizer(weight_decay),
                                    normalizer_fn=slim.batch_norm,
                                    normalizer_params=self.batch_norm_params):
                    net = slim.conv2d(images, 32, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_1')
                    net = slim.conv2d(net, 64, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_2')
                    net = slim.conv2d(net, 128, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_3')
                    net = slim.conv2d(net, 256, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_4')
                    net = slim.conv2d(net, 512, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_5')
                    net = slim.flatten(net)
                    fc1 = slim.fully_connected(net, self.latent_variable_dim, activation_fn=None, normalizer_fn=None, scope='Fc_1')
                    fc2 = slim.fully_connected(net, self.latent_variable_dim, activation_fn=None, normalizer_fn=None, scope='Fc_2')
        return fc1, fc2
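
In both encoders the two fully connected heads are emitted without activation or normalization; in a VAE they conventionally serve as the mean and log-variance of the latent distribution. A hedged usage sketch: the reparameterization step is standard VAE practice rather than part of this project's excerpt, and `model` is assumed to be an instance of the class above:

# Hypothetical usage: treat fc1/fc2 as mean and log-variance and sample a
# latent vector via the reparameterization trick.
mean, log_var = model.encoder(images, is_training=True)
eps = tf.random_normal(tf.shape(mean))
latent = mean + tf.exp(0.5 * log_var) * eps
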
Source: a3c_network.py (project: ProgressiveNeuralNetwork, author: GoingMyWay)
def __create_network(self, scope, img_shape=(80, 80)):
        with tf.variable_scope(self.task_name):
            with tf.variable_scope(scope):
                with tf.variable_scope('input_data'):
                    self.inputs = tf.placeholder(shape=[None, *img_shape, cfg.HIST_LEN], dtype=tf.float32)
                with tf.variable_scope('networks'):
                    with tf.variable_scope('conv_1'):
                        self.conv_1 = slim.conv2d(activation_fn=tf.nn.relu, inputs=self.inputs, num_outputs=32,
                                                  kernel_size=[8, 8], stride=4, padding='SAME', trainable=self.is_train)
                    with tf.variable_scope('conv_2'):
                        self.conv_2 = slim.conv2d(activation_fn=tf.nn.relu, inputs=self.conv_1, num_outputs=64,
                                                  kernel_size=[4, 4], stride=2, padding='SAME', trainable=self.is_train)
                    with tf.variable_scope('conv_3'):
                        self.conv_3 = slim.conv2d(activation_fn=tf.nn.relu, inputs=self.conv_2, num_outputs=64,
                                                  kernel_size=[3, 3], stride=1, padding='SAME', trainable=self.is_train)
                    with tf.variable_scope('f_c'):
                        self.fc = slim.fully_connected(slim.flatten(self.conv_3), 512,
                                                       activation_fn=tf.nn.elu, trainable=self.is_train)
Source: inception_resnet_v2_original.py (project: tf_face, author: ZhijianChan)
def block17(x, scale=1.0, activation_fn=tf.nn.relu, scope=None, reuse=None):
    """Builds the 17x17 ResNet block."""
    with tf.variable_scope(scope, 'Block17', [x], reuse=reuse):
        with tf.variable_scope('Branch_0'):
            tower_conv = slim.conv2d(x, 192, 1, scope='Conv2d_1x1')
        with tf.variable_scope('Branch_1'):
            tower_conv1_0 = slim.conv2d(x, 128, 1, scope='Conv2d_0a_1x1')
            tower_conv1_1 = slim.conv2d(tower_conv1_0, 160, [1, 7], scope='Conv2d_0b_1x7')
            tower_conv1_2 = slim.conv2d(tower_conv1_1, 192, [7, 1], scope='Conv2d_0c_7x1')
        mixed = tf.concat(values=[tower_conv, tower_conv1_2], axis=3)
        up = slim.conv2d(mixed, x.get_shape()[3], 1,
                         normalizer_fn=None,
                         activation_fn=None,
                         scope='Conv2d_1x1')
        x += scale * up
        if activation_fn:
            x = activation_fn(x)
    return x
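
The pattern in this and the following residual blocks: the final 1x1 'up' convolution projects the concatenated branch outputs back to the input's channel count, so the scaled residual can be added elementwise. A minimal shape check, assuming TensorFlow 1.x and the block17 above in scope:

import tensorflow as tf

# The block preserves its input shape: the 1x1 conv maps the 384-channel
# concat (192 + 192) back to the input's channel count before the residual add.
x = tf.placeholder(tf.float32, [None, 17, 17, 1088])  # channel count is illustrative
y = block17(x, scale=0.1, scope='demo_block17')       # y: (None, 17, 17, 1088)
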


# Inception-ResNet-C
# (2 branches)
Source: inception_resnet_v2_original.py (project: tf_face, author: ZhijianChan)
def block8(x, scale=1.0, activation_fn=tf.nn.relu, scope=None, reuse=None):
    """Builds the 8x8 ResNet block."""
    with tf.variable_scope(scope, 'Block8', [x], reuse=reuse):
        with tf.variable_scope('Branch_0'):
            tower_conv = slim.conv2d(x, 192, 1, scope='Conv2d_1x1')
        with tf.variable_scope('Branch_1'):
            tower_conv1_0 = slim.conv2d(x, 192, 1, scope='Conv2d_0a_1x1')
            tower_conv1_1 = slim.conv2d(tower_conv1_0, 224, [1, 3], scope='Conv2d_0b_1x3')
            tower_conv1_2 = slim.conv2d(tower_conv1_1, 256, [3, 1], scope='Conv2d_0c_3x1')
        mixed = tf.concat(values=[tower_conv, tower_conv1_2], axis=3)
        up = slim.conv2d(mixed, x.get_shape()[3], 1,
                         normalizer_fn=None,
                         activation_fn=None,
                         scope='Conv2d_1x1')
        x += scale * up
        if activation_fn:
            x = activation_fn(x)
    return x


# Reduce-A
# (3 branches)
Source: inception_resnet_v2_modified.py (project: tf_face, author: ZhijianChan)
def inference(inputs, keep_prob,
              bottleneck_size=128,
              phase_train=True,
              weight_decay=0.0,
              reuse=None):
    batch_norm_params = {
        'decay': 0.995,
        'epsilon': 0.001,
        'updates_collections': None,
        # 'scale': True}  # [test1: add 'gamma']
        'variables_collections': [tf.GraphKeys.TRAINABLE_VARIABLES]}  # [test2: removed from 'trainable_variables']
    with slim.arg_scope(
            [slim.conv2d, slim.fully_connected],
            weights_initializer=tf.truncated_normal_initializer(stddev=0.1),
            weights_regularizer=slim.l2_regularizer(weight_decay),
            biases_regularizer=slim.l2_regularizer(weight_decay),
            normalizer_fn=slim.batch_norm,
            normalizer_params=batch_norm_params):  # [test4: add weight_decay to biases]):
        return inception_resnet_v2(
            inputs,
            is_training=phase_train,
            keep_prob=keep_prob,
            bottleneck_size=bottleneck_size,
            reuse=reuse)
Source: inception_resnet_v1.py (project: tf_face, author: ZhijianChan)
def block35(x, scale=1.0, activation_fn=tf.nn.relu, scope=None, reuse=None):
    """Builds the 35x35 resx block."""
    with tf.variable_scope(scope, 'Block35', [x], reuse=reuse):
        with tf.variable_scope('Branch_0'):
            tower_conv = slim.conv2d(x, 32, 1, scope='Conv2d_1x1')
        with tf.variable_scope('Branch_1'):
            tower_conv1_0 = slim.conv2d(x, 32, 1, scope='Conv2d_0a_1x1')
            tower_conv1_1 = slim.conv2d(tower_conv1_0, 32, 3, scope='Conv2d_0b_3x3')
        with tf.variable_scope('Branch_2'):
            tower_conv2_0 = slim.conv2d(x, 32, 1, scope='Conv2d_0a_1x1')
            tower_conv2_1 = slim.conv2d(tower_conv2_0, 32, 3, scope='Conv2d_0b_3x3')
            tower_conv2_2 = slim.conv2d(tower_conv2_1, 32, 3, scope='Conv2d_0c_3x3')
        # tensor layout is NxHxWxC; concatenate along the channel dim
        mixed = tf.concat([tower_conv, tower_conv1_1, tower_conv2_2], 3)
        # the output channels of 'up' must match the block input's channels
        up = slim.conv2d(mixed, x.get_shape()[3], 1, normalizer_fn=None, activation_fn=None, scope='Conv2d_1x1')
        x += scale * up
        if activation_fn:
            x = activation_fn(x)
    return x


# Inception-ResNet-B
# (2 branches)
Source: inception_resnet_v1.py (project: tf_face, author: ZhijianChan)
def block8(x, scale=1.0, activation_fn=tf.nn.relu, scope=None, reuse=None):
    """Builds the 8x8 ResNet block."""
    with tf.variable_scope(scope, 'Block8', [x], reuse=reuse):
        with tf.variable_scope('Branch_0'):
            tower_conv = slim.conv2d(x, 192, 1, scope='Conv2d_1x1')
        with tf.variable_scope('Branch_1'):
            tower_conv1_0 = slim.conv2d(x, 192, 1, scope='Conv2d_0a_1x1')
            tower_conv1_1 = slim.conv2d(tower_conv1_0, 192, [1, 3], scope='Conv2d_0b_1x3')
            tower_conv1_2 = slim.conv2d(tower_conv1_1, 192, [3, 1], scope='Conv2d_0c_3x1')
        mixed = tf.concat([tower_conv, tower_conv1_2], 3)
        up = slim.conv2d(mixed, x.get_shape()[3], 1, normalizer_fn=None, activation_fn=None, scope='Conv2d_1x1')
        x += scale * up
        if activation_fn:
            x = activation_fn(x)
    return x


# 35x35x256 -> 17x17x896
# (3 branches)
Source: inception_resnet_v1.py (project: tf_face, author: ZhijianChan)
def inference(inputs, keep_prob,
              bottleneck_size=128,
              phase_train=True,
              weight_decay=0.0,
              reuse=None):
    batch_norm_params = {
        'decay': 0.995,
        'epsilon': 0.001,
        'updates_collections': None,
        # 'scale': True,  # [test1]
        'variables_collections': [tf.GraphKeys.TRAINABLE_VARIABLES]}  # [test2]
    with slim.arg_scope(
            [slim.conv2d, slim.fully_connected],
            weights_initializer=tf.truncated_normal_initializer(stddev=0.1),
            weights_regularizer=slim.l2_regularizer(weight_decay),
            normalizer_fn=slim.batch_norm,
            normalizer_params=batch_norm_params):
        return inception_resnet_v1(inputs, is_training=phase_train, keep_prob=keep_prob,
                                   bottleneck_size=bottleneck_size, reuse=reuse)
Source: pyramid_network.py (project: Master-R-CNN, author: Mark110)
def _extra_conv_arg_scope_with_bn(weight_decay=0.00001,
                                  activation_fn=None,
                                  batch_norm_decay=0.997,
                                  batch_norm_epsilon=1e-5,
                                  batch_norm_scale=True):

  batch_norm_params = {
      'decay': batch_norm_decay,
      'epsilon': batch_norm_epsilon,
      'scale': batch_norm_scale,
      'updates_collections': tf.GraphKeys.UPDATE_OPS_EXTRA,
  }

  with slim.arg_scope(
      [slim.conv2d],
      weights_regularizer=slim.l2_regularizer(weight_decay),
      weights_initializer=slim.variance_scaling_initializer(),
      activation_fn=tf.nn.relu,
      normalizer_fn=slim.batch_norm,
      normalizer_params=batch_norm_params):
    with slim.arg_scope([slim.batch_norm], **batch_norm_params):
      with slim.arg_scope([slim.max_pool2d], padding='SAME') as arg_sc:
        return arg_sc
Source: pyramid_network.py (project: Master-R-CNN, author: Mark110)
def _extra_conv_arg_scope(weight_decay=0.00001, activation_fn=None, normalizer_fn=None):

  with slim.arg_scope(
      [slim.conv2d, slim.conv2d_transpose],
      padding='SAME',
      weights_regularizer=slim.l2_regularizer(weight_decay),
      weights_initializer=tf.truncated_normal_initializer(stddev=0.001),
      activation_fn=activation_fn,
      normalizer_fn=normalizer_fn):
    with slim.arg_scope(
        [slim.fully_connected],
        weights_regularizer=slim.l2_regularizer(weight_decay),
        weights_initializer=tf.truncated_normal_initializer(stddev=0.001),
        activation_fn=activation_fn,
        normalizer_fn=normalizer_fn) as arg_sc:
      return arg_sc
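
Both helpers return an arg_scope rather than tensors; callers re-enter the scope so that every layer built inside it inherits the shared defaults. A hypothetical usage sketch (feature_map and the layer size are assumptions, not part of the project code):

# Every conv2d created inside the scope picks up the regularizer,
# initializer, activation, and normalizer configured above.
with slim.arg_scope(_extra_conv_arg_scope_with_bn(weight_decay=1e-5)):
    net = slim.conv2d(feature_map, 256, [3, 3], scope='extra_conv')
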
Source: resnet_v1.py (project: tf-faster-rcnn, author: endernewton)
def resnet_arg_scope(is_training=True,
                     batch_norm_decay=0.997,
                     batch_norm_epsilon=1e-5,
                     batch_norm_scale=True):
  batch_norm_params = {
    'is_training': False,
    'decay': batch_norm_decay,
    'epsilon': batch_norm_epsilon,
    'scale': batch_norm_scale,
    'trainable': False,
    'updates_collections': tf.GraphKeys.UPDATE_OPS
  }

  with arg_scope(
      [slim.conv2d],
      weights_regularizer=slim.l2_regularizer(cfg.TRAIN.WEIGHT_DECAY),
      weights_initializer=slim.variance_scaling_initializer(),
      trainable=is_training,
      activation_fn=tf.nn.relu,
      normalizer_fn=slim.batch_norm,
      normalizer_params=batch_norm_params):
    with arg_scope([slim.batch_norm], **batch_norm_params) as arg_sc:
      return arg_sc
Source: vgg16.py (project: tf-faster-rcnn, author: endernewton)
def _image_to_head(self, is_training, reuse=None):
    with tf.variable_scope(self._scope, self._scope, reuse=reuse):
      net = slim.repeat(self._image, 2, slim.conv2d, 64, [3, 3],
                          trainable=False, scope='conv1')
      net = slim.max_pool2d(net, [2, 2], padding='SAME', scope='pool1')
      net = slim.repeat(net, 2, slim.conv2d, 128, [3, 3],
                        trainable=False, scope='conv2')
      net = slim.max_pool2d(net, [2, 2], padding='SAME', scope='pool2')
      net = slim.repeat(net, 3, slim.conv2d, 256, [3, 3],
                        trainable=is_training, scope='conv3')
      net = slim.max_pool2d(net, [2, 2], padding='SAME', scope='pool3')
      net = slim.repeat(net, 3, slim.conv2d, 512, [3, 3],
                        trainable=is_training, scope='conv4')
      net = slim.max_pool2d(net, [2, 2], padding='SAME', scope='pool4')
      net = slim.repeat(net, 3, slim.conv2d, 512, [3, 3],
                        trainable=is_training, scope='conv5')

    self._act_summaries.append(net)
    self._layers['head'] = net

    return net
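
slim.repeat applies the same layer several times and numbers the scopes automatically. The first call above, for example, is equivalent to the following expansion (a sketch; the scope names follow slim's unit-numbering convention):

# slim.repeat(self._image, 2, slim.conv2d, 64, [3, 3], trainable=False,
#             scope='conv1') is shorthand for two stacked convolutions whose
# variables live under conv1/conv1_1 and conv1/conv1_2:
with tf.variable_scope('conv1'):
    net = slim.conv2d(self._image, 64, [3, 3], trainable=False, scope='conv1_1')
    net = slim.conv2d(net, 64, [3, 3], trainable=False, scope='conv1_2')
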
Source: agent.py (project: DQN, author: pekaalto)
def create_network(self, input, trainable):
        if trainable:
            wr = slim.l2_regularizer(self.regularization)
        else:
            wr = None

        # The input is a stack of black-and-white frames.
        # Move the stack into the channel slot (channels-last in TensorFlow).
        input_t = tf.transpose(input, [0, 2, 3, 1])

        net = slim.conv2d(input_t, 8, (7, 7), data_format="NHWC",
            activation_fn=tf.nn.relu, stride=3, weights_regularizer=wr, trainable=trainable)
        net = slim.max_pool2d(net, 2, 2)
        net = slim.conv2d(net, 16, (3, 3), data_format="NHWC",
            activation_fn=tf.nn.relu, weights_regularizer=wr, trainable=trainable)
        net = slim.max_pool2d(net, 2, 2)
        net = slim.flatten(net)
        net = slim.fully_connected(net, 256, activation_fn=tf.nn.relu,
            weights_regularizer=wr, trainable=trainable)
        q_state_action_values = slim.fully_connected(net, self.dim_actions,
            activation_fn=None, weights_regularizer=wr, trainable=trainable)

        return q_state_action_values
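
The transpose at the top moves the frame-stack axis into the channel slot that NHWC convolutions expect. A small sketch with assumed Atari-style shapes:

import tensorflow as tf

# Assumed input: a stack of 4 grayscale 84x84 frames per state.
frames = tf.placeholder(tf.float32, [None, 4, 84, 84])
nhwc = tf.transpose(frames, [0, 2, 3, 1])  # -> (None, 84, 84, 4): frames become channels
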
Source: ssd.py (project: SSD_tensorflow_VOC, author: LevinJ)
def __arg_scope(self, weight_decay=0.0005, data_format='NHWC'):
        """Defines the VGG arg scope.

        Args:
          weight_decay: The l2 regularization coefficient.

        Returns:
          An arg_scope.
        """
        with slim.arg_scope([slim.conv2d, slim.fully_connected],
                            activation_fn=tf.nn.relu,
                            weights_regularizer=slim.l2_regularizer(weight_decay),
                            weights_initializer=tf.contrib.layers.xavier_initializer(),
                            biases_initializer=tf.zeros_initializer()):
            with slim.arg_scope([slim.conv2d, slim.max_pool2d],
                                padding='SAME',
                                data_format=data_format):
                with slim.arg_scope([custom_layers.pad2d,
                                     custom_layers.l2_normalization,
                                     custom_layers.channel_to_last],
                                    data_format=data_format) as sc:
                    return sc
Source: network.py (project: Neural-EM, author: sjoerdvansteenkiste)
def __call__(self, inputs, state, scope=None):
        output, res_state = self._cell(inputs, state)

        projected = None
        with tf.variable_scope(scope or self._name):
            if self._spec['name'] == 'fc':
                projected = slim.fully_connected(output, self._spec['size'], activation_fn=None)
            elif self._spec['name'] == 't_conv':
                projected = slim.layers.conv2d_transpose(output, self._spec['size'], self._spec['kernel'], self._spec['stride'], activation_fn=None)
            elif self._spec['name'] == 'r_conv':
                resized = tf.image.resize_images(output, (self._spec['stride'][0] * output.get_shape()[1].value,
                                                          self._spec['stride'][1] * output.get_shape()[2].value), method=1)
                projected = slim.layers.conv2d(resized, self._spec['size'], self._spec['kernel'], activation_fn=None)
            else:
                raise ValueError('Unknown layer name "{}"'.format(self._spec['name']))

        return projected, res_state
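
The wrapper dispatches on a small spec dict. Hypothetical spec values for each branch; the keys match those read above, while the sizes are illustrative:

# 'fc': project the cell output with a dense layer
fc_spec = {'name': 'fc', 'size': 128}
# 't_conv': upsample with a transposed convolution
tconv_spec = {'name': 't_conv', 'size': 64, 'kernel': [3, 3], 'stride': [2, 2]}
# 'r_conv': resize (method=1 is nearest-neighbour) and then convolve
rconv_spec = {'name': 'r_conv', 'size': 64, 'kernel': [3, 3], 'stride': [2, 2]}
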

