Python conv2d() usage examples

autoencoder.py (project: TensorFlow-World, author: astorfi)
def autoencoder(inputs):
    # encoder
    # 32 x 32 x 1   ->  16 x 16 x 32
    # 16 x 16 x 32  ->  8 x 8 x 16
    # 8 x 8 x 16    ->  2 x 2 x 8
    net = lays.conv2d(inputs, 32, [5, 5], stride=2, padding='SAME')
    net = lays.conv2d(net, 16, [5, 5], stride=2, padding='SAME')
    net = lays.conv2d(net, 8, [5, 5], stride=4, padding='SAME')
    # decoder
    # 2 x 2 x 8    ->  8 x 8 x 16
    # 8 x 8 x 16   ->  16 x 16 x 32
    # 16 x 16 x 32  ->  32 x 32 x 1
    net = lays.conv2d_transpose(net, 16, [5, 5], stride=4, padding='SAME')
    net = lays.conv2d_transpose(net, 32, [5, 5], stride=2, padding='SAME')
    net = lays.conv2d_transpose(net, 1, [5, 5], stride=2, padding='SAME', activation_fn=tf.nn.tanh)
    return net
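Here `lays` is presumably `tensorflow.contrib.layers`. A minimal sketch of how this autoencoder might be wired up for training on MNIST images resized to 32 x 32 (the placeholder, loss, and optimizer below are illustrative, not from the original project):

import tensorflow as tf
import tensorflow.contrib.layers as lays

ae_inputs = tf.placeholder(tf.float32, (None, 32, 32, 1))  # resized MNIST batches
ae_outputs = autoencoder(ae_inputs)
loss = tf.reduce_mean(tf.square(ae_outputs - ae_inputs))   # pixel-wise MSE
train_op = tf.train.AdamOptimizer(learning_rate=0.001).minimize(loss)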

util.py (project: predictron, author: zhongwen)
def predictron_arg_scope(weight_decay=0.0001,
                         batch_norm_decay=0.997,
                         batch_norm_epsilon=1e-5,
                         batch_norm_scale=True):
  batch_norm_params = {
    'decay': batch_norm_decay,
    'epsilon': batch_norm_epsilon,
    'scale': batch_norm_scale,
    'updates_collections': tf.GraphKeys.UPDATE_OPS,
  }

  # Set weight_decay for weights in Conv and FC layers.
  with arg_scope(
      [layers.conv2d, layers_lib.fully_connected],
      weights_regularizer=regularizers.l2_regularizer(weight_decay)):
    with arg_scope(
        [layers.conv2d],
        weights_initializer=initializers.variance_scaling_initializer(),
        activation_fn=None,
        normalizer_fn=layers_lib.batch_norm,
        normalizer_params=batch_norm_params) as sc:
      return sc
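The returned scope is consumed with `arg_scope` when the network is built; a brief illustrative sketch (the layer size and names are made up):

with arg_scope(predictron_arg_scope()):
    # conv layers created here inherit the initializer, regularizer and batch norm settings
    net = layers.conv2d(inputs, 32, [3, 3], scope='conv1')
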
model.py (project: DeepWorks, author: daigo0927)
def _shortcut(inputs, x): # x = f(inputs)
    # shortcut path
    _, inputs_h, inputs_w, inputs_ch = inputs.shape.as_list()
    _, x_h, x_w, x_ch = x.shape.as_list()
    stride_h = int(round(inputs_h / x_h))
    stride_w = int(round(inputs_w / x_w))
    equal_ch = inputs_ch == x_ch

    if stride_h>1 or stride_w>1 or not equal_ch:
        shortcut = tcl.conv2d(inputs,
                              num_outputs = x_ch,
                              kernel_size = (1, 1),
                              stride = (stride_h, stride_w),
                              padding = 'VALID')
    else:
        shortcut = inputs

    merged = tf.add(shortcut, x)
    return merged
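A sketch of how `_shortcut` is typically paired with a two-convolution residual path (this wrapper is illustrative, not part of the snippet):

def _residual_block(inputs, ch, stride=1):
    x = tcl.conv2d(inputs, num_outputs=ch, kernel_size=(3, 3),
                   stride=(stride, stride), padding='SAME')
    x = tcl.conv2d(x, num_outputs=ch, kernel_size=(3, 3),
                   stride=(1, 1), padding='SAME', activation_fn=None)
    return tf.nn.relu(_shortcut(inputs, x))  # add the (possibly projected) skip path
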
model.py (project: DeepWorks, author: daigo0927)
def __call__(self, inputs, reuse = True):
        with tf.variable_scope(self.name) as vs:
            tf.get_variable_scope()
            if reuse:
                vs.reuse_variables()

            x1, down1 = down_block(self.block_fn, 64)(inputs)
            x2, down2 = down_block(self.block_fn, 128)(down1)
            x3, down3 = down_block(self.block_fn, 256)(down2)

            down3 = self.block_fn(512)(down3)

            up3 = up_block(self.block_fn, 256)(x3, down3)
            up2 = up_block(self.block_fn, 128)(x2, up3)
            up1 = up_block(self.block_fn, 64)(x1, up2)

            outputs = tcl.conv2d(up1,
                                 num_outputs = self.output_ch,
                                 kernel_size = (1, 1),
                                 stride = (1, 1),
                                 padding = 'SAME')

            return outputs
components.py (project: decorrelated-adversarial-autoencoder, author: patrickgadd)
def semi_supervised_encoder_convolutional(input_tensor, z_dim, y_dim, batch_size, network_scale=1.0, img_res=28, img_channels=1):
    f_multiplier = network_scale

    net = tf.reshape(input_tensor, [-1, img_res, img_res, img_channels])

    net = layers.conv2d(net, int(16*f_multiplier), 3, stride=2)
    net = layers.conv2d(net, int(16*f_multiplier), 3, stride=1)
    net = layers.conv2d(net, int(32*f_multiplier), 3, stride=2)
    net = layers.conv2d(net, int(32*f_multiplier), 3, stride=1)
    net = layers.conv2d(net, int(64*f_multiplier), 3, stride=2)
    net = layers.conv2d(net, int(64*f_multiplier), 3, stride=1)
    net = layers.conv2d(net, int(128*f_multiplier), 3, stride=2)

    net = tf.reshape(net, [batch_size, -1])
    net = layers.fully_connected(net, 1000)

    y = layers.fully_connected(net, y_dim, activation_fn=None, normalizer_fn=None)

    z = layers.fully_connected(net, z_dim, activation_fn=None)

    return y, z
vae_conv.py (project: zhusuan, author: thu-ml)
def q_net(x, n_xl, n_z, n_particles, is_training):
    with zs.BayesianNet() as variational:
        normalizer_params = {'is_training': is_training,
                             'updates_collections': None}
        lz_x = tf.reshape(tf.to_float(x), [-1, n_xl, n_xl, 1])
        lz_x = layers.conv2d(
            lz_x, 32, kernel_size=5, stride=2,
            normalizer_fn=layers.batch_norm,
            normalizer_params=normalizer_params)
        lz_x = layers.conv2d(
            lz_x, 64, kernel_size=5, stride=2,
            normalizer_fn=layers.batch_norm,
            normalizer_params=normalizer_params)
        lz_x = layers.conv2d(
            lz_x, 128, kernel_size=5, padding='VALID',
            normalizer_fn=layers.batch_norm,
            normalizer_params=normalizer_params)
        lz_x = layers.dropout(lz_x, keep_prob=0.9, is_training=is_training)
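        # assuming 28 x 28 inputs (e.g. MNIST): 28 -> 14 -> 7 via the two stride-2 SAME convs, then 7 -> 3 via the 5x5 VALID conv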
        lz_x = tf.reshape(lz_x, [-1, 128 * 3 * 3])
        lz_mean = layers.fully_connected(lz_x, n_z, activation_fn=None)
        lz_logstd = layers.fully_connected(lz_x, n_z, activation_fn=None)
        z = zs.Normal('z', lz_mean, logstd=lz_logstd, n_samples=n_particles,
                      group_ndims=1)
    return variational
vgg.py (project: canshi, author: hungsing92)
def vgg_arg_scope(weight_decay=0.0005):
  """Defines the VGG arg scope.

  Args:
    weight_decay: The l2 regularization coefficient.

  Returns:
    An arg_scope.
  """
  with arg_scope(
      [layers.conv2d, layers_lib.fully_connected],
      activation_fn=nn_ops.relu,
      weights_regularizer=regularizers.l2_regularizer(weight_decay),
      biases_initializer=init_ops.zeros_initializer()):
    with arg_scope([layers.conv2d], padding='SAME') as arg_sc:
      return arg_sc
async_dqn.py (project: chi, author: rmst)
def deep_q_network():
    """ Architecture according to:
    http://www.nature.com/nature/journal/v518/n7540/full/nature14236.html
    """
    @tt.model(tracker=tf.train.ExponentialMovingAverage(1 - .0005),  # TODO: replace with original weight freeze
              optimizer=tf.train.RMSPropOptimizer(.00025, .95, .95, .01))
    def q_network(x):
        x /= 255
        x = layers.conv2d(x, 32, 8, 4)
        x = layers.conv2d(x, 64, 4, 2)
        x = layers.conv2d(x, 64, 3, 1)
        x = layers.flatten(x)
        x = layers.fully_connected(x, 512)
        x = layers.fully_connected(x, env.action_space.n, activation_fn=None)
        x = tf.identity(x, name='Q')
        return x

    return q_network
dqn.py (project: reinforceflow, author: dbobrenko)
def make_dqn_body(input_layer, trainable=True):
    end_points = {}
    net = layers.conv2d(inputs=input_layer,
                        num_outputs=16,
                        kernel_size=[8, 8],
                        stride=[4, 4],
                        activation_fn=tf.nn.relu,
                        padding="same",
                        scope="conv1",
                        trainable=trainable)
    end_points['conv1'] = net
    net = layers.conv2d(inputs=net,
                        num_outputs=32,
                        kernel_size=[4, 4],
                        stride=[2, 2],
                        activation_fn=tf.nn.relu,
                        padding="same",
                        scope="conv2",
                        trainable=trainable)
    end_points['conv2'] = net
    out = layers.flatten(net)
    end_points['conv2_flatten'] = out
    return out, end_points
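A hedged usage sketch, assuming the standard Atari preprocessing of four stacked 84 x 84 grayscale frames:

frames = tf.placeholder(tf.float32, [None, 84, 84, 4])
body_out, end_points = make_dqn_body(frames)  # flattened conv features plus per-layer endpoints
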
cluttered.py (project: information-dropout, author: ucla-vision)
def conv(self, inputs, num_outputs, activations, normalizer_fn = batch_norm, kernel_size=3, stride=1, scope=None):
        '''Creates a convolutional layer with default arguments'''
        if activations == 'relu':
            activation_fn = tf.nn.relu
        elif activations == 'softplus':
            activation_fn = tf.nn.softplus
        else:
            raise ValueError("Invalid activation function.")
        return conv2d( inputs = inputs,
            num_outputs = num_outputs,
            kernel_size = kernel_size,
            stride = stride,
            padding = 'SAME',
            activation_fn = activation_fn,
            normalizer_fn = normalizer_fn,
            scope=scope )
cluttered.py (project: information-dropout, author: ucla-vision)
def information_pool(self, inputs, max_alpha, alpha_mode, lognorm_prior, num_outputs=None, stride=2, scope=None):
        if num_outputs is None:
            num_outputs = inputs.get_shape()[-1]
        # Creates the output convolutional layer (activations='softplus' is an
        # assumption: self.conv requires an activation name, and softplus keeps
        # the output positive for the tf.log calls below)
        network = self.conv(inputs, num_outputs=int(num_outputs),
            activations='softplus', stride=stride)
        with tf.variable_scope(scope,'information_dropout'):
            # Computes the noise parameter alpha for the output
            alpha = conv2d(inputs, num_outputs=int(num_outputs), kernel_size=3,
                stride=stride, activation_fn=tf.sigmoid, scope='alpha')
            # Rescale alpha in the allowed range and add a small value for numerical stability
            alpha = 0.001 + max_alpha * alpha
            # Computes the KL divergence using either log-uniform or log-normal prior
            if not lognorm_prior:
                kl = - tf.log(alpha/(max_alpha + 0.001))
            else:
                mu1 = tf.get_variable('mu1', [], initializer=tf.constant_initializer(0.))
                sigma1 = tf.get_variable('sigma1', [], initializer=tf.constant_initializer(1.))
                kl = KL_div2(tf.log(tf.maximum(network,1e-4)), alpha, mu1, sigma1)
            tf.add_to_collection('kl_terms', kl)
        # Samples the noise with the given parameter
        e = sample_lognormal(mean=tf.zeros_like(network), sigma = alpha, sigma0 = self.sigma0)
        # Returns the noisy output of the dropout
        return network * e
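In information dropout (Achille and Soatto), the multiplicative noise `e` is log-normal with scale `alpha`, so the learned, input-dependent `alpha` (capped at `max_alpha`) controls how much noise is injected into each unit, and the KL terms gathered in the `kl_terms` collection serve as the information regularizer added to the training loss.
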
cifar.py (project: information-dropout, author: ucla-vision)
def conv(self, inputs, num_outputs, activations, normalizer_fn = batch_norm, kernel_size=3, stride=1, scope=None):
        '''Creates a convolutional layer with default arguments'''
        if activations == 'relu':
            activation_fn = tf.nn.relu
        elif activations == 'softplus':
            activation_fn = tf.nn.softplus
        else:
            raise ValueError("Invalid activation function.")
        return conv2d( inputs = inputs,
            num_outputs = num_outputs,
            kernel_size = kernel_size,
            stride = stride,
            padding = 'SAME',
            activation_fn = activation_fn,
            normalizer_fn = normalizer_fn,
            normalizer_params = {'is_training' : self.is_training, 'updates_collections': None, 'decay': 0.9},
            scope=scope )
cifar.py 文件源码 项目:information-dropout 作者: ucla-vision 项目源码 文件源码 阅读 32 收藏 0 点赞 0 评论 0
def information_pool(self, inputs, max_alpha, alpha_mode, lognorm_prior, num_outputs=None, stride=2, scope=None):
        if num_outputs is None:
            num_outputs = inputs.get_shape()[-1]
        # Creates the output convolutional layer (activations='softplus' is an
        # assumption: self.conv requires an activation name, and softplus keeps
        # the output positive for the tf.log calls below)
        network = self.conv(inputs, num_outputs=int(num_outputs),
            activations='softplus', stride=stride)
        with tf.variable_scope(scope,'information_dropout'):
            # Computes the noise parameter alpha for the output
            alpha = conv2d(inputs, num_outputs=int(num_outputs), kernel_size=3,
                stride=stride, activation_fn=tf.sigmoid, scope='alpha')
            # Rescale alpha in the allowed range and add a small value for numerical stability
            alpha = 0.001 + max_alpha * alpha
            # Computes the KL divergence using either log-uniform or log-normal prior
            if not lognorm_prior:
                kl = - tf.log(alpha/(max_alpha + 0.001))
            else:
                mu1 = tf.get_variable('mu1', [], initializer=tf.constant_initializer(0.))
                sigma1 = tf.get_variable('sigma1', [], initializer=tf.constant_initializer(1.))
                kl = KL_div2(tf.log(tf.maximum(network,1e-4)), alpha, mu1, sigma1)
            tf.add_to_collection('kl_terms', kl)
        # Samples the noise with the given parameter
        e = sample_lognormal(mean=tf.zeros_like(network), sigma = alpha, sigma0 = self.sigma0)
        # Saves the log-output of the network (useful to compute the total correlation)
        tf.add_to_collection('log_network', tf.log(network * e))
        # Returns the noisy output of the dropout
        return network * e
ops.py (project: Mendelssohn, author: diggerdu)
def conv2d(input_, o_dim, k_size, st, name='conv2d'):
    '''
    with tf.variable_scope(name):
        init = ly.xavier_initializer_conv2d()
        output = ly.conv2d(input_, num_outputs=o_dim, kernel_size=k_size, stride=st,\
                activation_fn=tf.nn.relu, normalizer_fn=ly.batch_norm, padding='SAME',\
                weights_initializer=init)
        return output
    '''
    with tf.variable_scope(name):
        init = ly.xavier_initializer_conv2d()
        fil = tf.get_variable('co_f', k_size+\
                [ten_sh(input_)[-1], o_dim],initializer=init)
        co = tf.nn.conv2d(input_, fil, strides=[1]+st+[1], \
                padding='SAME')
        bia = tf.get_variable('co_b', [o_dim])
        co = tf.nn.bias_add(co, bia)
        return co
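Note that `k_size` and `st` must be Python lists of two ints here, since they are concatenated with other lists to build the filter shape and strides (`ten_sh` is evidently a project helper returning a tensor's shape as a list). A brief illustrative call:

x = tf.placeholder(tf.float32, [None, 64, 64, 3])
y = conv2d(x, o_dim=32, k_size=[5, 5], st=[2, 2], name='conv1')  # 5x5 filters, stride 2
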
bn_ops.py (project: Mendelssohn, author: diggerdu)
def conv2d(input_, o_dim, k_size, st, name='conv2d'):
    with tf.variable_scope(name):
        init = ly.xavier_initializer_conv2d()
        output = ly.conv2d(input_, num_outputs=o_dim, kernel_size=k_size, stride=st,\
                activation_fn=tf.nn.relu, normalizer_fn=ly.batch_norm, padding='SAME',\
                weights_initializer=init)
        return output
    '''
    with tf.variable_scope(name):
        init = ly.xavier_initializer_conv2d()
        fil = tf.get_variable('co_f', k_size+\
                [ten_sh(input_)[-1], o_dim],initializer=init)
        co = tf.nn.conv2d(input_, fil, strides=[1]+st+[1], \
                padding='SAME')
        bia = tf.get_variable('co_b', [o_dim])
        co = tf.nn.bias_add(co, bia)
        return co
    '''
cnn_encoder.py (project: neuralmonkey, author: ufal)
def image_processing_layers(self) -> List[tf.Tensor]:
        """Do all convolutions and return the last conditional map.

        Applies convolutions on the input tensor with optional max pooling.
        All the intermediate layers are stored in the `image_processing_layers`
        attribute.  There is not dropout between the convolutional layers, by
        default the activation function is ReLU.
        """
        last_layer = self.image_input
        image_processing_layers = []  # type: List[tf.Tensor]

        with tf.variable_scope("convolutions"):
            for i, (filter_size,
                    n_filters,
                    pool_size) in enumerate(self.convolutions):
                with tf.variable_scope("cnn_layer_{}".format(i)):
                    last_layer = conv2d(last_layer, n_filters, filter_size)
                    image_processing_layers.append(last_layer)

                    if pool_size:
                        last_layer = max_pool2d(last_layer, pool_size)
                        image_processing_layers.append(last_layer)

        return image_processing_layers
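`self.convolutions` is expected to be a list of `(filter_size, n_filters, pool_size)` triples, where a falsy `pool_size` skips pooling after that layer; an illustrative configuration:

convolutions = [(3, 32, 2), (3, 64, 2), (3, 128, None)]
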
nets.py (project: GAN_Theories, author: YadiraF)
def __call__(self, x, reuse=False):
        with tf.variable_scope(self.name) as scope:
            if reuse:
                scope.reuse_variables()
            size = 64
            d = tcl.conv2d(x, num_outputs=size, kernel_size=3, # bzx64x64x3 -> bzx32x32x64
                        stride=2, activation_fn=lrelu, normalizer_fn=tcl.batch_norm, padding='SAME', weights_initializer=tf.random_normal_initializer(0, 0.02))
            d = tcl.conv2d(d, num_outputs=size * 2, kernel_size=3, # 16x16x128
                        stride=2, activation_fn=lrelu, normalizer_fn=tcl.batch_norm, padding='SAME', weights_initializer=tf.random_normal_initializer(0, 0.02))
            d = tcl.conv2d(d, num_outputs=size * 4, kernel_size=3, # 8x8x256
                        stride=2, activation_fn=lrelu, normalizer_fn=tcl.batch_norm, padding='SAME', weights_initializer=tf.random_normal_initializer(0, 0.02))
            d = tcl.conv2d(d, num_outputs=size * 8, kernel_size=3, # 4x4x512
                        stride=2, activation_fn=lrelu, normalizer_fn=tcl.batch_norm, padding='SAME', weights_initializer=tf.random_normal_initializer(0, 0.02))

            d = tcl.fully_connected(tcl.flatten(d), 256, activation_fn=lrelu, weights_initializer=tf.random_normal_initializer(0, 0.02))
            d = tcl.fully_connected(d, 1, activation_fn=None, weights_initializer=tf.random_normal_initializer(0, 0.02))

            return d
nets.py (project: GAN_Theories, author: YadiraF)
def __call__(self, x, reuse=False):
        with tf.variable_scope(self.name) as scope:
            if reuse:
                scope.reuse_variables()
            size = 64
            d = tcl.conv2d(x, num_outputs=size, kernel_size=3, # bzx64x64x3 -> bzx32x32x64
                        stride=2, activation_fn=lrelu, normalizer_fn=tcl.batch_norm, padding='SAME', weights_initializer=tf.random_normal_initializer(0, 0.02))
            d = tcl.conv2d(d, num_outputs=size * 2, kernel_size=3, # 16x16x128
                        stride=2, activation_fn=lrelu, normalizer_fn=tcl.batch_norm, padding='SAME', weights_initializer=tf.random_normal_initializer(0, 0.02))
            d = tcl.conv2d(d, num_outputs=size * 4, kernel_size=3, # 8x8x256
                        stride=2, activation_fn=lrelu, normalizer_fn=tcl.batch_norm, padding='SAME', weights_initializer=tf.random_normal_initializer(0, 0.02))
            d = tcl.conv2d(d, num_outputs=size * 8, kernel_size=3, # 4x4x512
                        stride=2, activation_fn=lrelu, normalizer_fn=tcl.batch_norm, padding='SAME', weights_initializer=tf.random_normal_initializer(0, 0.02))

            d = tcl.fully_connected(tcl.flatten(d), 256, activation_fn=lrelu, weights_initializer=tf.random_normal_initializer(0, 0.02))
            mu = tcl.fully_connected(d, 100, activation_fn=None, weights_initializer=tf.random_normal_initializer(0, 0.02))
            sigma = tcl.fully_connected(d, 100, activation_fn=None, weights_initializer=tf.random_normal_initializer(0, 0.02))

            return mu, sigma
googlenet_model.py (project: googlenet, author: da-steve101)
def get_inception_layer( inputs, conv11_size, conv33_11_size, conv33_size,
                         conv55_11_size, conv55_size, pool11_size ):
    with tf.variable_scope("conv_1x1"):
        conv11 = layers.conv2d( inputs, conv11_size, [ 1, 1 ] )
    with tf.variable_scope("conv_3x3"):
        conv33_11 = layers.conv2d( inputs, conv33_11_size, [ 1, 1 ] )
        conv33 = layers.conv2d( conv33_11, conv33_size, [ 3, 3 ] )
    with tf.variable_scope("conv_5x5"):
        conv55_11 = layers.conv2d( inputs, conv55_11_size, [ 1, 1 ] )
        conv55 = layers.conv2d( conv55_11, conv55_size, [ 5, 5 ] )
    with tf.variable_scope("pool_proj"):
        pool_proj = layers.max_pool2d( inputs, [ 3, 3 ], stride = 1 )
        pool11 = layers.conv2d( pool_proj, pool11_size, [ 1, 1 ] )
    if tf.__version__ == '0.11.0rc0':
        return tf.concat(3, [conv11, conv33, conv55, pool11])
    return tf.concat([conv11, conv33, conv55, pool11], 3)
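For reference, with the filter counts of GoogLeNet's inception (3a) module the call would look like this (illustrative):

net = get_inception_layer(net, 64, 96, 128, 16, 32, 32)
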
googlenet_model.py (project: googlenet, author: da-steve101)
def aux_logit_layer( inputs, num_classes, is_training ):
    with tf.variable_scope("pool2d"):
        pooled = layers.avg_pool2d(inputs, [ 5, 5 ], stride = 3 )
    with tf.variable_scope("conv11"):
        conv11 = layers.conv2d( pooled, 128, [1, 1] )
    with tf.variable_scope("flatten"):
        flat = tf.reshape( conv11, [-1, 2048] )
    with tf.variable_scope("fc"):
        fc = layers.fully_connected( flat, 1024, activation_fn=None )
    with tf.variable_scope("drop"):
        drop = layers.dropout( fc, 0.3, is_training = is_training )
    with tf.variable_scope( "linear" ):
        linear = layers.fully_connected( drop, num_classes, activation_fn=None )
    with tf.variable_scope("soft"):
        soft = tf.nn.softmax( linear )
    return soft
model_lang.py (project: GAN-general, author: weilinie)
def generatorResNet(z, hidden_num, output_dim, kern_size, out_channels):
    with tf.variable_scope("G") as vs:
        fc = tcl.fully_connected(z, hidden_num*output_dim, activation_fn=None)
        fc = tf.reshape(fc, [-1, output_dim, hidden_num]) # data_format: 'NWC'

        res1 = resBlock(fc, hidden_num, kern_size)
        res2 = resBlock(res1, hidden_num, kern_size)
        res3 = resBlock(res2, hidden_num, kern_size)
        res4 = resBlock(res3, hidden_num, kern_size)
        res5 = resBlock(res4, hidden_num, kern_size)

        logits = tcl.conv2d(res5, out_channels, kernel_size=1)
        fake_data_softmax = tf.reshape(
            tf.nn.softmax(tf.reshape(logits, [-1, out_channels])),
            tf.shape(logits)
        )

    g_vars = tf.contrib.framework.get_variables(vs)
    return fake_data_softmax, g_vars
model_lang.py (project: GAN-general, author: weilinie)
def discriminatorResNet(x, hidden_num, output_dim, kern_size, in_channels, reuse):
    with tf.variable_scope("D") as vs:
        if reuse:
            vs.reuse_variables()
        conv = tcl.conv2d(x, hidden_num, kernel_size=1)

        res1 = resBlock(conv, hidden_num, kern_size)
        res2 = resBlock(res1, hidden_num, kern_size)
        res3 = resBlock(res2, hidden_num, kern_size)
        res4 = resBlock(res3, hidden_num, kern_size)
        res5 = resBlock(res4, hidden_num, kern_size)

        res5 = tf.reshape(res5, [-1, output_dim*hidden_num])  # data_format: 'NWC'
        disc_out = tcl.fully_connected(res5, 1, activation_fn=None)

    d_vars = tf.contrib.framework.get_variables(vs)
    return disc_out, d_vars
build_resnet.py (project: tensorflow-litterbox, author: rwightman)
def resnet_arg_scope(
        weight_decay=0.0001,
        batch_norm_decay=0.997,
        batch_norm_epsilon=1e-5,
        batch_norm_scale=True,
):
    batch_norm_params = {
        'decay': batch_norm_decay,
        'epsilon': batch_norm_epsilon,
        'scale': batch_norm_scale,
    }
    l2_regularizer = layers.l2_regularizer(weight_decay)

    arg_scope_layers = arg_scope(
        [layers.conv2d, my_layers.preact_conv2d, layers.fully_connected],
        weights_initializer=layers.variance_scaling_initializer(),
        weights_regularizer=l2_regularizer,
        activation_fn=tf.nn.relu)
    arg_scope_conv = arg_scope(
        [layers.conv2d, my_layers.preact_conv2d],
        normalizer_fn=layers.batch_norm,
        normalizer_params=batch_norm_params)
    with arg_scope_layers, arg_scope_conv as arg_sc:
        return arg_sc
build_inception_v4.py (project: tensorflow-litterbox, author: rwightman)
def _block_a(net, scope='BlockA'):
    # 35 x 35 x 384 grid
    # default padding = SAME
    # default stride = 1
    with tf.variable_scope(scope):
        with tf.variable_scope('Br1_Pool'):
            br1 = layers.avg_pool2d(net, [3, 3], scope='Pool1_3x3')
            br1 = layers.conv2d(br1, 96, [1, 1], scope='Conv1_1x1')
        with tf.variable_scope('Br2_1x1'):
            br2 = layers.conv2d(net, 96, [1, 1], scope='Conv1_1x1')
        with tf.variable_scope('Br3_3x3'):
            br3 = layers.conv2d(net, 64, [1, 1], scope='Conv1_1x1')
            br3 = layers.conv2d(br3, 96, [3, 3], scope='Conv2_3x3')
        with tf.variable_scope('Br4_3x3Dbl'):
            br4 = layers.conv2d(net, 64, [1, 1], scope='Conv1_1x1')
            br4 = layers.conv2d(br4, 96, [3, 3], scope='Conv2_3x3')
            br4 = layers.conv2d(br4, 96, [3, 3], scope='Conv3_3x3')
        net = tf.concat(3, [br1, br2, br3, br4], name='Concat1')
        # 35 x 35 x 384
    return net
build_inception_v4.py (project: tensorflow-litterbox, author: rwightman)
def _block_b(net, scope='BlockB'):
    # 17 x 17 x 1024 grid
    # default padding = SAME
    # default stride = 1
    with tf.variable_scope(scope):
        with tf.variable_scope('Br1_Pool'):
            br1 = layers.avg_pool2d(net, [3, 3], scope='Pool1_3x3')
            br1 = layers.conv2d(br1, 128, [1, 1], scope='Conv1_1x1')
        with tf.variable_scope('Br2_1x1'):
            br2 = layers.conv2d(net, 384, [1, 1], scope='Conv1_1x1')
        with tf.variable_scope('Br3_7x7'):
            br3 = layers.conv2d(net, 192, [1, 1], scope='Conv1_1x1')
            br3 = layers.conv2d(br3, 224, [1, 7], scope='Conv2_1x7')
            br3 = layers.conv2d(br3, 256, [7, 1], scope='Conv3_7x1')
        with tf.variable_scope('Br4_7x7Dbl'):
            br4 = layers.conv2d(net, 192, [1, 1], scope='Conv1_1x1')
            br4 = layers.conv2d(br4, 192, [1, 7], scope='Conv2_1x7')
            br4 = layers.conv2d(br4, 224, [7, 1], scope='Conv3_7x1')
            br4 = layers.conv2d(br4, 224, [1, 7], scope='Conv4_1x7')
            br4 = layers.conv2d(br4, 256, [7, 1], scope='Conv5_7x1')
        net = tf.concat(3, [br1, br2, br3, br4], name='Concat1')
        # 17 x 17 x 1024
    return net
build_inception_v4.py (project: tensorflow-litterbox, author: rwightman)
def _block_b_reduce(net, endpoints, scope='BlockReduceB'):
    # 17 x 17 -> 8 x 8 reduce
    with arg_scope([layers.conv2d, layers.max_pool2d, layers.avg_pool2d], padding='VALID'):
        with tf.variable_scope(scope):
            with tf.variable_scope('Br1_Pool'):
                br1 = layers.max_pool2d(net, [3, 3], stride=2, scope='Pool1_3x3/2')
            with tf.variable_scope('Br2_3x3'):
                br2 = layers.conv2d(net, 192, [1, 1], padding='SAME', scope='Conv1_1x1')
                br2 = layers.conv2d(br2, 192, [3, 3], stride=2, scope='Conv2_3x3/2')
            with tf.variable_scope('Br3_7x7x3'):
                br3 = layers.conv2d(net, 256, [1, 1], padding='SAME', scope='Conv1_1x1')
                br3 = layers.conv2d(br3, 256, [1, 7], padding='SAME', scope='Conv2_1x7')
                br3 = layers.conv2d(br3, 320, [7, 1], padding='SAME', scope='Conv3_7x1')
                br3 = layers.conv2d(br3, 320, [3, 3], stride=2, scope='Conv4_3x3/2')
            net = tf.concat(3, [br1, br2, br3], name='Concat1')
            endpoints[scope] = net
            print('%s output shape: %s' % (scope, net.get_shape()))
    return net
build_inception_v4.py (project: tensorflow-litterbox, author: rwightman)
def _block_c(net, scope='BlockC'):
    # 8 x 8 x 1536 grid
    # default padding = SAME
    # default stride = 1
    with tf.variable_scope(scope):
        with tf.variable_scope('Br1_Pool'):
            br1 = layers.avg_pool2d(net, [3, 3], scope='Pool1_3x3')
            br1 = layers.conv2d(br1, 256, [1, 1], scope='Conv1_1x1')
        with tf.variable_scope('Br2_1x1'):
            br2 = layers.conv2d(net, 256, [1, 1], scope='Conv1_1x1')
        with tf.variable_scope('Br3_3x3'):
            br3 = layers.conv2d(net, 384, [1, 1], scope='Conv1_1x1')
            br3a = layers.conv2d(br3, 256, [1, 3], scope='Conv2_1x3')
            br3b = layers.conv2d(br3, 256, [3, 1], scope='Conv3_3x1')
        with tf.variable_scope('Br4_7x7Dbl'):
            br4 = layers.conv2d(net, 384, [1, 1], scope='Conv1_1x1')
            br4 = layers.conv2d(br4, 448, [1, 7], scope='Conv2_1x7')
            br4 = layers.conv2d(br4, 512, [7, 1], scope='Conv3_7x1')
            br4a = layers.conv2d(br4, 256, [1, 7], scope='Conv4a_1x7')
            br4b = layers.conv2d(br4, 256, [7, 1], scope='Conv4b_7x1')
        net = tf.concat(3, [br1, br2, br3a, br3b, br4a, br4b], name='Concat1')
        # 8 x 8 x 1536
    return net

