Python flatten() example source code
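slim.flatten (an alias for tf.contrib.layers.flatten) collapses every dimension of a tensor except the batch dimension, e.g. turning a [batch, height, width, channels] feature map into a [batch, height*width*channels] matrix ready for a fully connected layer. A minimal sketch of the call, assuming TensorFlow 1.x with tf.contrib.slim available:

import tensorflow as tf
slim = tf.contrib.slim

x = tf.placeholder(tf.float32, [None, 8, 8, 16])
flat = slim.flatten(x)  # static shape: [None, 1024]

The examples below, collected from open-source projects, show the same pattern in context.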

base_network.py (project: cartpoleplusplus, author: matpalm)
def input_state_network(self, input_state, opts):
    # TODO: use in lrpg and ddpg too
    if opts.use_raw_pixels:
      input_state = self.simple_conv_net_on(input_state, opts)
    flattened_input_state = slim.flatten(input_state, scope='flat')
    return self.hidden_layers_starting_at(flattened_input_state, opts.hidden_layers, opts)
predictron.py (project: predictron, author: zhongwen)
def iter_func(self, state):
    sc = predictron_arg_scope()

    with tf.variable_scope('value'):
      value_net = slim.fully_connected(slim.flatten(state), 32, scope='fc0')
      value_net = layers.batch_norm(value_net, activation_fn=tf.nn.relu, scope='fc0/preact')
      value_net = slim.fully_connected(value_net, self.maze_size, activation_fn=None, scope='fc1')

    with slim.arg_scope(sc):
      net = slim.conv2d(state, 32, [3, 3], scope='conv1')
      net = layers.batch_norm(net, activation_fn=tf.nn.relu, scope='conv1/preact')
      net_flatten = slim.flatten(net, scope='conv1/flatten')

      with tf.variable_scope('reward'):
        reward_net = slim.fully_connected(net_flatten, 32, scope='fc0')
        reward_net = layers.batch_norm(reward_net, activation_fn=tf.nn.relu, scope='fc0/preact')
        reward_net = slim.fully_connected(reward_net, self.maze_size, activation_fn=None, scope='fc1')

      with tf.variable_scope('gamma'):
        gamma_net = slim.fully_connected(net_flatten, 32, scope='fc0')
        gamma_net = layers.batch_norm(gamma_net, activation_fn=tf.nn.relu, scope='fc0/preact')
        gamma_net = slim.fully_connected(gamma_net, self.maze_size, activation_fn=tf.nn.sigmoid, scope='fc1')

      with tf.variable_scope('lambda'):
        lambda_net = slim.fully_connected(net_flatten, 32, scope='fc0')
        lambda_net = layers.batch_norm(lambda_net, activation_fn=tf.nn.relu, scope='fc0/preact')
        lambda_net = slim.fully_connected(lambda_net, self.maze_size, activation_fn=tf.nn.sigmoid, scope='fc1')

      net = slim.conv2d(net, 32, [3, 3], scope='conv2')
      net = layers.batch_norm(net, activation_fn=tf.nn.relu, scope='conv2/preact')

      net = slim.conv2d(net, 32, [3, 3], scope='conv3')
      net = layers.batch_norm(net, activation_fn=tf.nn.relu, scope='conv3/preact')
    return net, reward_net, gamma_net, lambda_net, value_net
vgg16.py (project: tf-faster-rcnn, author: endernewton)
def _head_to_tail(self, pool5, is_training, reuse=None):
    with tf.variable_scope(self._scope, self._scope, reuse=reuse):
      pool5_flat = slim.flatten(pool5, scope='flatten')
      fc6 = slim.fully_connected(pool5_flat, 4096, scope='fc6')
      if is_training:
        fc6 = slim.dropout(fc6, keep_prob=0.5, is_training=True, 
                            scope='dropout6')
      fc7 = slim.fully_connected(fc6, 4096, scope='fc7')
      if is_training:
        fc7 = slim.dropout(fc7, keep_prob=0.5, is_training=True, 
                            scope='dropout7')

    return fc7
data_generator.py (project: SSD_tensorflow_VOC, author: LevinJ)
def my_cnn(images, num_classes, is_training):  # is_training is not used...
    with slim.arg_scope([slim.max_pool2d], kernel_size=[3, 3], stride=2):
        net = slim.conv2d(images, 64, [5, 5])
        net = slim.max_pool2d(net)
        net = slim.conv2d(net, 64, [5, 5])
        net = slim.max_pool2d(net)
        net = slim.flatten(net)
        net = slim.fully_connected(net, 192)
        net = slim.fully_connected(net, num_classes, activation_fn=None)       
    return net
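A hypothetical invocation of my_cnn (the placeholder shape and class count are assumptions, not taken from the source project):

images = tf.placeholder(tf.float32, [None, 32, 32, 3])
logits = my_cnn(images, num_classes=10, is_training=True)  # -> [None, 10]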
network.py (project: Neural-EM, author: sjoerdvansteenkiste)
def __init__(self, cell, shape='flatten', apply_to='output'):
        self._cell = cell
        self._shape = shape
        self._apply_to = apply_to
lenet.py (project: LeNet, author: ganyc717)
def construct_net(self, is_trained=True):
        with slim.arg_scope([slim.conv2d], padding='VALID',
                            weights_initializer=tf.truncated_normal_initializer(stddev=0.01),
                            weights_regularizer=slim.l2_regularizer(0.0005)):
            net = slim.conv2d(self.input_images, 6, [5, 5], 1, padding='SAME', scope='conv1')
            net = slim.max_pool2d(net, [2, 2], scope='pool2')
            net = slim.conv2d(net, 16, [5, 5], 1, scope='conv3')
            net = slim.max_pool2d(net, [2, 2], scope='pool4')
            net = slim.conv2d(net, 120, [5, 5], 1, scope='conv5')
            net = slim.flatten(net, scope='flat6')
            net = slim.fully_connected(net, 84, scope='fc7')
            net = slim.dropout(net, self.dropout, is_training=is_trained, scope='dropout8')
            digits = slim.fully_connected(net, 10, scope='fc9')
        return digits
lenet.py (project: num-seq-recognizer, author: gmlove)
def cnn_layers(inputs, scope, end_points_collection, dropout_keep_prob=0.8, is_training=True):
  with slim.arg_scope([slim.conv2d, slim.fully_connected, slim.max_pool2d],
                      outputs_collections=[end_points_collection]):
    net = slim.conv2d(inputs, 32, [5, 5], scope='conv1')
    net = slim.max_pool2d(net, [2, 2], 2, scope='pool1')
    net = slim.conv2d(net, 64, [5, 5], scope='conv2')
    net = slim.max_pool2d(net, [2, 2], 2, scope='pool2')
    net = slim.flatten(net)

    net = slim.fully_connected(net, 1024, scope='fc3')

  return net, end_points_collection
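A hedged sketch of how the returned collection is typically consumed (the collection name and input shape are assumptions):

images = tf.placeholder(tf.float32, [None, 28, 28, 1])
end_points_collection = 'lenet_end_points'
net, coll = cnn_layers(images, 'lenet', end_points_collection)
end_points = slim.utils.convert_collection_to_dict(coll)  # layer name -> tensor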
dfc_vae_resnet.py (project: facenet, author: davidsandberg)
def encoder(self, images, is_training):
        activation_fn = leaky_relu  # tf.nn.relu
        weight_decay = 0.0
        with tf.variable_scope('encoder'):
            with slim.arg_scope([slim.batch_norm],
                                is_training=is_training):
                with slim.arg_scope([slim.conv2d, slim.fully_connected],
                                    weights_initializer=tf.truncated_normal_initializer(stddev=0.1),
                                    weights_regularizer=slim.l2_regularizer(weight_decay),
                                    normalizer_fn=slim.batch_norm,
                                    normalizer_params=self.batch_norm_params):
                    net = images

                    net = slim.conv2d(net, 32, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_1a')
                    net = slim.repeat(net, 3, conv2d_block, 0.1, 32, [4, 4], 1, activation_fn=activation_fn, scope='Conv2d_1b')

                    net = slim.conv2d(net, 64, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_2a')
                    net = slim.repeat(net, 3, conv2d_block, 0.1, 64, [4, 4], 1, activation_fn=activation_fn, scope='Conv2d_2b')

                    net = slim.conv2d(net, 128, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_3a')
                    net = slim.repeat(net, 3, conv2d_block, 0.1, 128, [4, 4], 1, activation_fn=activation_fn, scope='Conv2d_3b')

                    net = slim.conv2d(net, 256, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_4a')
                    net = slim.repeat(net, 3, conv2d_block, 0.1, 256, [4, 4], 1, activation_fn=activation_fn, scope='Conv2d_4b')

                    net = slim.flatten(net)
                    fc1 = slim.fully_connected(net, self.latent_variable_dim, activation_fn=None, normalizer_fn=None, scope='Fc_1')
                    fc2 = slim.fully_connected(net, self.latent_variable_dim, activation_fn=None, normalizer_fn=None, scope='Fc_2')
        return fc1, fc2
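At the call site, the two heads are conventionally used as the mean and log-variance of the approximate posterior; a minimal reparameterization sketch, assuming that convention (names not from the source file):

mu, log_var = fc1, fc2  # as returned by encoder()
eps = tf.random_normal(tf.shape(mu))
z = mu + eps * tf.exp(0.5 * log_var)  # sample from N(mu, exp(log_var))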
vgg16.py (project: nexar-2, author: lbin)
def _head_to_tail(self, pool5, is_training, reuse=False):
    with tf.variable_scope(self._scope, self._scope, reuse=reuse):
      pool5_flat = slim.flatten(pool5, scope='flatten')
      fc6 = slim.fully_connected(pool5_flat, 4096, scope='fc6')
      if is_training:
        fc6 = slim.dropout(fc6, keep_prob=0.5, is_training=True, 
                            scope='dropout6')
      fc7 = slim.fully_connected(fc6, 4096, scope='fc7')
      if is_training:
        fc7 = slim.dropout(fc7, keep_prob=0.5, is_training=True, 
                            scope='dropout7')

    return fc7
vaegan.py (project: tf-vaegan, author: JeremyCCHsu)
def _encoder(self, x, is_training):
        n_layer = len(self.arch['encoder']['output'])
        subnet = self.arch['encoder']
        with slim.arg_scope(
                [slim.batch_norm],
                scale=True,
                updates_collections=None,
                decay=0.9, epsilon=1e-5,
                is_training=is_training,
                reuse=None):
            with slim.arg_scope(
                    [slim.conv2d],
                    weights_regularizer=slim.l2_regularizer(subnet['l2-reg']),
                    normalizer_fn=slim.batch_norm,
                    activation_fn=lrelu):

                for i in range(n_layer):
                    x = slim.conv2d(
                        x,
                        subnet['output'][i],
                        subnet['kernel'][i],
                        subnet['stride'][i])

        x = slim.flatten(x)

        with slim.arg_scope(
            [slim.fully_connected],
            num_outputs=self.arch['z_dim'],
            weights_regularizer=slim.l2_regularizer(subnet['l2-reg']),
            normalizer_fn=None,
            activation_fn=None):
            z_mu = slim.fully_connected(x)
            z_lv = slim.fully_connected(x)
        return z_mu, z_lv
cvae.py (project: Gumbel-Softmax-VAE-in-tensorflow, author: JeremyCCHsu)
def _classifier(self, x, is_training):
        n_layer = len(self.arch['classifier']['output'])
        subnet = self.arch['classifier']

        with slim.arg_scope(
            [slim.batch_norm],
            scale=True, scope='BN',
            updates_collections=None,
            # decay=0.9, epsilon=1e-5,  # [TODO] Test these hyper-parameters
            is_training=is_training):
            with slim.arg_scope(
                [slim.conv2d],
                weights_regularizer=slim.l2_regularizer(subnet['l2-reg']),
                normalizer_fn=slim.batch_norm,
                activation_fn=lrelu):

                for i in range(n_layer):
                    x = slim.conv2d(
                        x,
                        subnet['output'][i],
                        subnet['kernel'][i],
                        subnet['stride'][i])
                    tf.summary.image(
                        'down-sample{:d}'.format(i),
                        tf.transpose(x[:, :, :, 0:3], [2, 1, 0, 3]))

        x = slim.flatten(x)

        with slim.arg_scope(
            [slim.fully_connected],
            num_outputs=self.arch['y_dim'],
            weights_regularizer=slim.l2_regularizer(subnet['l2-reg']),
            normalizer_fn=None,
            activation_fn=None):
            y_logit = slim.fully_connected(x)
            # z_mu = slim.fully_connected(x)
            # z_lv = slim.fully_connected(x)
        # return z_mu, z_lv
        return y_logit
gvae.py (project: Gumbel-Softmax-VAE-in-tensorflow, author: JeremyCCHsu)
def _classifier(self, x, is_training):
        n_layer = len(self.arch['classifier']['output'])
        subnet = self.arch['classifier']

        with slim.arg_scope(
            [slim.batch_norm],
            scale=True, scope='BN',
            updates_collections=None,
            # decay=0.9, epsilon=1e-5,  # [TODO] Test these hyper-parameters
            is_training=is_training):
            with slim.arg_scope(
                [slim.conv2d],
                weights_regularizer=slim.l2_regularizer(subnet['l2-reg']),
                normalizer_fn=slim.batch_norm,
                activation_fn=lrelu):

                for i in range(n_layer):
                    x = slim.conv2d(
                        x,
                        subnet['output'][i],
                        subnet['kernel'][i],
                        subnet['stride'][i])
                    tf.summary.image(
                        'down-sample{:d}'.format(i),
                        tf.transpose(x[:, :, :, 0:3], [2, 1, 0, 3]))

        x = slim.flatten(x)

        with slim.arg_scope(
            [slim.fully_connected],
            num_outputs=self.arch['y_dim'],
            weights_regularizer=slim.l2_regularizer(subnet['l2-reg']),
            normalizer_fn=None,
            activation_fn=None):
            y_logit = slim.fully_connected(x)
            # z_mu = slim.fully_connected(x)
            # z_lv = slim.fully_connected(x)
        # return z_mu, z_lv
        return y_logit
layer.py (project: Gumbel-Softmax-VAE-in-tensorflow, author: JeremyCCHsu)
def test_fc_cnn():
    x = tf.placeholder(name='x', shape=[50, 32, 32, 1], dtype=tf.float32)
    c1 = conv2d(x, [5, 5], [1, 2, 2, 1], 16, scope='conv1')
    c2 = conv2d(c1, [5, 5], [1, 2, 2, 1], 64, scope='conv2')
    f0 = slim.flatten(c2)
    f1 = dense(f0, 100, scope='dense1')
    f2 = dense(f1, 10, scope='dense2')
    return f2


# [TODO] Need to test BN
# d_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope.name)
layer.py (project: Gumbel-Softmax-VAE-in-tensorflow, author: JeremyCCHsu)
def discriminator(x):
    with tf.variable_scope('Discriminator'):
        c1 = conv2d(x, [5, 5], [1, 2, 2, 1], 16, scope='conv1')
        c2 = conv2d(c1, [5, 5], [1, 2, 2, 1], 64, scope='conv2')
        f0 = slim.flatten(c2)
        f1 = dense(f0, 100, scope='dense1')
        f2 = dense(f1, 10, scope='dense2')
    return f2
DDTL_resnet.py (project: tensorflow-DDT, author: wangchao66)
def resnet_model(image, reuse):
    with tf.variable_scope("model", reuse=reuse):
        with slim.arg_scope(resnet_v1.resnet_arg_scope()):
            outputs, _ = resnet_v1.resnet_v1_50(image)
            # outputs, _ = inception_resnet_v2(image)
            outputs = slim.flatten(outputs)
            outputs = slim.fully_connected(outputs, 256)
            logits = slim.fully_connected(outputs, num_classes, activation_fn=None)
    return outputs, logits
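A hypothetical weight-sharing call pattern for the reuse flag, as in Siamese or domain-transfer setups (tensor names are assumptions):

src_feat, src_logits = resnet_model(source_images, reuse=False)
tgt_feat, tgt_logits = resnet_model(target_images, reuse=True)  # same variables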

# -------- train -----------------------------------
ops.py (project: DCGAN-LSGAN-WGAN-WGAN-GP-Tensorflow, author: LynnHo)
def flatten_fully_connected(inputs,
                            num_outputs,
                            activation_fn=tf.nn.relu,
                            normalizer_fn=None,
                            normalizer_params=None,
                            weights_initializer=slim.xavier_initializer(),
                            weights_regularizer=None,
                            biases_initializer=tf.zeros_initializer(),
                            biases_regularizer=None,
                            reuse=None,
                            variables_collections=None,
                            outputs_collections=None,
                            trainable=True,
                            scope=None):
    with tf.variable_scope(scope, 'flatten_fully_connected', [inputs]):
        if inputs.shape.ndims > 2:
            inputs = slim.flatten(inputs)
        return slim.fully_connected(inputs,
                                    num_outputs,
                                    activation_fn,
                                    normalizer_fn,
                                    normalizer_params,
                                    weights_initializer,
                                    weights_regularizer,
                                    biases_initializer,
                                    biases_regularizer,
                                    reuse,
                                    variables_collections,
                                    outputs_collections,
                                    trainable,
                                    scope)
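Hypothetical usage of the wrapper above (the input shape is chosen for illustration):

h = tf.placeholder(tf.float32, [None, 7, 7, 64])
out = flatten_fully_connected(h, 256, scope='fc')  # [None, 3136] -> [None, 256]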
WhatWhereAutoencoder.py (project: Tensorflow_WhatWhereAutoencoder, author: yselivonchyk)
def build_mnist_model(self, input, use_unpooling):
    """
    Build autoencoder model for mnist dataset as described in the Stacked What-Where autoencoders paper

    :param input: 4D tensor of source data of shape [batch_size, w, h, channels]
    :param use_unpooling: indicate whether unpooling layer should be used instead of naive upsampling
    :return: tuple of tensors:
      train - train operation
      encode - bottleneck tensor of the autoencoder network
      decode - reconstruction of the input
    """
    # Encoder. (16)5c-(32)3c-Xp
    net = slim.conv2d(input, 16, [5, 5])
    net = slim.conv2d(net, 32, [3, 3])

    if use_unpooling:
      encode, mask = max_pool_with_argmax(net, FLAGS.pool_size)
      net = unpool(encode, mask, stride=FLAGS.pool_size)
    else:
      encode = slim.max_pool2d(net, kernel_size=[FLAGS.pool_size, FLAGS.pool_size], stride=FLAGS.pool_size)
      net = upsample(encode, stride=FLAGS.pool_size)

    # Decoder
    net = slim.conv2d_transpose(net, 16, [3, 3])
    net = slim.conv2d_transpose(net, 1, [5, 5])
    decode = net

    # Reconstruction loss; tf.nn.l2_loss(t) computes sum(t ** 2) / 2
    loss_l2 = tf.nn.l2_loss(slim.flatten(input) - slim.flatten(net))

    # Optimizer
    train = tf.train.AdamOptimizer(learning_rate=FLAGS.learning_rate).minimize(loss_l2)
    return train, encode, decode
conv_vae.py (project: vae-flow, author: andymiller)
def inference_network(x, xwidth=28, xheight=28, zdim=2):
  """Inference network to parameterize variational model. It takes
  data as input and outputs the variational parameters.
  mu, sigma = neural_network(x)
  """
  with slim.arg_scope([slim.conv2d, slim.fully_connected],
                      activation_fn=tf.nn.elu,
                      normalizer_fn=slim.batch_norm,
                      normalizer_params={'scale': True}):
    net = tf.reshape(x, [N_MINIBATCH, 28, 28, 1])
    net = slim.conv2d(net, 32, 5, stride=2)
    net = slim.conv2d(net, 64, 5, stride=2)
    net = slim.conv2d(net, 128, 5, padding='VALID')
    net = slim.dropout(net, 0.9)
    net = slim.flatten(net)
    params = slim.fully_connected(net, zdim * 2, activation_fn=None)

  mu    = params[:, :zdim]
  #sigma = tf.nn.softplus(params[:, zdim:])
  sigma = params[:, zdim:]
  return mu, sigma


##########################################
# make variational lower bound objective #
##########################################
simple.py (project: darkflow, author: thtrieu)
def forward(self):
        # Transpose NHWC -> NCHW so the flattened order matches darknet's
        # channel-first memory layout.
        temp = tf.transpose(self.inp.out, [0, 3, 1, 2])
        self.out = slim.flatten(temp, scope=self.scope)
model_interpreter.py (project: TensorFlow_DCIGN, author: yselivonchyk)
def build_encoder(net, layer_config, i=1, reuse=False):
  if i == len(layer_config):
    return net

  cfg = layer_config[i]
  cfg.shape = net.get_shape().as_list()
  name = cfg.enc_op_name if reuse else None
  cfg.ein = net
  if cfg.type == FC:
    if len(cfg.shape) > 2:
      net = slim.flatten(net)
    net = slim.fully_connected(net, cfg.size, activation_fn=cfg.activation,
                               scope=name, reuse=reuse)
  elif cfg.type == CONV:
    net = slim.conv2d(net, cfg.size, [cfg.kernel, cfg.kernel], stride=cfg.stride,
                      activation_fn=cfg.activation, padding=PADDING,
                      scope=name, reuse=reuse)
  elif cfg.type == POOL_ARG:
    net, cfg.argmax = nut.max_pool_with_argmax(net, cfg.kernel)
    # if not reuse:
    #   mask = nut.fake_arg_max_of_max_pool(cfg.shape, cfg.kernel)
    #   cfg.argmax_dummy = tf.constant(mask.flatten(), shape=mask.shape)
  elif cfg.type == POOL:
    net = slim.max_pool2d(net, kernel_size=[cfg.kernel, cfg.kernel], stride=cfg.kernel)
  elif cfg.type == DO:
    net = tf.nn.dropout(net, keep_prob=cfg.keep_prob)
  elif cfg.type == LOSS:
    cfg.arg1 = net
  elif cfg.type == INPUT:
    assert False

  if not reuse:
    cfg.enc_op_name = net.name.split('/')[0]
  if not reuse:
    ut.print_info('\rencoder_%d\t%s\t%s' % (i, str(net), cfg.enc_op_name), color=CONFIG_COLOR)
  return build_encoder(net, layer_config, i + 1, reuse=reuse)

