Python conv2d() usage examples from open-source projects

autoencoder_simple.py (project: Machine-Learning, author: hadikazemi)
def autoencoder(inputs):
    # encoder
    # 32 x 32 x 1   ->  16 x 16 x 32
    # 16 x 16 x 32  ->  8 x 8 x 16
    # 8 x 8 x 16    ->  2 x 2 x 8
    net = lays.conv2d(inputs, 32, [5, 5], stride=2, padding='SAME')
    net = lays.conv2d(net, 16, [5, 5], stride=2, padding='SAME')
    net = lays.conv2d(net, 8, [5, 5], stride=4, padding='SAME')
    # decoder
    # 2 x 2 x 8    ->  8 x 8 x 16
    # 8 x 8 x 16   ->  16 x 16 x 32
    # 16 x 16 x 32  ->  32 x 32 x 1
    net = lays.conv2d_transpose(net, 16, [5, 5], stride=4, padding='SAME')
    net = lays.conv2d_transpose(net, 32, [5, 5], stride=2, padding='SAME')
    net = lays.conv2d_transpose(net, 1, [5, 5], stride=2, padding='SAME', activation_fn=tf.nn.tanh)
    return net
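
A minimal training sketch for this encoder/decoder (my addition, not the project's code): it assumes `lays` is `tensorflow.contrib.layers`, pads MNIST digits from 28x28 to the 32x32 input size described in the comments, and rescales pixels to [-1, 1] to match the final tanh activation.

import numpy as np
import tensorflow as tf
import tensorflow.contrib.layers as lays
from tensorflow.examples.tutorials.mnist import input_data

mnist = input_data.read_data_sets('MNIST_data')          # read MNIST dataset
inputs = tf.placeholder(tf.float32, (None, 32, 32, 1))
outputs = autoencoder(inputs)
loss = tf.reduce_mean(tf.square(outputs - inputs))       # pixel-wise MSE
train_op = tf.train.AdamOptimizer(0.001).minimize(loss)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    batch, _ = mnist.train.next_batch(64)
    batch = np.pad(batch.reshape(-1, 28, 28, 1),         # 28x28 -> 32x32
                   ((0, 0), (2, 2), (2, 2), (0, 0)), 'constant')
    batch = batch * 2.0 - 1.0                            # rescale for tanh
    _, l = sess.run([train_op, loss], {inputs: batch})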

train_tf.py (project: DeepWorks, author: daigo0927)
def __call__(self, inputs, reuse = True):
        with tf.variable_scope(self.name) as vs:
            if reuse:
                # share weights created by an earlier call to this network
                vs.reuse_variables()

            x = tcl.conv2d(inputs,
                           num_outputs = 64,
                           kernel_size = (4, 4),
                           stride = (1, 1),
                           padding = 'SAME')
            x = tcl.batch_norm(x)
            x = tf.nn.relu(x)
            x = tcl.max_pool2d(x, (2, 2), (2, 2), 'SAME')
            x = tcl.conv2d(x,
                           num_outputs = 128,
                           kernel_size = (4, 4),
                           stride = (1, 1),
                           padding = 'SAME')
            x = tcl.batch_norm(x)
            x = tf.nn.relu(x)
            x = tcl.max_pool2d(x, (2, 2), (2, 2), 'SAME')
            x = tcl.flatten(x)
            # raw logits: disable the layer's default ReLU activation
            logits = tcl.fully_connected(x, num_outputs = self.num_output,
                                         activation_fn = None)

            return logits
model.py (project: DeepWorks, author: daigo0927)
def _bn_relu_conv(filters, kernel_size = (3, 3), stride = (1, 1)):
    def f(inputs):
        x = tcl.batch_norm(inputs)
        x = tf.nn.relu(x)
        x = tcl.conv2d(x,
                       num_outputs = filters,
                       kernel_size = kernel_size,
                       stride = stride,
                       padding = 'SAME')
        return x
    return f
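
Because `_bn_relu_conv` returns a closure, pre-activation blocks compose functionally. A usage sketch (assuming `inputs` is an NHWC feature tensor):

x = _bn_relu_conv(filters=64)(inputs)              # BN -> ReLU -> 3x3 conv
x = _bn_relu_conv(filters=128, stride=(2, 2))(x)   # same pattern, downsampled by 2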
model.py (project: DeepWorks, author: daigo0927)
def __call__(self, inputs, reuse = True):
        with tf.variable_scope(self.name) as vs:
            if reuse:
                vs.reuse_variables()

            conv1 = tcl.conv2d(inputs,
                               num_outputs = 64,
                               kernel_size = (7, 7),
                               stride = (2, 2),
                               padding = 'SAME')
            conv1 = tcl.batch_norm(conv1)
            conv1 = tf.nn.relu(conv1)
            conv1 = tcl.max_pool2d(conv1,
                                   kernel_size = (3, 3),
                                   stride = (2, 2),
                                   padding = 'SAME')

            x = conv1
            filters = 64
            first_layer = True
            for i, r in enumerate(self.repetitions):
                x = _residual_block(self.block_fn,
                                    filters = filters,
                                    repetition = r,
                                    is_first_layer = first_layer)(x)
                filters *= 2
                if first_layer:
                    first_layer = False

            _, h, w, ch = x.shape.as_list()
            outputs = tcl.avg_pool2d(x,
                                     kernel_size = (h, w),
                                     stride = (1, 1))
            outputs = tcl.flatten(outputs)
            logits = tcl.fully_connected(outputs, num_outputs = self.num_output,
                                         activation_fn = None)
            return logits
model.py (project: DeepWorks, author: daigo0927)
def _conv_relu(filters, kernel_size = (3, 3), stride = (1, 1)):
    def f(inputs):
        x = tcl.conv2d(inputs,
                       num_outputs = filters,
                       kernel_size = kernel_size,
                       stride = stride,
                       padding = 'SAME')
        x = tf.nn.relu(x)
        return x
    return f
blocks.py (project: DeepWorks, author: daigo0927)
def bn_relu_conv(inputs, num_outputs, kernel_size, stride = (1, 1), padding = 'SAME'):
    x = tcl.batch_norm(inputs)
    x = tf.nn.relu(x)
    x = tcl.conv2d(x,
                   num_outputs = num_outputs,
                   kernel_size = kernel_size,
                   stride = stride,
                   padding = padding)
    return x
subpixel.py (project: DeepWorks, author: daigo0927)
def PhaseShift_withConv(x, r, filters, kernel_size = (3, 3), stride = (1, 1)):
    # output shape: (batch, r*H, r*W, filters)

    x = tcl.conv2d(x,
                   num_outputs = filters*r**2,
                   kernel_size = kernel_size,
                   stride = stride,
                   padding = 'SAME')
    x = PhaseShift(x, r)
    return x
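
`PhaseShift` is defined elsewhere in this project; for an integer upscaling factor r, the same channel-to-space rearrangement is built into TensorFlow. A sketch of an equivalent helper (note: with multiple output filters the exact channel ordering can differ from a hand-written phase shift, so treat this as an approximation):

def phase_shift_with_depth_to_space(x, r, filters, kernel_size = (3, 3), stride = (1, 1)):
    # conv to filters*r^2 channels, then move r x r channel blocks into space
    x = tcl.conv2d(x,
                   num_outputs = filters * r**2,
                   kernel_size = kernel_size,
                   stride = stride,
                   padding = 'SAME')
    return tf.depth_to_space(x, r)  # (B, H, W, f*r^2) -> (B, r*H, r*W, f)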
pretrained_models.py (project: DocumentSegmentation, author: SeguinBe)
def vgg_16_fn(input_tensor: tf.Tensor, scope='vgg_16', blocks=5, weight_decay=0.0005) \
        -> (tf.Tensor, list):  # list of tf.Tensors (layers)
    intermediate_levels = []
    # intermediate_levels.append(input_tensor)
    with slim.arg_scope(nets.vgg.vgg_arg_scope(weight_decay=weight_decay)):
        with tf.variable_scope(scope, 'vgg_16', [input_tensor]) as sc:
            input_tensor = mean_substraction(input_tensor)
            end_points_collection = sc.original_name_scope + '_end_points'
            # Collect outputs for conv2d, fully_connected and max_pool2d.
            with slim.arg_scope(
                    [layers.conv2d, layers.fully_connected, layers.max_pool2d],
                    outputs_collections=end_points_collection):
                net = layers.repeat(
                    input_tensor, 2, layers.conv2d, 64, [3, 3], scope='conv1')
                intermediate_levels.append(net)
                net = layers.max_pool2d(net, [2, 2], scope='pool1')
                if blocks >= 2:
                    net = layers.repeat(net, 2, layers.conv2d, 128, [3, 3], scope='conv2')
                    intermediate_levels.append(net)
                    net = layers.max_pool2d(net, [2, 2], scope='pool2')
                if blocks >= 3:
                    net = layers.repeat(net, 3, layers.conv2d, 256, [3, 3], scope='conv3')
                    intermediate_levels.append(net)
                    net = layers.max_pool2d(net, [2, 2], scope='pool3')
                if blocks >= 4:
                    net = layers.repeat(net, 3, layers.conv2d, 512, [3, 3], scope='conv4')
                    intermediate_levels.append(net)
                    net = layers.max_pool2d(net, [2, 2], scope='pool4')
                if blocks >= 5:
                    net = layers.repeat(net, 3, layers.conv2d, 512, [3, 3], scope='conv5')
                    intermediate_levels.append(net)
                    net = layers.max_pool2d(net, [2, 2], scope='pool5')

                return net, intermediate_levels
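
A usage sketch for the function above (hypothetical placeholder shape): with `blocks=3` it returns the pool3 output plus the pre-pooling feature maps of the first three blocks, the usual pattern for building FPN/U-Net style decoders on top of VGG.

images = tf.placeholder(tf.float32, [None, 224, 224, 3])
net, levels = vgg_16_fn(images, blocks=3)
# levels[0]: 224x224x64 (conv1), levels[1]: 112x112x128 (conv2),
# levels[2]: 56x56x256 (conv3); net: 28x28x256 after pool3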
async_dqn.py (project: chi, author: rmst)
def delling_network():
    """ Architecture according to Duelling DQN:
    https://arxiv.org/abs/1511.06581
    """

    @tt.model(tracker=tf.train.ExponentialMovingAverage(1 - .0005),  # TODO: replace with original weight freeze
              optimizer=tf.train.RMSPropOptimizer(6.25e-5, .95, .95, .01))
    def q_network(x):
        x /= 255
        x = layers.conv2d(x, 32, 8, 4)
        x = layers.conv2d(x, 64, 4, 2)
        x = layers.conv2d(x, 64, 3, 1)
        x = layers.flatten(x)

        xv = layers.fully_connected(x, 512)
        val = layers.fully_connected(xv, 1, activation_fn=None)
        # val = tf.squeeze(val, 1)

        xa = layers.fully_connected(x, 512)
        adv = layers.fully_connected(xa, env.action_space.n, activation_fn=None)

        q = val + adv - tf.reduce_mean(adv, axis=1, keep_dims=True)
        q = tf.identity(q, name='Q')
        return q
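
The last three lines are the mean-subtracted dueling aggregation from the paper: subtracting the advantage mean makes the V/A decomposition identifiable while leaving the relative ordering of actions untouched. A quick numeric check (my addition):

import numpy as np
v = np.array([[2.0]])              # state value, shape (batch, 1)
a = np.array([[1.0, 3.0, 5.0]])    # advantages, shape (batch, n_actions)
q = v + a - a.mean(axis=1, keepdims=True)
print(q)                           # [[0. 2. 4.]] -- mean(Q) equals V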


alexnet_model.py (project: tensorflow_face, author: ZhihengCV)
def alexnet_v2_arg_scope(weight_decay=0.0005):
    with arg_scope(
            [layers.conv2d, layers_lib.fully_connected],
            activation_fn=nn_ops.relu,
            biases_initializer=init_ops.constant_initializer(0.1),
            weights_regularizer=regularizers.l2_regularizer(weight_decay)):
        with arg_scope([layers.conv2d], padding='SAME'):
            with arg_scope([layers_lib.max_pool2d], padding='VALID') as arg_sc:
                return arg_sc
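
Ops constructed inside the returned scope inherit the defaults above (ReLU activation, 0.1 bias init, L2 weight decay, SAME conv padding, VALID pooling). A usage sketch, with `images` as a hypothetical input tensor:

with arg_scope(alexnet_v2_arg_scope(weight_decay=0.0005)):
    net = layers.conv2d(images, 64, [11, 11], stride=4, scope='conv1')
    net = layers_lib.max_pool2d(net, [3, 3], stride=2, scope='pool1')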
dqn.py (project: reinforceflow, author: dbobrenko)
def make_dqn_body_nature(input_layer, trainable=True):
    end_points = {}
    net = layers.conv2d(inputs=input_layer,
                        num_outputs=32,
                        kernel_size=[8, 8],
                        stride=[4, 4],
                        activation_fn=tf.nn.relu,
                        padding="same",
                        scope="conv1",
                        trainable=trainable)
    end_points['conv1'] = net
    net = layers.conv2d(inputs=net,
                        num_outputs=64,
                        kernel_size=[4, 4],
                        stride=[2, 2],
                        activation_fn=tf.nn.relu,
                        padding="same",
                        scope="conv2",
                        trainable=trainable)
    end_points['conv2'] = net
    net = layers.conv2d(inputs=net,
                        num_outputs=64,
                        kernel_size=[3, 3],
                        stride=[1, 1],
                        activation_fn=tf.nn.relu,
                        padding="same",
                        scope="conv3",
                        trainable=trainable)
    end_points['conv3'] = net
    out = layers.flatten(net)
    end_points['conv3_flatten'] = out
    return out, end_points
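
A usage sketch: the Nature DQN pipeline stacks four 84x84 grayscale frames per state, and a task-specific head goes on top of the flattened conv output (`num_actions` is a hypothetical action count):

frames = tf.placeholder(tf.float32, [None, 84, 84, 4])
body, end_points = make_dqn_body_nature(frames)
q_values = layers.fully_connected(body, num_outputs=num_actions,
                                  activation_fn=None, scope='q_head')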
nets.py (project: ICGANs, author: cameronfabbri)
def encZ(x, ACTIVATION):

   conv1 = tcl.conv2d(x, 32, 5, 2, activation_fn=tf.identity, normalizer_fn=tcl.batch_norm, weights_initializer=tf.random_normal_initializer(stddev=0.02), scope='conv1')
   conv1 = activate(conv1, ACTIVATION)

   conv2 = tcl.conv2d(conv1, 64, 5, 2, activation_fn=tf.identity, normalizer_fn=tcl.batch_norm, weights_initializer=tf.random_normal_initializer(stddev=0.02), scope='conv2')
   conv2 = activate(conv2, ACTIVATION)

   conv3 = tcl.conv2d(conv2, 128, 5, 2, activation_fn=tf.identity, normalizer_fn=tcl.batch_norm, weights_initializer=tf.random_normal_initializer(stddev=0.02), scope='conv3')
   conv3 = activate(conv3, ACTIVATION)

   conv4 = tcl.conv2d(conv3, 256, 5, 2, activation_fn=tf.identity, normalizer_fn=tcl.batch_norm, weights_initializer=tf.random_normal_initializer(stddev=0.02), scope='conv4')
   conv4 = activate(conv4, ACTIVATION)

   conv4_flat = tcl.flatten(conv4)

   fc1 = tcl.fully_connected(conv4_flat, 4096, activation_fn=tf.identity, normalizer_fn=tcl.batch_norm, weights_initializer=tf.random_normal_initializer(stddev=0.02), scope='fc1')
   fc1 = activate(fc1, ACTIVATION)
   #fc1 = tcl.dropout(fc1, 0.5)

   fc2 = tcl.fully_connected(fc1, 100, activation_fn=tf.identity, weights_initializer=tf.random_normal_initializer(stddev=0.02), scope='fc2')

   print('input:', x)
   print('conv1:', conv1)
   print('conv2:', conv2)
   print('conv3:', conv3)
   print('conv4:', conv4)
   print('fc1:', fc1)
   print('fc2:', fc2)
   print('END ENCODER\n')

   tf.add_to_collection('vars', conv1)
   tf.add_to_collection('vars', conv2)
   tf.add_to_collection('vars', conv3)
   tf.add_to_collection('vars', conv4)
   tf.add_to_collection('vars', fc1)
   tf.add_to_collection('vars', fc2)

   return fc2
train.py (project: ICGANs, author: cameronfabbri)
def encoder(x,y):

   y_dim = int(y.get_shape().as_list()[-1])

   # reshape so it's batchx1x1xy_size
   y = tf.reshape(y, shape=[BATCH_SIZE, 1, 1, y_dim])
   input_ = conv_cond_concat(x, y)

   conv1 = tcl.conv2d(input_, 64, 4, 2, activation_fn=tf.identity, normalizer_fn=tcl.batch_norm, weights_initializer=tf.random_normal_initializer(stddev=0.02), scope='g_enc_conv1')
   conv1 = lrelu(conv1)
   conv1 = conv_cond_concat(conv1, y)

   conv2 = tcl.conv2d(conv1, 128, 4, 2, activation_fn=tf.identity, normalizer_fn=tcl.batch_norm, weights_initializer=tf.random_normal_initializer(stddev=0.02), scope='g_enc_conv2')
   conv2 = lrelu(conv2)
   conv2 = conv_cond_concat(conv2, y)

   conv3 = tcl.conv2d(conv2, 256, 4, 2, activation_fn=tf.identity, normalizer_fn=tcl.batch_norm, weights_initializer=tf.random_normal_initializer(stddev=0.02), scope='g_enc_conv3')
   conv3 = lrelu(conv3)
   conv3 = conv_cond_concat(conv3, y)

   conv4 = tcl.conv2d(conv3, 512, 4, 2, activation_fn=tf.identity, normalizer_fn=tcl.batch_norm, weights_initializer=tf.random_normal_initializer(stddev=0.02), scope='g_enc_conv4')
   conv4 = lrelu(conv4)
   conv4 = conv_cond_concat(conv4, y)

   conv5 = tcl.conv2d(conv4, 512, 4, 2, activation_fn=tf.identity, normalizer_fn=tcl.batch_norm, weights_initializer=tf.random_normal_initializer(stddev=0.02), scope='g_enc_conv5')
   conv5 = lrelu(conv5)
   conv5 = conv_cond_concat(conv5, y)

   conv6 = tcl.conv2d(conv5, 512, 4, 2, activation_fn=tf.identity, normalizer_fn=tcl.batch_norm, weights_initializer=tf.random_normal_initializer(stddev=0.02), scope='g_enc_conv6')
   conv6 = lrelu(conv6)

   print('conv1:', conv1)
   print('conv2:', conv2)
   print('conv3:', conv3)
   print('conv4:', conv4)
   print('conv5:', conv5)
   print('conv6:', conv6)
   out = [conv1, conv2, conv3, conv4, conv5, conv6]
   return out,y
nets.py (project: ICGANs, author: cameronfabbri)
def netD(input_images, y, BATCH_SIZE, reuse=False):

   print('DISCRIMINATOR reuse = ' + str(reuse))
   sc = tf.get_variable_scope()
   with tf.variable_scope(sc, reuse=reuse):

      y_dim = int(y.get_shape().as_list()[-1])

      # reshape so it's batchx1x1xy_size
      y = tf.reshape(y, shape=[BATCH_SIZE, 1, 1, y_dim])
      input_ = conv_cond_concat(input_images, y)

      conv1 = tcl.conv2d(input_, 64, 5, 2, activation_fn=tf.identity, weights_initializer=tf.random_normal_initializer(stddev=0.02), scope='d_conv1')
      conv1 = lrelu(conv1)

      conv2 = tcl.conv2d(conv1, 128, 5, 2, activation_fn=tf.identity, weights_initializer=tf.random_normal_initializer(stddev=0.02), scope='d_conv2')
      conv2 = lrelu(conv2)

      conv3 = tcl.conv2d(conv2, 256, 5, 2, activation_fn=tf.identity, weights_initializer=tf.random_normal_initializer(stddev=0.02), scope='d_conv3')
      conv3 = lrelu(conv3)

      conv4 = tcl.conv2d(conv3, 512, 5, 2, activation_fn=tf.identity, weights_initializer=tf.random_normal_initializer(stddev=0.02), scope='d_conv4')
      conv4 = lrelu(conv4)

      conv5 = tcl.conv2d(conv4, 1, 4, 1, activation_fn=tf.identity, weights_initializer=tf.random_normal_initializer(stddev=0.02), scope='d_conv5')

      print('input images:', input_images)
      print('conv1:', conv1)
      print('conv2:', conv2)
      print('conv3:', conv3)
      print('conv4:', conv4)
      print('conv5:', conv5)
      print('END D\n')

      tf.add_to_collection('vars', conv1)
      tf.add_to_collection('vars', conv2)
      tf.add_to_collection('vars', conv3)
      tf.add_to_collection('vars', conv4)
      tf.add_to_collection('vars', conv5)

      return conv5
nets.py (project: ICGANs, author: cameronfabbri)
def encZ(x, ACTIVATION):

   conv1 = tcl.conv2d(x, 32, 5, 2, activation_fn=tf.identity, normalizer_fn=tcl.batch_norm, weights_initializer=tf.random_normal_initializer(stddev=0.02), scope='conv1')
   conv1 = activate(conv1, ACTIVATION)

   conv2 = tcl.conv2d(conv1, 64, 5, 2, activation_fn=tf.identity, normalizer_fn=tcl.batch_norm, weights_initializer=tf.random_normal_initializer(stddev=0.02), scope='conv2')
   conv2 = activate(conv2, ACTIVATION)

   conv3 = tcl.conv2d(conv2, 128, 5, 2, activation_fn=tf.identity, normalizer_fn=tcl.batch_norm, weights_initializer=tf.random_normal_initializer(stddev=0.02), scope='conv3')
   conv3 = activate(conv3, ACTIVATION)

   conv4 = tcl.conv2d(conv3, 256, 5, 2, activation_fn=tf.identity, normalizer_fn=tcl.batch_norm, weights_initializer=tf.random_normal_initializer(stddev=0.02), scope='conv4')
   conv4 = activate(conv4, ACTIVATION)

   conv4_flat = tcl.flatten(conv4)

   fc1 = tcl.fully_connected(conv4_flat, 4096, activation_fn=tf.identity, normalizer_fn=tcl.batch_norm, weights_initializer=tf.random_normal_initializer(stddev=0.02), scope='fc1')
   fc1 = activate(fc1, ACTIVATION)

   fc2 = tcl.fully_connected(fc1, 100, activation_fn=tf.identity, normalizer_fn=tcl.batch_norm, weights_initializer=tf.random_normal_initializer(stddev=0.02), scope='fc2')

   print('input:', x)
   print('conv1:', conv1)
   print('conv2:', conv2)
   print('conv3:', conv3)
   print('conv4:', conv4)
   print('fc1:', fc1)
   print('fc2:', fc2)
   print('END ENCODER\n')

   tf.add_to_collection('vars', conv1)
   tf.add_to_collection('vars', conv2)
   tf.add_to_collection('vars', conv3)
   tf.add_to_collection('vars', conv4)
   tf.add_to_collection('vars', fc1)
   tf.add_to_collection('vars', fc2)

   return fc2
enc_y.py (project: ICGANs, author: cameronfabbri)
def encY(x, ACTIVATION):

   conv1 = tcl.conv2d(x, 64, 5, 2, activation_fn=tf.identity, normalizer_fn=tcl.batch_norm, weights_initializer=tf.random_normal_initializer(stddev=0.02), scope='conv1')
   conv1 = activate(conv1, ACTIVATION)

   conv2 = tcl.conv2d(conv1, 128, 5, 2, activation_fn=tf.identity, normalizer_fn=tcl.batch_norm, weights_initializer=tf.random_normal_initializer(stddev=0.02), scope='conv2')
   conv2 = activate(conv2, ACTIVATION)

   conv3 = tcl.conv2d(conv2, 256, 5, 2, activation_fn=tf.identity, normalizer_fn=tcl.batch_norm, weights_initializer=tf.random_normal_initializer(stddev=0.02), scope='conv3')
   conv3 = activate(conv3, ACTIVATION)

   conv4 = tcl.conv2d(conv3, 512, 5, 2, activation_fn=tf.identity, normalizer_fn=tcl.batch_norm, weights_initializer=tf.random_normal_initializer(stddev=0.02), scope='conv4')
   conv4 = activate(conv4, ACTIVATION)

   conv4_flat = tcl.flatten(conv4)

   fc1 = tcl.fully_connected(conv4_flat, 512, activation_fn=tf.identity, normalizer_fn=tcl.batch_norm, weights_initializer=tf.random_normal_initializer(stddev=0.02), scope='fc1')
   fc1 = activate(fc1, ACTIVATION)

   fc2 = tcl.fully_connected(fc1, 10, activation_fn=tf.identity, normalizer_fn=tcl.batch_norm, weights_initializer=tf.random_normal_initializer(stddev=0.02), scope='fc2')

   print('input:', x)
   print('conv1:', conv1)
   print('conv2:', conv2)
   print('conv3:', conv3)
   print('conv4:', conv4)
   print('fc1:', fc1)
   print('fc2:', fc2)
   print('END ENCODER\n')

   tf.add_to_collection('vars', conv1)
   tf.add_to_collection('vars', conv2)
   tf.add_to_collection('vars', conv3)
   tf.add_to_collection('vars', conv4)
   tf.add_to_collection('vars', fc1)
   tf.add_to_collection('vars', fc2)

   return fc2
ops.py (project: Mendelssohn, author: diggerdu)
def reconv2d(input_, o_size, k_size, name='reconv2d'):
    # resize-then-convolve upsampling; o_size is the target (H, W, C)
    print(name, 'input', ten_sh(input_))
    print(name, 'output', o_size)
    # resize_nearest_neighbor expects [new_height, new_width]
    input_ = tf.image.resize_nearest_neighbor(input_, o_size[:2])
    with tf.variable_scope(name):
        init = ly.xavier_initializer_conv2d()
        output = ly.conv2d(input_, num_outputs=o_size[-1], kernel_size=k_size, stride=1,
                           activation_fn=tf.nn.relu, normalizer_fn=ly.batch_norm, padding='SAME',
                           weights_initializer=init)
        return output
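
Resize-then-convolve upsampling like this is a common alternative to `conv2d_transpose` because it avoids checkerboard artifacts. A usage sketch (hypothetical shapes), upsampling a 32x32x256 map to 64x64x128:

up = reconv2d(feature_map, o_size=[64, 64, 128], k_size=3, name='up1')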
nets.py (project: GAN_Theories, author: YadiraF)
def __call__(self, x, reuse=False):
        with tf.variable_scope(self.name) as scope:
            if reuse:
                scope.reuse_variables()
            # --- conv
            size = 64
            d = tcl.conv2d(x, num_outputs=size, kernel_size=3, # bzx64x64x3 -> bzx32x32x64
                        stride=2, activation_fn=lrelu, normalizer_fn=tcl.batch_norm, padding='SAME', weights_initializer=tf.random_normal_initializer(0, 0.02))
            d = tcl.conv2d(d, num_outputs=size * 2, kernel_size=3, # 16x16x128
                        stride=2, activation_fn=lrelu, normalizer_fn=tcl.batch_norm, padding='SAME', weights_initializer=tf.random_normal_initializer(0, 0.02))
            d = tcl.conv2d(d, num_outputs=size * 4, kernel_size=3, # 8x8x256
                        stride=2, activation_fn=lrelu, normalizer_fn=tcl.batch_norm, padding='SAME', weights_initializer=tf.random_normal_initializer(0, 0.02))
            d = tcl.conv2d(d, num_outputs=size * 8, kernel_size=3, # 4x4x512
                        stride=2, activation_fn=lrelu, normalizer_fn=tcl.batch_norm, padding='SAME', weights_initializer=tf.random_normal_initializer(0, 0.02))

            h = tcl.fully_connected(tcl.flatten(d), self.n_hidden, activation_fn=lrelu, weights_initializer=tf.random_normal_initializer(0, 0.02))

            # -- deconv: mirror the encoder back up to the 64x64x3 image
            d = tcl.fully_connected(h, 4 * 4 * 512, activation_fn=tf.nn.relu, normalizer_fn=tcl.batch_norm)
            d = tf.reshape(d, (-1, 4, 4, 512))  # 4x4x512
            d = tcl.conv2d_transpose(d, 256, 3, stride=2, # 8x8x256
                                    activation_fn=tf.nn.relu, normalizer_fn=tcl.batch_norm, padding='SAME', weights_initializer=tf.random_normal_initializer(0, 0.02))
            d = tcl.conv2d_transpose(d, 128, 3, stride=2, # 16x16x128
                                    activation_fn=tf.nn.relu, normalizer_fn=tcl.batch_norm, padding='SAME', weights_initializer=tf.random_normal_initializer(0, 0.02))
            d = tcl.conv2d_transpose(d, 64, 3, stride=2, # 32x32x64
                                    activation_fn=tf.nn.relu, normalizer_fn=tcl.batch_norm, padding='SAME', weights_initializer=tf.random_normal_initializer(0, 0.02))

            d = tcl.conv2d_transpose(d, 3, 3, stride=2, # 64x64x3
                                    activation_fn=tf.nn.sigmoid, padding='SAME', weights_initializer=tf.random_normal_initializer(0, 0.02))
            return d
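
This discriminator is itself an autoencoder, as in BEGAN-style GANs, where the training signal is per-image reconstruction error rather than a real/fake logit. A loss sketch (my addition; `disc` is an instance of this class, and `k_t` is the BEGAN balancing term, hypothetical here):

recon_real = disc(x_real, reuse=False)
recon_fake = disc(x_fake, reuse=True)
d_loss = tf.reduce_mean(tf.abs(x_real - recon_real)) \
         - k_t * tf.reduce_mean(tf.abs(x_fake - recon_fake))
g_loss = tf.reduce_mean(tf.abs(x_fake - recon_fake))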
a3c.py (project: deep_rl_vizdoom, author: mihahauke)
def __init__(self,
                 initial_entropy_beta=0.05,
                 final_entropy_beta=0.0,
                 decay_steps=1e5,
                 thread="global",
                 **settings):

        super(_BaseACNet, self).__init__(**settings)
        self.network_state = None
        self._name_scope = "net_" + str(thread)

        if initial_entropy_beta == final_entropy_beta:
            self._entropy_beta = initial_entropy_beta
        else:
            self._entropy_beta = tf.train.polynomial_decay(
                name="entropy_beta",
                learning_rate=initial_entropy_beta,
                end_learning_rate=final_entropy_beta,
                decay_steps=decay_steps,
                global_step=tf.train.get_global_step())

        with arg_scope([conv2d], data_format="NCHW"), \
             arg_scope([fully_connected, conv2d],
                       activation_fn=self.activation_fn,
                       biases_initializer=tf.constant_initializer(self.init_bias)):
            self.create_architecture()

        self._prepare_loss_op()
        self.params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=self._name_scope)

