Example source code for Python flatten()
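All snippets below target TensorFlow 1.x and assume the contrib layers module is available as layers (a few use the alias tcl), e.g. import tensorflow.contrib.layers as layers. layers.flatten keeps the batch dimension and collapses every remaining dimension into one. A minimal sketch of that behaviour (not taken from any of the projects listed below):

import tensorflow as tf
import tensorflow.contrib.layers as layers

# A [batch, height, width, channels] feature map ...
feature_map = tf.placeholder(tf.float32, shape=[None, 7, 7, 64])
# ... is collapsed to [batch, height * width * channels], here [None, 3136]; the batch axis is preserved.
flat = layers.flatten(feature_map)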

Source file: train_cifar_feature_matching_ali_tf.py (project: Semi_Supervised_GAN, author: ChunyuanLI)
def inference(input_img):
    # input_latent = Input(batch_shape=noise_dim, dtype=im_dtype)
    with tf.variable_scope('Net_Inf') as scope:
        xx = layers.convolution2d(input_img, 128, kernel_size=(5,5), stride=(2, 2), padding='SAME', activation_fn=None)
        xx = layers.batch_norm(xx)
        xx = tf.nn.relu(xx)
        xx = layers.convolution2d(xx, 256, kernel_size=(5,5), stride=(2, 2), padding='SAME', activation_fn=None)
        xx = layers.batch_norm(xx)
        xx = tf.nn.relu(xx)    
        xx = layers.convolution2d(xx, 512, kernel_size=(5,5), stride=(2, 2),  padding='SAME', activation_fn=None)
        xx = layers.batch_norm(xx)
        xx = tf.nn.relu(xx)  
        xx = layers.flatten(xx)
        xx = layers.fully_connected(xx, num_outputs=latent_size, activation_fn=None)
        xx = layers.batch_norm(xx)
        inf_latent = tf.nn.tanh(xx)
    return inf_latent

# specify discriminative model
Source file: model.py (project: combine-DT-with-NN-in-RL, author: Burning-Bear)
def model(img_in, num_actions, scope, reuse=False):
    """As described in https://storage.googleapis.com/deepmind-data/assets/papers/DeepMindNature14236Paper.pdf"""
    with tf.variable_scope(scope, reuse=reuse):
        out = img_in
        with tf.variable_scope("convnet"):
            # original architecture
            out = layers.convolution2d(out, num_outputs=32, kernel_size=8, stride=4, activation_fn=tf.nn.relu)
            out = layers.convolution2d(out, num_outputs=64, kernel_size=4, stride=2, activation_fn=tf.nn.relu)
            out = layers.convolution2d(out, num_outputs=64, kernel_size=3, stride=1, activation_fn=tf.nn.relu)
        out = layers.flatten(out)

        with tf.variable_scope("action_value"):
            out = layers.fully_connected(out, num_outputs=512, activation_fn=tf.nn.relu)
            out = layers.fully_connected(out, num_outputs=num_actions, activation_fn=None)

        return out
Source file: model.py (project: combine-DT-with-NN-in-RL, author: Burning-Bear)
def dueling_model(img_in, num_actions, scope, reuse=False):
    """As described in https://arxiv.org/abs/1511.06581"""
    with tf.variable_scope(scope, reuse=reuse):
        out = img_in
        with tf.variable_scope("convnet"):
            # original architecture
            out = layers.convolution2d(out, num_outputs=32, kernel_size=8, stride=4, activation_fn=tf.nn.relu)
            out = layers.convolution2d(out, num_outputs=64, kernel_size=4, stride=2, activation_fn=tf.nn.relu)
            out = layers.convolution2d(out, num_outputs=64, kernel_size=3, stride=1, activation_fn=tf.nn.relu)
        out = layers.flatten(out)

        with tf.variable_scope("state_value"):
            state_hidden = layers.fully_connected(out, num_outputs=512, activation_fn=tf.nn.relu)
            state_score = layers.fully_connected(state_hidden, num_outputs=1, activation_fn=None)
        with tf.variable_scope("action_value"):
            actions_hidden = layers.fully_connected(out, num_outputs=512, activation_fn=tf.nn.relu)
            action_scores = layers.fully_connected(actions_hidden, num_outputs=num_actions, activation_fn=None)
            action_scores_mean = tf.reduce_mean(action_scores, 1)
            action_scores = action_scores - tf.expand_dims(action_scores_mean, 1)

        return state_score + action_scores
Source file: model.py (project: combine-DT-with-NN-in-RL, author: Burning-Bear)
def model(img_in, num_actions, scope, reuse=False):
    """As described in https://storage.googleapis.com/deepmind-data/assets/papers/DeepMindNature14236Paper.pdf"""
    with tf.variable_scope(scope, reuse=reuse):
        out = img_in
        with tf.variable_scope("convnet"):
            # original architecture
            out = layers.convolution2d(out, num_outputs=32, kernel_size=8, stride=4, activation_fn=tf.nn.relu)
            out = layers.convolution2d(out, num_outputs=64, kernel_size=4, stride=2, activation_fn=tf.nn.relu)
            out = layers.convolution2d(out, num_outputs=64, kernel_size=3, stride=1, activation_fn=tf.nn.relu)
        out = layers.flatten(out)

        with tf.variable_scope("action_value"):
            out = layers.fully_connected(out, num_outputs=512, activation_fn=tf.nn.relu)
            out = layers.fully_connected(out, num_outputs=num_actions, activation_fn=None)

        return out
Source file: model.py (project: combine-DT-with-NN-in-RL, author: Burning-Bear)
def dueling_model(img_in, num_actions, scope, reuse=False):
    """As described in https://arxiv.org/abs/1511.06581"""
    with tf.variable_scope(scope, reuse=reuse):
        out = img_in
        with tf.variable_scope("convnet"):
            # original architecture
            out = layers.convolution2d(out, num_outputs=32, kernel_size=8, stride=4, activation_fn=tf.nn.relu)
            out = layers.convolution2d(out, num_outputs=64, kernel_size=4, stride=2, activation_fn=tf.nn.relu)
            out = layers.convolution2d(out, num_outputs=64, kernel_size=3, stride=1, activation_fn=tf.nn.relu)
        out = layers.flatten(out)

        with tf.variable_scope("state_value"):
            state_hidden = layers.fully_connected(out, num_outputs=512, activation_fn=tf.nn.relu)
            state_score = layers.fully_connected(state_hidden, num_outputs=1, activation_fn=None)
        with tf.variable_scope("action_value"):
            actions_hidden = layers.fully_connected(out, num_outputs=512, activation_fn=tf.nn.relu)
            action_scores = layers.fully_connected(actions_hidden, num_outputs=num_actions, activation_fn=None)
            action_scores_mean = tf.reduce_mean(action_scores, 1)
            action_scores = action_scores - tf.expand_dims(action_scores_mean, 1)

        return state_score + action_scores
Source file: net_frame.py (project: DRLModule, author: halleanwoo)
def _action_norm_dist(inpt, num_actions, w_init, activation_fn_v, activation_fn_a):
    mu = layers.fully_connected(inpt, num_outputs=num_actions, weights_initializer=w_init, activation_fn=activation_fn_v)
    sigma = layers.fully_connected(inpt, num_outputs=num_actions, weights_initializer=w_init, activation_fn=activation_fn_a)
    return mu, sigma
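A hedged usage sketch (an assumption, not code from the DRLModule project): the two heads can parameterize a Gaussian policy, provided activation_fn_a keeps the scale positive (for example tf.nn.softplus).

# Hypothetical usage of _action_norm_dist; features and action_dim are illustrative names.
mu, sigma = _action_norm_dist(inpt=features, num_actions=action_dim,
                              w_init=tf.truncated_normal_initializer(0.0, 0.3),
                              activation_fn_v=None, activation_fn_a=tf.nn.softplus)
policy = tf.distributions.Normal(loc=mu, scale=sigma + 1e-6)  # small epsilon guards against a zero scale
action_sample = policy.sample()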



# # cnn network frame
# def cnn_frame_continu(hiddens, kerners, strides, inpt, num_actions, scope=None, activation_fn=tf.nn.relu, activation_fn_mu=tf.nn.relu, activation_fn_sigma=tf.nn.relu, reuse=None):
#     with tf.variable_scope(scope, reuse=reuse):
#         out = inpt
#         for kerner, stride in zip(kerners, strides):
#             # NOTE: each stride must be a length-4 list ([1, s, s, 1]); padding='SAME' is an assumption
#             out = tf.nn.conv2d(input=out, filter=kerner, strides=stride, padding='SAME')
#         out = layers.flatten(out)
#         with tf.name_scope("out"):
#             mu = layers.fully_connected(out, num_outputs=num_actions, weights_initializer=tf.truncated_normal_initializer(0 , 0.3), activation_fn=None)
#             sigma = layers.fully_connected(out, num_outputs=num_actions, weights_initializer=tf.truncated_normal_initializer(0 , 0.3), activation_fn=tf.nn.softplus)
#         return mu, sigma
Source file: model.py (project: rl-attack-detection, author: yenchenlin)
def model(img_in, num_actions, scope, reuse=False, concat_softmax=False):
    """As described in https://storage.googleapis.com/deepmind-data/assets/papers/DeepMindNature14236Paper.pdf"""
    with tf.variable_scope(scope, reuse=reuse):
        out = img_in
        with tf.variable_scope("convnet"):
            # original architecture
            out = layers.convolution2d(out, num_outputs=32, kernel_size=8, stride=4, activation_fn=tf.nn.relu)
            out = layers.convolution2d(out, num_outputs=64, kernel_size=4, stride=2, activation_fn=tf.nn.relu)
            out = layers.convolution2d(out, num_outputs=64, kernel_size=3, stride=1, activation_fn=tf.nn.relu)
        out = layers.flatten(out)

        with tf.variable_scope("action_value"):
            out = layers.fully_connected(out, num_outputs=512, activation_fn=tf.nn.relu)
            out = layers.fully_connected(out, num_outputs=num_actions, activation_fn=None)
            if concat_softmax:
                out = tf.nn.softmax(out)

        return out
Source file: model.py (project: rl-attack-detection, author: yenchenlin)
def dueling_model(img_in, num_actions, scope, reuse=False):
    """As described in https://arxiv.org/abs/1511.06581"""
    with tf.variable_scope(scope, reuse=reuse):
        out = img_in
        with tf.variable_scope("convnet"):
            # original architecture
            out = layers.convolution2d(out, num_outputs=32, kernel_size=8, stride=4, activation_fn=tf.nn.relu)
            out = layers.convolution2d(out, num_outputs=64, kernel_size=4, stride=2, activation_fn=tf.nn.relu)
            out = layers.convolution2d(out, num_outputs=64, kernel_size=3, stride=1, activation_fn=tf.nn.relu)
        out = layers.flatten(out)

        with tf.variable_scope("state_value"):
            state_hidden = layers.fully_connected(out, num_outputs=512, activation_fn=tf.nn.relu)
            state_score = layers.fully_connected(state_hidden, num_outputs=1, activation_fn=None)
        with tf.variable_scope("action_value"):
            actions_hidden = layers.fully_connected(out, num_outputs=512, activation_fn=tf.nn.relu)
            action_scores = layers.fully_connected(actions_hidden, num_outputs=num_actions, activation_fn=None)
            action_scores_mean = tf.reduce_mean(action_scores, 1)
            action_scores = action_scores - tf.expand_dims(action_scores_mean, 1)

        return state_score + action_scores
Source file: model.py (project: baselines, author: openai)
def model(img_in, num_actions, scope, reuse=False, layer_norm=False):
    """As described in https://storage.googleapis.com/deepmind-data/assets/papers/DeepMindNature14236Paper.pdf"""
    with tf.variable_scope(scope, reuse=reuse):
        out = img_in
        with tf.variable_scope("convnet"):
            # original architecture
            out = layers.convolution2d(out, num_outputs=32, kernel_size=8, stride=4, activation_fn=tf.nn.relu)
            out = layers.convolution2d(out, num_outputs=64, kernel_size=4, stride=2, activation_fn=tf.nn.relu)
            out = layers.convolution2d(out, num_outputs=64, kernel_size=3, stride=1, activation_fn=tf.nn.relu)
        conv_out = layers.flatten(out)

        with tf.variable_scope("action_value"):
            value_out = layers.fully_connected(conv_out, num_outputs=512, activation_fn=None)
            if layer_norm:
                value_out = layer_norm_fn(value_out, relu=True)
            else:
                value_out = tf.nn.relu(value_out)
            value_out = layers.fully_connected(value_out, num_outputs=num_actions, activation_fn=None)
        return value_out
Source file: async_dqn.py (project: chi, author: rmst)
def deep_q_network():
    """ Architecture according to:
    http://www.nature.com/nature/journal/v518/n7540/full/nature14236.html
    """
    @tt.model(tracker=tf.train.ExponentialMovingAverage(1 - .0005),    # TODO: replace with original weight freeze
                         optimizer=tf.train.RMSPropOptimizer(.00025, .95, .95, .01))
    def q_network(x):
        x /= 255
        x = layers.conv2d(x, 32, 8, 4)
        x = layers.conv2d(x, 64, 4, 2)
        x = layers.conv2d(x, 64, 3, 1)
        x = layers.flatten(x)
        x = layers.fully_connected(x, 512)
        x = layers.fully_connected(x, env.action_space.n, activation_fn=None)
        x = tf.identity(x, name='Q')
        return x

    return q_network
Source file: dqn_m.py (project: chi, author: rmst)
def deep_q_network():
    """ Architecture according to:
    http://www.nature.com/nature/journal/v518/n7540/full/nature14236.html
    """
    @tt.model(tracker=tf.train.ExponentialMovingAverage(1 - .0005),    # TODO: replace with original weight freeze
                         optimizer=tf.train.RMSPropOptimizer(.00025, .95, .95, .01))
    def q_network(x):
        x /= 255
        x = layers.conv2d(x, 32, 8, 4)
        x = layers.conv2d(x, 64, 4, 2)
        x = layers.conv2d(x, 64, 3, 1)
        x = layers.flatten(x)
        x = layers.fully_connected(x, 512)
        x = layers.fully_connected(x, env.action_space.n, activation_fn=None)
        x = tf.identity(x, name='Q')
        return x

    return q_network
Source file: models.py (project: RL_FlappyBird, author: iGuaZi)
def model(img_in, num_actions, scope, reuse=False):
    """As described in https://storage.googleapis.com/deepmind-data/assets/papers/DeepMindNature14236Paper.pdf"""
    with tf.variable_scope(scope, reuse=reuse):
        out = img_in
        with tf.variable_scope("convnet"):
            # original architecture
            out = layers.convolution2d(out, num_outputs=32, kernel_size=8, stride=4, activation_fn=tf.nn.relu)
            out = layers.convolution2d(out, num_outputs=64, kernel_size=4, stride=2, activation_fn=tf.nn.relu)
            out = layers.convolution2d(out, num_outputs=64, kernel_size=3, stride=1, activation_fn=tf.nn.relu)
        out = layers.flatten(out)

        with tf.variable_scope("action_value"):
            out = layers.fully_connected(out, num_outputs=512, activation_fn=tf.nn.relu)
            out = layers.fully_connected(out, num_outputs=num_actions, activation_fn=None)

        return out
Source file: models.py (project: RL_FlappyBird, author: iGuaZi)
def dueling_model(img_in, num_actions, scope, reuse=False):
    """As described in https://arxiv.org/abs/1511.06581"""
    with tf.variable_scope(scope, reuse=reuse):
        out = img_in
        with tf.variable_scope("convnet"):
            # original architecture
            out = layers.convolution2d(out, num_outputs=32, kernel_size=8, stride=4, activation_fn=tf.nn.relu)
            out = layers.convolution2d(out, num_outputs=64, kernel_size=4, stride=2, activation_fn=tf.nn.relu)
            out = layers.convolution2d(out, num_outputs=64, kernel_size=3, stride=1, activation_fn=tf.nn.relu)
        out = layers.flatten(out)

        with tf.variable_scope("state_value"):
            state_hidden = layers.fully_connected(out, num_outputs=512, activation_fn=tf.nn.relu)
            state_score = layers.fully_connected(state_hidden, num_outputs=1, activation_fn=None)
        with tf.variable_scope("action_value"):
            actions_hidden = layers.fully_connected(out, num_outputs=512, activation_fn=tf.nn.relu)
            action_scores = layers.fully_connected(actions_hidden, num_outputs=num_actions, activation_fn=None)
            action_scores_mean = tf.reduce_mean(action_scores, 1)
            action_scores = action_scores - tf.expand_dims(action_scores_mean, 1)

        return state_score + action_scores
Source file: models.py (project: RL_FlappyBird, author: iGuaZi)
def Actor(img_in, num_actions, scope, reuse=False):
    """As described in https://storage.googleapis.com/deepmind-data/assets/papers/DeepMindNature14236Paper.pdf"""
    with tf.variable_scope(scope, reuse=reuse):
        out = img_in
        with tf.variable_scope("convnet"):
            # original architecture
            out = layers.convolution2d(out, num_outputs=32, kernel_size=8, stride=4, activation_fn=tf.nn.relu)
            out = layers.convolution2d(out, num_outputs=64, kernel_size=4, stride=2, activation_fn=tf.nn.relu)
            out = layers.convolution2d(out, num_outputs=64, kernel_size=3, stride=1, activation_fn=tf.nn.relu)
        out = layers.flatten(out)

        with tf.variable_scope("action_value"):
            out = layers.fully_connected(out, num_outputs=512, activation_fn=tf.nn.relu)
            out = layers.fully_connected(out, num_outputs=num_actions, activation_fn=None)

        with tf.variable_scope("action_prob"):
            out = tf.nn.softmax(out)
        return out
Source file: models.py (project: RL_FlappyBird, author: iGuaZi)
def Critic(img_in, scope, reuse=False):
    with tf.variable_scope(scope, reuse=reuse):
        out = img_in
        with tf.variable_scope("convnet"):
            # original architecture
            out = layers.convolution2d(out, num_outputs=32, kernel_size=8, stride=4, activation_fn=tf.nn.relu)
            out = layers.convolution2d(out, num_outputs=64, kernel_size=4, stride=2, activation_fn=tf.nn.relu)
            out = layers.convolution2d(out, num_outputs=64, kernel_size=3, stride=1, activation_fn=tf.nn.relu)
        out = layers.flatten(out)

        with tf.variable_scope("state_value"):
            out = layers.fully_connected(out, num_outputs=512, activation_fn=tf.nn.relu)
            out = layers.fully_connected(out, num_outputs=1, activation_fn=None)

        return out




# models defined in the original code
Source file: spatial_transformer.py (project: tf_practice, author: juho-lee)
def to_loc(input, is_simple=False):
    if len(input.get_shape()) == 4:
        input = layers.flatten(input)
    num_inputs = input.get_shape()[1]
    num_outputs = 3 if is_simple else 6
    W_init = tf.constant_initializer(
            np.zeros((num_inputs, num_outputs)))
    if is_simple:
        b_init = tf.constant_initializer(np.array([1.,0.,0.]))
    else:
        b_init = tf.constant_initializer(np.array([1.,0.,0.,0.,1.,0.]))

    return layers.fully_connected(input, num_outputs,
            activation_fn=None,
            weights_initializer=W_init,
            biases_initializer=b_init)
Source file: spatial_transformer.py (project: tf_practice, author: juho-lee)
def to_loc(input, is_simple=False):
    if len(input.get_shape()) == 4:
        input = layers.flatten(input)
    num_inputs = input.get_shape()[1]
    num_outputs = 3 if is_simple else 6
    W_init = tf.constant_initializer(
            np.zeros((num_inputs, num_outputs)))
    if is_simple:
        b_init = tf.constant_initializer(np.array([1.,0.,0.]))
    else:
        b_init = tf.constant_initializer(np.array([1.,0.,0.,0.,1.,0.]))

    return layers.fully_connected(input, num_outputs,
            activation_fn=None,
            weights_initializer=W_init,
            biases_initializer=b_init)
Source file: a3c.py (project: reinforceflow, author: dbobrenko)
def __init__(self, input_space, output_space, layer_sizes=(512, 512, 512), trainable=True):
        if isinstance(input_space, Tuple) or isinstance(output_space, Tuple):
            raise ValueError('For tuple action and observation spaces '
                             'consider implementing custom network architecture.')
        self._input_ph = tf.placeholder('float32', shape=[None] + list(input_space.shape),
                                        name='inputs')
        end_points = {}
        net = layers.flatten(self._input_ph)
        for i, units in enumerate(layer_sizes):
            name = 'fc%d' % i
            net = layers.fully_connected(net, num_outputs=units, activation_fn=tf.nn.relu,
                                         trainable=trainable, scope=name)
            end_points[name] = net
        gaussian = tf.random_normal_initializer
        v = layers.fully_connected(net, num_outputs=1,
                                   activation_fn=None,
                                   weights_initializer=gaussian(0.0, 0.1),
                                   biases_initializer=gaussian(0.05, 0.1),
                                   scope='out_value',
                                   trainable=trainable)
        end_points['out_value'] = tf.squeeze(v)
        header_endpoints = make_a3c_header(net, input_space, output_space, trainable)
        end_points.update(header_endpoints)
        self.end_points = end_points
        self.output_policy = self.output
Source file: dueling.py (project: reinforceflow, author: dbobrenko)
def __init__(self, input_space, output_space, layer_sizes=(512, 512), dueling_type='mean',
                 advantage_layers=(256,), value_layers=(256,), trainable=True):
        if isinstance(input_space, Tuple) or isinstance(output_space, Tuple):
            raise ValueError('For tuple action and observation spaces '
                             'consider implementing custom network architecture.')
        self._input_ph = tf.placeholder('float32', shape=[None] + list(input_space.shape),
                                        name='inputs')

        end_points = {}
        net = layers.flatten(self.input_ph)
        for i, units in enumerate(layer_sizes):
            name = 'fc%d' % i
            net = layers.fully_connected(net, num_outputs=units, activation_fn=tf.nn.relu,
                                         trainable=trainable, scope=name)
            end_points[name] = net
        net, dueling_endpoints = make_dueling_header(input_layer=net,
                                                     output_size=output_space.shape[0],
                                                     dueling_type=dueling_type,
                                                     advantage_layers=advantage_layers,
                                                     value_layers=value_layers,
                                                     trainable=trainable)
        end_points.update(dueling_endpoints)
        self._output = net
        self.end_points = end_points
Source file: dqn.py (project: reinforceflow, author: dbobrenko)
def make_dqn_body(input_layer, trainable=True):
    end_points = {}
    net = layers.conv2d(inputs=input_layer,
                        num_outputs=16,
                        kernel_size=[8, 8],
                        stride=[4, 4],
                        activation_fn=tf.nn.relu,
                        padding="same",
                        scope="conv1",
                        trainable=trainable)
    end_points['conv1'] = net
    net = layers.conv2d(inputs=net,
                        num_outputs=32,
                        kernel_size=[4, 4],
                        stride=[2, 2],
                        activation_fn=tf.nn.relu,
                        padding="same",
                        scope="conv2",
                        trainable=trainable)
    end_points['conv2'] = net
    out = layers.flatten(net)
    end_points['conv2_flatten'] = out
    return out, end_points
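A hedged sketch of how the returned flattened features might be used (illustrative only; obs_ph, num_actions, and the 256-unit hidden layer are assumptions rather than reinforceflow code):

# Hypothetical Q-value head stacked on top of make_dqn_body's flattened conv output.
body_out, end_points = make_dqn_body(obs_ph, trainable=True)
hidden = layers.fully_connected(body_out, num_outputs=256, activation_fn=tf.nn.relu, scope='fc1')
q_values = layers.fully_connected(hidden, num_outputs=num_actions, activation_fn=None, scope='q_values')
end_points['q_values'] = q_values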
Source file: __init__.py (project: tensorflow-infogan, author: JonathanRaiman)
def discriminator_forward(img,
                          network_description,
                          is_training,
                          reuse=None,
                          name="discriminator",
                          use_batch_norm=True,
                          debug=False):
    with tf.variable_scope(name, reuse=reuse):
        out = run_network(img,
                          network_description,
                          is_training=is_training,
                          use_batch_norm=use_batch_norm,
                          debug=debug)
        out = layers.flatten(out)
        prob = layers.fully_connected(
            out,
            num_outputs=1,
            activation_fn=tf.nn.sigmoid,
            scope="prob_projection"
        )

    return {"prob":prob, "hidden":out}
Source file: model.py (project: NoisyNet-DQN, author: andrewliao11)
def model(img_in, num_actions, scope, noisy=False, reuse=False):
    """As described in https://storage.googleapis.com/deepmind-data/assets/papers/DeepMindNature14236Paper.pdf"""
    with tf.variable_scope(scope, reuse=reuse):
        out = img_in
        with tf.variable_scope("convnet"):
            # original architecture
            out = layers.convolution2d(out, num_outputs=32, kernel_size=8, stride=4, activation_fn=tf.nn.relu)
            out = layers.convolution2d(out, num_outputs=64, kernel_size=4, stride=2, activation_fn=tf.nn.relu)
            out = layers.convolution2d(out, num_outputs=64, kernel_size=3, stride=1, activation_fn=tf.nn.relu)
        out = layers.flatten(out)

        with tf.variable_scope("action_value"):
            if noisy:
                # Apply noisy network on fully connected layers
                # ref: https://arxiv.org/abs/1706.10295
                out = noisy_dense(out, name='noisy_fc1', size=512, activation_fn=tf.nn.relu)
                out = noisy_dense(out, name='noisy_fc2', size=num_actions)
            else:
                out = layers.fully_connected(out, num_outputs=512, activation_fn=tf.nn.relu)
                out = layers.fully_connected(out, num_outputs=num_actions, activation_fn=None)

        return out
Source file: build_vgg.py (project: tensorflow-litterbox, author: rwightman)
def _block_output(net, endpoints, num_classes, dropout_keep_prob=0.5):
    with tf.variable_scope('Output'):
        net = layers.flatten(net, scope='Flatten')

        # 7 x 7 x 512
        net = layers.fully_connected(net, 4096, scope='Fc1')
        net = endpoints['Output/Fc1'] = layers.dropout(net, dropout_keep_prob, scope='Dropout1')

        # 1 x 1 x 4096
        net = layers.fully_connected(net, 4096, scope='Fc2')
        net = endpoints['Output/Fc2'] = layers.dropout(net, dropout_keep_prob, scope='Dropout2')

        logits = layers.fully_connected(net, num_classes, activation_fn=None, scope='Logits')
        # 1 x 1 x num_classes
        endpoints['Logits'] = logits
    return logits
Source file: models.py (project: distributional_perspective_on_RL, author: Kiwoo)
def _cnn_to_mlp(convs, hiddens, dueling, inpt, num_actions, scope, reuse=False):
    with tf.variable_scope(scope, reuse=reuse):
        out = inpt
        with tf.variable_scope("convnet"):
            for num_outputs, kernel_size, stride in convs:
                out = layers.convolution2d(out,
                                           num_outputs=num_outputs,
                                           kernel_size=kernel_size,
                                           stride=stride,
                                           activation_fn=tf.nn.relu)
        out = layers.flatten(out)
        with tf.variable_scope("action_value"):
            action_out = out
            for hidden in hiddens:
                action_out = layers.fully_connected(action_out, num_outputs=hidden, activation_fn=tf.nn.relu)
            action_scores = layers.fully_connected(action_out, num_outputs=num_actions, activation_fn=None)

        if dueling:
            with tf.variable_scope("state_value"):
                state_out = out
                for hidden in hiddens:
                    state_out = layers.fully_connected(state_out, num_outputs=hidden, activation_fn=tf.nn.relu)
                state_score = layers.fully_connected(state_out, num_outputs=1, activation_fn=None)
            action_scores_mean = tf.reduce_mean(action_scores, 1)
            action_scores_centered = action_scores - tf.expand_dims(action_scores_mean, 1)
            return state_score + action_scores_centered
        else:
            return action_scores
Source file: train_tf.py (project: DeepWorks, author: daigo0927)
def __call__(self, inputs, reuse = True):
        with tf.variable_scope(self.name) as vs:
            # tf.get_variable_scope()
            if reuse:
                vs.reuse_variables()

            x = tcl.conv2d(inputs,
                           num_outputs = 64,
                           kernel_size = (4, 4),
                           stride = (1, 1),
                           padding = 'SAME')
            x = tcl.batch_norm(x)
            x = tf.nn.relu(x)
            x = tcl.max_pool2d(x, (2, 2), (2, 2), 'SAME')
            x = tcl.conv2d(x,
                           num_outputs = 128,
                           kernel_size = (4, 4),
                           stride = (1, 1),
                           padding = 'SAME')
            x = tcl.batch_norm(x)
            x = tf.nn.relu(x)
            x = tcl.max_pool2d(x, (2, 2), (2, 2), 'SAME')
            x = tcl.flatten(x)
            logits = tcl.fully_connected(x, num_outputs = self.num_output)

            return logits
Source file: model.py (project: DeepWorks, author: daigo0927)
def __call__(self, inputs, reuse = True):
        with tf.variable_scope(self.name) as vs:
            tf.get_variable_scope()
            if reuse:
                vs.reuse_variables()

            conv1 = tcl.conv2d(inputs,
                               num_outputs = 64,
                               kernel_size = (7, 7),
                               stride = (2, 2),
                               padding = 'SAME')
            conv1 = tcl.batch_norm(conv1)
            conv1 = tf.nn.relu(conv1)
            conv1 = tcl.max_pool2d(conv1,
                                   kernel_size = (3, 3),
                                   stride = (2, 2),
                                   padding = 'SAME')

            x = conv1
            filters = 64
            first_layer = True
            for i, r in enumerate(self.repetitions):
                x = _residual_block(self.block_fn,
                                    filters = filters,
                                    repetition = r,
                                    is_first_layer = first_layer)(x)
                filters *= 2
                if first_layer:
                    first_layer = False

            _, h, w, ch = x.shape.as_list()
            outputs = tcl.avg_pool2d(x,
                                     kernel_size = (h, w),
                                     stride = (1, 1))
            outputs = tcl.flatten(outputs)
            logits = tcl.fully_connected(outputs, num_outputs = self.num_output,
                                         activation_fn = None)
            return logits
Source file: run_dqn_atari.py (project: deep-q-learning, author: alvinwan)
def atari_model(img_in, num_actions, scope, reuse=False):
    # as described in https://storage.googleapis.com/deepmind-data/assets/papers/DeepMindNature14236Paper.pdf
    with tf.variable_scope(scope, reuse=reuse):
        out = img_in
        with tf.variable_scope("convnet"):
            # original architecture
            out = layers.convolution2d(out, num_outputs=32, kernel_size=8, stride=4, activation_fn=tf.nn.relu)
            out = layers.convolution2d(out, num_outputs=64, kernel_size=4, stride=2, activation_fn=tf.nn.relu)
            out = layers.convolution2d(out, num_outputs=64, kernel_size=3, stride=1, activation_fn=tf.nn.relu)
        out = layers.flatten(out)
        with tf.variable_scope("action_value"):
            out = layers.fully_connected(out, num_outputs=512, activation_fn=tf.nn.relu)
            out = layers.fully_connected(out, num_outputs=num_actions, activation_fn=None)

        return out
Source file: run_dqn_atari.py (project: deep-q-learning, author: alvinwan)
def simple_model(img_in, num_actions, scope, reuse=False, num_filters=64):
    with tf.variable_scope(scope, reuse=reuse):
        out = img_in
        gauss_initializer = initializers.xavier_initializer(uniform=False)  # stddev = 1/n
        with tf.variable_scope("convnet"):
            out = layers.convolution2d(
                out, num_outputs=num_filters, kernel_size=8, stride=4,
                activation_fn=tf.nn.relu, weights_initializer=gauss_initializer,
                trainable=False)
        out = layers.flatten(out)
        with tf.variable_scope("action_value"):
            out = layers.fully_connected(out, num_outputs=num_actions, activation_fn=None)

        return out
Source file: run_dqn_atari.py (project: deep-q-learning, author: alvinwan)
def simple_model_w_feat_eng(img_in, num_actions, scope, reuse=False):
    with tf.variable_scope(scope, reuse=reuse):
        out = img_in
        out = layers.flatten(out)
        # stddev = 1/n, where n = number of inputs
        gauss_initializer = initializers.xavier_initializer(uniform=False)
        with tf.variable_scope("action_value"):
            out = layers.fully_connected(
                out,
                num_outputs=num_actions,
                activation_fn=tf.nn.relu,
                biases_initializer=None,
                weights_initializer=gauss_initializer,
                weights_regularizer=None)
        return out
Source file: components.py (project: decorrelated-adversarial-autoencoder, author: patrickgadd)
def semi_supervised_decoder_convolutional(input_tensor, batch_size, n_dimensions, network_scale=1.0, img_res=28, img_channels=1):
    f_multiplier = network_scale

    net = layers.fully_connected(input_tensor, 2*2*int(128*f_multiplier))
    net = tf.reshape(net, [-1, 2, 2, int(128*f_multiplier)])

    assert(img_res in [28, 32])

    if img_res==28:
        net = layers.conv2d_transpose(net, int(64*f_multiplier), 3, stride=2)
        net = layers.conv2d_transpose(net, int(64*f_multiplier), 3, stride=1)
        net = layers.conv2d_transpose(net, int(32*f_multiplier), 4, stride=1, padding='VALID')
        net = layers.conv2d_transpose(net, int(32*f_multiplier), 4, stride=1)
        net = layers.conv2d_transpose(net, int(16*f_multiplier), 3, stride=2)
        net = layers.conv2d_transpose(net, int(16*f_multiplier), 3, stride=1)
        net = layers.conv2d_transpose(net, int(8*f_multiplier), 3, stride=2)
        net = layers.conv2d_transpose(net, int(8*f_multiplier), 3, stride=1)
    else:
        net = layers.conv2d_transpose(net, int(64*f_multiplier), 3, stride=2)
        net = layers.conv2d_transpose(net, int(64*f_multiplier), 3, stride=1)
        net = layers.conv2d_transpose(net, int(32*f_multiplier), 3, stride=2)
        net = layers.conv2d_transpose(net, int(32*f_multiplier), 3, stride=1)
        net = layers.conv2d_transpose(net, int(16*f_multiplier), 3, stride=2)
        net = layers.conv2d_transpose(net, int(16*f_multiplier), 3, stride=1)
        net = layers.conv2d_transpose(net, int(8*f_multiplier), 3, stride=2)
        net = layers.conv2d_transpose(net, int(8*f_multiplier), 3, stride=1)

    net = layers.conv2d_transpose(net, img_channels, 5, stride=1, activation_fn=tf.nn.sigmoid)
    net = layers.flatten(net)

    return net
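A hedged follow-up (an assumption, not project code): because the decoder flattens its sigmoid output, a reshape recovers the image tensor, e.g. for the 28x28 single-channel case.

# Hypothetical usage: latent is an illustrative latent-code tensor; the reshape assumes img_res=28, img_channels=1.
recon_flat = semi_supervised_decoder_convolutional(latent, batch_size, n_dimensions,
                                                   img_res=28, img_channels=1)
recon_images = tf.reshape(recon_flat, [-1, 28, 28, 1])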

