Python examples of random_uniform_initializer()
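
The snippets below are collected from open-source projects and use the TensorFlow 1.x API. As a minimal standalone sketch of the initializer itself (the bounds and variable shape are arbitrary, chosen only for illustration):

import tensorflow as tf

init = tf.random_uniform_initializer(minval=-0.05, maxval=0.05)
w = tf.get_variable("w", shape=[128, 64], initializer=init)  # entries drawn uniformly from [-0.05, 0.05)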

model.py (project: human-rl, author: gsastry)
def conv2d(x, num_filters, name, filter_size=(3, 3), stride=(1, 1), pad="SAME", dtype=tf.float32, collections=None):
    with tf.variable_scope(name):
        stride_shape = [1, stride[0], stride[1], 1]
        filter_shape = [filter_size[0], filter_size[1], int(x.get_shape()[3]), num_filters]

        # there are "num input feature maps * filter height * filter width"
        # inputs to each hidden unit
        fan_in = np.prod(filter_shape[:3])
        # each unit in the lower layer receives a gradient from:
        # "num output feature maps * filter height * filter width" /
        #   pooling size
        fan_out = np.prod(filter_shape[:2]) * num_filters
        # initialize weights with random weights
        w_bound = np.sqrt(6. / (fan_in + fan_out))

        w = tf.get_variable("W", filter_shape, dtype, tf.random_uniform_initializer(-w_bound, w_bound),
                            collections=collections)
        b = tf.get_variable("b", [1, 1, 1, num_filters], initializer=tf.constant_initializer(0.0),
                            collections=collections)
        return tf.nn.conv2d(x, w, stride_shape, pad) + b
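The bound sqrt(6. / (fan_in + fan_out)) computed above is the Glorot/Xavier uniform bound, chosen to keep activation variance roughly constant across layers. A hypothetical usage sketch of this conv2d helper follows; the input shape, layer names, and ELU activation are illustrative, not taken from the repo:

import numpy as np
import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 42, 42, 1])    # NHWC batch of frames
h1 = tf.nn.elu(conv2d(x, 32, "l1", stride=(2, 2)))   # SAME padding: 42x42 -> 21x21
h2 = tf.nn.elu(conv2d(h1, 32, "l2", stride=(2, 2)))  # 21x21 -> 11x11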
rnn_decoder.py (project: seq2seq, author: google)
def _build(self, initial_state, helper):
    if not self.initial_state:
      self._setup(initial_state, helper)

    scope = tf.get_variable_scope()
    scope.set_initializer(tf.random_uniform_initializer(
        -self.params["init_scale"],
        self.params["init_scale"]))

    maximum_iterations = None
    if self.mode == tf.contrib.learn.ModeKeys.INFER:
      maximum_iterations = self.params["max_decode_length"]

    outputs, final_state = dynamic_decode(
        decoder=self,
        output_time_major=True,
        impute_finished=False,
        maximum_iterations=maximum_iterations)
    return self.finalize(outputs, final_state)
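A standalone sketch of the scope-initializer pattern used above: once set_initializer is called on the active variable scope, any variable created in it without an explicit initializer inherits the uniform one (scope name and bounds here are illustrative):

with tf.variable_scope("demo") as scope:
    scope.set_initializer(tf.random_uniform_initializer(-0.04, 0.04))
    v = tf.get_variable("v", shape=[3])  # no explicit initializer, so the scope's uniform one applies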
rnn_encoder.py (project: seq2seq, author: google)
def encode(self, inputs, sequence_length, **kwargs):
    scope = tf.get_variable_scope()
    scope.set_initializer(tf.random_uniform_initializer(
        -self.params["init_scale"],
        self.params["init_scale"]))

    cell = training_utils.get_rnn_cell(**self.params["rnn_cell"])
    outputs, state = tf.nn.dynamic_rnn(
        cell=cell,
        inputs=inputs,
        sequence_length=sequence_length,
        dtype=tf.float32,
        **kwargs)
    return EncoderOutput(
        outputs=outputs,
        final_state=state,
        attention_values=outputs,
        attention_values_length=sequence_length)
rnn_encoder.py (project: seq2seq, author: google)
def encode(self, inputs, sequence_length, **kwargs):
    scope = tf.get_variable_scope()
    scope.set_initializer(tf.random_uniform_initializer(
        -self.params["init_scale"],
        self.params["init_scale"]))

    cell_fw = training_utils.get_rnn_cell(**self.params["rnn_cell"])
    cell_bw = training_utils.get_rnn_cell(**self.params["rnn_cell"])
    outputs, states = tf.nn.bidirectional_dynamic_rnn(
        cell_fw=cell_fw,
        cell_bw=cell_bw,
        inputs=inputs,
        sequence_length=sequence_length,
        dtype=tf.float32,
        **kwargs)

    # Concatenate outputs and states of the forward and backward RNNs
    outputs_concat = tf.concat(outputs, 2)

    return EncoderOutput(
        outputs=outputs_concat,
        final_state=states,
        attention_values=outputs_concat,
        attention_values_length=sequence_length)
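Here tf.concat(outputs, 2) joins the forward and backward outputs along the feature axis, so each time step of the encoder output has twice the cell size.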
models.py (project: baselines, author: openai)
def __call__(self, obs, reuse=False):
        with tf.variable_scope(self.name) as scope:
            if reuse:
                scope.reuse_variables()

            x = obs
            x = tf.layers.dense(x, 64)
            if self.layer_norm:
                x = tc.layers.layer_norm(x, center=True, scale=True)
            x = tf.nn.relu(x)

            x = tf.layers.dense(x, 64)
            if self.layer_norm:
                x = tc.layers.layer_norm(x, center=True, scale=True)
            x = tf.nn.relu(x)

            x = tf.layers.dense(x, self.nb_actions, kernel_initializer=tf.random_uniform_initializer(minval=-3e-3, maxval=3e-3))
            x = tf.nn.tanh(x)
        return x
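The tight ±3e-3 uniform bound on the output layer matches the initialization recommended in the DDPG paper (Lillicrap et al., 2015): starting the final layer near zero keeps initial actions small and avoids saturating the tanh.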
models.py (project: baselines, author: openai)
def __call__(self, obs, action, reuse=False):
        with tf.variable_scope(self.name) as scope:
            if reuse:
                scope.reuse_variables()

            x = obs
            x = tf.layers.dense(x, 64)
            if self.layer_norm:
                x = tc.layers.layer_norm(x, center=True, scale=True)
            x = tf.nn.relu(x)

            x = tf.concat([x, action], axis=-1)
            x = tf.layers.dense(x, 64)
            if self.layer_norm:
                x = tc.layers.layer_norm(x, center=True, scale=True)
            x = tf.nn.relu(x)

            x = tf.layers.dense(x, 1, kernel_initializer=tf.random_uniform_initializer(minval=-3e-3, maxval=3e-3))
        return x
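The critic's output layer uses the same ±3e-3 bound but no squashing nonlinearity, since Q-value estimates should remain unbounded; note also that the action is concatenated in after the first hidden layer rather than at the input.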
tensorbox.py (project: cancer, author: yancz1989)
def build_lstm_inner(H, lstm_input):
  '''
  build lstm decoder
  '''
  lstm_cell = rnn_cell.BasicLSTMCell(H['lstm_size'], forget_bias=0.0, state_is_tuple=False)
  if H['num_lstm_layers'] > 1:
    lstm = rnn_cell.MultiRNNCell([lstm_cell] * H['num_lstm_layers'], state_is_tuple=False)
  else:
    lstm = lstm_cell

  batch_size = H['batch_size'] * H['grid_height'] * H['grid_width']
  state = tf.zeros([batch_size, lstm.state_size])

  outputs = []
  with tf.variable_scope('RNN', initializer=tf.random_uniform_initializer(-0.1, 0.1)):
    for time_step in range(H['rnn_len']):
      if time_step > 0: tf.get_variable_scope().reuse_variables()
      output, state = lstm(lstm_input, state)
      outputs.append(output)
  return outputs
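Note that [lstm_cell] * H['num_lstm_layers'] reuses a single cell object for every layer, which works in the TensorFlow versions this code targets but is rejected by newer releases, where MultiRNNCell requires a distinct cell instance per layer.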
value_function.py (project: -NIPS-2017-Learning-to-Run, author: kyleliang919)
def create_net(self, shape):
        hidden_size = 64
        print(shape)
        self.x = tf.placeholder(tf.float32, shape=[None, shape], name="x")
        self.y = tf.placeholder(tf.float32, shape=[None], name="y")

        weight_init = tf.random_uniform_initializer(-0.05, 0.05)
        bias_init = tf.constant_initializer(0)

        with tf.variable_scope("VF"):
            h1 = tf.nn.relu(fully_connected(self.x, shape, hidden_size, weight_init, bias_init, "h1"))
            h2 = tf.nn.relu(fully_connected(h1, hidden_size, hidden_size, weight_init, bias_init, "h2"))
            h3 = fully_connected(h2, hidden_size, 1, weight_init, bias_init, "h3")
        self.net = tf.reshape(h3, (-1,))
        l2 = tf.nn.l2_loss(self.net - self.y)
        self.train = tf.train.AdamOptimizer().minimize(l2)
        self.session.run(tf.initialize_all_variables())
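tf.initialize_all_variables() was deprecated in TensorFlow 0.12 in favor of the equivalent tf.global_variables_initializer().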
model.py (project: DHP, author: YuhangSong)
def conv2d(x, num_filters, name, filter_size=(3, 3), stride=(1, 1), pad="SAME", dtype=tf.float32, collections=None):
    with tf.variable_scope(name):
        stride_shape = [1, stride[0], stride[1], 1]

        filter_shape = [filter_size[0], filter_size[1], int(x.get_shape()[3]), num_filters]

        # there are "num input feature maps * filter height * filter width"
        # inputs to each hidden unit
        fan_in = np.prod(filter_shape[:3])

        # each unit in the lower layer receives a gradient from:
        # "num output feature maps * filter height * filter width" /
        #   pooling size
        fan_out = np.prod(filter_shape[:2]) * num_filters

        # initialize weights with random weights
        w_bound = np.sqrt(6. / (fan_in + fan_out))

        w = tf.get_variable("W", filter_shape, dtype, tf.random_uniform_initializer(-w_bound, w_bound),
                            collections=collections)
        b = tf.get_variable("b", [1, 1, 1, num_filters], initializer=tf.zeros_initializer,
                            collections=collections)

        return tf.nn.conv2d(x, w, stride_shape, pad) + b
rnn_decoder.py (project: conv_seq2seq, author: tobyyouup)
def _build(self, initial_state, helper):
    if not self.initial_state:
      self._setup(initial_state, helper)

    scope = tf.get_variable_scope()
    scope.set_initializer(tf.random_uniform_initializer(
        -self.params["init_scale"],
        self.params["init_scale"]))

    maximum_iterations = None
    if self.mode == tf.contrib.learn.ModeKeys.INFER:
      maximum_iterations = self.params["max_decode_length"]

    outputs, final_state = dynamic_decode(
        decoder=self,
        output_time_major=True,
        impute_finished=False,
        maximum_iterations=maximum_iterations)
    return self.finalize(outputs, final_state)
rnn_encoder.py (project: conv_seq2seq, author: tobyyouup)
def encode(self, inputs, sequence_length, **kwargs):
    scope = tf.get_variable_scope()
    scope.set_initializer(tf.random_uniform_initializer(
        -self.params["init_scale"],
        self.params["init_scale"]))

    cell = training_utils.get_rnn_cell(**self.params["rnn_cell"])
    outputs, state = tf.nn.dynamic_rnn(
        cell=cell,
        inputs=inputs,
        sequence_length=sequence_length,
        dtype=tf.float32,
        **kwargs)
    return EncoderOutput(
        outputs=outputs,
        final_state=state,
        attention_values=outputs,
        attention_values_length=sequence_length)
rnn_encoder.py (project: conv_seq2seq, author: tobyyouup)
def encode(self, inputs, sequence_length, **kwargs):
    scope = tf.get_variable_scope()
    scope.set_initializer(tf.random_uniform_initializer(
        -self.params["init_scale"],
        self.params["init_scale"]))

    cell_fw = training_utils.get_rnn_cell(**self.params["rnn_cell"])
    cell_bw = training_utils.get_rnn_cell(**self.params["rnn_cell"])
    outputs, states = tf.nn.bidirectional_dynamic_rnn(
        cell_fw=cell_fw,
        cell_bw=cell_bw,
        inputs=inputs,
        sequence_length=sequence_length,
        dtype=tf.float32,
        **kwargs)

    # Concatenate outputs and states of the forward and backward RNNs
    outputs_concat = tf.concat(outputs, 2)

    return EncoderOutput(
        outputs=outputs_concat,
        final_state=states,
        attention_values=outputs_concat,
        attention_values_length=sequence_length)
model.py (project: show-adapt-and-tell, author: tsenghungchen)
def domain_classifier(self, images, name="G", reuse=False):
    random_uniform_init = tf.random_uniform_initializer(minval=-0.1, maxval=0.1)
    with tf.variable_scope(name):
        tf.get_variable_scope().reuse_variables()
        with tf.variable_scope("images"):
            # "generator/images"
            images_W = tf.get_variable("images_W", [self.img_dims, self.G_hidden_size], "float32", random_uniform_init)
        images_emb = tf.matmul(images, images_W)    # B,H

        l2_loss = tf.constant(0.0)
        with tf.variable_scope("domain"):
            if reuse:
                tf.get_variable_scope().reuse_variables()
            with tf.variable_scope("output"):
                output_W = tf.get_variable("output_W", [self.G_hidden_size, self.num_domains],
                                           "float32", random_uniform_init)
                output_b = tf.get_variable("output_b", [self.num_domains], "float32", random_uniform_init)
            l2_loss += tf.nn.l2_loss(output_W)
            l2_loss += tf.nn.l2_loss(output_b)
            logits = tf.nn.xw_plus_b(images_emb, output_W, output_b, name="logits")
            predictions = tf.argmax(logits, 1, name="predictions")

        return predictions, logits, l2_loss
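tf.nn.xw_plus_b(x, W, b) is simply a fused convenience for tf.matmul(x, W) + b.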
PTB-release.py (project: YellowFin, author: JianGoForIt)
def construct_model(config, eval_config, raw_data, opt_method):
  train_data, valid_data, test_data, _ = raw_data

  eval_config.batch_size = 1
  eval_config.num_steps = 1

  initializer = tf.random_uniform_initializer(-config.init_scale, config.init_scale)
  with tf.name_scope("Train"):
    train_input = PTBInput(config=config, data=train_data, name="TrainInput")
    with tf.variable_scope("Model", reuse=None, initializer=initializer):
      m = PTBModel(is_training=True, config=config, input_=train_input, opt_method=opt_method)

  with tf.name_scope("Valid"):
    valid_input = PTBInput(config=config, data=valid_data, name="ValidInput")
    with tf.variable_scope("Model", reuse=True, initializer=initializer):
      mvalid = PTBModel(is_training=False, config=config, input_=valid_input, opt_method=opt_method)

  with tf.name_scope("Test"):
    test_input = PTBInput(config=eval_config, data=test_data, name="TestInput")
    with tf.variable_scope("Model", reuse=True, initializer=initializer):
      mtest = PTBModel(is_training=False, config=eval_config, input_=test_input, opt_method=opt_method)

  return m, mvalid, mtest
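Because the validation and test graphs reuse the "Model" variable scope (reuse=True), all three PTBModel instances share one set of weights; only the input pipelines and configurations differ.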
dynamic_seq2seq_model.py (project: seq2seq_chatterbot, author: StephenLee2016)
def _init_embeddings(self):
        with tf.variable_scope("embedding") as scope:

            sqrt3 = math.sqrt(3)
            initializer = tf.random_uniform_initializer(-sqrt3, sqrt3)

            self.encoder_embedding_matrix = tf.get_variable(
                name="encoder_embedding_matrix",
                shape=[self.encoder_vocab_size, self.embedding_size],
                initializer=initializer,
                dtype=tf.float32)

            self.decoder_embedding_matrix = tf.get_variable(
                name="decoder_embedding_matrix",
                shape=[self.decoder_vocab_size, self.embedding_size],
                initializer=initializer,
                dtype=tf.float32)

            # embed the encoder inputs
            self.encoder_inputs_embedded = tf.nn.embedding_lookup(
                self.encoder_embedding_matrix, self.encoder_inputs)

            # embed the decoder (training) inputs
            self.decoder_train_inputs_embedded = tf.nn.embedding_lookup(
                self.decoder_embedding_matrix, self.decoder_train_inputs)
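The √3 bound is not arbitrary: a uniform distribution on [-a, a] has variance a²/3, so a = √3 gives each embedding entry unit variance at initialization.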
rhn_train.py (project: RecurrentHighwayNetworks, author: julian121266)
def evaluate_mc(data_path, dataset, load_model, mc_steps, seed):
  """Evaluate the model on the given data using MC averaging."""
  ex.commands['print_config']()
  print("MC Evaluation of model:", load_model)
  assert mc_steps > 0
  reader, (train_data, valid_data, test_data, _) = get_data(data_path, dataset)

  config = get_config()
  val_config = deepcopy(config)
  test_config = deepcopy(config)
  test_config.batch_size = test_config.num_steps = 1
  with tf.Session() as session:
    initializer = tf.random_uniform_initializer(-config.init_scale, config.init_scale)
    with tf.variable_scope("model", reuse=None, initializer=initializer):
      _ = Model(is_training=True, config=config)
    with tf.variable_scope("model", reuse=True, initializer=initializer):
      _ = Model(is_training=False, config=val_config)
      mtest = Model(is_training=False, config=test_config)
    tf.initialize_all_variables()
    saver = tf.train.Saver()
    saver.restore(session, load_model)

    print("Testing on non-batched Test ...")
    test_perplexity = run_mc_epoch(seed, session, mtest, test_data, tf.no_op(), test_config, mc_steps, verbose=True)
    print("Full Test Perplexity: %.3f, Bits: %.3f" % (test_perplexity, np.log2(test_perplexity)))
model_level1.py (project: Skeleton-key, author: feiyu1990)
def __init__(self, config, mode):
        self.config = config
        self.mode = mode
        # self.train_resnet = (train_resnet & (mode == 'training'))

        self.weight_initializer = tf.contrib.layers.xavier_initializer()
        self.const_initializer = tf.constant_initializer(0.0)
        self.emb_initializer = tf.random_uniform_initializer(minval=-1.0, maxval=1.0)
        self.level1_word2ix = json.load(open('data/train/word2ix_stem.json'))

        self.level1_model = level1_model.Level1Model(word_to_idx=self.level1_word2ix,
                                                     dim_feature=config.LEVEL1_dim_feature,
                                                     dim_embed=config.LEVEL1_dim_embed,
                                                     dim_hidden=config.LEVEL1_dim_hidden,
                                                     alpha_c=config.LEVEL1_alpha, dropout=config.LEVEL1_dropout,
                                                     n_time_step=config.LEVEL1_T,
                                                     train=(self.mode == 'training'))
model.py (project: Skeleton-key, author: feiyu1990)
def __init__(self, config, mode):
        self.config = config
        self.mode = mode

        self.weight_initializer = tf.contrib.layers.xavier_initializer()
        self.const_initializer = tf.constant_initializer(0.0)
        self.emb_initializer = tf.random_uniform_initializer(minval=-1.0, maxval=1.0)

        self.level1_word2ix = json.load(open('data/train/word2ix_stem.json'))
        self.level2_word2ix = json.load(open('data/train/word2ix_attr.json'))

        self.level1_model = level1_model.Level1Model(word_to_idx=self.level1_word2ix,
                                                     dim_feature=config.LEVEL1_dim_feature,
                                                     dim_embed=config.LEVEL1_dim_embed,
                                                     dim_hidden=config.LEVEL1_dim_hidden,
                                                     alpha_c=config.LEVEL1_alpha, dropout=config.LEVEL1_dropout,
                                                     n_time_step=config.LEVEL1_T, train=(self.mode == 'training'))

        self.level2_model = level2_model.Level2Model(word_to_idx=self.level2_word2ix,
                                                     dim_feature=config.LEVEL2_dim_feature,
                                                     dim_embed=config.LEVEL2_dim_embed,
                                                     dim_hidden=config.LEVEL2_dim_hidden,
                                                     dropout=config.LEVEL2_dropout, n_time_step=config.LEVEL2_T)
ddpg_cartpole.py (project: cartpoleplusplus, author: matpalm)
def __init__(self, namespace, input_state, action_dim):
    super(ActorNetwork, self).__init__(namespace)

    self.input_state = input_state

    self.exploration_noise = util.OrnsteinUhlenbeckNoise(action_dim, 
                                                         opts.action_noise_theta,
                                                         opts.action_noise_sigma)

    with tf.variable_scope(namespace):
      opts.hidden_layers = opts.actor_hidden_layers
      final_hidden = self.input_state_network(self.input_state, opts)
      # action dim output. note: actors out is (-1, 1) and scaled in env as required.
      weights_initializer = tf.random_uniform_initializer(-0.001, 0.001)
      self.output_action = slim.fully_connected(scope='output_action',
                                                inputs=final_hidden,
                                                num_outputs=action_dim,
                                                weights_initializer=weights_initializer,
                                                weights_regularizer=tf.contrib.layers.l2_regularizer(0.01),
                                                activation_fn=tf.nn.tanh)
ddpg_network.py (project: SRLF, author: Fritz449)
def create_critic(self, name, state_input, action_input, reuse=False):
        hidden = state_input
        weights = []
        with tf.variable_scope(name, reuse=reuse):
            for index, n_hidden in enumerate(self.n_hiddens):
                if index == 1:
                    hidden = tf.concat([hidden, action_input], axis=1)
                hidden, layer_weights = denselayer("hidden_critic_{}".format(index), hidden, n_hidden,
                                                   self.nonlinearity, tf.truncated_normal_initializer())
                weights += layer_weights

            value, layer_weights = denselayer("value", hidden, 1,
                                              w_initializer=tf.random_uniform_initializer(-3e-3, 3e-3))
            value = tf.reshape(value, [-1])
            weights += layer_weights
            weight_phs = [tf.placeholder(tf.float32, shape=w.get_shape()) for w in weights]
        return value, weights, weight_phs
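As in the original DDPG architecture, the action enters the critic at the second hidden layer (index == 1) rather than at the input, and the scalar value head gets the small ±3e-3 uniform initialization.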
trainer.py (project: XMUNMT, author: XMUNLP)
def get_initializer(params):
    if params.initializer == "uniform":
        max_val = params.initializer_gain
        return tf.random_uniform_initializer(-max_val, max_val)
    elif params.initializer == "normal":
        return tf.random_normal_initializer(0.0, params.initializer_gain)
    elif params.initializer == "normal_unit_scaling":
        return tf.variance_scaling_initializer(params.initializer_gain,
                                               mode="fan_avg",
                                               distribution="normal")
    elif params.initializer == "uniform_unit_scaling":
        return tf.variance_scaling_initializer(params.initializer_gain,
                                               mode="fan_avg",
                                               distribution="uniform")
    else:
        raise ValueError("Unrecognized initializer: %s" % params.initializer)
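A hypothetical call exercising the "uniform" branch; the Params class is a stand-in exposing only the two fields this function reads:

class Params(object):  # illustrative stand-in, not from the repo
    initializer = "uniform"
    initializer_gain = 0.08

init = get_initializer(Params())  # same as tf.random_uniform_initializer(-0.08, 0.08)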
models.py (project: rl-server, author: parilo)
def __init__(self, input_sizes, output_size, scope):
        """Cretes a neural network layer."""
        if type(input_sizes) != list:
            input_sizes = [input_sizes]

        self.input_sizes = input_sizes
        self.output_size = output_size
        self.scope       = scope or "Layer"

        with tf.variable_scope(self.scope):
            self.Ws = []
            get_W_index = 0
            for input_idx, input_size in enumerate(input_sizes):
                W_name = "W_%d" % (input_idx,)
                W_initializer = tf.random_uniform_initializer(-0.003, 0.003)
                W_var = tf.get_variable(W_name, (input_size, output_size), initializer=W_initializer)
                get_W_index += 1
                self.Ws.append(W_var)
            self.b = tf.get_variable("b", (output_size,), initializer=tf.constant_initializer(0))
network_ops.py (project: DeepRL, author: arnomoonens)
def conv2d(x, num_filters, name, filter_size=(3, 3), stride=(1, 1), pad="SAME", dtype=tf.float32, collections=None):
    """
    2-dimensional convolutional layer.
    Source: https://github.com/openai/universe-starter-agent/blob/a3fdfba297c8c24d62d3c53978fb6fb26f80e76e/model.py
    """

    with tf.variable_scope(name):
        stride_shape = [1, stride[0], stride[1], 1]
        filter_shape = [filter_size[0], filter_size[1], int(x.get_shape()[3]), num_filters]

        # there are "num input feature maps * filter height * filter width"
        # inputs to each hidden unit
        fan_in = np.prod(filter_shape[:3])
        # each unit in the lower layer receives a gradient from:
        # "num output feature maps * filter height * filter width" /
        #   pooling size
        fan_out = np.prod(filter_shape[:2]) * num_filters
        # initialize weights with random weights
        w_bound = np.sqrt(6. / (fan_in + fan_out))

        w = tf.get_variable("W", filter_shape, dtype, tf.random_uniform_initializer(-w_bound, w_bound),
                            collections=collections)
        b = tf.get_variable("b", [1, 1, 1, num_filters], initializer=tf.constant_initializer(0.0),
                            collections=collections)
        return tf.nn.conv2d(x, w, stride_shape, pad) + b
model.py (project: FeatureControlHRL, author: Nat-D)
def conv2d(x, num_filters, name, filter_size=(3, 3), stride=(1, 1), pad="SAME", dtype=tf.float32, collections=None):
    with tf.variable_scope(name):
        stride_shape = [1, stride[0], stride[1], 1]
        filter_shape = [filter_size[0], filter_size[1], int(x.get_shape()[3]), num_filters]

        # there are "num input feature maps * filter height * filter width"
        # inputs to each hidden unit
        fan_in = np.prod(filter_shape[:3])
        # each unit in the lower layer receives a gradient from:
        # "num output feature maps * filter height * filter width" /
        #   pooling size
        fan_out = np.prod(filter_shape[:2]) * num_filters
        # initialize weights with random weights
        w_bound = np.sqrt(6. / (fan_in + fan_out))

        w = tf.get_variable("W", filter_shape, dtype, tf.random_uniform_initializer(-w_bound, w_bound),
                            collections=collections)
        b = tf.get_variable("b", [1, 1, 1, num_filters], initializer=tf.constant_initializer(0.0),
                            collections=collections)
        # Use dropout here to help prevent overfitting?
        #return tf.nn.dropout(tf.nn.conv2d(x, w, stride_shape, pad) + b, keep_prob=0.7, name='dropout_%s' % name)
        # It turned out to be better without dropout
        return tf.nn.conv2d(x, w, stride_shape, pad) + b
mean_pooling.py (project: adascan_public, author: amlankar)
def get_fc_layer(name,size):
    '''
    name - Name to be added after W_ and b_ (can be any datatype convertible to str)
    size - [inp_size,out_size]
    tf.get_variable looks for variable name in current scope and returns it.
    If not found, it uses the initializer
    '''
    with tf.device('/cpu:0'):
        W = tf.get_variable('W_'+str(name),
                            shape=[size[0],size[1]],
                            initializer=tf.contrib.layers.xavier_initializer(uniform=True,
                                                                             seed=None,
                                                                             dtype=tf.float32))
        b = tf.get_variable('b_'+str(name),
                            shape=[size[1]],
                            initializer=tf.random_uniform_initializer(-model_options['init_scale'],\
                                                                      model_options['init_scale']))
    return W,b
adascan.py (project: adascan_public, author: amlankar)
def get_fc_layer(name,size):
    '''
    name - Name to be added after W_ and b_ (can be any datatype convertible to str)
    size - [inp_size,out_size]
    tf.get_variable looks for variable name in current scope and returns it.
    If not found, it uses the initializer
    '''
    with tf.device('/cpu:0'):
        W = tf.get_variable('W_'+str(name),
                            shape=[size[0],size[1]],
                            initializer=tf.contrib.layers.xavier_initializer(uniform=True,
                                                                             seed=None,
                                                                             dtype=tf.float32))
        b = tf.get_variable('b_'+str(name),
                            shape=[size[1]],
                            initializer=tf.random_uniform_initializer(-model_options['init_scale'],\
                                                                      model_options['init_scale']))
    return W,b
decoder.py (project: tefla, author: openAGI)
def _build(self, initial_state, helper):
        if not self.initial_state:
            self._setup(initial_state, helper)

        scope = tf.get_variable_scope()
        scope.set_initializer(tf.random_uniform_initializer(
            -self.params["init_scale"],
            self.params["init_scale"]))

        maximum_iterations = None
        if self.mode == tf.contrib.learn.ModeKeys.INFER:
            maximum_iterations = self.params["max_decode_length"]

        outputs, final_state = dynamic_decode(
            decoder=self,
            output_time_major=True,
            impute_finished=False,
            maximum_iterations=maximum_iterations)
        return self.finalize(outputs, final_state)
encoder.py (project: tefla, author: openAGI)
def encode(self, inputs, sequence_length, **kwargs):
        scope = tf.get_variable_scope()
        scope.set_initializer(tf.random_uniform_initializer(
            -self.params["init_scale"],
            self.params["init_scale"]))

        cell = _get_rnn_cell(**self.params["rnn_cell"])
        outputs, state = tf.nn.dynamic_rnn(
            cell=cell,
            inputs=inputs,
            sequence_length=sequence_length,
            dtype=tf.float32,
            **kwargs)
        return EncoderOutput(
            outputs=outputs,
            final_state=state,
            attention_values=outputs,
            attention_values_length=sequence_length)
nn.py (project: image_captioning, author: bityangke)
def weight(name, shape, init='he', range=1, stddev=0.33, init_val=None):
    if init_val is not None:
        initializer = tf.constant_initializer(init_val)
    elif init == 'uniform':
        initializer = tf.random_uniform_initializer(-range, range)
    elif init == 'normal':
        initializer = tf.random_normal_initializer(stddev = stddev)
    elif init == 'he':
        fan_in, _ = _get_dims(shape)
        std = math.sqrt(2.0 / fan_in)
        initializer = tf.random_normal_initializer(stddev = std)
    elif init == 'xavier':
        fan_in, fan_out = _get_dims(shape)
        range = math.sqrt(6.0 / (fan_in + fan_out))
        initializer = tf.random_uniform_initializer(-range, range)
    else:
        initializer = tf.truncated_normal_initializer(stddev = stddev)

    var = tf.get_variable(name, shape, initializer = initializer)
    tf.add_to_collection('l2', tf.nn.l2_loss(var))
    return var
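Hypothetical calls covering two of the branches above (names and shapes are illustrative):

W_emb = weight("W_emb", [10000, 256], init="uniform", range=0.08)  # uniform in [-0.08, 0.08)
W_fc = weight("W_fc", [256, 128], init="he")  # relies on the repo's _get_dims helper for fan-in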

