Python examples of random_uniform_initializer()

The snippets below collect real-world uses of tf.random_uniform_initializer() from open-source TensorFlow projects.

Source: model.py (project: ai-copywriter, author: ematvey)
def _init_embeddings(self):
    with tf.variable_scope("embedding") as scope:

      # Uniform(-sqrt(3), sqrt(3)) has variance=1.
      sqrt3 = math.sqrt(3)
      initializer = tf.random_uniform_initializer(-sqrt3, sqrt3)

      self.embedding_matrix = tf.get_variable(
        name="embedding_matrix",
        shape=[self.vocab_size, self.embedding_size],
        initializer=initializer,
        dtype=tf.float32)

      self.encoder_inputs_embedded = embedding_lookup_unique(
        self.embedding_matrix, self.encoder_inputs)

      self.decoder_train_inputs_embedded = embedding_lookup_unique(
        self.embedding_matrix, self.decoder_train_inputs)
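Why sqrt(3)? A draw from U(-a, a) has variance a**2 / 3, so a = sqrt(3) gives unit variance. A minimal standalone sketch (illustrative names and sizes, not from the project):

import math
import tensorflow as tf

# U(-a, a) has variance a**2 / 3, so a = sqrt(3) gives unit variance.
sqrt3 = math.sqrt(3)
init = tf.random_uniform_initializer(-sqrt3, sqrt3)
emb = tf.get_variable("emb_demo", shape=[10000, 128], initializer=init)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    mean, var = sess.run(tf.nn.moments(tf.reshape(emb, [-1]), axes=[0]))
    print(mean, var)  # mean ~ 0, variance ~ 1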
Source: value_function.py (project: parallel-trpo, author: kvfrans)
def create_net(self, shape):
        hidden_size = 64
        print(shape)
        self.x = tf.placeholder(tf.float32, shape=[None, shape], name="x")
        self.y = tf.placeholder(tf.float32, shape=[None], name="y")

        weight_init = tf.random_uniform_initializer(-0.05, 0.05)
        bias_init = tf.constant_initializer(0)

        with tf.variable_scope("VF"):
            h1 = tf.nn.relu(fully_connected(self.x, shape, hidden_size, weight_init, bias_init, "h1"))
            h2 = tf.nn.relu(fully_connected(h1, hidden_size, hidden_size, weight_init, bias_init, "h2"))
            h3 = fully_connected(h2, hidden_size, 1, weight_init, bias_init, "h3")
        self.net = tf.reshape(h3, (-1,))
        l2 = tf.nn.l2_loss(self.net - self.y)
        self.train = tf.train.AdamOptimizer().minimize(l2)
        self.session.run(tf.global_variables_initializer())  # initialize_all_variables() was deprecated
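The snippet calls a fully_connected helper that is not shown. A hypothetical version consistent with the call sites (input, in_size, out_size, weight_init, bias_init, scope); the real parallel-trpo helper may differ:

import tensorflow as tf

# Hypothetical fully_connected helper matching the calls above;
# the real parallel-trpo helper may differ.
def fully_connected(x, in_size, out_size, weight_init, bias_init, scope):
    with tf.variable_scope(scope):
        w = tf.get_variable("w", [in_size, out_size], initializer=weight_init)
        b = tf.get_variable("b", [out_size], initializer=bias_init)
        return tf.matmul(x, w) + b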
Source: model.py (project: noreward-rl, author: pathak22)
def conv2d(x, num_filters, name, filter_size=(3, 3), stride=(1, 1), pad="SAME", dtype=tf.float32, collections=None):
    with tf.variable_scope(name):
        stride_shape = [1, stride[0], stride[1], 1]
        filter_shape = [filter_size[0], filter_size[1], int(x.get_shape()[3]), num_filters]

        # there are "num input feature maps * filter height * filter width"
        # inputs to each hidden unit
        fan_in = np.prod(filter_shape[:3])
        # each unit in the lower layer receives a gradient from:
        # "num output feature maps * filter height * filter width" /
        #   pooling size
        fan_out = np.prod(filter_shape[:2]) * num_filters
        # initialize weights with random weights
        w_bound = np.sqrt(6. / (fan_in + fan_out))

        w = tf.get_variable("W", filter_shape, dtype, tf.random_uniform_initializer(-w_bound, w_bound),
                            collections=collections)
        b = tf.get_variable("b", [1, 1, 1, num_filters], initializer=tf.constant_initializer(0.0),
                            collections=collections)
        return tf.nn.conv2d(x, w, stride_shape, pad) + b
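The bound sqrt(6 / (fan_in + fan_out)) is the Glorot/Xavier uniform limit. A worked example with illustrative numbers, a 3x3 filter mapping 32 input channels to 64 output channels:

import numpy as np
import tensorflow as tf

fan_in = 3 * 3 * 32    # filter height * width * input channels
fan_out = 3 * 3 * 64   # filter height * width * output channels
w_bound = np.sqrt(6. / (fan_in + fan_out))  # ~0.083
init = tf.random_uniform_initializer(-w_bound, w_bound)
# tf.glorot_uniform_initializer() derives the same bound from the shape.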
Source: model.py (project: noreward-rl, author: pathak22)
def deconv2d(x, out_shape, name, filter_size=(3, 3), stride=(1, 1), pad="SAME", dtype=tf.float32, collections=None, prevNumFeat=None):
    with tf.variable_scope(name):
        num_filters = out_shape[-1]
        prevNumFeat = int(x.get_shape()[3]) if prevNumFeat is None else prevNumFeat
        stride_shape = [1, stride[0], stride[1], 1]
        # transpose_filter : [height, width, out_channels, in_channels]
        filter_shape = [filter_size[0], filter_size[1], num_filters, prevNumFeat]

        # there are "num input feature maps * filter height * filter width"
        # inputs to each hidden unit
        fan_in = np.prod(filter_shape[:2]) * prevNumFeat
        # each unit in the lower layer receives a gradient from:
        # "num output feature maps * filter height * filter width"
        fan_out = np.prod(filter_shape[:3])
        # initialize weights with random weights
        w_bound = np.sqrt(6. / (fan_in + fan_out))

        w = tf.get_variable("W", filter_shape, dtype, tf.random_uniform_initializer(-w_bound, w_bound),
                            collections=collections)
        b = tf.get_variable("b", [num_filters], initializer=tf.constant_initializer(0.0),
                            collections=collections)
        deconv2d = tf.nn.conv2d_transpose(x, w, tf.stack(out_shape), stride_shape, pad)  # tf.pack was renamed to tf.stack
        # deconv2d = tf.reshape(tf.nn.bias_add(deconv2d, b), deconv2d.get_shape())
        return deconv2d
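A hedged usage sketch for the deconv2d above (illustrative shapes, runnable given the definition): out_shape is the full NHWC output size and may mix static ints with the dynamic batch dimension.

x = tf.placeholder(tf.float32, [None, 16, 16, 32])
batch = tf.shape(x)[0]
# Stride 2 with SAME padding doubles the spatial size: 16x16 -> 32x32.
y = deconv2d(x, [batch, 32, 32, 16], "deconv_demo", stride=(2, 2))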
Source: model.py (project: universe-starter-agent, author: openai)
def conv2d(x, num_filters, name, filter_size=(3, 3), stride=(1, 1), pad="SAME", dtype=tf.float32, collections=None):
    with tf.variable_scope(name):
        stride_shape = [1, stride[0], stride[1], 1]
        filter_shape = [filter_size[0], filter_size[1], int(x.get_shape()[3]), num_filters]

        # there are "num input feature maps * filter height * filter width"
        # inputs to each hidden unit
        fan_in = np.prod(filter_shape[:3])
        # each unit in the lower layer receives a gradient from:
        # "num output feature maps * filter height * filter width" /
        #   pooling size
        fan_out = np.prod(filter_shape[:2]) * num_filters
        # initialize weights with random weights
        w_bound = np.sqrt(6. / (fan_in + fan_out))

        w = tf.get_variable("W", filter_shape, dtype, tf.random_uniform_initializer(-w_bound, w_bound),
                            collections=collections)
        b = tf.get_variable("b", [1, 1, 1, num_filters], initializer=tf.constant_initializer(0.0),
                            collections=collections)
        return tf.nn.conv2d(x, w, stride_shape, pad) + b
Source: model.py (project: Attention_Based_LSTM_AspectBased_SA, author: gangeshwark)
def _init_aspect_embeddings(self):
        with tf.variable_scope("AspectEmbedding") as scope:
            self.input_shape = tf.shape(self.inputs)
            # Uniform(-sqrt(3), sqrt(3)) has variance=1.
            sqrt3 = tf.sqrt(3.0)
            initializer = tf.random_uniform_initializer(-sqrt3, sqrt3)

            """self.aspect_embedding_matrix = tf.get_variable(
                name="aspect_embedding_matrix",
                shape=[self.aspect_vocab_size, self.aspect_embedding_size],
                initializer=initializer,
                dtype=tf.float32)"""
            self.aspect_embedding_matrix = tf.Variable(
                tf.constant(0.0, shape=[self.aspect_vocab_size, self.aspect_embedding_size]),
                trainable=False, name="aspect_embedding_matrix")
            self.aspect_embedding_placeholder = tf.placeholder(tf.float32,
                                                               [self.aspect_vocab_size, self.aspect_embedding_size])
            self.aspect_embedding_init = self.aspect_embedding_matrix.assign(self.aspect_embedding_placeholder)

            self.input_aspect_embedded = tf.nn.embedding_lookup(
                self.aspect_embedding_matrix, self.input_aspect)  # -> [batch_size, da]
            s = tf.shape(self.input_aspect_embedded)
            self.input_aspect_embedded_final = tf.tile(tf.reshape(self.input_aspect_embedded, (s[0], -1, s[1])),
                                                       (1, self.input_shape[1], 1))  # -> [batch_size, N, da]
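The placeholder-and-assign pattern above is the standard way to load pretrained embeddings once without baking them into the graph as a constant. A self-contained sketch with illustrative sizes:

import numpy as np
import tensorflow as tf

vocab_size, embed_dim = 5, 4   # illustrative sizes
emb = tf.Variable(tf.constant(0.0, shape=[vocab_size, embed_dim]),
                  trainable=False, name="emb_demo")
emb_ph = tf.placeholder(tf.float32, [vocab_size, embed_dim])
emb_init = emb.assign(emb_ph)

pretrained = np.random.rand(vocab_size, embed_dim).astype(np.float32)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(emb_init, feed_dict={emb_ph: pretrained})  # load once, keep frozen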
Source: layers.py (project: deepsleepnet, author: akaraspt)
def __init__(
        self,
        net_encode_in = None,
        net_decode_in = None,
        cell_fn = None,  # e.g. tf.nn.rnn_cell.LSTMCell
        cell_init_args = {'state_is_tuple':True},
        n_hidden = 256,
        initializer = tf.random_uniform_initializer(-0.1, 0.1),
        in_sequence_length = None,
        out_sequence_length = None,
        initial_state = None,
        dropout = None,
        n_layer = 1,
        # return_last = False,
        return_seq_2d = False,
        name = 'peeky_seq2seq',
    ):
        Layer.__init__(self, name=name)
        if cell_fn is None:
            raise Exception("Please put in cell_fn")
        # self.inputs = layer.outputs
        print("  [TL] PeekySeq2seq %s: n_hidden:%d cell_fn:%s dropout:%s n_layer:%d" %
              (self.name, n_hidden, cell_fn.__name__, dropout, n_layer))
Source: layers.py (project: deepsleepnet, author: akaraspt)
def __init__(
        self,
        net_encode_in = None,
        net_decode_in = None,
        cell_fn = None,  # e.g. tf.nn.rnn_cell.LSTMCell
        cell_init_args = {'state_is_tuple':True},
        n_hidden = 256,
        initializer = tf.random_uniform_initializer(-0.1, 0.1),
        in_sequence_length = None,
        out_sequence_length = None,
        initial_state = None,
        dropout = None,
        n_layer = 1,
        # return_last = False,
        return_seq_2d = False,
        name = 'attention_seq2seq',
    ):
        Layer.__init__(self, name=name)
        if cell_fn is None:
            raise Exception("Please put in cell_fn")
        # self.inputs = layer.outputs
        print("  [TL] PeekySeq2seq %s: n_hidden:%d cell_fn:%s dropout:%s n_layer:%d" %
              (self.name, n_hidden, cell_fn.__name__, dropout, n_layer))

Source: nn.py (project: image_captioning, author: DeepRNN)
def weight(name, shape, init='he', range=0.1, stddev=0.01, init_val=None, group_id=0):
    """ Get a weight variable. """
    if init_val is not None:
        initializer = tf.constant_initializer(init_val)
    elif init == 'uniform':
        initializer = tf.random_uniform_initializer(-range, range)
    elif init == 'normal':
        initializer = tf.random_normal_initializer(stddev=stddev)
    elif init == 'he':
        fan_in, _ = _get_dims(shape)
        std = math.sqrt(2.0 / fan_in)
        initializer = tf.random_normal_initializer(stddev=std)
    elif init == 'xavier':
        fan_in, fan_out = _get_dims(shape)
        range = math.sqrt(6.0 / (fan_in + fan_out))
        initializer = tf.random_uniform_initializer(-range, range)
    else:
        initializer = tf.truncated_normal_initializer(stddev=stddev)

    var = tf.get_variable(name, shape, initializer=initializer)
    tf.add_to_collection('l2_'+str(group_id), tf.nn.l2_loss(var))
    return var
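A hedged usage sketch for weight() (runnable given the definition above). Note that the range parameter shadows Python's builtin range() inside the function, which is legal but easy to misread:

w1 = weight("w1", [784, 256], init="uniform", range=0.1)   # U(-0.1, 0.1)
w2 = weight("w2", [256, 10], init="normal", stddev=0.01)   # N(0, 0.01)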
Source: hier_rnn_model.py (project: seqGan_chatbot, author: zpppy)
def main(_):
    with tf.Session() as sess:
        query = [[1],[2],[3],[4],[5]]
        answer = [[6],[7],[8],[9],[0],[0],[0],[0],[0],[0]]
        target = [1]
        config = Config
        initializer = tf.random_uniform_initializer(-config.init_scale, config.init_scale)
        with tf.variable_scope(name_or_scope="rnn_model", initializer=initializer):
            model = Hier_rnn_model(config, name_scope=config.name_model)
            sess.run(tf.global_variables_initializer())
        input_feed = {}
        for i in range(config.buckets[0][0]):
            input_feed[model.query[i].name] = query[i]
        for i in range(config.buckets[0][1]):
            input_feed[model.answer[i].name] = answer[i]
        input_feed[model.target.name] = target

        fetches = [model.b_train_op[0], model.b_query_state[0],  model.b_state[0], model.b_logits[0]]

        train_op, query, state, logits = sess.run(fetches=fetches, feed_dict=input_feed)

        print("query: ", np.shape(query))

    pass
Source: rnn_decoder.py (project: automatic-summarization, author: mozilla)
def _build(self, initial_state, helper):
    if not self.initial_state:
      self._setup(initial_state, helper)

    scope = tf.get_variable_scope()
    scope.set_initializer(tf.random_uniform_initializer(
        -self.params["init_scale"],
        self.params["init_scale"]))

    maximum_iterations = None
    if self.mode == tf.contrib.learn.ModeKeys.INFER:
      maximum_iterations = self.params["max_decode_length"]

    outputs, final_state = dynamic_decode(
        decoder=self,
        output_time_major=True,
        impute_finished=False,
        maximum_iterations=maximum_iterations)
    return self.finalize(outputs, final_state)
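scope.set_initializer makes every tf.get_variable call in that scope default to the given initializer unless one is passed explicitly. A minimal standalone sketch:

import tensorflow as tf

with tf.variable_scope("decoder_demo") as scope:
    scope.set_initializer(tf.random_uniform_initializer(-0.04, 0.04))
    v = tf.get_variable("v", shape=[8])  # drawn from U(-0.04, 0.04)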
Source: rnn_encoder.py (project: automatic-summarization, author: mozilla)
def encode(self, inputs, sequence_length, **kwargs):
    scope = tf.get_variable_scope()
    scope.set_initializer(tf.random_uniform_initializer(
        -self.params["init_scale"],
        self.params["init_scale"]))

    cell = training_utils.get_rnn_cell(**self.params["rnn_cell"])
    outputs, state = tf.nn.dynamic_rnn(
        cell=cell,
        inputs=inputs,
        sequence_length=sequence_length,
        dtype=tf.float32,
        **kwargs)
    return EncoderOutput(
        outputs=outputs,
        final_state=state,
        attention_values=outputs,
        attention_values_length=sequence_length)
Source: rnn_encoder.py (project: automatic-summarization, author: mozilla)
def encode(self, inputs, sequence_length, **kwargs):
    scope = tf.get_variable_scope()
    scope.set_initializer(tf.random_uniform_initializer(
        -self.params["init_scale"],
        self.params["init_scale"]))

    cell_fw = training_utils.get_rnn_cell(**self.params["rnn_cell"])
    cell_bw = training_utils.get_rnn_cell(**self.params["rnn_cell"])
    outputs, states = tf.nn.bidirectional_dynamic_rnn(
        cell_fw=cell_fw,
        cell_bw=cell_bw,
        inputs=inputs,
        sequence_length=sequence_length,
        dtype=tf.float32,
        **kwargs)

    # Concatenate outputs and states of the forward and backward RNNs
    outputs_concat = tf.concat(outputs, 2)

    return EncoderOutput(
        outputs=outputs_concat,
        final_state=states,
        attention_values=outputs_concat,
        attention_values_length=sequence_length)
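bidirectional_dynamic_rnn returns a (forward, backward) pair of outputs; concatenating on axis 2 merges them along the feature dimension. A toy shape check with illustrative sizes:

import tensorflow as tf

fw = tf.zeros([4, 7, 32])   # forward outputs  [batch, time, units]
bw = tf.zeros([4, 7, 32])   # backward outputs [batch, time, units]
merged = tf.concat((fw, bw), 2)   # [batch, time, 2 * units] = [4, 7, 64]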
Source: misc.py (project: ray, author: ray-project)
def conv2d(x, num_filters, name, filter_size=(3, 3), stride=(1, 1), pad="SAME",
           dtype=tf.float32, collections=None):
    with tf.variable_scope(name):
        stride_shape = [1, stride[0], stride[1], 1]
        filter_shape = [filter_size[0], filter_size[1], int(x.get_shape()[3]),
                        num_filters]

        # There are "num input feature maps * filter height * filter width"
        # inputs to each hidden unit.
        fan_in = np.prod(filter_shape[:3])
        # Each unit in the lower layer receives a gradient from: "num output
        # feature maps * filter height * filter width" / pooling size.
        fan_out = np.prod(filter_shape[:2]) * num_filters
        # Initialize weights with random weights.
        w_bound = np.sqrt(6 / (fan_in + fan_out))

        w = tf.get_variable("W", filter_shape, dtype,
                            tf.random_uniform_initializer(-w_bound, w_bound),
                            collections=collections)
        b = tf.get_variable("b", [1, 1, 1, num_filters],
                            initializer=tf.constant_initializer(0.0),
                            collections=collections)
        return tf.nn.conv2d(x, w, stride_shape, pad) + b
Source: trainer.py (project: THUMT, author: thumt)
def get_initializer(params):
    if params.initializer == "uniform":
        max_val = params.initializer_gain
        return tf.random_uniform_initializer(-max_val, max_val)
    elif params.initializer == "normal":
        return tf.random_normal_initializer(0.0, params.initializer_gain)
    elif params.initializer == "normal_unit_scaling":
        return tf.variance_scaling_initializer(params.initializer_gain,
                                               mode="fan_avg",
                                               distribution="normal")
    elif params.initializer == "uniform_unit_scaling":
        return tf.variance_scaling_initializer(params.initializer_gain,
                                               mode="fan_avg",
                                               distribution="uniform")
    else:
        raise ValueError("Unrecognized initializer: %s" % params.initializer)
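A hedged usage sketch with a stand-in params object (THUMT's real params come from its own configuration system; _Params here is purely illustrative):

import tensorflow as tf

class _Params(object):          # stand-in for THUMT's configuration object
    initializer = "uniform"
    initializer_gain = 0.08

init = get_initializer(_Params())
w = tf.get_variable("w_demo", shape=[512, 512], initializer=init)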
Source: a3C.py (project: A3C, author: go2sea)
def v(self):
        with tf.variable_scope('critic'):
            w_i = tf.random_uniform_initializer(0., 0.1)
            b_i = tf.zeros_initializer()
            with tf.variable_scope('dense1'):
                dense1 = dense(self.state_input, 100, [100], w_i, activation=tf.nn.relu6)
            with tf.variable_scope('dense2'):
                dense2 = dense(dense1, 1, [1], w_i, b_i, activation=None)
            return dense2

    # Note: we need two return values here (mu & sigma), so lazy_property is not suitable.
Source: a3C.py (project: A3C, author: go2sea)
def get_mu_sigma(self):
        with tf.variable_scope('actor'):
            w_i = tf.random_uniform_initializer(0., 0.1)
            dense1 = dense(self.state_input, 200, None, w_i, None, activation=tf.nn.relu6)
            with tf.variable_scope('mu'):
                mu = dense(dense1, self.action_dim, None, w_i, None, activation=tf.nn.tanh)
            with tf.variable_scope('sigma'):
                sigma = dense(dense1, self.action_dim, None, w_i, None, activation=tf.nn.softplus)
            # return mu * self.config.ACTION_BOUND[1], sigma + 1e-4
            return mu, sigma + 1e-4
Source: a3C.py (project: A3C, author: go2sea)
def a_prob(self):
        with tf.variable_scope('actor'):
            w_i = tf.random_uniform_initializer(0., 0.1)
            b_i = tf.zeros_initializer()
            with tf.variable_scope('dense1'):
                dense1 = dense(self.state_input, 200, None, w_i, b_i, activation=tf.nn.relu6)
            with tf.variable_scope('dense2'):
                dense2 = dense(dense1, self.action_dim, None, w_i, b_i, activation=tf.nn.softmax)
            return dense2
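All three A3C snippets above rely on a dense helper that is not shown. A hypothetical version consistent with the call sites, with signature dense(inputs, units, bias_shape, w_i, b_i=None, activation=None); the real go2sea/A3C helper may differ:

import tensorflow as tf

# Hypothetical dense helper consistent with the calls above;
# the real go2sea/A3C helper may differ.
def dense(inputs, units, bias_shape, w_i, b_i=None, activation=None):
    in_dim = inputs.get_shape().as_list()[-1]
    w = tf.get_variable("w", [in_dim, units], initializer=w_i)
    out = tf.matmul(inputs, w)
    if bias_shape is not None or b_i is not None:
        b = tf.get_variable("b", bias_shape if bias_shape is not None else [units],
                            initializer=b_i if b_i is not None else tf.zeros_initializer())
        out = out + b
    return activation(out) if activation is not None else out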
Source: tf_util.py (project: distributional_perspective_on_RL, author: Kiwoo)
def conv2d(x, num_filters, name, filter_size=(3, 3), stride=(1, 1), pad="SAME", dtype=tf.float32, collections=None,
           summary_tag=None):
    with tf.variable_scope(name):
        stride_shape = [1, stride[0], stride[1], 1]
        filter_shape = [filter_size[0], filter_size[1], int(x.get_shape()[3]), num_filters]

        # there are "num input feature maps * filter height * filter width"
        # inputs to each hidden unit
        fan_in = intprod(filter_shape[:3])
        # each unit in the lower layer receives a gradient from:
        # "num output feature maps * filter height * filter width" /
        #   pooling size
        fan_out = intprod(filter_shape[:2]) * num_filters
        # initialize weights with random weights
        w_bound = np.sqrt(6. / (fan_in + fan_out))

        w = tf.get_variable("W", filter_shape, dtype, tf.random_uniform_initializer(-w_bound, w_bound),
                            collections=collections)
        b = tf.get_variable("b", [1, 1, 1, num_filters], initializer=tf.zeros_initializer(),
                            collections=collections)

        if summary_tag is not None:
            tf.summary.image(summary_tag,
                             tf.transpose(tf.reshape(w, [filter_size[0], filter_size[1], -1, 1]),
                                          [2, 0, 1, 3]),
                             max_outputs=10)  # tf.summary.image takes max_outputs, not max_images

        return tf.nn.conv2d(x, w, stride_shape, pad) + b
Source: seq2seq_model.py (project: seq2seq, author: google)
def source_embedding(self):
    """Returns the embedding used for the source sequence.
    """
    return tf.get_variable(
        name="W",
        shape=[self.source_vocab_info.total_size, self.params["embedding.dim"]],
        initializer=tf.random_uniform_initializer(
            -self.params["embedding.init_scale"],
            self.params["embedding.init_scale"]))

