Python matmul() example source code
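The snippets below are collected from open-source projects and show how tf.matmul is used in practice. As a quick primer, here is a minimal self-contained sketch (TensorFlow 1.x graph mode, matching the era of the snippets below; the values are illustrative):

import tensorflow as tf

# tf.matmul multiplies the two innermost dimensions: [m, k] x [k, n] -> [m, n]
a = tf.constant([[1., 2.], [3., 4.]])  # shape [2, 2]
b = tf.constant([[5., 6.], [7., 8.]])  # shape [2, 2]
c = tf.matmul(a, b)                    # shape [2, 2]

with tf.Session() as sess:
    print(sess.run(c))  # [[19. 22.], [43. 50.]]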

actor_net.py (project: ddpg-aigym, author: stevenpjg)
def create_actor_net(self, num_states=4, num_actions=1):
        """ Network that takes states and return action """
        N_HIDDEN_1 = 400
        N_HIDDEN_2 = 300
        actor_state_in = tf.placeholder("float",[None,num_states])    
        W1_a=tf.Variable(tf.random_uniform([num_states,N_HIDDEN_1],-1/math.sqrt(num_states),1/math.sqrt(num_states)))
        B1_a=tf.Variable(tf.random_uniform([N_HIDDEN_1],-1/math.sqrt(num_states),1/math.sqrt(num_states)))
        W2_a=tf.Variable(tf.random_uniform([N_HIDDEN_1,N_HIDDEN_2],-1/math.sqrt(N_HIDDEN_1),1/math.sqrt(N_HIDDEN_1)))
        B2_a=tf.Variable(tf.random_uniform([N_HIDDEN_2],-1/math.sqrt(N_HIDDEN_1),1/math.sqrt(N_HIDDEN_1)))
        W3_a=tf.Variable(tf.random_uniform([N_HIDDEN_2,num_actions],-0.003,0.003))
        B3_a=tf.Variable(tf.random_uniform([num_actions],-0.003,0.003))

        H1_a=tf.nn.softplus(tf.matmul(actor_state_in,W1_a)+B1_a)
        H2_a=tf.nn.tanh(tf.matmul(H1_a,W2_a)+B2_a)
        actor_model=tf.matmul(H2_a,W3_a) + B3_a
        return W1_a, B1_a, W2_a, B2_a, W3_a, B3_a, actor_state_in, actor_model
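A minimal usage sketch (hypothetical: `agent` stands in for an instance of the DDPG actor class defining this method, and the session setup is illustrative rather than part of the original project):

import numpy as np
import tensorflow as tf

W1, B1, W2, B2, W3, B3, state_in, actor_model = agent.create_actor_net(num_states=4, num_actions=1)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # feed a single zero state and read back the deterministic action
    action = sess.run(actor_model, feed_dict={state_in: np.zeros((1, 4))})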
ops.py (project: TAC-GAN, author: dashayushman)
def attention(decoder_output, seq_outputs, output_size, time_steps,
              name="attention"):

    with tf.variable_scope(name):
        ui = []
        w_1 = tf.get_variable("w1", [output_size, output_size],
                              tf.float32,
                              tf.contrib.layers.xavier_initializer())
        w_2 = tf.get_variable("w2", [output_size, output_size],
                              tf.float32,
                              tf.contrib.layers.xavier_initializer())
        v = tf.get_variable("v", [output_size, 1],
                              tf.float32,
                              tf.contrib.layers.xavier_initializer())
        for seq_out in seq_outputs:
            ui.append(tf.matmul(tf.nn.tanh(tf.matmul(seq_out, w_1) +
                                    tf.matmul(decoder_output, w_2)), v))

        return ui
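attention returns ui, a Python list with one score tensor of shape [batch_size, 1] per time step. A hedged sketch of turning those scores into attention weights and a context vector (an assumption about intended downstream use, not code from the original project):

scores = tf.concat(ui, axis=1)                  # [batch_size, time_steps]
weights = tf.nn.softmax(scores)                 # softmax over time steps
seq = tf.stack(seq_outputs, axis=1)             # [batch_size, time_steps, output_size]
context = tf.reduce_sum(tf.expand_dims(weights, 2) * seq, axis=1)  # [batch_size, output_size]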
querysum_model.py (project: querysum, author: helmertz)
def _score(self, prev_decoder_state, prev_embedding):
        # Returns scores in a tensor of shape [batch_size, input_sequence_length]

        if self.mode == 'decode':
            query_part = self.query_attention_partial_score_placeholder
            encoder_part = self.encoder_state_attention_partial_scores_placeholder
        else:
            query_part = self.query_attention_partial_score
            encoder_part = self.encoder_state_attention_partial_scores

        embedding_part = tf.matmul(prev_embedding, self.attention_w_e)

        output = tf.matmul(prev_decoder_state,
                           self.attention_w) + embedding_part + query_part + encoder_part + self.attention_b
        output = tf.tanh(output)
        output = tf.reduce_sum(self.attention_v * output, axis=2)
        output = tf.transpose(output, [1, 0])

        # Handle input document padding by giving a large penalty, eliminating it from the weighted average
        padding_penalty = -1e20 * tf.to_float(1 - tf.sign(self.documents_placeholder))
        masked = output + padding_penalty

        return masked
model.py (project: Seq2Seq-Tensorflow, author: keon)
def build_model(self):
        self.build_memory()

        self.W = tf.Variable(tf.random_normal([self.edim, self.nwords], stddev=self.init_std))
        z = tf.matmul(self.hid[-1], self.W)

        self.loss = tf.nn.softmax_cross_entropy_with_logits(logits=z, labels=self.target)

        self.lr = tf.Variable(self.current_lr)
        self.opt = tf.train.GradientDescentOptimizer(self.lr)

        params = [self.A, self.B, self.C, self.T_A, self.T_B, self.W]
        grads_and_vars = self.opt.compute_gradients(self.loss, params)
        clipped_grads_and_vars = [(tf.clip_by_norm(gv[0], self.max_grad_norm), gv[1]) \
                                   for gv in grads_and_vars]

        inc = self.global_step.assign_add(1)
        with tf.control_dependencies([inc]):
            self.optim = self.opt.apply_gradients(clipped_grads_and_vars)

        tf.global_variables_initializer().run()
        self.saver = tf.train.Saver()
widgets.py (project: photinia, author: XoriieInpottn)
def _setup(self, x, prev_state, prev_output):
        """Setup the cell.

        :param x: Input tensor.
        :param prev_state: Previous cell state tensor.
        :param prev_output: Previous cell output tensor.
        :return: Tuple of cell state and cell output tensors.
        """
        # Input gate.
        i = tf.nn.sigmoid(tf.matmul(x, self._wi) + tf.matmul(prev_output, self._ui) + self._bi)
        # Forget gate.
        f = tf.nn.sigmoid(tf.matmul(x, self._wf) + tf.matmul(prev_output, self._uf) + self._bf)
        # Output gate.
        o = tf.nn.sigmoid(tf.matmul(x, self._wo) + tf.matmul(prev_output, self._uo) + self._bo)
        # Output and state.
        lin_state = tf.matmul(x, self._wc) + tf.matmul(prev_output, self._uc) + self._bc
        state = self._activation(lin_state) if self._activation is not None else lin_state
        state = f * prev_state + i * state
        output = o * state
        return state, output
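Note that this cell returns o * state rather than the textbook o * tanh(state). A minimal numpy sketch of the same update rule, assuming sigmoid gates and tanh as self._activation (parameter names mirror the fields above; shapes are illustrative):

import numpy as np

def lstm_step(x, prev_state, prev_output, p):
    sigmoid = lambda z: 1.0 / (1.0 + np.exp(-z))
    i = sigmoid(x @ p['wi'] + prev_output @ p['ui'] + p['bi'])  # input gate
    f = sigmoid(x @ p['wf'] + prev_output @ p['uf'] + p['bf'])  # forget gate
    o = sigmoid(x @ p['wo'] + prev_output @ p['uo'] + p['bo'])  # output gate
    c = np.tanh(x @ p['wc'] + prev_output @ p['uc'] + p['bc'])  # candidate state
    state = f * prev_state + i * c
    return state, o * state  # gate applied to the raw state, as in the cell above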
custom_ops.py (project: how_to_convert_text_to_images, author: llSourcell)
def __call__(self, input_layer, output_size, scope=None, in_dim=None, stddev=0.02, bias_start=0.0):
        shape = input_layer.shape
        input_ = input_layer.tensor
        try:
            if len(shape) == 4:
                input_ = tf.reshape(input_, tf.stack([tf.shape(input_)[0], np.prod(shape[1:])]))
                input_.set_shape([None, np.prod(shape[1:])])
                shape = input_.get_shape().as_list()

            with tf.variable_scope(scope or "Linear"):
                matrix = self.variable("Matrix", [in_dim or shape[1], output_size], dt=tf.float32,
                                       init=tf.random_normal_initializer(stddev=stddev))
                bias = self.variable("bias", [output_size], init=tf.constant_initializer(bias_start))
                return input_layer.with_tensor(tf.matmul(input_, matrix) + bias, parameters=self.vars)
        except Exception:
            import ipdb; ipdb.set_trace()
TensorFlowInterface.py (project: IntroToDeepLearning, author: robb-brown)
def setupOutput(self):
        if len(self.input.get_shape()) > 2:
            input = tf.reshape(self.input,[-1,self.inputShape]) # flatten reduced image into a vector
        else:
            input = self.input
        self.output = tf.matmul(input,self.W)
TensorFlowInterface.py (project: IntroToDeepLearning, author: robb-brown)
def setupOutput(self):
        if len(self.input.get_shape()) > 2:
            input = tf.reshape(self.input,[-1,self.inputShape]) # flatten reduced image into a vector
        else:
            input = self.input
        self.output = tf.nn.softmax(tf.matmul(input,self.W) + self.b)
TensorFlowInterface.py (project: IntroToDeepLearning, author: robb-brown)
def setupOutput(self):
        if len(self.input.get_shape()) > 2:
            input = tf.reshape(self.input,[-1,self.inputShape]) # flatten reduced image into a vector
        else:
            input = self.input
        self.output = tf.nn.relu(tf.matmul(input,self.W) + self.b)
myUtils.py (project: A3C, author: go2sea)
def noisy_dense(inputs, units, bias_shape, c_names, w_i, b_i=None, activation=tf.nn.relu, noisy_distribution='factorised'):
    def f(e_list):
        return tf.multiply(tf.sign(e_list), tf.pow(tf.abs(e_list), 0.5))
    # flatten the input before the dense layer, e.g. with tf.contrib.layers.flatten:
    # dense1 = tf.layers.dense(tf.contrib.layers.flatten(relu5), activation=tf.nn.relu, units=50)
    if not isinstance(inputs, ops.Tensor):
        inputs = ops.convert_to_tensor(inputs, dtype='float')
        # dim_list = inputs.get_shape().as_list()
        # flatten_shape = dim_list[1] if len(dim_list) <= 2 else reduce(lambda x, y: x * y, dim_list[1:])
        # reshaped = tf.reshape(inputs, [dim_list[0], flatten_shape])
    if len(inputs.shape) > 2:
        inputs = tf.contrib.layers.flatten(inputs)
    flatten_shape = inputs.shape[1]
    weights = tf.get_variable('weights', shape=[flatten_shape, units], initializer=w_i)
    w_noise = tf.get_variable('w_noise', [flatten_shape, units], initializer=w_i, collections=c_names)
    if noisy_distribution == 'independent':
        weights += tf.multiply(tf.random_normal(shape=w_noise.shape), w_noise)
    elif noisy_distribution == 'factorised':
        noise_1 = f(tf.random_normal(tf.TensorShape([flatten_shape, 1]), dtype=tf.float32))  # per-input noise vector (factorised)
        noise_2 = f(tf.random_normal(tf.TensorShape([1, units]), dtype=tf.float32))
        weights += tf.multiply(noise_1 * noise_2, w_noise)
    dense = tf.matmul(inputs, weights)
    if bias_shape is not None:
        assert bias_shape[0] == units
        biases = tf.get_variable('biases', shape=bias_shape, initializer=b_i)
        b_noise = tf.get_variable('b_noise', [1, units], initializer=b_i, collections=c_names)
        if noisy_distribution == 'independent':
            biases += tf.multiply(tf.random_normal(shape=b_noise.shape), b_noise)
        elif noisy_distribution == 'factorised':
            biases += tf.multiply(noise_2, b_noise)
        return activation(dense + biases) if activation is not None else dense + biases
    return activation(dense) if activation is not None else dense


# when no bias is given, the activation (ReLU by default) is applied to the dense output directly
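For reference, a minimal numpy sketch of the factorised-noise construction used above (following the NoisyNet formulation; the dimensions are illustrative):

import numpy as np

def f(e):
    # NoisyNet scaling function: sign(x) * sqrt(|x|)
    return np.sign(e) * np.sqrt(np.abs(e))

in_dim, out_dim = 3, 2                       # illustrative sizes
noise_in = f(np.random.randn(in_dim, 1))     # one noise value per input unit
noise_out = f(np.random.randn(1, out_dim))   # one noise value per output unit
w_eps = noise_in * noise_out                 # rank-1 noise matrix, shape [in_dim, out_dim]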
twitter_pos.py (project: GELUs, author: hendrycks)
def model(data_feed):
        h1 = f(tf.matmul(data_feed, w1) + b1)
        h1 = tf.cond(is_training, lambda: tf.nn.dropout(h1, p), lambda: h1)
        h2 = f(tf.matmul(h1, w2) + b2)
        h2 = tf.cond(is_training, lambda: tf.nn.dropout(h2, p), lambda: h2)
        return tf.matmul(h2, w_out) + b_out
timit_fcn.py (project: GELUs, author: hendrycks)
def feedforward(x):
        h1 = f(tf.matmul(x, W['1']) + b['1'])
        h1 = tf.cond(is_training, lambda: tf.nn.dropout(h1, p), lambda: h1)
        h2 = f(tf.matmul(h1, W['2']) + b['2'])
        h2 = tf.cond(is_training, lambda: tf.nn.dropout(h2, p), lambda: h2)
        h3 = f(tf.matmul(h2, W['3']) + b['3'])
        h3 = tf.cond(is_training, lambda: tf.nn.dropout(h3, p), lambda: h3)
        h4 = f(tf.matmul(h3, W['4']) + b['4'])
        h4 = tf.cond(is_training, lambda: tf.nn.dropout(h4, p), lambda: h4)
        h5 = f(tf.matmul(h4, W['5']) + b['5'])
        h5 = tf.cond(is_training, lambda: tf.nn.dropout(h5, p), lambda: h5)

        return tf.matmul(h5, W['6']) + b['6']
tdlm_model.py (project: topically-driven-language-model, author: jhlau)
def get_topics(self, sess, topn):
        topics = []
        entropy = []
        tw_dist = sess.run(tf.nn.softmax(tf.matmul(self.topic_output_embedding, self.tm_softmax_w) + self.tm_softmax_b))
        for ti in xrange(self.config.topic_number):
            best = matutils.argsort(tw_dist[ti], topn=topn, reverse=True)
            topics.append(best)
            entropy.append(scipy.stats.entropy(tw_dist[ti]))

        return topics, entropy

    # get top topics and words given a doc
gen_embeddings.py (project: almond-nnparser, author: Stanford-Mobisocial-IoT-Lab)
def make_skipgram_softmax_loss(embeddings_matrix, vocabulary_size, vector_size):
    vectors = tf.get_variable('vectors', (vocabulary_size, vector_size), dtype=tf.float32, initializer=tf.constant_initializer(embeddings_matrix))
    minibatch = tf.placeholder(shape=(None, 2), dtype=tf.int32)

    center_word_vector = tf.nn.embedding_lookup(vectors, minibatch[:,0])
    yhat = tf.matmul(center_word_vector, vectors, transpose_b=True)

    predict_word = minibatch[:,1]
    loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=predict_word, logits=yhat)
    loss = tf.reduce_mean(loss)
    return vectors, minibatch, loss
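A hedged usage sketch (emb, vocab_size, and dim are illustrative stand-ins for a pretrained embedding array and its dimensions; each row fed to minibatch is a (center_word_id, context_word_id) pair):

vectors, minibatch, loss = make_skipgram_softmax_loss(emb, vocab_size, dim)
train_op = tf.train.GradientDescentOptimizer(0.1).minimize(loss)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    _, batch_loss = sess.run([train_op, loss], feed_dict={minibatch: [[12, 7], [3, 42]]})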
seq2seq_helpers.py (project: almond-nnparser, author: Stanford-Mobisocial-IoT-Lab)
def call(self, input):
        if self._space_transform:
            input = tf.matmul(input, self._space_transform)

        # input is batch by depth
        # self._against is output by depth
        # result is batch by output
        return tf.matmul(input, self._against, transpose_b=True)
seq2seq_aligner.py (project: almond-nnparser, author: Stanford-Mobisocial-IoT-Lab)
def add_decoder_op(self, enc_final_state, enc_hidden_states, output_embed_matrix, training):
        cell_dec = tf.contrib.rnn.MultiRNNCell([self.make_rnn_cell(i, True) for i in range(self.config.rnn_layers)])

        encoder_hidden_size = int(enc_hidden_states.get_shape()[-1])
        decoder_hidden_size = int(cell_dec.output_size)

        # if encoder and decoder have different sizes, add a projection layer
        if encoder_hidden_size != decoder_hidden_size:
            assert False, (encoder_hidden_size, decoder_hidden_size)
            with tf.variable_scope('hidden_projection'):
                kernel = tf.get_variable('kernel', (encoder_hidden_size, decoder_hidden_size), dtype=tf.float32)

                # apply a relu to the projection for good measure
                enc_final_state = nest.map_structure(lambda x: tf.nn.relu(tf.matmul(x, kernel)), enc_final_state)
                enc_hidden_states = tf.nn.relu(tf.tensordot(enc_hidden_states, kernel, [[2], [1]]))
        else:
            # flatten and repack the state
            enc_final_state = nest.pack_sequence_as(cell_dec.state_size, nest.flatten(enc_final_state))

        if self.config.connect_output_decoder:
            cell_dec = ParentFeedingCellWrapper(cell_dec, enc_final_state)
        else:
            cell_dec = InputIgnoringCellWrapper(cell_dec, enc_final_state)
        if self.config.apply_attention:
            attention = LuongAttention(self.config.decoder_hidden_size, enc_hidden_states, self.input_length_placeholder,
                                       probability_fn=tf.nn.softmax)
            cell_dec = AttentionWrapper(cell_dec, attention,
                                        cell_input_fn=lambda inputs, _: inputs,
                                        attention_layer_size=self.config.decoder_hidden_size,
                                        initial_cell_state=enc_final_state)
            enc_final_state = cell_dec.zero_state(self.batch_size, dtype=tf.float32)
        decoder = Seq2SeqDecoder(self.config, self.input_placeholder, self.input_length_placeholder,
                                 self.output_placeholder, self.output_length_placeholder, self.batch_number_placeholder)
        return decoder.decode(cell_dec, enc_final_state, self.config.grammar.output_size, output_embed_matrix, training)
nasm.py (project: variational-text-tensorflow, author: carpedm20)
def build_decoder(self):
    """Inference Network. p(X|h)"""
    with tf.variable_scope("decoder"):
      R = tf.get_variable("R", [self.reader.vocab_size, self.h_dim])
      b = tf.get_variable("b", [self.reader.vocab_size])

      x_i = tf.diag([1.]*self.reader.vocab_size)

      e = -tf.matmul(tf.matmul(self.h, R, transpose_b=True), x_i) + b
      self.p_x_i = tf.squeeze(tf.nn.softmax(e))

