Example source code for the Python class BasicLSTMCell()

critic_network.py (project: -NIPS-2017-Learning-to-Run, author: kyleliang919)
def create_q_network(self,state_dim,action_dim,scope):
        # the layer size could be changed
        with tf.variable_scope(scope,reuse=False) as s:
            state_input = tf.placeholder("float",[None,None,state_dim])
            action_input = tf.placeholder("float",[None,None,action_dim])

            # creating the recurrent part
            lstm_cell = rnn.BasicLSTMCell(LSTM_HIDDEN_UNIT)
            lstm_output, lstm_state = tf.nn.dynamic_rnn(
                cell=lstm_cell,
                inputs=tf.concat([state_input, action_input], 2),
                dtype=tf.float32)

            W3 = tf.Variable(tf.random_uniform([lstm_cell.output_size, 1], -3e-3, 3e-3))
            b3 = tf.Variable(tf.random_uniform([1], -3e-3, 3e-3))
            # NOTE: `layer2` is not defined in this excerpt; it presumably comes from
            # layers built on lstm_output elsewhere in the full source.
            q_value_output = tf.identity(tf.matmul(layer2, W3) + b3)
            net = [v for v in tf.trainable_variables() if scope in v.name]
        return state_input,action_input,q_value_output,net
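
A minimal standalone sketch of the same pattern, since `layer2` is defined outside this excerpt: state and action are concatenated along the feature axis, fed through a BasicLSTMCell via dynamic_rnn, and the last time step is projected to a scalar Q value. All sizes (LSTM_HIDDEN_UNIT, state_dim, action_dim) are illustrative assumptions.

import tensorflow as tf
from tensorflow.contrib import rnn

LSTM_HIDDEN_UNIT = 64           # assumed hyper-parameter
state_dim, action_dim = 41, 18  # assumed dimensions

state_input = tf.placeholder(tf.float32, [None, None, state_dim])
action_input = tf.placeholder(tf.float32, [None, None, action_dim])

lstm_cell = rnn.BasicLSTMCell(LSTM_HIDDEN_UNIT)
lstm_output, lstm_state = tf.nn.dynamic_rnn(
    cell=lstm_cell,
    inputs=tf.concat([state_input, action_input], 2),
    dtype=tf.float32)

# project the final time step of the LSTM output to a scalar Q value
W3 = tf.Variable(tf.random_uniform([LSTM_HIDDEN_UNIT, 1], -3e-3, 3e-3))
b3 = tf.Variable(tf.random_uniform([1], -3e-3, 3e-3))
q_value_output = tf.matmul(lstm_output[:, -1, :], W3) + b3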
07_recurrentNeuralNet(2).py (project: start_DeepLearning, author: SONG-WONHO)
def lstm_cell():
    cell = rnn.BasicLSTMCell(hidden_dim, state_is_tuple=True)
    return cell

#stacked LSTM
07_recurrentNeuralNet(3).py (project: start_DeepLearning, author: SONG-WONHO)
def lstm_cell():
    cell = rnn.BasicLSTMCell(hidden_dim, state_is_tuple=True, activation=tf.tanh)
    return cell

#stacked LSTM
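
The `#stacked LSTM` comments above point at the usual next step: wrapping several such cells in rnn.MultiRNNCell. A minimal sketch; hidden_dim, num_layers and the input feature width are assumed values.

import tensorflow as tf
from tensorflow.contrib import rnn

hidden_dim = 128   # assumed
num_layers = 2     # assumed

def lstm_cell():
    return rnn.BasicLSTMCell(hidden_dim, state_is_tuple=True)

# one fresh cell object per layer; reusing a single instance would share weights
stacked_cell = rnn.MultiRNNCell([lstm_cell() for _ in range(num_layers)],
                                state_is_tuple=True)

inputs = tf.placeholder(tf.float32, [None, None, 50])  # [batch, time, features], assumed width
outputs, state = tf.nn.dynamic_rnn(stacked_cell, inputs, dtype=tf.float32)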
cells.py (project: opinatt, author: epochx)
def __init__(self, cell, zoneout_prob, is_training=True):
    if not isinstance(cell, RNNCell):
      raise TypeError("The parameter cell is not an RNNCell.")
    if isinstance(cell, BasicLSTMCell):
      self._tuple = lambda x: LSTMStateTuple(*x)
    else:
      self._tuple = lambda x: tuple(x)
    if (isinstance(zoneout_prob, float) and
          not 0.0 <= zoneout_prob <= 1.0):
      raise ValueError("Parameter zoneout_prob must be between 0 and 1: %f"
                       % zoneout_prob)
    self._cell = cell
    self._zoneout_prob = zoneout_prob
    self.is_training = is_training
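
The constructor above follows the same decorator pattern as TensorFlow's built-in rnn.DropoutWrapper: a cell goes in, a wrapped cell comes out, and the result is used wherever a plain cell would be. A sketch of that usage with DropoutWrapper standing in for the zoneout wrapper defined in this file; the sizes are assumptions.

import tensorflow as tf
from tensorflow.contrib import rnn

base_cell = rnn.BasicLSTMCell(128, state_is_tuple=True)

# DropoutWrapper shown as a stand-in for the zoneout wrapper above
wrapped_cell = rnn.DropoutWrapper(base_cell, output_keep_prob=0.9)

inputs = tf.placeholder(tf.float32, [None, None, 32])
outputs, state = tf.nn.dynamic_rnn(wrapped_cell, inputs, dtype=tf.float32)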
1-9. RNN.py (project: Project101, author: Wonjuseo)
def RNN(x, weights, biases):

    x = tf.unstack(x, n_steps, 1)
    # Define an LSTM cell
    lstm_cell = rnn.BasicLSTMCell(n_hidden, forget_bias=1.0)

    outputs, states = rnn.static_rnn(lstm_cell, x, dtype=tf.float32)

    return tf.matmul(outputs[-1],weights['out'])+biases['out']
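
The helper above expects the MNIST-style module globals typical of this tutorial family. A sketch of the surrounding definitions that make it callable; every size below is an assumption.

import tensorflow as tf
from tensorflow.contrib import rnn

# assumed hyper-parameters
n_input = 28     # features per time step (one image row)
n_steps = 28     # time steps (rows per image)
n_hidden = 128   # LSTM units
n_classes = 10   # output classes

x = tf.placeholder(tf.float32, [None, n_steps, n_input])
weights = {'out': tf.Variable(tf.random_normal([n_hidden, n_classes]))}
biases = {'out': tf.Variable(tf.random_normal([n_classes]))}

logits = RNN(x, weights, biases)  # the RNN function defined above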
similarity_model.py (project: nlvr_tau_nlp_final_proj, author: udiNaveh)
def build_sentence_encoder(vocabulary_size, embeddings_matrix):
    """
    build the computational graph for the LSTM sentence encoder. Return only the placeholders and tensors
    that are called from other methods.
    """
    sentence_oh_placeholder = tf.placeholder(shape=[None, vocabulary_size], dtype=tf.float32,
                                             name="sentence_placeholder")
    word_embeddings_matrix = tf.get_variable("W_we",  # shape=[vocabulary_size, WORD_EMB_SIZE]
                                             initializer=tf.constant(embeddings_matrix, dtype=tf.float32))
    sentence_embedded = tf.expand_dims(tf.matmul(sentence_oh_placeholder, word_embeddings_matrix), 0)
    # placeholders for the sentence and its length
    sent_lengths = tf.placeholder(dtype=tf.int32, name="sent_length_placeholder")

    # Forward cell
    lstm_fw_cell = BasicLSTMCell(LSTM_HIDDEN_SIZE, forget_bias=1.0)
    # Backward cell
    lstm_bw_cell = BasicLSTMCell(LSTM_HIDDEN_SIZE, forget_bias=1.0)
    # stack cells together in RNN
    outputs, _ = tf.nn.bidirectional_dynamic_rnn(lstm_fw_cell, lstm_bw_cell, sentence_embedded, sent_lengths,
                                                 dtype=tf.float32)
    #    outputs: A tuple (output_fw, output_bw) containing the forward and the backward rnn output `Tensor`.
    #    both output_fw, output_bw will be a `Tensor` shaped: [batch_size, max_time, cell_fw.output_size]`

    # outputs is an (output_forward, output_backward) tuple; concatenate them to obtain the h vectors
    lstm_outputs = tf.concat(outputs, 2)[0]  # shape: [max_time, 2 * hidden_layer_size ]
    final_fw = outputs[0][:, -1, :]
    final_bw = outputs[1][:, 0, :]
    e_m = tf.concat((final_fw, final_bw), axis=1)
    sentence_words_bow = tf.placeholder(tf.float32, [None, len(words_vocabulary)], name="sentence_words_bow")
    e_m_with_bow = tf.concat([e_m, sentence_words_bow], axis=1)

    return sentence_oh_placeholder, sent_lengths, sentence_words_bow, lstm_outputs, e_m_with_bow
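
A hedged usage sketch: the encoder reads LSTM_HIDDEN_SIZE and words_vocabulary from module-level globals, so those (plus an embeddings matrix) must exist before the call. Every concrete value below is a toy assumption.

import numpy as np
import tensorflow as tf

# assumed module-level configuration the encoder depends on
LSTM_HIDDEN_SIZE = 30
WORD_EMB_SIZE = 8
words_vocabulary = ['a', 'box', 'is', 'blue']   # toy vocabulary
vocabulary_size = len(words_vocabulary)
embeddings_matrix = np.random.randn(vocabulary_size, WORD_EMB_SIZE).astype(np.float32)

(sentence_ph, sent_len_ph, bow_ph,
 lstm_outputs, e_m_with_bow) = build_sentence_encoder(vocabulary_size, embeddings_matrix)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sentence_one_hot = np.eye(vocabulary_size, dtype=np.float32)   # 4-word toy sentence
    bow = sentence_one_hot.sum(axis=0, keepdims=True)
    h_states, sentence_vec = sess.run(
        [lstm_outputs, e_m_with_bow],
        feed_dict={sentence_ph: sentence_one_hot,
                   sent_len_ph: [sentence_one_hot.shape[0]],
                   bow_ph: bow})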
seq2seq.py (project: nlvr_tau_nlp_final_proj, author: udiNaveh)
def build_sentence_encoder(vocabulary_size):
    """
    build the computational graph for the LSTM sentence encoder. Return only the placeholders and tensors
    that are called from other methods.
    """
    sentence_oh_placeholder = tf.placeholder(shape=[None, vocabulary_size], dtype=tf.float32,
                                             name="sentence_placeholder")
    word_embeddings_matrix = tf.get_variable("W_we",  # shape=[vocabulary_size, WORD_EMB_SIZE]
                                             initializer=tf.constant(embeddings_matrix, dtype=tf.float32))
    sentence_embedded = tf.expand_dims(tf.matmul(sentence_oh_placeholder, word_embeddings_matrix), 0)
    # placeholders for the sentence and its length
    sent_lengths = tf.placeholder(dtype=tf.int32, name="sent_length_placeholder")

    # Forward cell
    lstm_fw_cell = BasicLSTMCell(LSTM_HIDDEN_SIZE, forget_bias=1.0)
    # Backward cell
    lstm_bw_cell = BasicLSTMCell(LSTM_HIDDEN_SIZE, forget_bias=1.0)
    # stack cells together in RNN
    outputs, _ = tf.nn.bidirectional_dynamic_rnn(lstm_fw_cell, lstm_bw_cell, sentence_embedded, sent_lengths,
                                                 dtype=tf.float32)
    #    outputs: A tuple (output_fw, output_bw) containing the forward and the backward rnn output `Tensor`.
    #    both output_fw, output_bw will be a `Tensor` shaped: [batch_size, max_time, cell_fw.output_size]`

    # outputs is an (output_forward, output_backward) tuple; concatenate them to obtain the h vectors
    lstm_outputs = tf.concat(outputs, 2)[0]  # shape: [max_time, 2 * hidden_layer_size ]
    final_fw = outputs[0][:, -1, :]
    final_bw = outputs[1][:, 0, :]
    e_m = tf.concat((final_fw, final_bw), axis=1)
    sentence_words_bow = tf.placeholder(tf.float32, [None, len(words_vocabulary)], name="sentence_words_bow")
    e_m_with_bow = tf.concat([e_m, sentence_words_bow], axis=1)

    return sentence_oh_placeholder, sent_lengths, sentence_words_bow, lstm_outputs, e_m_with_bow
    # TODO return sentence_oh_placeholder, sent_lengths, sentence_words_bow, lstm_outputs, e_m
seq2seq.py (project: nlvr_tau_nlp_final_proj, author: udiNaveh)
def build_sentence_encoder2(vocabulary_size, embeddings_matrix):
    """
    build the computational graph for the LSTM sentence encoder. Return only the placeholders and tensors
    that are called from other methods.
    """
    sentence_oh_placeholder2 = tf.placeholder(shape=[None, vocabulary_size], dtype=tf.float32,
                                             name="sentence_placeholder")
    word_embeddings_matrix2 = tf.get_variable("W_we",  # shape=[vocabulary_size, WORD_EMB_SIZE]
                                             initializer=tf.constant(embeddings_matrix, dtype=tf.float32))
    sentence_embedded2 = tf.expand_dims(tf.matmul(sentence_oh_placeholder2, word_embeddings_matrix2), 0)
    # placeholders for the sentence and its length
    sent_lengths2 = tf.placeholder(dtype=tf.int32, name="sent_length_placeholder")

    # Forward cell
    lstm_fw_cell2 = BasicLSTMCell(LSTM_HIDDEN_SIZE, forget_bias=1.0)
    # Backward cell
    lstm_bw_cell2 = BasicLSTMCell(LSTM_HIDDEN_SIZE, forget_bias=1.0)
    # stack cells together in RNN
    outputs2, _ = tf.nn.bidirectional_dynamic_rnn(lstm_fw_cell2, lstm_bw_cell2, sentence_embedded2, sent_lengths2,
                                                 dtype=tf.float32)
    #    outputs: A tuple (output_fw, output_bw) containing the forward and the backward rnn output `Tensor`.
    #    both output_fw, output_bw will be a `Tensor` shaped: [batch_size, max_time, cell_fw.output_size]`

    # outputs is an (output_forward, output_backward) tuple; concatenate them to obtain the h vectors
    lstm_outputs2 = tf.concat(outputs2, 2)[0]  # shape: [max_time, 2 * hidden_layer_size ]
    final_fw2 = outputs2[0][:, -1, :]
    final_bw2 = outputs2[1][:, 0, :]
    e_m2 = tf.concat((final_fw2, final_bw2), axis=1)
    sentence_words_bow2 = tf.placeholder(tf.float32, [None, len(words_vocabulary)], name="sentence_words_bow")
    e_m_with_bow2 = tf.concat([e_m2, sentence_words_bow2], axis=1)

    return sentence_oh_placeholder2, sent_lengths2, sentence_words_bow2, lstm_outputs2, e_m_with_bow2
LSTM_model_1.py (project: Deep-Learning-with-TensorFlow, author: PacktPublishing)
def RNN(x, weights, biases):
    x = tf.transpose(x, [1, 0, 2])
    x = tf.reshape(x, [-1, n_input])
    x = tf.split(axis=0, num_or_size_splits=n_steps, value=x)
    lstm_cell = rnn.BasicLSTMCell(n_hidden, forget_bias=1.0)
    outputs, states = rnn.static_rnn(lstm_cell, x, dtype=tf.float32)
    return tf.matmul(outputs[-1], weights['out']) + biases['out']
bidirectional_RNN_1.py (project: Deep-Learning-with-TensorFlow, author: PacktPublishing)
def BiRNN(x, weights, biases):
    x = tf.transpose(x, [1, 0, 2])
    x = tf.reshape(x, [-1, n_input])
    x = tf.split(axis=0, num_or_size_splits=n_steps, value=x)
    lstm_fw_cell = rnn.BasicLSTMCell(n_hidden, forget_bias=1.0)
    lstm_bw_cell = rnn.BasicLSTMCell(n_hidden, forget_bias=1.0)
    try:
        outputs, _, _ = rnn.static_bidirectional_rnn(lstm_fw_cell, lstm_bw_cell, x,
                                              dtype=tf.float32)
    except Exception: # Old TensorFlow version only returns outputs not states
        outputs = rnn.static_bidirectional_rnn(lstm_fw_cell, lstm_bw_cell, x,
                                        dtype=tf.float32)
    return tf.matmul(outputs[-1], weights['out']) + biases['out']
lstm_predictior.py (project: LSTM-Time-Series-Analysis-using-Tensorflow, author: pusj)
def lstm_model(time_steps, rnn_layers, dense_layers=None, learning_rate=0.01, optimizer='Adagrad',
               learning_rate_decay_fn=None):  # optimizers: Ftrl, Adam, Adagrad, Momentum, SGD, RMSProp
    """
    Creates a deep model based on:
        * stacked LSTM cells
        * optional dense layers
    :param time_steps: number of time steps the input sequence is unrolled over
    :param rnn_layers: list of int or dict
                         * list of int: the number of units used to instantiate each `BasicLSTMCell`
                         * list of dict: [{'num_units': int, 'keep_prob': float}, ...]
    :param dense_layers: list of node counts for each dense layer
    :return: the model definition (a function usable as a model_fn)
    """
    print(time_steps)
    # exit(0)

    def lstm_cells(layers):
        print('lstm_cells layers:', layers)
        if isinstance(layers[0], dict):
            return [rnn.DropoutWrapper(rnn.BasicLSTMCell(layer['num_units'],state_is_tuple=True),layer['keep_prob'])
                    if layer.get('keep_prob')
                    else rnn.BasicLSTMCell(layer['num_units'], state_is_tuple=True)
                    for layer in layers]

        return [rnn.BasicLSTMCell(steps, state_is_tuple=True) for steps in layers]

    def dnn_layers(input_layers, layers):
        if layers and isinstance(layers, dict):
            return tflayers.stack(input_layers, tflayers.fully_connected,
                                  layers['layers'],
                                  activation=layers.get('activation'),
                                  dropout=layers.get('dropout'))
        elif layers:
            return tflayers.stack(input_layers, tflayers.fully_connected, layers)
        else:
            return input_layers

    def _lstm_model(X, y):
        stacked_lstm = rnn.MultiRNNCell(lstm_cells(rnn_layers), state_is_tuple=True)
        x_ = tf.unstack(X, num=time_steps, axis=1)

        output, layers = rnn.static_rnn(stacked_lstm, x_, dtype=dtypes.float32)
        output = dnn_layers(output[-1], dense_layers)
        prediction, loss = tflearn.models.linear_regression(output, y)
        train_op = tf.contrib.layers.optimize_loss(
            loss, tf.contrib.framework.get_global_step(), optimizer=optimizer,
            learning_rate = tf.train.exponential_decay(learning_rate, tf.contrib.framework.get_global_step(), decay_steps = 1000, decay_rate = 0.9, staircase=False, name=None))

        print('learning_rate',learning_rate)
        return prediction, loss, train_op

    # https://www.tensorflow.org/versions/r0.10/api_docs/python/train/decaying_the_learning_rate

    return _lstm_model
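
The returned _lstm_model uses the old (X, y) model-fn signature, so it is presumably consumed through the deprecated tf.contrib.learn Estimator API; a sketch under that assumption, with hypothetical hyper-parameters and model_dir.

from tensorflow.contrib import learn

TIMESTEPS = 10                    # assumed
RNN_LAYERS = [{'num_units': 5}]   # assumed
DENSE_LAYERS = None

regressor = learn.SKCompat(learn.Estimator(
    model_fn=lstm_model(TIMESTEPS, RNN_LAYERS, DENSE_LAYERS),
    model_dir='./lstm_model'))

# regressor.fit(X_train, y_train, batch_size=100, steps=10000)  # training data not shown here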
feudal_network.py (project: tensorflow-rl, author: steveKapturowski)
def _build_lstm(self, input_state):
        initial_lstm_state = tf.placeholder(
            tf.float32, [None, 2*self.hidden_state_size], name='initial_state')
        # NOTE: with state_is_tuple=True, this flat [c, h] placeholder would need to be
        # split into an LSTMStateTuple before the dynamic_rnn call below will accept it.
        lstm_cell = BasicLSTMCell(self.hidden_state_size, forget_bias=1.0, state_is_tuple=True)

        batch_size = tf.shape(self.step_size)[0]
        ox_reshaped = tf.reshape(input_state,
            [batch_size, -1, input_state.get_shape().as_list()[-1]])

        lstm_outputs, lstm_state = tf.nn.dynamic_rnn(
            lstm_cell,
            ox_reshaped,
            initial_state=initial_lstm_state,
            sequence_length=self.step_size,
            time_major=False)
feudal_network.py (project: tensorflow-rl, author: steveKapturowski)
def __init__(self, inputs, initial_state, hidden_state_size,
             max_steps, num_cores=10, pool_size=10):

        self.shared_cell = BasicLSTMCell(hidden_state_size)
        self.initial_state = initial_state
        self.max_steps = max_steps
        self.num_cores = num_cores
        self.pool_size = pool_size
        self.inputs = inputs
        self._build_ops()
model.py (project: universe-starter-agent, author: openai)
def __init__(self, ob_space, ac_space):
        self.x = x = tf.placeholder(tf.float32, [None] + list(ob_space))

        for i in range(4):
            x = tf.nn.elu(conv2d(x, 32, "l{}".format(i + 1), [3, 3], [2, 2]))
        # introduce a "fake" batch dimension of 1 after flatten so that we can do LSTM over time dim
        x = tf.expand_dims(flatten(x), [0])

        size = 256
        if use_tf100_api:
            lstm = rnn.BasicLSTMCell(size, state_is_tuple=True)
        else:
            lstm = rnn.rnn_cell.BasicLSTMCell(size, state_is_tuple=True)
        self.state_size = lstm.state_size
        step_size = tf.shape(self.x)[:1]

        c_init = np.zeros((1, lstm.state_size.c), np.float32)
        h_init = np.zeros((1, lstm.state_size.h), np.float32)
        self.state_init = [c_init, h_init]
        c_in = tf.placeholder(tf.float32, [1, lstm.state_size.c])
        h_in = tf.placeholder(tf.float32, [1, lstm.state_size.h])
        self.state_in = [c_in, h_in]

        if use_tf100_api:
            state_in = rnn.LSTMStateTuple(c_in, h_in)
        else:
            state_in = rnn.rnn_cell.LSTMStateTuple(c_in, h_in)
        lstm_outputs, lstm_state = tf.nn.dynamic_rnn(
            lstm, x, initial_state=state_in, sequence_length=step_size,
            time_major=False)
        lstm_c, lstm_h = lstm_state
        x = tf.reshape(lstm_outputs, [-1, size])
        self.logits = linear(x, ac_space, "action", normalized_columns_initializer(0.01))
        self.vf = tf.reshape(linear(x, 1, "value", normalized_columns_initializer(1.0)), [-1])
        self.state_out = [lstm_c[:1, :], lstm_h[:1, :]]
        self.sample = categorical_sample(self.logits, ac_space)[0, :]
        self.var_list = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, tf.get_variable_scope().name)
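
The state_init / state_in / state_out plumbing above is what lets the policy carry its LSTM state across environment steps. A self-contained sketch of just that mechanism, with assumed sizes.

import numpy as np
import tensorflow as tf
from tensorflow.contrib import rnn

size, features = 256, 32   # assumed
x = tf.placeholder(tf.float32, [1, None, features])
lstm = rnn.BasicLSTMCell(size, state_is_tuple=True)

c_in = tf.placeholder(tf.float32, [1, size])
h_in = tf.placeholder(tf.float32, [1, size])
state_in = rnn.LSTMStateTuple(c_in, h_in)

outputs, state_out = tf.nn.dynamic_rnn(lstm, x, initial_state=state_in, time_major=False)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    c, h = np.zeros((1, size), np.float32), np.zeros((1, size), np.float32)
    for _ in range(3):   # three "environment steps", state carried forward
        obs = np.random.randn(1, 1, features).astype(np.float32)
        out, (c, h) = sess.run([outputs, state_out],
                               feed_dict={x: obs, c_in: c, h_in: h})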
lstm.py (project: MovieComment2Rating, author: yaokai1117)
def __init__(self, sent_length, class_num,
                 embedding_size, initial_embedding_dict,
                 l2_lambda, hidden_size):

        self.input_x = tf.placeholder(tf.int32, [None, sent_length], name="input_x")
        self.input_y = tf.placeholder(tf.float32, [None, class_num], name="input_y")
        self.dropout_keep_prob_1 = tf.placeholder(tf.float32, name="dropout_keep_prob_1")
        self.dropout_keep_prob_2 = tf.placeholder(tf.float32, name="dropout_keep_prob_2")

        l2_loss = tf.constant(0.0)

        with tf.name_scope("embedding"):
            self.embedding_dict = tf.Variable(initial_embedding_dict, name="Embedding", dtype=tf.float32)
            self.embedded_chars = tf.nn.embedding_lookup(self.embedding_dict, self.input_x)
            # unstack embedded input
            self.unstacked = tf.unstack(self.embedded_chars, sent_length, 1)

        with tf.name_scope("lstm"):
            # create an LSTM network
            lstm_cell = rnn.BasicLSTMCell(hidden_size)
            self.output, self.states = rnn.static_rnn(lstm_cell, self.unstacked, dtype=tf.float32)
            self.pooling = tf.reduce_mean(self.output, 0)

        with tf.name_scope("linear"):
            weights = tf.get_variable(
                "W",
                shape=[hidden_size, class_num],
                initializer=tf.contrib.layers.xavier_initializer())
            bias = tf.Variable(tf.constant(0.1, shape=[class_num]), name="b")
            l2_loss += tf.nn.l2_loss(weights)
            l2_loss += tf.nn.l2_loss(bias)
            self.linear_result = tf.nn.xw_plus_b(self.pooling, weights, bias, name="linear")
            self.predictions = tf.arg_max(self.linear_result, 1, name="predictions")

        with tf.name_scope("loss"):
            losses = tf.nn.softmax_cross_entropy_with_logits(logits=self.linear_result, labels=self.input_y)
            self.loss = tf.reduce_mean(losses) + l2_lambda * l2_loss

        with tf.name_scope("accuracy"):
            correct_predictions = tf.equal(self.predictions, tf.argmax(self.input_y, 1))
            self.accuracy = tf.reduce_mean(tf.cast(correct_predictions, "float"), name="accuracy")
lstm.py (project: ray, author: ray-project)
def _init(self, inputs, num_outputs, options):
        use_tf100_api = (distutils.version.LooseVersion(tf.VERSION) >=
                         distutils.version.LooseVersion("1.0.0"))

        self.x = x = inputs
        for i in range(4):
            x = tf.nn.elu(conv2d(x, 32, "l{}".format(i + 1), [3, 3], [2, 2]))
        # Introduce a "fake" batch dimension of 1 after flatten so that we can
        # do LSTM over the time dim.
        x = tf.expand_dims(flatten(x), [0])

        size = 256
        if use_tf100_api:
            lstm = rnn.BasicLSTMCell(size, state_is_tuple=True)
        else:
            lstm = rnn.rnn_cell.BasicLSTMCell(size, state_is_tuple=True)
        step_size = tf.shape(self.x)[:1]

        c_init = np.zeros((1, lstm.state_size.c), np.float32)
        h_init = np.zeros((1, lstm.state_size.h), np.float32)
        self.state_init = [c_init, h_init]
        c_in = tf.placeholder(tf.float32, [1, lstm.state_size.c])
        h_in = tf.placeholder(tf.float32, [1, lstm.state_size.h])
        self.state_in = [c_in, h_in]

        if use_tf100_api:
            state_in = rnn.LSTMStateTuple(c_in, h_in)
        else:
            state_in = rnn.rnn_cell.LSTMStateTuple(c_in, h_in)
        lstm_out, lstm_state = tf.nn.dynamic_rnn(lstm, x,
                                                 initial_state=state_in,
                                                 sequence_length=step_size,
                                                 time_major=False)
        lstm_c, lstm_h = lstm_state
        x = tf.reshape(lstm_out, [-1, size])
        logits = linear(x, num_outputs, "action", normc_initializer(0.01))
        self.state_out = [lstm_c[:1, :], lstm_h[:1, :]]
        return logits, x
rnn.py (project: deeplearning, author: fanfanfeng)
def RNN(x, weights, biases):
    # alternative pre-processing:
    # x = tf.transpose(x, [1, 0, 2])
    # x = tf.reshape(x, [-1, n_input])
    x = tf.unstack(x, n_steps, 1)
    lstm_cell = rnn.BasicLSTMCell(n_hidden, forget_bias=1.0)
    outputs, states = rnn.static_rnn(lstm_cell, x, dtype=tf.float32)

    return tf.matmul(outputs[-1], weights['out']) + biases['out']
biRNN.py (project: deeplearning, author: fanfanfeng)
def BiRNN(x,weights,biases):
    x = tf.unstack(x,n_steps,1)
    lstm_fw_cell = rnn.BasicLSTMCell(n_hidden,forget_bias=1.0)
    lstm_bw_cell = rnn.BasicLSTMCell(n_hidden,forget_bias=1.0)

    outputs,_,_ = rnn.static_bidirectional_rnn(lstm_fw_cell,lstm_bw_cell,x,dtype=tf.float32)

    return tf.matmul(outputs[-1],weights['out']) + biases['out']

