11-lstm-tensorflow-char-pat.py file source code

Language: Python

Project: albemarle | Author: SeanTater
import tensorflow as tf
from tensorflow.python.ops import rnn  # provides dynamic_rnn in TF 1.x

# Note: one_hots, n_output, __batch_size, __n_steps, __n_hidden and
# __cell_kind are module-level globals defined elsewhere in this file.
def RNN(inputs, lens, name, reuse):
    print("Building network " + name)
    # Look up a one-hot row for each input character id
    inputs = tf.gather(one_hots, inputs)
    # Define weights and biases for the per-character output projection
    weights = tf.Variable(tf.random_normal([__n_hidden, n_output]), name=name+"_weights")
    biases = tf.Variable(tf.random_normal([n_output]), name=name+"_biases")

    # Build an LSTM cell and unroll it dynamically over the batch
    outputs, states = rnn.dynamic_rnn(
        __cell_kind(__n_hidden),
        inputs,
        sequence_length=lens,
        dtype=tf.float32,
        scope=name,
        time_major=False)

    # The legacy static-API alternative below requires reshaping the data:
    # from shape (__batch_size, __n_steps, n_input) into a list of
    # '__n_steps' tensors, each of shape (__batch_size, n_input).
    '''outputs, states = rnn.rnn(
        __cell_kind(__n_hidden),
        tf.unpack(tf.transpose(inputs, [1, 0, 2])),
        sequence_length=lens,
        dtype=tf.float32,
        scope=name)
    outputs = tf.transpose(tf.pack(outputs), [1, 0, 2])'''
    print ("Done building network " + name)

    # Asserts are actually documentation: they can't be out of date
    assert outputs.get_shape() == (__batch_size, __n_steps, __n_hidden)
    # Linear activation, using the RNN output at each character position
    # Reshaping here for a `batch` matrix multiply
    # It's faster than `batch_matmul` probably because it can guarantee a
    # static shape
    outputs = tf.reshape(outputs, [__batch_size * __n_steps, __n_hidden])
    finals = tf.matmul(outputs, weights)
    return tf.reshape(finals, [__batch_size, __n_steps, n_output]) + biases
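
The comment above describes the folding trick: collapsing the batch and time axes lets a single 2-D matmul project every timestep at once. A minimal NumPy sketch of that equivalence (standalone, with made-up sizes, independent of this file's globals):

import numpy as np

batch, steps, hidden, n_out = 4, 7, 16, 10
outputs = np.random.randn(batch, steps, hidden).astype(np.float32)
weights = np.random.randn(hidden, n_out).astype(np.float32)

# One big matmul over the folded (batch*steps, hidden) matrix ...
folded = outputs.reshape(batch * steps, hidden) @ weights
fast = folded.reshape(batch, steps, n_out)

# ... matches a per-timestep (batched) matmul
slow = np.einsum('bth,ho->bto', outputs, weights)
assert np.allclose(fast, slow, atol=1e-5)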

# tf Graph input
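
The snippet is truncated at the graph-input declaration. Purely as an illustration of how the module-level names used by RNN might be wired up, here is a minimal sketch assuming TF 1.x placeholders; every size, name, and cell choice below is an assumption, not taken from the albemarle project:

import numpy as np
import tensorflow as tf

# Illustrative hyperparameters only; the real values live elsewhere in the file
__batch_size, __n_steps, __n_hidden = 32, 100, 128
n_input = n_output = 256                      # assumption: one symbol per byte
__cell_kind = tf.contrib.rnn.BasicLSTMCell    # assumption: any RNNCell constructor works
one_hots = tf.constant(np.eye(n_input, dtype=np.float32))  # char id -> one-hot row

# tf Graph input (hypothetical shapes)
x = tf.placeholder(tf.int32, [__batch_size, __n_steps])   # character ids
seq_lens = tf.placeholder(tf.int32, [__batch_size])       # true sequence lengths
logits = RNN(x, seq_lens, "char_rnn", reuse=False)        # (batch, steps, n_output)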