import numpy as np
import tensorflow as tf
from tensorflow.contrib import rnn

# sequence_length, num_hidden and num_classes are hyperparameters
# assumed to be defined at module level elsewhere in the script
def RNN(X, num_hidden_layers):
    # He initialization: std = sqrt(2 / fan_in), with fan_in the
    # flattened feature count of one input sample
    std_dev_He = np.sqrt(2 / np.prod(X.get_shape().as_list()[1:]))
    # flatten each sample to [batch, sequence_length * 8 * 8]
    X = tf.reshape(X, [-1, sequence_length * 8 * 8])
    # split into a list of sequence_length tensors of shape [batch, 8*8],
    # one per time step (the input format static_rnn expects)
    X = tf.split(X, sequence_length, 1)
    # multi-layer GRU (an LSTM stack via rnn.BasicLSTMCell works the same way)
    # NOTE: the initializer must be passed by keyword; passed positionally it
    # lands in variable_scope's default_name argument and is silently unused
    with tf.variable_scope(
            'RNN',
            initializer=tf.random_normal_initializer(mean=0.0,
                                                     stddev=std_dev_He)):
        # output projection: map the last hidden state (num_hidden units)
        # to num_classes logits
        weights = tf.get_variable(
            name='weights',
            shape=[num_hidden, num_classes],
            dtype=tf.float32,
            initializer=tf.contrib.layers.xavier_initializer())
        biases = tf.get_variable(
            name='biases',
            shape=[num_classes],
            dtype=tf.float32,
            initializer=tf.constant_initializer(0.0))
        # build num_hidden_layers distinct GRU cells; multiplying a one-element
        # list ([cell] * n) would reuse the same cell object in every layer
        rnn_cell = rnn.MultiRNNCell(
            [rnn.GRUCell(num_hidden) for _ in range(num_hidden_layers)])
        # LSTM alternative: rnn.BasicLSTMCell(num_hidden, forget_bias=1)
        # unroll the network; static_rnn returns one output per time step
        outputs, states = rnn.static_rnn(rnn_cell, X, dtype=tf.float32)
        # only the last time step's output feeds the classifier
        return tf.matmul(outputs[-1], weights) + biases
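
A minimal usage sketch (the hyperparameter values, placeholder shape, and session code below are assumptions for illustration, not from the original script):

# assumed module-level hyperparameters
sequence_length, num_hidden, num_classes = 10, 64, 2

x_input = tf.placeholder(tf.float32, [None, sequence_length, 8, 8])
logits = RNN(x_input, num_hidden_layers=2)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    batch = np.zeros((4, sequence_length, 8, 8), dtype=np.float32)
    # prints (4, 2), i.e. one row of num_classes logits per sample
    print(sess.run(logits, feed_dict={x_input: batch}).shape)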