def __init__(self, input_size, target_size, num_layers, hidden_layer_size,
             init_scale, dropout_keep_prob):
    """Base LSTM model: builds the input placeholders, logits, and loss op.

    Args:
        input_size: int, number of input features per time step.
        target_size: int, dimensionality of the one-hot encoded targets.
        num_layers: int, number of stacked hidden layers.
        hidden_layer_size: int, number of hidden units per layer.
        init_scale: float, weights are initialized uniformly over
            `[-init_scale, init_scale]`.
        dropout_keep_prob: float, fraction of inputs kept when dropout is
            applied; dropout is applied to per-step inputs/outputs and
            never across time steps.
    """
    # Record hyper-parameters on the instance.
    self.input_size = input_size
    self.target_size = target_size
    self.num_layers = num_layers
    self.hidden_layer_size = hidden_layer_size
    self.init_scale = init_scale
    self.dropout_keep_prob = dropout_keep_prob

    # Graph inputs, all shaped [batch, time, feature] (batch and time
    # dimensions left dynamic).
    self._inputs = tf.placeholder(
        tf.float32, shape=[None, None, input_size], name='inputs')
    self._resets = tf.placeholder(
        tf.bool, shape=[None, None, 1], name='resets')
    self._targets = tf.placeholder(
        tf.float32, shape=[None, None, target_size], name='targets')
    self._training = tf.placeholder(tf.bool, shape=[], name='training')

    # Recurrent stack; both hooks are supplied by subclasses.
    rnn_outputs = self._compute_rnn_outputs()
    rnn_output_size = self._compute_rnn_output_size()

    weight_init = tf.random_uniform_initializer(-self.init_scale,
                                                self.init_scale)
    with tf.variable_scope('logits', initializer=weight_init):
        weights = tf.get_variable(
            'W', shape=[rnn_output_size, self.target_size])
        biases = tf.get_variable('b', shape=[self.target_size])
        # Collapse (batch, time) into one axis so a single matmul
        # produces the logits for every time step at once.
        flat_outputs = tf.reshape(rnn_outputs, [-1, rnn_output_size])
        flat_logits = tf.nn.xw_plus_b(flat_outputs, weights, biases)
        # Recover the dynamic [batch, time, target] shape from the inputs.
        num_seqs, num_steps, _ = tf.unpack(tf.shape(self.inputs))
        restored_shape = tf.pack([num_seqs, num_steps, self.target_size])
        self._logits = tf.reshape(flat_logits, restored_shape, name='logits')

    with tf.variable_scope('loss'):
        # Flatten logits/targets the same way and average the per-step
        # softmax cross-entropies into a scalar loss.
        logits_2d = tf.reshape(self.logits, [-1, self.target_size])
        targets_2d = tf.reshape(self.targets, [-1, self.target_size])
        step_losses = tf.nn.softmax_cross_entropy_with_logits(logits_2d,
                                                              targets_2d)
        self._loss = tf.reduce_mean(step_losses, name='loss')
# Source: models.py (scraped blog-page metadata — view/like/comment counts — removed)