def loss(self, actual_out):
"""
Compute the cross-entropy loss between the actual
output and the desired targets.
"""
    cost_sum = 0.0
    for timestep, actual_term in enumerate(actual_out):
        target_term = self.outputs[timestep]
        # log_softmax is numerically more stable than
        # tf.log(tf.nn.softmax(...)).
        log_probs = tf.nn.log_softmax(actual_term)
        # Full contraction over the (batch, vocab) axes gives the
        # summed cross-entropy for this timestep.
        step_loss = -tf.tensordot(log_probs, target_term, axes=2)
        cost_sum += step_loss
    # Average over the batch size and the sequence length.
    return cost_sum / (self.batch * self.length)
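
As a sanity check, the per-timestep term above can be compared against TensorFlow's built-in cross-entropy op. The sketch below is a minimal, self-contained comparison, assuming a TF2-style eager setup with hypothetical random data (batch, vocab, logits, and targets are illustrative names, not part of the class above); under TF1's graph mode the same ops would need a session run:

import numpy as np
import tensorflow as tf

batch, vocab = 4, 10
logits = tf.constant(np.random.randn(batch, vocab), dtype=tf.float32)
# Hypothetical one-hot targets, just for the comparison.
targets = tf.one_hot(np.random.randint(vocab, size=batch), vocab)

# Manual per-timestep term, as computed in loss() above.
manual = -tf.tensordot(tf.nn.log_softmax(logits), targets, axes=2)

# Built-in equivalent: per-example cross-entropy, summed over the batch.
builtin = tf.reduce_sum(
    tf.nn.softmax_cross_entropy_with_logits(labels=targets, logits=logits))

print(float(manual), float(builtin))  # should agree up to float error

The built-in op fuses the softmax and the log internally, which sidesteps the underflow that made the original tf.log(tf.nn.softmax(...)) formulation fragile.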