def createLoss(self, x_in_indices, y_in, length):
    """Build the loss/accuracy graph for one bin (sequences of a fixed length).

    Args:
        x_in_indices: integer tensor of input symbol indices; 0 is treated as
            padding. Assumed shape (batch, length) -- TODO confirm against caller.
        y_in: integer tensor of target class indices, same leading shape as
            x_in_indices; 0 is treated as padding.
        length: Python int; number of DCGRU computation steps (one per position).

    Returns:
        (cost, accuracy, allMem_tensor, prediction, perItemCost, result):
        scalar mean loss, scalar mean accuracy, stacked execution trace of the
        memory across steps, output logits, per-example mean loss, and the
        argmax class predictions.
    """
    # A position is padding only when BOTH input and target are 0; such
    # positions are zeroed in the memory. Broadcast the keep-mask across the
    # num_units channel axis.
    pad_x = tf.cast(tf.equal(x_in_indices, 0), tf.float32)
    pad_y = tf.cast(tf.equal(y_in, 0), tf.float32)
    keep = 1.0 - pad_x * pad_y
    mask = tf.stack([keep] * self.num_units, axis=2)

    # Input layer: one-hot embed the symbols, project to num_units channels,
    # squash, regularize, then blank out padded positions.
    x_in = tf.one_hot(x_in_indices, self.n_input, dtype=tf.float32)
    state = self.conv_linear(x_in, 1, self.n_input, self.num_units, 0.0, "input")
    state = self.hard_tanh(state, length)
    state = self.dropout(state)
    state = state * mask

    allMem = [state]  # execution trace: memory snapshot after every step
    # Computation steps: one DCGRU application per position. Weights are
    # shared across steps via scope reuse (enabled after the first creation).
    with vs.variable_scope("steps") as gruScope:
        for _ in range(length):
            state = self.DCGRU(state, 3, "dcgru") * mask
            allMem.append(state)
            gruScope.reuse_variables()

    # Output layer and losses.
    allMem_tensor = tf.stack(allMem)
    prediction = self.conv_linear(state, 1, self.num_units, self.n_classes, 0.0, "output")
    # Softmax cross-entropy against the integer targets.
    costVector = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=prediction, labels=y_in)
    perItemCost = tf.reduce_mean(costVector, 1)
    cost = tf.reduce_mean(perItemCost)
    # Predicted class per position; accuracy is the mean exact-match rate
    # (NOTE(review): padded positions are included in this mean -- confirm
    # that is intended).
    result = tf.argmax(prediction, 2)
    correct_pred = tf.cast(tf.equal(result, y_in), tf.float32)
    accuracy = tf.reduce_mean(correct_pred)
    return cost, accuracy, allMem_tensor, prediction, perItemCost, result
# NOTE: web-scrape residue, not code -- original stray lines were
# "评论列表" (comment list) and "文章目录" (article table of contents).