import tensorflow as tf

# compute_question_state, compute_spans and the misc helpers are assumed to be
# provided by the surrounding module.


def bilinear_answer_layer(size, encoded_question, question_length, encoded_support,
                          support_length, support2question, answer2support, is_eval,
                          beam_size=1, max_span_size=10000):
    """Answer layer for multi-paragraph QA with bilinear start/end span scoring."""
    # Summarize the question into a single state via attention over its tokens.
    # Note: `size` is re-derived from the support encoding, overriding the argument.
    size = encoded_support.get_shape()[-1].value
    question_state = compute_question_state(encoded_question, question_length)

    # Project the question state to one start and one end vector per support,
    # mapping each support to its question via support2question.
    hidden = tf.gather(tf.layers.dense(question_state, 2 * size, name="hidden"), support2question)
    hidden_start, hidden_end = tf.split(hidden, 2, 1)

    # Bilinear scores: dot product of each token encoding with the projected
    # question state, masked beyond the actual support length.
    support_mask = misc.mask_for_lengths(support_length)
    start_scores = tf.einsum('ik,ijk->ij', hidden_start, encoded_support) + support_mask
    end_scores = tf.einsum('ik,ijk->ij', hidden_end, encoded_support) + support_mask

    return compute_spans(start_scores, end_scores, answer2support, is_eval, support2question,
                         beam_size, max_span_size)
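

# Illustration (not part of the original module): a minimal NumPy sketch of what the
# 'ik,ijk->ij' einsum contraction above computes. All shapes and values here are
# hypothetical; for every support i and token j, the contraction takes the dot product
# of the question-conditioned vector with the token encoding, giving one score per token.
if __name__ == '__main__':
    import numpy as np

    num_support, max_len, hidden_size = 2, 5, 4  # hypothetical sizes for illustration
    hidden_start = np.random.randn(num_support, hidden_size).astype(np.float32)
    encoded_support = np.random.randn(num_support, max_len, hidden_size).astype(np.float32)

    start_scores = np.einsum('ik,ijk->ij', hidden_start, encoded_support)
    assert start_scores.shape == (num_support, max_len)

    # Equivalent formulation without einsum, making the contraction explicit.
    reference = (encoded_support * hidden_start[:, None, :]).sum(-1)
    assert np.allclose(start_scores, reference, atol=1e-5)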