from keras import backend as K
from keras.layers import Dense, Lambda, TimeDistributed


def question_attn_vector(question_encoding, question_mask, context_encoding, repeat=True):
    """Self-attention over the question, summarized into one vector and
    optionally tiled along the context time axis."""
    # score each question timestep with a shared dense layer -> (batch, q_len, 1)
    question_attention_vector = TimeDistributed(Dense(1))(question_encoding)
    # normalize the scores over the time axis, ignoring padded positions
    question_attention_vector = Lambda(lambda q: masked_softmax(q[0], q[1]))([question_attention_vector, question_mask])
    # weight each timestep's encoding by its attention score
    question_attention_vector = Lambda(lambda q: q[0] * q[1])([question_encoding, question_attention_vector])
    # sum over the time axis -> a single (batch, hidden) summary vector
    question_attention_vector = Lambda(lambda q: K.sum(q, axis=1))(question_attention_vector)
    if repeat:
        # tile the summary to the context length -> (batch, c_len, hidden)
        question_attention_vector = Lambda(lambda q: repeat_vector(q[0], q[1]))([question_attention_vector, context_encoding])
    return question_attention_vector
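
The function relies on two helpers, masked_softmax and repeat_vector, that are defined elsewhere in the model. A minimal sketch of the behavior assumed here (the exact implementations may differ; the mask is assumed to be 1 for real tokens and 0 for padding) could be:

def masked_softmax(logits, mask, axis=1):
    # mask: (batch, q_len); align it with the (batch, q_len, 1) logits
    mask = K.expand_dims(mask, axis=-1)
    # drive padded scores toward -inf so they get ~0 probability
    logits = logits + (1.0 - mask) * -1e30
    # numerically stable softmax over the time axis
    exp = K.exp(logits - K.max(logits, axis=axis, keepdims=True))
    return exp / K.sum(exp, axis=axis, keepdims=True)


def repeat_vector(vector, sequence):
    # tile a (batch, hidden) vector along the time axis of `sequence`
    # -> (batch, seq_len, hidden), so it can be concatenated per timestep
    return K.repeat(vector, K.shape(sequence)[1])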