def attention_step(
    self,
    attended,
    attention_states,
    step_input,
    recurrent_states
):
    # Unpack the attention vector and location parameter kappa from the
    # previous step (attention_tm1 is carried only to keep the state
    # interface uniform; it is not read here).
    attention_tm1, kappa_tm1 = attention_states
    # Project the current input together with the first recurrent state
    # into the attention parameters for this step.
    params = self.params_layer(
        concatenate([step_input, recurrent_states[0]])
    )
    # Compute the new attention vector over the attended sequence and
    # the updated kappa.
    attention, kappa = self._get_attention_and_kappa(
        attended,
        params,
        kappa_tm1
    )
    # The attention vector is both the step output and part of the
    # state passed on to the next step.
    return attention, [attention, kappa]
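
The helper `_get_attention_and_kappa` is not shown here. Judging from the `kappa` state it carries forward, it likely implements a Graves-style Gaussian-mixture attention window. Below is a minimal numpy sketch of what such a helper could compute; the three-way split of `params`, the `exp` transforms, and the window formula are assumptions for illustration, not the actual implementation.

import numpy as np

def get_attention_and_kappa(attended, params, kappa_tm1):
    """Hypothetical GMM attention window (Graves-style).

    attended: (timesteps, features); params: (3 * n_components,);
    kappa_tm1: (n_components,). Returns (attention, kappa).
    """
    n = kappa_tm1.shape[0]
    # Assumed layout: mixture weights, widths, and kappa increments.
    alpha_hat, beta_hat, kappa_hat = params[:n], params[n:2 * n], params[2 * n:]
    # exp keeps all three positive; kappa then moves monotonically forward.
    alpha = np.exp(alpha_hat)
    beta = np.exp(beta_hat)
    kappa = kappa_tm1 + np.exp(kappa_hat)
    # Evaluate the mixture window phi(u) at every attended position u.
    u = np.arange(attended.shape[0])                       # (timesteps,)
    phi = np.sum(
        alpha[:, None] * np.exp(-beta[:, None] * (kappa[:, None] - u) ** 2),
        axis=0,
    )                                                      # (timesteps,)
    # The attention vector is the phi-weighted sum of the attended rows.
    attention = phi @ attended                             # (features,)
    return attention, kappa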