def __init__(
    self,
    n_components,
    alpha_activation=None,
    beta_activation=None,
    kappa_activation=None,
    *args,
    **kwargs
):
    """Graves-style sequence attention layer.

    n_components: number of Gaussian components in the attention window.
    alpha_activation, beta_activation, kappa_activation: optional
        activations for the mixture's importance (alpha), width (beta),
        and location (kappa) parameters; None keeps the defaults.
    """
    super(GravesSequenceAttention, self).__init__(*args, **kwargs)
    # Parameterisation of the soft attention window (Graves, 2013):
    # a mixture of n_components Gaussians over input positions.
    self.distribution = AlexGravesSequenceAttentionParams(
        n_components,
        alpha_activation,
        beta_activation,
        kappa_activation,
    )
    # Recurrent state carried between timesteps: the previous window
    # weights and the running kappa (window location).
    self._attention_states = [None, None]
    self._attention_state_spec = [
        InputSpec(ndim=2),          # attention weights at t-1
        InputSpec(shape=(None, 1))  # kappa: current window position
    ]
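For context, the alpha/beta/kappa triple parameterises the soft attention window from Graves (2013), "Generating Sequences With Recurrent Neural Networks": each of the n_components Gaussians contributes a weight over input positions. Below is a minimal NumPy sketch of that window computation; the function name and shapes are illustrative, not part of this layer's API.

import numpy as np

def graves_window(alpha, beta, kappa, seq_len):
    # phi(u) = sum_k alpha_k * exp(-beta_k * (kappa_k - u)^2)
    # alpha: component importance, beta: width, kappa: location,
    # each of shape (n_components,).
    u = np.arange(seq_len, dtype=float)  # input positions 0..seq_len-1
    phi = (alpha[:, None]
           * np.exp(-beta[:, None] * (kappa[:, None] - u) ** 2)).sum(axis=0)
    return phi  # shape (seq_len,): one weight per input position

# Three components centred near positions 2, 5 and 9 of a length-12 input:
phi = graves_window(np.array([1.0, 0.5, 0.2]),
                    np.array([0.8, 0.8, 0.8]),
                    np.array([2.0, 5.0, 9.0]),
                    seq_len=12)

In Graves' formulation kappa advances cumulatively across timesteps (kappa_t = kappa_{t-1} + exp(kappa_hat_t)), which is presumably why the layer keeps kappa in its recurrent state alongside the previous attention weights.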