def __init__(self, output_dim, memory_dim=128, memory_size=20,
             controller_output_dim=100, location_shift_range=1,
             num_read_head=1, num_write_head=1,
             init='glorot_uniform', inner_init='orthogonal',
             forget_bias_init='one', activation='tanh',
             inner_activation='hard_sigmoid',
             W_regularizer=None, U_regularizer=None, R_regularizer=None,
             b_regularizer=None, W_y_regularizer=None,
             W_xi_regularizer=None, W_r_regularizer=None,
             dropout_W=0., dropout_U=0., **kwargs):
    self.output_dim = output_dim
    # NTM memory and head hyperparameters, kept so build()/step() can use them.
    self.memory_dim = memory_dim
    self.memory_size = memory_size
    self.controller_output_dim = controller_output_dim
    self.location_shift_range = location_shift_range
    self.num_read_head = num_read_head
    self.num_write_head = num_write_head
    # Weight initializers and activation functions for the controller.
    self.init = initializations.get(init)
    self.inner_init = initializations.get(inner_init)
    self.forget_bias_init = initializations.get(forget_bias_init)
    self.activation = activations.get(activation)
    self.inner_activation = activations.get(inner_activation)
    # Regularizers for the input, recurrent, read/write-head and output weights.
    self.W_regularizer = regularizers.get(W_regularizer)
    self.U_regularizer = regularizers.get(U_regularizer)
    self.R_regularizer = regularizers.get(R_regularizer)
    self.b_regularizer = regularizers.get(b_regularizer)
    self.W_y_regularizer = regularizers.get(W_y_regularizer)
    self.W_xi_regularizer = regularizers.get(W_xi_regularizer)
    self.W_r_regularizer = regularizers.get(W_r_regularizer)
    self.dropout_W, self.dropout_U = dropout_W, dropout_U
    if self.dropout_W or self.dropout_U:
        self.uses_learning_phase = True
    super(NTM, self).__init__(**kwargs)
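# A minimal usage sketch (an assumption, not code from the article): it presumes
# the full NTM class behaves like a standard Keras 1.x recurrent layer, so the
# constructor arguments above map directly onto the memory and controller sizes;
# the shapes, optimizer and loss below are illustrative only.
from keras.models import Sequential

model = Sequential()
model.add(NTM(output_dim=8, memory_dim=128, memory_size=20,
              controller_output_dim=100, num_read_head=1, num_write_head=1,
              return_sequences=True, input_shape=(None, 8)))  # (timesteps, input_dim)
model.compile(optimizer='rmsprop', loss='binary_crossentropy')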