def exe_maxru(length, num_units, position, binominal):
    """Build a single-layer MAXRU model and hand it to ``train``.

    Constructs an input layer of shape ``(batch, length, 1)``, a
    ``MAXRULayer`` with Glorot-initialized gates that returns only its
    final hidden state, and a sigmoid ``DenseLayer`` with one output
    unit, then delegates optimization to the module-level ``train``.

    Args:
        length: Sequence length of each input sample.
        num_units: Number of hidden units in the MAXRU layer.
        position: Forwarded to ``train`` — TODO confirm semantics
            (presumably the position of the marker in the sequence task).
        binominal: Forwarded to ``train`` — TODO confirm semantics
            (presumably selects the binomial variant of the task; note
            the spelling is kept as-is since ``train`` is defined
            elsewhere in this file).

    Returns:
        Whatever ``train`` returns for the assembled network.
    """
    batch_size = BATCH_SIZE
    # Symbolic Theano variables: float32/float64 3-tensor inputs, int targets.
    input_var = T.tensor3(name='inputs', dtype=theano.config.floatX)
    target_var = T.ivector(name='targets')
    layer_input = lasagne.layers.InputLayer(shape=(None, length, 1),
                                            input_var=input_var, name='input')

    def _glorot_gate(W_cell=None, **kwargs):
        # All gates share Glorot-uniform input/hidden weights; only the
        # peephole (W_cell), bias, and nonlinearity differ per gate.
        return Gate(W_in=lasagne.init.GlorotUniform(),
                    W_hid=lasagne.init.GlorotUniform(),
                    W_cell=W_cell, **kwargs)

    time_updategate = _glorot_gate()
    time_update = _glorot_gate(b=lasagne.init.Constant(0.),
                               nonlinearity=nonlinearities.tanh)
    # Reset/update gates use peephole connections (W_cell is Glorot, not None).
    resetgate = _glorot_gate(W_cell=lasagne.init.GlorotUniform())
    updategate = _glorot_gate(W_cell=lasagne.init.GlorotUniform())
    hidden_update = _glorot_gate(b=lasagne.init.Constant(0.),
                                 nonlinearity=nonlinearities.tanh)

    # only_return_final=True: the layer emits just the last time step's
    # hidden state, which feeds the single-unit sigmoid classifier.
    layer_taru = MAXRULayer(layer_input, num_units, max_length=length,
                            P_time=lasagne.init.GlorotUniform(),
                            nonlinearity=nonlinearities.tanh,
                            resetgate=resetgate, updategate=updategate,
                            hidden_update=hidden_update,
                            time_updategate=time_updategate,
                            time_update=time_update,
                            only_return_final=True, name='MAXRU', p=0.)
    layer_output = DenseLayer(layer_taru, num_units=1,
                              nonlinearity=nonlinearities.sigmoid,
                              name='output')
    return train(layer_output, input_var, target_var, batch_size,
                 length, position, binominal)
# NOTE(review): the two lines below are page-navigation artifacts from a web
# scrape ("评论列表" = comment list, "文章目录" = table of contents); commented
# out so the module remains importable. Safe to delete entirely.
# 评论列表
# 文章目录