# Keras 1.x API (merge, nb_epoch, pool_length); MAX_LEN, HIDDEN_SIZE,
# BATCH_SIZE, EPOCHS and get_embedding_input_output are assumed to be
# defined elsewhere in this post.
from keras.preprocessing import sequence
from keras.models import Model
from keras.layers import (Activation, AveragePooling1D, Dense, Flatten,
                          GRU, LSTM, Permute, RepeatVector,
                          TimeDistributed, merge)

def lstm_memory_train(X_train_list, y_train, vocab_size):
    N = len(X_train_list)
    # Pad every feature's sequences to the same fixed length
    X_train_list = [sequence.pad_sequences(x_train, maxlen=MAX_LEN)
                    for x_train in X_train_list]

    # One embedding branch per input feature
    input_list = []
    out_list = []
    for i in range(N):
        inp, out = get_embedding_input_output('f%d' % i, vocab_size)
        input_list.append(inp)
        out_list.append(out)

    # Concatenate the embedded features and encode them with an LSTM
    x = merge(out_list, mode='concat')
    lstm_out = LSTM(HIDDEN_SIZE, return_sequences=True)(x)

    # A single GRU shared across both memory hops
    lstm_share = GRU(HIDDEN_SIZE, return_sequences=True)

    x = lstm_out
    for i in range(2):  # two attention/memory hops
        # Score each timestep, normalize with a softmax, then expand the
        # weights back to (timesteps, HIDDEN_SIZE) so they can gate lstm_out
        att = TimeDistributed(Dense(1))(x)
        att = Flatten()(att)
        att = Activation(activation='softmax')(att)
        att = RepeatVector(HIDDEN_SIZE)(att)
        att = Permute((2, 1))(att)

        mer = merge([att, lstm_out], mode='mul')      # attention-weighted encoding
        mer = merge([mer, out_list[-1]], mode='mul')  # gate with the last feature's
                                                      # embedding (its width must equal
                                                      # HIDDEN_SIZE for this to work)
        z = merge([lstm_out, mer], mode='sum')        # residual connection
        x = lstm_share(z)

    # Pool over time and predict a binary label
    hid = AveragePooling1D(pool_length=2)(x)
    hid = Flatten()(hid)
    # hid = merge([hid, out_list[-1]], mode='concat')
    main_loss = Dense(1, activation='sigmoid', name='main_output')(hid)

    model = Model(input=input_list, output=main_loss)
    model.compile(loss='binary_crossentropy', optimizer='rmsprop')
    model.fit(X_train_list, y_train, batch_size=BATCH_SIZE, nb_epoch=EPOCHS)
    return model
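
get_embedding_input_output is called above but not defined in this section. A minimal sketch of what it presumably does, assuming one Input per feature fed through its own Embedding layer; EMBED_SIZE is an assumed constant, set equal to HIDDEN_SIZE so the elementwise 'mul' merge above is shape-compatible:

from keras.layers import Input, Embedding

EMBED_SIZE = HIDDEN_SIZE  # assumption: must match HIDDEN_SIZE for the 'mul' merge

def get_embedding_input_output(name, vocab_size):
    # One integer sequence of length MAX_LEN per feature branch
    inp = Input(shape=(MAX_LEN,), dtype='int32', name=name)
    out = Embedding(vocab_size, EMBED_SIZE, input_length=MAX_LEN)(inp)
    return inp, out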
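
An illustrative call, with made-up shapes: three integer-encoded feature columns over a shared vocabulary, 1000 samples, and binary labels.

import numpy as np

vocab_size = 5000
X_train_list = [np.random.randint(1, vocab_size, size=(1000, MAX_LEN))
                for _ in range(3)]
y_train = np.random.randint(0, 2, size=(1000,))
model = lstm_memory_train(X_train_list, y_train, vocab_size)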