# Keras 1.x-style code. ReverseLayer2 and LSTMDecoder_tag are custom layers
# defined elsewhere in this project.
from keras.models import Sequential
from keras.layers import Embedding, Dropout, LSTM, Merge, Activation, TimeDistributedDense


def creat_binary_tag_LSTM(sourcevocabsize, targetvocabsize, source_W,
                          input_seq_lenth, output_seq_lenth,
                          hidden_dim, emd_dim,
                          loss='categorical_crossentropy', optimizer='rmsprop'):
    encoder_a = Sequential()
    encoder_b = Sequential()
    encoder_c = Sequential()

    # Shared embedding layer initialized with the pretrained weights source_W;
    # mask_zero=True treats index 0 as padding.
    l_A_embedding = Embedding(input_dim=sourcevocabsize + 1,
                              output_dim=emd_dim,
                              input_length=input_seq_lenth,
                              mask_zero=True,
                              weights=[source_W])
    encoder_a.add(l_A_embedding)
    encoder_a.add(Dropout(0.3))
    encoder_b.add(l_A_embedding)
    encoder_b.add(Dropout(0.3))
    encoder_c.add(l_A_embedding)  # encoder_c is built but not used below

    model = Sequential()

    # Forward LSTM and backward LSTM over the same embedded input.
    encoder_a.add(LSTM(hidden_dim, return_sequences=True))
    encoder_b.add(LSTM(hidden_dim, return_sequences=True, go_backwards=True))

    # Re-reverse the backward LSTM's output so the two sequences line up
    # time step by time step before concatenation.
    encoder_rb = Sequential()
    encoder_rb.add(ReverseLayer2(encoder_b))

    # Concatenate forward and re-reversed backward states into a bidirectional encoding.
    encoder_ab = Merge([encoder_a, encoder_rb], mode='concat')
    model.add(encoder_ab)

    # Decoder that emits one hidden vector per output time step.
    decodelayer = LSTMDecoder_tag(hidden_dim=hidden_dim, output_dim=hidden_dim,
                                  input_length=input_seq_lenth,
                                  output_length=output_seq_lenth,
                                  state_input=False,
                                  return_sequences=True)
    model.add(decodelayer)

    # Per-time-step projection onto the tag vocabulary, followed by softmax.
    model.add(TimeDistributedDense(targetvocabsize + 1))
    model.add(Activation('softmax'))
    model.compile(loss=loss, optimizer=optimizer)
    return model
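
A minimal usage sketch of the builder above, assuming the custom layers (ReverseLayer2, LSTMDecoder_tag) are importable; the vocabulary sizes, sequence length, and the random matrix standing in for pretrained embeddings are placeholders, not values from the original project.

import numpy as np

source_vocab = 5000   # placeholder source vocabulary size
target_tags = 20      # placeholder tag-set size
emb_dim = 100
seq_len = 50

# Random matrix in place of real pretrained word vectors, shaped to match
# the Embedding layer: (sourcevocabsize + 1, emd_dim).
pretrained_W = np.random.uniform(-0.25, 0.25, (source_vocab + 1, emb_dim))

model = creat_binary_tag_LSTM(sourcevocabsize=source_vocab,
                              targetvocabsize=target_tags,
                              source_W=pretrained_W,
                              input_seq_lenth=seq_len,
                              output_seq_lenth=seq_len,
                              hidden_dim=200,
                              emd_dim=emb_dim)
model.summary()

# Merge-based Keras 1.x models take one input array per branch, so training
# would look roughly like:
# model.fit([x_train, x_train], y_train_onehot, batch_size=32, nb_epoch=10)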