# Imports needed by this snippet (Keras functional API):
from keras.models import Model
from keras.layers import (Input, Dense, Dropout, Activation,
                          BatchNormalization, LSTM, Bidirectional)
from keras.regularizers import l2


def lstm_word_model(self):
    # Input: a sequence of pre-computed word embeddings.
    embed_input = Input(shape=(self.opt['max_sequence_length'], self.opt['embedding_dim']))
    # Bidirectional LSTM encoder with L2 weight regularization and input dropout.
    output = Bidirectional(LSTM(self.opt['units_lstm'], activation='tanh',
                                kernel_regularizer=l2(self.opt['regul_coef_lstm']),
                                dropout=self.opt['dropout_rate']))(embed_input)
    output = Dropout(rate=self.opt['dropout_rate'])(output)
    # Fully connected block: Dense -> BatchNorm -> ReLU -> Dropout.
    output = Dense(self.opt['dense_dim'], activation=None,
                   kernel_regularizer=l2(self.opt['regul_coef_dense']))(output)
    output = BatchNormalization()(output)
    output = Activation('relu')(output)
    output = Dropout(rate=self.opt['dropout_rate'])(output)
    # Output head: single unit -> BatchNorm -> sigmoid probability.
    output = Dense(1, activation=None,
                   kernel_regularizer=l2(self.opt['regul_coef_dense']))(output)
    output = BatchNormalization()(output)
    act_output = Activation('sigmoid')(output)
    model = Model(inputs=embed_input, outputs=act_output)
    return model
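A minimal usage sketch follows. It is not from the original code: the hyperparameter values, the SimpleNamespace stand-in for the owning object, and the choice of Adam with binary cross-entropy are all illustrative assumptions that merely match the model's single sigmoid output and its expected input shape of (max_sequence_length, embedding_dim).

import numpy as np
from types import SimpleNamespace

# Illustrative hyperparameters (not taken from the original post).
opt = {
    'max_sequence_length': 100,   # tokens per text
    'embedding_dim': 300,         # e.g. pre-trained word-vector size
    'units_lstm': 128,
    'dense_dim': 100,
    'dropout_rate': 0.5,
    'regul_coef_lstm': 1e-4,
    'regul_coef_dense': 1e-4,
}

# lstm_word_model only reads self.opt, so a simple attribute holder suffices here.
holder = SimpleNamespace(opt=opt)
model = lstm_word_model(holder)

model.compile(optimizer='adam',
              loss='binary_crossentropy',   # single sigmoid output
              metrics=['accuracy'])

# Dummy batch of pre-embedded sequences: (batch, max_sequence_length, embedding_dim).
x = np.random.rand(8, opt['max_sequence_length'], opt['embedding_dim']).astype('float32')
probs = model.predict(x)                    # shape (8, 1), values in [0, 1]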