# Assumes MAX_LEN, HIDDEN_SIZE, BATCH_SIZE, EPOCHS, nb_filter, filter_length
# and get_embedding_input_output() are defined earlier in the post.
from keras.preprocessing import sequence
from keras.layers import Dense, Dropout, Activation, Flatten, merge
from keras.layers import Convolution1D, MaxPooling1D
from keras.models import Model


def cnn_combine_train(X_train_list, y_train, vocab_size):
    N = len(X_train_list)
    X_train_list = [sequence.pad_sequences(x_train, maxlen=MAX_LEN)
                    for x_train in X_train_list]
    # Build one embedding input branch per feature channel.
    input_list = []
    out_list = []
    for i in range(N):
        input, out = get_embedding_input_output('f%d' % i, vocab_size)
        input_list.append(input)
        out_list.append(out)
    # Concatenate the embedded channels into a single sequence tensor.
    x = merge(out_list, mode='concat')
    x = Dropout(0.25)(x)
    # We add a Convolution1D, which will learn nb_filter
    # word-group filters of size filter_length:
    x = Convolution1D(nb_filter=nb_filter,
                      filter_length=filter_length,
                      border_mode='valid',
                      activation='relu',
                      subsample_length=1)(x)
    # We use standard max pooling (halving the output of the previous layer):
    x = MaxPooling1D(pool_length=2)(x)
    # We flatten the output of the conv layer,
    # so that we can add a vanilla dense layer:
    x = Flatten()(x)
    # We add a vanilla hidden layer:
    x = Dense(HIDDEN_SIZE)(x)
    x = Dropout(0.25)(x)
    x = Activation('relu')(x)
    # We project onto a single-unit output layer and squash it with a sigmoid:
    x = Dense(1)(x)
    x = Activation('sigmoid')(x)
    model = Model(input=input_list, output=x)
    model.compile(loss='binary_crossentropy', optimizer='rmsprop')
    model.fit(X_train_list, y_train, batch_size=BATCH_SIZE, nb_epoch=EPOCHS)
    return model
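
For context, here is a minimal sketch of how this function might be wired up. The get_embedding_input_output helper is defined elsewhere in the post; the version below is an assumed reconstruction (one Input plus one Embedding branch per feature channel, with a hypothetical EMBEDDING_SIZE constant and hypothetical training-data names), shown only so the example is self-contained.

from keras.layers import Input, Embedding

EMBEDDING_SIZE = 128  # hypothetical value; use whatever the post defines


def get_embedding_input_output(name, vocab_size):
    # One branch per feature channel: an integer-id input of length MAX_LEN
    # mapped to a (MAX_LEN, EMBEDDING_SIZE) sequence of dense vectors.
    input = Input(shape=(MAX_LEN,), dtype='int32', name=name)
    out = Embedding(vocab_size, EMBEDDING_SIZE, input_length=MAX_LEN)(input)
    return input, out

# Example call: two views of the same texts (e.g. word ids and char ids),
# each a list of integer-id sequences; a single vocab_size is shared here
# only for simplicity.
model = cnn_combine_train([word_ids_train, char_ids_train], y_train, vocab_size)

Each channel is padded to MAX_LEN and embedded separately, and since merge with mode='concat' joins on the last axis by default, the convolution sees a tensor of shape (batch, MAX_LEN, N * EMBEDDING_SIZE).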