# Module-level imports required by this method:
from keras.models import Sequential
from keras.layers import (Embedding, Dropout, Conv1D, MaxPooling1D,
                          Flatten, Dense, Activation)

def baseModel(self, nb_filter=250, filter_length=3, hidden_dims=125):
    model = Sequential()
    # We start off with an efficient embedding layer, which maps
    # our vocab indices into embedding_dims dimensions.
    model.add(Embedding(self.max_words + self.index_from, self.embedding_dims,
                        input_length=self.max_length))
    model.add(Dropout(0.25))
    # We add a Conv1D layer, which will learn nb_filter word-group
    # filters of size filter_length; kernel_size is the filter size and
    # strides is the step, analogous to a 2D CNN.
    model.add(Conv1D(filters=nb_filter,
                     kernel_size=filter_length,
                     padding='valid',
                     activation='relu',
                     strides=1))
    # We use standard max pooling, halving the output of the previous layer:
    model.add(MaxPooling1D(pool_size=2))
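    # Shape check (illustrative only; self.max_length=400 and
    # embedding_dims=50 are hypothetical values, not set by this snippet):
    # Conv1D with padding='valid' yields 400 - 3 + 1 = 398 timesteps,
    # pool_size=2 halves that to 199, so Flatten below emits
    # 199 * 250 = 49750 features.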
    # We flatten the output of the conv layer
    # so that we can add a vanilla dense layer:
    model.add(Flatten())
    # We add a vanilla hidden layer:
    model.add(Dense(hidden_dims))
    model.add(Dropout(0.25))
    model.add(Activation('relu'))
    # We project onto a single-unit output layer and squash it with a sigmoid:
    model.add(Dense(1))
    model.add(Activation('sigmoid'))
    model.compile(loss='binary_crossentropy',
                  optimizer='rmsprop')
    return model
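
A minimal usage sketch (hypothetical names: classifier is an instance of the enclosing class, whose constructor is assumed to set max_words, index_from, embedding_dims, and max_length; x_train/x_test are integer sequences already padded to max_length):

model = classifier.baseModel()
model.fit(x_train, y_train,
          batch_size=32,
          epochs=2,
          validation_data=(x_test, y_test))
score = model.evaluate(x_test, y_test, batch_size=32)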