def get_model(self, num_features):
    embedding_dims = 128
    nb_filter = 250
    filter_length = 8
    drop = 0.2
    model = Sequential()
    # we start off with an efficient embedding layer which maps
    # our vocab indices into embedding_dims dimensions
    model.add(Embedding(self.vocab_size,
                        embedding_dims,
                        input_length=num_features[0],
                        dropout=0.2))
    # we add a Convolution1D, which will learn nb_filter
    # word group filters of size filter_length:
    model.add(Convolution1D(nb_filter=nb_filter,
                            filter_length=filter_length,
                            border_mode='valid',
                            activation='relu',
                            subsample_length=1))
    # we use max-over-time pooling by defining a python function to use
    # in a Lambda layer
    def max_1d(X):
        return K.max(X, axis=1)
    model.add(Lambda(max_1d, output_shape=(nb_filter,)))
    model.add(Dropout(drop))
    # two fully connected layers with batch normalization and ReLU activations
    model.add(Dense(1024, init='glorot_uniform'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Dropout(drop))
    model.add(Dense(1024, init='glorot_uniform'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    # we project onto a single unit output layer, and squash it with a sigmoid:
    model.add(Dense(1))
    model.add(Activation('sigmoid'))
    return model
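
A model built this way would normally be compiled with a binary cross-entropy loss and trained on padded integer sequences with 0/1 labels. The snippet below is a minimal usage sketch in the same Keras 1.x API; the names classifier, maxlen, X_train, y_train, X_test, y_test and the batch size / epoch count are illustrative assumptions, not part of the original code.

# usage sketch (assumed names and hyperparameters, shown for illustration only)
model = classifier.get_model(num_features=(maxlen,))  # classifier: instance of the enclosing class
model.compile(loss='binary_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
model.fit(X_train, y_train,
          batch_size=32,
          nb_epoch=5,
          validation_data=(X_test, y_test))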