def __get_base_model(maxlen, max_features, word_idx, use_pretrained_embeddings=False):
"""
:param maxlen: sentence size. Longer sentences will be truncated.
:param max_features: vocab size.
:param word_idx: {word1: index1, word2: index2}
:return:
"""
print >> sys.stderr, 'Build model...'
model = Sequential()
# we start off with an efficient embedding layer which maps
# our vocab indices into embedding_dims dimensions
if use_pretrained_embeddings:
print >> sys.stderr, 'Reading embeddings...'
embedding_weights = get_embedding_weights(word_idx)
model.add(Embedding(max_features,
embedding_dims,
input_length=maxlen,
dropout=0.0,
weights=[embedding_weights]))
else:
model.add(Embedding(max_features,
embedding_dims,
input_length=maxlen,
dropout=0.0))
# we add a Convolution1D, which will learn nb_filter
# word group filters of size filter_length:
model.add(Convolution1D(nb_filter=nb_filter,
filter_length=filter_length,
border_mode='valid',
activation='relu',
subsample_length=1))
# we use max over time pooling by defining a python function to use
# in a Lambda layer
model.add(Lambda(max_1d, output_shape=(nb_filter,)))
# We add a vanilla hidden layer:
model.add(Dense(hidden_dims))
model.add(Dropout(0.2))
model.add(Activation('relu'))
model.add(Dense(1))
return model
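

# --- illustrative usage sketch (assumption, not part of the original file) ---
# The helpers referenced above (max_1d, get_embedding_weights) and the
# hyper-parameters (embedding_dims, nb_filter, filter_length, hidden_dims)
# are defined elsewhere in this module. As an assumption, the max-over-time
# pooling helper used in the Lambda layer is typically written like this in
# Keras 1.x:
#
#     from keras import backend as K
#
#     def max_1d(x):
#         # collapse the time axis, keeping the maximum activation per filter
#         return K.max(x, axis=1)
#
# and the returned model would usually be finished off and trained roughly as
# follows (the sigmoid output and binary cross-entropy loss are assumptions):
#
#     model = __get_base_model(maxlen, max_features, word_idx)
#     model.add(Activation('sigmoid'))
#     model.compile(loss='binary_crossentropy',
#                   optimizer='adam',
#                   metrics=['accuracy'])
#     model.fit(X_train, y_train,
#               batch_size=32,
#               nb_epoch=2,
#               validation_data=(X_test, y_test))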