import os

import numpy as np
from keras.layers import Embedding


def prep_embd(self):
    # Add an Embedding layer that maps word indices to pre-trained GloVe vectors
    if not os.path.exists('GloVe_' + self.dataset + '.npy'):
        self.load_GloVe()
    embed_matrix = np.load('GloVe_' + self.dataset + '.npy')
    self.Embed = Embedding(input_dim=self.Vocab,
                           output_dim=self.EmbeddingSize,
                           input_length=self.SentMaxLen,
                           trainable=False,  # keep the GloVe weights frozen
                           weights=[embed_matrix],
                           name='embed_snli')
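
# prep_embd() calls self.load_GloVe(), which is not shown here. Below is a
# minimal sketch of what it might do, assuming the class keeps a word->index
# dict (self.word2idx is a hypothetical name) and that the GloVe file path is
# passed in (the default below is illustrative, not the repo's setting).
def load_GloVe(self, glove_path='glove.840B.300d.txt'):
    # Rows default to zeros, so out-of-vocabulary words get a zero vector
    embed_matrix = np.zeros((self.Vocab, self.EmbeddingSize), dtype='float32')
    with open(glove_path, encoding='utf-8') as f:
        for line in f:
            parts = line.rstrip().split(' ')
            if len(parts) != self.EmbeddingSize + 1:
                continue  # skip malformed lines
            word, vec = parts[0], parts[1:]
            idx = self.word2idx.get(word)  # hypothetical vocab mapping
            if idx is not None and idx < self.Vocab:
                embed_matrix[idx] = np.asarray(vec, dtype='float32')
    # Cache the matrix so prep_embd() can np.load() it on later runs
    np.save('GloVe_' + self.dataset + '.npy', embed_matrix)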
# TODO: Decomposable Attention model (Ankur P. Parikh et al., 2016); see the sketch below
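
# A minimal sketch of the Attend / Compare / Aggregate structure from Parikh
# et al. (2016), written against the Keras 2 functional API. The
# hyper-parameters (units=200, dropout=0.2, 3 classes) are illustrative
# choices, not the paper's exact configuration, and build_model is a
# hypothetical method name, not the repo's.
from keras import backend as K
from keras.layers import Dense, Dropout, Input, Lambda, TimeDistributed, concatenate
from keras.models import Model

def build_model(self, units=200, n_classes=3):
    premise = Input(shape=(self.SentMaxLen,), dtype='int32')
    hypothesis = Input(shape=(self.SentMaxLen,), dtype='int32')
    a, b = self.Embed(premise), self.Embed(hypothesis)  # (batch, len, d)

    # Attend: unnormalized alignment scores e[i, j] = F(a_i) . F(b_j)
    F = TimeDistributed(Dense(units, activation='relu'))
    e = Lambda(lambda x: K.batch_dot(x[0], x[1], axes=[2, 2]))([F(a), F(b)])

    # Soft-align each sentence against the other
    beta = Lambda(lambda x: K.batch_dot(K.softmax(x[0]), x[1]))([e, b])
    alpha = Lambda(lambda x: K.batch_dot(
        K.softmax(K.permute_dimensions(x[0], (0, 2, 1))), x[1]))([e, a])

    # Compare: process each word together with its aligned phrase
    G = TimeDistributed(Dense(units, activation='relu'))
    v1, v2 = G(concatenate([a, beta])), G(concatenate([b, alpha]))

    # Aggregate: sum over time, then classify the concatenated summaries
    reduce_sum = Lambda(lambda x: K.sum(x, axis=1))
    h = Dense(units, activation='relu')(
        Dropout(0.2)(concatenate([reduce_sum(v1), reduce_sum(v2)])))
    out = Dense(n_classes, activation='softmax')(h)
    self.model = Model(inputs=[premise, hypothesis], outputs=out)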