def test_elmo_4D_input(self):
    sentences = [[['The', 'sentence', '.'],
                  ['ELMo', 'helps', 'disambiguate', 'ELMo', 'from', 'Elmo', '.']],
                 [['1', '2'], ['1', '2', '3', '4', '5', '6', '7']],
                 [['1', '2', '3', '4', '50', '60', '70'], ['The']]]
    all_character_ids = []
    for batch_sentences in sentences:
        all_character_ids.append(self._sentences_to_ids(batch_sentences))

    # Stack the three (2, 7, 50) batches into one 4D input of shape (2, 3, 7, 50).
    character_ids = torch.cat([ids.unsqueeze(1) for ids in all_character_ids], dim=1)
    embeddings_4d = self.elmo(character_ids)

    # Run the individual 3D batches, resetting the LSTM states between them.
    embeddings_3d = []
    for char_ids in all_character_ids:
        self.elmo._elmo_lstm._elmo_lstm.reset_states()
        embeddings_3d.append(self.elmo(char_ids))

    # The k-th slice of the 4D output should match the k-th 3D batch's output.
    for k in range(3):
        numpy.testing.assert_array_almost_equal(
            embeddings_4d['elmo_representations'][0][:, k, :, :].data.numpy(),
            embeddings_3d[k]['elmo_representations'][0].data.numpy()
        )
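Outside of the test class, the same 4D-versus-3D idea can be sketched with the public `Elmo` module and the `batch_to_ids` helper from `allennlp.modules.elmo`. The snippet below is a minimal, hypothetical standalone version, not the test's actual setup: the options/weights paths are placeholders, and `batch_to_ids` stands in for the test's `_sentences_to_ids` helper (stacking works here only because every mini-batch pads to the same maximum sentence length of 7 tokens).

import torch
from allennlp.modules.elmo import Elmo, batch_to_ids

# Placeholder paths: substitute real pretrained ELMo options/weights files.
options_file = "elmo_options.json"
weight_file = "elmo_weights.hdf5"
elmo = Elmo(options_file, weight_file, num_output_representations=1, dropout=0.0)

sentences = [[['The', 'sentence', '.'],
              ['ELMo', 'helps', 'disambiguate', 'ELMo', 'from', 'Elmo', '.']],
             [['1', '2'], ['1', '2', '3', '4', '5', '6', '7']],
             [['1', '2', '3', '4', '50', '60', '70'], ['The']]]

# Convert each mini-batch of tokenized sentences to character ids, shape (2, 7, 50).
all_character_ids = [batch_to_ids(batch) for batch in sentences]

# Stack along a new dimension to build a 4D input of shape (2, 3, 7, 50).
character_ids_4d = torch.cat([ids.unsqueeze(1) for ids in all_character_ids], dim=1)

# Elmo handles the extra leading dimension; output['elmo_representations'][0]
# then has shape (2, 3, 7, 1024).
output = elmo(character_ids_4d)

Because `num_output_representations=1`, `output['elmo_representations']` is a one-element list, which is why the test indexes `['elmo_representations'][0]`.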