def features_to_minibatch(self, features, sentences, max_feature_length, max_sentence_length, gpu=True):
    """Build one padded minibatch from raw features and target sentences.

    Delegates padding/encoding to ``self.processor``, optionally applies
    running mean/std normalization to the inputs, and (by default) moves
    every array to the GPU.

    Args:
        features: per-utterance feature arrays (shape/dtype defined by the
            processor — assumed time axis is last, since only ``[..., :len]``
            is fed to the statistics; TODO confirm).
        sentences: target token sequences matching ``features``.
        max_feature_length: padded length for the feature axis.
        max_sentence_length: padded length for the token axis.
        gpu: when True, transfer all returned arrays to the GPU as
            float32 (inputs) / int32 (labels and lengths).

    Returns:
        Tuple ``(x_batch, x_length_batch, t_batch, t_length_batch,
        bigram_batch)`` — same contents the processor produced, possibly
        normalized and possibly on the GPU.
    """
    batch = self.processor.features_to_minibatch(
        features, sentences, max_feature_length, max_sentence_length,
        self.token_ids, self.id_blank)
    x_batch, x_length_batch, t_batch, t_length_batch, bigram_batch = batch

    # Normalization is active only once statistics have been accumulated
    # (presumably seeded elsewhere — this branch never runs from zero).
    if self.stats_total > 0:
        # Feed only the valid (unpadded) portion of each sample into the
        # running statistics before fetching mean/std.
        for sample, valid_length in zip(x_batch, x_length_batch):
            self._update_stats_recursively(sample[..., :valid_length])
        mean, std = self.get_mean_and_std()
        x_batch = (x_batch - mean) / std

    if gpu:
        x_batch = cuda.to_gpu(x_batch.astype(np.float32))
        t_batch = cuda.to_gpu(t_batch.astype(np.int32))
        bigram_batch = cuda.to_gpu(bigram_batch.astype(np.int32))
        x_length_batch = cuda.to_gpu(np.asarray(x_length_batch).astype(np.int32))
        t_length_batch = cuda.to_gpu(np.asarray(t_length_batch).astype(np.int32))

    return x_batch, x_length_batch, t_batch, t_length_batch, bigram_batch