from typing import Dict

import numpy
import torch
from torch.autograd import Variable


def as_tensor(self,
              padding_lengths: Dict[str, int],
              cuda_device: int = -1,
              for_training: bool = True) -> torch.Tensor:
    max_shape = [padding_lengths["dimension_{}".format(i)]
                 for i in range(len(padding_lengths))]
    # Start from an array filled entirely with the padding value; the real
    # data is copied into its top-left corner below.
    return_array = numpy.ones(max_shape, "float32") * self.padding_value
    # If the tensor has a different shape from the largest tensor, pad dimensions with zeros to
    # form the right shaped list of slices for insertion into the final tensor.
    slicing_shape = list(self.array.shape)
    if len(self.array.shape) < len(max_shape):
        slicing_shape = slicing_shape + [0 for _ in range(len(max_shape) - len(self.array.shape))]
    # NumPy expects a tuple (not a list) of slices for multi-dimensional indexing.
    slices = tuple(slice(0, x) for x in slicing_shape)
    return_array[slices] = self.array
    # Variable/volatile is the legacy (pre-0.4) PyTorch autograd API;
    # volatile=True disables gradient tracking at inference time.
    tensor = Variable(torch.from_numpy(return_array), volatile=not for_training)
    return tensor if cuda_device == -1 else tensor.cuda(cuda_device)
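To make the padding step concrete, here is a minimal sketch of the slicing logic in isolation. The `_Stub` class and the example shapes are assumptions for illustration only; it just mimics the `array` and `padding_value` attributes the method expects on `self`.

# A hypothetical stand-in for the field object, for demonstration only.
import numpy

class _Stub:
    def __init__(self, array, padding_value=0):
        self.array = array
        self.padding_value = padding_value

field = _Stub(numpy.array([[1, 2], [3, 4]], dtype="float32"), padding_value=-1)
padding_lengths = {"dimension_0": 3, "dimension_1": 4}

# Same core logic as as_tensor: fill with the padding value, then
# copy the real data into the top-left corner via a tuple of slices.
max_shape = [padding_lengths["dimension_{}".format(i)]
             for i in range(len(padding_lengths))]
return_array = numpy.ones(max_shape, "float32") * field.padding_value
slices = tuple(slice(0, x) for x in field.array.shape)
return_array[slices] = field.array
print(return_array)
# [[ 1.  2. -1. -1.]
#  [ 3.  4. -1. -1.]
#  [-1. -1. -1. -1.]]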