def rand_softmax(phi: T.FloatTensor) -> T.FloatTensor:
    """
    Draw random 1-hot samples according to softmax probabilities.

    Given an effective field vector phi, the softmax probabilities are
    p = exp(phi) / sum(exp(phi)), and a 1-hot vector x is sampled
    according to p.

    Args:
        phi (tensor (batch_size, num_units)): the effective field

    Returns:
        tensor (batch_size, num_units): random 1-hot samples
            from the softmax distribution.

    """
    # `matrix`, `nl`, and `rand` are the library's tensor-backend helpers.
    max_index = matrix.shape(phi)[1] - 1
    probs = nl.softmax(phi)
    # Inverse-CDF sampling: count how many cumulative probabilities fall
    # below a uniform draw per row; that count is the sampled index.
    cum_probs = torch.cumsum(probs, 1)
    ref_probs = rand((len(phi), 1))
    on_units = matrix.int_tensor(
        matrix.tsum(cum_probs < ref_probs, axis=1, keepdims=True))
    matrix.clip_inplace(on_units, a_min=0, a_max=max_index)
    # Scatter 1s at the sampled indices to build the 1-hot vectors.
    return matrix.zeros_like(phi).scatter_(1, on_units, 1)
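The same inverse-CDF trick can be written with plain PyTorch calls, without the library's backend wrappers. A minimal sketch (the name sample_one_hot is illustrative, not from the snippet above):

import torch
import torch.nn.functional as F

def sample_one_hot(phi: torch.Tensor) -> torch.Tensor:
    # Softmax probabilities per row, then their running sums.
    probs = F.softmax(phi, dim=1)
    cum_probs = torch.cumsum(probs, dim=1)
    # One uniform draw per row; counting how many cumulative
    # probabilities fall below it yields the sampled index.
    ref = torch.rand(phi.shape[0], 1)
    idx = (cum_probs < ref).sum(dim=1, keepdim=True)
    idx = idx.clamp_(max=phi.shape[1] - 1)
    return torch.zeros_like(phi).scatter_(1, idx, 1.0)

samples = sample_one_hot(torch.randn(4, 5))  # four 1-hot rows of length 5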
Python cumsum() usage examples: source code
def test_cumsum(self):
    x = torch.rand(100, 100)
    res1 = torch.cumsum(x, 1)
    res2 = torch.Tensor()
    # Write into a preallocated output tensor via the out= keyword.
    torch.cumsum(x, 1, out=res2)
    self.assertEqual(res1, res2)
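Outside the test harness, the call is simply a running sum along a dimension. For example:

import torch

t = torch.tensor([[1., 2., 3.],
                  [4., 5., 6.]])
print(torch.cumsum(t, dim=1))
# tensor([[ 1.,  3.,  6.],
#         [ 4.,  9., 15.]])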
def _consecutive(self, size, start=1):
    # Build a tensor of consecutive integers (start, start + 1, ...)
    # by taking the cumulative sum of a tensor of ones.
    sequence = torch.ones(int(torch.Tensor(size).prod(0)[0])).cumsum(0)
    sequence.add_(start - 1)
    return sequence.resize_(*size)
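The same helper can be written more directly with torch.arange; a minimal equivalent sketch:

import torch

def consecutive(size, start=1):
    n = int(torch.tensor(size).prod())
    return torch.arange(start, start + n).reshape(*size)

print(consecutive((2, 3)))  # tensor([[1, 2, 3], [4, 5, 6]])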
def forward(self, input):
    return torch.cumsum(input, dim=self.dim)

def backward(self, grad_output):
    # The gradient of an inclusive cumsum is a reversed (right-to-left)
    # cumsum of the incoming gradient, computed here as a scan of
    # -grad_output, shifted by the total, plus grad_output itself.
    grad_input = torch.cumsum(-grad_output, dim=self.dim)
    end_idx = grad_input.size(self.dim) - 1
    grad_sum = grad_input.narrow(self.dim, end_idx, 1)
    grad_input -= grad_sum.expand_as(grad_input)
    grad_input += grad_output
    return grad_input
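A quick check that this reversed-cumsum rule matches what autograd computes for the built-in op:

import torch

x = torch.randn(5, requires_grad=True)
g = torch.randn(5)
torch.cumsum(x, dim=0).backward(g)
# The gradient equals the reverse cumsum of g (flip, scan, flip back).
reference = torch.flip(torch.cumsum(torch.flip(g, [0]), 0), [0])
assert torch.allclose(x.grad, reference)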
def user_representation(self, item_sequences):
    """
    Compute user representation from a given sequence.

    Returns
    -------

    tuple (all_representations, final_representation)
        The first element contains all representations from step
        -1 (no items seen) to t - 1 (all but the last items seen).
        The second element contains the final representation
        at step t (all items seen). This final state can be used
        for prediction or evaluation.
    """
    # Make the embedding dimension the channel dimension.
    sequence_embeddings = (self.item_embeddings(item_sequences)
                           .permute(0, 2, 1))
    # Add a trailing dimension of 1.
    sequence_embeddings = (sequence_embeddings
                           .unsqueeze(3))
    # Pad it with zeros from the left.
    sequence_embeddings = F.pad(sequence_embeddings,
                                (0, 0, 1, 0))
    # Average representations, ignoring padding.
    sequence_embedding_sum = torch.cumsum(sequence_embeddings, 2)
    non_padding_entries = (
        torch.cumsum((sequence_embeddings != 0.0).float(), 2)
        .expand_as(sequence_embedding_sum)
    )
    user_representations = (
        sequence_embedding_sum / (non_padding_entries + 1)
    ).squeeze(3)

    return user_representations[:, :, :-1], user_representations[:, :, -1]
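For intuition, here is the pooling recipe in isolation on a toy batch; a minimal sketch assuming an embedding table with padding index 0 (the names are illustrative, not from the class above):

import torch
import torch.nn.functional as F

embeddings = torch.nn.Embedding(10, 4, padding_idx=0)
item_sequences = torch.tensor([[0, 2, 5, 7]])        # 0 is padding

x = embeddings(item_sequences).permute(0, 2, 1).unsqueeze(3)
x = F.pad(x, (0, 0, 1, 0))                           # shift right by one step
sums = torch.cumsum(x, 2)
counts = torch.cumsum((x != 0.0).float(), 2).expand_as(sums)
pooled = (sums / (counts + 1)).squeeze(3)
print(pooled.shape)                                  # torch.Size([1, 4, 5])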
def sum_scan_exclusive(x, dim):
    # Reverse cumulative sum along `dim`: ret[i] = x[i] + x[i+1] + ...
    ret = torch.cumsum(-x, dim=dim)
    # Subtracting the final entry of the scan of -x turns each prefix
    # sum into a suffix sum over the elements after position i ...
    end_idx = ret.size(dim) - 1
    ret_sum = ret.narrow(dim, end_idx, 1).clone()
    ret -= ret_sum.expand_as(ret)
    # ... and adding x back makes the suffix sum inclusive.
    ret += x
    return ret
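On a concrete vector the scan looks like this: each entry sums itself and everything after it.

import torch

x = torch.tensor([1., 2., 3., 4.])
print(sum_scan_exclusive(x, 0))   # tensor([10.,  9.,  7.,  4.])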
def forward(ctx, input, dim):
    # New-style autograd Function: stash dim on the context for backward.
    ctx.dim = dim
    return torch.cumsum(input, dim=ctx.dim)
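Pieced together, the forward above and the scan helper make a complete autograd Function; a hedged sketch (the class name Cumsum is illustrative):

import torch
from torch.autograd import Function

class Cumsum(Function):
    @staticmethod
    def forward(ctx, input, dim):
        ctx.dim = dim
        return torch.cumsum(input, dim=dim)

    @staticmethod
    def backward(ctx, grad_output):
        # Gradient of cumsum is the reverse cumsum of grad_output;
        # the dim argument itself receives no gradient.
        return sum_scan_exclusive(grad_output, ctx.dim), None

x = torch.randn(4, requires_grad=True)
y = Cumsum.apply(x, 0)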
def cumsum(x, axis=0):
    # Wrap torch.cumsum as a graph op for a Keras-style backend;
    # `get_op` and `_get_shape` are helpers from the surrounding library.
    def _cumsum(x, axis=axis):
        y = torch.cumsum(x, axis)
        return y

    def _compute_output_shape(x, axis=axis):
        # cumsum preserves the input shape.
        return _get_shape(x)

    return get_op(_cumsum, output_shape=_compute_output_shape, arguments=[axis])(x)
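Without the graph-op machinery, the operation reduces to a single call, and its shape-preserving behaviour is exactly what _compute_output_shape encodes:

import torch

x = torch.randn(2, 3)
assert torch.cumsum(x, dim=0).shape == x.shape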