def forward(self, prob, target, reward):
    """
    Args:
        prob: (N, C), torch Variable
        target: (N,), torch Variable
        reward: (N,), torch Variable
    """
    N = target.size(0)
    C = prob.size(1)
    # Build an (N, C) ByteTensor mask with a 1 in each sample's target column.
    one_hot = torch.zeros((N, C))
    if prob.is_cuda:
        one_hot = one_hot.cuda()
    one_hot.scatter_(1, target.data.view(-1, 1), 1)
    one_hot = Variable(one_hot.byte())  # .byte() keeps the tensor on its current device
    # REINFORCE-style loss: select each sample's target entry, weight it by the
    # reward, and negate the sum.
    loss = torch.masked_select(prob, one_hot)
    loss = loss * reward
    loss = -torch.sum(loss)
    return loss
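The core of this loss is the one-hot ByteTensor mask used with masked_select. A standalone sketch of that selection step with toy values (shapes and numbers here are only for illustration):

```python
import torch
from torch.autograd import Variable

N, C = 2, 4
prob = Variable(torch.randn(N, C))                  # stand-in for the (N, C) scores
target = Variable(torch.LongTensor([1, 3]))
reward = Variable(torch.FloatTensor([0.5, -1.0]))

one_hot = torch.zeros(N, C)
one_hot.scatter_(1, target.data.view(-1, 1), 1)     # 1 at each sample's target column
selected = torch.masked_select(prob, Variable(one_hot.byte()))
loss = -(selected * reward).sum()
print(loss)
```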
Python ByteTensor() usage examples
Source file: triplet_mnist_loader.py
Project: triplet-network-pytorch
Author: andreasveit
def read_image_file(path):
    with open(path, 'rb') as f:
        data = f.read()
    # MNIST image files start with magic number 2051, followed by the image
    # count and the row/column dimensions, all as 32-bit big-endian integers.
    assert get_int(data[:4]) == 2051
    length = get_int(data[4:8])
    num_rows = get_int(data[8:12])
    num_cols = get_int(data[12:16])
    images = []
    idx = 16
    for l in range(length):
        img = []
        images.append(img)
        for r in range(num_rows):
            row = []
            img.append(row)
            for c in range(num_cols):
                row.append(parse_byte(data[idx]))
                idx += 1
    assert len(images) == length
    return torch.ByteTensor(images).view(-1, 28, 28)
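read_image_file relies on two small helpers defined elsewhere in the same loader. A sketch of typical definitions (this loader appears to follow the old torchvision MNIST reader, so these are likely but not guaranteed to match the project's actual helpers):

```python
import codecs

def get_int(b):
    # Big-endian bytes -> int (MNIST header fields are 32-bit big-endian).
    return int(codecs.encode(b, 'hex'), 16)

def parse_byte(b):
    # Indexing bytes yields str on Python 2 and int on Python 3.
    if isinstance(b, str):
        return ord(b)
    return b
```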
def test_masked_global_attention(self):
    source_lengths = torch.IntTensor([7, 3, 5, 2])
    illegal_weights_mask = torch.ByteTensor([
        [0, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 1, 1, 1, 1],
        [0, 0, 0, 0, 0, 1, 1],
        [0, 0, 1, 1, 1, 1, 1]])
    batch_size = source_lengths.size(0)
    dim = 20
    context = Variable(torch.randn(batch_size, source_lengths.max(), dim))
    hidden = Variable(torch.randn(batch_size, dim))
    attn = onmt.modules.GlobalAttention(dim)
    _, alignments = attn(hidden, context, context_lengths=source_lengths)
    illegal_weights = alignments.masked_select(illegal_weights_mask)
    self.assertEqual(0.0, illegal_weights.data.sum())
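The hand-written illegal_weights_mask marks every padding position past each sequence's length. A minimal sketch of building the same ByteTensor mask directly from source_lengths (the helper name below is hypothetical):

```python
import torch

def padding_mask(lengths, max_len=None):
    # 1 where the position index is >= the sequence length (i.e. padding).
    max_len = max_len or int(lengths.max())
    positions = torch.arange(0, max_len).long().unsqueeze(0)   # (1, max_len)
    return (positions >= lengths.long().unsqueeze(1)).byte()   # (batch, max_len)

# padding_mask(torch.IntTensor([7, 3, 5, 2])) reproduces illegal_weights_mask above.
```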
def __call__(self, gray_image):
    size = gray_image.size()
    color_image = torch.ByteTensor(3, size[1], size[2]).fill_(0)
    # Paint each labelled region with its colour-map entry, channel by channel.
    for label in range(1, len(self.cmap)):
        mask = gray_image[0] == label
        color_image[0][mask] = self.cmap[label][0]
        color_image[1][mask] = self.cmap[label][1]
        color_image[2][mask] = self.cmap[label][2]
    return color_image
def _new_idx(self, input):
    if torch.typename(input) == 'torch.cuda.FloatTensor':
        return torch.cuda.ByteTensor()
    else:
        return torch.ByteTensor()
def type(self, type=None, tensorCache=None):
    if not type:
        return self._type
    self._idx = None
    super(CosineEmbeddingCriterion, self).type(type, tensorCache)
    # comparison operators behave differently from cuda/c implementations
    if type == 'torch.cuda.FloatTensor':
        self._idx = torch.cuda.ByteTensor()
    else:
        self._idx = torch.ByteTensor()
    return self
def __init__(self):
    super(MaskedSelect, self).__init__()
    self._maskIndices = torch.LongTensor()
    self._maskIndexBuffer = torch.LongTensor()
    self._maskIndexBufferCPU = torch.FloatTensor()
    self._gradBuffer = torch.Tensor()
    self._gradMask = torch.ByteTensor()
def test_numel(self):
    b = torch.ByteTensor(3, 100, 100)
    self.assertEqual(b.nelement(), 3 * 100 * 100)
    self.assertEqual(b.numel(), 3 * 100 * 100)
def test_element_size(self):
    byte = torch.ByteStorage().element_size()
    char = torch.CharStorage().element_size()
    short = torch.ShortStorage().element_size()
    int = torch.IntStorage().element_size()
    long = torch.LongStorage().element_size()
    float = torch.FloatStorage().element_size()
    double = torch.DoubleStorage().element_size()

    self.assertEqual(byte, torch.ByteTensor().element_size())
    self.assertEqual(char, torch.CharTensor().element_size())
    self.assertEqual(short, torch.ShortTensor().element_size())
    self.assertEqual(int, torch.IntTensor().element_size())
    self.assertEqual(long, torch.LongTensor().element_size())
    self.assertEqual(float, torch.FloatTensor().element_size())
    self.assertEqual(double, torch.DoubleTensor().element_size())

    self.assertGreater(byte, 0)
    self.assertGreater(char, 0)
    self.assertGreater(short, 0)
    self.assertGreater(int, 0)
    self.assertGreater(long, 0)
    self.assertGreater(float, 0)
    self.assertGreater(double, 0)

    # These tests are portable, not necessarily strict for your system.
    self.assertEqual(byte, 1)
    self.assertEqual(char, 1)
    self.assertGreaterEqual(short, 2)
    self.assertGreaterEqual(int, 2)
    self.assertGreaterEqual(int, short)
    self.assertGreaterEqual(long, 4)
    self.assertGreaterEqual(long, int)
    self.assertGreaterEqual(double, float)
def test_forward_does_correct_computation(self):
    encoder = BagOfEmbeddingsEncoder(embedding_dim=2)
    input_tensor = Variable(torch.FloatTensor([[[.7, .8], [.1, 1.5], [.3, .6]],
                                               [[.5, .3], [1.4, 1.1], [.3, .9]]]))
    mask = Variable(torch.ByteTensor([[1, 1, 1], [1, 1, 0]]))
    encoder_output = encoder(input_tensor, mask)
    assert_almost_equal(encoder_output.data.numpy(),
                        numpy.asarray([[.7 + .1 + .3, .8 + 1.5 + .6], [.5 + 1.4, .3 + 1.1]]))
def test_forward_does_correct_computation_with_average(self):
    encoder = BagOfEmbeddingsEncoder(embedding_dim=2, averaged=True)
    input_tensor = Variable(torch.FloatTensor([[[.7, .8], [.1, 1.5], [.3, .6]],
                                               [[.5, .3], [1.4, 1.1], [.3, .9]],
                                               [[.4, .3], [.4, .3], [1.4, 1.7]]]))
    mask = Variable(torch.ByteTensor([[1, 1, 1], [1, 1, 0], [0, 0, 0]]))
    encoder_output = encoder(input_tensor, mask)
    assert_almost_equal(encoder_output.data.numpy(),
                        numpy.asarray([[(.7 + .1 + .3) / 3, (.8 + 1.5 + .6) / 3],
                                       [(.5 + 1.4) / 2, (.3 + 1.1) / 2],
                                       [0., 0.]]))
def test_get_sequence_lengths_from_binary_mask(self):
    binary_mask = torch.ByteTensor([[1, 1, 1, 0, 0, 0],
                                    [1, 1, 0, 0, 0, 0],
                                    [1, 1, 1, 1, 1, 1],
                                    [1, 0, 0, 0, 0, 0]])
    lengths = util.get_lengths_from_binary_sequence_mask(binary_mask)
    numpy.testing.assert_array_equal(lengths.numpy(), numpy.array([3, 2, 6, 1]))
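The utility under test reduces to summing the mask along its last dimension. A minimal sketch of an equivalent computation (not necessarily the library's actual implementation):

```python
import torch

def lengths_from_mask(binary_mask):
    # Each row of the ByteTensor mask has a 1 per real token, so the row sum
    # is the sequence length.
    return binary_mask.long().sum(-1)

# lengths_from_mask(torch.ByteTensor([[1, 1, 1, 0, 0, 0]]))  ->  [3]
```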
def forward(self,
            inputs: torch.Tensor,
            tags: torch.Tensor,
            mask: torch.ByteTensor = None) -> torch.Tensor:
    """
    Computes the log likelihood.
    """
    # pylint: disable=arguments-differ
    if mask is None:
        mask = torch.autograd.Variable(torch.ones(*tags.size()).long())
    log_denominator = self._input_likelihood(inputs, mask)
    log_numerator = self._joint_likelihood(inputs, tags, mask)
    return torch.sum(log_numerator - log_denominator)
def label_to_long_tensor(pic):
    # Read the PIL label image's raw bytes into a flat ByteTensor, reshape to
    # H x W x 1, move the channel dim to the front, then squeeze and cast to long.
    label = torch.ByteTensor(torch.ByteStorage.from_buffer(pic.tobytes()))
    label = label.view(pic.size[1], pic.size[0], 1)
    label = label.transpose(0, 1).transpose(0, 2).squeeze().contiguous().long()
    return label
def where(condition, if_true, if_false):
    """
    Torch equivalent of numpy.where.

    Parameters
    ----------
    condition : torch.ByteTensor or torch.cuda.ByteTensor or torch.autograd.Variable
        Condition to check.
    if_true : torch.Tensor or torch.cuda.Tensor or torch.autograd.Variable
        Output value if condition is true.
    if_false : torch.Tensor or torch.cuda.Tensor or torch.autograd.Variable
        Output value if condition is false.

    Returns
    -------
    torch.Tensor

    Raises
    ------
    AssertionError
        if if_true and if_false are not both variables or both tensors.
    AssertionError
        if if_true and if_false don't have the same datatype.
    """
    if isinstance(if_true, Variable) or isinstance(if_false, Variable):
        assert isinstance(condition, Variable), \
            "Condition must be a variable if either if_true or if_false is a variable."
        assert isinstance(if_true, Variable) and isinstance(if_false, Variable), \
            "Both if_true and if_false must be variables if either is one."
        assert if_true.data.type() == if_false.data.type(), \
            "Type mismatch: {} and {}".format(if_true.data.type(), if_false.data.type())
    else:
        assert not isinstance(condition, Variable), \
            "Condition must not be a variable because neither if_true nor if_false is one."
        # noinspection PyArgumentList
        assert if_true.type() == if_false.type(), \
            "Type mismatch: {} and {}".format(if_true.type(), if_false.type())
    # Cast the 0/1 ByteTensor condition to the value dtype and blend the two inputs.
    casted_condition = condition.type_as(if_true)
    output = casted_condition * if_true + (1 - casted_condition) * if_false
    return output
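A quick usage sketch of where on plain tensors (values chosen purely for illustration):

```python
import torch

condition = torch.ByteTensor([1, 0, 1])
if_true = torch.FloatTensor([1.0, 2.0, 3.0])
if_false = torch.FloatTensor([-1.0, -2.0, -3.0])

# Selects elementwise: positions where condition is 1 come from if_true.
print(where(condition, if_true, if_false))   # 1.0, -2.0, 3.0
```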
def image2torch(img):
    width = img.width
    height = img.height
    # PIL image -> flat ByteTensor -> channels-first float tensor in [0, 1]
    # with a leading batch dimension.
    img = torch.ByteTensor(torch.ByteStorage.from_buffer(img.tobytes()))
    img = img.view(height, width, 3).transpose(0, 1).transpose(0, 2).contiguous()
    img = img.view(1, 3, height, width)
    img = img.float().div(255.0)
    return img
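A short usage sketch, assuming a PIL RGB image (the file name is a placeholder):

```python
from PIL import Image

img = Image.open('example.jpg').convert('RGB')   # placeholder path
batch = image2torch(img)
print(batch.size())   # (1, 3, height, width), values in [0, 1]
```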
def reinforce_sample(self, x, max_length=30, temperature=1.0, argmax=False):
    N, T = x.size(0), max_length
    encoded = self.encoder(x)
    y = torch.LongTensor(N, T).fill_(self.NULL)
    done = torch.ByteTensor(N).fill_(0)
    cur_input = Variable(x.data.new(N, 1).fill_(self.START))
    h, c = None, None
    self.multinomial_outputs = []
    self.multinomial_probs = []
    for t in range(T):
        # logprobs is N x 1 x V
        logprobs, h, c = self.decoder(encoded, cur_input, h0=h, c0=c)
        logprobs = logprobs / temperature
        probs = F.softmax(logprobs.view(N, -1))  # Now N x V
        if argmax:
            _, cur_output = probs.max(1)
        else:
            cur_output = probs.multinomial()  # Now N x 1
        self.multinomial_outputs.append(cur_output)
        self.multinomial_probs.append(probs)
        cur_output_data = cur_output.data.cpu()
        not_done = logical_not(done)
        y[:, t][not_done] = cur_output_data[not_done]
        done = logical_or(done, cur_output_data.cpu() == self.END)
        cur_input = cur_output
        if done.sum() == N:
            break
    return Variable(y.type_as(x.data))
def __call__(self, pic):
    if isinstance(pic, np.ndarray):
        # handle numpy array
        img = torch.from_numpy(pic).permute(2, 0, 1).contiguous()
    else:
        # handle PIL Image
        img = torch.ByteTensor(torch.ByteStorage.from_buffer(pic.tobytes()))
        img = img.view(pic.size[1], pic.size[0], len(pic.mode))
        # put it from HWC to CHW format
        # yikes, this transpose takes 80% of the loading time/CPU
        img = img.transpose(0, 1).transpose(0, 2).contiguous()
    return img.float().div(255) if self.div else img.float()
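For reference, the PIL branch above can be exercised standalone; a minimal sketch with a dummy RGB image (names and sizes here are illustrative):

```python
import torch
from PIL import Image

pic = Image.new('RGB', (4, 3))                                   # dummy 4x3 RGB image
img = torch.ByteTensor(torch.ByteStorage.from_buffer(pic.tobytes()))
img = img.view(pic.size[1], pic.size[0], len(pic.mode))          # HWC: (3, 4, 3)
img = img.transpose(0, 1).transpose(0, 2).contiguous()           # CHW: (3, 3, 4)
print(img.size(), img.float().div(255).max())
```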