def test_comparison_ops(self):
    x = torch.randn(5, 5)
    y = torch.randn(5, 5)

    eq = x == y
    for idx in iter_indices(x):
        self.assertIs(x[idx] == y[idx], eq[idx] == 1)

    ne = x != y
    for idx in iter_indices(x):
        self.assertIs(x[idx] != y[idx], ne[idx] == 1)

    lt = x < y
    for idx in iter_indices(x):
        self.assertIs(x[idx] < y[idx], lt[idx] == 1)

    le = x <= y
    for idx in iter_indices(x):
        self.assertIs(x[idx] <= y[idx], le[idx] == 1)

    gt = x > y
    for idx in iter_indices(x):
        self.assertIs(x[idx] > y[idx], gt[idx] == 1)

    ge = x >= y
    for idx in iter_indices(x):
        self.assertIs(x[idx] >= y[idx], ge[idx] == 1)
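A minimal standalone sketch of what this test exercises, assuming a plain script outside the test harness: elementwise comparison operators on tensors produce a mask tensor of the same shape (a ByteTensor of 0/1 in the PyTorch versions this test targets; later releases return a BoolTensor).

import torch

x = torch.randn(5, 5)
y = torch.randn(5, 5)
mask = x > y                         # elementwise mask, same shape as x and y
print(mask.shape, int(mask.sum()))   # count of positions where x exceeds y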
Python gt() example source code
def greater(x: T.FloatTensor, y: T.FloatTensor) -> T.ByteTensor:
    """
    Elementwise test if x > y.

    Args:
        x: A tensor.
        y: A tensor.

    Returns:
        tensor (of bools): Elementwise test of x > y.

    """
    return torch.gt(x, y)
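A quick usage sketch for greater() above; the input values are illustrative:

import torch

a = torch.Tensor([1.0, 3.0])
b = torch.Tensor([2.0, 0.5])
print(greater(a, b))  # elementwise x > y mask: [0, 1]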
def forward(self, input, context, src_pad_mask, tgt_pad_mask):
    # Args Checks
    input_batch, input_len, _ = input.size()
    contxt_batch, contxt_len, _ = context.size()
    aeq(input_batch, contxt_batch)

    src_batch, t_len, s_len = src_pad_mask.size()
    tgt_batch, t_len_, t_len__ = tgt_pad_mask.size()
    aeq(input_batch, contxt_batch, src_batch, tgt_batch)
    aeq(t_len, t_len_, t_len__, input_len)
    aeq(s_len, contxt_len)
    # END Args Checks

    # Combine the target padding mask with the causal (subsequent) mask:
    # any position that is padding or in the future becomes non-zero.
    dec_mask = torch.gt(tgt_pad_mask
                        + self.mask[:, :tgt_pad_mask.size(1),
                                    :tgt_pad_mask.size(1)]
                        .expand_as(tgt_pad_mask), 0)

    input_norm = self.layer_norm_1(input)
    query, attn = self.self_attn(input_norm, input_norm, input_norm,
                                 mask=dec_mask)
    query_norm = self.layer_norm_2(query + input)
    mid, attn = self.context_attn(context, context, query_norm,
                                  mask=src_pad_mask)
    output = self.feed_forward(mid + query + input)

    # CHECKS
    output_batch, output_len, _ = output.size()
    aeq(input_len, output_len)
    aeq(contxt_batch, output_batch)

    n_batch_, t_len_, s_len_ = attn.size()
    aeq(input_batch, n_batch_)
    aeq(contxt_len, s_len_)
    aeq(input_len, t_len_)
    # END CHECKS

    return output, attn
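The dec_mask line above merges two masks by addition and a torch.gt threshold: a position is blocked if it is padding or if it lies in the future, since either contribution makes the sum non-zero. A minimal standalone sketch of the same trick (shapes and values here are illustrative, not taken from the repo):

import torch

t_len = 4
# 1 marks padded target positions (batch of 1; the last position is padding)
tgt_pad_mask = torch.Tensor([[[0, 0, 0, 1]]]).expand(1, t_len, t_len)
# upper-triangular "subsequent" mask blocks attention to future positions
subsequent = torch.triu(torch.ones(t_len, t_len), diagonal=1)
# a position is masked if it is padding OR comes later in the sequence
dec_mask = torch.gt(tgt_pad_mask + subsequent, 0)
print(dec_mask)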
sorting_task.py (project: neural-combinatorial-rl-pytorch, author: pemami4911)
def reward(sample_solution, USE_CUDA=False):
    """
    The reward for the sorting task is defined as the
    length of the longest sorted consecutive subsequence.

    Input sequences must all be the same length.

    Example:

    input       | output
    ====================
    [1 4 3 5 2] | [5 1 2 3 4]

    The output gets a reward of 4/5, or 0.8.
    The range is [1/sourceL, 1].

    Args:
        sample_solution: list of len sourceL of [batch_size]
            Tensors
    Returns:
        [batch_size] Tensor containing trajectory rewards
    """
    batch_size = sample_solution[0].size(0)
    sourceL = len(sample_solution)

    longest = Variable(torch.ones(batch_size), requires_grad=False)
    current = Variable(torch.ones(batch_size), requires_grad=False)

    if USE_CUDA:
        longest = longest.cuda()
        current = current.cuda()

    for i in range(1, sourceL):
        # compare solution[i-1] < solution[i]
        res = torch.lt(sample_solution[i - 1], sample_solution[i])
        # if res[j] == 1, increment length of current sorted subsequence
        current += res.float()
        # else, reset current to 1
        current[torch.eq(res, 0)] = 1
        # if, for any, current > longest, update longest
        mask = torch.gt(current, longest)
        longest[mask] = current[mask]
    # returned negated, so the training loop can minimize it as a loss
    return -torch.div(longest, sourceL)
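A hypothetical check of reward() on the docstring's example output [5 1 2 3 4] with batch_size = 1: the longest sorted run is 1 2 3 4 (length 4), so the returned value is -4/5.

import torch
from torch.autograd import Variable

solution = [Variable(torch.Tensor([v])) for v in [5.0, 1.0, 2.0, 3.0, 4.0]]
print(reward(solution))  # -0.8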