def test_bernoulli(self):
    t = torch.ByteTensor(10, 10)

    def isBinary(t):
        # every element must be exactly 0 or 1
        return torch.ne(t, 0).mul_(torch.ne(t, 1)).sum() == 0

    p = 0.5
    t.bernoulli_(p)
    self.assertTrue(isBinary(t))

    p = torch.rand(SIZE)
    t.bernoulli_(p)
    self.assertTrue(isBinary(t))

    q = torch.rand(5, 5)
    self.assertTrue(isBinary(q.bernoulli()))
def test_bernoulli(self):
    t = torch.ByteTensor(10, 10)

    def isBinary(t):
        return torch.ne(t, 0).mul_(torch.ne(t, 1)).sum() == 0

    p = 0.5
    t.bernoulli_(p)
    self.assertTrue(isBinary(t))

    p = torch.rand(10, 10)
    t.bernoulli_(p)
    self.assertTrue(isBinary(t))

    q = torch.rand(5, 5)
    self.assertTrue(isBinary(q.bernoulli()))
def test_bernoulli_variable(self):
    # TODO: remove once we merge Variable and Tensor
    t = torch.autograd.Variable(torch.ByteTensor(10, 10))

    def isBinary(t):
        return torch.ne(t, 0).mul_(torch.ne(t, 1)).sum() == 0

    p = 0.5
    t.bernoulli_(p)
    self.assertTrue(isBinary(t))

    p = torch.autograd.Variable(torch.rand(10))
    t.bernoulli_(p)
    self.assertTrue(isBinary(t))

    q = torch.rand(5, 5)
    self.assertTrue(isBinary(q.bernoulli()))
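A minimal standalone sketch of the same binary check used in these tests, assuming a recent PyTorch where torch.rand and bernoulli_ behave as above; the tensor shape is made up for illustration.

import torch

sample = torch.rand(10, 10).bernoulli()                      # each entry drawn with its own probability
non_binary = torch.ne(sample, 0).mul_(torch.ne(sample, 1))   # true where an entry is neither 0 nor 1
assert non_binary.sum() == 0                                 # every sampled value is 0 or 1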
def tany(x: T.Tensor,
         axis: int = None,
         keepdims: bool = False) -> T.Boolean:
    """
    Return True if any elements of the input tensor are true along the
    specified axis.

    Args:
        x: A float or tensor.
        axis (optional): The axis of interest.
        keepdims (optional): If this is set to true, the dimension of the tensor
                             is unchanged. Otherwise, the reduced axis is removed
                             and the dimension of the array is 1 less.

    Returns:
        if axis is None:
            bool: 'any' applied to all elements in the tensor
        else:
            tensor (of bytes): 'any' applied to the elements in the tensor
                               along axis

    """
    return tmax(x.ne(0), axis=axis, keepdims=keepdims)
def tall(x: T.Tensor,
         axis: int = None,
         keepdims: bool = False) -> T.Boolean:
    """
    Return True if all elements of the input tensor are true along the
    specified axis.

    Args:
        x: A float or tensor.
        axis (optional): The axis of interest.
        keepdims (optional): If this is set to true, the dimension of the tensor
                             is unchanged. Otherwise, the reduced axis is removed
                             and the dimension of the array is 1 less.

    Returns:
        if axis is None:
            bool: 'all' applied to all elements in the tensor
        else:
            tensor (of bytes): 'all' applied to the elements in the tensor
                               along axis

    """
    return tmin(x.ne(0), axis=axis, keepdims=keepdims)
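For context, a hedged sketch of what tany and tall compute, written directly against torch since tmax and tmin are backend reduction helpers not shown here; the values are made up, and torch's any/all are used as the reference behaviour.

import torch

x = torch.Tensor([[0, 2, 3],
                  [0, 0, 1]])
mask = x.ne(0)              # elementwise "is nonzero" mask
print(mask.any())           # tany over the whole tensor  -> True
print(mask.all())           # tall over the whole tensor  -> False
print(mask.any(dim=1))      # tany along axis 1, one value per row -> [True, True]
print(mask.all(dim=1))      # tall along axis 1 -> [False, False]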
def test_logical(self):
    x = torch.rand(100, 100) * 2 - 1
    xx = x.clone()

    xgt = torch.gt(x, 1)
    xlt = torch.lt(x, 1)
    xeq = torch.eq(x, 1)
    xne = torch.ne(x, 1)

    neqs = xgt + xlt
    all = neqs + xeq
    self.assertEqual(neqs.sum(), xne.sum(), 0)
    self.assertEqual(x.nelement(), all.sum())
def test_RNGState(self):
    state = torch.get_rng_state()
    stateCloned = state.clone()
    before = torch.rand(1000)

    self.assertEqual(state.ne(stateCloned).long().sum(), 0, 0)

    torch.set_rng_state(state)
    after = torch.rand(1000)
    self.assertEqual(before, after, 0)
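A hedged standalone sketch of the RNG-state round trip this test exercises; it assumes only torch.get_rng_state / torch.set_rng_state and torch.rand, as used above.

import torch

state = torch.get_rng_state()      # snapshot the generator state
a = torch.rand(5)
torch.set_rng_state(state)         # rewind to the snapshot
b = torch.rand(5)
assert torch.ne(a, b).sum() == 0   # identical draws after restoring the state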
Source file: mgru_rte_model.py, from project Recognizing-Textual-Entailment (author: codedecde)
def forward(self, premise, hypothesis, training=False):
    '''
    inputs:
        premise : batch x T
        hypothesis : batch x T
    outputs :
        pred : batch x num_classes
    '''
    self.train(training)
    batch_size = premise.size(0)

    mask_p = torch.ne(premise, 0).type(dtype)
    mask_h = torch.ne(hypothesis, 0).type(dtype)

    encoded_p = self.embedding(premise)  # batch x T x n_embed
    encoded_p = F.dropout(encoded_p, p=self.options['DROPOUT'], training=training)

    encoded_h = self.embedding(hypothesis)  # batch x T x n_embed
    encoded_h = F.dropout(encoded_h, p=self.options['DROPOUT'], training=training)

    encoded_p = encoded_p.transpose(1, 0)  # T x batch x n_embed
    encoded_h = encoded_h.transpose(1, 0)  # T x batch x n_embed

    mask_p = mask_p.transpose(1, 0)  # T x batch
    mask_h = mask_h.transpose(1, 0)  # T x batch

    h_p_0, h_n_0 = self.init_hidden(batch_size)  # 1 x batch x n_dim

    o_p, h_n = self._gru_forward(self.p_gru, encoded_p, mask_p, h_p_0)  # o_p : T x batch x n_dim
                                                                        # h_n : 1 x batch x n_dim
    o_h, h_n = self._gru_forward(self.h_gru, encoded_h, mask_h, h_n_0)  # o_h : T x batch x n_dim
                                                                        # h_n : 1 x batch x n_dim
    r_0 = self.attn_gru_init_hidden(batch_size)
    h_star, alpha_vec = self._attn_gru_forward(o_h, mask_h, r_0, o_p, mask_p)

    h_star = self.out(h_star)  # batch x num_classes
    if self.options['LAST_NON_LINEAR']:
        h_star = F.leaky_relu(h_star)  # Non-linear projection
    pred = F.log_softmax(h_star)
    return pred
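A hedged, self-contained sketch of the masking idiom used at the top of forward(): padded token ids are 0 (as in the torch.ne(premise, 0) calls above), so the comparison yields a 1/0 mask that can zero out embeddings at padded positions. The vocabulary, embedding size, and token values below are made up.

import torch

tokens = torch.LongTensor([[4, 7, 0, 0],       # batch x T, 0 is the padding id
                           [2, 9, 5, 0]])
mask = torch.ne(tokens, 0).float()             # batch x T, 1.0 for real tokens, 0.0 for padding
embedding = torch.nn.Embedding(10, 3)
encoded = embedding(tokens)                    # batch x T x n_embed
encoded = encoded * mask.unsqueeze(-1)         # zero the embedding vectors at padded positions
print(mask)
print(encoded.shape)                           # torch.Size([2, 4, 3])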
def test_comparison_ops(self):
    x = torch.randn(5, 5)
    y = torch.randn(5, 5)

    eq = x == y
    for idx in iter_indices(x):
        self.assertIs(x[idx] == y[idx], eq[idx] == 1)

    ne = x != y
    for idx in iter_indices(x):
        self.assertIs(x[idx] != y[idx], ne[idx] == 1)

    lt = x < y
    for idx in iter_indices(x):
        self.assertIs(x[idx] < y[idx], lt[idx] == 1)

    le = x <= y
    for idx in iter_indices(x):
        self.assertIs(x[idx] <= y[idx], le[idx] == 1)

    gt = x > y
    for idx in iter_indices(x):
        self.assertIs(x[idx] > y[idx], gt[idx] == 1)

    ge = x >= y
    for idx in iter_indices(x):
        self.assertIs(x[idx] >= y[idx], ge[idx] == 1)
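A brief hedged sketch of what this test checks: the Python comparison operators on tensors are elementwise and produce the same 0/1 masks as torch.eq, torch.ne, torch.lt, and so on. The values below are made up.

import torch

x = torch.Tensor([1.0, 2.0, 3.0])
y = torch.Tensor([1.0, 5.0, 0.0])
print(x == y)             # same as torch.eq(x, y) -> [True, False, False]
print(x != y)             # same as torch.ne(x, y) -> [False, True, True]
print((x != y).sum())     # number of differing positions -> 2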
def not_equal(x: T.FloatTensor, y: T.FloatTensor) -> T.ByteTensor:
    """
    Elementwise test if two tensors are not equal.

    Args:
        x: A tensor.
        y: A tensor.

    Returns:
        tensor (of bytes): Elementwise test of non-equality between x and y.

    """
    return torch.ne(x, y)
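A short hedged usage sketch of the torch.ne call that not_equal wraps; torch.ne also broadcasts, so comparison against a scalar works the same way. The values are made up.

import torch

a = torch.Tensor([0.0, 1.0, 2.0])
b = torch.Tensor([0.0, 5.0, 2.0])
print(torch.ne(a, b))     # elementwise non-equality -> [False, True, False]
print(torch.ne(a, 0))     # scalar comparison broadcasts -> [False, True, True]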
def forward(self, premise, hypothesis, training=False):
    '''
    inputs:
        premise : batch x T
        hypothesis : batch x T
    outputs :
        pred : batch x num_classes
    '''
    self.train(training)
    batch_size = premise.size(0)

    mask_p = torch.ne(premise, 0).type(dtype)
    mask_h = torch.ne(hypothesis, 0).type(dtype)

    encoded_p = self.embedding(premise)  # batch x T x n_embed
    encoded_p = F.dropout(encoded_p, p=self.options['DROPOUT'], training=training)

    encoded_h = self.embedding(hypothesis)  # batch x T x n_embed
    encoded_h = F.dropout(encoded_h, p=self.options['DROPOUT'], training=training)

    encoded_p = encoded_p.transpose(1, 0)  # T x batch x n_embed
    encoded_h = encoded_h.transpose(1, 0)  # T x batch x n_embed

    mask_p = mask_p.transpose(1, 0)  # T x batch
    mask_h = mask_h.transpose(1, 0)  # T x batch

    h_0 = self.init_hidden(batch_size)  # 1 x batch x n_dim

    o_p, h_n = self._gru_forward(self.p_gru, encoded_p, mask_p, h_0)  # o_p : T x batch x n_dim
                                                                      # h_n : 1 x batch x n_dim
    o_h, h_n = self._gru_forward(self.h_gru, encoded_h, mask_h, h_n)  # o_h : T x batch x n_dim
                                                                      # h_n : 1 x batch x n_dim
    if self.options['WBW_ATTN']:
        r_0 = self.attn_rnn_init_hidden(batch_size)  # batch x n_dim
        r, alpha_vec = self._attn_rnn_forward(o_h, mask_h, r_0, o_p, mask_p)  # r : batch x n_dim
                                                                              # alpha_vec : T x batch x T
    else:
        r, alpha = self._attention_forward(o_p, mask_p, o_h[-1])  # r : batch x n_dim
                                                                  # alpha : batch x T

    h_star = self._combine_last(r, o_h[-1])  # batch x n_dim
    h_star = self.out(h_star)  # batch x num_classes
    if self.options['LAST_NON_LINEAR']:
        h_star = F.leaky_relu(h_star)  # Non-linear projection
    pred = F.log_softmax(h_star)
    return pred
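The per-timestep masks built above are consumed inside _gru_forward, which is not shown here. A hedged sketch of the usual idiom (an assumption about what _gru_forward does, based on the mask shapes) is to carry the previous hidden state through padded positions:

import torch

def masked_gru_step(cell, x_t, mask_t, h_prev):
    # mask_t: batch x 1, 1.0 for a real token at this timestep, 0.0 for padding.
    # Hypothetical helper: keeps h_prev unchanged wherever this step is padding.
    h_new = cell(x_t, h_prev)
    return mask_t * h_new + (1.0 - mask_t) * h_prev

cell = torch.nn.GRUCell(4, 6)              # n_embed=4, n_dim=6 (made-up sizes)
x_t = torch.randn(2, 4)                    # batch x n_embed for one timestep
mask_t = torch.Tensor([[1.0], [0.0]])      # second sequence is padded at this step
h_prev = torch.zeros(2, 6)
h_next = masked_gru_step(cell, x_t, mask_t, h_prev)
print(h_next.shape)                        # torch.Size([2, 6])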
def prepare_rnn_seq(rnn_input, lengths, hx=None, masks=None, batch_first=False):
    '''
    Args:
        rnn_input: [seq_len, batch, input_size]: tensor containing the features of the input sequence.
        lengths: [batch]: tensor containing the lengths of the input sequence.
        hx: [num_layers * num_directions, batch, hidden_size]: tensor containing the initial hidden state for each element in the batch.
        masks: [seq_len, batch]: tensor containing the mask for each element in the batch.
        batch_first: If True, then the input and output tensors are provided as [batch, seq_len, feature].
    Returns:
    '''

    def check_decreasing(lengths):
        # Returns None if lengths are already sorted in decreasing order;
        # otherwise returns the sorted lengths plus the permutation and its inverse.
        lens, order = torch.sort(lengths, dim=0, descending=True)
        if torch.ne(lens, lengths).sum() == 0:
            return None
        else:
            _, rev_order = torch.sort(order)
            return lens, Variable(order), Variable(rev_order)

    check_res = check_decreasing(lengths)

    if check_res is None:
        lens = lengths
        rev_order = None
    else:
        lens, order, rev_order = check_res
        batch_dim = 0 if batch_first else 1
        rnn_input = rnn_input.index_select(batch_dim, order)
        if hx is not None:
            # hack lstm
            if isinstance(hx, tuple):
                hx, cx = hx
                hx = hx.index_select(1, order)
                cx = cx.index_select(1, order)
                hx = (hx, cx)
            else:
                hx = hx.index_select(1, order)

    lens = lens.tolist()
    seq = rnn_utils.pack_padded_sequence(rnn_input, lens, batch_first=batch_first)
    if masks is not None:
        if batch_first:
            masks = masks[:, :lens[0]]
        else:
            masks = masks[:lens[0]]
    return seq, hx, rev_order, masks
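A hedged end-to-end sketch of the sort-then-pack pattern prepare_rnn_seq implements, written directly against torch.nn.utils.rnn; the shapes and hidden size are made up. Unsorted lengths trigger the reordering branch, and rev_order undoes the reordering after the RNN runs.

import torch
import torch.nn.utils.rnn as rnn_utils

rnn_input = torch.randn(4, 3, 5)                       # seq_len x batch x input_size
lengths = torch.LongTensor([2, 4, 3])                  # not decreasing, so reordering is needed

lens, order = torch.sort(lengths, dim=0, descending=True)
_, rev_order = torch.sort(order)                       # indices that undo the reordering
rnn_input = rnn_input.index_select(1, order)           # reorder the batch dimension
packed = rnn_utils.pack_padded_sequence(rnn_input, lens.tolist())

rnn = torch.nn.GRU(5, 7)                               # hidden size 7 (made up)
packed_out, h_n = rnn(packed)
output, _ = rnn_utils.pad_packed_sequence(packed_out)  # seq_len x batch x 7
output = output.index_select(1, rev_order)             # restore the original batch order
print(output.shape)                                    # torch.Size([4, 3, 7])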