Python equal() example source code

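The snippets below collect real-world uses of torch.equal() / Tensor.equal() from several open-source PyTorch projects. As a quick reference, a minimal sketch of the basic semantics (True only when both shape and every element match exactly), assuming a reasonably recent PyTorch install:

import torch

a = torch.tensor([1.0, 2.0, 3.0])
b = torch.tensor([1.0, 2.0, 3.0])
c = torch.tensor([1.0, 2.0, 3.0001])

print(torch.equal(a, b))   # True: same shape, identical elements
print(torch.equal(a, c))   # False: exact comparison, no tolerance
print(a.equal(b))          # method form, same result: True
print(a == c)              # element-wise == returns a boolean tensor instead
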
common.py source code (project: pyro, author: uber)
def assert_tensors_equal(a, b, prec=1e-5, msg=''):
    assert a.size() == b.size(), msg
    if prec == 0:
        assert (a == b).all(), msg
    elif a.numel() > 0:
        b = b.type_as(a)
        b = b.cuda(device=a.get_device()) if a.is_cuda else b.cpu()
        # check that NaNs are in the same locations
        nan_mask = a != a
        assert torch.equal(nan_mask, b != b), msg
        diff = a - b
        diff[nan_mask] = 0
        if diff.is_signed():
            diff = diff.abs()
        max_err = diff.max()
        assert max_err < prec, msg
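
A hypothetical usage sketch of the helper above (not part of the pyro sources), illustrating the two behaviours it adds on top of torch.equal: NaNs are accepted when they sit in the same positions, and differences below prec are tolerated:

import torch

a = torch.Tensor([1.0, float('nan'), 3.0])
b = torch.Tensor([1.0, float('nan'), 3.0 + 1e-6])

# NaNs occupy matching positions and the ~1e-6 gap is below prec, so this passes
assert_tensors_equal(a, b, prec=1e-5)
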
kronecker_product_test.py source code (project: gpytorch, author: jrg365)
def test_kronecker_product():
    matrix_list = []
    matrix1 = torch.Tensor([
        [1, 2, 3],
        [4, 5, 6],
    ])
    matrix2 = torch.Tensor([
        [1, 2],
        [4, 3],
    ])
    matrix_list.append(matrix1)
    matrix_list.append(matrix2)
    res = kronecker_product(matrix_list)

    actual = torch.Tensor([
        [1, 2, 2, 4, 3, 6],
        [4, 3, 8, 6, 12, 9],
        [4, 8, 5, 10, 6, 12],
        [16, 12, 20, 15, 24, 18]
    ])

    assert(torch.equal(res, actual))
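
For reference, the expected matrix above is the standard Kronecker product: every entry of matrix1 is replaced by that entry times matrix2. A quick independent check of the values, assuming a PyTorch version (1.8 or later) that ships torch.kron:

import torch

A = torch.tensor([[1., 2., 3.],
                  [4., 5., 6.]])
B = torch.tensor([[1., 2.],
                  [4., 3.]])

# torch.kron builds the same 4 x 6 matrix as `actual` above
print(torch.equal(torch.kron(A, B), torch.tensor([
    [1., 2., 2., 4., 3., 6.],
    [4., 3., 8., 6., 12., 9.],
    [4., 8., 5., 10., 6., 12.],
    [16., 12., 20., 15., 24., 18.],
])))  # True
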
test_torch.py source code (project: pytorch, author: pytorch)
def test_repeat(self):

        initial_shape = (8, 4)
        tensor = torch.rand(*initial_shape)

        size = (3, 1, 1)
        torchSize = torch.Size(size)
        target = [3, 8, 4]
        self.assertEqual(tensor.repeat(*size).size(), target, 'Error in repeat')
        self.assertEqual(tensor.repeat(torchSize).size(), target,
                         'Error in repeat using LongStorage')
        result = tensor.repeat(*size)
        self.assertEqual(result.size(), target, 'Error in repeat using result')
        result = tensor.repeat(torchSize)
        self.assertEqual(result.size(), target, 'Error in repeat using result and LongStorage')
        self.assertEqual(result.mean(0).view(8, 4), tensor, 'Error in repeat (not equal)')
test_torch.py source code (project: pytorch-dist, author: apaszke)
def test_repeat(self):
        result = torch.Tensor()
        tensor = torch.rand(8, 4)
        size = (3, 1, 1)
        torchSize = torch.Size(size)
        target = [3, 8, 4]
        self.assertEqual(tensor.repeat(*size).size(), target, 'Error in repeat')
        self.assertEqual(tensor.repeat(torchSize).size(), target, 'Error in repeat using LongStorage')
        result = tensor.repeat(*size)
        self.assertEqual(result.size(), target, 'Error in repeat using result')
        result = tensor.repeat(torchSize)
        self.assertEqual(result.size(), target, 'Error in repeat using result and LongStorage')
        self.assertEqual((result.mean(0).view(8, 4)-tensor).abs().max(), 0, 'Error in repeat (not equal)')
test_torch.py source code (project: pytorch-dist, author: apaszke)
def test_equal(self):
        # Contiguous, 1D
        t1 = torch.Tensor((3, 4, 9, 10))
        t2 = t1.contiguous()
        t3 = torch.Tensor((1, 9, 3, 10))
        t4 = torch.Tensor((3, 4, 9))
        t5 = torch.Tensor()
        self.assertTrue(t1.equal(t2))
        self.assertFalse(t1.equal(t3))
        self.assertFalse(t1.equal(t4))
        self.assertFalse(t1.equal(t5))
        self.assertTrue(torch.equal(t1, t2))
        self.assertFalse(torch.equal(t1, t3))
        self.assertFalse(torch.equal(t1, t4))
        self.assertFalse(torch.equal(t1, t5))

        # Non contiguous, 2D
        s = torch.Tensor(((1, 2, 3, 4), (5, 6, 7, 8)))
        s1 = s[:,1:3]
        s2 = s1.clone()
        s3 = torch.Tensor(((2, 3), (6, 7)))
        s4 = torch.Tensor(((0, 0), (0, 0)))

        self.assertFalse(s1.is_contiguous())
        self.assertTrue(s1.equal(s2))
        self.assertTrue(s1.equal(s3))
        self.assertFalse(s1.equal(s4))
        self.assertTrue(torch.equal(s1, s2))
        self.assertTrue(torch.equal(s1, s3))
        self.assertFalse(torch.equal(s1, s4))
archival_test.py source code (project: allennlp, author: allenai)
def test_archiving(self):
        # copy params, since they'll get consumed during training
        params_copy = copy.deepcopy(self.params.as_dict())

        # `train_model` should create an archive
        model = train_model(self.params, serialization_dir=self.TEST_DIR)

        archive_path = os.path.join(self.TEST_DIR, "model.tar.gz")

        # load from the archive
        archive = load_archive(archive_path)
        model2 = archive.model

        # check that model weights are the same
        keys = set(model.state_dict().keys())
        keys2 = set(model2.state_dict().keys())

        assert keys == keys2

        for key in keys:
            assert torch.equal(model.state_dict()[key], model2.state_dict()[key])

        # check that vocabularies are the same
        vocab = model.vocab
        vocab2 = model2.vocab

        assert vocab._token_to_index == vocab2._token_to_index  # pylint: disable=protected-access
        assert vocab._index_to_token == vocab2._index_to_token  # pylint: disable=protected-access

        # check that params are the same
        params2 = archive.config
        assert params2.as_dict() == params_copy
initializers_test.py source code (project: allennlp, author: allenai)
def test_regex_matches_are_initialized_correctly(self):
        class Net(torch.nn.Module):
            def __init__(self):
                super(Net, self).__init__()
                self.linear_1_with_funky_name = torch.nn.Linear(5, 10)
                self.linear_2 = torch.nn.Linear(10, 5)
                self.conv = torch.nn.Conv1d(5, 5, 5)

            def forward(self, inputs):  # pylint: disable=arguments-differ
                pass

        # pyhocon does funny things if there's a . in a key.  This test makes sure that we
        # handle these kinds of regexes correctly.
        json_params = """{"initializer": [
        ["conv", {"type": "constant", "val": 5}],
        ["funky_na.*bi", {"type": "constant", "val": 7}]
        ]}
        """
        params = Params(pyhocon.ConfigFactory.parse_string(json_params))
        initializers = InitializerApplicator.from_params(params['initializer'])
        model = Net()
        initializers(model)

        for parameter in model.conv.parameters():
            assert torch.equal(parameter.data, torch.ones(parameter.size()) * 5)

        parameter = model.linear_1_with_funky_name.bias
        assert torch.equal(parameter.data, torch.ones(parameter.size()) * 7)
common.py source code (project: pyro, author: uber)
def assert_not_equal(x, y, prec=1e-5, msg=''):
    try:
        assert_equal(x, y, prec)
    except AssertionError:
        return  # the values differ, which is what we wanted; nothing to raise
    raise AssertionError("{} \nValues are equal: x={}, y={}, prec={}".format(msg, x, y, prec))
test_module.py source code (project: pyro, author: uber)
def test_random_module(nn_module):
    pyro.clear_param_store()
    nn_module = nn_module()
    p = Variable(torch.ones(2, 2))
    prior = dist.Bernoulli(p)
    lifted_mod = pyro.random_module("module", nn_module, prior)
    nn_module = lifted_mod()
    for name, parameter in nn_module.named_parameters():
        assert torch.equal(torch.ones(2, 2), parameter.data)
common.py source code (project: pytorch, author: tylergenter)
def assertNotEqual(self, x, y, prec=None, message=''):
        if prec is None:
            prec = self.precision

        if isinstance(x, Variable) and isinstance(y, Variable):
            x = x.data
            y = y.data

        if torch.is_tensor(x) and torch.is_tensor(y):
            if x.size() != y.size():
                super(TestCase, self).assertNotEqual(x.size(), y.size())
            self.assertGreater(x.numel(), 0)
            y = y.type_as(x)
            y = y.cuda(device=x.get_device()) if x.is_cuda else y.cpu()
            nan_mask = x != x
            if torch.equal(nan_mask, y != y):
                diff = x - y
                if diff.is_signed():
                    diff = diff.abs()
                diff[nan_mask] = 0
                max_err = diff.max()
                self.assertGreaterEqual(max_err, prec, message)
        elif type(x) == str and type(y) == str:
            super(TestCase, self).assertNotEqual(x, y)
        elif is_iterable(x) and is_iterable(y):
            super(TestCase, self).assertNotEqual(x, y)
        else:
            try:
                self.assertGreaterEqual(abs(x - y), prec, message)
                return
            except:
                pass
            super(TestCase, self).assertNotEqual(x, y, message)
test_torch.py source code (project: pytorch, author: tylergenter)
def test_repeat(self):
        result = torch.Tensor()
        tensor = torch.rand(8, 4)
        size = (3, 1, 1)
        torchSize = torch.Size(size)
        target = [3, 8, 4]
        self.assertEqual(tensor.repeat(*size).size(), target, 'Error in repeat')
        self.assertEqual(tensor.repeat(torchSize).size(), target, 'Error in repeat using LongStorage')
        result = tensor.repeat(*size)
        self.assertEqual(result.size(), target, 'Error in repeat using result')
        result = tensor.repeat(torchSize)
        self.assertEqual(result.size(), target, 'Error in repeat using result and LongStorage')
        self.assertEqual((result.mean(0).view(8, 4) - tensor).abs().max(), 0, 'Error in repeat (not equal)')
test_torch.py source code (project: pytorch, author: tylergenter)
def test_equal(self):
        # Contiguous, 1D
        t1 = torch.Tensor((3, 4, 9, 10))
        t2 = t1.contiguous()
        t3 = torch.Tensor((1, 9, 3, 10))
        t4 = torch.Tensor((3, 4, 9))
        t5 = torch.Tensor()
        self.assertTrue(t1.equal(t2))
        self.assertFalse(t1.equal(t3))
        self.assertFalse(t1.equal(t4))
        self.assertFalse(t1.equal(t5))
        self.assertTrue(torch.equal(t1, t2))
        self.assertFalse(torch.equal(t1, t3))
        self.assertFalse(torch.equal(t1, t4))
        self.assertFalse(torch.equal(t1, t5))

        # Non contiguous, 2D
        s = torch.Tensor(((1, 2, 3, 4), (5, 6, 7, 8)))
        s1 = s[:, 1:3]
        s2 = s1.clone()
        s3 = torch.Tensor(((2, 3), (6, 7)))
        s4 = torch.Tensor(((0, 0), (0, 0)))

        self.assertFalse(s1.is_contiguous())
        self.assertTrue(s1.equal(s2))
        self.assertTrue(s1.equal(s3))
        self.assertFalse(s1.equal(s4))
        self.assertTrue(torch.equal(s1, s2))
        self.assertTrue(torch.equal(s1, s3))
        self.assertFalse(torch.equal(s1, s4))
common.py source code (project: pytorch-coriander, author: hughperkins)
def assertNotEqual(self, x, y, prec=None, message=''):
        if prec is None:
            prec = self.precision

        x, y = self.unwrapVariables(x, y)

        if torch.is_tensor(x) and torch.is_tensor(y):
            if x.size() != y.size():
                super(TestCase, self).assertNotEqual(x.size(), y.size())
            self.assertGreater(x.numel(), 0)
            y = y.type_as(x)
            y = y.cuda(device=x.get_device()) if x.is_cuda else y.cpu()
            nan_mask = x != x
            if torch.equal(nan_mask, y != y):
                diff = x - y
                if diff.is_signed():
                    diff = diff.abs()
                diff[nan_mask] = 0
                max_err = diff.max()
                self.assertGreaterEqual(max_err, prec, message)
        elif type(x) == str and type(y) == str:
            super(TestCase, self).assertNotEqual(x, y)
        elif is_iterable(x) and is_iterable(y):
            super(TestCase, self).assertNotEqual(x, y)
        else:
            try:
                self.assertGreaterEqual(abs(x - y), prec, message)
                return
            except:
                pass
            super(TestCase, self).assertNotEqual(x, y, message)
test_torch.py source code (project: pytorch-coriander, author: hughperkins)
def test_repeat(self):
        result = torch.Tensor()
        tensor = torch.rand(8, 4)
        size = (3, 1, 1)
        torchSize = torch.Size(size)
        target = [3, 8, 4]
        self.assertEqual(tensor.repeat(*size).size(), target, 'Error in repeat')
        self.assertEqual(tensor.repeat(torchSize).size(), target, 'Error in repeat using LongStorage')
        result = tensor.repeat(*size)
        self.assertEqual(result.size(), target, 'Error in repeat using result')
        result = tensor.repeat(torchSize)
        self.assertEqual(result.size(), target, 'Error in repeat using result and LongStorage')
        self.assertEqual((result.mean(0).view(8, 4) - tensor).abs().max(), 0, 'Error in repeat (not equal)')
test_torch.py source code (project: pytorch-coriander, author: hughperkins)
def test_equal(self):
        # Contiguous, 1D
        t1 = torch.Tensor((3, 4, 9, 10))
        t2 = t1.contiguous()
        t3 = torch.Tensor((1, 9, 3, 10))
        t4 = torch.Tensor((3, 4, 9))
        t5 = torch.Tensor()
        self.assertTrue(t1.equal(t2))
        self.assertFalse(t1.equal(t3))
        self.assertFalse(t1.equal(t4))
        self.assertFalse(t1.equal(t5))
        self.assertTrue(torch.equal(t1, t2))
        self.assertFalse(torch.equal(t1, t3))
        self.assertFalse(torch.equal(t1, t4))
        self.assertFalse(torch.equal(t1, t5))

        # Non contiguous, 2D
        s = torch.Tensor(((1, 2, 3, 4), (5, 6, 7, 8)))
        s1 = s[:, 1:3]
        s2 = s1.clone()
        s3 = torch.Tensor(((2, 3), (6, 7)))
        s4 = torch.Tensor(((0, 0), (0, 0)))

        self.assertFalse(s1.is_contiguous())
        self.assertTrue(s1.equal(s2))
        self.assertTrue(s1.equal(s3))
        self.assertFalse(s1.equal(s4))
        self.assertTrue(torch.equal(s1, s2))
        self.assertTrue(torch.equal(s1, s3))
        self.assertFalse(torch.equal(s1, s4))
test_jit.py source code (project: pytorch, author: ezyang)
def test_python_ir(self):
        x = Variable(torch.Tensor([0.4]), requires_grad=True)
        y = Variable(torch.Tensor([0.7]), requires_grad=True)

        def doit(x, y):
            return torch.sigmoid(torch.tanh(x * (x + y)))

        traced, _ = torch.jit.trace(doit, (x, y))
        g = torch._C._jit_get_graph(traced)
        g2 = torch._C.Graph()
        g_to_g2 = {}
        for node in g.inputs():
            g_to_g2[node] = g2.addInput()
        for node in g.nodes():
            if node.kind() == "PythonOp":
                n_ = g2.create(node.pyname(),
                               [g_to_g2[i] for i in node.inputs()]) \
                    .setType(node.typeOption()) \
                    .s_("note", "from_pyop") \
                    .i_("some_value", len(node.scalar_args()))
                assert(n_.i("some_value") == len(node.scalar_args()))
            else:
                n_ = g2.createClone(node, lambda x: g_to_g2[x])
                assert(n_.kindOf("Offset") == "i")

            g_to_g2[node] = g2.appendNode(n_)

        for node in g.outputs():
            g2.registerOutput(g_to_g2[node])

        t_node = g2.create("TensorTest").t_("a", torch.ones([2, 2]))
        assert(t_node.attributeNames() == ["a"])
        g2.appendNode(t_node)
        assert(torch.equal(torch.ones([2, 2]), t_node.t("a")))
        self.assertExpected(str(g2))
test_nn.py source code (project: pytorch, author: ezyang)
def test_vector_to_parameters(self):
        conv1 = nn.Conv2d(3, 10, 5)
        fc1 = nn.Linear(10, 20)
        model = nn.Sequential(conv1, fc1)

        vec = Variable(torch.arange(0, 980))
        vector_to_parameters(vec, model.parameters())

        sample = next(model.parameters())[0, 0, 0]
        self.assertTrue(torch.equal(sample.data, vec.data[:5]))
test_nn.py source code (project: pytorch, author: ezyang)
def _test_InstanceNorm(self, cls, input):
        b, c = input.size(0), input.size(1)
        input_var = Variable(input)

        IN = cls(c, eps=0)

        output = IN(input_var)
        out_reshaped = output.transpose(1, 0).contiguous().view(c, -1)

        mean = out_reshaped.mean(1)
        var = out_reshaped.var(1, unbiased=False)

        self.assertAlmostEqual(torch.abs(mean.data).mean(), 0, delta=1e-5)
        self.assertAlmostEqual(torch.abs(var.data).mean(), 1, delta=1e-5)

        # If momentum==1 running_mean/var should be
        # equal to mean/var of the input
        IN = cls(c, momentum=1, eps=0)

        output = IN(input_var)

        input_reshaped = input_var.transpose(1, 0).contiguous().view(c, -1)
        mean = input_reshaped.mean(1)

        input_reshaped = input_var.transpose(1, 0).contiguous().view(c, b, -1)
        var = input_reshaped.var(2, unbiased=True)[:, :]

        self.assertAlmostEqual(torch.abs(mean.data - IN.running_mean).mean(), 0, delta=1e-5)
        self.assertAlmostEqual(torch.abs(var.data.mean(1) - IN.running_var).mean(), 0, delta=1e-5)
common.py source code (project: pytorch, author: ezyang)
def assertNotEqual(self, x, y, prec=None, message=''):
        if prec is None:
            prec = self.precision

        x, y = self.unwrapVariables(x, y)

        if torch.is_tensor(x) and torch.is_tensor(y):
            if x.size() != y.size():
                super(TestCase, self).assertNotEqual(x.size(), y.size())
            self.assertGreater(x.numel(), 0)
            y = y.type_as(x)
            y = y.cuda(device=x.get_device()) if x.is_cuda else y.cpu()
            nan_mask = x != x
            if torch.equal(nan_mask, y != y):
                diff = x - y
                if diff.is_signed():
                    diff = diff.abs()
                diff[nan_mask] = 0
                max_err = diff.max()
                self.assertGreaterEqual(max_err, prec, message)
        elif type(x) == str and type(y) == str:
            super(TestCase, self).assertNotEqual(x, y)
        elif is_iterable(x) and is_iterable(y):
            super(TestCase, self).assertNotEqual(x, y)
        else:
            try:
                self.assertGreaterEqual(abs(x - y), prec, message)
                return
            except:
                pass
            super(TestCase, self).assertNotEqual(x, y, message)
test_torch.py source code (project: pytorch, author: ezyang)
def test_repeat(self):
        result = torch.Tensor()
        tensor = torch.rand(8, 4)
        size = (3, 1, 1)
        torchSize = torch.Size(size)
        target = [3, 8, 4]
        self.assertEqual(tensor.repeat(*size).size(), target, 'Error in repeat')
        self.assertEqual(tensor.repeat(torchSize).size(), target, 'Error in repeat using LongStorage')
        result = tensor.repeat(*size)
        self.assertEqual(result.size(), target, 'Error in repeat using result')
        result = tensor.repeat(torchSize)
        self.assertEqual(result.size(), target, 'Error in repeat using result and LongStorage')
        self.assertEqual((result.mean(0).view(8, 4) - tensor).abs().max(), 0, 'Error in repeat (not equal)')
test_torch.py source code (project: pytorch, author: ezyang)
def test_equal(self):
        # Contiguous, 1D
        t1 = torch.Tensor((3, 4, 9, 10))
        t2 = t1.contiguous()
        t3 = torch.Tensor((1, 9, 3, 10))
        t4 = torch.Tensor((3, 4, 9))
        t5 = torch.Tensor()
        self.assertTrue(t1.equal(t2))
        self.assertFalse(t1.equal(t3))
        self.assertFalse(t1.equal(t4))
        self.assertFalse(t1.equal(t5))
        self.assertTrue(torch.equal(t1, t2))
        self.assertFalse(torch.equal(t1, t3))
        self.assertFalse(torch.equal(t1, t4))
        self.assertFalse(torch.equal(t1, t5))

        # Non contiguous, 2D
        s = torch.Tensor(((1, 2, 3, 4), (5, 6, 7, 8)))
        s1 = s[:, 1:3]
        s2 = s1.clone()
        s3 = torch.Tensor(((2, 3), (6, 7)))
        s4 = torch.Tensor(((0, 0), (0, 0)))

        self.assertFalse(s1.is_contiguous())
        self.assertTrue(s1.equal(s2))
        self.assertTrue(s1.equal(s3))
        self.assertFalse(s1.equal(s4))
        self.assertTrue(torch.equal(s1, s2))
        self.assertTrue(torch.equal(s1, s3))
        self.assertFalse(torch.equal(s1, s4))
mul_lazy_variable_test.py source code (project: gpytorch, author: jrg365)
def test_add_diag():
    lazy_var = make_mul_lazy_var()[0]
    assert torch.equal(lazy_var.evaluate().data, (t1_t2_t3_eval + added_diag.diag()))
diag_lazy_variable_test.py source code (project: gpytorch, author: jrg365)
def test_evaluate():
    diag_lv = DiagLazyVariable(Variable(diag))
    assert torch.equal(diag_lv.evaluate().data, diag.diag())
diag_lazy_variable_test.py source code (project: gpytorch, author: jrg365)
def test_get_item():
    diag_lv = DiagLazyVariable(Variable(diag))
    diag_ev = diag_lv.evaluate()
    assert torch.equal(diag_lv[0:2].evaluate().data, diag_ev[0:2].data)
sum_lazy_variable_test.py source code (project: gpytorch, author: jrg365)
def test_add_diag():
    diag = Variable(torch.Tensor([4]))
    lazy_var = make_sum_lazy_var().add_diag(diag)
    assert torch.equal(lazy_var.evaluate().data, (t1_eval + t2_eval + torch.eye(4) * 4))
sparse_test.py source code (project: gpytorch, author: jrg365)
def test_sparse_eye():
    res = sparse_eye(5)
    actual = torch.eye(5)
    assert torch.equal(res.to_dense(), actual)
sparse_test.py source code (project: gpytorch, author: jrg365)
def test_sparse_getitem_one_dim_slice():
    actual = dense[2:4]
    res = sparse_getitem(sparse, slice(2, 4))
    assert torch.equal(actual, res.to_dense())
sparse_test.py source code (project: gpytorch, author: jrg365)
def test_sparse_getitem_two_dim_int_slice():
    actual = dense[:, 1]
    res = sparse_getitem(sparse, (slice(None, None, None), 1))
    assert torch.equal(actual, res.to_dense())

    actual = dense[1, :]
    res = sparse_getitem(sparse, (1, slice(None, None, None)))
    assert torch.equal(actual, res.to_dense())
sparse_test.py source code (project: gpytorch, author: jrg365)
def test_sparse_getitem_two_dim_slice():
    actual = dense[2:4, 1:3]
    res = sparse_getitem(sparse, (slice(2, 4), slice(1, 3)))
    assert torch.equal(actual, res.to_dense())
sparse_test.py source code (project: gpytorch, author: jrg365)
def test_sparse_repeat_1d():
    sparse_1d = sparse_getitem(sparse, 1)
    actual = sparse_1d.to_dense().repeat(3, 1)
    res = sparse_repeat(sparse_1d, 3, 1)
    assert torch.equal(actual, res.to_dense())

    actual = sparse_1d.to_dense().repeat(2, 3)
    res = sparse_repeat(sparse_1d, 2, 3)
    assert torch.equal(actual, res.to_dense())

