Python ones() example source code
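The snippets below are collected from open-source projects. As a quick reference before the examples, here is a minimal self-contained sketch of the function itself (nothing project-specific assumed): torch.ones(*sizes) returns a tensor of the given shape filled with ones.

import torch

ones = torch.ones(2, 3)      # 2x3 FloatTensor, every entry 1.0
total = ones.sum()           # 6.0
weights = torch.ones(5) / 5  # uniform weights, as in uniform_weights() below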

task_net.py (project: e2e-model-learning, author: locuslab)
def forward(self, y):
        nBatch, k = y.size()

        Q_scale = torch.cat([torch.diag(torch.cat(
            [self.one, y[i], y[i]])).unsqueeze(0) for i in range(nBatch)], 0)
        Q = self.Q.unsqueeze(0).expand_as(Q_scale).mul(Q_scale)
        p_scale = torch.cat([Variable(torch.ones(nBatch,1).cuda()), y, y], 1)
        p = self.p.unsqueeze(0).expand_as(p_scale).mul(p_scale)
        G = self.G.unsqueeze(0).expand(nBatch, self.G.size(0), self.G.size(1))
        h = self.h.unsqueeze(0).expand(nBatch, self.h.size(0))
        e = Variable(torch.Tensor().cuda()).double()  # empty: no equality constraints

        out = QPFunction(verbose=False)\
            (Q.double(), p.double(), G.double(), h.double(), e, e).float()

        return out[:,:1]
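For reference, QPFunction here comes from the qpth library; per batch element it solves the quadratic program

\min_z \; \tfrac{1}{2} z^\top Q z + p^\top z \quad \text{s.t.} \quad Gz \le h, \; Az = b,

and the empty tensor e is passed for both A and b, meaning there are no equality constraints; out[:, :1] then keeps only the first coordinate of each solution.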
importance_sampling.py (project: alpha-dimt-icmlws, author: sotetsuk)
def uniform_weights(n_sample):
    """Return uniform weights (almost for debug).

    EXAMPLE
    -------
    >>> weights = uniform_weights(3)
    >>> print(weights)
    <BLANKLINE>
     0.3333
     0.3333
     0.3333
    [torch.FloatTensor of size 3]
    <BLANKLINE>

    :return: FloatTensor of length n_sample whose entries sum to 1.
    """
    weights = torch.ones(n_sample)
    return weights / weights.sum()
models.py (project: optnet, author: locuslab)
def __init__(self, nFeatures, args):
        super().__init__()

        nHidden, neq, nineq = 2*nFeatures-1, 0, 2*nFeatures-2
        assert neq == 0

        # self.fc1 = nn.Linear(nFeatures, nHidden)
        self.M = Variable(torch.tril(torch.ones(nHidden, nHidden)).cuda())

        Q = 1e-8*torch.eye(nHidden)
        Q[:nFeatures,:nFeatures] = torch.eye(nFeatures)
        self.L = Variable(torch.potrf(Q))  # Cholesky factor of Q

        self.D = Parameter(0.3*torch.randn(nFeatures-1, nFeatures))
        # self.lam = Parameter(20.*torch.ones(1))
        self.h = Variable(torch.zeros(nineq))

        self.nFeatures = nFeatures
        self.nHidden = nHidden
        self.neq = neq
        self.nineq = nineq
        self.args = args
models.py (project: optnet, author: locuslab)
def forward(self, x):
        nBatch = x.size(0)

        L = self.M*self.L  # mask L to its lower triangle
        Q = L.mm(L.t()) + self.args.eps*Variable(torch.eye(self.nHidden)).cuda()
        Q = Q.unsqueeze(0).expand(nBatch, self.nHidden, self.nHidden)
        nI = Variable(-torch.eye(self.nFeatures-1).type_as(Q.data))
        G = torch.cat((
              torch.cat(( self.D, nI), 1),
              torch.cat((-self.D, nI), 1)
        ))
        G = G.unsqueeze(0).expand(nBatch, self.nineq, self.nHidden)
        h = self.h.unsqueeze(0).expand(nBatch, self.nineq)
        e = Variable(torch.Tensor())  # empty: no equality constraints
        # p = torch.cat((-x, self.lam.unsqueeze(0).expand(nBatch, self.nFeatures-1)), 1)
        p = torch.cat((-x, Parameter(13.*torch.ones(nBatch, self.nFeatures-1).cuda())), 1)
        x = QPFunction()(Q.double(), p.double(), G.double(), h.double(), e, e).float()
        x = x[:,:self.nFeatures]

        return x
models.py (project: optnet, author: locuslab)
def get_sudoku_matrix(n):
    X = np.array([[cp.Variable(n**2) for i in range(n**2)] for j in range(n**2)])
    cons = ([x >= 0 for row in X for x in row] +
            [cp.sum_entries(x) == 1 for row in X for x in row] +
            [sum(row) == np.ones(n**2) for row in X] +
            [sum([row[i] for row in X]) == np.ones(n**2) for i in range(n**2)] +
            [sum([sum(row[i:i+n]) for row in X[j:j+n]]) == np.ones(n**2) for i in range(0,n**2,n) for j in range(0, n**2, n)])
    f = sum([cp.sum_entries(x) for row in X for x in row])
    prob = cp.Problem(cp.Minimize(f), cons)

    A = np.asarray(prob.get_problem_data(cp.ECOS)["A"].todense())
    A0 = [A[0]]
    rank = 1
    for i in range(1,A.shape[0]):
        if np.linalg.matrix_rank(A0+[A[i]], tol=1e-12) > rank:
            A0.append(A[i])
            rank += 1

    return np.array(A0)
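A hedged usage sketch (this call is illustrative and not part of the source): with n = 2 the function yields a maximal linearly independent subset of the equality-constraint rows of the 4x4 Sudoku LP relaxation.

A = get_sudoku_matrix(2)  # rows: linearly independent equality constraints
print(A.shape)            # (rank, total number of scalar variables)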
models.py (project: optnet, author: locuslab)
def __init__(self, n, Qpenalty, nineq):
        super().__init__()
        nx = (n**2)**3
        self.Q = Variable(Qpenalty*torch.eye(nx).double().cuda())
        self.G1 = Variable(-torch.eye(nx).double().cuda())
        self.h1 = Variable(torch.zeros(nx).double().cuda())
        # if trueInit:
        #     self.A = Parameter(torch.DoubleTensor(get_sudoku_matrix(n)).cuda())
        # else:
        #     # t = get_sudoku_matrix(n)
        #     # self.A = Parameter(torch.rand(t.shape).double().cuda())
        #     # import IPython, sys; IPython.embed(); sys.exit(-1)
        self.A = Parameter(torch.rand(50,nx).double().cuda())
        self.G2 = Parameter(torch.Tensor(128, nx).uniform_(-1,1).double().cuda())
        self.z2 = Parameter(torch.zeros(nx).double().cuda())
        self.s2 = Parameter(torch.ones(128).double().cuda())
        # self.b = Variable(torch.ones(self.A.size(0)).double().cuda())
models.py (project: optnet, author: locuslab)
def __init__(self, nHidden=50, nineq=200, neq=0, eps=1e-4):
        super(LenetOptNet, self).__init__()
        self.conv1 = nn.Conv2d(1, 20, kernel_size=5)
        self.conv2 = nn.Conv2d(20, 50, kernel_size=5)

        self.qp_o = nn.Linear(50*4*4, nHidden)
        self.qp_z0 = nn.Linear(50*4*4, nHidden)
        self.qp_s0 = nn.Linear(50*4*4, nineq)

        assert neq == 0
        self.M = Variable(torch.tril(torch.ones(nHidden, nHidden)).cuda())
        self.L = Parameter(torch.tril(torch.rand(nHidden, nHidden).cuda()))
        self.G = Parameter(torch.Tensor(nineq,nHidden).uniform_(-1,1).cuda())
        # self.z0 = Parameter(torch.zeros(nHidden).cuda())
        # self.s0 = Parameter(torch.ones(nineq).cuda())

        self.nHidden = nHidden
        self.nineq = nineq
        self.neq = neq
        self.eps = eps
models.py (project: optnet, author: locuslab)
def __init__(self, nFeatures, nHidden, nCls, neq, Qpenalty=0.1, eps=1e-4):
        super().__init__()

        self.nFeatures = nFeatures
        self.nHidden = nHidden
        self.nCls = nCls

        self.fc1 = nn.Linear(nFeatures, nHidden)
        self.fc2 = nn.Linear(nHidden, nCls)

        self.Q = Variable(Qpenalty*torch.eye(nHidden).double().cuda())
        self.G = Variable(-torch.eye(nHidden).double().cuda())
        self.h = Variable(torch.zeros(nHidden).double().cuda())
        self.A = Parameter(torch.rand(neq,nHidden).double().cuda())
        self.b = Variable(torch.ones(self.A.size(0)).double().cuda())

        self.neq = neq
utils.py (project: GAN-Zoo, author: corenel)
def calc_gradient_penalty(D, real_data, fake_data):
    """Calculatge gradient penalty for WGAN-GP."""
    alpha = torch.rand(params.batch_size, 1)
    alpha = alpha.expand(real_data.size())
    alpha = make_cuda(alpha)

    interpolates = make_variable(alpha * real_data + ((1 - alpha) * fake_data))
    interpolates.requires_grad = True

    disc_interpolates = D(interpolates)

    gradients = grad(outputs=disc_interpolates,
                     inputs=interpolates,
                     grad_outputs=make_cuda(
                         torch.ones(disc_interpolates.size())),
                     create_graph=True,
                     retain_graph=True,
                     only_inputs=True)[0]

    gradient_penalty = params.penalty_lambda * \
        ((gradients.norm(2, dim=1) - 1) ** 2).mean()

    return gradient_penalty
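A minimal, hedged sketch of where this penalty enters a WGAN-GP critic update (D, real_data, and fake_data as above; optimizer_D is a hypothetical torch.optim optimizer, not part of the snippet):

d_loss = D(fake_data.detach()).mean() - D(real_data).mean() \
    + calc_gradient_penalty(D, real_data, fake_data)  # critic loss + gradient penalty
optimizer_D.zero_grad()
d_loss.backward()
optimizer_D.step()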
test_nn.py (project: pytorch, author: tylergenter)
def test_Conv2d_large_workspace(self):
        # These sizes require huge cuDNN workspaces. Make sure we choose a
        # reasonable algorithm that does not run out of memory
        sizes = [
            (1, 256, 109, 175),
            (1, 256, 80, 128),
            (1, 256, 120, 192),
        ]
        dtype = torch.cuda.FloatTensor

        def run_test(benchmark):
            torch.backends.cudnn.benchmark = benchmark
            conv = torch.nn.Conv2d(256, 256, kernel_size=3, padding=1).type(dtype)
            for size in sizes:
                x = torch.randn(size).type(dtype)
                out = conv(Variable(x, requires_grad=True))
                out.backward(torch.ones(out.size()).type(dtype))

        b = torch.backends.cudnn.benchmark
        try:
            run_test(benchmark=False)
            run_test(benchmark=True)
        finally:
            torch.backends.cudnn.benchmark = b
test_autograd.py (project: pytorch, author: tylergenter)
def test_accumulate_grad(self):
        import sys

        grad_output = Variable(torch.ones(5, 5))
        for start_volatile, end_volatile in product((True, False), repeat=2):
            go1 = grad_output.data if start_volatile else grad_output
            go2 = grad_output.data if end_volatile else grad_output

            x = Variable(torch.randn(5, 5), requires_grad=True)
            y = x + 2
            y.backward(go1, retain_variables=True)
            x_grad = x.grad
            x_grad_clone = x.grad.data.clone()

            del x
            y.backward(go2)

            # That's the only case when we can accumulate in-place
            if start_volatile and end_volatile:
                expected_grad = x_grad_clone * 2
            else:
                expected_grad = x_grad_clone
            self.assertEqual(x_grad.data, expected_grad)
test_autograd.py (project: pytorch, author: tylergenter)
def test_hessian_vector(self):
        x = Variable(torch.randn(2, 2), requires_grad=True)
        y = Variable(torch.randn(2, 2), requires_grad=True)

        z = x ** 2 + y * x + y ** 2
        z.backward(Variable(torch.ones(2, 2), requires_grad=True), retain_variables=True)

        x_grad = 2 * x.data + y.data
        y_grad = x.data + 2 * y.data
        self.assertEqual(x.grad.data, x_grad)
        self.assertEqual(y.grad.data, y_grad)

        grad_sum = 2 * x.grad + y.grad
        grad_sum.backward(torch.ones(2, 2))
        x_hv = torch.ones(2, 2) * 5
        y_hv = torch.ones(2, 2) * 4
        self.assertEqual(x.grad.data, x_grad + x_hv)
        self.assertEqual(y.grad.data, y_grad + y_hv)
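Why the expected hessian-vector constants are 5 and 4: the first backward leaves x.grad = 2x + y and y.grad = x + 2y, so grad_sum = 2(2x + y) + (x + 2y) = 5x + 4y, and differentiating once more gives

\frac{\partial\, \mathrm{grad\_sum}}{\partial x} = 5, \qquad \frac{\partial\, \mathrm{grad\_sum}}{\partial y} = 4,

which the second backward accumulates on top of the existing gradients.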
test_autograd.py (project: pytorch, author: tylergenter)
def test_hooks_cycle(self):
        import gc
        counter = [0]

        class GradHook(object):
            def __init__(self, var):
                self.var = var

            def __del__(self):
                counter[0] += 1

            def __call__(self, *args):
                pass

        def run_test():
            x = Variable(torch.ones(5, 5), requires_grad=True)
            y = x * 2
            x.register_hook(GradHook(x))
            y.register_hook(GradHook(y))
            y._backward_hooks[1] = GradHook(y)

        run_test()
        gc.collect()
        self.assertEqual(counter[0], 3)
test_autograd.py (project: pytorch, author: tylergenter)
def test_volatile(self):
        x = Variable(torch.ones(5, 5), requires_grad=True)
        y = Variable(torch.ones(5, 5) * 4, volatile=True)

        z = x ** 2
        self.assertFalse(z.volatile)
        self.assertTrue(z.requires_grad)
        self.assertIsNotNone(z.grad_fn)
        z.backward(torch.ones(5, 5))
        self.assertEqual(x.grad.data, torch.ones(5, 5) * 2)

        w = z + y
        self.assertTrue(w.volatile)
        self.assertFalse(w.requires_grad)
        self.assertRaises(RuntimeError, lambda: w.backward(torch.ones(5, 5)))
        self.assertIsNone(w.grad_fn)
test_autograd.py (project: pytorch, author: tylergenter)
def test_requires_grad(self):
        x = Variable(torch.randn(5, 5))
        y = Variable(torch.randn(5, 5))
        z = Variable(torch.randn(5, 5), requires_grad=True)
        a = x + y
        self.assertFalse(a.requires_grad)
        b = a + z
        self.assertTrue(b.requires_grad)

        def error():
            raise RuntimeError
        # Make sure backward isn't called on these
        a._backward_hooks = OrderedDict()
        x._backward_hooks = OrderedDict()
        y._backward_hooks = OrderedDict()
        a._backward_hooks['test'] = error
        x._backward_hooks['test'] = error
        y._backward_hooks['test'] = error
        b.backward(torch.ones(5, 5))
test_autograd.py (project: pytorch, author: tylergenter)
def _test_setitem_tensor(self, size, index):
        x = Variable(torch.ones(*size), requires_grad=True)
        y = x + 2
        y_version = y._version
        value = Variable(torch.Tensor(x[index].size()).fill_(7), requires_grad=True)
        y[index] = value
        self.assertNotEqual(y._version, y_version)
        y.backward(torch.ones(*size))
        expected_grad_input = torch.ones(*size)
        if isinstance(index, Variable):
            index = index.data
        expected_grad_input[index] = 0
        self.assertEqual(x.grad.data, expected_grad_input)
        self.assertEqual(value.grad.data, torch.ones(value.size()))

        # case where x is not the same shape as y[1]
        x = Variable(torch.randn(1, 2), requires_grad=True)
        y = Variable(torch.zeros(10, 2))
        y[1] = x
        y.backward(torch.randn(10, 2))
        self.assertEqual(x.size(), x.grad.size())
test_autograd.py (project: pytorch, author: tylergenter)
def test_return_leaf_inplace(self):
        class Inplace(InplaceFunction):

            def forward(self, a, b):
                self.mark_dirty(a)
                return a.add_(b), b + 2

            def backward(self, grad_a, grad_b):
                return grad_a, grad_a + grad_b

        x = Variable(torch.randn(5, 5))
        y = Variable(torch.randn(5, 5), requires_grad=True)

        fn = Inplace(True)
        q, p = fn(x, y)
        self.assertIs(q, x)
        self.assertIs(q.grad_fn, fn)
        self.assertTrue(q.requires_grad)
        q.sum().backward()
        self.assertEqual(y.grad.data, torch.ones(5, 5))
test_multiprocessing.py (project: pytorch, author: tylergenter)
def _test_pool(self, ctx=mp, repeat=1):
        def do_test():
            p = ctx.Pool(2)
            for proc in p._pool:
                lc.check_pid(proc.pid)

            buffers = [torch.zeros(2, 2) for i in range(4)]
            results = p.map(simple_pool_fill, buffers, 1)
            self.assertEqual(len(results), len(buffers))
            for r in results:
                self.assertEqual(r, torch.ones(2, 2) * 5, 0)
            for b in buffers:
                self.assertEqual(b, torch.ones(2, 2) * 4, 0)

            p.close()
            p.join()

        with leak_checker(self) as lc:
            for i in range(repeat):
                do_test()
test_multiprocessing.py (project: pytorch, author: tylergenter)
def _test_autograd_sharing(self, var):
        ready = mp.Event()
        master_modified = mp.Event()
        queue = mp.Queue()
        p = mp.Process(target=autograd_sharing, args=(queue, ready, master_modified))
        p.daemon = True
        p.start()
        var._grad = Variable(torch.zeros(5, 5), requires_grad=False)
        queue.put(var)

        ready.wait()
        var.data[0, 0] = 1000
        var.grad.data[:] = torch.ones(5, 5) * 4
        master_modified.set()

        worker_ok = queue.get()
        self.assertTrue(worker_ok)

        self.assertEqual(var.data, torch.ones(5, 5))
        self.assertEqual(var.grad.data, torch.ones(5, 5) * 4)
        p.join(1)
        self.assertFalse(p.is_alive())
test_utils.py (project: pytorch, author: tylergenter)
def test_gpu(self):
        compile_extension(
            name='gpulib',
            header=test_dir + '/ffi/src/cuda/cudalib.h',
            sources=[
                test_dir + '/ffi/src/cuda/cudalib.c',
            ],
            with_cuda=True,
            verbose=False,
        )
        import gpulib
        tensor = torch.ones(2, 2).float()

        gpulib.good_func(tensor, 2, 1.5)
        self.assertEqual(tensor, torch.ones(2, 2) * 2 + 1.5)

        ctensor = tensor.cuda().fill_(1)
        gpulib.cuda_func(ctensor, 2, 1.5)
        self.assertEqual(ctensor, torch.ones(2, 2) * 2 + 1.5)

        self.assertRaises(TypeError,
                          lambda: gpulib.cuda_func(tensor, 2, 1.5))
        self.assertRaises(TypeError,
                          lambda: gpulib.cuda_func(ctensor.storage(), 2, 1.5))

