Python randn() usage examples
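
All snippets below demonstrate torch.randn() and are written against the PyTorch 0.x API: tensors are wrapped in Variable, and legacy names such as volatile, retain_variables, and torch.potrf appear throughout. Later releases merged Variable into Tensor, replaced volatile with torch.no_grad(), and renamed retain_variables to retain_graph and potrf to cholesky.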

vgg.py (project: sketchnet, author: jtoy)
def _make_layers(self, cfg):
        layers = []
        in_channels = 3
        for x in cfg:
            if x == 'M':
                layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
            else:
                layers += [nn.Conv2d(in_channels, x, kernel_size=3, padding=1),
                           nn.BatchNorm2d(x),
                           nn.ReLU(inplace=True)]
                in_channels = x
        layers += [nn.AvgPool2d(kernel_size=1, stride=1)]
        return nn.Sequential(*layers)

# net = VGG('VGG11')
# x = torch.randn(2,3,32,32)
# print(net(Variable(x)).size())
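
The cfg list drives the layer stack: each integer adds a Conv2d-BatchNorm-ReLU block with that many output channels, and 'M' adds a 2x2 max pool. For reference, here is the config that the commented-out VGG('VGG11') call would resolve to, assuming the standard VGG-11 table (the config itself is not part of this snippet):

# Assumed VGG-11 config: ints are conv output channels, 'M' is a 2x2 max pool.
cfg = [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M']
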
models.py (project: optnet, author: locuslab)
def __init__(self, nFeatures, args):
        super().__init__()

        nHidden, neq, nineq = 2 * nFeatures - 1, 0, 2 * nFeatures - 2
        assert neq == 0

        # self.fc1 = nn.Linear(nFeatures, nHidden)
        self.M = Variable(torch.tril(torch.ones(nHidden, nHidden)).cuda())

        Q = 1e-8*torch.eye(nHidden)
        Q[:nFeatures,:nFeatures] = torch.eye(nFeatures)
        self.L = Variable(torch.potrf(Q))

        self.D = Parameter(0.3*torch.randn(nFeatures-1, nFeatures))
        # self.lam = Parameter(20.*torch.ones(1))
        self.h = Variable(torch.zeros(nineq))

        self.nFeatures = nFeatures
        self.nHidden = nHidden
        self.neq = neq
        self.nineq = nineq
        self.args = args
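
torch.potrf(Q) computes a Cholesky factorization of Q, which the 1e-8 ridge on the diagonal keeps positive definite. potrf was later removed; below is a minimal sketch of the same factorization against the modern API, with an example nFeatures value (an assumption, not taken from the snippet):

import torch

nFeatures = 4                            # example size only
nHidden = 2 * nFeatures - 1
Q = 1e-8 * torch.eye(nHidden)            # small ridge keeps Q positive definite
Q[:nFeatures, :nFeatures] = torch.eye(nFeatures)
L = torch.linalg.cholesky(Q)             # lower factor; potrf defaulted to upper
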
test_nn.py (project: pytorch, author: tylergenter)
def test_parallel_apply(self):
        l1 = nn.Linear(10, 5).float().cuda(0)
        l2 = nn.Linear(10, 5).float().cuda(1)
        i1 = Variable(torch.randn(2, 10).float().cuda(0))
        i2 = Variable(torch.randn(2, 10).float().cuda(1))
        expected1 = l1(i1).data
        expected2 = l2(i2).data
        inputs = ((i1,), (i2,))
        modules = (l1, l2)
        expected_outputs = (expected1, expected2)

        outputs = dp.parallel_apply(modules, inputs, None)
        for out, expected in zip(outputs, expected_outputs):
            self.assertEqual(out.data, expected)

        # inputs may also be bare Variables rather than one-element tuples
        inputs = (i1, Variable(i2.data.new()))
        expected_outputs = (expected1, expected2.new())
        outputs = dp.parallel_apply(modules, inputs, None)
        for out, expected in zip(outputs, expected_outputs):
            self.assertEqual(out.data, expected)
test_nn.py (project: pytorch, author: tylergenter)
def test_data_parallel_nested_output(self):
        def fn(input):
            return [input, (input.sin(), input.cos(), [input.add(1)]), input]

        class Net(nn.Module):
            def forward(self, input):
                return fn(input)

        i = Variable(torch.randn(2, 2).float().cuda(1))
        gpus = range(torch.cuda.device_count())
        output = dp.data_parallel(Net(), i, gpus)
        self.assertEqual(output, fn(i))
        self.assertIsInstance(output[0], Variable)
        self.assertIsInstance(output[1], tuple)
        self.assertIsInstance(output[1][0], Variable)
        self.assertIsInstance(output[1][1], Variable)
        self.assertIsInstance(output[1][2], list)
        self.assertIsInstance(output[1][2][0], Variable)
        self.assertIsInstance(output[2], Variable)
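
The test exercises structure preservation: data_parallel must scatter the input, replicate the module, and gather the outputs while keeping the nested list/tuple shape returned by fn intact, which is what the chain of assertIsInstance checks verifies.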
test_nn.py (project: pytorch, author: tylergenter)
def test_Conv2d_large_workspace(self):
        # These sizes require huge cuDNN workspaces. Make sure we choose a
        # reasonable algorithm that does not run out of memory
        sizes = [
            (1, 256, 109, 175),
            (1, 256, 80, 128),
            (1, 256, 120, 192),
        ]
        dtype = torch.cuda.FloatTensor

        def run_test(benchmark):
            torch.backends.cudnn.benchmark = benchmark
            conv = torch.nn.Conv2d(256, 256, kernel_size=3, padding=1).type(dtype)
            for size in sizes:
                x = torch.randn(size).type(dtype)
                out = conv(Variable(x, requires_grad=True))
                out.backward(torch.ones(out.size()).type(dtype))

        b = torch.backends.cudnn.benchmark
        try:
            run_test(benchmark=False)
            run_test(benchmark=True)
        finally:
            torch.backends.cudnn.benchmark = b
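
torch.backends.cudnn.benchmark = True lets cuDNN autotune the convolution algorithm per input shape, while False forces a heuristic choice; the test exercises both paths and restores the original flag in the finally block so no global state leaks into other tests.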
test_nn.py (project: pytorch, author: tylergenter)
def test_Conv2d_groups_nobias(self):
        m = nn.Conv2d(4, 4, kernel_size=3, groups=2, bias=False)
        i = Variable(torch.randn(2, 4, 6, 6), requires_grad=True)
        output = m(i)
        grad_output = torch.randn(2, 4, 4, 4)
        output.backward(grad_output)

        m1 = nn.Conv2d(2, 2, kernel_size=3, bias=False)
        m1.weight.data.copy_(m.weight.data[:2])
        i1 = Variable(i.data[:, :2].contiguous(), requires_grad=True)
        output1 = m1(i1)
        output1.backward(grad_output[:, :2].contiguous())

        m2 = nn.Conv2d(2, 2, kernel_size=3, bias=False)
        m2.weight.data.copy_(m.weight.data[2:])
        i2 = Variable(i.data[:, 2:].contiguous(), requires_grad=True)
        output2 = m2(i2)
        output2.backward(grad_output[:, 2:].contiguous())

        self.assertEqual(output, torch.cat([output1, output2], 1))
        self.assertEqual(i.grad.data,
                         torch.cat([i1.grad.data, i2.grad.data], 1))
        self.assertEqual(m.weight.grad.data,
                         torch.cat([m1.weight.grad.data, m2.weight.grad.data], 0))
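
The assertions encode the defining property of grouped convolution: with groups=2, the 4 input and 4 output channels split into two independent halves, so the grouped layer equals two separate 2-in/2-out convolutions concatenated along the channel axis. A minimal standalone sketch of that equivalence, written against the modern API as an assumption (the test above uses the 0.x Variable wrapper):

import torch
import torch.nn.functional as F

conv = torch.nn.Conv2d(4, 4, kernel_size=3, groups=2, bias=False)
x = torch.randn(2, 4, 6, 6)
top = F.conv2d(x[:, :2], conv.weight[:2])      # first half of channels/filters
bottom = F.conv2d(x[:, 2:], conv.weight[2:])   # second half
assert torch.allclose(conv(x), torch.cat([top, bottom], 1), atol=1e-6)
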
test_nn.py (project: pytorch, author: tylergenter)
def test_container_copy(self):
        class Model(nn.Module):
            def __init__(self):
                super(Model, self).__init__()
                self.linear = nn.Linear(4, 5)

            def forward(self, input):
                return self.linear(input)

        input = Variable(torch.randn(2, 4))

        model = Model()
        model_cp = deepcopy(model)  # copy.deepcopy, imported at module level in the original test file
        self.assertEqual(model(input).data, model_cp(input).data)

        model_cp.linear.weight.data[:] = 2
        self.assertNotEqual(model(input).data, model_cp(input).data)
test_autograd.py (project: pytorch, author: tylergenter)
def _function_test(self, cls):
        x = Variable(torch.randn(5, 5), requires_grad=True)
        y = Variable(torch.randn(5, 5), requires_grad=True)
        result = cls.apply(x, 2, y)
        go = Variable(torch.ones(1), requires_grad=True)
        result.sum().backward(go)

        self.assertEqual(x.grad.data, y.data + torch.ones(5, 5))
        self.assertEqual(y.grad.data, x.data + torch.ones(5, 5) * 2)

        self.assertFalse(x.grad.volatile)
        self.assertFalse(y.grad.volatile)
        self.assertIsNotNone(x.grad.grad_fn)
        self.assertIsNotNone(y.grad.grad_fn)

        return x, y
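
Working backwards from the assertions, cls behaves like a custom autograd Function whose forward computes roughly x + scalar * y + x * y: that gives d/dx = 1 + y (i.e. y plus ones) and d/dy = scalar + x with scalar = 2, matching both assertEqual lines. This is an inference from the gradients, not code shown here.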
test_autograd.py (project: pytorch, author: tylergenter)
def test_accumulate_grad(self):
        from itertools import product

        grad_output = Variable(torch.ones(5, 5))
        for start_volatile, end_volatile in product((True, False), repeat=2):
            go1 = grad_output.data if start_volatile else grad_output
            go2 = grad_output.data if end_volatile else grad_output

            x = Variable(torch.randn(5, 5), requires_grad=True)
            y = x + 2
            y.backward(go1, retain_variables=True)
            x_grad = x.grad
            x_grad_clone = x.grad.data.clone()

            del x
            y.backward(go2)

            # That's the only case when we can accumulate in-place
            if start_volatile and end_volatile:
                expected_grad = x_grad_clone * 2
            else:
                expected_grad = x_grad_clone
            self.assertEqual(x_grad.data, expected_grad)
test_autograd.py (project: pytorch, author: tylergenter)
def test_grad_nonleaf_many_outputs(self):
        # This checks an edge case for function callbacks
        # We want to capture two grads of a function, but can only
        # register a single callback.
        x = Variable(torch.randn(4, 2), requires_grad=True)
        a, b = x.chunk(2)

        def hook(*grads):
            hook_called[0] = True
        hook_called = [False]
        x.register_hook(hook)

        go = torch.randn(2, 2)
        grad_a, grad_b = torch.autograd.grad(
            (a + 2 * b), [a, b], grad_outputs=go, create_graph=True)

        self.assertEqual(grad_a, go)
        self.assertEqual(grad_b, go * 2)
        self.assertFalse(hook_called[0])
        self.assertIsNone(x.grad)
test_autograd.py (project: pytorch, author: tylergenter)
def _test_backward(self):
        v_t = torch.randn(5, 5)
        x_t = torch.randn(5, 5)
        y_t = torch.rand(5, 5) + 0.1
        z_t = torch.randn(5, 5)
        grad_output = torch.randn(5, 5)
        v = Variable(v_t, requires_grad=True)
        x = Variable(x_t, requires_grad=True)
        y = Variable(y_t, requires_grad=True)
        z = Variable(z_t, requires_grad=True)

        v.backward(grad_output)
        self.assertEqual(v.grad.data, grad_output)

        a = x + (y * z) + 4 * z ** 2 * x / y
        a.backward(grad_output)
        x_grad = 4 * z_t.pow(2) / y_t + 1
        y_grad = z_t - 4 * x_t * z_t.pow(2) / y_t.pow(2)
        z_grad = 8 * x_t * z_t / y_t + y_t
        self.assertEqual(x.grad.data, x_grad * grad_output)
        self.assertEqual(y.grad.data, y_grad * grad_output)
        self.assertEqual(z.grad.data, z_grad * grad_output)
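
The expected gradients are the hand-derived partials of a = x + y*z + 4*z**2*x/y: da/dx = 1 + 4*z**2/y, da/dy = z - 4*x*z**2/y**2, and da/dz = y + 8*x*z/y, each multiplied elementwise by grad_output via the chain rule.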
test_autograd.py (project: pytorch, author: tylergenter)
def test_multi_backward(self):
        x = Variable(torch.randn(5, 5), requires_grad=True)
        y = Variable(torch.randn(5, 5), requires_grad=True)

        q = Variable(torch.randn(5, 5), requires_grad=True)

        a = Variable(torch.randn(5, 5), requires_grad=True)
        b = Variable(torch.randn(5, 5), requires_grad=True)

        q2 = q * 2
        z = x + y + q2
        c = a * b + q2
        grad_z = torch.randn(5, 5)
        grad_c = torch.randn(5, 5)
        torch.autograd.backward([z, c], [grad_z, grad_c])

        self.assertEqual(x.grad.data, grad_z)
        self.assertEqual(y.grad.data, grad_z)
        self.assertEqual(a.grad.data, grad_c * b.data)
        self.assertEqual(b.grad.data, grad_c * a.data)
        self.assertEqual(q.grad.data, (grad_c + grad_z) * 2)
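
The last assertion follows from gradient accumulation: q2 = q * 2 feeds both z and c, so q receives (grad_z + grad_c) * 2 when both heads are backpropagated in a single call.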
policies.py (project: drl.pth, author: seba-1511)
def forward(self, x, *args, **kwargs):
        action = super(DiagonalGaussianPolicy, self).forward(x, *args, **kwargs)
        size = action.raw.size()
        std = self.logstd.exp().expand_as(action.raw)
        value = action.raw + std * V(th.randn(size))
        value = value.detach()
        action.value = value
#        action.logstd = self.logstd.clone()
        action.logstd = self.logstd
        action.prob = lambda: self._normal(value, action.raw, action.logstd)
        action.entropy = action.logstd + self.halflog2pie
        var = std.pow(2)
        action.compute_log_prob = lambda a: (- ((a - action.raw).pow(2) / (2.0 * var)) - self.halflog2pi - action.logstd).mean(1)
        action.log_prob = action.compute_log_prob(value)
        return action
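
compute_log_prob is the per-dimension log-density of a diagonal Gaussian with mean action.raw and standard deviation exp(logstd), averaged over action dimensions:

    log p(a) = -(a - mu)^2 / (2 * sigma^2) - 0.5 * log(2*pi) - log(sigma)

so self.halflog2pi presumably holds 0.5 * log(2*pi), and the entropy line uses self.halflog2pie for 0.5 * log(2*pi*e), since the entropy of N(mu, sigma^2) is log(sigma) + 0.5 * log(2*pi*e). The sampling step value = raw + std * randn(...) is the standard reparameterized draw from that Gaussian.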
resnext.py (project: YellowFin_Pytorch, author: JianGoForIt)
def test_resnext():
    net = ResNeXt29_2x64d()
    x = torch.randn(1, 3, 32, 32)
    y = net(Variable(x))
    print(y.size())

# test_resnext()
test.py (project: diracnets, author: szagoruyko)
def test_dirac_property1d(self):
        ni, no, k, pad = 4, 4, 3, 1
        module = DiracConv1d(in_channels=ni, out_channels=no, kernel_size=k, padding=pad, bias=False)
        module.alpha.data.fill_(1)
        module.beta.data.fill_(0)
        x = Variable(torch.randn(4, ni, 5))
        y = module(x)
        self.assertEqual(y.size(), x.size(), 'shape check')
        self.assertEqual((y - x).data.abs().sum(), 0, 'dirac delta property check')
test.py (project: diracnets, author: szagoruyko)
def test_dirac_property2d(self):
        ni, no, k, pad = 4, 4, 3, 1
        module = DiracConv2d(in_channels=ni, out_channels=no, kernel_size=k, padding=pad, bias=False)
        module.alpha.data.fill_(1)
        module.beta.data.fill_(0)
        x = Variable(torch.randn(4, ni, 5, 5))
        y = module(x)
        self.assertEqual(y.size(), x.size(), 'shape check')
        self.assertEqual((y - x).data.abs().sum(), 0, 'dirac delta property check')
test.py (project: diracnets, author: szagoruyko)
def test_dirac_property3d(self):
        ni, no, k, pad = 4, 4, 3, 1
        module = DiracConv3d(in_channels=ni, out_channels=no, kernel_size=k, padding=pad, bias=False)
        module.alpha.data.fill_(1)
        module.beta.data.fill_(0)
        x = Variable(torch.randn(4, ni, 5, 5, 5))
        y = module(x)
        self.assertEqual(y.size(), x.size(), 'shape check')
        self.assertEqual((y - x).data.abs().sum(), 0, 'dirac delta property check')
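
All three property tests check the same invariant: with alpha = 1 and beta = 0, a DiracConv reduces to a Dirac-delta (identity) convolution, so the output equals the input exactly. The identity kernel itself can be built with torch.nn.init.dirac_; a minimal sketch of the invariant outside the DiracConv modules (an assumption, not the project's own code):

import torch
import torch.nn.functional as F

w = torch.empty(4, 4, 3)      # (out_channels, in_channels, kernel_size)
torch.nn.init.dirac_(w)       # sets w[d, d, 1] = 1: each channel copies through
x = torch.randn(2, 4, 5)
y = F.conv1d(x, w, padding=1)
assert torch.allclose(y, x)   # the Dirac kernel acts as the identity
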
test.py (project: diracnets, author: szagoruyko)
def test_nonsquare(self):
        ni, no, k, pad = 8, 4, 3, 1
        module = DiracConv2d(in_channels=ni, out_channels=no, kernel_size=k, padding=pad, bias=False)
        x = Variable(torch.randn(4, ni, 5, 5))
        y = module(x)  # smoke test: forward should run even though in/out channel counts differ
test.py (project: diracnets, author: szagoruyko)
def test_cifar10(self):
        inputs = Variable(torch.randn(1,3,32,32))
        f, params, stats = define_diracnet(34, 1, 'CIFAR10')
        outputs = f(inputs, params, stats, mode=False)
        self.assertEqual(outputs.size(), torch.Size((1, 10)))
focal_loss.py (project: RetinaNet, author: c0nn3r)
def test_focal_loss():
    loss = FocalLoss()

    input = Variable(torch.randn(3, 5), requires_grad=True)
    target = Variable(torch.LongTensor(3).random_(5))

    print(input)
    print(target)

    output = loss(input, target)
    print(output)
    output.backward()
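
FocalLoss is invoked exactly like nn.CrossEntropyLoss: raw logits of shape (N, C) against integer class targets. Focal loss (Lin et al., RetinaNet) down-weights easy examples, FL(p_t) = -(1 - p_t)^gamma * log(p_t), and reduces to cross entropy at gamma = 0; the snippet only smoke-tests the forward and backward passes.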

