Python eye() example source code
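
The snippets below are collected from open-source PyTorch projects; each uses torch.eye() (or nn.init.eye), which builds an identity matrix. As orientation, here is a minimal standalone sketch of the call itself; the shapes and values are illustrative, not taken from any of the projects below.

import torch

# torch.eye(n) returns an n x n matrix with ones on the diagonal.
I = torch.eye(3)
# tensor([[1., 0., 0.],
#         [0., 1., 0.],
#         [0., 0., 1.]])

# A rectangular form torch.eye(n, m) also exists.
R = torch.eye(2, 4)  # 2 x 4, ones on the main diagonal

# A recurring pattern below: use the identity as a regularization
# target, penalizing W^T W - I to push W toward orthonormal columns.
W = torch.randn(5, 3)
penalty = ((W.t() @ W - torch.eye(3)) ** 2).sum()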

test_gel.py (project: torch-gel, author: jayanthkoushik)
def test_cd_newton(self):
        """Test the CD implementation with Newton internal solver."""
        # Compute the C_js and I_js
        Cs = [(A_j.t()@A_j)/self.m for A_j in self.As]
        Is = [torch.eye(n_j) for n_j in self.ns]
        self._test_implementation(make_A_cd, gel_solve_cd,
                                  block_solve_fun=block_solve_newton,
                                  block_solve_kwargs={
                                      "ls_alpha": 0.01,
                                      "ls_beta": 0.9,
                                      "max_iters": 4,
                                      "tol": 1e-10
                                  }, max_cd_iters=None, rel_tol=1e-6, Cs=Cs,
                                  Is=Is)
kernel.py (project: sef, author: passalis)
def _regularizer(self):
        if self.use_gpu:
            regularizer = torch.mm(self.A.transpose(0, 1), torch.mm(self.symbolic_kernel(self.X_kernel), self.A)) \
                          - Variable(torch.eye(self.A.size(1)).cuda())
        else:
            regularizer = torch.mm(self.A.transpose(0, 1), torch.mm(self.symbolic_kernel(self.X_kernel), self.A)) \
                          - Variable(torch.eye(self.A.size(1)))

        return 0.5 * torch.sum(regularizer ** 2) / (self.A.size(1) ** 2)
linear.py (project: sef, author: passalis)
def _regularizer(self):

        if self.use_gpu:
            regularizer = torch.mm(self.W.transpose(0, 1), self.W) - Variable(torch.eye(self.W.size(1)).cuda())
        else:
            regularizer = torch.mm(self.W.transpose(0, 1), self.W) - Variable(torch.eye(self.W.size(1)))
        return 0.5 * torch.sum(regularizer ** 2) / (self.W.size(1) ** 2)
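
Both sef snippets above implement the same idea: penalize the Frobenius distance between A^T K A (or W^T W) and the identity so the learned projection stays near-orthonormal. A minimal self-contained sketch of that pattern, using the current tensor API instead of the Variable wrappers above (dimensions are illustrative):

import torch

d_in, d_out = 10, 4
W = torch.randn(d_in, d_out, requires_grad=True)

# 0.5 * ||W^T W - I||_F^2 / d_out^2, the quantity linear.py computes.
gram = W.t() @ W                                            # d_out x d_out
penalty = 0.5 * ((gram - torch.eye(d_out)) ** 2).sum() / d_out ** 2
penalty.backward()   # gradient pushes W toward orthonormal columns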
multiheaded_test.py (project: allennlp, author: allenai)
def test_forward(self):
        # pylint: disable=protected-access
        similarity = MultiHeadedSimilarity(num_heads=3, tensor_1_dim=6)
        similarity._tensor_1_projection = Parameter(torch.eye(6))
        similarity._tensor_2_projection = Parameter(torch.eye(6))
        a_vectors = Variable(torch.FloatTensor([[[[1, 1, -1, -1, 0, 1], [-2, 5, 9, -1, 3, 4]]]]))
        b_vectors = Variable(torch.FloatTensor([[[[1, 1, 1, 0, 2, 5], [0, 1, -1, -7, 1, 2]]]]))
        result = similarity(a_vectors, b_vectors).data.numpy()
        assert result.shape == (1, 1, 2, 3)
        assert_almost_equal(result, [[[[2, -1, 5], [5, -2, 11]]]])
bnlstm.py (project: FewShotLearning, author: gitabcworld)
def reset_parameters(self):
        """
        Initialize parameters following the way proposed in the paper.
        """

        init.orthogonal(self.weight_ih.data)
        weight_hh_data = torch.eye(self.hidden_size)
        weight_hh_data = weight_hh_data.repeat(1, 4)
        self.weight_hh.data.set_(weight_hh_data)
        # The bias is just set to zero vectors.
        if self.use_bias:
            init.constant(self.bias.data, val=0)
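
Here the hidden-to-hidden weights are initialized as identity blocks, one per LSTM gate, presumably following the paper the docstring refers to. A compact sketch of just the tiling (hidden_size is illustrative):

import torch

hidden_size = 4
# One identity block per LSTM gate, tiled side by side:
w_hh = torch.eye(hidden_size).repeat(1, 4)
assert w_hh.shape == (hidden_size, 4 * hidden_size)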
selfatt-model.py (project: pmet, author: bkj)
def __init__(self, n_chars, n_classes, emb_dim=64, rec_hidden_dim=32, att_dim=30, att_channels=16):
        super(ACharacterLSTM, self).__init__()

        self.char_embs = nn.Embedding(n_chars, emb_dim, padding_idx=0)
        self.rnn = nn.LSTM(emb_dim, int(rec_hidden_dim / 2), bias=False, bidirectional=True)

        self.att1 = nn.Linear(rec_hidden_dim, att_dim, bias=False)
        self.att2 = nn.Linear(att_dim, att_channels, bias=False)

        self.fc1 = nn.Linear(att_channels * rec_hidden_dim, n_classes)

        self.I = Variable(torch.eye(att_channels)).cuda()
e2c.py (project: e2c-pytorch, author: ethanluoyc)
def cov(self):
        """This should only be called when NormalDistribution represents one sample"""
        if self.v is not None and self.r is not None:
            assert self.v.dim() == 1
            dim = self.v.size(0)  # D, the length of v
            v = self.v.unsqueeze(1)  # D * 1 vector
            rt = self.r.unsqueeze(0)  # 1 * D vector
            A = torch.eye(dim) + v.mm(rt)
            return A.mm(torch.diag(self.sigma.pow(2)).mm(A.t()))
        else:
            return torch.diag(self.sigma.pow(2))
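
A concrete sketch of the construction above: A = I + v r^T is a rank-one perturbation of the identity, and the covariance is A diag(sigma^2) A^T, which is symmetric by construction. All values here are illustrative:

import torch

D = 3
v, r = torch.randn(D), torch.randn(D)
sigma = torch.rand(D) + 0.1

A = torch.eye(D) + v.unsqueeze(1) @ r.unsqueeze(0)   # D x D, I + v r^T
cov = A @ torch.diag(sigma ** 2) @ A.t()

assert torch.allclose(cov, cov.t())                  # symmetric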
models.py (project: optnet, author: locuslab)
def __init__(self, n, Qpenalty, trueInit=False):
        super().__init__()
        nx = (n**2)**3
        self.Q = Variable(Qpenalty*torch.eye(nx).double().cuda())
        self.G = Variable(-torch.eye(nx).double().cuda())
        self.h = Variable(torch.zeros(nx).double().cuda())
        t = get_sudoku_matrix(n)
        if trueInit:
            self.A = Parameter(torch.DoubleTensor(t).cuda())
        else:
            self.A = Parameter(torch.rand(t.shape).double().cuda())
        self.b = Variable(torch.ones(self.A.size(0)).double().cuda())
models.py (project: optnet, author: locuslab)
def __init__(self, n, Qpenalty, nLatent, nineq, trueInit=False):
        super().__init__()
        nx = (n**2)**3
        self.fc_in = nn.Linear(nx, nLatent)
        self.Q = Variable(Qpenalty*torch.eye(nLatent).cuda())
        self.G = Parameter(torch.Tensor(nineq, nLatent).uniform_(-1,1).cuda())
        self.z = Parameter(torch.zeros(nLatent).cuda())
        self.s = Parameter(torch.ones(nineq).cuda())
        self.fc_out = nn.Linear(nLatent, nx)
models.py (project: optnet, author: locuslab)
def __init__(self, nHidden, nCls=10, proj='softmax'):
        super(Lenet, self).__init__()
        self.conv1 = nn.Conv2d(1, 20, kernel_size=5)
        self.conv2 = nn.Conv2d(20, 50, kernel_size=5)
        self.fc1 = nn.Linear(50*4*4, nHidden)
        self.fc2 = nn.Linear(nHidden, nCls)

        self.proj = proj
        self.nCls = nCls

        if proj == 'simproj':
            self.Q = Variable(0.5*torch.eye(nCls).double().cuda())
            self.G = Variable(-torch.eye(nCls).double().cuda())
            self.h = Variable(-1e-5*torch.ones(nCls).double().cuda())
            self.A = Variable((torch.ones(1, nCls)).double().cuda())
            self.b = Variable(torch.Tensor([1.]).double().cuda())
            def projF(x):
                nBatch = x.size(0)
                Q = self.Q.unsqueeze(0).expand(nBatch, nCls, nCls)
                G = self.G.unsqueeze(0).expand(nBatch, nCls, nCls)
                h = self.h.unsqueeze(0).expand(nBatch, nCls)
                A = self.A.unsqueeze(0).expand(nBatch, 1, nCls)
                b = self.b.unsqueeze(0).expand(nBatch, 1)
                x = QPFunction()(Q, -x.double(), G, h, A, b).float()
                x = x.log()
                return x
            self.projF = projF
        else:
            self.projF = F.log_softmax
optnet-single.py (project: optnet, author: locuslab)
def prof_instance(nz, neq, nineq, nIter, cuda):
    L = np.tril(npr.uniform(0,1, (nz,nz))) + np.eye(nz,nz)
    G = npr.randn(nineq,nz)
    A = npr.randn(neq,nz)
    z0 = npr.randn(nz)
    s0 = np.ones(nineq)
    p = npr.randn(nz)

    p, L, G, A, z0, s0 = [torch.Tensor(x) for x in [p, L, G, A, z0, s0]]
    Q = torch.mm(L, L.t())+0.001*torch.eye(nz).type_as(L)
    if cuda:
        p, L, Q, G, A, z0, s0 = [x.cuda() for x in [p, L, Q, G, A, z0, s0]]

    af = adact.AdactFunction()

    start = time.time()
    # One-time cost for numpy conversion.
    p_np, L_np, G_np, A_np, z0_np, s0_np = [adact.toNp(v) for v in [p, L, G, A, z0, s0]]
    cp = time.time()-start
    for i in range(nIter):
        start = time.time()
        zhat, nu, lam = af.forward_single_np(p_np, L_np, G_np, A_np, z0_np, s0_np)
        cp += time.time()-start

    b = torch.mv(A, z0) if neq > 0 else None
    h = torch.mv(G, z0)+s0
    L_Q, L_S, R = aip.pre_factor_kkt(Q, G, A, nineq, neq)
    pdipm = []
    for i in range(nIter):
        start = time.time()
        zhat_ip, nu_ip, lam_ip = aip.forward_single(p, Q, G, A, b, h, L_Q, L_S, R)
        pdipm.append(time.time()-start)
    return cp, np.sum(pdipm)
functional.py (project: pytorch, author: tylergenter)
def btriunpack(LU_data, LU_pivots, unpack_data=True, unpack_pivots=True):
    """Unpacks the data and pivots from a batched LU factorization (btrifact) of a tensor.

    Returns a tuple indexed by:
      0: The pivots.
      1: The L tensor.
      2: The U tensor.

    Arguments:
        LU_data (Tensor): The packed LU factorization data.
        LU_pivots (Tensor): The packed LU factorization pivots.
        unpack_data (bool): Flag indicating if the data should be unpacked.
        unpack_pivots (bool): Flag indicating if the pivots should be unpacked.
    """

    nBatch, sz, _ = LU_data.size()

    if unpack_data:
        I_U = torch.triu(torch.ones(sz, sz)).type_as(LU_data).byte().unsqueeze(0).expand(nBatch, sz, sz)
        I_L = 1 - I_U
        L = LU_data.new(LU_data.size()).zero_()
        U = LU_data.new(LU_data.size()).zero_()
        I_diag = torch.eye(sz).type_as(LU_data).byte().unsqueeze(0).expand(nBatch, sz, sz)
        L[I_diag] = 1.0
        L[I_L] = LU_data[I_L]
        U[I_U] = LU_data[I_U]
    else:
        L = U = None

    if unpack_pivots:
        P = torch.eye(sz).type_as(LU_data).unsqueeze(0).repeat(nBatch, 1, 1)
        for i in range(nBatch):
            for j in range(sz):
                k = LU_pivots[i, j] - 1
                t = P[i, :, j].clone()
                P[i, :, j] = P[i, :, k]
                P[i, :, k] = t
    else:
        P = None

    return P, L, U
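
A usage sketch for the function above, against the legacy btrifact API this version of functional.py targets (in later PyTorch releases these became torch.lu and torch.lu_unpack):

import torch

# Batched LU factorization of two random 4 x 4 matrices (legacy API).
A = torch.randn(2, 4, 4)
LU_data, LU_pivots = A.btrifact()

P, L, U = btriunpack(LU_data, LU_pivots)
# P @ L @ U reconstructs each matrix in the batch.
recon = torch.bmm(P, torch.bmm(L, U))
print((recon - A).abs().max())   # close to zero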
test_nn.py (project: pytorch, author: tylergenter)
def test_eye(self):
        for as_variable in [True, False]:
            input_tensor = self._create_random_nd_tensor(2, size_min=1, size_max=5, as_variable=as_variable)
            init.eye(input_tensor)
            if as_variable:
                input_tensor = input_tensor.data

            # Check every single element
            for i in range(input_tensor.size(0)):
                for j in range(input_tensor.size(1)):
                    if i == j:
                        assert input_tensor[i][j] == 1
                    else:
                        assert input_tensor[i][j] == 0
test_nn.py (project: pytorch, author: tylergenter)
def test_eye_only_works_on_2d_inputs(self):
        for as_variable in [True, False]:
            for dims in [1, 3]:
                with self.assertRaises(ValueError):
                    tensor = self._create_random_nd_tensor(dims, size_min=1, size_max=3, as_variable=as_variable)
                    init.eye(tensor)
test_nn.py (project: pytorch, author: tylergenter)
def test_orthogonal(self):
        for as_variable in [True, False]:
            for use_gain in [True, False]:
                for tensor_size in [[3, 4], [4, 3], [20, 2, 3, 4], [2, 3, 4, 5]]:
                    input_tensor = torch.zeros(tensor_size)
                    gain = 1.0

                    if as_variable:
                        input_tensor = Variable(input_tensor)

                    if use_gain:
                        gain = self._random_float(0.1, 2)
                        init.orthogonal(input_tensor, gain=gain)
                    else:
                        init.orthogonal(input_tensor)

                    if as_variable:
                        input_tensor = input_tensor.data

                    rows, cols = tensor_size[0], reduce(mul, tensor_size[1:])
                    flattened_tensor = input_tensor.view(rows, cols)
                    if rows > cols:
                        self.assertEqual(torch.mm(flattened_tensor.t(), flattened_tensor),
                                         torch.eye(cols) * gain ** 2, prec=1e-6)
                    else:
                        self.assertEqual(torch.mm(flattened_tensor, flattened_tensor.t()),
                                         torch.eye(rows) * gain ** 2, prec=1e-6)
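
The assertion being tested is the defining property of an orthogonal initialization: with gain g, the thin side of the flattened matrix satisfies Q^T Q = g^2 * I. A standalone check using the current in-place name orthogonal_ (the snippet above predates the rename from orthogonal):

import torch

W = torch.empty(5, 3)
torch.nn.init.orthogonal_(W, gain=2.0)

# Columns are orthogonal with squared norm gain**2:
print(torch.allclose(W.t() @ W, 4.0 * torch.eye(3), atol=1e-5))  # True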
bnlstm.py (project: benchmark, author: pytorch)
def reset_parameters(self):
        """
        Initialize parameters following the way proposed in the paper.
        """

        init.orthogonal(self.weight_ih.data)
        weight_hh_data = torch.eye(self.hidden_size)
        weight_hh_data = weight_hh_data.repeat(1, 4)
        self.weight_hh.data.set_(weight_hh_data)
        # The bias is just set to zero vectors.
        if self.use_bias:
            init.constant(self.bias.data, val=0)
MultiModelAll.1.py (project: PyTorchText, author: chenyuntc)
def __init__(self, opt ):
        super(MultiModelAll, self).__init__()
        self.model_name = 'MultiModelAll'
        self.opt=opt
        # self.char_models = []
        self.models = []
        self.word_embedding=nn.Embedding(411720,256)
        self.char_embedding=nn.Embedding(11973,256)
        self.word_embedding.weight.data.copy_(t.from_numpy(np.load(opt.embedding_path.replace('char','word'))['vector']))
        self.char_embedding.weight.data.copy_(t.from_numpy(np.load(opt.embedding_path.replace('word','char'))['vector']))

        for _name,_path in zip(opt.model_names, opt.model_paths):
            tmp_config = Config().parse(opt.state_dict(),print_=False)
            tmp_config.embedding_path=None
            _model = getattr(models,_name)(tmp_config)

            if _path is not None:
                _model.load(_path)

            # if _model.opt.type_=='char':
            _model.encoder=(self.char_embedding if _model.opt.type_=='char' else self.word_embedding)
            # else:
                # _model.encoder=self.word_embedding

            self.models.append(_model)



        self.models = nn.ModuleList(self.models)
        # self.word_models = nn.ModuleList(self.word_models)
        self.model_num = len(self.models)
        self.weights = nn.Parameter(t.ones(opt.num_classes,self.model_num))
        assert self.opt.loss=='bceloss'
        # self.weight =[nn.Parameter(t.ones(self.model_num)/self.model_num) for _ in range(self.model_num)]
        # self.label_weight = nn.Parameter(t.eye(opt.num_classes))
MultiModel.py (project: PyTorchText, author: chenyuntc)
def __init__(self, opt ):
        super(MultiModel, self).__init__()
        self.model_name = 'MultiModel'
        self.opt=opt
        self.models = []
        for _name,_path in zip(opt.model_names, opt.model_paths):
            _model = getattr(models,_name)(Config().parse(opt.state_dict(),print_=False))
            if _path is not None:
                _model.load(_path)
            self.models.append(_model)
        self.models = nn.ModuleList(self.models)
        self.model_num = len(self.models)
        self.weights = nn.Parameter(t.ones(opt.num_classes,self.model_num))
        # self.weight =[nn.Parameter(t.ones(self.model_num)/self.model_num) for _ in range(self.model_num)]
        # self.label_weight = nn.Parameter(t.eye(opt.num_classes))
MultiModelAll.py (project: PyTorchText, author: chenyuntc)
def __init__(self, opt ):
        super(MultiModelAll, self).__init__()
        self.model_name = 'MultiModelAll'
        self.opt=opt
        # self.char_models = []
        self.models = []
        self.word_embedding=nn.Embedding(411720,256)
        self.char_embedding=nn.Embedding(11973,256)
        if opt.embedding_path:
            self.word_embedding.weight.data.copy_(t.from_numpy(np.load(opt.embedding_path.replace('char','word'))['vector']))
            self.char_embedding.weight.data.copy_(t.from_numpy(np.load(opt.embedding_path.replace('word','char'))['vector']))

        for _name,_path in zip(opt.model_names, opt.model_paths):
            tmp_config = Config().parse(opt.state_dict(),print_=False)
            tmp_config.embedding_path=None
            _model = getattr(models,_name)(tmp_config)
            # load pretrained weights if a checkpoint path is given
            if _path is not None:
                _model.load(_path)
            # point the sub-model at the shared char/word embedding layers
            _model.encoder=(self.char_embedding if _model.opt.type_=='char' else self.word_embedding)
            self.models.append(_model)
        self.models = nn.ModuleList(self.models)

        self.model_num = len(self.models)
        self.weights = nn.Parameter(t.ones(opt.num_classes,self.model_num))
        assert self.opt.loss=='bceloss'
        # self.weight =[nn.Parameter(t.ones(self.model_num)/self.model_num) for _ in range(self.model_num)]
        # self.label_weight = nn.Parameter(t.eye(opt.num_classes))

