Example source code for Python's transpose()
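All of the snippets below call torch.transpose(input, dim0, dim1), which returns a view of the input with the two given dimensions swapped. A minimal sketch of the call itself:

import torch

x = torch.randn(2, 3, 5)
y = torch.transpose(x, 0, 1)    # swap dims 0 and 1
print(y.shape)                  # torch.Size([3, 2, 5])
print(x.transpose(0, 1).shape)  # the method form is equivalent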

model_HighWay_BiLSTM_1.py (project: cnn-lstm-bilstm-deepcnn-clstm-in-pytorch, author: bamtercelboo)
def forward(self, x):
        x = self.embed(x)
        x = self.dropout(x)
        # x = x.view(len(x), x.size(1), -1)
        # x = embed.view(len(x), embed.size(1), -1)
        bilstm_out, self.hidden = self.bilstm(x, self.hidden)

        bilstm_out = torch.transpose(bilstm_out, 0, 1)
        bilstm_out = torch.transpose(bilstm_out, 1, 2)
        # bilstm_out = F.max_pool1d(bilstm_out, bilstm_out.size(2)).squeeze(2)
        bilstm_out = F.max_pool1d(bilstm_out, bilstm_out.size(2))
        bilstm_out = bilstm_out.squeeze(2)

        hidden2label = self.hidden2label1(F.tanh(bilstm_out))

        gate_layer = F.sigmoid(self.gate_layer(bilstm_out))
        # calculate highway layer values
        gate_hidden_layer = torch.mul(hidden2label, gate_layer)
        # written this way it runs, but it does not match the Highway Networks formula:
        # gate_input = torch.mul((1 - gate_layer), hidden2label)
        gate_input = torch.mul((1 - gate_layer), bilstm_out)
        highway_output = torch.add(gate_hidden_layer, gate_input)

        logit = self.logit_layer(highway_output)

        return logit
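
For reference, the two transposes above take the BiLSTM output from nn.LSTM's default (seq_len, batch, hidden) layout to (batch, hidden, seq_len) so that F.max_pool1d can pool over the time axis; a minimal shape check with hypothetical sizes:

import torch
import torch.nn.functional as F

seq_len, batch, hidden = 7, 4, 16
bilstm_out = torch.randn(seq_len, batch, hidden)  # nn.LSTM default layout
bilstm_out = torch.transpose(bilstm_out, 0, 1)    # (batch, seq_len, hidden)
bilstm_out = torch.transpose(bilstm_out, 1, 2)    # (batch, hidden, seq_len)
pooled = F.max_pool1d(bilstm_out, bilstm_out.size(2)).squeeze(2)
print(pooled.shape)  # torch.Size([4, 16])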
model.py (project: ss-tad, author: shyamal-b)
def forward(self, inputs):
        batch_sz = inputs.size(0) # should be batch_sz (~200 in old set-up)
        inputs = torch.transpose(inputs,0,1)
        h0 = self.init_hidden_state(batch_sz)
        rnn_output, h_n = self.rnn.forward(inputs, h0)
        # get proposals output (L x N x h_width) ==> (N x L x K)
        output = self.lin_out.forward(rnn_output.view(rnn_output.size(0)*rnn_output.size(1), rnn_output.size(2)))
        lin_out = output.view(rnn_output.size(0), rnn_output.size(1), output.size(1))
        final_out = self.nonlin_final(torch.transpose(lin_out,0,1))
        return final_out, rnn_output
model.py (project: ss-tad, author: shyamal-b)
def forward(self, inputs):
        batch_sz = inputs.size(0) # should be batch_sz (~200 in old set-up)
        inputs = torch.transpose(inputs,0,1)
        h0 = self.init_hidden_state(batch_sz)
        rnn_output, h_n = self.rnn.forward(inputs, h0)
        # get "output" after linear layer. 
        output = self.lin_out.forward(rnn_output.view(rnn_output.size(0)*rnn_output.size(1), rnn_output.size(2)))
        L, N = rnn_output.size(0), rnn_output.size(1)
        C = output.size(1)
        assert L*N == output.size(0), "ERROR: mismatch in output tensor dimensions"
        fin_out = output.view(L, N, C) 
        fin_out = torch.transpose(fin_out,0,1) 
        fin_out = fin_out.contiguous().view(N*L, C)
        return fin_out, rnn_output
model.py (project: ss-tad, author: shyamal-b)
def convert_to_batch_order(self, output, N, L, K, C):
        output = output.view(L, N, K, C)
        output = torch.transpose(output, 0,1)
        return output.contiguous().view(N*L*K, C)
deform_conv.py (project: pytorch-deform-conv, author: oeway)
def th_repeat(a, repeats, axis=0):
    """Torch version of np.repeat for 1D"""
    assert len(a.size()) == 1
    return th_flatten(torch.transpose(a.repeat(repeats, 1), 0, 1))
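
th_flatten is not shown in this snippet; a usage sketch, assuming it flattens its argument to 1-D the way numpy.ravel does:

import torch

def th_flatten(a):
    # Assumed helper: flatten to a 1-D view.
    return a.contiguous().view(-1)

a = torch.arange(3)
print(th_repeat(a, 2))  # [0, 0, 1, 1, 2, 2], matching np.repeat(a, 2)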
gridgen.py (project: lr-gan.pytorch, author: jwyang)
def forward(self, input1):
        self.batchgrid = torch.zeros(torch.Size([input1.size(0)]) + self.grid.size())

        for i in range(input1.size(0)):
            self.batchgrid[i] = self.grid
        self.batchgrid = Variable(self.batchgrid)

        if input1.is_cuda:
            self.batchgrid = self.batchgrid.cuda()

        output = torch.bmm(self.batchgrid.view(-1, self.height*self.width, 3), torch.transpose(input1, 1, 2)).view(-1, self.height, self.width, 2)

        return output
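
The bmm above applies one 2x3 affine matrix per sample (input1) to a homogeneous (x, y, 1) sampling grid; a minimal shape check with hypothetical sizes:

import torch

N, H, W = 2, 4, 5
grid = torch.randn(H, W, 3)    # homogeneous (x, y, 1) coordinates
theta = torch.randn(N, 2, 3)   # one 2x3 affine matrix per sample
batchgrid = grid.expand(N, H, W, 3)
out = torch.bmm(batchgrid.reshape(N, H * W, 3), torch.transpose(theta, 1, 2))
print(out.view(N, H, W, 2).shape)  # torch.Size([2, 4, 5, 2])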
CRFv2.py (project: BiLSTM-CCM, author: codedecde)
def _forward_alg(self, feats):      
        init_alphas = torch.Tensor(self.tagset_size, 1).fill_(0.).type(self.dtype)      
        forward_var = autograd.Variable(init_alphas).type(self.dtype)       
        for ix,feat in enumerate(feats):
            if ix == 0:
                forward_var += feat.view(self.tagset_size,1) + self.initial_weights
            else:           
                forward_var = feat.view(self.tagset_size,1) + log_sum_exp_mat( self.transitions + torch.transpose(forward_var.repeat(1, self.tagset_size), 0, 1), 1)
        terminal_var = forward_var + self.final_weights
        alpha = log_sum_exp_mat(terminal_var, 0 )
        return alpha
CRF.py (project: BiLSTM-CCM, author: codedecde)
def _forward_alg(self, feats):
        init_alphas = torch.Tensor(self.tagset_size, 1).fill_(-10000.).type(self.dtype)
        init_alphas[self.tag_to_ix[self.START_TAG]][0] = 0.

        forward_var = autograd.Variable(init_alphas).type(self.dtype)
        for feat in feats:
            forward_var = feat.view(self.tagset_size, 1) + log_sum_exp_mat(self.transitions + torch.transpose(forward_var.expand(forward_var.size(0), self.tagset_size), 0, 1), 1)
        terminal_var = forward_var + self.transitions[self.tag_to_ix[self.STOP_TAG]].view(self.tagset_size, 1)
        alpha = log_sum_exp_mat(terminal_var, 0)
        return alpha
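
Both CRF snippets depend on a log_sum_exp_mat helper that is not shown; a minimal sketch, assuming it performs a numerically stable log-sum-exp along the given dimension and keeps that dimension so the (tagset_size, 1) shapes above are preserved:

import torch

def log_sum_exp_mat(mat, dim):
    # Stable log(sum(exp(mat))) along `dim`; keepdim preserves the 2-D shape.
    max_val, _ = torch.max(mat, dim, keepdim=True)
    return max_val + torch.log(torch.sum(torch.exp(mat - max_val), dim, keepdim=True))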
model.py (project: Atari-Game-with-DQN, author: tonyabracadabra)
def forward(self, x):
        embed = self.embed(x)
        x = embed.view(len(x), embed.size(1), -1)
        bilstm_out, self.hidden = self.bilstm(x, self.hidden)

        bilstm_out = torch.transpose(bilstm_out, 0, 1)
        bilstm_out = torch.transpose(bilstm_out, 1, 2)

        bilstm_out = F.tanh(bilstm_out)
        bilstm_out = F.max_pool1d(bilstm_out, bilstm_out.size(2)).squeeze(2)
        y = self.hidden2label1(bilstm_out)
        logit = self.hidden2label2(y)
        return logit
Model.py (project: MemNN, author: berlino)
def forward(self, qu, w, cand):
        qu = Variable(qu)
        w = Variable(w)
        cand = Variable(cand)
        embed_q = self.embed_B(qu)
        embed_w1 = self.embed_A(w)
        embed_c = self.embed_C(cand)

        #pdb.set_trace()
        q_state = torch.sum(embed_q, 1).squeeze(1)
        w1_state = torch.sum(embed_w1, 1).squeeze(1)

        sent_dot = torch.mm(q_state, torch.transpose(w1_state, 0, 1))
        sent_att = F.softmax(sent_dot)

        q_rnn_state = self.rnn_qus(embed_q, self.h0_q)[-1].squeeze(0)
        #pdb.set_trace()

        action = sent_att.multinomial()

        sent = embed_w1[action.data[0]]
        sent_state = self.rnn_doc(sent, self.h0_doc)[-1].squeeze(0)
        q_state = torch.add(q_state, sent_state)

        f_feat = torch.mm(q_state, torch.transpose(embed_c, 0, 1))
        reward_prob = F.log_softmax(f_feat).squeeze(0)

        return action, reward_prob
Model.py (project: MemNN, author: berlino)
def forward(self, qu, w, cand):
        qu = Variable(qu)
        cand = Variable(cand)
        embed_q = self.embed(qu)
        embed_cand = self.embed(cand)

        out, (self.h0, self.c0) = self.rnn(embed_q, (self.h0, self.c0))
        self.h0.detach_()
        self.c0.detach_()
        q_state = out[:,-1,:]

        f_fea_v = torch.mm(q_state, torch.transpose(embed_cand,0,1))

        score_n = F.log_softmax(f_fea_v)
        return score_n
Model.py (project: MemNN, author: berlino)
def forward(self, qu, key, value, cand):
        qu = Variable(qu)
        key = Variable(key)
        value = Variable(value)
        cand = Variable(cand)
        embed_q = self.embed_B(qu)
        embed_w1 = self.embed_A(key)
        embed_w2 = self.embed_C(value)
        embed_c = self.embed_C(cand)

        #pdb.set_trace()
        q_state = torch.sum(embed_q, 1).squeeze(1)
        w1_state = torch.sum(embed_w1, 1).squeeze(1)
        w2_state = embed_w2

        for _ in range(self.config.hop):
            sent_dot = torch.mm(q_state, torch.transpose(w1_state, 0, 1))
            sent_att = F.softmax(sent_dot)

            a_dot = torch.mm(sent_att, w2_state)
            a_dot = self.H(a_dot)
            q_state = torch.add(a_dot, q_state)

        f_feat = torch.mm(q_state, torch.transpose(embed_c, 0, 1))
        score = F.log_softmax(f_feat)
        return score
model.py (project: teras, author: chantera)
def forward(self, input1, input2):
        is_cuda = next(self.parameters()).is_cuda
        device_id = next(self.parameters()).get_device() if is_cuda else None
        out_size = self.out_features
        batch_size, len1, dim1 = input1.size()
        if self._use_bias[0]:
            ones = torch.ones(batch_size, len1, 1)
            if is_cuda:
                ones = ones.cuda(device_id)
            input1 = torch.cat((input1, Variable(ones)), dim=2)
            dim1 += 1
        len2, dim2 = input2.size()[1:]
        if self._use_bias[1]:
            ones = torch.ones(batch_size, len2, 1)
            if is_cuda:
                ones = ones.cuda(device_id)
            input2 = torch.cat((input2, Variable(ones)), dim=2)
            dim2 += 1
        input1_reshaped = input1.contiguous().view(batch_size * len1, dim1)
        W_reshaped = torch.transpose(self.weight, 1, 2) \
            .contiguous().view(dim1, out_size * dim2)
        affine = torch.mm(input1_reshaped, W_reshaped) \
            .view(batch_size, len1 * out_size, dim2)
        biaffine = torch.transpose(
            torch.bmm(affine, torch.transpose(input2, 1, 2))
            .view(batch_size, len1, out_size, len2), 2, 3)
        if self._use_bias[2]:
            biaffine += self.bias.expand_as(biaffine)
        return biaffine
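
Up to the optional bias columns, the block above computes a biaffine score s[b, i, j, o] = input1[b, i]^T W[o] input2[b, j]; an equivalent einsum sketch with hypothetical sizes (W here is a standalone stand-in, not self.weight's exact storage layout):

import torch

B, L1, L2, D1, D2, O = 2, 5, 7, 4, 3, 6
x1, x2 = torch.randn(B, L1, D1), torch.randn(B, L2, D2)
W = torch.randn(O, D1, D2)
scores = torch.einsum('bid,odk,bjk->bijo', x1, W, x2)
print(scores.shape)  # torch.Size([2, 5, 7, 6]) -> (batch, len1, len2, out)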
tensor_transforms.py (project: torchsample, author: ncullen93)
def __call__(self, *inputs):
        outputs = []
        for idx, _input in enumerate(inputs):
            _input = th.transpose(_input, self.dim1, self.dim2)
            outputs.append(_input)
        return outputs if len(outputs) > 1 else outputs[0]
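
A usage sketch, assuming the enclosing class (called Transpose here, hypothetically) stores dim1 and dim2 from its constructor:

import torch as th

transform = Transpose(dim1=0, dim2=2)             # hypothetical constructor
chw = th.randn(3, 32, 32)
hwc = transform(chw)                              # single input -> single tensor
print(hwc.shape)                                  # torch.Size([32, 32, 3])
img, mask = transform(chw, th.randn(3, 32, 32))   # multiple inputs -> list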
gridgen.py (project: faster-rcnn.pytorch, author: jwyang)
def forward(self, input1):
        self.batchgrid = torch.zeros(torch.Size([input1.size(0)]) + self.grid.size())

        for i in range(input1.size(0)):
            self.batchgrid[i] = self.grid
        self.batchgrid = Variable(self.batchgrid)

        if input1.is_cuda:
            self.batchgrid = self.batchgrid.cuda()

        output = torch.bmm(self.batchgrid.view(-1, self.height*self.width, 3), torch.transpose(input1, 1, 2)).view(-1, self.height, self.width, 2)

        return output
gridgen.py (project: faster-rcnn.pytorch, author: jwyang)
def forward(self, input1):
        self.input1 = input1
        output = input1.new(torch.Size([input1.size(0)]) + self.grid.size()).zero_()
        self.batchgrid = input1.new(torch.Size([input1.size(0)]) + self.grid.size()).zero_()
        for i in range(input1.size(0)):
            self.batchgrid[i] = self.grid.type_as(self.batchgrid)

        # if input1.is_cuda:
        #    self.batchgrid = self.batchgrid.cuda()
        #    output = output.cuda()

        # The bmm covers the whole batch at once, so no per-sample loop is needed.
        output = torch.bmm(self.batchgrid.view(-1, self.height*self.width, 3), torch.transpose(input1, 1, 2)).view(-1, self.height, self.width, 2)

        return output
gridgen.py (project: faster-rcnn.pytorch, author: jwyang)
def backward(self, grad_output):

        grad_input1 = self.input1.new(self.input1.size()).zero_()

        # if grad_output.is_cuda:
        #    self.batchgrid = self.batchgrid.cuda()
        #    grad_input1 = grad_input1.cuda()

        grad_input1 = torch.baddbmm(grad_input1, torch.transpose(grad_output.view(-1, self.height*self.width, 2), 1,2), self.batchgrid.view(-1, self.height*self.width, 3))
        return grad_input1
UpdateFunction.py (project: nmp_qc, author: priba)
def u_duvenaud(self, h_v, m_v, opt):

        param_sz = self.learn_args[0][opt['deg']].size()
        parameter_mat = torch.t(self.learn_args[0][opt['deg']])[None, ...].expand(m_v.size(0), param_sz[1], param_sz[0])

        aux = torch.bmm(parameter_mat, torch.transpose(m_v, 1, 2))

        return torch.transpose(torch.nn.Sigmoid()(aux), 1, 2)
UpdateFunction.py (project: nmp_qc, author: priba)
def u_ggnn(self, h_v, m_v, opt={}):
        h_v = h_v.contiguous()  # contiguous() is not in-place; reassign the result
        m_v = m_v.contiguous()
        h_new = self.learn_modules[0](torch.transpose(m_v, 0, 1), torch.unsqueeze(h_v, 0))[0]  # 0 or 1???
        return torch.transpose(h_new, 0, 1)
deform_conv.py (project: pytorch_resnet, author: taokong)
def th_repeat(a, repeats, axis=0):
    """Torch version of np.repeat for 1D"""
    assert len(a.size()) == 1
    return th_flatten(torch.transpose(a.repeat(repeats, 1), 0, 1))

