Example source code for Python's mul() (torch.mul)
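torch.mul(input, other) is element-wise (Hadamard) multiplication with NumPy-style broadcasting; other may also be a plain number. It is equivalent to the * operator. A minimal standalone sketch of the basic semantics (assumes PyTorch >= 0.4, unlike some of the older snippets below):

import torch

a = torch.tensor([1.0, 2.0, 3.0])
b = torch.tensor([4.0, 5.0, 6.0])

print(torch.mul(a, b))    # tensor([ 4., 10., 18.]), same as a * b
print(torch.mul(a, 2.0))  # tensor([2., 4., 6.]), the scalar is broadcast

m = torch.ones(2, 3)
v = torch.tensor([1.0, 2.0, 3.0])
print(torch.mul(m, v))    # v broadcasts over the rows of m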

model.py, from project treelstm.pytorch by dasguptar
def node_forward(self, inputs, child_c, child_h):
        child_h_sum = torch.sum(child_h, dim=0, keepdim=True)

        iou = self.ioux(inputs) + self.iouh(child_h_sum)
        i, o, u = torch.split(iou, iou.size(1) // 3, dim=1)
        i, o, u = F.sigmoid(i), F.sigmoid(o), F.tanh(u)

        f = F.sigmoid(
                self.fh(child_h) +
                self.fx(inputs).repeat(len(child_h), 1)
            )
        fc = torch.mul(f, child_c)

        c = torch.mul(i, u) + torch.sum(fc, dim=0, keepdim=True)
        h = torch.mul(o, F.tanh(c))
        return c, h
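
The three torch.mul calls above implement the child-sum Tree-LSTM gating: forget gates scale each child's cell state element-wise, while i*u and o*tanh(c) gate the new cell and hidden states. A toy sketch of just the forget-gate step, with made-up shapes (2 children, hidden size 4):

import torch

f = torch.sigmoid(torch.randn(2, 4))         # one forget gate per child
child_c = torch.randn(2, 4)                  # child cell states
fc = torch.mul(f, child_c)                   # element-wise gating, shape (2, 4)
c_term = torch.sum(fc, dim=0, keepdim=True)  # summed over children, shape (1, 4)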
test_torch.py, from project pytorch-dist by apaszke
def test_abs(self):
        size = 1000
        max_val = 1000
        original = torch.rand(size).mul(max_val)
        # Tensor filled with values from {-1, 1}
        switch = torch.rand(size).mul(2).floor().mul(2).add(-1)

        types = ['torch.DoubleTensor', 'torch.FloatTensor', 'torch.LongTensor', 'torch.IntTensor']
        for t in types:
            data = original.type(t)
            switch = switch.type(t)
            res = torch.mul(data, switch)
            self.assertEqual(res.abs(), data, 1e-16)

        # Checking that the right abs function is called for LongTensor
        bignumber = 2 ** 31 + 1  # was 2^31 + 1, but ^ is XOR in Python; ** gives the intended value above the int32 range
        res = torch.LongTensor((-bignumber,))
        self.assertGreater(res.abs()[0], 0)
cls_sparse_skip_filt.py, from project mss_pytorch by Js-Mim
def forward(self, H_j_dec, input_x):
        if torch.has_cudnn:
            # Input is of the shape : (B, T, N)
            input_x = Variable(torch.from_numpy(input_x[:, self._L:-self._L, :]).cuda(), requires_grad=True)

        else:
            # Input is of the shape : (B, T, N)
            # Cropping some "un-necessary" frequency sub-bands
            input_x = Variable(torch.from_numpy(input_x[:, self._L:-self._L, :]), requires_grad=True)

        # Decode/Sparsify mask
        mask_t1 = self.relu(self.ffDec(H_j_dec))
        # Apply skip-filtering connections
        Y_j = torch.mul(mask_t1, input_x)

        return Y_j, mask_t1
module_utils.py, from project torch_light by ne7ermore
def cosine_cont(repr_context, relevancy, norm=False):
    """
    cosine similarity between context and relevancy
    Args:
        repr_context - [batch_size, other_len, context_lstm_dim]
        relevancy - [batch_size, this_len, other_len]
    Return:
        size - [batch_size, this_len, context_lstm_dim]
    """
    dim = repr_context.dim()

    temp_relevancy = relevancy.unsqueeze(dim) # [batch_size, this_len, other_len, 1]
    buff = repr_context.unsqueeze(1) # [batch_size, 1, other_len, context_lstm_dim]
    buff = torch.mul(buff, temp_relevancy) # [batch_size, this_len, other_len, context_lstm_dim]
    buff = buff.sum(2) # [batch_size, this_len, context_lstm_dim]
    if norm:
        relevancy = relevancy.sum(dim-1).clamp(min=1e-6) # [batch_size, this_len]
        relevancy = relevancy.unsqueeze(2) # [batch_size, this_len, 1]
        buff = buff.div(relevancy)
    return buff
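
The torch.mul here relies on broadcasting: a (B, 1, other_len, d) tensor times a (B, this_len, other_len, 1) tensor yields (B, this_len, other_len, d). A standalone shape check with hypothetical sizes (B=2, this_len=3, other_len=5, d=7):

import torch

repr_context = torch.randn(2, 5, 7)
relevancy = torch.randn(2, 3, 5)
buff = torch.mul(repr_context.unsqueeze(1),   # (2, 1, 5, 7)
                 relevancy.unsqueeze(3))      # (2, 3, 5, 1)
print(buff.shape)         # torch.Size([2, 3, 5, 7])
print(buff.sum(2).shape)  # torch.Size([2, 3, 7])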
faster_rcnn.py, from project faster_rcnn_pytorch by longcw
def build_loss(self, rpn_cls_score_reshape, rpn_bbox_pred, rpn_data):
        # classification loss
        rpn_cls_score = rpn_cls_score_reshape.permute(0, 2, 3, 1).contiguous().view(-1, 2)
        rpn_label = rpn_data[0].view(-1)

        rpn_keep = Variable(rpn_label.data.ne(-1).nonzero().squeeze()).cuda()
        rpn_cls_score = torch.index_select(rpn_cls_score, 0, rpn_keep)
        rpn_label = torch.index_select(rpn_label, 0, rpn_keep)

        fg_cnt = torch.sum(rpn_label.data.ne(0))

        rpn_cross_entropy = F.cross_entropy(rpn_cls_score, rpn_label)

        # box loss
        rpn_bbox_targets, rpn_bbox_inside_weights, rpn_bbox_outside_weights = rpn_data[1:]
        rpn_bbox_targets = torch.mul(rpn_bbox_targets, rpn_bbox_inside_weights)
        rpn_bbox_pred = torch.mul(rpn_bbox_pred, rpn_bbox_inside_weights)

        rpn_loss_box = F.smooth_l1_loss(rpn_bbox_pred, rpn_bbox_targets, size_average=False) / (fg_cnt + 1e-4)

        return rpn_cross_entropy, rpn_loss_box
model.py, from project pytorch-nlp by endymecy
def forward(self, pos_u, pos_v, neg_u, neg_v):
        losses = []
        emb_u = []
        for i in range(len(pos_u)):
            emb_ui = self.u_embeddings(Variable(torch.LongTensor(pos_u[i])))
            emb_u.append(np.sum(emb_ui.data.numpy(), axis=0).tolist())
        emb_u = Variable(torch.FloatTensor(emb_u))
        emb_v = self.v_embeddings(Variable(torch.LongTensor(pos_v)))
        score = torch.mul(emb_u, emb_v)
        score = torch.sum(score, dim=1)
        score = F.logsigmoid(score)
        losses.append(sum(score))

        neg_emb_u = []
        for i in range(len(neg_u)):
            neg_emb_ui = self.u_embeddings(Variable(torch.LongTensor(neg_u[i])))
            neg_emb_u.append(np.sum(neg_emb_ui.data.numpy(), axis=0).tolist())
        neg_emb_u = Variable(torch.FloatTensor(neg_emb_u))
        neg_emb_v = self.v_embeddings(Variable(torch.LongTensor(neg_v)))
        neg_score = torch.mul(neg_emb_u, neg_emb_v)
        neg_score = torch.sum(neg_score, dim=1)
        neg_score = F.logsigmoid(-1 * neg_score)
        losses.append(sum(neg_score))

        return -1 * sum(losses)
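
In this skip-gram loss, torch.mul followed by a sum over dim=1 is just a batched dot product between center and context embeddings. The pattern in isolation, with random stand-in embeddings:

import torch

u = torch.randn(4, 8)   # 4 center-word embeddings
v = torch.randn(4, 8)   # 4 context-word embeddings
score = torch.sum(torch.mul(u, v), dim=1)   # per-row dot product, shape (4,)
# equivalently: (u * v).sum(1), or torch.einsum('bd,bd->b', u, v)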
modules.py, from project end-to-end-negotiator by facebookresearch
def forward(self, ctx):
        idx = np.arange(ctx.size(0) // 2)
        # extract counts and values
        cnt_idx = Variable(self.to_device(torch.from_numpy(2 * idx + 0)))
        val_idx = Variable(self.to_device(torch.from_numpy(2 * idx + 1)))

        cnt = ctx.index_select(0, cnt_idx)
        val = ctx.index_select(0, val_idx)

        # embed counts and values
        cnt_emb = self.cnt_enc(cnt)
        val_emb = self.val_enc(val)

        # element-wise multiplication to get a hidden state
        h = torch.mul(cnt_emb, val_emb)
        # run the hidden state through the MLP
        h = h.transpose(0, 1).contiguous().view(ctx.size(1), -1)
        ctx_h = self.encoder(h).unsqueeze(0)
        return ctx_h
layers.py, from project BiDAF-PyTorch by kelayamatoz
def forward(self, h, u, h_mask=None, u_mask=None):
        config = self.config
        if config.q2c_att or config.c2q_att:
            u_a, h_a = self.bi_attention(h, u, h_mask=h_mask, u_mask=u_mask)
            '''
            u_a: [N, M, JX, d]
            h_a: [N, M, d]
            '''
        else:
            print("AttentionLayer: q2c_att or c2q_att False not supported")

        if config.q2c_att:
            p0 = torch.cat([h, u_a, torch.mul(h, u_a), torch.mul(h, h_a)], 3)
        else:
            print("AttentionLayer: q2c_att False not supported")

        return p0
fusion.py, from project vqa.pytorch by Cadene
def forward(self, input_v, input_q):
        # visual (cnn features)
        if 'dim_v' in self.opt:
            x_v = F.dropout(input_v, p=self.opt['dropout_v'], training=self.training)
            x_v = self.linear_v(x_v)
            if 'activation_v' in self.opt:
                x_v = getattr(F, self.opt['activation_v'])(x_v)
        else:
            x_v = input_v
        # question (rnn features)
        if 'dim_q' in self.opt:
            x_q = F.dropout(input_q, p=self.opt['dropout_q'], training=self.training)
            x_q = self.linear_q(x_q)
            if 'activation_q' in self.opt:
                x_q = getattr(F, self.opt['activation_q'])(x_q)
        else:
            x_q = input_q
        # hadamard product
        x_mm = torch.mul(x_q, x_v)
        return x_mm
test_torch.py, from project pytorch by tylergenter
def test_abs(self):
        size = 1000
        max_val = 1000
        original = torch.rand(size).mul(max_val)
        # Tensor filled with values from {-1, 1}
        switch = torch.rand(size).mul(2).floor().mul(2).add(-1)

        types = ['torch.DoubleTensor', 'torch.FloatTensor', 'torch.LongTensor', 'torch.IntTensor']
        for t in types:
            data = original.type(t)
            switch = switch.type(t)
            res = torch.mul(data, switch)
            # abs is used in assertEqual so we use the slow version instead
            self.assertTensorsSlowEqual(res.abs(), data, 1e-16)

        # Checking that the right abs function is called for LongTensor
        bignumber = 2 ** 31 + 1  # was 2 ^ 31 + 1, but ^ is XOR in Python; ** gives the intended value above the int32 range
        res = torch.LongTensor((-bignumber,))
        self.assertGreater(res.abs()[0], 0)
lstm.py, from project benchmark by pytorch
def forward(self, x, hidden):
        h, c = hidden
        h = h.view(h.size(1), -1)
        c = c.view(c.size(1), -1)
        x = x.view(x.size(1), -1)
        # Linear mappings
        i_t = th.mm(x, self.w_xi) + th.mm(h, self.w_hi) + self.b_i
        f_t = th.mm(x, self.w_xf) + th.mm(h, self.w_hf) + self.b_f
        o_t = th.mm(x, self.w_xo) + th.mm(h, self.w_ho) + self.b_o
        # activations
        i_t.sigmoid_()
        f_t.sigmoid_()
        o_t.sigmoid_()
        # cell computations
        c_t = th.mm(x, self.w_xc) + th.mm(h, self.w_hc) + self.b_c
        c_t.tanh_()
        c_t = th.mul(c, f_t) + th.mul(i_t, c_t)
        h_t = th.mul(o_t, th.tanh(c_t))
        # Reshape for compatibility
        h_t = h_t.view(1, h_t.size(0), -1)
        c_t = c_t.view(1, c_t.size(0), -1)
        if self.dropout > 0.0:
            F.dropout(h_t, p=self.dropout, training=self.training, inplace=True)
        return h_t, (h_t, c_t)
model.py, from project SuperResolution by bguisard
def forward(self, x):
        upblock = True
        # Downsizing layer - Large Kernel ensures large receptive field on the residual blocks
        h = F.relu(self.b2(self.c1(x)))

        # Residual Layers
        for r in self.rs:
            h = r(h)  # will go through all residual blocks in this loop

        if upblock:
            # Upsampling Layers - improvement suggested by [2] to remove "checkerboard pattern"
            for u in self.up:
                h = u(h)  # will go through all upsampling blocks in this loop
        else:
            # As recommended by [1]
            h = F.relu(self.bc2(self.dc2(h)))
            h = F.relu(self.bc3(self.dc3(h)))

        # Last layer and scaled tanh activation - Scaled from 0 to 1 instead of 0 - 255
        h = F.tanh(self.c3(h))
        h = torch.add(h, 1.)
        h = torch.mul(h, 0.5)
        return h
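
The final two lines rescale the tanh output from (-1, 1) to (0, 1), i.e. h = (tanh(x) + 1) / 2. The same mapping in isolation:

import torch

x = torch.randn(5)
h = torch.mul(torch.add(torch.tanh(x), 1.0), 0.5)   # maps (-1, 1) to (0, 1)
assert ((h > 0) & (h < 1)).all()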
gridgen.py, from project lr-gan.pytorch by jwyang
def forward(self, input1):
        self.batchgrid3d = torch.zeros(torch.Size([input1.size(0)]) + self.grid3d.size())

        for i in range(input1.size(0)):
            self.batchgrid3d[i] = self.grid3d

        self.batchgrid3d = Variable(self.batchgrid3d)
        #print(self.batchgrid3d)

        x = torch.sum(torch.mul(self.batchgrid3d, input1[:,:,:,0:4]), 3)
        y = torch.sum(torch.mul(self.batchgrid3d, input1[:,:,:,4:8]), 3)
        z = torch.sum(torch.mul(self.batchgrid3d, input1[:,:,:,8:]), 3)
        #print(x)
        r = torch.sqrt(x**2 + y**2 + z**2) + 1e-5

        #print(r)
        theta = torch.acos(z/r)/(np.pi/2)  - 1
        #phi = torch.atan(y/x)
        phi = torch.atan(y/(x + 1e-5))  + np.pi * x.lt(0).type(torch.FloatTensor) * (y.ge(0).type(torch.FloatTensor) - y.lt(0).type(torch.FloatTensor))
        phi = phi/np.pi


        output = torch.cat([theta,phi], 3)

        return output
test_torch.py, from project pytorch-coriander by hughperkins
def test_abs(self):
        size = 1000
        max_val = 1000
        original = torch.rand(size).mul(max_val)
        # Tensor filled with values from {-1, 1}
        switch = torch.rand(size).mul(2).floor().mul(2).add(-1)

        types = ['torch.DoubleTensor', 'torch.FloatTensor', 'torch.LongTensor', 'torch.IntTensor']
        for t in types:
            data = original.type(t)
            switch = switch.type(t)
            res = torch.mul(data, switch)
            # abs is used in assertEqual so we use the slow version instead
            self.assertTensorsSlowEqual(res.abs(), data, 1e-16)

        # Checking that the right abs function is called for LongTensor
        bignumber = 2 ** 31 + 1  # was 2 ^ 31 + 1, but ^ is XOR in Python; ** gives the intended value above the int32 range
        res = torch.LongTensor((-bignumber,))
        self.assertGreater(res.abs()[0], 0)
VAE_ccLinIAF.py, from project vae_vpflows by jmtomczak
def forward(self, L, z):
        '''
        :param L: batch_size (B) x latent_size^2 (L^2)
        :param z: batch_size (B) x latent_size (L)
        :return: z_new = L*z
        '''
        # L->tril(L)
        L_matrix = L.view( -1, self.args.z1_size, self.args.z1_size ) # resize to get B x L x L
        LTmask = torch.tril( torch.ones(self.args.z1_size, self.args.z1_size), k=-1 ) # lower-triangular mask matrix (1s in lower triangular part)
        I = Variable( torch.eye(self.args.z1_size, self.args.z1_size).expand(L_matrix.size(0), self.args.z1_size, self.args.z1_size) )
        if self.args.cuda:
            LTmask = LTmask.cuda()
            I = I.cuda()
        LTmask = Variable(LTmask)
        LTmask = LTmask.unsqueeze(0).expand( L_matrix.size(0), self.args.z1_size, self.args.z1_size ) # 1 x L x L -> B x L x L
        LT = torch.mul( L_matrix, LTmask ) + I # here we get a batch of lower-triangular matrices with ones on diagonal

        # z_new = L * z
        z_new = torch.bmm( LT , z.unsqueeze(2) ).squeeze(2) # B x L x L * B x L x 1 -> B x L

        return z_new
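
torch.mul with a strictly-lower-triangular 0/1 mask zeroes everything on and above the diagonal, and adding I then puts ones back on the diagonal, yielding a batch of unit lower-triangular matrices. A standalone sketch with hypothetical sizes (the snippet's k= argument matches older PyTorch; newer versions name it diagonal=):

import torch

B, Lz = 2, 3                                          # hypothetical batch / latent sizes
L_matrix = torch.randn(B, Lz, Lz)
LTmask = torch.tril(torch.ones(Lz, Lz), diagonal=-1)  # 1s strictly below the diagonal
I = torch.eye(Lz).expand(B, Lz, Lz)
LT = torch.mul(L_matrix, LTmask) + I                  # LTmask broadcasts over the batch
# LT keeps the original values below the diagonal, ones on it, zeros above it.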
faster_rcnn.py, from project pytorch_RFCN by PureDiors
def build_loss(self, rpn_cls_score_reshape, rpn_bbox_pred, rpn_data):
        # classification loss
        rpn_cls_score = rpn_cls_score_reshape.permute(0, 2, 3, 1).contiguous().view(-1, 2)
        rpn_label = rpn_data[0].view(-1)

        rpn_keep = Variable(rpn_label.data.ne(-1).nonzero().squeeze()).cuda()
        rpn_cls_score = torch.index_select(rpn_cls_score, 0, rpn_keep)
        rpn_label = torch.index_select(rpn_label, 0, rpn_keep)

        fg_cnt = torch.sum(rpn_label.data.ne(0))

        rpn_cross_entropy = F.cross_entropy(rpn_cls_score, rpn_label)

        # box loss
        rpn_bbox_targets, rpn_bbox_inside_weights, rpn_bbox_outside_weights = rpn_data[1:]
        rpn_bbox_targets = torch.mul(rpn_bbox_targets, rpn_bbox_inside_weights)
        rpn_bbox_pred = torch.mul(rpn_bbox_pred, rpn_bbox_inside_weights)

        rpn_loss_box = F.smooth_l1_loss(rpn_bbox_pred, rpn_bbox_targets, size_average=False) / (fg_cnt + 1e-4)

        return rpn_cross_entropy, rpn_loss_box
rfcn.py, from project pytorch_RFCN by PureDiors
def build_loss(self, rpn_cls_score_reshape, rpn_bbox_pred, rpn_data):
        # classification loss
        rpn_cls_score = rpn_cls_score_reshape.permute(0, 2, 3, 1).contiguous().view(-1, 2)
        rpn_label = rpn_data[0].view(-1)

        rpn_keep = Variable(rpn_label.data.ne(-1).nonzero().squeeze()).cuda()
        rpn_cls_score = torch.index_select(rpn_cls_score, 0, rpn_keep)
        rpn_label = torch.index_select(rpn_label, 0, rpn_keep)

        fg_cnt = torch.sum(rpn_label.data.ne(0))

        rpn_cross_entropy = F.cross_entropy(rpn_cls_score, rpn_label)

        # box loss
        rpn_bbox_targets, rpn_bbox_inside_weights, rpn_bbox_outside_weights = rpn_data[1:]
        rpn_bbox_targets = torch.mul(rpn_bbox_targets, rpn_bbox_inside_weights)
        rpn_bbox_pred = torch.mul(rpn_bbox_pred, rpn_bbox_inside_weights)

        rpn_loss_box = F.smooth_l1_loss(rpn_bbox_pred, rpn_bbox_targets, size_average=False) / (fg_cnt + 1e-4)

        return rpn_cross_entropy, rpn_loss_box
gridgen.py, from project faster-rcnn.pytorch by jwyang
def forward(self, input1):
        self.batchgrid3d = torch.zeros(torch.Size([input1.size(0)]) + self.grid3d.size())

        for i in range(input1.size(0)):
            self.batchgrid3d[i] = self.grid3d

        self.batchgrid3d = Variable(self.batchgrid3d)
        #print(self.batchgrid3d)

        x = torch.sum(torch.mul(self.batchgrid3d, input1[:,:,:,0:4]), 3)
        y = torch.sum(torch.mul(self.batchgrid3d, input1[:,:,:,4:8]), 3)
        z = torch.sum(torch.mul(self.batchgrid3d, input1[:,:,:,8:]), 3)
        #print(x)
        r = torch.sqrt(x**2 + y**2 + z**2) + 1e-5

        #print(r)
        theta = torch.acos(z/r)/(np.pi/2)  - 1
        #phi = torch.atan(y/x)
        phi = torch.atan(y/(x + 1e-5))  + np.pi * x.lt(0).type(torch.FloatTensor) * (y.ge(0).type(torch.FloatTensor) - y.lt(0).type(torch.FloatTensor))
        phi = phi/np.pi


        output = torch.cat([theta,phi], 3)

        return output
action_conditional_video_prediction.py, from project DeepRL by ShangtongZhang
def forward(self, obs, action):
        x = F.relu(self.conv1(obs))
        x = F.relu(self.conv2(x))
        x = F.relu(self.conv3(x))
        x = F.relu(self.conv4(x))
        x = x.view((-1, self.hidden_units))
        x = F.relu(self.fc5(x))
        x = self.fc_encode(x)
        action = self.fc_action(action)
        x = torch.mul(x, action)
        x = self.fc_decode(x)
        x = F.relu(self.fc8(x))
        x = x.view((-1, 128, 11, 8))
        x = F.relu(self.deconv9(x))
        x = F.relu(self.deconv10(x))
        x = F.relu(self.deconv11(x))
        x = self.deconv12(x)
        return x
layers.py, from project repeval_rivercorners by jabalazs
def forward(self, input_1, input_2):
        """

        :param : input_1
            Size is (*, hidden_size)

        :param input_2:
            Size is (*, hidden_size)

        :return:

            Merged vectors, size is (*, 4*hidden size)
        """
        assert input_1.size(-1) == input_2.size(-1)
        mult_combined_vec = torch.mul(input_1, input_2)
        diff_combined_vec = torch.abs(input_1 - input_2)
        combined_vec = torch.cat((input_1,
                                  input_2,
                                  mult_combined_vec,
                                  diff_combined_vec), input_1.dim()-1)

        return combined_vec
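
This is the standard matching feature [v1; v2; v1*v2; |v1 - v2|] for comparing two encodings; the element-wise product captures per-dimension agreement. The pattern in isolation, with stand-in vectors:

import torch

v1, v2 = torch.randn(2, 5), torch.randn(2, 5)
combined = torch.cat((v1, v2, torch.mul(v1, v2), torch.abs(v1 - v2)), dim=-1)
print(combined.shape)   # torch.Size([2, 20]), i.e. 4 * hidden_size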
faster_rcnn.py, from project intel-cervical-cancer by wangg12
def build_loss(self, rpn_cls_score_reshape, rpn_bbox_pred, rpn_data):
        # classification loss
        rpn_cls_score = rpn_cls_score_reshape.permute(0, 2, 3, 1).contiguous().view(-1, 2)
        rpn_label = rpn_data[0].view(-1)

        rpn_keep = Variable(rpn_label.data.ne(-1).nonzero().squeeze()).cuda()
        rpn_cls_score = torch.index_select(rpn_cls_score, 0, rpn_keep)
        rpn_label = torch.index_select(rpn_label, 0, rpn_keep)

        fg_cnt = torch.sum(rpn_label.data.ne(0))

        rpn_cross_entropy = F.cross_entropy(rpn_cls_score, rpn_label)

        # box loss
        rpn_bbox_targets, rpn_bbox_inside_weights, rpn_bbox_outside_weights = rpn_data[1:]
        rpn_bbox_targets = torch.mul(rpn_bbox_targets, rpn_bbox_inside_weights)
        rpn_bbox_pred = torch.mul(rpn_bbox_pred, rpn_bbox_inside_weights)

        rpn_loss_box = F.smooth_l1_loss(rpn_bbox_pred, rpn_bbox_targets, size_average=False) / (fg_cnt + 1e-4)

        return rpn_cross_entropy, rpn_loss_box
gridgen.py, from project intel-cervical-cancer by wangg12
def forward(self, input):
        if not self.aux_loss:
            return self.f(input)
        else:
            identity = torch.from_numpy(np.array([[1,0,0], [0,1,0]], dtype=np.float32))
            batch_identity = torch.zeros([input.size(0), 2,3])
            for i in range(input.size(0)):
                batch_identity[i] = identity

            if input.is_cuda:
                batch_identity = Variable(batch_identity.cuda())
            else:
                batch_identity = Variable(batch_identity)

            loss = torch.mul(input - batch_identity, input - batch_identity)
            loss = torch.sum(loss,1)
            loss = torch.sum(loss,2)

            return self.f(input), loss.view(-1,1)
__init__.py, from project confusion by abhimanyudubey
def EntropicConfusion(features):
    batch_size = features.size(0)
    return torch.mul(features, torch.log(features)).sum() * (1.0 / batch_size)
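
For row-stochastic features (each row a probability distribution), this is (1/B) * sum(p * log p), the negative mean entropy over the batch; minimizing it maximizes prediction entropy. A standalone sketch with softmax outputs as stand-in features:

import torch
import torch.nn.functional as F

probs = F.softmax(torch.randn(4, 10), dim=1)   # 4 rows of probabilities
neg_entropy = torch.mul(probs, torch.log(probs)).sum() * (1.0 / probs.size(0))
print(neg_entropy)   # always <= 0; closer to 0 for peaked (low-entropy) rows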
decoder.py, from project ladder by abhiskk
def g(self, tilde_z_l, u_l):
        if self.use_cuda:
            ones = Parameter(torch.ones(tilde_z_l.size()[0], 1).cuda())
        else:
            ones = Parameter(torch.ones(tilde_z_l.size()[0], 1))

        b_a1 = ones.mm(self.a1)
        b_a2 = ones.mm(self.a2)
        b_a3 = ones.mm(self.a3)
        b_a4 = ones.mm(self.a4)
        b_a5 = ones.mm(self.a5)

        b_a6 = ones.mm(self.a6)
        b_a7 = ones.mm(self.a7)
        b_a8 = ones.mm(self.a8)
        b_a9 = ones.mm(self.a9)
        b_a10 = ones.mm(self.a10)

        mu_l = torch.mul(b_a1, torch.sigmoid(torch.mul(b_a2, u_l) + b_a3)) + \
               torch.mul(b_a4, u_l) + \
               b_a5

        v_l = torch.mul(b_a6, torch.sigmoid(torch.mul(b_a7, u_l) + b_a8)) + \
              torch.mul(b_a9, u_l) + \
              b_a10

        hat_z_l = torch.mul(tilde_z_l - mu_l, v_l) + mu_l

        return hat_z_l
encoder.py, from project ladder by abhiskk
def bn_gamma_beta(self, x):
        if self.use_cuda:
            ones = Parameter(torch.ones(x.size()[0], 1).cuda())
        else:
            ones = Parameter(torch.ones(x.size()[0], 1))
        t = x + ones.mm(self.bn_beta)
        if self.train_bn_scaling:
            t = torch.mul(t, ones.mm(self.bn_gamma))
        return t
model.py, from project treelstm.pytorch by dasguptar
def forward(self, lvec, rvec):
        mult_dist = torch.mul(lvec, rvec)
        abs_dist = torch.abs(torch.add(lvec, -rvec))
        vec_dist = torch.cat((mult_dist, abs_dist), 1)

        out = F.sigmoid(self.wh(vec_dist))
        out = F.log_softmax(self.wp(out))
        return out


# putting the whole model together
loss.py, from project pytorch-dist by apaszke
def backward(self, grad_output):
        v1, v2, y = self.saved_tensors

        buffer = v1.new()
        _idx = self._new_idx(v1)

        gw1 = grad_output.new()
        gw2 = grad_output.new()
        gw1.resize_as_(v1).copy_(v2)
        gw2.resize_as_(v1).copy_(v1)

        torch.mul(buffer, self.w1, self.w22)
        gw1.addcmul_(-1, buffer.expand_as(v1), v1)
        gw1.mul_(self.w.expand_as(v1))

        torch.mul(buffer, self.w1, self.w32)
        gw2.addcmul_(-1, buffer.expand_as(v1), v2)
        gw2.mul_(self.w.expand_as(v1))

        torch.le(_idx, self._outputs, 0)
        _idx = _idx.view(-1, 1).expand(gw1.size())
        gw1[_idx] = 0
        gw2[_idx] = 0

        torch.eq(_idx, y, 1)
        _idx = _idx.view(-1, 1).expand(gw2.size())
        gw1[_idx] = gw1[_idx].mul_(-1)
        gw2[_idx] = gw2[_idx].mul_(-1)

        if self.size_average:
            gw1.div_(y.size(0))
            gw2.div_(y.size(0))

        if grad_output[0] != 1:
            gw1.mul_(grad_output)
            gw2.mul_(grad_output)

        return gw1, gw2, None
loss.py, from project pytorch-dist by apaszke
def backward(self, grad_output):
        input, target = self.saved_tensors
        grad_input = input.new().resize_as_(input).copy_(target)
        grad_input[torch.mul(torch.eq(target, -1), torch.gt(input, self.margin))] = 0

        if self.size_average:
            grad_input.mul_(1. / input.nelement())

        if grad_output[0] != 1:
            grad_input.mul_(grad_output[0])

        return grad_input, None
BCECriterion.py, from project pytorch-dist by apaszke
def updateOutput(self, input, target):
         # - log(input) * target - log(1 - input) * (1 - target)
        if input.nelement() != target.nelement():
            raise RuntimeError("input and target size mismatch")

        self.buffer = self.buffer or input.new()

        buffer = self.buffer
        weights = self.weights

        buffer.resize_as_(input)

        if weights is not None and target.dim() != 1:
            weights = self.weights.view(1, target.size(1)).expand_as(target)

        # log(input) * target
        torch.add(buffer, input, self.eps).log_()
        if weights is not None:
            buffer.mul_(weights)

        output = torch.dot(target, buffer)

        # log(1 - input) * (1 - target)
        torch.mul(buffer, input, -1).add_(1+self.eps).log_()
        if weights is not None:
            buffer.mul_(weights)

        output = output + torch.sum(buffer)
        output = output - torch.dot(target, buffer)

        if self.sizeAverage:
            output = output / input.nelement()

        self.output = - output

        return self.output
DotProduct.py, from project pytorch-dist by apaszke
def updateOutput(self, input):
        input1, input2 = input[0], input[1]

        if not self.buffer:
           self.buffer = input1.new()

        torch.mul(self.buffer, input1, input2)
        torch.sum(self.output, self.buffer, 1)
        self.output.resize_(input1.size(0))
        return self.output
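
Note the calling convention in the pytorch-dist snippets above: torch.mul(buffer, input1, input2) writes the product into buffer, with the output tensor passed first. Modern PyTorch spells this with the out= keyword instead; a sketch of the current form:

import torch

a, b = torch.randn(3), torch.randn(3)
out = torch.empty(3)
torch.mul(a, b, out=out)   # same effect as the legacy torch.mul(out, a, b)
print(out)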

