Example source code for Python's squeeze()
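A note on semantics before the examples: torch.squeeze(input) removes every dimension of size 1, while torch.squeeze(input, dim) removes only the given dimension, and leaves the tensor unchanged if that dimension is not size 1. A minimal sketch, with illustrative shapes not taken from any project below:

import torch

x = torch.zeros(2, 1, 3, 1)
print(torch.squeeze(x).size())     # torch.Size([2, 3]): all size-1 dims removed
print(torch.squeeze(x, 1).size())  # torch.Size([2, 3, 1]): only dim 1 removed
print(torch.squeeze(x, 0).size())  # torch.Size([2, 1, 3, 1]): dim 0 is size 2, unchanged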

pytorch_model.py (project: biaffineparser, author: chantera)
def forward(self, pretrained_word_tokens, word_tokens, pos_tokens):
        lengths = np.array([len(tokens) for tokens in word_tokens])
        X = self.forward_embed(
            pretrained_word_tokens, word_tokens, pos_tokens, lengths)
        indices = np.argsort(-np.array(lengths)).astype(np.int64)
        lengths = lengths[indices]
        X = torch.stack([X[idx] for idx in indices])
        X = nn.utils.rnn.pack_padded_sequence(X, lengths, batch_first=True)
        R = self.blstm(X)[0]
        R = nn.utils.rnn.pad_packed_sequence(R, batch_first=True)[0]
        R = R.index_select(dim=0, index=_model_var(
            self, torch.from_numpy(np.argsort(indices).astype(np.int64))))
        H_arc_head = self.mlp_arc_head(R)
        H_arc_dep = self.mlp_arc_dep(R)
        arc_logits = self.arc_biaffine(H_arc_dep, H_arc_head)
        arc_logits = torch.squeeze(arc_logits, dim=3)
        H_label_dep = self.mlp_label_dep(R)
        H_label_head = self.mlp_label_head(R)
        label_logits = self.label_biaffine(H_label_dep, H_label_head)
        return arc_logits, label_logits
rfcn.py (project: pytorch_RFCN, author: PureDiors)
def build_loss(self, rpn_cls_score_reshape, rpn_bbox_pred, rpn_data):
        # classification loss
        rpn_cls_score = rpn_cls_score_reshape.permute(0, 2, 3, 1).contiguous().view(-1, 2)
        rpn_label = rpn_data[0].view(-1)

        rpn_keep = Variable(rpn_label.data.ne(-1).nonzero().squeeze()).cuda()
        rpn_cls_score = torch.index_select(rpn_cls_score, 0, rpn_keep)
        rpn_label = torch.index_select(rpn_label, 0, rpn_keep)

        fg_cnt = torch.sum(rpn_label.data.ne(0))

        rpn_cross_entropy = F.cross_entropy(rpn_cls_score, rpn_label)

        # box loss
        rpn_bbox_targets, rpn_bbox_inside_weights, rpn_bbox_outside_weights = rpn_data[1:]
        rpn_bbox_targets = torch.mul(rpn_bbox_targets, rpn_bbox_inside_weights)
        rpn_bbox_pred = torch.mul(rpn_bbox_pred, rpn_bbox_inside_weights)

        rpn_loss_box = F.smooth_l1_loss(rpn_bbox_pred, rpn_bbox_targets, size_average=False) / (fg_cnt + 1e-4)

        return rpn_cross_entropy, rpn_loss_box
rfcn.py (project: pytorch_RFCN, author: PureDiors)
def forward(self, im_data, im_info, gt_boxes=None, gt_ishard=None, dontcare_areas=None):
        features, rois = self.rpn(im_data, im_info, gt_boxes, gt_ishard, dontcare_areas)

        if self.training:
            roi_data = self.proposal_target_layer(rois, gt_boxes, gt_ishard, dontcare_areas, self.n_classes)
            rois = roi_data[0]

        # roi pool
        conv_new1 = self.new_conv(features)
        r_score_map = self.rfcn_score(conv_new1)
        r_bbox_map = self.rfcn_bbox(conv_new1)
        psroi_pooled_cls = self.psroi_pool_cls(r_score_map, rois)
        psroi_pooled_loc = self.psroi_pool_loc(r_bbox_map, rois)
        bbox_pred = self.bbox_pred(psroi_pooled_loc)
        bbox_pred = torch.squeeze(bbox_pred)
        cls_score = self.cls_score(psroi_pooled_cls)
        cls_score = torch.squeeze(cls_score)
        cls_prob = F.softmax(cls_score)

        if self.training:
            self.cross_entropy, self.loss_box = self.build_loss(cls_score, bbox_pred, roi_data)

        return cls_prob, bbox_pred, rois
model.py (project: treelstm-pytorch, author: pklfz)
def node_forward(self, inputs, child_c, child_h):
        child_h_sum = F.torch.sum(torch.squeeze(child_h, 1), 0)

        i = F.sigmoid(self.ix(inputs) + self.ih(child_h_sum))
        o = F.sigmoid(self.ox(inputs) + self.oh(child_h_sum))
        u = F.tanh(self.ux(inputs) + self.uh(child_h_sum))

        # add extra singleton dimension
        fx = F.torch.unsqueeze(self.fx(inputs), 1)
        f = F.torch.cat([self.fh(child_hi) + fx for child_hi in child_h], 0)
        f = F.sigmoid(f)
        # restore the singleton dimension so f broadcasts against child_c
        f = F.torch.unsqueeze(f, 1)
        # multiply, then remove the extra singleton dimension again
        fc = F.torch.squeeze(F.torch.mul(f, child_c), 1)

        c = F.torch.mul(i, u) + F.torch.sum(fc, 0)
        h = F.torch.mul(o, F.tanh(c))

        return c, h
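The unsqueeze/squeeze pair in node_forward is a broadcasting idiom: a singleton dimension is added so the forget gates line up with child_c, the two are multiplied elementwise, and the singleton is squeezed away again. A minimal sketch of that idiom; the shapes (num_children=4, mem_dim=150) are illustrative assumptions:

import torch

child_c = torch.randn(4, 1, 150)              # (num_children, 1, mem_dim)
f = torch.randn(4, 150)                       # one forget gate per child
f = torch.unsqueeze(f, 1)                     # (4, 1, 150): align with child_c
fc = torch.squeeze(torch.mul(f, child_c), 1)  # (4, 150): singleton removed after the multiply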
ReadoutFunction.py (project: nmp_qc, author: priba)
def r_duvenaud(self, h):
        # layers
        aux = []
        for l in range(len(h)):
            param_sz = self.learn_args[l].size()
            parameter_mat = torch.t(self.learn_args[l])[None, ...].expand(h[l].size(0), param_sz[1],
                                                                                      param_sz[0])

            aux.append(torch.transpose(torch.bmm(parameter_mat, torch.transpose(h[l], 1, 2)), 1, 2))

            for j in range(0, aux[l].size(1)):
                # Mask whole 0 vectors
                aux[l][:, j, :] = nn.Softmax()(aux[l][:, j, :].clone())*(torch.sum(aux[l][:, j, :] != 0, 1) > 0).expand_as(aux[l][:, j, :]).type_as(aux[l])

        aux = torch.sum(torch.sum(torch.stack(aux, 3), 3), 1)
        return self.learn_modules[0](torch.squeeze(aux))
MessageFunction.py (project: nmp_qc, author: priba)
def m_ggnn(self, h_v, h_w, e_vw, opt={}):

        m = Variable(torch.zeros(h_w.size(0), h_w.size(1), self.args['out']).type_as(h_w.data))

        for w in range(h_w.size(1)):
            if torch.nonzero(e_vw[:, w, :].data).size():
                for i, el in enumerate(self.args['e_label']):
                    ind = (el == e_vw[:,w,:]).type_as(self.learn_args[0][i])

                    parameter_mat = self.learn_args[0][i][None, ...].expand(h_w.size(0), self.learn_args[0][i].size(0),
                                                                            self.learn_args[0][i].size(1))

                    m_w = torch.transpose(torch.bmm(torch.transpose(parameter_mat, 1, 2),
                                                                        torch.transpose(torch.unsqueeze(h_w[:, w, :], 1),
                                                                                        1, 2)), 1, 2)
                    m_w = torch.squeeze(m_w)
                    m[:,w,:] = ind.expand_as(m_w)*m_w
        return m
train_batch.py (project: kdnet.pytorch, author: fxia22)
def split_ps(point_set):
    #print point_set.size()
    num_points = point_set.size()[0]/2
    diff = point_set.max(dim=0)[0] - point_set.min(dim=0)[0]
    dim = torch.max(diff, dim = 1)[1][0,0]
    cut = torch.median(point_set[:,dim])[0][0]
    left_idx = torch.squeeze(torch.nonzero(point_set[:,dim] > cut))
    right_idx = torch.squeeze(torch.nonzero(point_set[:,dim] < cut))
    middle_idx = torch.squeeze(torch.nonzero(point_set[:,dim] == cut))

    if torch.numel(left_idx) < num_points:
        left_idx = torch.cat([left_idx, middle_idx[0:1].repeat(num_points - torch.numel(left_idx))], 0)
    if torch.numel(right_idx) < num_points:
        right_idx = torch.cat([right_idx, middle_idx[0:1].repeat(num_points - torch.numel(right_idx))], 0)

    left_ps = torch.index_select(point_set, dim = 0, index = left_idx)
    right_ps = torch.index_select(point_set, dim = 0, index = right_idx)
    return left_ps, right_ps, dim
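In split_ps, torch.nonzero returns an N x 1 LongTensor of indices for a 1-D comparison result, so each squeeze flattens it into the 1-D index vector that torch.index_select expects. A minimal sketch with illustrative values:

import torch

v = torch.Tensor([0.1, 0.9, 0.4, 0.8])
idx = torch.nonzero(v > 0.5)              # shape (2, 1)
idx = torch.squeeze(idx)                  # shape (2,): usable as an index vector
selected = torch.index_select(v, 0, idx)  # the elements above the cut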
train.py (project: kdnet.pytorch, author: fxia22)
def split_ps(point_set):
    #print point_set.size()
    num_points = point_set.size()[0]/2
    diff = point_set.max(dim=0, keepdim = True)[0] - point_set.min(dim=0, keepdim = True)[0]
    dim = torch.max(diff, dim = 1, keepdim = True)[1][0,0]
    cut = torch.median(point_set[:,dim], keepdim = True)[0][0]
    left_idx = torch.squeeze(torch.nonzero(point_set[:,dim] > cut))
    right_idx = torch.squeeze(torch.nonzero(point_set[:,dim] < cut))
    middle_idx = torch.squeeze(torch.nonzero(point_set[:,dim] == cut))

    if torch.numel(left_idx) < num_points:
        left_idx = torch.cat([left_idx, middle_idx[0:1].repeat(num_points - torch.numel(left_idx))], 0)
    if torch.numel(right_idx) < num_points:
        right_idx = torch.cat([right_idx, middle_idx[0:1].repeat(num_points - torch.numel(right_idx))], 0)

    left_ps = torch.index_select(point_set, dim = 0, index = left_idx)
    right_ps = torch.index_select(point_set, dim = 0, index = right_idx)
    return left_ps, right_ps, dim
train_MG2.py (project: kdnet.pytorch, author: fxia22)
def split_ps(point_set):
    #print point_set.size()
    num_points = point_set.size()[0]/2
    diff = point_set.max(dim=0)[0] - point_set.min(dim=0)[0]
    diff = diff[:3]
    dim = torch.max(diff, dim = 1)[1][0,0]
    cut = torch.median(point_set[:,dim])[0][0]
    left_idx = torch.squeeze(torch.nonzero(point_set[:,dim] > cut))
    right_idx = torch.squeeze(torch.nonzero(point_set[:,dim] < cut))
    middle_idx = torch.squeeze(torch.nonzero(point_set[:,dim] == cut))

    if torch.numel(left_idx) < num_points:
        left_idx = torch.cat([left_idx, middle_idx[0:1].repeat(num_points - torch.numel(left_idx))], 0)
    if torch.numel(right_idx) < num_points:
        right_idx = torch.cat([right_idx, middle_idx[0:1].repeat(num_points - torch.numel(right_idx))], 0)

    left_ps = torch.index_select(point_set, dim = 0, index = left_idx)
    right_ps = torch.index_select(point_set, dim = 0, index = right_idx)
    return left_ps, right_ps, dim
test.py (project: kdnet.pytorch, author: fxia22)
def split_ps(point_set):
    #print point_set.size()
    num_points = point_set.size()[0]/2
    diff = point_set.max(dim=0)[0] - point_set.min(dim=0)[0] 
    dim = torch.max(diff, dim = 1)[1][0,0]
    cut = torch.median(point_set[:,dim])[0][0]  
    left_idx = torch.squeeze(torch.nonzero(point_set[:,dim] > cut))
    right_idx = torch.squeeze(torch.nonzero(point_set[:,dim] < cut))
    middle_idx = torch.squeeze(torch.nonzero(point_set[:,dim] == cut))

    if torch.numel(left_idx) < num_points:
        left_idx = torch.cat([left_idx, middle_idx[0:1].repeat(num_points - torch.numel(left_idx))], 0)
    if torch.numel(right_idx) < num_points:
        right_idx = torch.cat([right_idx, middle_idx[0:1].repeat(num_points - torch.numel(right_idx))], 0)

    left_ps = torch.index_select(point_set, dim = 0, index = left_idx)
    right_ps = torch.index_select(point_set, dim = 0, index = right_idx)
    return left_ps, right_ps, dim
torch_backend.py (project: ktorch, author: farizrahman4u)
def max(x, axis=None, keepdims=False):
    def _max(x, axis, keepdims):
        y = torch.max(x, axis)[0]
        # the keepdims argument of torch is not functional here, so emulate it by squeezing
        return y if keepdims else torch.squeeze(y, axis)

    def _compute_output_shape(x, axis, keepdims):
        if axis is None:
            return ()

        shape = list(_get_shape(x))
        if keepdims:
            shape[axis] = 1
        else:
            del shape[axis]

        return tuple(shape)

    return get_op(_max, output_shape=_compute_output_shape, arguments=[axis, keepdims])(x)
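This wrapper and the ktorch reductions that follow share one pattern: on the torch versions they target, the reduction keeps the reduced axis, so Keras-style keepdims=False is emulated by squeezing that axis afterwards. A minimal sketch of the emulation (recent PyTorch accepts keepdim directly, so the squeeze is only needed as a workaround):

import torch

x = torch.randn(3, 5)
y = torch.max(x, 1, keepdim=True)[0]  # (3, 1): reduced axis kept
y = torch.squeeze(y, 1)               # (3,): emulates keepdims=False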
torch_backend.py (project: ktorch, author: farizrahman4u)
def min(x, axis=None, keepdims=False):
    def _min(x, axis, keepdims):
        y = torch.min(x, axis)[0]
        # the keepdims argument of torch is not functional here, so emulate it by squeezing
        return y if keepdims else torch.squeeze(y, axis)

    def _compute_output_shape(x, axis, keepdims):
        if axis is None:
            return ()

        shape = list(_get_shape(x))
        if keepdims:
            shape[axis] = 1
        else:
            del shape[axis]

        return tuple(shape)

    return get_op(_min, output_shape=_compute_output_shape, arguments=[axis, keepdims])(x)
torch_backend.py (project: ktorch, author: farizrahman4u)
def sum(x, axis=None, keepdims=False):
    def _sum(x, axis, keepdims):
        y = torch.sum(x, axis)
        # the keepdims argument of torch is not functional here, so emulate it by squeezing
        return y if keepdims else torch.squeeze(y, axis)

    def _compute_output_shape(x, axis, keepdims):
        if axis is None:
            return ()

        shape = list(_get_shape(x))
        if keepdims:
            shape[axis] = 1
        else:
            del shape[axis]

        return tuple(shape)

    return get_op(_sum, output_shape=_compute_output_shape, arguments=[axis, keepdims])(x)
torch_backend.py (project: ktorch, author: farizrahman4u)
def prod(x, axis=None, keepdims=False):
    def _prod(x, axis, keepdims):
        y = torch.prod(x, axis)
        # the keepdims argument of torch is not functional here, so emulate it by squeezing
        return y if keepdims else torch.squeeze(y, axis)

    def _compute_output_shape(x, axis, keepdims):
        if axis is None:
            return ()

        shape = list(_get_shape(x))
        if keepdims:
            shape[axis] = 1
        else:
            del shape[axis]

        return tuple(shape)

    return get_op(_prod, output_shape=_compute_output_shape, arguments=[axis, keepdims])(x)
torch_backend.py (project: ktorch, author: farizrahman4u)
def std(x, axis=None, keepdims=False):
    def _std(x, axis, keepdims):
        y = torch.std(x, axis)
        # the keepdims argument of torch is not functional here, so emulate it by squeezing
        return y if keepdims else torch.squeeze(y, axis)

    def _compute_output_shape(x, axis, keepdims):
        if axis is None:
            return ()

        shape = list(_get_shape(x))
        if keepdims:
            shape[axis] = 1
        else:
            del shape[axis]

        return tuple(shape)

    return get_op(_std, output_shape=_compute_output_shape, arguments=[axis, keepdims])(x)
torch_backend.py (project: ktorch, author: farizrahman4u)
def mean(x, axis=None, keepdims=False):
    def _mean(x, axis=axis, keepdims=keepdims):
        y = torch.mean(x, axis)
        # the keepdims argument of torch is not functional here, so emulate it by squeezing
        return y if keepdims else torch.squeeze(y, axis)

    def _compute_output_shape(x, axis=axis, keepdims=keepdims):
        if axis is None:
            return ()

        shape = list(_get_shape(x))
        if keepdims:
            shape[axis] = 1
        else:
            del shape[axis]

        return tuple(shape)

    return get_op(_mean, output_shape=_compute_output_shape, arguments=[axis, keepdims])(x)
torch_backend.py (project: ktorch, author: farizrahman4u)
def any(x, axis=None, keepdims=False):
    def _any(x, axis=axis, keepdims=keepdims):
        y = torch.sum(x != 0, axis) != 0
        # the keepdims argument of torch is not functional here, so emulate it by squeezing
        return y if keepdims else torch.squeeze(y, axis)

    def _compute_output_shape(x, axis=axis, keepdims=keepdims):
        if axis is None:
            return ()

        shape = list(_get_shape(x))
        if keepdims:
            shape[axis] = 1
        else:
            del shape[axis]

        return tuple(shape)

    return get_op(_any, output_shape=_compute_output_shape, arguments=[axis, keepdims])(x)
torch_backend.py (project: ktorch, author: farizrahman4u)
def all(x, axis=None, keepdims=False):
    def _all(x, axis=axis, keepdims=keepdims):
        y = torch.sum(x == False, axis) == 0
        # the keepdims argument of torch is not functional here, so emulate it by squeezing
        return y if keepdims else torch.squeeze(y, axis)

    def _compute_output_shape(x, axis=axis, keepdims=keepdims):
        if axis is None:
            return ()

        shape = list(_get_shape(x))
        if keepdims:
            shape[axis] = 1
        else:
            del shape[axis]

        return tuple(shape)

    return get_op(_all, output_shape=_compute_output_shape, arguments=[axis, keepdims])(x)
loss.py (project: torchbiomed, author: mattmacy)
def dice_error(input, target):
    eps = 0.000001
    _, result_ = input.max(1)
    result_ = torch.squeeze(result_)
    if input.is_cuda:
        result = torch.cuda.FloatTensor(result_.size())
        target_ = torch.cuda.FloatTensor(target.size())
    else:
        result = torch.FloatTensor(result_.size())
        target_ = torch.FloatTensor(target.size())
    result.copy_(result_.data)
    target_.copy_(target.data)
    target = target_
    intersect = torch.dot(result, target)

    result_sum = torch.sum(result)
    target_sum = torch.sum(target)
    union = result_sum + target_sum + 2*eps
    intersect = np.max([eps, intersect])
    # the target volume can be empty - so we still want to
    # end up with a score of 1 if the result is 0/0
    IoU = intersect / union
#    print('union: {:.3f}\t intersect: {:.6f}\t target_sum: {:.0f} IoU: result_sum: {:.0f} IoU {:.7f}'.format(
#        union, intersect, target_sum, result_sum, 2*IoU))
    return 2*IoU
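In dice_error, input.max(1) returns a (values, indices) pair, and on the torch version used here the indices keep the reduced class axis as size 1; the squeeze drops it so the prediction volume matches the target's layout. A minimal sketch with illustrative shapes:

import torch

logits = torch.randn(8, 2, 64, 64)     # (batch, classes, H, W)
_, pred = logits.max(1, keepdim=True)  # (8, 1, 64, 64)
pred = torch.squeeze(pred)             # (8, 64, 64): per-voxel class indices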
densenet.py (project: ResNeXt-DenseNet, author: D-X-Y)
def forward(self, x):
    out = self.conv1(x)
    out = self.trans1(self.dense1(out))
    out = self.trans2(self.dense2(out))
    out = self.dense3(out)
    out = torch.squeeze(F.avg_pool2d(F.relu(self.bn1(out)), 8))
    out = F.log_softmax(self.fc(out))
    return out
train_char.py (project: Tree-LSTM-LM, author: vgene)
def run_epoch(model, reader, criterion, is_train=False, use_cuda=False, lr=0.01):
    """
        reader: data provider
        criterion: loss calculation 
    """
    # if is_train:
    #     model.train()
    # else:
    #     model.eval()

    epoch_size = ((reader.file_length // model.batch_size)-1) // model.seq_length

    hidden = model.init_hidden()

    iters = 0
    costs = 0
    for steps, (inputs, targets) in tqdm.tqdm(enumerate(reader.iterator_char(model.batch_size, model.seq_length))):
        #print(len(inputs)) 
        model.optimizer.zero_grad()
        inputs = Variable(torch.from_numpy(inputs.astype(np.int64)).transpose(0,1).contiguous())
        targets = Variable(torch.from_numpy(targets.astype(np.int64)).transpose(0,1).contiguous())
        if use_cuda:
            inputs = inputs.cuda()
            targets = targets.cuda()
        targets = torch.squeeze(targets.view(-1, model.batch_size*model.seq_length))
        hidden = repackage_hidden(hidden, use_cuda=use_cuda)
        outputs, hidden = model(inputs, hidden)

        loss = criterion(outputs.view(-1, model.vocab_size), targets)
        costs += loss.data[0] * model.seq_length

        perplexity = np.exp(costs/((steps+1)*model.seq_length))
        #print("Iter {}/{},Perplexity:{}".format(steps+1, epoch_size, perplexity))

        if is_train:
            loss.backward()
            model.optimizer.step()

    return perplexity
train.py (project: Tree-LSTM-LM, author: vgene)
def run_epoch(model, provider, criterion, is_train=False, use_cuda=False, lr=0.01):
    """
        provider: data provider
        criterion: loss calculation
    """
    # if is_train:
    #     model.train()
    # else:
    #     model.eval()

    # epoch_size = ((provider.file_length // model.batch_size)-1) // model.seq_length

    hidden = model.init_hidden()

    iters = 0
    costs = 0
    for steps, (inputs, targets) in enumerate(provider.iterator(model.batch_size, model.seq_length)):

        # print(inputs)
        model.optimizer.zero_grad()
        inputs = Variable(torch.from_numpy(inputs.astype(np.int64)).transpose(0,1).contiguous())
        targets = Variable(torch.from_numpy(targets.astype(np.int64)).transpose(0,1).contiguous())
        if use_cuda:
            inputs = inputs.cuda()
            targets = targets.cuda()
        targets = torch.squeeze(targets.view(-1, model.batch_size*model.seq_length))
        hidden = repackage_hidden(hidden, use_cuda=use_cuda)
        outputs, hidden = model(inputs, hidden)

        loss = criterion(outputs.view(-1, model.node_size), targets)
        costs += loss.data[0] * model.seq_length

        perplexity = np.exp(costs/((steps+1)*model.seq_length))
        #print("Iter {}/{},Perplexity:{}".format(steps+1, epoch_size, perplexity))

        if is_train:
            loss.backward()
            model.optimizer.step()

    return perplexity
pytorch_emitter.py (project: MMdnn, author: Microsoft)
def emit_Squeeze(self, IR_node):
        self.add_body(2, "{:<15} = torch.squeeze({})".format(
            IR_node.variable_name, self.parent_variable_name(IR_node)
        ))
pytorch_emitter.py (project: MMdnn, author: Microsoft)
def _layer_LRN(self):
        self.add_body(0, """
    class LRN(nn.Module):
        def __init__(self, size=1, alpha=1.0, beta=0.75, ACROSS_CHANNELS=False):
            super(KitModel.LRN, self).__init__()
            self.ACROSS_CHANNELS = ACROSS_CHANNELS
            if self.ACROSS_CHANNELS:
                self.average=nn.AvgPool3d(kernel_size=(size, 1, 1),
                        stride=1,
                        padding=(int((size-1.0)/2), 0, 0))
            else:
                self.average=nn.AvgPool2d(kernel_size=size,
                        stride=1,
                        padding=int((size-1.0)/2))
            self.alpha = alpha
            self.beta = beta

        def forward(self, x):
            if self.ACROSS_CHANNELS:
                div = x.pow(2).unsqueeze(1)
                div = self.average(div).squeeze(1)
                div = div.mul(self.alpha).add(1.0).pow(self.beta)
            else:
                div = x.pow(2)
                div = self.average(div)
                div = div.mul(self.alpha).add(1.0).pow(self.beta)
            x = x.div(div)
            return x""")
model.py (project: treehopper, author: tomekkorbak)
def node_forward(self, inputs, child_c, child_h, training):
        child_h_sum = F.torch.sum(torch.squeeze(child_h, 1), 0, keepdim = True)

        i = F.sigmoid(self.ix(inputs)+self.ih(child_h_sum))
        o = F.sigmoid(self.ox(inputs)+self.oh(child_h_sum))
        u = F.tanh(self.ux(inputs)+self.uh(child_h_sum))

        # add extra singleton dimension
        fx = F.torch.unsqueeze(self.fx(inputs), 1)
        f = F.torch.cat([self.fh(child_hi) + torch.squeeze(fx, 1) for child_hi in child_h], 0)
        # f = torch.squeeze(f, 0)
        f = F.sigmoid(f)
        # restore the singleton dimension so f broadcasts against child_c
        f = F.torch.unsqueeze(f, 1)
        # multiply, then remove the extra singleton dimension again
        fc = F.torch.squeeze(F.torch.mul(f, child_c), 1)

        idx = Var(torch.multinomial(torch.ones(child_c.size(0)), 1), requires_grad=False)
        if self.cuda_flag:
            idx = idx.cuda()

        c = zoneout(
            current_input=F.torch.mul(i, u) + F.torch.sum(fc, 0, keepdim=True),
            previous_input=F.torch.squeeze(child_c.index_select(0, idx), 0) if self.zoneout_choose_child else F.torch.sum(torch.squeeze(child_c, 1), 0, keepdim=True),
            p=self.recurrent_dropout_c,
            training=training,
            mask=self.mask if self.commons_mask else None
        )
        h = zoneout(
            current_input=F.torch.mul(o, F.tanh(c)),
            previous_input=F.torch.squeeze(child_h.index_select(0, idx), 0) if self.zoneout_choose_child else child_h_sum,
            p=self.recurrent_dropout_h,
            training=training,
            mask=self.mask if self.commons_mask else None
        )

        return c, h
train.py (project: vnet.pytorch, author: mattmacy)
def inference(args, loader, model, transforms):
    src = args.inference
    dst = args.save

    model.eval()
    nvols = reduce(operator.mul, target_split, 1)
    # assume single GPU / batch size 1
    for data in loader:
        data, series, origin, spacing = data[0]
        shape = data.size()
        # convert names to batch tensor
        if args.cuda:
            data.pin_memory()
            data = data.cuda()
        data = Variable(data, volatile=True)
        output = model(data)
        _, output = output.max(1)
        output = output.view(shape)
        output = output.cpu()
        # merge subvolumes and save
        results = output.chunk(nvols)
        results = map(lambda var : torch.squeeze(var.data).numpy().astype(np.int16), results)
        volume = utils.merge_image([*results], target_split)
        print("save {}".format(series))
        utils.save_updated_image(volume, os.path.join(dst, series + ".mhd"), origin, spacing)

# performing post-train inference:
# train.py --resume <model checkpoint> --i <input directory (*.mhd)> --save <output directory>
pytorch_model.py (project: biaffineparser, author: chantera)
def extract_best_label_logits(self, arc_logits, label_logits, lengths):
        pred_arcs = torch.squeeze(
            torch.max(arc_logits, dim=1)[1], dim=1).data.cpu().numpy()
        size = label_logits.size()
        output_logits = _model_var(
            self.model,
            torch.zeros(size[0], size[1], size[3]))
        for batch_index, (_logits, _arcs, _length) \
                in enumerate(zip(label_logits, pred_arcs, lengths)):
            for i in range(_length):
                output_logits[batch_index] = _logits[_arcs[i]]
        return output_logits
densenet.py (project: densenet.pytorch, author: bamos)
def forward(self, x):
        out = self.conv1(x)
        out = self.trans1(self.dense1(out))
        out = self.trans2(self.dense2(out))
        out = self.dense3(out)
        out = torch.squeeze(F.avg_pool2d(F.relu(self.bn1(out)), 8))
        out = F.log_softmax(self.fc(out))
        return out
densenet.py (project: FreezeOut, author: ajbrock)
def forward(self, x):
        out = self.conv1(x)
        out = self.trans1(self.dense1(out))
        out = self.trans2(self.dense2(out))
        out = self.dense3(out)
        out = torch.squeeze(F.avg_pool2d(F.relu(self.bn1(out)), 8))
        out = F.log_softmax(self.fc(out))
        return out
densenet.py (project: optnet, author: locuslab)
def forward(self, x):
        out = self.conv1(x)
        out = self.trans1(self.dense1(out))
        out = self.trans2(self.dense2(out))
        out = self.dense3(out)
        out = torch.squeeze(F.avg_pool2d(F.relu(self.bn1(out)), 8))
        out = F.log_softmax(self.fc(out))
        return out
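One caveat that applies to all of the DenseNet heads above: torch.squeeze without a dim argument removes every size-1 dimension, so with a batch of one the batch dimension is squeezed away too and self.fc receives a 1-D input. Squeezing the spatial dimensions explicitly sidesteps this; a minimal sketch (the channel count 342 is illustrative):

import torch
import torch.nn.functional as F

pooled = F.avg_pool2d(torch.randn(1, 342, 8, 8), 8)  # (1, 342, 1, 1)
bad = torch.squeeze(pooled)                          # (342,): batch dim lost
good = torch.squeeze(torch.squeeze(pooled, 3), 2)    # (1, 342): batch dim kept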

