Example source code for Python's from_numpy()
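The snippets below are collected from open-source PyTorch projects and show typical uses of torch.from_numpy(), which wraps a NumPy array as a tensor that shares the array's memory (an in-place change on either side is visible to the other) and keeps its dtype. As a minimal, self-contained sketch of the call itself (toy values, not taken from any of the projects below):

import numpy as np
import torch

a = np.arange(6, dtype=np.float32).reshape(2, 3)
t = torch.from_numpy(a)      # no copy: the tensor shares the array's memory
a[0, 0] = 42.0
print(t[0, 0].item())        # 42.0 -- the tensor sees the in-place change
print(t.dtype)               # torch.float32, inherited from the array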

nn1_stress_test.py (project: YellowFin_Pytorch, author: JianGoForIt)
def pad_batch(mini_batch):
    mini_batch_size = len(mini_batch)
#     print mini_batch.shape
#     print mini_batch
    max_sent_len1 = int(np.max([len(x[0]) for x in mini_batch]))
    max_sent_len2 = int(np.max([len(x[1]) for x in mini_batch]))
#     print max_sent_len1, max_sent_len2
#     max_token_len = int(np.mean([len(val) for sublist in mini_batch for val in sublist]))
    # np.int was removed from recent NumPy releases; use an explicit integer dtype
    main_matrix1 = np.zeros((mini_batch_size, max_sent_len1), dtype=np.int64)
    main_matrix2 = np.zeros((mini_batch_size, max_sent_len2), dtype=np.int64)
    for idx1, i in enumerate(mini_batch):
        for idx2, j in enumerate(i[0]):
            try:
                # index by batch position and token position, not by the token values
                main_matrix1[idx1, idx2] = j
            except IndexError:
                pass
    for idx1, i in enumerate(mini_batch):
        for idx2, j in enumerate(i[1]):
            try:
                main_matrix2[idx1, idx2] = j
            except IndexError:
                pass
    main_matrix1_t = Variable(torch.from_numpy(main_matrix1))
    main_matrix2_t = Variable(torch.from_numpy(main_matrix2))
#     print main_matrix1_t.size()
#     print main_matrix2_t.size()
    return [main_matrix1_t, main_matrix2_t]
#     return [Variable(torch.cat((main_matrix1_t, main_matrix2_t), 0))

# def pad_batch(mini_batch):
# #     print mini_batch
# #     print type(mini_batch)
# #     print mini_batch.shape
# #     for i, _ in enumerate(mini_batch):
# #         print i, _
#     return [Variable(torch.from_numpy(np.asarray(_))) for _ in mini_batch[0]]
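As a quick, hypothetical illustration of calling the pad_batch above (the toy token ids below are made up), each batch element is a (sentence1, sentence2) pair of id lists, and the two returned Variables are zero-padded to the longest first and second sentence in the batch:

# hypothetical toy batch: two (sentence1, sentence2) pairs of token ids
mini_batch = [([3, 7, 2], [5, 1]),
              ([4, 9], [8, 6, 2, 1])]
sent1, sent2 = pad_batch(mini_batch)
print(sent1.size())   # torch.Size([2, 3]) -- padded to the longest first sentence
print(sent2.size())   # torch.Size([2, 4]) -- padded to the longest second sentence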
voc0712.py (project: ssd.pytorch, author: amdegroot)
def pull_item(self, index):
        img_id = self.ids[index]

        target = ET.parse(self._annopath % img_id).getroot()
        img = cv2.imread(self._imgpath % img_id)
        height, width, channels = img.shape

        if self.target_transform is not None:
            target = self.target_transform(target, width, height)

        if self.transform is not None:
            target = np.array(target)
            img, boxes, labels = self.transform(img, target[:, :4], target[:, 4])
            # to rgb
            img = img[:, :, (2, 1, 0)]
            # img = img.transpose(2, 0, 1)
            target = np.hstack((boxes, np.expand_dims(labels, axis=1)))
        return torch.from_numpy(img).permute(2, 0, 1), target, height, width
        # return torch.from_numpy(img), target, height, width
pascal_voc_loader.py (project: pytorch-semseg, author: meetshah1995)
def transform(self, img, lbl):
        img = img[:, :, ::-1]
        img = img.astype(np.float64)
        img -= self.mean
        img = m.imresize(img, (self.img_size[0], self.img_size[1]))
        # Resize scales images from 0 to 255, thus we need
        # to divide by 255.0
        img = img.astype(float) / 255.0
        # HWC -> CHW
        img = img.transpose(2, 0, 1)

        lbl[lbl==255] = 0
        lbl = lbl.astype(float)
        lbl = m.imresize(lbl, (self.img_size[0], self.img_size[1]), 'nearest', mode='F')
        lbl = lbl.astype(int)

        img = torch.from_numpy(img).float()
        lbl = torch.from_numpy(lbl).long()
        return img, lbl
ade20k_loader.py (project: pytorch-semseg, author: meetshah1995)
def transform(self, img, lbl):
        img = img[:, :, ::-1]
        img = img.astype(np.float64)
        img -= self.mean
        img = m.imresize(img, (self.img_size[0], self.img_size[1]))
        # Resize scales images from 0 to 255, thus we need
        # to divide by 255.0
        img = img.astype(float) / 255.0
        # HWC -> CHW
        img = img.transpose(2, 0, 1)

        lbl = self.encode_segmap(lbl)
        classes = np.unique(lbl)
        lbl = lbl.astype(float)
        lbl = m.imresize(lbl, (self.img_size[0], self.img_size[1]), 'nearest', mode='F')
        lbl = lbl.astype(int)
        assert(np.all(classes == np.unique(lbl)))

        img = torch.from_numpy(img).float()
        lbl = torch.from_numpy(lbl).long()
        return img, lbl
occlusion.py (project: DeepLearning_PlantDiseases, author: MarkoArsenovic)
def Occlusion_exp(image,occluding_size,occluding_stride,model,preprocess,classes,groundTruth):    
    img = np.copy(image)
    height, width,_= img.shape
    output_height = int(math.ceil((height-occluding_size)/occluding_stride+1))
    output_width = int(math.ceil((width-occluding_size)/occluding_stride+1))
    ocludedImages=[]
    for h in range(output_height):
        for w in range(output_width):
            #occluder region
            h_start = h*occluding_stride
            w_start = w*occluding_stride
            h_end = min(height, h_start + occluding_size)
            w_end = min(width, w_start + occluding_size)

            input_image = copy.copy(img)
            input_image[h_start:h_end,w_start:w_end,:] =  0
            ocludedImages.append(preprocess(Image.fromarray(input_image)))

    L = np.empty(output_height*output_width)
    L.fill(groundTruth)
    L = torch.from_numpy(L)
    tensor_images = torch.stack([img for img in ocludedImages])
    dataset = torch.utils.data.TensorDataset(tensor_images,L) 
    dataloader = torch.utils.data.DataLoader(dataset,batch_size=5,shuffle=False, num_workers=8) 

    heatmap=np.empty(0)
    model.eval()
    for data in dataloader:
        images, labels = data

        if use_gpu:
            # `async` is a reserved word in Python 3.7+; recent PyTorch uses non_blocking
            images, labels = images.cuda(), labels.cuda(non_blocking=True)

        outputs = model(Variable(images))
        m = nn.Softmax()
        outputs=m(outputs)
        if use_gpu:
            outs = outputs.cpu()
        else:
            outs = outputs
        heatmap = np.concatenate((heatmap, outs[0:outs.size()[0], groundTruth].data.numpy()))

    return heatmap.reshape((output_height, output_width))
cartpole_wrapper.py (project: pytorch-nec, author: mjacar)
def get_screen(self):
    screen = self.env.render(mode='rgb_array').transpose(
        (2, 0, 1))  # transpose into torch order (CHW)
    # Strip off the top and bottom of the screen
    screen = screen[:, 160:320]
    view_width = 320
    cart_location = self.get_cart_location()
    if cart_location < view_width // 2:
        slice_range = slice(view_width)
    elif cart_location > (self.screen_width - view_width // 2):
        slice_range = slice(-view_width, None)
    else:
        slice_range = slice(cart_location - view_width // 2,
                            cart_location + view_width // 2)
    # Strip off the edges, so that we have a square image centered on a cart
    screen = screen[:, :, slice_range]
    # Convert to float, rescale, and convert to a torch tensor
    # (this doesn't require a copy)
    screen = np.ascontiguousarray(screen, dtype=np.float32) / 255
    screen = torch.from_numpy(screen)
    # Resize, and add a batch dimension (BCHW)
    return self.resize(screen).numpy()
meter.py (project: python-utils, author: zhijian-liu)
def add(self, outputs, targets):
        outputs = to_numpy(outputs)
        targets = to_numpy(targets)

        if np.ndim(targets) == 2:
            targets = np.argmax(targets, 1)

        assert np.ndim(outputs) == 2, 'wrong output size (2D expected)'
        assert np.ndim(targets) == 1, 'wrong target size (1D or 2D expected)'
        assert targets.shape[0] == outputs.shape[0], 'number of outputs and targets do not match'

        top_k = self.top_k
        max_k = int(top_k[-1])

        predict = torch.from_numpy(outputs).topk(max_k, 1, True, True)[1].numpy()
        correct = (predict == targets[:, np.newaxis].repeat(predict.shape[1], 1))

        self.size += targets.shape[0]
        for k in top_k:
            self.corrects[k] += correct[:, :k].sum()
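The topk bookkeeping above is easier to see on toy data; the following standalone sketch (made-up scores and labels) computes the same top-k hit counts that the method accumulates into self.corrects:

import numpy as np
import torch

outputs = np.array([[0.1, 0.7, 0.2],
                    [0.8, 0.15, 0.05]], dtype=np.float32)   # two samples, three classes
targets = np.array([1, 1])
predict = torch.from_numpy(outputs).topk(2, 1, True, True)[1].numpy()
correct = (predict == targets[:, np.newaxis].repeat(predict.shape[1], 1))
print(correct[:, :1].sum() / targets.shape[0])   # top-1 accuracy: 0.5
print(correct[:, :2].sum() / targets.shape[0])   # top-2 accuracy: 1.0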
train.py (project: speed, author: keon)
def train(e, model, opt, dataset, arg, cuda=False):
    model.train()
    criterion = nn.MSELoss()
    losses = []

    batcher = dataset.get_batcher(shuffle=True, augment=True)
    for b, (x, y) in enumerate(batcher, 1):
        x = V(th.from_numpy(x).float()).cuda()
        y = V(th.from_numpy(y).float()).cuda()
        opt.zero_grad()
        logit = model(x)
        loss = criterion(logit, y)
        loss.backward()
        opt.step()

        losses.append(loss.data[0])
        if arg.verbose and b % 50 == 0:
            loss_t = np.mean(losses[:-49])
            print('[train] [e]:%s [b]:%s - [loss]:%s' % (e, b, loss_t))
    return losses
train.py (project: speed, author: keon)
def validate(models, dataset, arg, cuda=False):
    criterion = nn.MSELoss()
    losses = []
    batcher = dataset.get_batcher(shuffle=True, augment=False)
    for b, (x, y) in enumerate(batcher, 1):
        x = V(th.from_numpy(x).float()).cuda()
        y = V(th.from_numpy(y).float()).cuda()
        # Ensemble average
        logit = None
        for model, _ in models:
            model.eval()
            logit = model(x) if logit is None else logit + model(x)
        logit = th.div(logit, len(models))
        loss = criterion(logit, y)
        losses.append(loss.data[0])
    return np.mean(losses)
predict.py (project: speed, author: keon)
def predict(models, dataset, arg, cuda=False):
    prediction_file = open('save/predictions.txt', 'w')
    batcher = dataset.get_batcher(shuffle=False, augment=False)
    for b, (x, _) in enumerate(batcher, 1):
        x = V(th.from_numpy(x).float()).cuda()
        # Ensemble average
        logit = None
        for model, _ in models:
            model.eval()
            logit = model(x) if logit is None else logit + model(x)
        logit = th.div(logit, len(models))
        prediction = logit.cpu().data[0][0]
        prediction_file.write('%s\n' % prediction)
        if arg.verbose and b % 100 == 0:
            print('[predict] [b]:%s - prediction: %s' % (b, prediction))
    # prediction_file.close()
actor_critic.py (project: pytorch.rl.learning, author: moskomule)
def _loop(self):
        done = False
        total_reward, reward, iter = 0, 0, 0
        self.state = self.env.reset()
        while not done:
            action = self.policy()
            _state, reward, done, _ = self.env.step(action)
            # if _state is terminal, state value is 0
            v = 0 if done else self.state_value(_state)
            delta = reward + self.gamma * v - self.state_value(self.state)
            # \nabla_w v = s, since v = s^{\top} w
            self.state_value_weight += self.beta * delta * to_tensor(self.state).float()
            # \pi(a) = x^{\top}(a)w, where x is feature and w is weight
            # \nabla\ln\pi(a) = x(a) - \sum_b \pi(b)x(b)
            direction = self.feature(_state, action) - sum(
                    [self.softmax @ torch.cat([self.feature(_state, a).unsqueeze(0) for a in self.actions])])

            self.weight += self.alpha * pow(self.gamma, iter) * delta * direction
            total_reward += reward
            self.state = _state
            iter += 1
        return total_reward
REINFORCE_baseline.py (project: pytorch.rl.learning, author: moskomule)
def _loop(self):
        done = False
        total_reward, reward, iter = 0, 0, 0
        self.state = self.env.reset()
        weight = self.weight
        while not done:
            action = self.policy()
            _state, reward, done, _ = self.env.step(action)
            # use current weight to generate an episode
            # \pi(a) = x^{\top}(a)w, where x is feature and w is weight
            # \nabla\ln\pi(a) = x(a) - \sum_b \pi(b)x(b)
            delta = reward - self.state_value(_state)
            self.state_value_weight += self.beta * delta * to_tensor(_state).float()
            direction = self.feature(_state, action) - sum(
                [self.softmax @ torch.cat([self.feature(_state, a).unsqueeze(0) for a in self.actions])])
            weight += self.alpha * pow(self.gamma, iter) * delta * direction
            total_reward += reward
            iter += 1
        # update weight
        self.weight = weight
        return total_reward
myimgfolder.py (project: colorNet-pytorch, author: shufanwu)
def __getitem__(self, index):
        path, target = self.imgs[index]
        img = self.loader(path)
        if self.transform is not None:
            img_original = self.transform(img)
            img_original = np.asarray(img_original)

            img_lab = rgb2lab(img_original)
            img_lab = (img_lab + 128) / 255
            img_ab = img_lab[:, :, 1:3]
            img_ab = torch.from_numpy(img_ab.transpose((2, 0, 1)))
            img_original = rgb2gray(img_original)
            img_original = torch.from_numpy(img_original)
        if self.target_transform is not None:
            target = self.target_transform(target)
        return (img_original, img_ab), target
myimgfolder.py (project: colorNet-pytorch, author: shufanwu)
def __getitem__(self, index):
        path, target = self.imgs[index]
        img = self.loader(path)

        img_scale = img.copy()
        img_original = img
        img_scale = scale_transform(img_scale)

        img_scale = np.asarray(img_scale)
        img_original = np.asarray(img_original)

        img_scale = rgb2gray(img_scale)
        img_scale = torch.from_numpy(img_scale)
        img_original = rgb2gray(img_original)
        img_original = torch.from_numpy(img_original)
        return (img_original, img_scale), target
torch.py (project: emu, author: mlosch)
def forward(self, input):
        input_torch = torch.from_numpy(input)
        if self.use_gpu:
            input_torch = input_torch.cuda()
        else:
            input_torch = input_torch.float()

        input_var = Variable(input_torch)

        # forward
        out = self.model.forward(input_var)

        if type(out) is list:
            clean_out = []
            for v in out:
                clean_out.append(v.data.cpu().numpy())
            out = clean_out
        else:
            out = out.data.cpu().numpy()
        self.ready = True

        return out
similarity.py (project: sef, author: passalis)
def fast_heat_similarity_matrix(X, sigma):
    """
    PyTorch based similarity calculation
    :param X: the matrix with the data
    :param sigma: scaling factor
    :return: the similarity matrix
    """
    use_gpu = False
    # Use GPU if available
    if torch.cuda.device_count() > 0:
        use_gpu = True

    X = Variable(torch.from_numpy(np.float32(X)))
    sigma = Variable(torch.from_numpy(np.float32([sigma])))
    if use_gpu:
        X, sigma = X.cuda(), sigma.cuda()

    D = sym_heat_similarity_matrix(X, sigma)

    if use_gpu:
        D = D.cpu()

    return D.data.numpy()
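A hypothetical call for context (sym_heat_similarity_matrix is assumed to be defined elsewhere in the same module): X is an (n_samples, n_features) matrix and the result is the (n_samples, n_samples) similarity matrix as a NumPy array, computed on the GPU when one is available:

X = np.random.rand(100, 16)
D = fast_heat_similarity_matrix(X, sigma=1.0)
print(D.shape)   # (100, 100)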
linear.py (project: sef, author: passalis)
def __init__(self, input_dimensionality, output_dimensionality, scaler='default'):
        """
        Creates a Linear SEF object
        :param input_dimensionality: dimensionality of the input space
        :param output_dimensionality: dimensionality of the target space
        :param learning_rate: learning rate to be used for the optimization
        :param regularizer_weight: the weight of the regularizer
        :param scaler:
        """

        # Call base constructor
        SEF_Base.__init__(self, input_dimensionality, output_dimensionality, scaler)

        # Projection weights variables
        W = np.float32(0.1 * np.random.randn(self.input_dimensionality, output_dimensionality))
        self.W = Variable(torch.from_numpy(W), requires_grad=True)
        self.trainable_params = [self.W]
relatedness.py (project: SentEval, author: facebookresearch)
def trainepoch(self, X, y, nepoches=1):
        self.model.train()
        for _ in range(self.nepoch, self.nepoch + nepoches):
            permutation = np.random.permutation(len(X))
            all_costs = []
            for i in range(0, len(X), self.batch_size):
                # forward
                idx = torch.from_numpy(permutation[i:i + self.batch_size]).long().cuda()
                Xbatch = Variable(X.index_select(0, idx))
                ybatch = Variable(y.index_select(0, idx))
                output = self.model(Xbatch)
                # loss
                loss = self.loss_fn(output, ybatch)
                all_costs.append(loss.data[0])
                # backward
                self.optimizer.zero_grad()
                loss.backward()
                # Update parameters
                self.optimizer.step()
        self.nepoch += nepoches
util_test.py (project: allennlp, author: allenai)
def test_last_dim_softmax_does_softmax_on_last_dim(self):
        batch_size = 1
        length_1 = 5
        length_2 = 3
        num_options = 4
        options_array = numpy.zeros((batch_size, length_1, length_2, num_options))
        for i in range(length_1):
            for j in range(length_2):
                options_array[0, i, j] = [2, 4, 0, 1]
        options_tensor = Variable(torch.from_numpy(options_array))
        softmax_tensor = util.last_dim_softmax(options_tensor).data.numpy()
        assert softmax_tensor.shape == (batch_size, length_1, length_2, num_options)
        for i in range(length_1):
            for j in range(length_2):
                assert_almost_equal(softmax_tensor[0, i, j],
                                    [0.112457, 0.830953, 0.015219, 0.041371],
                                    decimal=5)
util_test.py (project: allennlp, author: allenai)
def test_last_dim_softmax_handles_mask_correctly(self):
        batch_size = 1
        length_1 = 4
        length_2 = 3
        num_options = 5
        options_array = numpy.zeros((batch_size, length_1, length_2, num_options))
        for i in range(length_1):
            for j in range(length_2):
                options_array[0, i, j] = [2, 4, 0, 1, 6]
        mask = Variable(torch.IntTensor([[1, 1, 1, 1, 0]]))
        options_tensor = Variable(torch.from_numpy(options_array).float())
        softmax_tensor = util.last_dim_softmax(options_tensor, mask).data.numpy()
        assert softmax_tensor.shape == (batch_size, length_1, length_2, num_options)
        for i in range(length_1):
            for j in range(length_2):
                assert_almost_equal(softmax_tensor[0, i, j],
                                    [0.112457, 0.830953, 0.015219, 0.041371, 0.0],
                                    decimal=5)
util_test.py (project: allennlp, author: allenai)
def test_weighted_sum_handles_uneven_higher_order_input(self):
        batch_size = 1
        length_1 = 5
        length_2 = 6
        length_3 = 2
        embedding_dim = 4
        sentence_array = numpy.random.rand(batch_size, length_3, embedding_dim)
        attention_array = numpy.random.rand(batch_size, length_1, length_2, length_3)
        sentence_tensor = Variable(torch.from_numpy(sentence_array).float())
        attention_tensor = Variable(torch.from_numpy(attention_array).float())
        aggregated_array = util.weighted_sum(sentence_tensor, attention_tensor).data.numpy()
        assert aggregated_array.shape == (batch_size, length_1, length_2, embedding_dim)
        for i in range(length_1):
            for j in range(length_2):
                expected_array = (attention_array[0, i, j, 0] * sentence_array[0, 0] +
                                  attention_array[0, i, j, 1] * sentence_array[0, 1])
                numpy.testing.assert_almost_equal(aggregated_array[0, i, j], expected_array,
                                                  decimal=5)
util_test.py (project: allennlp, author: allenai)
def test_add_sentence_boundary_token_ids_handles_3D_input(self):
        tensor = Variable(torch.from_numpy(
                numpy.array([[[1, 2, 3, 4],
                              [5, 5, 5, 5],
                              [6, 8, 1, 2]],
                             [[4, 3, 2, 1],
                              [8, 7, 6, 5],
                              [0, 0, 0, 0]]])))
        mask = ((tensor > 0).sum(dim=-1) > 0).type(torch.LongTensor)
        bos = Variable(torch.from_numpy(numpy.array([9, 9, 9, 9])))
        eos = Variable(torch.from_numpy(numpy.array([10, 10, 10, 10])))
        new_tensor, new_mask = util.add_sentence_boundary_token_ids(tensor, mask, bos, eos)
        expected_new_tensor = numpy.array([[[9, 9, 9, 9],
                                            [1, 2, 3, 4],
                                            [5, 5, 5, 5],
                                            [6, 8, 1, 2],
                                            [10, 10, 10, 10]],
                                           [[9, 9, 9, 9],
                                            [4, 3, 2, 1],
                                            [8, 7, 6, 5],
                                            [10, 10, 10, 10],
                                            [0, 0, 0, 0]]])
        assert (new_tensor.data.numpy() == expected_new_tensor).all()
        assert (new_mask.data.numpy() == ((expected_new_tensor > 0).sum(axis=-1) > 0)).all()
util_test.py (project: allennlp, author: allenai)
def test_remove_sentence_boundaries(self):
        tensor = Variable(torch.from_numpy(numpy.random.rand(3, 5, 7)))
        mask = Variable(torch.from_numpy(
                # The mask with two elements is to test the corner case
                # of an empty sequence, so here we are removing boundaries
                # from  "<S> </S>"
                numpy.array([[1, 1, 0, 0, 0],
                             [1, 1, 1, 1, 1],
                             [1, 1, 1, 1, 0]]))).long()
        new_tensor, new_mask = util.remove_sentence_boundaries(tensor, mask)

        expected_new_tensor = Variable(torch.zeros(3, 3, 7))
        expected_new_tensor[1, 0:3, :] = tensor[1, 1:4, :]
        expected_new_tensor[2, 0:2, :] = tensor[2, 1:3, :]
        assert_array_almost_equal(new_tensor.data.numpy(), expected_new_tensor.data.numpy())

        expected_new_mask = Variable(torch.from_numpy(
                numpy.array([[0, 0, 0],
                             [1, 1, 1],
                             [1, 1, 0]]))).long()
        assert (new_mask.data.numpy() == expected_new_mask.data.numpy()).all()
elmo.py (project: allennlp, author: allenai)
def __init__(self,
                 options_file: str,
                 weight_file: str) -> None:
        super(_ElmoCharacterEncoder, self).__init__()

        with open(cached_path(options_file), 'r') as fin:
            self._options = json.load(fin)
        self._weight_file = weight_file

        self.output_dim = self._options['lstm']['projection_dim']

        self._load_weights()

        # Cache the arrays for use in forward -- +1 due to masking.
        self._beginning_of_sentence_characters = Variable(torch.from_numpy(
                numpy.array(ELMoCharacterMapper.beginning_of_sentence_characters) + 1
        ))
        self._end_of_sentence_characters = Variable(torch.from_numpy(
                numpy.array(ELMoCharacterMapper.end_of_sentence_characters) + 1
        ))
array_field.py (project: allennlp, author: allenai)
def as_tensor(self,
                  padding_lengths: Dict[str, int],
                  cuda_device: int = -1,
                  for_training: bool = True) -> torch.Tensor:
        max_shape = [padding_lengths["dimension_{}".format(i)]
                     for i in range(len(padding_lengths))]

        return_array = numpy.ones(max_shape, "float32") * self.padding_value

        # If the tensor has a different shape from the largest tensor, pad dimensions with zeros to
        # form the right shaped list of slices for insertion into the final tensor.
        slicing_shape = list(self.array.shape)
        if len(self.array.shape) < len(max_shape):
            slicing_shape = slicing_shape + [0 for _ in range(len(max_shape) - len(self.array.shape))]
        slices = [slice(0, x) for x in slicing_shape]
        return_array[slices] = self.array
        tensor = Variable(torch.from_numpy(return_array), volatile=not for_training)
        return tensor if cuda_device == -1 else tensor.cuda(cuda_device)
cls_sparse_skip_filt.py (project: mss_pytorch, author: Js-Mim)
def forward(self, H_j_dec, input_x):
        if torch.has_cudnn:
            # Input is of the shape : (B, T, N)
            input_x = Variable(torch.from_numpy(input_x[:, self._L:-self._L, :]).cuda(), requires_grad=True)

        else:
            # Input is of the shape : (B, T, N)
            # Cropping some "un-necessary" frequency sub-bands
            input_x = Variable(torch.from_numpy(input_x[:, self._L:-self._L, :]), requires_grad=True)

        # Decode/Sparsify mask
        mask_t1 = self.relu(self.ffDec(H_j_dec))
        # Apply skip-filtering connections
        Y_j = torch.mul(mask_t1, input_x)

        return Y_j, mask_t1
data_loader.py (project: torch_light, author: ne7ermore)
def __next__(self):
        def to_longest(insts):
            inst_data_tensor = Variable(torch.from_numpy(insts))
            if self.cuda:
                inst_data_tensor = inst_data_tensor.cuda()
            return inst_data_tensor

        if self._step == self._stop_step:
            self._step = 0
            raise StopIteration()

        _start = self._step*self._batch_size
        _bsz = self._batch_size
        self._step += 1
        data = to_longest(self._src_sents[_start: _start+_bsz])
        label = to_longest(self._label[_start: _start+_bsz])
        return data, label.contiguous().view(-1)
data_loader.py (project: torch_light, author: ne7ermore)
def __next__(self):
        def pad_to_longest(insts, max_len):
            inst_data = np.array([inst + [const.PAD] * (max_len - len(inst)) for inst in insts])

            inst_data_tensor = Variable(torch.from_numpy(inst_data), volatile=self.evaluation)
            if self.cuda:
                inst_data_tensor = inst_data_tensor.cuda()
            return inst_data_tensor

        if self._step == self._stop_step:
            self._step = 0
            raise StopIteration()

        _start = self._step*self._batch_size
        _bsz = self._batch_size
        self._step += 1
        data = pad_to_longest(self._src_sents[_start:_start+_bsz], self._max_len)
        label = Variable(torch.from_numpy(self._label[_start:_start+_bsz]),
                    volatile=self.evaluation)
        if self.cuda:
            label = label.cuda()

        return data, label
data_loader.py (project: torch_light, author: ne7ermore)
def __next__(self):
        def pad_to_longest(insts, max_len):
            inst_data = np.array([inst + [const.PAD] * (max_len - len(inst)) for inst in insts])

            inst_data_tensor = Variable(torch.from_numpy(inst_data), volatile=self.evaluation)
            if self.cuda:
                inst_data_tensor = inst_data_tensor.cuda()
            return inst_data_tensor

        if self._step == self._stop_step:
            self._step = 0
            raise StopIteration()

        _start = self._step*self._batch_size
        _bsz = min(self._batch_size, self.sents_size-_start)
        self._step += 1
        data = pad_to_longest(self._src_sents[_start:_start+_bsz], self._max_len)
        label = Variable(torch.from_numpy(self._label[_start:_start+_bsz]),
                    volatile=self.evaluation)
        if self.cuda:
            label = label.cuda()

        return data, label
data_loader.py (project: torch_light, author: ne7ermore)
def __next__(self):
        def to_longest(insts):
            inst_data_tensor = Variable(torch.from_numpy(insts))
            if self.cuda:
                inst_data_tensor = inst_data_tensor.cuda()
            return inst_data_tensor

        if self._step == self._stop_step:
            self._step = 0
            raise StopIteration()

        _start = self._step*self._batch_size
        _bsz = self._batch_size
        self._step += 1

        enc_input = to_longest(self._enc_sents[_start: _start+_bsz])
        dec_input = to_longest(self._dec_sents[_start: _start+_bsz])
        label = to_longest(self._label[_start: _start+_bsz])
        return enc_input, dec_input, label

