Python FloatTensor() usage examples

transforms.py (project: audio, author: pytorch)
def __call__(self, x):
        """

        Args:
            x (FloatTensor/LongTensor or ndarray)

        Returns:
            x_mu (LongTensor or ndarray)

        """
        mu = self.qc - 1.
        if isinstance(x, np.ndarray):
            x_mu = np.sign(x) * np.log1p(mu * np.abs(x)) / np.log1p(mu)
            x_mu = ((x_mu + 1) / 2 * mu + 0.5).astype(int)
        elif isinstance(x, (torch.Tensor, torch.LongTensor)):
            if isinstance(x, torch.LongTensor):
                x = x.float()
            mu = torch.FloatTensor([mu])
            x_mu = torch.sign(x) * torch.log1p(mu *
                                               torch.abs(x)) / torch.log1p(mu)
            x_mu = ((x_mu + 1) / 2 * mu + 0.5).long()
        return x_mu
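As a quick illustration of the numpy branch above, a minimal standalone sketch (the helper name and the qc=256 default are illustrative, not part of the project):

import numpy as np

# Standalone mu-law encoder mirroring the numpy branch above (illustrative).
def mu_law_encode(x, qc=256):
    mu = qc - 1.
    x_mu = np.sign(x) * np.log1p(mu * np.abs(x)) / np.log1p(mu)
    return ((x_mu + 1) / 2 * mu + 0.5).astype(int)

print(mu_law_encode(np.array([-1.0, -0.5, 0.0, 0.5, 1.0])))  # [  0  16 128 239 255]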
main.py (project: SGAN, author: YuhangSong)
def vector2image(x):
    # Render each scalar of x as a constant-valued, block_size-wide stripe
    # of a single-channel image, one stripe per grid cell.
    block_size = chris_domain.BLOCK_SIZE*3
    x_temp = torch.FloatTensor(
        x.size()[0],
        x.size()[1],
        1,
        block_size,
        params['GRID_SIZE']*block_size
    ).cuda().fill_(0.0)
    for b in range(x.size()[0]):
        for d in range(x.size()[1]):
            for i in range(x.size()[2]):
                from_ = i*block_size
                to_ = (i+1)*block_size
                fill_ = float(x[b][d][i])
                x_temp[b,d,0,:,from_:to_].fill_(fill_)
    return x_temp
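The triple loop can be replaced by a single broadcasting expand; a loop-free sketch under the same globals (the name vector2image_fast is illustrative, and x.size(2) is assumed to equal params['GRID_SIZE'], as the loops above imply):

def vector2image_fast(x):
    # Same output as vector2image: repeat each scalar of x into a
    # block_size x block_size stripe, one stripe per grid cell.
    block_size = chris_domain.BLOCK_SIZE * 3
    b, d, g = x.size()
    out = x.view(b, d, 1, 1, g, 1).expand(b, d, 1, block_size, g, block_size)
    return out.contiguous().view(b, d, 1, block_size, g * block_size).cuda()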
main.py (project: SGAN, author: YuhangSong)
def plt_to_vis(fig, win, name):
    import io
    buf = io.BytesIO()
    fig.canvas.print_png(buf)
    buf.seek(0)

    # PNG -> float image in [0, 1], drop the alpha channel, HWC -> CHW
    img = np.asarray(Image.open(buf)).astype(float)[:, :, 0:3] / 255.0
    img = torch.FloatTensor(img).permute(2, 0, 1)
    vis.image(img,
              win=str(MULTI_RUN) + '-' + win,
              opts=dict(title=str(MULTI_RUN) + '-' + name))
decoder.py (project: ladder, author: abhiskk)
def bn_hat_z_layers(self, hat_z_layers, z_pre_layers):
        # TODO: Calculate batchnorm using GPU Tensors.
        assert len(hat_z_layers) == len(z_pre_layers)
        hat_z_layers_normalized = []
        for i, (hat_z, z_pre) in enumerate(zip(hat_z_layers, z_pre_layers)):
            if self.use_cuda:
                ones = Variable(torch.ones(z_pre.size()[0], 1).cuda())
            else:
                ones = Variable(torch.ones(z_pre.size()[0], 1))
            mean = torch.mean(z_pre, 0)
            noise_var = np.random.normal(loc=0.0, scale=1 - 1e-10, size=z_pre.size())
            if self.use_cuda:
                var = np.var(z_pre.data.cpu().numpy() + noise_var, axis=0).reshape(1, z_pre.size()[1])
            else:
                var = np.var(z_pre.data.numpy() + noise_var, axis=0).reshape(1, z_pre.size()[1])
            var = Variable(torch.FloatTensor(var))
            if self.use_cuda:
                hat_z = hat_z.cpu()
                ones = ones.cpu()
                mean = mean.cpu()
            hat_z_normalized = torch.div(hat_z - ones.mm(mean), ones.mm(torch.sqrt(var + 1e-10)))
            if self.use_cuda:
                hat_z_normalized = hat_z_normalized.cuda()
            hat_z_layers_normalized.append(hat_z_normalized)
        return hat_z_layers_normalized
encoder.py (project: ladder, author: abhiskk)
def forward_noise(self, tilde_h):
        # z_pre will be used in the decoder cost
        z_pre = self.linear(tilde_h)
        z_pre_norm = self.bn_normalize(z_pre)
        # Add noise
        noise = np.random.normal(loc=0.0, scale=self.noise_level, size=z_pre_norm.size())
        if self.use_cuda:
            noise = Variable(torch.cuda.FloatTensor(noise))
        else:
            noise = Variable(torch.FloatTensor(noise))
        # tilde_z will be used by decoder for reconstruction
        tilde_z = z_pre_norm + noise
        # store tilde_z in buffer
        self.buffer_tilde_z = tilde_z
        z = self.bn_gamma_beta(tilde_z)
        h = self.activation(z)
        return h
MaskedSelect.py (project: pytorch-dist, author: apaszke)
def type(self, type=None, tensorCache=None):
        if type is None:
            return self._type

        self._gradBuffer = self._gradBuffer.type(type)
        self.gradInput = self.gradInput.type(type)
        self.output = self.output.type(type)

        # These casts apply when switching between cuda/non-cuda types
        if type != 'torch.cuda.FloatTensor':
            self._maskIndexBuffer = self._maskIndexBuffer.long()
            self._maskIndices = self._maskIndices.long()
            self._gradMask = self._gradMask.byte()
        else:
            self._maskIndexBuffer = self._maskIndexBuffer.cuda()
            self._maskIndices = self._maskIndices.cuda()
            self._gradMask = self._gradMask.cuda()

        self._type = type
        return self
test_legacy_nn.py (project: pytorch-dist, author: apaszke)
def test_Copy(self):
        input = torch.randn(3,4).double()
        c = nn.Copy(torch.DoubleTensor, torch.FloatTensor)
        output = c.forward(input)
        self.assertEqual(torch.typename(output), 'torch.FloatTensor')
        self.assertEqual(output, input.float(), 1e-6)
        gradInput = c.backward(input, output.fill_(1))
        self.assertEqual(torch.typename(gradInput), 'torch.DoubleTensor')
        self.assertEqual(gradInput, output.double(), 1e-6)
        c.dontCast = True
        c.double()
        self.assertEqual(torch.typename(output), 'torch.FloatTensor')

        # Check that these don't raise errors
        c.__repr__()
        str(c)
test_torch.py (project: pytorch-dist, author: apaszke)
def test_serialization(self):
        a = [torch.randn(5, 5).float() for i in range(2)]
        b = [a[i % 2] for i in range(4)]
        b += [a[0].storage()]
        b += [a[0].storage()[1:4]]
        for use_name in (False, True):
            with tempfile.NamedTemporaryFile() as f:
                handle = f if not use_name else f.name
                torch.save(b, handle)
                f.seek(0)
                c = torch.load(handle)
            self.assertEqual(b, c, 0)
            self.assertTrue(isinstance(c[0], torch.FloatTensor))
            self.assertTrue(isinstance(c[1], torch.FloatTensor))
            self.assertTrue(isinstance(c[2], torch.FloatTensor))
            self.assertTrue(isinstance(c[3], torch.FloatTensor))
            self.assertTrue(isinstance(c[4], torch.FloatStorage))
            c[0].fill_(10)
            self.assertEqual(c[0], c[2], 0)
            self.assertEqual(c[4], torch.FloatStorage(25).fill_(10), 0)
            c[1].fill_(20)
            self.assertEqual(c[1], c[3], 0)
            self.assertEqual(c[4][1:4], c[5], 0)
test_nccl.py (project: pytorch-dist, author: apaszke)
def test_reduce_scatter(self):
        in_size = 32 * nGPUs
        out_size = 32

        inputs = [torch.FloatTensor(in_size).uniform_() for i in range(nGPUs)]
        expected = torch.FloatTensor(in_size).zero_()
        for t in inputs:
            expected.add_(t)
        expected = expected.view(nGPUs, 32)

        inputs = [inputs[i].cuda(i) for i in range(nGPUs)]
        outputs = [torch.cuda.FloatTensor(out_size, device=i)
                   for i in range(nGPUs)]
        nccl.reduce_scatter(inputs, outputs)

        for i in range(nGPUs):
            self.assertEqual(outputs[i], expected[i])
voc0712.py (project: ssd.pytorch, author: amdegroot)
def detection_collate(batch):
    """Custom collate fn for dealing with batches of images that have a different
    number of associated object annotations (bounding boxes).

    Arguments:
        batch: (tuple) A tuple of tensor images and lists of annotations

    Return:
        A tuple containing:
            1) (tensor) batch of images stacked on their 0 dim
            2) (list of tensors) annotations for a given image are stacked on 0 dim
    """
    targets = []
    imgs = []
    for sample in batch:
        imgs.append(sample[0])
        targets.append(torch.FloatTensor(sample[1]))
    return torch.stack(imgs, 0), targets
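A minimal usage sketch (DummyDetection is a stand-in for any dataset whose __getitem__ returns an image tensor plus a variable-length annotation list):

import torch
from torch.utils.data import DataLoader, Dataset

class DummyDetection(Dataset):
    # Each item: an image tensor plus a varying number of
    # [x1, y1, x2, y2, label] annotation rows.
    def __len__(self):
        return 8
    def __getitem__(self, idx):
        boxes = [[0.1, 0.1, 0.5, 0.5, 1.0]] * (1 + idx % 3)
        return torch.rand(3, 300, 300), boxes

loader = DataLoader(DummyDetection(), batch_size=4, collate_fn=detection_collate)
images, targets = next(iter(loader))
# images: (4, 3, 300, 300); targets: a list of 4 FloatTensors with varying row counts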
main_1.py (project: postfilt_gan, author: bajibabu)
def test(netG, opt):
    assert opt.netG != ''
    test_dir = opt.testdata_dir
    for f in os.listdir(test_dir):
        fname, ext = os.path.splitext(f)
        if ext == '.cmp':
            print(fname)
            cmp_file = os.path.join(test_dir, f)
            ac_data = read_binary_file(cmp_file, dim=47)
            ac_data = torch.FloatTensor(ac_data)
            noise = torch.FloatTensor(ac_data.size(0), nz)
            if opt.cuda:
                ac_data, noise = ac_data.cuda(), noise.cuda()
            ac_data = Variable(ac_data)
            noise = Variable(noise)
            noise.data.normal_(0, 1)
            generated_pulses = netG(noise, ac_data)
            generated_pulses = generated_pulses.data.cpu().numpy()
            generated_pulses = generated_pulses.reshape(ac_data.size(0), -1)
            out_file = os.path.join(test_dir, fname + '.pls')
            with open(out_file, 'wb') as fid:
                generated_pulses.tofile(fid)
main.py (project: postfilt_gan, author: bajibabu): the test() function here is identical to the main_1.py snippet above.
classifier.py (project: SentEval, author: facebookresearch)
def prepare_split(self, X, y, validation_data=None, validation_split=None):
        # Preparing validation data
        assert validation_split or validation_data
        if validation_data is not None:
            trainX, trainy = X, y
            devX, devy = validation_data
        else:
            permutation = np.random.permutation(len(X))
            trainidx = permutation[int(validation_split*len(X)):]
            devidx = permutation[0:int(validation_split*len(X))]
            trainX, trainy = X[trainidx], y[trainidx]
            devX, devy = X[devidx], y[devidx]

        # cudaEfficient keeps the full dataset on CPU (batches are moved to
        # the GPU later); otherwise move everything to the GPU up front.
        if not self.cudaEfficient:
            trainX = torch.FloatTensor(trainX).cuda()
            trainy = torch.LongTensor(trainy).cuda()
            devX = torch.FloatTensor(devX).cuda()
            devy = torch.LongTensor(devy).cuda()
        else:
            trainX = torch.FloatTensor(trainX)
            trainy = torch.LongTensor(trainy)
            devX = torch.FloatTensor(devX)
            devy = torch.LongTensor(devy)

        return trainX, trainy, devX, devy
classifier.py (project: SentEval, author: facebookresearch)
def score(self, devX, devy):
        self.model.eval()
        correct = 0
        if not isinstance(devX, torch.cuda.FloatTensor) or self.cudaEfficient:
            devX = torch.FloatTensor(devX).cuda()
            devy = torch.LongTensor(devy).cuda()
        for i in range(0, len(devX), self.batch_size):
            Xbatch = Variable(devX[i:i + self.batch_size], volatile=True)
            ybatch = Variable(devy[i:i + self.batch_size], volatile=True)
            if self.cudaEfficient:
                Xbatch = Xbatch.cuda()
                ybatch = ybatch.cuda()
            output = self.model(Xbatch)
            pred = output.data.max(1)[1]
            correct += pred.long().eq(ybatch.data.long()).sum()
        accuracy = 1.0*correct / len(devX)
        return accuracy
feedforward_test.py (project: allennlp, author: allenai)
def test_forward_gives_correct_output(self):
        params = Params({
                'input_dim': 2,
                'hidden_dims': 3,
                'activations': 'relu',
                'num_layers': 2
                })
        feedforward = FeedForward.from_params(params)

        constant_init = lambda tensor: torch.nn.init.constant(tensor, 1.)
        initializer = InitializerApplicator([(".*", constant_init)])
        initializer(feedforward)

        input_tensor = Variable(torch.FloatTensor([[-3, 1]]))
        output = feedforward(input_tensor).data.numpy()
        assert output.shape == (1, 3)
        # This output was checked by hand - ReLU makes output after first hidden layer [0, 0, 0],
        # which then gets a bias added in the second layer to be [1, 1, 1].
        assert_almost_equal(output, [[1, 1, 1]])
attention_test.py (project: allennlp, author: allenai)
def test_no_mask(self):
        attention = Attention()

        # Testing general non-batched case.
        vector = Variable(torch.FloatTensor([[0.3, 0.1, 0.5]]))
        matrix = Variable(torch.FloatTensor([[[0.6, 0.8, 0.1], [0.15, 0.5, 0.2]]]))

        result = attention(vector, matrix).data.numpy()
        assert_almost_equal(result, numpy.array([[0.52871835, 0.47128162]]))

        # Testing non-batched case where inputs are all 0s.
        vector = Variable(torch.FloatTensor([[0, 0, 0]]))
        matrix = Variable(torch.FloatTensor([[[0, 0, 0], [0, 0, 0]]]))

        result = attention(vector, matrix).data.numpy()
        assert_almost_equal(result, numpy.array([[0.5, 0.5]]))
attention_test.py (project: allennlp, author: allenai)
def test_batched_masked(self):
        attention = Attention()

        # Testing general masked non-batched case.
        vector = Variable(torch.FloatTensor([[0.3, 0.1, 0.5], [0.3, 0.1, 0.5]]))
        matrix = Variable(torch.FloatTensor([[[0.6, 0.8, 0.1], [0.15, 0.5, 0.2], [0.5, 0.3, 0.2]],
                                             [[0.6, 0.8, 0.1], [0.15, 0.5, 0.2], [0.5, 0.3, 0.2]]]))
        mask = Variable(torch.FloatTensor([[1.0, 1.0, 0.0], [1.0, 0.0, 1.0]]))
        result = attention(vector, matrix, mask).data.numpy()
        assert_almost_equal(result, numpy.array([[0.52871835, 0.47128162, 0.0],
                                                 [0.50749944, 0.0, 0.49250056]]))

        # Test the case where a mask is all 0s and an input is all 0s.
        vector = Variable(torch.FloatTensor([[0.0, 0.0, 0.0], [0.3, 0.1, 0.5]]))
        matrix = Variable(torch.FloatTensor([[[0.6, 0.8, 0.1], [0.15, 0.5, 0.2], [0.5, 0.3, 0.2]],
                                             [[0.6, 0.8, 0.1], [0.15, 0.5, 0.2], [0.5, 0.3, 0.2]]]))
        mask = Variable(torch.FloatTensor([[1.0, 1.0, 0.0], [0.0, 0.0, 0.0]]))
        result = attention(vector, matrix, mask).data.numpy()
        assert_almost_equal(result, numpy.array([[0.5, 0.5, 0.0],
                                                 [0.0, 0.0, 0.0]]))
util_test.py (project: allennlp, author: allenai)
def test_logsumexp(self):
        # First a simple example where we add probabilities in log space.
        tensor = Variable(torch.FloatTensor([[.4, .1, .2]]))
        log_tensor = tensor.log()
        log_summed = util.logsumexp(log_tensor, dim=-1, keepdim=False)
        assert_almost_equal(log_summed.exp().data.numpy(), [.7])
        log_summed = util.logsumexp(log_tensor, dim=-1, keepdim=True)
        assert_almost_equal(log_summed.exp().data.numpy(), [[.7]])

        # Then some more atypical examples, and making sure this will work with how we handle
        # log masks.
        tensor = Variable(torch.FloatTensor([[float('-inf'), 20.0]]))
        assert_almost_equal(util.logsumexp(tensor).data.numpy(), [20.0])
        tensor = Variable(torch.FloatTensor([[-200.0, 20.0]]))
        assert_almost_equal(util.logsumexp(tensor).data.numpy(), [20.0])
        tensor = Variable(torch.FloatTensor([[20.0, 20.0], [-200.0, 200.0]]))
        assert_almost_equal(util.logsumexp(tensor, dim=0).data.numpy(), [20.0, 200.0])
coref.py (project: allennlp, author: allenai)
def _prune_and_sort_spans(mention_scores: torch.FloatTensor,
                              num_spans_to_keep: int) -> torch.IntTensor:
        """
        The indices of the top-k scoring spans according to span_scores. We return the
        indices in their original order, not ordered by score, so that we can rely on
        the ordering to consider the previous k spans as antecedents for each span later.

        Parameters
        ----------
        mention_scores : ``torch.FloatTensor``, required.
            The mention score for every candidate, with shape (batch_size, num_spans, 1).
        num_spans_to_keep : ``int``, required.
            The number of spans to keep when pruning.
        Returns
        -------
        top_span_indices : ``torch.IntTensor``, required.
            The indices of the top-k scoring spans. Has shape (batch_size, num_spans_to_keep).
        """
        # Shape: (batch_size, num_spans_to_keep, 1)
        _, top_span_indices = mention_scores.topk(num_spans_to_keep, 1)
        top_span_indices, _ = torch.sort(top_span_indices, 1)

        # Shape: (batch_size, num_spans_to_keep)
        top_span_indices = top_span_indices.squeeze(-1)
        return top_span_indices
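A small behaviour check, treating the method as the staticmethod it appears to be (values are illustrative):

import torch

scores = torch.FloatTensor([[[0.1], [0.9], [0.3], [0.7]]])  # (batch 1, 4 spans, 1)
# The two best spans sit at indices 1 and 3; they come back in original order.
print(_prune_and_sort_spans(scores, 2))  # [[1, 3]]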
embedding.py (project: allennlp, author: allenai)
def _read_pretrained_hdf5_format_embedding_file(embeddings_filename: str, # pylint: disable=invalid-name
                                                embedding_dim: int,
                                                vocab: Vocabulary,
                                                namespace: str = "tokens") -> torch.FloatTensor:
    """
    Reads from a hdf5 formatted file.  The embedding matrix is assumed to
    be keyed by 'embedding' and of size ``(num_tokens, embedding_dim)``.
    """
    with h5py.File(embeddings_filename, 'r') as fin:
        embeddings = fin['embedding'][...]

    if list(embeddings.shape) != [vocab.get_vocab_size(namespace), embedding_dim]:
        raise ConfigurationError(
                "Read shape {0} embeddings from the file, but expected {1}".format(
                        list(embeddings.shape), [vocab.get_vocab_size(namespace), embedding_dim]))

    return torch.FloatTensor(embeddings)
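For reference, a hedged sketch of writing a file this reader accepts (standard h5py API; the 1000 x 300 shape is illustrative and must match the vocabulary size and embedding_dim):

import h5py
import numpy as np

with h5py.File('embeddings.h5', 'w') as fout:
    # One row per token in the vocabulary, keyed by 'embedding'.
    fout.create_dataset('embedding',
                        data=np.random.rand(1000, 300).astype('float32'))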
multi_head_self_attention.py (project: allennlp, author: allenai)
def __init__(self,
                 num_heads: int,
                 input_dim: int,
                 attention_dim: int,
                 values_dim: int,
                 output_projection_dim: int = None,
                 attention_dropout_prob: float = 0.1) -> None:
        super(MultiHeadSelfAttention, self).__init__()

        self._num_heads = num_heads
        self._input_dim = input_dim
        self._output_dim = output_projection_dim or input_dim
        self._attention_dim = attention_dim
        self._values_dim = values_dim

        self._query_projections = Parameter(torch.FloatTensor(num_heads, input_dim, attention_dim))
        self._key_projections = Parameter(torch.FloatTensor(num_heads, input_dim, attention_dim))
        self._value_projections = Parameter(torch.FloatTensor(num_heads, input_dim, values_dim))

        self._scale = input_dim ** 0.5
        self._output_projection = Linear(num_heads * values_dim,
                                         self._output_dim)
        self._attention_dropout = Dropout(attention_dropout_prob)

        self.reset_parameters()
util.py (project: allennlp, author: allenai)
def get_dropout_mask(dropout_probability: float, tensor_for_masking: torch.autograd.Variable):
    """
    Computes and returns an element-wise dropout mask for a given tensor, where
    each element in the mask is dropped out with probability dropout_probability.
    Note that the mask is NOT applied to the tensor - the tensor is passed to retain
    the correct CUDA tensor type for the mask.

    Parameters
    ----------
    dropout_probability : float, required.
        Probability of dropping a dimension of the input.
    tensor_for_masking : torch.autograd.Variable, required.
        The tensor whose shape and CUDA type the mask should match.

    Returns
    -------
    A torch.FloatTensor consisting of the binary mask scaled by 1 / (1 - dropout_probability).
    This scaling ensures that the expected value and variance of the output of applying
    this mask to the original tensor match those of the original tensor.
    """
    binary_mask = tensor_for_masking.clone()
    binary_mask.data.copy_(torch.rand(tensor_for_masking.size()) > dropout_probability)
    # Scale mask by 1/keep_prob to preserve output statistics.
    dropout_mask = binary_mask.float().div(1.0 - dropout_probability)
    return dropout_mask
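A quick numerical check of the scaling, in the same legacy Variable style as the snippet (not part of the original tests):

import torch
from torch.autograd import Variable

x = Variable(torch.ones(1000000))
mask = get_dropout_mask(0.5, x)
# Roughly half the entries are 0 and half are 2.0, so the mean stays near 1.0.
print(mask.mean().data[0])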
util.py (project: allennlp, author: allenai)
def logsumexp(tensor: torch.Tensor,
              dim: int = -1,
              keepdim: bool = False) -> torch.Tensor:
    """
    A numerically stable computation of logsumexp. This is mathematically equivalent to
    `tensor.exp().sum(dim, keepdim=keepdim).log()`.  This function is typically used for summing log
    probabilities.

    Parameters
    ----------
    tensor : torch.FloatTensor, required.
        A tensor of arbitrary size.
    dim : int, optional (default = -1)
        The dimension of the tensor to apply the logsumexp to.
    keepdim: bool, optional (default = False)
        Whether to retain a dimension of size one at the dimension we reduce over.
    """
    max_score, _ = tensor.max(dim, keepdim=keepdim)
    if keepdim:
        stable_vec = tensor - max_score
    else:
        stable_vec = tensor - max_score.unsqueeze(dim)
    return max_score + (stable_vec.exp().sum(dim, keepdim=keepdim)).log()
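Where the stability matters: a naive exp().sum().log() overflows for large inputs, while this version does not (a quick sketch):

import torch
from torch.autograd import Variable

t = Variable(torch.FloatTensor([[1000.0, 1000.0]]))
print(t.exp().sum(1).log())  # inf: exp(1000) overflows float32
print(logsumexp(t, dim=1))   # 1000.6931 = 1000 + log(2), the correct value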
trainer.py (project: DenseNet, author: kevinzakka)
def accuracy(self, predicted, ground_truth):
        """
        Utility function for calculating the accuracy of the model.

        Params
        ------
        - predicted: (torch.FloatTensor)
        - ground_truth: (torch.LongTensor)

        Returns
        -------
        - acc: (float) % accuracy.
        """
        predicted = torch.max(predicted, 1)[1]
        total = len(ground_truth)
        correct = (predicted == ground_truth).sum()
        # Float arithmetic: plain `correct / total` would truncate to 0 under
        # integer division.
        acc = 100.0 * correct / total
        return acc
mlpg.py (project: nnmnkwii, author: r9y9)
def mlpg(means, variances, windows):
    """Maximum Liklihood Paramter Generation (MLPG).

    The parameters are almost same as :func:`nnmnkwii.paramgen.mlpg` expects.
    The differences are:

    - The function assumes ``means`` as :obj:`torch.autograd.Variable`
      instead of :obj:`numpy.ndarray`.
    - The fucntion assumes ``variances_frames`` as :obj:`torch.FloatTensor`?
      instead of :obj:`numpy.ndarray`.

    Args:
        means (torch.autograd.Variable): Means
        variances (torch.FloatTensor): Variances
        windows (list): A sequence of window specification

    See also:
        :obj:`nnmnkwii.autograd.MLPG`, :func:`nnmnkwii.paramgen.mlpg`

    """
    T, D = means.size()
    if variances.dim() == 1 and variances.shape[0] == D:
        variances = variances.expand(T, D)
    assert means.size() == variances.size()
    return MLPG(variances, windows)(means)
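A hedged call sketch (the window tuples follow the (left, right, coefficients) convention of nnmnkwii.paramgen; all sizes are illustrative):

import numpy as np
import torch
from torch.autograd import Variable

windows = [
    (0, 0, np.array([1.0])),             # static
    (1, 1, np.array([-0.5, 0.0, 0.5])),  # delta
]
T, static_dim = 10, 24
means = Variable(torch.rand(T, static_dim * len(windows)))
variances = torch.ones(static_dim * len(windows))
y = mlpg(means, variances, windows)  # Variable of shape (T, static_dim)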
mlpg.py (project: nnmnkwii, author: r9y9)
def unit_variance_mlpg(R, means):
    """Special case of MLPG assuming data is normalized to have unit variance.

    Args:
        R (torch.FloatTensor): MLPG matrix.
        means (torch.autograd.Variable): Means, of shape (``T x D``) or
          (``T*num_windows x static_dim``). See
          :func:`nnmnkwii.paramgen.reshape_means` to reshape means from
          (``T x D``) to (``T*num_windows x static_dim``).

    See also:
        :obj:`nnmnkwii.autograd.UnitVarianceMLPG`,
        :func:`nnmnkwii.paramgen.unit_variance_mlpg_matrix`,
        :func:`reshape_means`.
    """
    return UnitVarianceMLPG(R)(means)
lstm.py (project: harmonizer, author: meagtan)
def getnotes(self, voice = None):
        # global mat
        if voice is None:
            return [self.getnotes(v) for v in xrange(len(self.s.parts))]
        if self.notes[voice] is None:
            endtimes = self.s.flat.notesAndRests.stream()._uniqueOffsetsAndEndTimes(endTimesOnly=True)
            self.notes[voice] = [None] * len(endtimes)
            notes = list(self.s.parts[voice].flat.notesAndRests)
            j = 0 # index of current note
            curr = 0.0
            for i in xrange(len(endtimes)):
                self.notes[voice][i] = map(lambda k: (isinstance(notes[j], note.Note) and \
                                           k == pitchtoid(notes[j].pitch, self.key)) * \
                                           (endtimes[i] - curr), range(Din))
                # if current note ends here, go to next note
                if endtimes[i] == notes[j].offset + notes[j].quarterLength:
                    j += 1
                curr = endtimes[i]
            self.notes[voice] = torch.FloatTensor(self.notes[voice])
            n = self.notes[voice].clone().apply_(lambda n: int(n != 0))
            # mat += n[:-1].t().mm(n[1:])
        return self.notes[voice]
lstm.py (project: harmonizer, author: meagtan)
def getchords(self):
        if self.chords is None:
            self.cs = self.s.chordify()
            self.chords = []
            for c in self.cs.flat.notesAndRests:
                self.chords.append(
                    map(lambda k: (isinstance(c, chord.Chord) and \
                        k == chordtoid(c, self.key)) * float(c.quarterLength), range(Dout)))
            self.chords = torch.FloatTensor(self.chords)
        return self.chords

# for c in sc.cs.flat.notesAndRests:
#     if isinstance(c, note.Note):
#         c = chord.Chord(c)
#     sc.notes.append(map(lambda n: (isinstance(c, chord.Chord) and \
#                         (n + lstm.pitchtoid(sc.key.tonic)) % 12 in c.normalOrder) * float(c.quarterLength), range(12)))
transforms.py (project: audio, author: pytorch)
def __call__(self, x_mu):
        """

        Args:
            x_mu (FloatTensor/LongTensor or ndarray)

        Returns:
            x (FloatTensor or ndarray)

        """
        mu = self.qc - 1.
        if isinstance(x_mu, np.ndarray):
            x = ((x_mu) / mu) * 2 - 1.
            x = np.sign(x) * (np.exp(np.abs(x) * np.log1p(mu)) - 1.) / mu
        elif isinstance(x_mu, (torch.Tensor, torch.LongTensor)):
            if isinstance(x_mu, torch.LongTensor):
                x_mu = x_mu.float()
            mu = torch.FloatTensor([mu])
            x = ((x_mu) / mu) * 2 - 1.
            x = torch.sign(x) * (torch.exp(torch.abs(x) * torch.log1p(mu)) - 1.) / mu
        return x
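A round-trip sanity check of the two transforms' numpy branches (standalone sketch; qc=256 is illustrative):

import numpy as np

qc = 256
mu = qc - 1.
x = np.linspace(-0.9, 0.9, num=7)

# Encode (numpy branch of the encoder above).
x_mu = ((np.sign(x) * np.log1p(mu * np.abs(x)) / np.log1p(mu) + 1) / 2 * mu + 0.5).astype(int)

# Decode (this snippet's numpy branch).
y = (x_mu / mu) * 2 - 1.
y = np.sign(y) * (np.exp(np.abs(y) * np.log1p(mu)) - 1.) / mu

print(np.abs(x - y).max())  # ~0.02: only mu-law quantization noise remains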
test_threadutils.py (project: ParlAI, author: facebookresearch)
def test_torch(self):
        try:
            import torch
        except ImportError:
            # pass by default if no torch available
            return

        st = SharedTable({'a': torch.FloatTensor([1]), 'b': torch.LongTensor(2)})
        assert st['a'][0] == 1.0
        assert len(st) == 2
        assert 'b' in st
        del st['b']
        assert 'b' not in st
        assert len(st) == 1

        if torch.cuda.is_available():
            st = SharedTable({'a': torch.cuda.FloatTensor([1]), 'b': torch.cuda.LongTensor(2)})
            assert st['a'][0] == 1.0
            assert len(st) == 2
            assert 'b' in st
            del st['b']
            assert 'b' not in st
            assert len(st) == 1

