Example source code for Python's min()

test_torch.py (project: pytorch, author: tylergenter)
def test_histc(self):
        x = torch.Tensor((2, 4, 2, 2, 5, 4))
        y = torch.histc(x, 5, 1, 5)  # nbins,  min,  max
        z = torch.Tensor((0, 3, 0, 2, 1))
        self.assertEqual(y, z)
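
For context, torch.histc(x, bins, min, max) splits [min, max] into equal-width bins, ignores values outside that range, and counts values equal to max in the last bin (which is why the 5 above lands in the final bucket). A minimal pure-Python sketch of those semantics; histc_ref is a hypothetical helper, not part of the test suite:

def histc_ref(values, nbins, lo, hi):
    width = float(hi - lo) / nbins
    counts = [0] * nbins
    for v in values:
        if lo <= v <= hi:
            # clamp v == hi into the last bin
            idx = min(int((v - lo) / width), nbins - 1)
            counts[idx] += 1
    return counts

assert histc_ref([2, 4, 2, 2, 5, 4], 5, 1, 5) == [0, 3, 0, 2, 1]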
test_torch.py (project: pytorch, author: tylergenter)
def test_random(self):
        # This test is flaky with p<=(2/(ub-lb))^200=6e-36
        t = torch.FloatTensor(200)
        lb = 1
        ub = 4

        t.fill_(-1)
        t.random_(lb, ub)
        self.assertEqual(t.min(), lb)
        self.assertEqual(t.max(), ub - 1)

        t.fill_(-1)
        t.random_(ub)
        self.assertEqual(t.min(), 0)
        self.assertEqual(t.max(), ub - 1)
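
Note that random_(lb, ub) fills the tensor with integers drawn uniformly from [lb, ub), which is why the assertions expect a maximum of ub - 1. The flakiness bound in the comment is the chance that a given value of {1, 2, 3} never appears in 200 draws:

p = (2.0 / 3.0) ** 200   # ~6e-36, the p <= (2/(ub-lb))^200 bound cited above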
Min.py (project: pytorch-coriander, author: hughperkins)
def updateOutput(self, input):
        self._lazyInit()
        dimension = self._getPositiveDimension(input)
        torch.min(input, dimension, out=(self._output, self._indices), keepdim=True)
        if input.dim() > 1:
            self.output.set_(self._output.select(dimension, 0))
        else:
            self.output.set_(self._output)

        return self.output
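
For reference, a minimal sketch (assuming the same era of the torch API) of the reduction this module wraps: torch.min along a dimension returns a (values, indices) pair, keepdim=True keeps the reduced dimension as size 1, and select(dim, 0) then drops it, exactly as updateOutput does above.

import torch

t = torch.Tensor([[3, 1], [2, 5]])
values, indices = torch.min(t, 1, keepdim=True)  # both are 2x1 tensors
squeezed = values.select(1, 0)                   # 1-D tensor of size 2: (1, 2)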
Min.py (project: pytorch-coriander, author: hughperkins)
def type(self, type, tensorCache=None):
        # torch.min expects a LongTensor as indices, whereas cutorch.min expects a CudaTensor.
        if type == 'torch.cuda.FloatTensor':
            indices, self._indices = self._indices, None
            super(Min, self).type(type, tensorCache)
            self._indices = indices.type('torch.cuda.LongTensor') if indices is not None else None
        else:
            # self._indices must be a LongTensor. Setting it to None temporarily avoids
            # unnecessary memory allocations.
            indices, self._indices = self._indices, None
            super(Min, self).type(type, tensorCache)
            self._indices = indices.long() if indices is not None else None

        return self
test_torch.py (project: pytorch-coriander, author: hughperkins)
def test_dim_reduction(self):
        dim_red_fns = [
            "mean", "median", "mode", "norm", "prod",
            "std", "sum", "var", "max", "min"]

        def normfn_attr(t, dim, keepdim=True):
            attr = getattr(torch, "norm")
            return attr(t, 2, dim, keepdim)

        for fn_name in dim_red_fns:
            x = torch.randn(3, 4, 5)
            fn_attr = getattr(torch, fn_name) if fn_name != "norm" else normfn_attr

            def fn(t, dim, keepdim=True):
                ans = fn_attr(t, dim, keepdim)
                return ans if not isinstance(ans, tuple) else ans[0]

            dim = random.randint(0, 2)
            self.assertEqual(fn(x, dim, False).unsqueeze(dim), fn(x, dim))
            self.assertEqual(x.ndimension() - 1, fn(x, dim, False).ndimension())
            self.assertEqual(x.ndimension(), fn(x, dim, True).ndimension())

            # check 1-d behavior
            x = torch.randn(1)
            dim = 0
            self.assertEqual(fn(x, dim), fn(x, dim, True))
            self.assertEqual(x.ndimension(), fn(x, dim).ndimension())
            self.assertEqual(x.ndimension(), fn(x, dim, True).ndimension())
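
The invariants being checked reduce to the following keepdim behavior (a small illustration, using sum as the reduction):

import torch

x = torch.randn(3, 4, 5)
s_keep = torch.sum(x, 1, keepdim=True)   # shape (3, 1, 5): reduced dim kept as size 1
s_drop = torch.sum(x, 1, keepdim=False)  # shape (3, 5): reduced dim removed
assert s_drop.unsqueeze(1).size() == s_keep.size()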
test_torch.py (project: pytorch-coriander, author: hughperkins)
def test_min_elementwise(self):
        self._testCSelection(torch.min, min)
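
The elementwise form under test compares two tensors entry by entry rather than reducing one (a minimal sketch):

import torch

a = torch.Tensor([1, 4, 2])
b = torch.Tensor([3, 0, 2])
c = torch.min(a, b)   # (1, 0, 2): the smaller entry at each position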
test_torch.py (project: pytorch-coriander, author: hughperkins)
def test_clamp(self):
        m1 = torch.rand(100).mul(5).add(-2.5)  # uniform in [-2.5, 2.5]
        # just in case we're extremely lucky.
        min_val = -1
        max_val = 1
        m1[1] = min_val
        m1[2] = max_val

        res1 = m1.clone()
        res1.clamp_(min_val, max_val)
        res2 = m1.clone()
        for i in iter_indices(res2):
            res2[i] = max(min_val, min(max_val, res2[i]))
        self.assertEqual(res1, res2)

        res1 = torch.clamp(m1, min=min_val)
        res2 = m1.clone()
        for i in iter_indices(res2):
            res2[i] = max(min_val, res2[i])
        self.assertEqual(res1, res2)

        res1 = torch.clamp(m1, max=max_val)
        res2 = m1.clone()
        for i in iter_indices(res2):
            res2[i] = min(max_val, res2[i])
        self.assertEqual(res1, res2)
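
The loops above spell out the identity clamp(v, lo, hi) = max(lo, min(hi, v)), applied elementwise; in tensor form (a minimal sketch):

import torch

x = torch.Tensor([-2.0, 0.3, 1.7])
y = x.clamp(-1, 1)            # (-1.0, 0.3, 1.0)
# one-sided bounds work too, mirroring the min=/max= cases in the test
lower_only = x.clamp(min=-1)  # (-1.0, 0.3, 1.7)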
test_torch.py (project: pytorch-coriander, author: hughperkins)
def test_histc(self):
        x = torch.Tensor((2, 4, 2, 2, 5, 4))
        y = torch.histc(x, 5, 1, 5)  # nbins,  min,  max
        z = torch.Tensor((0, 3, 0, 2, 1))
        self.assertEqual(y, z)
main.py (project: pytorch_divcolor, author: aditya12agd5)
def mdn_loss(gmm_params, mu, stddev, batchsize):
  gmm_mu, gmm_pi = get_gmm_coeffs(gmm_params)
  # Reparameterized sample: z = mu + eps * stddev with eps ~ N(0, I)
  # (torch.randn already samples a standard normal).
  eps = Variable(torch.randn(stddev.size())).cuda()
  z = torch.add(mu, torch.mul(eps, stddev))
  # Scaled L2 distance from z to each of the nmix mixture means.
  z_flat = z.repeat(1, args.nmix)
  z_flat = z_flat.view(batchsize*args.nmix, args.hiddensize)
  gmm_mu_flat = gmm_mu.view(batchsize*args.nmix, args.hiddensize)
  dist_all = torch.sqrt(torch.sum(torch.add(z_flat, gmm_mu_flat.mul(-1)).pow(2).mul(50), 1))
  dist_all = dist_all.view(batchsize, args.nmix)
  # Select the closest component per sample; penalize its negative log weight
  # plus the distance itself (the 1e-30 guards log(0)).
  dist_min, selectids = torch.min(dist_all, 1)
  gmm_pi_min = torch.gather(gmm_pi, 1, selectids.view(-1, 1))
  gmm_loss = torch.mean(torch.add(-1*torch.log(gmm_pi_min+1e-30), dist_min))
  gmm_loss_l2 = torch.mean(dist_min)
  return gmm_loss, gmm_loss_l2
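
In symbols (a hedged reading of the loss above, with B = batchsize, K = args.nmix, and z the reparameterized sample):

$$d_{i,k} = \sqrt{50}\,\lVert z_i - \mu_{i,k}\rVert_2, \qquad k_i^{*} = \arg\min_{k} d_{i,k}$$

$$\mathcal{L}_{\mathrm{gmm}} = \frac{1}{B}\sum_{i=1}^{B}\Bigl(-\log\bigl(\pi_{i,k_i^{*}} + 10^{-30}\bigr) + d_{i,k_i^{*}}\Bigr), \qquad \mathcal{L}_{\ell_2} = \frac{1}{B}\sum_{i=1}^{B} d_{i,k_i^{*}}$$

Each sample is matched to its nearest mixture component, whose weight is pushed up while the distance itself is pushed down.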
functional.py (project: pytorch, author: ezyang)
def binary_cross_entropy_with_logits(input, target, weight=None, size_average=True):
    r"""Function that measures Binary Cross Entropy between target and output
    logits.

    See :class:`~torch.nn.BCEWithLogitsLoss` for details.

    Args:
        input: Variable of arbitrary shape
        target: Variable of the same shape as input
        weight (Variable, optional): a manual rescaling weight;
                if provided, it is repeated to match the input tensor shape
        size_average (bool, optional): By default, the losses are averaged
                over observations for each minibatch. However, if size_average
                is set to False, the losses are instead summed for each
                minibatch. Default: True

    Examples::

         >>> input = autograd.Variable(torch.randn(3), requires_grad=True)
         >>> target = autograd.Variable(torch.FloatTensor(3).random_(2))
         >>> loss = F.binary_cross_entropy_with_logits(input, target)
         >>> loss.backward()
    """
    if not (target.size() == input.size()):
        raise ValueError("Target size ({}) must be the same as input size ({})".format(target.size(), input.size()))

    max_val = (-input).clamp(min=0)
    loss = input - input * target + max_val + ((-max_val).exp() + (-input - max_val).exp()).log()

    if weight is not None:
        loss = loss * weight

    if size_average:
        return loss.mean()
    else:
        return loss.sum()
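
The max_val term implements the usual log-sum-exp stabilization of x - x*t + log(1 + exp(-x)). A scalar sketch (naive and stable are hypothetical names) showing the two forms agree where both are finite:

import math

def naive(x, t):
    return x - x * t + math.log(1 + math.exp(-x))

def stable(x, t):
    m = max(-x, 0.0)   # the max_val of the function above
    return x - x * t + m + math.log(math.exp(-m) + math.exp(-x - m))

assert abs(naive(3.0, 1.0) - stable(3.0, 1.0)) < 1e-12
stable(-800.0, 0.0)   # ~0.0 and finite; math.exp(800) in naive() would overflow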
functional.py (project: pytorch, author: ezyang)
def cosine_similarity(x1, x2, dim=1, eps=1e-8):
    r"""Returns cosine similarity between x1 and x2, computed along dim.

    .. math ::
        \text{similarity} = \dfrac{x_1 \cdot x_2}{\max(\Vert x_1 \Vert _2 \cdot \Vert x_2 \Vert _2, \epsilon)}

    Args:
        x1 (Variable): First input.
        x2 (Variable): Second input (of size matching x1).
        dim (int, optional): Dimension of vectors. Default: 1
        eps (float, optional): Small value to avoid division by zero.
            Default: 1e-8

    Shape:
        - Input: :math:`(\ast_1, D, \ast_2)` where D is at position `dim`.
        - Output: :math:`(\ast_1, \ast_2)` where 1 is at position `dim`.

    Example::

        >>> input1 = autograd.Variable(torch.randn(100, 128))
        >>> input2 = autograd.Variable(torch.randn(100, 128))
        >>> output = F.cosine_similarity(input1, input2)
        >>> print(output)
    """
    w12 = torch.sum(x1 * x2, dim)
    w1 = torch.norm(x1, 2, dim)
    w2 = torch.norm(x2, 2, dim)
    return (w12 / (w1 * w2).clamp(min=eps)).squeeze()
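
The clamp(min=eps) is what keeps zero-norm inputs finite; mirroring the body above with plain tensors (a minimal sketch):

import torch

x1 = torch.zeros(3, 4)
x2 = torch.randn(3, 4)
w12 = torch.sum(x1 * x2, 1)
denom = (torch.norm(x1, 2, 1) * torch.norm(x2, 2, 1)).clamp(min=1e-8)
sim = w12 / denom   # all zeros rather than nan, thanks to the clamp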
Min.py (project: pytorch, author: ezyang)
def updateOutput(self, input):
        self._lazyInit()
        dimension = self._getPositiveDimension(input)
        torch.min(input, dimension, out=(self._output, self._indices), keepdim=True)
        if input.dim() > 1:
            self.output.set_(self._output.select(dimension, 0))
        else:
            self.output.set_(self._output)

        return self.output
test_autograd.py (project: pytorch, author: ezyang)
def test_basic_op_grad_fallback(self):
        """Grad output might need to be reshaped to match the second argument."""
        x = Variable(torch.randn(4, 6), requires_grad=True)
        b = Variable(torch.rand(12, 1) + 1e-2, requires_grad=True)
        c = Variable(torch.rand(8, 1) + 1e-2, requires_grad=True)

        def y():
            # .mm() depends on the grad_output being of correct size
            return b.mm(Variable(torch.rand(1, 2) + 1e-2))

        def z():
            return c.mm(Variable(torch.rand(1, 3) + 1e-2))

        # suppress broadcastable warning
        with warnings.catch_warnings(record=True):
            (x + y()).sum().backward()
            (x - y()).sum().backward()
            (x * y()).sum().backward()
            (x / y()).sum().backward()
            (x.dist(y())).sum().backward()
            (x.lerp(y(), 0.5)).sum().backward()
            (x.max(y())).sum().backward()
            (x.min(y())).sum().backward()
            (x.masked_fill(y() < 0, 0.5)).sum().backward()
            (x.masked_scatter(Variable(y().data < 0.25), z())).sum().backward()
            (x.masked_select(Variable(y().data < 0.25))).sum().backward()
            (x.addcmul(1, y(), z())).sum().backward()
            (x.addcdiv(1, y(), z())).sum().backward()
            (x.abs() ** y()).sum().backward()
test_torch.py (project: pytorch, author: ezyang)
def test_min(self):
        self._testSelection(torch.min, min)
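
_testSelection exercises the full-reduction form, which mirrors the Python built-in of the same name (a minimal sketch):

import torch

t = torch.Tensor([3, 1, 2])
smallest = torch.min(t)   # 1.0, the same value as the built-in min([3, 1, 2])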
test_torch.py (project: pytorch, author: ezyang)
def _test_dim_reduction(self, cast):
        dim_red_fns = [
            "mean", "median", "mode", "norm", "prod",
            "std", "sum", "var", "max", "min"]

        def normfn_attr(t, dim, keepdim=False):
            attr = getattr(torch, "norm")
            return attr(t, 2, dim, keepdim)

        for fn_name in dim_red_fns:
            fn_attr = getattr(torch, fn_name) if fn_name != "norm" else normfn_attr

            def fn(x, dim, keepdim=False):
                ans = fn_attr(x, dim, keepdim=keepdim)
                return ans if not isinstance(ans, tuple) else ans[0]

            def test_multidim(x, dim):
                self.assertEqual(fn(x, dim).unsqueeze(dim), fn(x, dim, keepdim=True))
                self.assertEqual(x.ndimension() - 1, fn(x, dim).ndimension())
                self.assertEqual(x.ndimension(), fn(x, dim, keepdim=True).ndimension())

            # general case
            x = cast(torch.randn(3, 4, 5))
            dim = random.randint(0, 2)
            test_multidim(x, dim)

            # check 1-d behavior
            x = cast(torch.randn(1))
            dim = 0
            self.assertEqual(fn(x, dim), fn(x, dim, keepdim=True))
            self.assertEqual(x.ndimension(), fn(x, dim).ndimension())
            self.assertEqual(x.ndimension(), fn(x, dim, keepdim=True).ndimension())

            # check reducing of a singleton dimension
            dims = [3, 4, 5]
            singleton_dim = random.randint(0, 2)
            dims[singleton_dim] = 1
            x = cast(torch.randn(dims))
            test_multidim(x, singleton_dim)
test_torch.py (project: pytorch, author: ezyang)
def test_min_elementwise(self):
        self._testCSelection(torch.min, min)
test_torch.py (project: pytorch, author: ezyang)
def test_histc(self):
        x = torch.Tensor((2, 4, 2, 2, 5, 4))
        y = torch.histc(x, 5, 1, 5)  # nbins,  min,  max
        z = torch.Tensor((0, 3, 0, 2, 1))
        self.assertEqual(y, z)
test_torch.py (project: pytorch, author: ezyang)
def test_random(self):
        # This test is flaky with p<=(2/(ub-lb))^200=6e-36
        t = torch.FloatTensor(200)
        lb = 1
        ub = 4

        t.fill_(-1)
        t.random_(lb, ub)
        self.assertEqual(t.min(), lb)
        self.assertEqual(t.max(), ub - 1)

        t.fill_(-1)
        t.random_(ub)
        self.assertEqual(t.min(), 0)
        self.assertEqual(t.max(), ub - 1)
lanczos_quadrature.py (project: gpytorch, author: jrg365)
def binary_search_symeig(self, T):
        # Bisect for the largest k such that every eigenvalue of the leading
        # principal submatrix T[:k, :k] exceeds -1e-4. By Cauchy's interlacing
        # theorem the minimum eigenvalue of T[:k, :k] is non-increasing in k,
        # so this predicate is monotone and binary search applies.
        left = 0
        right = len(T)
        while right - left > 1:
            mid = (left + right) // 2
            eigs = T[:mid, :mid].symeig()[0]
            if torch.min(eigs) < -1e-4:
                right = mid - 1
            else:
                left = mid

        return left
single.py (project: qpth, author: locuslab)
def get_step(v, dv):
    # Ratio test from interior-point line searches: the largest step a
    # keeping v + a * dv >= 0 is min_i(-v_i / dv_i) over the decreasing
    # coordinates; if no coordinate decreases, take a full step of 1.
    I = dv < 1e-12
    if torch.sum(I) > 0:  # TODO: Use something like torch.any(dv < 0)
        a = -v / dv
        return torch.min(a[I])
    else:
        return 1
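
A hedged worked example of the ratio test, reusing get_step from the snippet above: with v = (1, 2) and dv = (-0.5, -4), the second coordinate reaches zero first, at a = -2 / -4 = 0.5.

import torch

v = torch.Tensor([1.0, 2.0])
dv = torch.Tensor([-0.5, -4.0])
step = get_step(v, dv)   # 0.5; v + step * dv = (0.75, 0.0) stays nonnegative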

