Python sigmoid() example source code

activation.py (project: pytorch, author: ezyang)
def forward(self, input):
        return torch.sigmoid(input)
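
A minimal usage sketch of this module, assuming the standard torch.nn.Sigmoid wrapper (the input tensor here is illustrative):

import torch
import torch.nn as nn

m = nn.Sigmoid()
x = torch.randn(4)
y = m(x)  # each element mapped into (0, 1)
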
functional.py (project: pytorch, author: ezyang)
def sigmoid(input):
    return _autograd_functions.Sigmoid.apply(input)


# etc.
functional.py (project: pytorch, author: ezyang)
def binary_cross_entropy(input, target, weight=None, size_average=True):
    r"""Function that measures the Binary Cross Entropy
    between the target and the output.

    See :class:`~torch.nn.BCELoss` for details.

    Args:
        input: Variable of arbitrary shape
        target: Variable of the same shape as input
        weight (Variable, optional): a manual rescaling weight;
                if provided, it is repeated to match the input tensor shape
        size_average (bool, optional): By default, the losses are averaged
                over observations for each minibatch. However, if
                ``size_average`` is set to ``False``, the losses are instead
                summed for each minibatch. Default: ``True``

    Examples::

        >>> input = autograd.Variable(torch.randn(3), requires_grad=True)
        >>> target = autograd.Variable(torch.FloatTensor(3).random_(2))
        >>> loss = F.binary_cross_entropy(F.sigmoid(input), target)
        >>> loss.backward()
    """
    if not (target.size() == input.size()):
        warnings.warn("Using a target size ({}) that is different to the input size ({}) is deprecated. "
                      "Please ensure they have the same size.".format(target.size(), input.size()))
    if input.nelement() != target.nelement():
        raise ValueError("Target and input must have the same number of elements. target nelement ({}) "
                         "!= input nelement ({})".format(target.nelement(), input.nelement()))

    if weight is not None:
        new_size = _infer_size(target.size(), weight.size())
        weight = weight.expand(new_size)

    return _functions.thnn.BCELoss.apply(input, target, weight, size_average)
functional.py (project: pytorch, author: ezyang)
def multilabel_soft_margin_loss(input, target, weight=None, size_average=True):
    input = torch.sigmoid(input)
    return binary_cross_entropy(input, target, weight, size_average)
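
A quick usage sketch of this loss, assuming a recent PyTorch where Tensors and Variables are merged; the shapes are hypothetical:

import torch
import torch.nn.functional as F

logits = torch.randn(2, 5, requires_grad=True)  # 2 samples, 5 labels each
targets = torch.empty(2, 5).random_(2)          # multi-hot 0/1 targets
loss = F.multilabel_soft_margin_loss(logits, targets)
loss.backward()
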
test_jit.py (project: pytorch, author: ezyang)
def test_simple(self):
        x = Variable(torch.Tensor([0.4]), requires_grad=True)
        y = Variable(torch.Tensor([0.7]), requires_grad=True)

        def f(x, y):
            return torch.sigmoid(torch.tanh(x * (x + y)))

        trace, z = torch.jit.trace(f, (x, y), nderivs=0)

        torch._C._jit_pass_lint(trace)
        torch._C._jit_pass_onnx(trace)
        torch._C._jit_pass_lint(trace)

        self.assertExpected(str(trace))
test_jit.py (project: pytorch, author: ezyang)
def test_verify(self):
        x = Variable(torch.Tensor([0.4]), requires_grad=True)
        y = Variable(torch.Tensor([0.7]), requires_grad=True)

        @torch.jit.compile(verify=True, optimize=False)
        def doit(x, y):
            return torch.sigmoid(torch.tanh(x * (x + y)))

        z = doit(x, y)
        z2 = doit(x, y)
        self.assertEqual(z, torch.sigmoid(torch.tanh(x * (x + y))))
        self.assertEqual(z, z2)
test_jit.py (project: pytorch, author: ezyang)
def test_traced_function(self):
        x = Variable(torch.Tensor([0.4]), requires_grad=True)
        y = Variable(torch.Tensor([0.7]), requires_grad=True)

        @torch.jit.compile
        def doit(x, y):
            return torch.sigmoid(torch.tanh(x * (x + y)))

        z = doit(x, y)
        z2 = doit(x, y)
        self.assertEqual(z, torch.sigmoid(torch.tanh(x * (x + y))))
        self.assertEqual(z, z2)
test_jit.py (project: pytorch, author: ezyang)
def test_disabled_traced_function(self):
        x = Variable(torch.Tensor([0.4]), requires_grad=True)
        y = Variable(torch.Tensor([0.7]), requires_grad=True)

        @torch.jit.compile(enabled=False)
        def doit(x, y):
            return torch.sigmoid(torch.tanh(x * (x + y)))

        z = doit(x, y)
        z2 = doit(x, y)
        self.assertEqual(z, torch.sigmoid(torch.tanh(x * (x + y))))
        self.assertEqual(z, z2)
test_jit.py (project: pytorch, author: ezyang)
def test_autograd_closure(self):
        x = Variable(torch.Tensor([0.4]), requires_grad=True)
        y = Variable(torch.Tensor([0.7]), requires_grad=True)

        trace = torch._C._tracer_enter((x, y), 1)

        z = torch.sigmoid(x * (x + y))
        w = torch.abs(x * x * x + y) + Variable(torch.ones(1))

        torch._C._tracer_exit((z, w))
        torch._C._jit_pass_lint(trace)

        (z * w).backward()
        torch._C._jit_pass_dce(trace)
        torch._C._jit_pass_lint(trace)

        x_grad = x.grad.data.clone()
        x.grad.data.zero_()

        function = torch._C._jit_createAutogradClosure(trace)
        torch._C._jit_pass_lint(trace)
        z2, w2 = function()(x, y)
        (z2 * w2).backward()
        self.assertEqual(z, z2)
        self.assertEqual(w, w2)
        self.assertEqual(x.grad.data, x_grad)
test_jit.py (project: pytorch, author: ezyang)
def test_python_ir(self):
        x = Variable(torch.Tensor([0.4]), requires_grad=True)
        y = Variable(torch.Tensor([0.7]), requires_grad=True)

        def doit(x, y):
            return torch.sigmoid(torch.tanh(x * (x + y)))

        traced, _ = torch.jit.trace(doit, (x, y))
        g = torch._C._jit_get_graph(traced)
        g2 = torch._C.Graph()
        g_to_g2 = {}
        for node in g.inputs():
            g_to_g2[node] = g2.addInput()
        for node in g.nodes():
            if node.kind() == "PythonOp":
                n_ = g2.create(node.pyname(),
                               [g_to_g2[i] for i in node.inputs()]) \
                    .setType(node.typeOption()) \
                    .s_("note", "from_pyop") \
                    .i_("some_value", len(node.scalar_args()))
                assert(n_.i("some_value") == len(node.scalar_args()))
            else:
                n_ = g2.createClone(node, lambda x: g_to_g2[x])
                assert(n_.kindOf("Offset") == "i")

            g_to_g2[node] = g2.appendNode(n_)

        for node in g.outputs():
            g2.registerOutput(g_to_g2[node])

        t_node = g2.create("TensorTest").t_("a", torch.ones([2, 2]))
        assert(t_node.attributeNames() == ["a"])
        g2.appendNode(t_node)
        assert(torch.equal(torch.ones([2, 2]), t_node.t("a")))
        self.assertExpected(str(g2))
test_onnx.py (project: pytorch, author: ezyang)
def test_params(self):
        x = Variable(torch.Tensor([[1, 2], [3, 4]]), requires_grad=True)
        y = Variable(torch.Tensor([[1, 2], [3, 4]]), requires_grad=True)
        trace, _ = torch.jit.trace(lambda x, y: -torch.sigmoid(torch.tanh(x * (x + y))), (x, y))
        initializers = [x.data]
        torch._C._jit_pass_onnx(trace)
        self.assertONNXExpected(trace.export(initializers))
basic.py (project: PyTorch-Encoding, author: zhanghang1989)
def forward(self, input):
        if isinstance(input, Variable):
            return torch.sigmoid(input)
        elif isinstance(input, tuple) or isinstance(input, list):
            return my_data_parallel(self, input)
        else:
            raise RuntimeError('unknown input type')
decoder.py (project: Semi-supervised_Neural_Network, author: jibancanyang)
def g(self, tilde_z_l, u_l):
        # Column of ones, used to broadcast the ten learned combinator
        # parameters a1..a10 across the batch dimension.
        if self.use_cuda:
            ones = Parameter(torch.ones(tilde_z_l.size()[0], 1).cuda())
        else:
            ones = Parameter(torch.ones(tilde_z_l.size()[0], 1))

        b_a1 = ones.mm(self.a1)
        b_a2 = ones.mm(self.a2)
        b_a3 = ones.mm(self.a3)
        b_a4 = ones.mm(self.a4)
        b_a5 = ones.mm(self.a5)

        b_a6 = ones.mm(self.a6)
        b_a7 = ones.mm(self.a7)
        b_a8 = ones.mm(self.a8)
        b_a9 = ones.mm(self.a9)
        b_a10 = ones.mm(self.a10)

        # mu(u): a sigmoid-gated affine function of the top-down signal u_l.
        mu_l = torch.mul(b_a1, torch.sigmoid(torch.mul(b_a2, u_l) + b_a3)) + \
               torch.mul(b_a4, u_l) + \
               b_a5

        # v(u): the same functional form with its own parameters.
        v_l = torch.mul(b_a6, torch.sigmoid(torch.mul(b_a7, u_l) + b_a8)) + \
              torch.mul(b_a9, u_l) + \
              b_a10

        # Denoising combinator: hat_z = (tilde_z - mu) * v + mu.
        hat_z_l = torch.mul(tilde_z_l - mu_l, v_l) + mu_l

        return hat_z_l
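
A scalar sanity check of that combinator, with purely illustrative values:

# When v == 1 the noisy activation tilde_z passes through unchanged;
# when v == 0 the output collapses to the estimated mean mu.
tilde_z, mu = 3.0, 1.0
assert (tilde_z - mu) * 1.0 + mu == tilde_z
assert (tilde_z - mu) * 0.0 + mu == mu
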
utils.py (project: pytorch-yolo2, author: marvis)
import math

def sigmoid(x):
    # Scalar logistic function: 1 / (1 + exp(-x)).
    return 1.0 / (math.exp(-x) + 1.0)
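
Note that this naive form can overflow for large negative x, since math.exp(-x) grows without bound. A numerically stable variant (a sketch, not part of the original repo) only ever exponentiates a non-positive argument:

import math

def stable_sigmoid(x):
    # Branch on sign so exp() is called only on non-positive arguments,
    # which keeps it from overflowing for large-magnitude x.
    if x >= 0:
        return 1.0 / (1.0 + math.exp(-x))
    z = math.exp(x)
    return z / (1.0 + z)
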
custom.py (project: seqmod, author: emanjavacas)
def _step(self, H_t, T_t, h0, h_mask, t_mask):
        s_lm1 = h0
        for l, (rnn_h, rnn_t) in enumerate(zip(self.rnn_h, self.rnn_t)):
            s_lm1_H = h_mask.expand_as(s_lm1) * s_lm1
            s_lm1_T = t_mask.expand_as(s_lm1) * s_lm1
            if l == 0:
                H_t = F.tanh(H_t + rnn_h(s_lm1_H))
                T_t = F.sigmoid(T_t + rnn_t(s_lm1_T))
            else:
                H_t = F.tanh(rnn_h(s_lm1_H))
                T_t = F.sigmoid(rnn_t(s_lm1_T))
            s_l = (H_t - s_lm1) * T_t + s_lm1
            s_lm1 = s_l

        return s_l
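
The update s_l = (H_t - s_lm1) * T_t + s_lm1 is the usual highway gating rule: it is algebraically identical to T * H + (1 - T) * s. A quick numeric check with illustrative values:

H, s, T = 2.0, 1.0, 0.25
assert (H - s) * T + s == T * H + (1 - T) * s  # both equal 1.25
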
activation.py (project: pytorch, author: pytorch)
def forward(self, input):
        return torch.sigmoid(input)
functional.py (project: pytorch, author: pytorch)
def sigmoid(input):
    r"""sigmoid(input) -> Variable

    Applies the element-wise function :math:`f(x) = 1 / (1 + \exp(-x))`

    See :class:`~torch.nn.Sigmoid` for more details.
    """
    return input.sigmoid()


# etc.
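
A quick check of the docstring formula above, assuming a recent PyTorch (the printed values are the familiar logistic outputs):

import torch

x = torch.tensor([-1.0, 0.0, 1.0])
print(torch.sigmoid(x))  # tensor([0.2689, 0.5000, 0.7311]); sigmoid(0) = 0.5
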
functional.py (project: pytorch, author: pytorch)
def binary_cross_entropy(input, target, weight=None, size_average=True, reduce=True):
    r"""Function that measures the Binary Cross Entropy
    between the target and the output.

    See :class:`~torch.nn.BCELoss` for details.

    Args:
        input: Variable of arbitrary shape
        target: Variable of the same shape as input
        weight (Variable, optional): a manual rescaling weight;
                if provided, it is repeated to match the input tensor shape
        size_average (bool, optional): By default, the losses are averaged
                over observations for each minibatch. However, if
                ``size_average`` is set to ``False``, the losses are instead
                summed for each minibatch. Default: ``True``
        reduce (bool, optional): By default, the losses are averaged or summed
                over observations for each minibatch depending on
                ``size_average``. When ``reduce`` is ``False``, returns a loss
                per batch element instead and ignores ``size_average``.
                Default: ``True``

    Examples::

        >>> input = autograd.Variable(torch.randn(3), requires_grad=True)
        >>> target = autograd.Variable(torch.FloatTensor(3).random_(2))
        >>> loss = F.binary_cross_entropy(F.sigmoid(input), target)
        >>> loss.backward()
    """
    if not (target.size() == input.size()):
        warnings.warn("Using a target size ({}) that is different to the input size ({}) is deprecated. "
                      "Please ensure they have the same size.".format(target.size(), input.size()))
    if input.nelement() != target.nelement():
        raise ValueError("Target and input must have the same number of elements. target nelement ({}) "
                         "!= input nelement ({})".format(target.nelement(), input.nelement()))

    if weight is not None:
        new_size = _infer_size(target.size(), weight.size())
        weight = weight.expand(new_size)
        if torch.is_tensor(weight):
            weight = Variable(weight)

    return torch._C._nn.binary_cross_entropy(input, target, weight, size_average, reduce)
functional.py (project: pytorch, author: pytorch)
def multilabel_soft_margin_loss(input, target, weight=None, size_average=True):
    """multilabel_soft_margin_loss(input, target, weight=None, size_average=True) -> Variable

    See :class:`~torch.nn.MultiLabelSoftMarginLoss` for details.
    """
    input = torch.sigmoid(input)
    return binary_cross_entropy(input, target, weight, size_average)
test_jit.py (project: pytorch, author: pytorch)
def LSTMCell(input, hidden, w_ih, w_hh, b_ih=None, b_hh=None):
    hx, cx = hidden
    gates = F.linear(input, w_ih, b_ih) + F.linear(hx, w_hh, b_hh)

    ingate, forgetgate, cellgate, outgate = gates.chunk(4, 1)
    ingate = F.sigmoid(ingate)
    forgetgate = F.sigmoid(forgetgate)
    cellgate = F.tanh(cellgate)
    outgate = F.sigmoid(outgate)

    cy = (forgetgate * cx) + (ingate * cellgate)
    hy = outgate * F.tanh(cy)
    return hy, cy
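
A sketch of calling this helper with hypothetical shapes (batch 3, input size 10, hidden size 20); the weights are random stand-ins, not trained parameters:

import torch

inp = torch.randn(3, 10)
hx = torch.zeros(3, 20)
cx = torch.zeros(3, 20)
w_ih = torch.randn(4 * 20, 10)  # stacked input weights for the 4 gates
w_hh = torch.randn(4 * 20, 20)  # stacked hidden weights for the 4 gates
hy, cy = LSTMCell(inp, (hx, cx), w_ih, w_hh)  # hy, cy each (3, 20)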

