Example source code for Python typename()

Source: _tensor_str.py (project: pytorch-dist, author: apaszke)
def _str(self):
    if self.ndimension() == 0:
        return '[{} with no dimension]\n'.format(torch.typename(self))
    elif self.ndimension() == 1:
        strt = _vector_str(self)
    elif self.ndimension() == 2:
        strt = _matrix_str(self)
    else:
        strt = _tensor_str(self)

    size_str = 'x'.join(str(size) for size in self.size())
    device_str = '' if not self.is_cuda else \
        ' (GPU {})'.format(self.get_device())
    strt += '[{} of size {}{}]\n'.format(torch.typename(self),
                                         size_str, device_str)
    return '\n' + strt
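For reference, torch.typename returns the fully qualified type name that _str embeds in the header and footer lines above. A minimal sketch of what it reports (the exact names assume a default CPU build; the CUDA line only applies when a GPU is available):

import torch

t = torch.randn(2, 3)
print(torch.typename(t))             # e.g. 'torch.FloatTensor'
if torch.cuda.is_available():
    print(torch.typename(t.cuda()))  # e.g. 'torch.cuda.FloatTensor'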
Source: conv.py (project: pytorch-dist, author: apaszke)
def forward(self, input, weight, bias=None):
        self._backend = type2backend[type(input)]
        # TODO: free buffers when not needed
        self.buffer1 = input.new()
        self.buffer2 = input.new()
        output = input.new()
        self.with_bias = bias is not None
        if torch.typename(input) == 'torch.cuda.FloatTensor':
            self._backend.VolumetricConvolution_updateOutput(
                self._backend.library_state, input, output, weight, bias,
                self.buffer1, self.buffer2, *self.additional_args[3:])
        else:
            self._backend.VolumetricConvolutionMM_updateOutput(
                self._backend.library_state, input, output, weight,
                bias, self.buffer1, *self.additional_args)
        if self.with_bias:
            self.save_for_backward(input, weight, bias)
        else:
            self.save_for_backward(input, weight)
        return output
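The forward pass above picks the GPU kernel or the CPU im2col-based kernel by string-comparing the input's type name. A standalone sketch of that dispatch idiom (the helper name is illustrative, not part of the original module):

import torch

def uses_cuda_float_path(tensor):
    # The legacy dispatch idiom used above: branch on the type-name
    # string rather than on isinstance checks.
    return torch.typename(tensor) == 'torch.cuda.FloatTensor'

print(uses_cuda_float_path(torch.randn(1)))  # False on a CPU tensor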
Source: conv.py (project: pytorch-dist, author: apaszke)
def _compute_grad_weight(self, grad_output):
        input, weight, bias = self._get_saved_tensors()
        # TODO: no zero needed in the future
        grad_weight = weight.new().resize_as_(weight).zero_()
        grad_bias = bias.new().resize_as_(bias).zero_()
        if torch.typename(input) == 'torch.cuda.FloatTensor':
            args = self.additional_args[3:] + (1,)
            self._backend.VolumetricConvolution_accGradParameters(
                self._backend.library_state, input, grad_output, grad_weight,
                grad_bias, self.buffer1, self.buffer2,
                *args)
        else:
            self._backend.VolumetricConvolutionMM_accGradParameters(
                self._backend.library_state, input, grad_output, grad_weight,
                grad_bias, self.buffer1, 1)
        return grad_weight, grad_bias
Source: ConcatTable.py (project: pytorch-dist, author: apaszke)
def __repr__(self):
        tab = '  '
        line = '\n'
        next = '  |`-> '
        ext = '  |    '
        extlast = '       '
        last = '   +. -> '
        res = torch.typename(self)
        res = res + ' {' + line + tab + 'input'
        for i in range(len(self.modules)):
            if i == len(self.modules) - 1:
                res = res + line + tab + next + '(' + str(i) + '): ' + \
                    str(self.modules[i]).replace(line, line + tab + extlast)
            else:
                res = res + line + tab + next + '(' + str(i) + '): ' + \
                    str(self.modules[i]).replace(line, line + tab + ext)

        res = res + line + tab + last + 'output'
        res = res + line + '}'
        return res
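Printing such a container renders the ASCII tree assembled above. A usage sketch, assuming a PyTorch build that still ships the legacy torch.legacy.nn package:

import torch
import torch.legacy.nn as nn

ct = nn.ConcatTable()
ct.add(nn.Linear(4, 2))
ct.add(nn.Tanh())
print(ct)  # type name followed by the '|`->' tree of child modules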
Source: CMul.py (project: pytorch-dist, author: apaszke)
def updateOutput(self, input):
        # lazy-initialize
        if self._output is None:
            self._output = input.new()
            self._weight = input.new()
            self._expand = input.new()
            self._repeat = input.new()

        self.output.resize_as_(input).copy_(input)
        batchSize = input.size(0)
        # TODO: expand_as_, view_
        self._output = self.output.view(batchSize, -1)
        self._weight = self.weight.view(1, -1)
        self._expand = self._weight.expand_as(self._output)

        if torch.typename(input) == 'torch.cuda.FloatTensor':
            self._repeat.resize_as_(self._expand).copy_(self._expand)
            self._output.mul_(self._repeat)
        else:
            self._output.mul_(self._expand)

        return self.output
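The type-name check exists because the CUDA branch first materializes the broadcast weight into self._repeat, presumably since the old THC kernels handled stride-0 expanded views poorly; on CPU the expanded view is multiplied in place directly. A minimal sketch of that CPU path:

import torch

out = torch.randn(2, 6)
w = torch.randn(1, 6)
expanded = w.expand_as(out)  # stride-0 broadcast view, no data copied
out.mul_(expanded)           # in-place elementwise scale, as in the CPU branch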
Source: Parallel.py (project: pytorch-dist, author: apaszke)
def __repr__(self):
        tab = '  '
        line = '\n'
        next = '  |`-> '
        ext = '  |    '
        extlast = '       '
        last = '   ... -> '
        res = torch.typename(self)
        res += ' {' + line + tab + 'input'
        for i in range(len(self.modules)):
            if i == len(self.modules)-1:
                res += line + tab + next + '(' + str(i) + '): ' + str(self.modules[i]).replace(line, line + tab + extlast)
            else:
                res += line + tab + next + '(' + str(i) + '): ' + str(self.modules[i]).replace(line, line + tab + ext)

        res += line + tab + last + 'output'
        res += line + '}'
        return res
Source: test_legacy_nn.py (project: pytorch-dist, author: apaszke)
def test_Copy(self):
        input = torch.randn(3, 4).double()
        c = nn.Copy(torch.DoubleTensor, torch.FloatTensor)
        output = c.forward(input)
        self.assertEqual(torch.typename(output), 'torch.FloatTensor')
        self.assertEqual(output, input.float(), 1e-6)
        gradInput = c.backward(input, output.fill_(1))
        self.assertEqual(torch.typename(gradInput), 'torch.DoubleTensor')
        self.assertEqual(gradInput, output.double(), 1e-6)
        c.dontCast = True
        c.double()
        self.assertEqual(torch.typename(output), 'torch.FloatTensor')

        # Check that these don't raise errors
        c.__repr__()
        str(c)
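The test exercises nn.Copy, which casts its input between the two tensor types and, once dontCast is set, ignores later retyping such as c.double(). A condensed sketch of the cast being asserted, again assuming the legacy package is available:

import torch
import torch.legacy.nn as legacy_nn

cast = legacy_nn.Copy(torch.DoubleTensor, torch.FloatTensor)
y = cast.forward(torch.randn(2, 2).double())
print(torch.typename(y))  # 'torch.FloatTensor'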
Source: CMul.py (project: pytorch, author: tylergenter)
def updateGradInput(self, input, gradOutput):
        if self.gradInput is None:
            return

        if self._gradOutput is None:
            self._gradOutput = input.new()
            self._gradInput = input.new()

        self.gradInput.resize_as_(input).zero_()
        batchSize = input.size(0)
        contiguousView(self._gradOutput, gradOutput, batchSize, -1)
        contiguousView(self._gradInput, self.gradInput, batchSize, -1)
        self._weight = self.weight.view(1, -1)
        self._expand = self._weight.expand_as(self._gradOutput)

        if torch.typename(input) == 'torch.cuda.FloatTensor':
            self._repeat.resize_as_(self._expand).copy_(self._expand)
            self._gradInput.addcmul_(1, self._repeat, self._gradOutput)
        else:
            self._gradInput.addcmul_(1, self._expand, self._gradOutput)

        return self.gradInput
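The backward pass accumulates weight * gradOutput into gradInput through addcmul_. The positional form addcmul_(1, a, b) used here was later deprecated; a sketch of the same CPU-branch update in the keyword form accepted by current releases:

import torch

grad_input = torch.zeros(2, 6)
grad_output = torch.randn(2, 6)
scale = torch.randn(1, 6).expand_as(grad_output)
# grad_input += value * scale * grad_output, matching the CPU branch above
grad_input.addcmul_(scale, grad_output, value=1)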
Source: ParallelTable.py (project: pytorch, author: tylergenter)
def __repr__(self):
        tab = '  '
        line = '\n'
        next = '  |`-> '
        ext = '  |    '
        extlast = '       '
        last = '   ... -> '
        res = torch.typename(self)
        res = res + ' {' + line + tab + 'input'
        for i in range(len(self.modules)):
            if i == len(self.modules) - 1:
                res = res + line + tab + next + '(' + str(i) + '): ' + \
                    str(self.modules[i]).replace(line, line + tab + extlast)
            else:
                res = res + line + tab + next + '(' + str(i) + '): ' + \
                    str(self.modules[i]).replace(line, line + tab + ext)

        res = res + line + tab + last + 'output'
        res = res + line + '}'
        return res
Source: test_nn.py (project: pytorch-coriander, author: hughperkins)
def default_tensor_type(type):
    type_str = torch.typename(type)

    def decorator(fn):
        @wraps(fn)
        def wrapper(*args, **kwargs):
            old_type = torch.typename(torch.Tensor())
            torch.set_default_tensor_type(type_str)
            try:
                return fn(*args, **kwargs)
            finally:
                torch.set_default_tensor_type(old_type)

        return wrapper

    return decorator
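The decorator swaps the process-wide default tensor type for the duration of one call and restores it afterwards, using torch.typename to turn the type object into the string that set_default_tensor_type expects. A hypothetical usage of the decorator defined above (the function name is illustrative):

import torch

@default_tensor_type(torch.DoubleTensor)
def make_vector():
    return torch.Tensor(3)  # allocated with the temporary default type

assert torch.typename(make_vector()) == 'torch.DoubleTensor'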
Source: optimizer.py (project: pytorch, author: ezyang)
def __init__(self, params, defaults):
        self.defaults = defaults

        if isinstance(params, Variable) or torch.is_tensor(params):
            raise TypeError("params argument given to the optimizer should be "
                            "an iterable of Variables or dicts, but got " +
                            torch.typename(params))

        self.state = defaultdict(dict)
        self.param_groups = []

        param_groups = list(params)
        if len(param_groups) == 0:
            raise ValueError("optimizer got an empty parameter list")
        if not isinstance(param_groups[0], dict):
            param_groups = [{'params': param_groups}]

        for param_group in param_groups:
            self.add_param_group(param_group)
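The constructor rejects a bare tensor or Variable, then wraps a flat iterable of parameters into a single param group before add_param_group validates each entry. A quick demonstration against a public optimizer built on this base class:

import torch
import torch.nn as nn
import torch.optim as optim

model = nn.Linear(10, 2)
opt = optim.SGD(model.parameters(), lr=0.1)  # iterable of Parameters: accepted

# A bare tensor is rejected with the TypeError raised above:
# optim.SGD(torch.randn(3), lr=0.1)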

