def _str(self):
    if self.ndimension() == 0:
        return '[{} with no dimension]\n'.format(torch.typename(self))
    elif self.ndimension() == 1:
        strt = _vector_str(self)
    elif self.ndimension() == 2:
        strt = _matrix_str(self)
    else:
        strt = _tensor_str(self)
    size_str = 'x'.join(str(size) for size in self.size())
    device_str = '' if not self.is_cuda else \
        ' (GPU {})'.format(self.get_device())
    strt += '[{} of size {}{}]\n'.format(torch.typename(self),
                                         size_str, device_str)
    return '\n' + strt
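# Illustrative sketch (not from the original file): the trailer assembled above
# pairs torch.typename() with an 'NxM' size string. For a plain CPU tensor it
# comes out roughly as below; the exact type name depends on the default
# tensor type.
import torch
t = torch.randn(3, 4)
size_str = 'x'.join(str(s) for s in t.size())
print('[{} of size {}]'.format(torch.typename(t), size_str))
# e.g. [torch.FloatTensor of size 3x4]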
Python typename() examples (source code)
def forward(self, input, weight, bias=None):
    self._backend = type2backend[type(input)]
    # TODO: free buffers when not needed
    self.buffer1 = input.new()
    self.buffer2 = input.new()
    output = input.new()
    self.with_bias = bias is not None
    if torch.typename(input) == 'torch.cuda.FloatTensor':
        self._backend.VolumetricConvolution_updateOutput(
            self._backend.library_state, input, output, weight, bias,
            self.buffer1, self.buffer2, *self.additional_args[3:])
    else:
        self._backend.VolumetricConvolutionMM_updateOutput(
            self._backend.library_state, input, output, weight,
            bias, self.buffer1, *self.additional_args)
    if self.with_bias:
        self.save_for_backward(input, weight, bias)
    else:
        self.save_for_backward(input, weight)
    return output
def _compute_grad_weight(self, grad_output):
    input, weight, bias = self._get_saved_tensors()
    # TODO: no zero needed in the future
    grad_weight = weight.new().resize_as_(weight).zero_()
    grad_bias = bias.new().resize_as_(bias).zero_()
    if torch.typename(input) == 'torch.cuda.FloatTensor':
        args = self.additional_args[3:] + (1,)
        self._backend.VolumetricConvolution_accGradParameters(
            self._backend.library_state, input, grad_output, grad_weight,
            grad_bias, self.buffer1, self.buffer2,
            *args)
    else:
        self._backend.VolumetricConvolutionMM_accGradParameters(
            self._backend.library_state, input, grad_output, grad_weight,
            grad_bias, self.buffer1, 1)
    return grad_weight, grad_bias
def __repr__(self):
    tab = ' '
    line = '\n'
    next = ' |`-> '
    ext = ' | '
    extlast = ' '
    last = ' +. -> '
    res = torch.typename(self)
    res = res + ' {' + line + tab + 'input'
    for i in range(len(self.modules)):
        if i == len(self.modules) - 1:
            res = res + line + tab + next + '(' + str(i) + '): ' + str(self.modules[i]).replace(line, line + tab + extlast)
        else:
            res = res + line + tab + next + '(' + str(i) + '): ' + str(self.modules[i]).replace(line, line + tab + ext)
    res = res + line + tab + last + 'output'
    res = res + line + '}'
    return res
def updateOutput(self, input):
    # lazy-initialize
    if self._output is None:
        self._output = input.new()
        self._weight = input.new()
        self._expand = input.new()
        self._repeat = input.new()
    self.output.resize_as_(input).copy_(input)
    batchSize = input.size(0)
    # TODO: expand_as_, view_
    self._output = self.output.view(batchSize, -1)
    self._weight = self.weight.view(1, -1)
    self._expand = self._weight.expand_as(self._output)
    if torch.typename(input) == 'torch.cuda.FloatTensor':
        self._repeat.resize_as_(self._expand).copy_(self._expand)
        self._output.mul_(self._repeat)
    else:
        self._output.mul_(self._expand)
    return self.output
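# Equivalent computation for the non-CUDA branch above, written with plain
# tensor ops; `inp` and `weight` are stand-ins here, not names from the
# original module (a sketch, assuming weight holds one entry per flattened
# feature). The CUDA branch first copies the expanded weight into
# self._repeat, presumably because the stride-0 expanded view could not be
# fed to the CUDA mul_ kernel directly.
import torch
inp = torch.randn(2, 5)
weight = torch.randn(5)
flat = inp.view(inp.size(0), -1)
out = flat * weight.view(1, -1).expand_as(flat)  # same result as self._output.mul_(self._expand)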
def __repr__(self):
    tab = ' '
    line = '\n'
    next = ' |`-> '
    ext = ' | '
    extlast = ' '
    last = ' ... -> '
    res = torch.typename(self)
    res += ' {' + line + tab + 'input'
    for i in range(len(self.modules)):
        if i == len(self.modules) - 1:
            res += line + tab + next + '(' + str(i) + '): ' + str(self.modules[i]).replace(line, line + tab + extlast)
        else:
            res += line + tab + next + '(' + str(i) + '): ' + str(self.modules[i]).replace(line, line + tab + ext)
    res += line + tab + last + 'output'
    res += line + '}'
    return res
def test_Copy(self):
    input = torch.randn(3, 4).double()
    c = nn.Copy(torch.DoubleTensor, torch.FloatTensor)
    output = c.forward(input)
    self.assertEqual(torch.typename(output), 'torch.FloatTensor')
    self.assertEqual(output, input.float(), 1e-6)
    gradInput = c.backward(input, output.fill_(1))
    self.assertEqual(torch.typename(gradInput), 'torch.DoubleTensor')
    self.assertEqual(gradInput, output.double(), 1e-6)
    c.dontCast = True
    c.double()
    self.assertEqual(torch.typename(output), 'torch.FloatTensor')
    # Check that these don't raise errors
    c.__repr__()
    str(c)
def _str(self):
    if self.ndimension() == 0:
        return '[{} with no dimension]\n'.format(torch.typename(self))
    elif self.ndimension() == 1:
        strt = _vector_str(self)
    elif self.ndimension() == 2:
        strt = _matrix_str(self)
    else:
        strt = _tensor_str(self)
    size_str = 'x'.join(str(size) for size in self.size())
    device_str = '' if not self.is_cuda else \
        ' (GPU {})'.format(self.get_device())
    strt += '[{} of size {}{}]\n'.format(torch.typename(self),
                                         size_str, device_str)
    return '\n' + strt
def __repr__(self):
    tab = ' '
    line = '\n'
    next = ' |`-> '
    ext = ' | '
    extlast = ' '
    last = ' +. -> '
    res = torch.typename(self)
    res = res + ' {' + line + tab + 'input'
    for i in range(len(self.modules)):
        if i == len(self.modules) - 1:
            res = res + line + tab + next + '(' + str(i) + '): ' + \
                str(self.modules[i]).replace(line, line + tab + extlast)
        else:
            res = res + line + tab + next + '(' + str(i) + '): ' + \
                str(self.modules[i]).replace(line, line + tab + ext)
    res = res + line + tab + last + 'output'
    res = res + line + '}'
    return res
def updateOutput(self, input):
    # lazy-initialize
    if self._output is None:
        self._output = input.new()
        self._weight = input.new()
        self._expand = input.new()
        self._repeat = input.new()
    self.output.resize_as_(input).copy_(input)
    batchSize = input.size(0)
    # TODO: expand_as_, view_
    self._output = self.output.view(batchSize, -1)
    self._weight = self.weight.view(1, -1)
    self._expand = self._weight.expand_as(self._output)
    if torch.typename(input) == 'torch.cuda.FloatTensor':
        self._repeat.resize_as_(self._expand).copy_(self._expand)
        self._output.mul_(self._repeat)
    else:
        self._output.mul_(self._expand)
    return self.output
def updateGradInput(self, input, gradOutput):
    if self.gradInput is None:
        return
    if self._gradOutput is None:
        self._gradOutput = input.new()
        self._gradInput = input.new()
    self.gradInput.resize_as_(input).zero_()
    batchSize = input.size(0)
    contiguousView(self._gradOutput, gradOutput, batchSize, -1)
    contiguousView(self._gradInput, self.gradInput, batchSize, -1)
    self._weight = self.weight.view(1, -1)
    self._expand = self._weight.expand_as(self._gradOutput)
    if torch.typename(input) == 'torch.cuda.FloatTensor':
        self._repeat.resize_as_(self._expand).copy_(self._expand)
        self._gradInput.addcmul_(1, self._repeat, self._gradOutput)
    else:
        self._gradInput.addcmul_(1, self._expand, self._gradOutput)
    return self.gradInput
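# Sketch of what the addcmul_ calls above compute (names below are
# illustrative, not from the original class): t.addcmul_(value, t1, t2)
# accumulates t += value * t1 * t2, so the input gradient is simply the
# expanded weight multiplied elementwise by the incoming gradient.
import torch
weight = torch.randn(5)
grad_output = torch.randn(2, 5)
grad_input = torch.zeros(2, 5)
grad_input.addcmul_(1, weight.view(1, -1).expand_as(grad_output), grad_output)
# equivalent: grad_input += weight.view(1, -1).expand_as(grad_output) * grad_output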
def __repr__(self):
    tab = ' '
    line = '\n'
    next = ' |`-> '
    ext = ' | '
    extlast = ' '
    last = ' ... -> '
    res = torch.typename(self)
    res += ' {' + line + tab + 'input'
    for i in range(len(self.modules)):
        if i == len(self.modules) - 1:
            res += line + tab + next + '(' + str(i) + '): ' + \
                str(self.modules[i]).replace(line, line + tab + extlast)
        else:
            res += line + tab + next + '(' + str(i) + '): ' + str(self.modules[i]).replace(line, line + tab + ext)
    res += line + tab + last + 'output'
    res += line + '}'
    return res
def __repr__(self):
    tab = ' '
    line = '\n'
    next = ' |`-> '
    ext = ' | '
    extlast = ' '
    last = ' ... -> '
    res = torch.typename(self)
    res = res + ' {' + line + tab + 'input'
    for i in range(len(self.modules)):
        if i == len(self.modules) - 1:
            res = res + line + tab + next + '(' + str(i) + '): ' + \
                str(self.modules[i]).replace(line, line + tab + extlast)
        else:
            res = res + line + tab + next + '(' + str(i) + '): ' + \
                str(self.modules[i]).replace(line, line + tab + ext)
    res = res + line + tab + last + 'output'
    res = res + line + '}'
    return res
def test_Copy(self):
    input = torch.randn(3, 4).double()
    c = nn.Copy(torch.DoubleTensor, torch.FloatTensor)
    output = c.forward(input)
    self.assertEqual(torch.typename(output), 'torch.FloatTensor')
    self.assertEqual(output, input.float(), 1e-6)
    gradInput = c.backward(input, output.fill_(1))
    self.assertEqual(torch.typename(gradInput), 'torch.DoubleTensor')
    self.assertEqual(gradInput, output.double(), 1e-6)
    c.dontCast = True
    c.double()
    self.assertEqual(torch.typename(output), 'torch.FloatTensor')
    # Check that these don't raise errors
    c.__repr__()
    str(c)
def _str(self):
    if self.ndimension() == 0:
        return '[{} with no dimension]\n'.format(torch.typename(self))
    elif self.ndimension() == 1:
        strt = _vector_str(self)
    elif self.ndimension() == 2:
        strt = _matrix_str(self)
    else:
        strt = _tensor_str(self)
    size_str = 'x'.join(str(size) for size in self.size())
    device_str = '' if not self.is_cuda else \
        ' (GPU {})'.format(self.get_device())
    strt += '[{} of size {}{}]\n'.format(torch.typename(self),
                                         size_str, device_str)
    return '\n' + strt
def __repr__(self):
    tab = ' '
    line = '\n'
    next = ' |`-> '
    ext = ' | '
    extlast = ' '
    last = ' +. -> '
    res = torch.typename(self)
    res = res + ' {' + line + tab + 'input'
    for i in range(len(self.modules)):
        if i == len(self.modules) - 1:
            res = res + line + tab + next + '(' + str(i) + '): ' + \
                str(self.modules[i]).replace(line, line + tab + extlast)
        else:
            res = res + line + tab + next + '(' + str(i) + '): ' + \
                str(self.modules[i]).replace(line, line + tab + ext)
    res = res + line + tab + last + 'output'
    res = res + line + '}'
    return res
def updateOutput(self, input):
    # lazy-initialize
    if self._output is None:
        self._output = input.new()
        self._weight = input.new()
        self._expand = input.new()
        self._repeat = input.new()
    self.output.resize_as_(input).copy_(input)
    batchSize = input.size(0)
    # TODO: expand_as_, view_
    self._output = self.output.view(batchSize, -1)
    self._weight = self.weight.view(1, -1)
    self._expand = self._weight.expand_as(self._output)
    if torch.typename(input) == 'torch.cuda.FloatTensor':
        self._repeat.resize_as_(self._expand).copy_(self._expand)
        self._output.mul_(self._repeat)
    else:
        self._output.mul_(self._expand)
    return self.output
def updateGradInput(self, input, gradOutput):
    if self.gradInput is None:
        return
    if self._gradOutput is None:
        self._gradOutput = input.new()
        self._gradInput = input.new()
    self.gradInput.resize_as_(input).zero_()
    batchSize = input.size(0)
    contiguousView(self._gradOutput, gradOutput, batchSize, -1)
    contiguousView(self._gradInput, self.gradInput, batchSize, -1)
    self._weight = self.weight.view(1, -1)
    self._expand = self._weight.expand_as(self._gradOutput)
    if torch.typename(input) == 'torch.cuda.FloatTensor':
        self._repeat.resize_as_(self._expand).copy_(self._expand)
        self._gradInput.addcmul_(1, self._repeat, self._gradOutput)
    else:
        self._gradInput.addcmul_(1, self._expand, self._gradOutput)
    return self.gradInput
def __repr__(self):
    tab = ' '
    line = '\n'
    next = ' |`-> '
    ext = ' | '
    extlast = ' '
    last = ' ... -> '
    res = torch.typename(self)
    res = res + ' {' + line + tab + 'input'
    for i in range(len(self.modules)):
        if i == len(self.modules) - 1:
            res = res + line + tab + next + '(' + str(i) + '): ' + \
                str(self.modules[i]).replace(line, line + tab + extlast)
        else:
            res = res + line + tab + next + '(' + str(i) + '): ' + \
                str(self.modules[i]).replace(line, line + tab + ext)
    res = res + line + tab + last + 'output'
    res = res + line + '}'
    return res
def default_tensor_type(type):
    type_str = torch.typename(type)
    def decorator(fn):
        @wraps(fn)
        def wrapper(*args, **kwargs):
            old_type = torch.typename(torch.Tensor())
            torch.set_default_tensor_type(type_str)
            try:
                return fn(*args, **kwargs)
            finally:
                torch.set_default_tensor_type(old_type)
        return wrapper
    return decorator
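# Usage sketch for the decorator above (the function name below is
# hypothetical): inside the wrapped call, bare torch.Tensor() constructions
# use the requested default type, and the previous default is restored
# afterwards even if the body raises. Assumes `default_tensor_type` and
# `torch` are in scope.
@default_tensor_type(torch.DoubleTensor)
def check_double_default():
    t = torch.Tensor(3)
    assert torch.typename(t) == 'torch.DoubleTensor'

check_double_default()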
def test_Copy(self):
    input = torch.randn(3, 4).double()
    c = nn.Copy(torch.DoubleTensor, torch.FloatTensor)
    output = c.forward(input)
    self.assertEqual(torch.typename(output), 'torch.FloatTensor')
    self.assertEqual(output, input.float(), 1e-6)
    gradInput = c.backward(input, output.fill_(1))
    self.assertEqual(torch.typename(gradInput), 'torch.DoubleTensor')
    self.assertEqual(gradInput, output.double(), 1e-6)
    c.dontCast = True
    c.double()
    self.assertEqual(torch.typename(output), 'torch.FloatTensor')
    # Check that these don't raise errors
    c.__repr__()
    str(c)
def _str(self):
    if self.ndimension() == 0:
        return '[{} with no dimension]\n'.format(torch.typename(self))
    elif self.ndimension() == 1:
        strt = _vector_str(self)
    elif self.ndimension() == 2:
        strt = _matrix_str(self)
    else:
        strt = _tensor_str(self)
    size_str = 'x'.join(str(size) for size in self.size())
    device_str = '' if not self.is_cuda else \
        ' (GPU {})'.format(self.get_device())
    strt += '[{} of size {}{}]\n'.format(torch.typename(self),
                                         size_str, device_str)
    return '\n' + strt
def __init__(self, params, defaults):
    self.defaults = defaults
    if isinstance(params, Variable) or torch.is_tensor(params):
        raise TypeError("params argument given to the optimizer should be "
                        "an iterable of Variables or dicts, but got " +
                        torch.typename(params))
    self.state = defaultdict(dict)
    self.param_groups = []
    param_groups = list(params)
    if len(param_groups) == 0:
        raise ValueError("optimizer got an empty parameter list")
    if not isinstance(param_groups[0], dict):
        param_groups = [{'params': param_groups}]
    for param_group in param_groups:
        self.add_param_group(param_group)
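# Usage sketch for the params argument validated above: either a flat iterable
# of parameters or a list of dicts with per-group options. The SGD optimizer
# and the Linear model below are illustrative choices, not part of this file.
import torch
model = torch.nn.Linear(10, 2)
opt = torch.optim.SGD(
    [{'params': [model.weight], 'lr': 0.1},   # group with its own learning rate
     {'params': [model.bias]}],               # group that falls back to the default lr
    lr=0.01, momentum=0.9)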
def updateOutput(self, input):
    # lazy-initialize
    if self._output is None:
        self._output = input.new()
        self._weight = input.new()
        self._expand = input.new()
        self._repeat = input.new()
    self.output.resize_as_(input).copy_(input)
    batchSize = input.size(0)
    # TODO: expand_as_, view_
    self._output = self.output.view(batchSize, -1)
    self._weight = self.weight.view(1, -1)
    self._expand = self._weight.expand_as(self._output)
    if torch.typename(input) == 'torch.cuda.FloatTensor':
        self._repeat.resize_as_(self._expand).copy_(self._expand)
        self._output.mul_(self._repeat)
    else:
        self._output.mul_(self._expand)
    return self.output
def updateGradInput(self, input, gradOutput):
    if self.gradInput is None:
        return
    if self._gradOutput is None:
        self._gradOutput = input.new()
        self._gradInput = input.new()
    self.gradInput.resize_as_(input).zero_()
    batchSize = input.size(0)
    contiguousView(self._gradOutput, gradOutput, batchSize, -1)
    contiguousView(self._gradInput, self.gradInput, batchSize, -1)
    self._weight = self.weight.view(1, -1)
    self._expand = self._weight.expand_as(self._gradOutput)
    if torch.typename(input) == 'torch.cuda.FloatTensor':
        self._repeat.resize_as_(self._expand).copy_(self._expand)
        self._gradInput.addcmul_(1, self._repeat, self._gradOutput)
    else:
        self._gradInput.addcmul_(1, self._expand, self._gradOutput)
    return self.gradInput
def __repr__(self):
    tab = ' '
    line = '\n'
    next = ' |`-> '
    ext = ' | '
    extlast = ' '
    last = ' ... -> '
    res = torch.typename(self)
    res += ' {' + line + tab + 'input'
    for i in range(len(self.modules)):
        if i == len(self.modules) - 1:
            res += line + tab + next + '(' + str(i) + '): ' + \
                str(self.modules[i]).replace(line, line + tab + extlast)
        else:
            res += line + tab + next + '(' + str(i) + '): ' + str(self.modules[i]).replace(line, line + tab + ext)
    res += line + tab + last + 'output'
    res += line + '}'
    return res
def __repr__(self):
    tab = ' '
    line = '\n'
    next = ' |`-> '
    ext = ' | '
    extlast = ' '
    last = ' ... -> '
    res = torch.typename(self)
    res = res + ' {' + line + tab + 'input'
    for i in range(len(self.modules)):
        if i == len(self.modules) - 1:
            res = res + line + tab + next + '(' + str(i) + '): ' + \
                str(self.modules[i]).replace(line, line + tab + extlast)
        else:
            res = res + line + tab + next + '(' + str(i) + '): ' + \
                str(self.modules[i]).replace(line, line + tab + ext)
    res = res + line + tab + last + 'output'
    res = res + line + '}'
    return res
def default_tensor_type(type):
    type_str = torch.typename(type)
    def decorator(fn):
        @wraps(fn)
        def wrapper(*args, **kwargs):
            old_type = torch.typename(torch.Tensor())
            torch.set_default_tensor_type(type_str)
            try:
                return fn(*args, **kwargs)
            finally:
                torch.set_default_tensor_type(old_type)
        return wrapper
    return decorator
def test_Copy(self):
    input = torch.randn(3, 4).double()
    c = nn.Copy(torch.DoubleTensor, torch.FloatTensor)
    output = c.forward(input)
    self.assertEqual(torch.typename(output), 'torch.FloatTensor')
    self.assertEqual(output, input.float(), 1e-6)
    gradInput = c.backward(input, output.fill_(1))
    self.assertEqual(torch.typename(gradInput), 'torch.DoubleTensor')
    self.assertEqual(gradInput, output.double(), 1e-6)
    c.dontCast = True
    c.double()
    self.assertEqual(torch.typename(output), 'torch.FloatTensor')
    # Check that these don't raise errors
    c.__repr__()
    str(c)
def _str(self):
    if self.ndimension() == 0:
        return '[{} with no dimension]\n'.format(torch.typename(self))
    elif self.ndimension() == 1:
        strt = _vector_str(self)
    elif self.ndimension() == 2:
        strt = _matrix_str(self)
    else:
        strt = _tensor_str(self)
    size_str = 'x'.join(str(size) for size in self.size())
    device_str = '' if not self.is_cuda else \
        ' (GPU {})'.format(self.get_device())
    strt += '[{} of size {}{}]\n'.format(torch.typename(self),
                                         size_str, device_str)
    return '\n' + strt