from functools import wraps

import torch

# NOTE: _torch_to_cffi and _cffi_to_torch are lookup tables defined alongside
# this helper in the surrounding module (torch.utils.ffi in 0.x-era PyTorch);
# they are not part of this excerpt.


def _wrap_function(function, ffi):
    @wraps(function)
    def safe_call(*args, **kwargs):
        args = tuple(ffi.cast(_torch_to_cffi.get(type(arg), 'void') + '*', arg._cdata)
                     if torch.is_tensor(arg) or torch.is_storage(arg)
                     else arg
                     for arg in args)
        args = (function,) + args
        result = torch._C._safe_call(*args, **kwargs)
        if isinstance(result, ffi.CData):
            typeof = ffi.typeof(result)
            if typeof.kind == 'pointer':
                cdata = int(ffi.cast('uintptr_t', result))
                cname = typeof.item.cname
                if cname in _cffi_to_torch:
                    return _cffi_to_torch[cname](cdata=cdata)
        return result
    return safe_call
Example source code for the Python method torch.is_tensor()
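# The snippets below are excerpts from 0.x-era PyTorch sources and tests. As a
# minimal, hedged illustration of the predicate they all rely on (not part of
# the original code; it assumes only the `import torch` added above):
_x = torch.randn(2, 3)
assert torch.is_tensor(_x)                   # tensors are recognised
assert not torch.is_tensor(_x.storage())     # storages are not tensors ...
assert torch.is_storage(_x.storage())        # ... torch.is_storage() covers those
assert not torch.is_tensor([1.0, 2.0])       # plain Python containers are not tensors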
def _load_backend(obj):
    if hasattr(obj, '_type'):
        obj._backend = type2backend[obj._type]
        return
    # Try to find tensor attributes and infer type from them
    for key in dir(obj):
        attr = getattr(obj, key)
        if torch.is_tensor(attr):
            try:
                obj._backend = type2backend[type(attr)]
            except KeyError:
                pass
    # Monkey patch the forward to capture the type of input
    updateOutput_orig = obj.updateOutput

    def updateOutput_patch(*args):
        input = args[0]
        while not torch.is_tensor(input):
            input = input[0]
        obj._backend = type2backend[type(input)]
        obj.updateOutput = updateOutput_orig
        return obj.updateOutput(*args)
    obj.updateOutput = updateOutput_patch
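# _load_backend() defers backend selection by monkey patching updateOutput: the
# first real call records the input's tensor type and then restores the original
# method. A standalone sketch of that "patch once, then restore" pattern, using
# a hypothetical _Dummy class in place of a legacy nn module:
class _Dummy(object):
    def updateOutput(self, input):
        return input * 2

_m = _Dummy()
_orig = _m.updateOutput

def _patched(input):
    _m.seen_type = type(input)       # stand-in for the type2backend lookup
    _m.updateOutput = _orig          # restore the original method
    return _m.updateOutput(input)

_m.updateOutput = _patched
_m.updateOutput(torch.randn(3))      # first call records the type and unpatches
assert _m.seen_type == type(torch.randn(3))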
def scatter(inputs, target_gpus, dim=0):
    """
    Slices variables into approximately equal chunks and
    distributes them across given GPUs. Duplicates
    references to objects that are not variables. Does not
    support Tensors.
    """
    def scatter_map(obj):
        if isinstance(obj, Variable):
            return Scatter.apply(target_gpus, None, dim, obj)
        assert not torch.is_tensor(obj), "Tensors not supported in scatter."
        if isinstance(obj, tuple):
            return list(zip(*map(scatter_map, obj)))
        if isinstance(obj, list):
            return list(map(list, zip(*map(scatter_map, obj))))
        if isinstance(obj, dict):
            return list(map(type(obj), zip(*map(scatter_map, obj.items()))))
        return [obj for targets in target_gpus]
    return scatter_map(inputs)
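# scatter() keeps the nesting of the input while splitting only the Variables:
# tuples/lists/dicts are transposed via zip() so every target device receives a
# structurally identical copy. A CPU-only sketch of that transposition, with
# torch.chunk standing in for Scatter.apply (hypothetical helper, not from the
# original module):
def _toy_scatter(inputs, num_targets, dim=0):
    def scatter_map(obj):
        if torch.is_tensor(obj):
            return torch.chunk(obj, num_targets, dim)     # one chunk per target
        if isinstance(obj, tuple):
            return list(zip(*map(scatter_map, obj)))
        if isinstance(obj, list):
            return list(map(list, zip(*map(scatter_map, obj))))
        return [obj for _ in range(num_targets)]          # replicate everything else
    return scatter_map(inputs)

_parts = _toy_scatter((torch.randn(4), 'tag'), num_targets=2)
assert len(_parts) == 2 and _parts[0][1] == _parts[1][1] == 'tag'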
def clear(self, *args):
    if len(args) == 1 and isinstance(args[0], list):
        args = args[0]

    def _clear(f):
        if not hasattr(self, f):
            return
        attr = getattr(self, f)
        if torch.is_tensor(attr):
            attr.set_()
        elif isinstance(attr, list):
            del attr[:]
        else:
            setattr(self, f, None)

    for key in args:
        _clear(key)
    return self
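# clear() releases memory held by the named attributes of the object passed as
# its first argument: tensors are reset to empty with set_(), lists are emptied
# in place, and anything else is set to None. A hedged usage sketch with a
# throwaway object (not from the original module):
class _State(object):
    pass

_s = _State()
_s.output = torch.randn(5)
_s.buffers = [1, 2, 3]
_s.extra = 'something'
clear(_s, 'output', 'buffers', 'extra')
assert _s.output.numel() == 0        # tensor emptied via set_()
assert _s.buffers == []              # list cleared in place
assert _s.extra is None              # other attributes dropped to None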
def create_input(call_args, requires_grad=True, non_contiguous=False):
    if not isinstance(call_args, tuple):
        call_args = (call_args,)

    def map_arg(arg):
        def maybe_non_contig(tensor):
            return tensor if not non_contiguous else make_non_contiguous(tensor)

        if isinstance(arg, torch.Size) or isinstance(arg, dont_convert):
            return arg
        elif isinstance(arg, tuple) and not isinstance(arg[0], Variable):
            return Variable(maybe_non_contig(torch.randn(*arg).double()), requires_grad=requires_grad)
        elif torch.is_tensor(arg):
            if isinstance(arg, torch.FloatTensor):
                return Variable(maybe_non_contig(arg.double()), requires_grad=requires_grad)
            else:
                return Variable(maybe_non_contig(arg), requires_grad=requires_grad)
        elif isinstance(arg, Variable) and non_contiguous:
            return Variable(maybe_non_contig(arg.data), requires_grad=arg.requires_grad)
        else:
            return arg
    return tuple(map_arg(arg) for arg in call_args)
def run_functional_checks(test_case, test_name, name, apply_fn, run_grad_checks,
                          f_args_variable, f_args_tensor):
    output_variable = apply_fn(*f_args_variable)
    if not exclude_tensor_method(name, test_name):
        output_tensor = apply_fn(*f_args_tensor)
        if not torch.is_tensor(output_tensor) and not isinstance(output_tensor, tuple):
            output_tensor = torch.DoubleTensor((output_tensor,))
        test_case.assertEqual(unpack_variables(output_variable), output_tensor)

    if run_grad_checks:
        run_grad_and_gradgrad_checks(test_case, test_name, apply_fn,
                                     output_variable, f_args_variable)

    self_variable = f_args_variable[0]
    if isinstance(output_variable, torch.autograd.Variable) and self_variable is not None:
        output_variable.backward(torch.randn(*output_variable.size()).type_as(output_variable.data))
        test_case.assertTrue(type(self_variable.data) == type(self_variable.grad.data))
        test_case.assertTrue(self_variable.size() == self_variable.grad.size())
def to_gpu(obj, type_map={}):
    if torch.is_tensor(obj):
        t = type_map.get(type(obj), get_gpu_type(type(obj)))
        return obj.clone().type(t)
    elif torch.is_storage(obj):
        return obj.new().resize_(obj.size()).copy_(obj)
    elif isinstance(obj, Variable):
        assert obj.is_leaf
        t = type_map.get(type(obj.data), get_gpu_type(type(obj.data)))
        return Variable(obj.data.clone().type(t), requires_grad=obj.requires_grad)
    elif isinstance(obj, list):
        return [to_gpu(o, type_map) for o in obj]
    elif isinstance(obj, tuple):
        return tuple(to_gpu(o, type_map) for o in obj)
    else:
        return deepcopy(obj)
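# to_gpu() relies on a get_gpu_type() helper that this excerpt does not show. A
# plausible sketch (an assumption about 0.x-era torch, not necessarily the
# original definition) looks up the CPU tensor type's CUDA counterpart:
def get_gpu_type(tensor_type):
    # e.g. torch.FloatTensor -> torch.cuda.FloatTensor
    return getattr(torch.cuda, tensor_type.__name__)

# Hedged usage (only meaningful on a CUDA build with the legacy tensor types):
#
#   if torch.cuda.is_available():
#       gpu_copy = to_gpu((torch.randn(2), [torch.randn(3)]))   # nesting preserved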
def safeCoalesce(self, t):
    tc = t.coalesce()

    value_map = {}
    for idx, val in zip(t._indices().t(), t._values()):
        idx_tup = tuple(idx)
        if idx_tup in value_map:
            value_map[idx_tup] += val
        else:
            value_map[idx_tup] = val.clone() if torch.is_tensor(val) else val

    new_indices = sorted(list(value_map.keys()))
    new_values = [value_map[idx] for idx in new_indices]
    if t._values().ndimension() < 2:
        new_values = t._values().new(new_values)
    else:
        new_values = torch.stack(new_values)

    new_indices = t._indices().new(new_indices).t()
    tg = t.new(new_indices, new_values, t.size())

    self.assertEqual(tc._indices(), tg._indices())
    self.assertEqual(tc._values(), tg._values())
    return tg
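# safeCoalesce() cross-checks torch's coalesce() against a dictionary-based
# reimplementation: duplicate indices must end up summed. A small, hedged
# illustration of the behaviour being verified (old-style sparse constructor):
_i = torch.LongTensor([[0, 0, 1],
                       [2, 2, 0]])
_v = torch.FloatTensor([3, 4, 5])
_sp = torch.sparse.FloatTensor(_i, _v, torch.Size([2, 3]))
_c = _sp.coalesce()
# the two entries at index (0, 2) collapse into a single value of 3 + 4 = 7
assert _c._values().tolist() == [7, 5]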
def to_numpy(x):
    if isinstance(x, Variable):
        return x.data.cpu().squeeze().numpy()
    elif torch.is_tensor(x):
        return x.cpu().squeeze().numpy()
    else:
        return x
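# to_numpy() normalises Variables and tensors to NumPy arrays and passes other
# values through unchanged. A hedged usage sketch:
import numpy as np

assert isinstance(to_numpy(torch.randn(1, 4)), np.ndarray)   # tensor -> ndarray
assert to_numpy(torch.randn(1, 4)).shape == (4,)             # singleton dims are squeezed
assert to_numpy(3.5) == 3.5                                  # non-tensors pass through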
# reference: https://github.com/pytorch/tnt/blob/master/torchnet/meter/msemeter.py
def _map_tensor_fromiter(itr):
    return _nested_map(lambda o: torch.is_tensor(o), lambda o: next(itr))
def _assertInput(self, input):
    if len(input) != 2 or not torch.is_tensor(input[0]) or not torch.is_tensor(input[1]):
        raise RuntimeError('input should be a table containing two data Tensors')
    if input[0].ndimension() != 2 or input[1].ndimension() != 2:
        raise RuntimeError('input Tensors should be two-dimensional')
    if input[0].size(0) != input[1].size(0):
        raise RuntimeError('input Tensors should have the same number of rows')
    if input[0].size(1) != self.weight.size(1):
        raise RuntimeError('dimensionality of first input is erroneous')
    if input[1].size(1) != self.weight.size(2):
        raise RuntimeError('dimensionality of second input is erroneous')
def recursiveType(param, type, tensorCache={}):
    from .Criterion import Criterion
    from .Module import Module

    if isinstance(param, list):
        for i, p in enumerate(param):
            param[i] = recursiveType(p, type, tensorCache)
    elif isinstance(param, Module) or isinstance(param, Criterion):
        param.type(type, tensorCache)
    elif torch.is_tensor(param):
        if torch.typename(param) != type:
            key = param._cdata
            if key in tensorCache:
                newparam = tensorCache[key]
            else:
                newparam = torch.Tensor().type(type)
                storageType = type.replace('Tensor', 'Storage')
                param_storage = param.storage()
                if param_storage:
                    storage_key = param_storage._cdata
                    if storage_key not in tensorCache:
                        tensorCache[storage_key] = torch._import_dotted_name(storageType)(param_storage.size()).copy_(param_storage)
                    newparam.set_(
                        tensorCache[storage_key],
                        param.storage_offset(),
                        param.size(),
                        param.stride()
                    )
                tensorCache[key] = newparam
            param = newparam
    return param
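# recursiveType() converts every tensor in an arbitrarily nested list (and in
# legacy nn Modules/Criterions) to the requested type string, caching storages
# so shared storage is converted only once. A hedged sketch of the intended
# call, assuming it runs inside a package where the relative imports above
# resolve (they do not in a standalone script):
#
#   nested = [torch.FloatTensor(2).fill_(1), [torch.FloatTensor(3).fill_(2)]]
#   converted = recursiveType(nested, 'torch.DoubleTensor', {})
#   assert torch.typename(converted[1][0]) == 'torch.DoubleTensor'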
def recursiveFill(t2, val):
    if isinstance(t2, list):
        t2 = [recursiveFill(x, val) for x in t2]
    elif torch.is_tensor(t2):
        t2.fill_(val)
    else:
        raise RuntimeError("expecting tensor or table thereof. Got " +
                           type(t2).__name__ + " instead")
    return t2
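# recursiveFill() fills every tensor in a nested list in place. A short hedged
# usage sketch:
_buffers = [torch.FloatTensor(2), [torch.FloatTensor(3)]]
recursiveFill(_buffers, 0)
assert float(_buffers[0].sum()) == 0.0 and float(_buffers[1][0].sum()) == 0.0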
def recursiveAdd(t1, val=1, t2=None):
    if t2 is None:
        t2 = val
        val = 1
    if isinstance(t2, list):
        t1 = t1 if isinstance(t1, list) else [t1]
        for i, _ in enumerate(t2):
            t1[i], t2[i] = recursiveAdd(t1[i], val, t2[i])
    elif torch.is_tensor(t1) and torch.is_tensor(t2):
        t1.add_(val, t2)
    else:
        raise RuntimeError("expecting nested tensors or tables. Got " +
                           type(t1).__name__ + " and " + type(t2).__name__ + " instead")
    return t1, t2
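# recursiveAdd() computes t1 += val * t2 element-wise through matching nested
# lists of tensors. A hedged usage sketch (relies on the old t1.add_(val, t2)
# call signature used in the snippet above):
_a = [torch.FloatTensor([1.0, 1.0]), [torch.FloatTensor([2.0])]]
_b = [torch.FloatTensor([1.0, 2.0]), [torch.FloatTensor([3.0])]]
recursiveAdd(_a, 2, _b)                  # a <- a + 2 * b, recursively
assert _a[0].tolist() == [3.0, 5.0]
assert _a[1][0].tolist() == [8.0]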
def recursiveCopy(t1, t2):
    if isinstance(t2, list):
        t1 = t1 if isinstance(t1, list) else [t1]
        for i, _ in enumerate(t2):
            t1[i], t2[i] = recursiveCopy(t1[i], t2[i])
    elif torch.is_tensor(t2):
        t1 = t1 if torch.is_tensor(t1) else t2.new()
        t1.resize_as_(t2).copy_(t2)
    else:
        raise RuntimeError("expecting nested tensors or tables. Got " +
                           type(t1).__name__ + " and " + type(t2).__name__ + " instead")
    return t1, t2
def addSingletondimension(*args):
    view = None
    if len(args) < 3:
        t, dim = args
        return t.unsqueeze(dim)
    else:
        view, t, dim = args
        assert torch.is_tensor(view)
        view.set_(t)
        return view.unsqueeze_(dim)
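# addSingletondimension() either returns t.unsqueeze(dim) or, when given a view
# tensor as the first argument, points that view at t's storage and unsqueezes
# it in place. A hedged sketch of both call forms:
_t = torch.randn(3, 4)
assert addSingletondimension(_t, 1).size() == (3, 1, 4)
_view = _t.new()
assert addSingletondimension(_view, _t, 0).size() == (1, 3, 4)
assert _view.dim() == 3                  # the provided view was modified in place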
def _flatten_tensors(self, x):
    if torch.is_tensor(x):
        return x.view(-1)
    elif isinstance(x, Variable):
        return x.data.view(-1)
    else:
        return tuple(self._flatten_tensors(a) for a in x)
def _zero_grad_input(self, input):
    if isinstance(input, Variable):
        input.grad.zero_()
    elif torch.is_tensor(input):
        return
    else:
        for i in input:
            self._zero_grad_input(i)
def _unpack_input(self, input):
    if isinstance(input, Variable):
        return input.data
    elif torch.is_tensor(input):
        return input
    else:
        return type(input)(self._unpack_input(i) for i in input)
def _get_input(self):
    if self.input is not None:
        return self.input

    def map_input_sizes(sizes):
        if isinstance(sizes, list):
            return [map_input_sizes(s) for s in sizes]
        elif torch.is_tensor(sizes):
            return sizes.double()
        else:
            return torch.randn(*sizes)

    assert self.input_size is not None
    return map_input_sizes(self.input_size)