def __call__(self, *args, **params):
    methodname = self.__name__
    instance = self.obj
    # Fallback: if the instance has not been initialized, use the first arg
    if instance is None:
        args = list(args)
        instance = args.pop(0)
    data = instance._data
    mask = instance._mask
    cls = type(instance)
    result = getattr(data, methodname)(*args, **params).view(cls)
    result._update_from(instance)
    if result.ndim:
        if not self._onmask:
            result.__setmask__(mask)
        elif mask is not nomask:
            result.__setmask__(getattr(mask, methodname)(*args, **params))
    else:
        if mask.ndim and (not mask.dtype.names and mask.all()):
            return masked
    return result
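
# The wrapper above (from numpy.ma's internals) delegates the method call
# to the raw data array and then re-attaches the mask. A minimal sketch of
# the visible effect through the public numpy.ma API:
import numpy as np

x = np.ma.array([[1, 2], [3, 4]], mask=[[0, 1], [0, 0]])
y = x.reshape(4)  # method runs on the underlying data; the mask is carried along
print(y)          # [1 -- 3 4]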
Python ndim() usage examples
def _sort(group_idx, a, size, fill_value, dtype=None, reversed_=False):
    if np.iscomplexobj(a):
        raise NotImplementedError("a must be real, could use np.lexsort or "
                                  "sort with recarray for complex.")
    if not (np.isscalar(fill_value) or len(fill_value) == 0):
        raise ValueError("fill_value must be scalar or an empty sequence")
    if reversed_:
        order_group_idx = np.argsort(group_idx - 1j * a, kind='mergesort')
    else:
        order_group_idx = np.argsort(group_idx + 1j * a, kind='mergesort')
    counts = np.bincount(group_idx, minlength=size)
    if np.ndim(a) == 0:
        a = np.full(size, a, dtype=type(a))
    ret = np.split(a[order_group_idx], np.cumsum(counts)[:-1])
    ret = np.asarray(ret, dtype=object)
    if np.isscalar(fill_value):
        fill_untouched(group_idx, ret, fill_value)
    return ret
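
# An aside on the trick above: numpy sorts complex values lexicographically
# (real part first, then imaginary part), so group_idx + 1j * a argsorts by
# group first and by value within each group. A small self-contained sketch:
import numpy as np

group_idx = np.array([1, 0, 1, 0])
a = np.array([4.0, 2.0, 3.0, 1.0])
order = np.argsort(group_idx + 1j * a, kind='mergesort')
print(a[order])  # [1. 2. 3. 4.] -- group 0 first, values sorted within groups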
def _sum(group_idx, a, size, fill_value, dtype=None):
    dtype = minimum_dtype_scalar(fill_value, dtype, a)
    if np.ndim(a) == 0:
        ret = np.bincount(group_idx, minlength=size).astype(dtype)
        if a != 1:
            ret *= a
    else:
        if np.iscomplexobj(a):
            ret = np.empty(size, dtype=dtype)
            ret.real = np.bincount(group_idx, weights=a.real, minlength=size)
            ret.imag = np.bincount(group_idx, weights=a.imag, minlength=size)
        else:
            ret = np.bincount(group_idx, weights=a, minlength=size).astype(dtype)
    if fill_value != 0:
        fill_untouched(group_idx, ret, fill_value)
    return ret
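
# The core of _sum is np.bincount with weights; a small illustration
# (the group_idx/a values here are invented for the example):
import numpy as np

group_idx = np.array([0, 0, 1, 2, 2, 2])
a = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
sums = np.bincount(group_idx, weights=a, minlength=4)
print(sums)  # [ 3.  3. 15.  0.] -- group 3 received no values and stays 0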
def _mean(group_idx, a, size, fill_value, dtype=np.dtype(np.float64)):
    if np.ndim(a) == 0:
        raise ValueError("cannot take mean with scalar a")
    counts = np.bincount(group_idx, minlength=size)
    if np.iscomplexobj(a):
        dtype = a.dtype  # TODO: this is a bit clumsy
        sums = np.empty(size, dtype=dtype)
        sums.real = np.bincount(group_idx, weights=a.real, minlength=size)
        sums.imag = np.bincount(group_idx, weights=a.imag, minlength=size)
    else:
        sums = np.bincount(group_idx, weights=a, minlength=size).astype(dtype)
    with np.errstate(divide='ignore'):
        ret = sums.astype(dtype) / counts
    if not np.isnan(fill_value):
        ret[counts == 0] = fill_value
    return ret
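
# _mean divides grouped sums by grouped counts; empty groups produce 0/0
# and must be patched with fill_value. A sketch with invented data:
import numpy as np

group_idx = np.array([0, 0, 2])
a = np.array([1.0, 3.0, 5.0])
counts = np.bincount(group_idx, minlength=3)
with np.errstate(divide='ignore', invalid='ignore'):
    means = np.bincount(group_idx, weights=a, minlength=3) / counts
print(means)  # [ 2. nan  5.] -- group 1 is empty, hence nan before filling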
def outer(self, a, b):
    """
    Return the function applied to the outer product of a and b.
    """
    (da, db) = (getdata(a), getdata(b))
    d = self.f.outer(da, db)
    ma = getmask(a)
    mb = getmask(b)
    if ma is nomask and mb is nomask:
        m = nomask
    else:
        ma = getmaskarray(a)
        mb = getmaskarray(b)
        m = umath.logical_or.outer(ma, mb)
    if (not m.ndim) and m:
        return masked
    if m is not nomask:
        np.copyto(d, da, where=m)
    if not d.shape:
        return d
    masked_d = d.view(get_masked_subclass(a, b))
    masked_d._mask = m
    return masked_d
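
# The output mask combines the two input masks with logical_or.outer, so
# an element of the outer product is masked whenever either operand was:
import numpy as np

ma = np.array([False, True])
mb = np.array([True, False, False])
print(np.logical_or.outer(ma, mb))
# [[ True False False]
#  [ True  True  True]]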
def round(self, decimals=0, out=None):
    """
    Return each element rounded to the given number of decimals.

    Refer to `numpy.around` for full documentation.

    See Also
    --------
    numpy.ndarray.round : corresponding function for ndarrays
    numpy.around : equivalent function
    """
    result = self._data.round(decimals=decimals, out=out).view(type(self))
    if result.ndim > 0:
        result._mask = self._mask
        result._update_from(self)
    elif self._mask:
        # Return masked when the scalar is masked
        result = masked
    # No explicit output: we're done
    if out is None:
        return result
    if isinstance(out, MaskedArray):
        out.__setmask__(self._mask)
    return out
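
# Observable behaviour of the method above via the public numpy.ma API;
# masked entries stay masked after rounding:
import numpy as np

x = np.ma.array([1.234, 5.678], mask=[False, True])
print(x.round(1))  # [1.2 --]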
def after_run(self, _run_context, run_values):
    fetches_batch = run_values.results
    for fetches in unbatch_dict(fetches_batch):
        # Convert to unicode
        fetches["predicted_tokens"] = np.char.decode(
            fetches["predicted_tokens"].astype("S"), "utf-8")
        predicted_tokens = fetches["predicted_tokens"]

        # If we're using beam search we take the first beam
        if np.ndim(predicted_tokens) > 1:
            predicted_tokens = predicted_tokens[:, 0]

        fetches["features.source_tokens"] = np.char.decode(
            fetches["features.source_tokens"].astype("S"), "utf-8")
        source_tokens = fetches["features.source_tokens"]
        source_len = fetches["features.source_len"]

        if self._unk_replace_fn is not None:
            # We slice the attention scores so that we do not
            # accidentally replace UNK with a SEQUENCE_END token
            attention_scores = fetches["attention_scores"]
            attention_scores = attention_scores[:, :source_len - 1]
            predicted_tokens = self._unk_replace_fn(
                source_tokens=source_tokens,
                predicted_tokens=predicted_tokens,
                attention_scores=attention_scores)

        sent = self.params["delimiter"].join(predicted_tokens).split(
            "SEQUENCE_END")[0]

        # Apply postproc
        if self._postproc_fn:
            sent = self._postproc_fn(sent)

        sent = sent.strip()
        print(sent)
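
# The decoding step joins tokens and cuts at the first SEQUENCE_END marker;
# a stripped-down sketch with made-up tokens and a space delimiter:
import numpy as np

predicted_tokens = np.array(["hello", "world", "SEQUENCE_END", "pad"])
sent = " ".join(predicted_tokens).split("SEQUENCE_END")[0].strip()
print(sent)  # hello world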
def forward(self, input, *args, **kwargs):
    assert np.ndim(input) == 3, 'Only support batch training.'
    # record
    self.last_input = input
    # dim
    nb_batch, nb_timesteps, nb_in = input.shape
    # outputs
    output = _zero((nb_batch, nb_timesteps, self.n_out))
    # forward
    for i in range(nb_timesteps):
        # data
        s_pre = _zero((nb_batch, self.n_out)) if i == 0 else output[:, i - 1, :]
        x_now = input[:, i, :]
        # computation
        z_now = self.gate_activation.forward(np.dot(x_now, self.U_z) +
                                             np.dot(s_pre, self.W_z) +
                                             self.b_z)
        r_now = self.gate_activation.forward(np.dot(x_now, self.U_r) +
                                             np.dot(s_pre, self.W_r) +
                                             self.b_r)
        h_now = self.activation.forward(np.dot(x_now, self.U_h) +
                                        np.dot(s_pre * r_now, self.W_h) +
                                        self.b_h)
        output[:, i, :] = (1 - z_now) * h_now + z_now * s_pre
    # record
    self.last_output = output
    # return
    if self.return_sequence:
        return self.last_output
    else:
        return self.last_output[:, -1, :]
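
# One GRU step in isolation, mirroring the update above. All shapes and
# weights here are invented for the sketch (batch=2, n_in=4, n_out=3):
import numpy as np

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

rng = np.random.RandomState(0)
x_now, s_pre = rng.randn(2, 4), np.zeros((2, 3))
U_z, U_r, U_h = (rng.randn(4, 3) for _ in range(3))
W_z, W_r, W_h = (rng.randn(3, 3) for _ in range(3))
z = sigmoid(x_now @ U_z + s_pre @ W_z)        # update gate
r = sigmoid(x_now @ U_r + s_pre @ W_r)        # reset gate
h = np.tanh(x_now @ U_h + (s_pre * r) @ W_h)  # candidate state
s_now = (1 - z) * h + z * s_pre               # blend old and candidate state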
def forward(self, input, *args, **kwargs):
    assert np.ndim(input) == 2
    self.last_input = input
    return self.embed_words[input]
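
# The lookup is plain fancy indexing: a (batch, time) array of word ids
# pulls rows from the embedding matrix, yielding (batch, time, dim):
import numpy as np

embed_words = np.random.rand(5, 3)  # vocabulary of 5, embedding dim 3
ids = np.array([[0, 2], [4, 1]])    # batch of 2 sequences, length 2
print(embed_words[ids].shape)       # (2, 2, 3)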
def backward(self, pre_grad, *args, **kwargs):
    new_h, new_w = self.out_shape[-2:]
    pool_h, pool_w = self.pool_size
    length = np.prod(self.pool_size)

    layer_grads = _zero(self.input_shape)
    if np.ndim(pre_grad) == 4:
        nb_batch, nb_axis, _, _ = pre_grad.shape
        for a in np.arange(nb_batch):
            for b in np.arange(nb_axis):
                for h in np.arange(new_h):
                    for w in np.arange(new_w):
                        h_shift, w_shift = h * pool_h, w * pool_w
                        layer_grads[a, b, h_shift: h_shift + pool_h, w_shift: w_shift + pool_w] = \
                            pre_grad[a, b, h, w] / length
    elif np.ndim(pre_grad) == 3:
        nb_batch, _, _ = pre_grad.shape
        for a in np.arange(nb_batch):
            for h in np.arange(new_h):
                for w in np.arange(new_w):
                    h_shift, w_shift = h * pool_h, w * pool_w
                    layer_grads[a, h_shift: h_shift + pool_h, w_shift: w_shift + pool_w] = \
                        pre_grad[a, h, w] / length
    else:
        raise ValueError("pre_grad must be 3-D or 4-D")
    return layer_grads
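
# Mean pooling routes the upstream gradient evenly across each window;
# the loops above do per-window what this repeat-based sketch does:
import numpy as np

pre_grad = np.array([[6.0]])  # gradient for a single 2x2 window
spread = np.repeat(np.repeat(pre_grad, 2, axis=0), 2, axis=1) / 4.0
print(spread)  # each of the 4 cells receives 1.5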
def forward(self, input, *args, **kwargs):
    # shape
    self.input_shape = input.shape
    pool_h, pool_w = self.pool_size
    new_h, new_w = self.out_shape[-2:]

    # forward
    self.last_input = input
    outputs = _zero(self.input_shape[:-2] + self.out_shape[-2:])
    if np.ndim(input) == 4:
        nb_batch, nb_axis, _, _ = input.shape
        for a in np.arange(nb_batch):
            for b in np.arange(nb_axis):
                for h in np.arange(new_h):
                    for w in np.arange(new_w):
                        # windows stride by the pool size (non-overlapping),
                        # matching the shifts used in backward()
                        h_shift, w_shift = h * pool_h, w * pool_w
                        outputs[a, b, h, w] = np.max(
                            input[a, b, h_shift:h_shift + pool_h, w_shift:w_shift + pool_w])
    elif np.ndim(input) == 3:
        nb_batch, _, _ = input.shape
        for a in np.arange(nb_batch):
            for h in np.arange(new_h):
                for w in np.arange(new_w):
                    h_shift, w_shift = h * pool_h, w * pool_w
                    outputs[a, h, w] = np.max(
                        input[a, h_shift:h_shift + pool_h, w_shift:w_shift + pool_w])
    else:
        raise ValueError("input must be 3-D or 4-D")
    return outputs
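
# For non-overlapping windows the quadruple loop above is equivalent to a
# reshape followed by a max over the window axes (sketch, batch of 1):
import numpy as np

x = np.arange(16.0).reshape(1, 4, 4)
out = x.reshape(1, 2, 2, 2, 2).max(axis=(2, 4))  # (batch, new_h, pool_h, new_w, pool_w)
print(out.shape)  # (1, 2, 2)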
def backward(self, pre_grad, *args, **kwargs):
    new_h, new_w = self.out_shape[-2:]
    pool_h, pool_w = self.pool_size

    layer_grads = _zero(self.input_shape)
    if np.ndim(pre_grad) == 4:
        nb_batch, nb_axis, _, _ = pre_grad.shape
        for a in np.arange(nb_batch):
            for b in np.arange(nb_axis):
                for h in np.arange(new_h):
                    for w in np.arange(new_w):
                        patch = self.last_input[a, b, h * pool_h:(h + 1) * pool_h,
                                                w * pool_w:(w + 1) * pool_w]
                        max_idx = np.unravel_index(patch.argmax(), patch.shape)
                        h_shift, w_shift = h * pool_h + max_idx[0], w * pool_w + max_idx[1]
                        # route the gradient to the argmax position of the window
                        layer_grads[a, b, h_shift, w_shift] = pre_grad[a, b, h, w]
    elif np.ndim(pre_grad) == 3:
        nb_batch, _, _ = pre_grad.shape
        for a in np.arange(nb_batch):
            for h in np.arange(new_h):
                for w in np.arange(new_w):
                    patch = self.last_input[a, h * pool_h:(h + 1) * pool_h,
                                            w * pool_w:(w + 1) * pool_w]
                    max_idx = np.unravel_index(patch.argmax(), patch.shape)
                    h_shift, w_shift = h * pool_h + max_idx[0], w * pool_w + max_idx[1]
                    layer_grads[a, h_shift, w_shift] = pre_grad[a, h, w]
    else:
        raise ValueError("pre_grad must be 3-D or 4-D")
    return layer_grads
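
# Max pooling routes each upstream gradient to the argmax cell of its
# window; np.unravel_index converts the flat argmax to 2-D coordinates:
import numpy as np

patch = np.array([[1.0, 5.0],
                  [3.0, 2.0]])
idx = np.unravel_index(patch.argmax(), patch.shape)
print(idx)  # (0, 1) -- the position that receives the gradient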
def test_MeanSquaredError():
    from npdl.objectives import MeanSquaredError
    obj = MeanSquaredError()
    outputs = np.random.rand(10, 20)
    targets = np.random.rand(10, 20)
    f_res = obj.forward(outputs, targets)
    b_res = obj.backward(outputs, targets)
    assert np.ndim(f_res) == 0
    assert np.ndim(b_res) == 2


def test_HellingerDistance():
    from npdl.objectives import HellingerDistance
    obj = HellingerDistance()
    outputs = np.random.random((10, 20))
    targets = np.random.random((10, 20))
    f_res = obj.forward(outputs, targets)
    b_res = obj.backward(outputs, targets)
    assert np.ndim(f_res) == 0
    assert np.ndim(b_res) == 2


def test_BinaryCrossEntropy():
    from npdl.objectives import BinaryCrossEntropy
    obj = BinaryCrossEntropy()
    outputs = np.random.randint(0, 2, (10, 1))
    targets = np.random.randint(0, 2, (10, 1))
    f_res = obj.forward(outputs, targets)
    b_res = obj.backward(outputs, targets)
    assert np.ndim(f_res) == 0
    assert np.ndim(b_res) == 2


def test_SoftmaxCategoricalCrossEntropy():
    from npdl.objectives import SoftmaxCategoricalCrossEntropy
    obj = SoftmaxCategoricalCrossEntropy()
    outputs = np.random.random((10, 20))
    targets = np.random.random((10, 20))
    f_res = obj.forward(outputs, targets)
    b_res = obj.backward(outputs, targets)
    assert np.ndim(f_res) == 0
    assert np.ndim(b_res) == 2
def toscalar(arg):
    arg = npp.checksize(arg, 1)
    r = np.ndim(arg)
    if r == 1:
        arg = arg[0]
    elif r == 2:
        arg = arg[0, 0]
    return arg
def tondim2(arg, ndim1tocolumn=False, copy=False):
    r = np.ndim(arg)
    if r == 0:
        arg = np.array(((arg,),))
    elif r == 1:
        arg = np.array((arg,))
        if ndim1tocolumn:
            arg = arg.T
    return np.array(arg, copy=copy)
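
# Expected behaviour of the helper above (assuming numpy is imported as np):
print(tondim2(3.0).shape)                             # (1, 1) -- scalar wrapped
print(tondim2([1.0, 2.0]).shape)                      # (1, 2) -- a row by default
print(tondim2([1.0, 2.0], ndim1tocolumn=True).shape)  # (2, 1) -- a column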
def checker(input_var, desire_size):
    '''
    Debug helper (run when debug == 1): sanity-check input_var
    against desire_size.
    '''
    if input_var is None:
        print('input_variable does not exist!')
    if desire_size is None:
        print('desire_size does not exist!')

    dd = numpy.size(desire_size)
    dims = numpy.shape(input_var)
    # print('dd=', dd, 'dims=', dims)
    if numpy.isnan(numpy.sum(input_var[:])):
        print('input has NaN')

    if numpy.ndim(input_var) < dd:
        print('input signal has too few dimensions')

    if dd > 1:
        if dims[0:dd] != desire_size[0:dd]:
            print(dims[0:dd])
            print(desire_size)
            print('input signal has wrong size1')
    elif dd == 1:
        if dims[0] != desire_size:
            print(dims[0])
            print(desire_size)
            print('input signal has wrong size2')

    if numpy.mod(numpy.prod(dims), numpy.prod(desire_size)) != 0:
        print('input signal shape is not a multiple of the desired size!')
def _create_kspace_sampling_density(nufft):
    """
    Compute the k-space sampling density from the nufft object.
    """
    y = numpy.ones((nufft.st['M'],), dtype=numpy.complex64)
    nufft.y = nufft.thr.to_device(y)
    nufft._y2k()
    w = numpy.abs(nufft.k_Kd2.get())
    nufft.st['w'] = w
    RTR = nufft.st['w']  # see __init__() in class "nufft"
    return RTR
# def _create_laplacian_kernel(nufft):
#     # =========================================================================
#     # Laplacian operator: convolution kernel in the spatial domain,
#     # related to the regularization constraint
#     # =========================================================================
#     uker = numpy.zeros(nufft.st['Kd'][:], dtype=numpy.complex64, order='C')
#     n_dims = numpy.size(nufft.st['Nd'])
#
#     if n_dims == 1:
#         uker[0] = -2.0
#         uker[1] = 1.0
#         uker[-1] = 1.0
#     elif n_dims == 2:
#         uker[0, 0] = -4.0
#         uker[1, 0] = 1.0
#         uker[-1, 0] = 1.0
#         uker[0, 1] = 1.0
#         uker[0, -1] = 1.0
#     elif n_dims == 3:
#         uker[0, 0, 0] = -6.0
#         uker[1, 0, 0] = 1.0
#         uker[-1, 0, 0] = 1.0
#         uker[0, 1, 0] = 1.0
#         uker[0, -1, 0] = 1.0
#         uker[0, 0, 1] = 1.0
#         uker[0, 0, -1] = 1.0
#
#     uker = numpy.fft.fftn(uker)  # over all axes of the Kd grid
#     return uker
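
# A runnable 2-D version of the commented-out kernel above: place the
# 5-point Laplacian stencil at the corner of the grid and FFT it to get
# the convolution's transfer function (the grid size here is invented):
import numpy as np

Kd = (8, 8)
uker = np.zeros(Kd, dtype=np.complex64)
uker[0, 0] = -4.0
uker[1, 0] = uker[-1, 0] = uker[0, 1] = uker[0, -1] = 1.0
UKER = np.fft.fftn(uker)  # multiply in k-space to apply the Laplacian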