import numpy as np

def eval_numerical_gradient_array(f, x, df, h=1e-5):
'''
Evaluate a numeric gradient for a function that accepts a numpy
array and returns a numpy array.
'''
grad = np.zeros_like(x)
it = np.nditer(x, flags=['multi_index'], op_flags=['readwrite'])
while not it.finished:
ix = it.multi_index
oldval = x[ix]
x[ix] = oldval + h
pos = f(x).copy()
x[ix] = oldval - h
neg = f(x).copy()
x[ix] = oldval
grad[ix] = np.sum((pos - neg) * df) / (2 * h)
it.iternext()
return grad
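# A minimal usage sketch (the names f_scaled, x, and df below are illustrative,
# not from the original source): check the numeric gradient of an elementwise
# scaling against its known analytic gradient.
import numpy as np
x = np.random.randn(4, 5)
df = np.random.randn(4, 5)                  # upstream gradient w.r.t. f's output
f_scaled = lambda v: 3.0 * v                # analytic gradient w.r.t. x is 3.0 * df
num_grad = eval_numerical_gradient_array(f_scaled, x, df)
print(np.max(np.abs(num_grad - 3.0 * df)))  # ~0 up to floating-point error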
def test_iter_no_inner_dim_coalescing():
# Check no_inner iterators whose dimensions may not coalesce completely
# Skipping the last element in a dimension prevents coalescing
# with the next-bigger dimension
a = arange(24).reshape(2, 3, 4)[:,:, :-1]
i = nditer(a, ['external_loop'], [['readonly']])
assert_equal(i.ndim, 2)
assert_equal(i[0].shape, (3,))
a = arange(24).reshape(2, 3, 4)[:, :-1,:]
i = nditer(a, ['external_loop'], [['readonly']])
assert_equal(i.ndim, 2)
assert_equal(i[0].shape, (8,))
a = arange(24).reshape(2, 3, 4)[:-1,:,:]
i = nditer(a, ['external_loop'], [['readonly']])
assert_equal(i.ndim, 1)
assert_equal(i[0].shape, (12,))
# Even with lots of 1-sized dimensions, should still coalesce
a = arange(24).reshape(1, 1, 2, 1, 1, 3, 1, 1, 4, 1, 1)
i = nditer(a, ['external_loop'], [['readonly']])
assert_equal(i.ndim, 1)
assert_equal(i[0].shape, (24,))
def test_iter_scalar_cast_errors():
# Check that invalid casts are caught
# Need to allow copying/buffering for write casts of scalars to occur
assert_raises(TypeError, nditer, np.float32(2), [],
[['readwrite']], op_dtypes=[np.dtype('f8')])
assert_raises(TypeError, nditer, 2.5, [],
[['readwrite']], op_dtypes=[np.dtype('f4')])
# 'f8' -> 'f4' isn't a safe cast if the value would overflow
assert_raises(TypeError, nditer, np.float64(1e60), [],
[['readonly']],
casting='safe',
op_dtypes=[np.dtype('f4')])
# 'f4' -> 'i4' is neither a safe nor a same-kind cast
assert_raises(TypeError, nditer, np.float32(2), [],
[['readonly']],
casting='same_kind',
op_dtypes=[np.dtype('i4')])
def test_iter_op_axes_errors():
# Check that custom axes throws errors for bad inputs
# Wrong number of items in op_axes
a = arange(6).reshape(2, 3)
assert_raises(ValueError, nditer, [a, a], [], [['readonly']]*2,
op_axes=[[0], [1], [0]])
# Out of bounds items in op_axes
assert_raises(ValueError, nditer, [a, a], [], [['readonly']]*2,
op_axes=[[2, 1], [0, 1]])
assert_raises(ValueError, nditer, [a, a], [], [['readonly']]*2,
op_axes=[[0, 1], [2, -1]])
# Duplicate items in op_axes
assert_raises(ValueError, nditer, [a, a], [], [['readonly']]*2,
op_axes=[[0, 0], [0, 1]])
assert_raises(ValueError, nditer, [a, a], [], [['readonly']]*2,
op_axes=[[0, 1], [1, 1]])
# Different sized arrays in op_axes
assert_raises(ValueError, nditer, [a, a], [], [['readonly']]*2,
op_axes=[[0, 1], [0, 1, 0]])
# Non-broadcastable dimensions in the result
assert_raises(ValueError, nditer, [a, a], [], [['readonly']]*2,
op_axes=[[0, 1], [1, 0]])
def test_iter_allocate_output_types_promotion():
# Check type promotion of automatic outputs
i = nditer([array([3], dtype='f4'), array([0], dtype='f8'), None], [],
[['readonly']]*2+[['writeonly', 'allocate']])
assert_equal(i.dtypes[2], np.dtype('f8'))
i = nditer([array([3], dtype='i4'), array([0], dtype='f4'), None], [],
[['readonly']]*2+[['writeonly', 'allocate']])
assert_equal(i.dtypes[2], np.dtype('f8'))
i = nditer([array([3], dtype='f4'), array(0, dtype='f8'), None], [],
[['readonly']]*2+[['writeonly', 'allocate']])
assert_equal(i.dtypes[2], np.dtype('f4'))
i = nditer([array([3], dtype='u4'), array(0, dtype='i4'), None], [],
[['readonly']]*2+[['writeonly', 'allocate']])
assert_equal(i.dtypes[2], np.dtype('u4'))
i = nditer([array([3], dtype='u4'), array(-12, dtype='i4'), None], [],
[['readonly']]*2+[['writeonly', 'allocate']])
assert_equal(i.dtypes[2], np.dtype('i8'))
def test_iter_write_buffering():
# Test that buffering of writes is working
# F-order swapped array
a = np.arange(24).reshape(2, 3, 4).T.newbyteorder().byteswap()
i = nditer(a, ['buffered'],
[['readwrite', 'nbo', 'aligned']],
casting='equiv',
order='C',
buffersize=16)
x = 0
while not i.finished:
i[0] = x
x += 1
i.iternext()
assert_equal(a.ravel(order='C'), np.arange(24))
def test_iter_buffering_delayed_alloc():
# Test that delaying buffer allocation works
a = np.arange(6)
b = np.arange(1, dtype='f4')
i = nditer([a, b], ['buffered', 'delay_bufalloc', 'multi_index', 'reduce_ok'],
['readwrite'],
casting='unsafe',
op_dtypes='f4')
assert_(i.has_delayed_bufalloc)
assert_raises(ValueError, lambda i:i.multi_index, i)
assert_raises(ValueError, lambda i:i[0], i)
assert_raises(ValueError, lambda i:i[0:2], i)
def assign_iter(i):
i[0] = 0
assert_raises(ValueError, assign_iter, i)
i.reset()
assert_(not i.has_delayed_bufalloc)
assert_equal(i.multi_index, (0,))
assert_equal(i[0], 0)
i[1] = 1
assert_equal(i[0:2], [0, 1])
assert_equal([[x[0][()], x[1][()]] for x in i], list(zip(range(6), [1]*6)))
def test_iter_buffering_string():
# Safe casting disallows shrinking strings
a = np.array(['abc', 'a', 'abcd'], dtype=np.bytes_)
assert_equal(a.dtype, np.dtype('S4'))
assert_raises(TypeError, nditer, a, ['buffered'], ['readonly'],
op_dtypes='S2')
i = nditer(a, ['buffered'], ['readonly'], op_dtypes='S6')
assert_equal(i[0], asbytes('abc'))
assert_equal(i[0].dtype, np.dtype('S6'))
a = np.array(['abc', 'a', 'abcd'], dtype=np.unicode)
assert_equal(a.dtype, np.dtype('U4'))
assert_raises(TypeError, nditer, a, ['buffered'], ['readonly'],
op_dtypes='U2')
i = nditer(a, ['buffered'], ['readonly'], op_dtypes='U6')
assert_equal(i[0], sixu('abc'))
assert_equal(i[0].dtype, np.dtype('U6'))
def test_iter_buffering_reduction_reuse_reduce_loops():
# There was a bug triggering reuse of the reduce loop inappropriately,
# which caused processing to happen in unnecessarily small chunks
# and overran the buffer.
a = np.zeros((2, 7))
b = np.zeros((1, 7))
it = np.nditer([a, b], flags=['reduce_ok', 'external_loop', 'buffered'],
op_flags=[['readonly'], ['readwrite']],
buffersize=5)
bufsizes = []
for x, y in it:
bufsizes.append(x.shape[0])
assert_equal(bufsizes, [5, 2, 5, 2])
assert_equal(sum(bufsizes), a.size)
def test_iter_allocated_array_dtypes():
# If the dtype of an allocated output has a shape, the shape gets
# tacked onto the end of the result.
it = np.nditer(([1, 3, 20], None), op_dtypes=[None, ('i4', (2,))])
for a, b in it:
b[0] = a - 1
b[1] = a + 1
assert_equal(it.operands[1], [[0, 2], [2, 4], [19, 21]])
# Make sure this works for scalars too
it = np.nditer((10, 2, None), op_dtypes=[None, None, ('i4', (2, 2))])
for a, b, c in it:
c[0, 0] = a - b
c[0, 1] = a + b
c[1, 0] = a * b
c[1, 1] = a / b
assert_equal(it.operands[2], [[8, 12], [20, 5]])
def _broadcast_to(array, shape, subok, readonly):
shape = tuple(shape) if np.iterable(shape) else (shape,)
array = np.array(array, copy=False, subok=subok)
if not shape and array.shape:
raise ValueError('cannot broadcast a non-scalar to a scalar array')
if any(size < 0 for size in shape):
raise ValueError('all elements of broadcast shape must be non-'
'negative')
needs_writeable = not readonly and array.flags.writeable
extras = ['reduce_ok'] if needs_writeable else []
op_flag = 'readwrite' if needs_writeable else 'readonly'
broadcast = np.nditer(
(array,), flags=['multi_index', 'refs_ok', 'zerosize_ok'] + extras,
op_flags=[op_flag], itershape=shape, order='C').itviews[0]
result = _maybe_view_as_subclass(array, broadcast)
if needs_writeable and not result.flags.writeable:
result.flags.writeable = True
return result
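# For context: this helper is essentially the core of np.broadcast_to (the
# _maybe_view_as_subclass call above is defined elsewhere in the same module),
# which returns a read-only, zero-stride view rather than a copy. A brief check
# through the public wrapper:
import numpy as np
row = np.array([1, 2, 3])
tiled = np.broadcast_to(row, (4, 3))  # shape (4, 3), no data copied
print(tiled.strides)                  # first stride is 0: the new axis is virtual
print(tiled.flags.writeable)          # False: broadcast views are read-only by default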
def randomize(img, noise_level=.03):
""" given an array, randomizes the values in that array
noise_level [0,1] controls the overall likelihood of a bit being
flipped. This overall level is then multiplied by the levels variable,
which modifies the noise level for the various significant bit values
(i.e. it makes it so that less significant bits are more likely to be
flipped, which is accurate)
"""
levels = [.005, .01, .05, .10, .15, .25, .35, .45]
# more or less randomly chosen modifiers for each bit significance level
for val in np.nditer(img, op_flags=['readwrite']):
xor_val = 0
for level in levels:
if random.random() < level * noise_level:
xor_val = (xor_val << 1) | 1
else:
xor_val = (xor_val << 1) | 0
#print('{:08b}'.format(int(xor_val)))
val[...] = val ^ xor_val
return img
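# Hypothetical usage (the function above assumes numpy and the standard-library
# random module are imported, and it mutates its argument in place); with a high
# noise_level a sizeable fraction of the pixels change:
import random
import numpy as np
random.seed(0)
img = np.full((8, 8), 128, dtype=np.uint8)
noisy = randomize(img.copy(), noise_level=0.5)
print(np.count_nonzero(noisy != 128), "of", noisy.size, "pixels changed")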
def _passes_gradient_check(self, parameter):
iterator = np.nditer(parameter.value, flags=['multi_index'], op_flags=['readwrite'])
while not iterator.finished:
multi_index = iterator.multi_index
numerical_gradient = self._compute_numerical_gradient(parameter=parameter, multi_index=multi_index)
analytical_gradient = parameter.gradient[multi_index]
relative_error = self._compute_relative_error(
numerical_gradient=numerical_gradient,
analytical_gradient=analytical_gradient
)
if (relative_error > self.error_threshold) or np.isnan(relative_error):
return False
iterator.iternext()
return True
def one_hot_comparison(hot_axes, axes, C):
"""
TODO.
Arguments:
hot_axes: TODO
axes: TODO
"""
u = rng.random_integers(0, C.length - 1, axes, dtype=np.int8)
u_p = ng.placeholder(axes, dtype=u.dtype)
v = np.zeros(hot_axes.lengths, dtype=np.float32)
udxiter = np.nditer(u, flags=['multi_index'])
for uiter in udxiter:
vindex = [int(uiter)]
vindex.extend(udxiter.multi_index)
v[tuple(vindex)] = 1
with executor(ng.one_hot(u_p, axis=C), u_p) as ex:
v_t = ex(u)
ng.testing.assert_allclose(v_t, v)
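# The nditer/multi_index pattern above, isolated from the ngraph-specific pieces
# (a standalone sketch; labels and num_classes are illustrative names):
import numpy as np
labels = np.array([[2, 0], [1, 2]], dtype=np.int8)
num_classes = 3
one_hot = np.zeros((num_classes,) + labels.shape, dtype=np.float32)
it = np.nditer(labels, flags=['multi_index'])
for lab in it:
    one_hot[(int(lab),) + it.multi_index] = 1
print(one_hot[:, 0, 0])  # [0. 0. 1.] -- the class-2 slot for labels[0, 0]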
def numerical_gradient(f, x):
h = 1e-4 # 0.0001
grad = np.zeros_like(x)
it = np.nditer(x, flags=['multi_index'], op_flags=['readwrite'])
while not it.finished:
idx = it.multi_index
tmp_val = x[idx]
x[idx] = float(tmp_val) + h
fxh1 = f(x) # f(x+h)
x[idx] = tmp_val - h
fxh2 = f(x) # f(x-h)
grad[idx] = (fxh1 - fxh2) / (2 * h)
        x[idx] = tmp_val  # restore the original value
it.iternext()
return grad
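# Quick sanity check (a minimal sketch): for f(x) = sum(x**2) the analytic
# gradient is 2*x, so the central-difference estimate should match it closely.
import numpy as np
x_check = np.array([3.0, 4.0])
print(numerical_gradient(lambda v: np.sum(v ** 2), x_check))  # approximately [6. 8.]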
def finiteprecision(self, coeff=None, totalbits=None, shiftbits=None):
if coeff is None:
coeff = self.coefficients
if totalbits is None:
totalbits = self.totalbits
if shiftbits is None:
shiftbits = self.shiftbits
    res = coeff * 0 + coeff  # work on a copy so the input coefficients are unchanged
for x in np.nditer(res, op_flags=['readwrite']):
xr = np.round(x * 2 ** shiftbits)
xmax = 2 ** (totalbits - 1)
        if xr == 0 and x != 0:
logger.warning("One value was rounded off to zero: Increase "
"shiftbits in fpga design if this is a "
"problem!")
elif xr > xmax - 1:
xr = xmax - 1
logger.warning("One value saturates positively: Increase "
"totalbits or decrease gain!")
elif xr < -xmax:
xr = -xmax
logger.warning("One value saturates negatively: Increase "
"totalbits or decrease gain!")
x[...] = 2 ** (-shiftbits) * xr
return res
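# The loop above amounts to: round to a grid of 2**-shiftbits, then clamp to the
# signed range of totalbits bits. A standalone sketch of that step (the function
# name and defaults here are illustrative, not from the original class):
import numpy as np
def quantize(coeff, totalbits=32, shiftbits=16):
    xmax = 2 ** (totalbits - 1)
    xr = np.clip(np.round(np.asarray(coeff) * 2 ** shiftbits), -xmax, xmax - 1)
    return xr * 2.0 ** (-shiftbits)
print(quantize(np.array([0.1, -1.5, 3.14159])))  # values snapped to the 2**-16 grid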
def testComputation(self):
with self.test_session() as sess:
x = sess.run(snt.nets.identity_kernel_initializer([3, 3, 5, 5]))
# Iterate over elements. Assert that only the middle pixel is on when in
# and out channels are same.
it = np.nditer(x, flags=["multi_index"])
while not it.finished:
value, idx = it[0], it.multi_index
(filter_height, filter_width, in_channel, out_channel) = idx
if (filter_height == 1 and filter_width == 1 and
in_channel == out_channel):
self.assertEqual(value, 1)
else:
self.assertEqual(value, 0)
it.iternext()
def testComputation(self):
tf.set_random_seed(0)
with self.test_session() as sess:
initializer = snt.nets.noisy_identity_kernel_initializer(2, stddev=1e-20)
x = initializer([3, 3, 4, 8])
x = tf.reduce_sum(x, axis=[3])
x_ = sess.run(x)
# Iterate over elements. After summing over depth, assert that only the
# middle pixel is on.
it = np.nditer(x_, flags=["multi_index"])
while not it.finished:
value, idx = it[0], it.multi_index
(filter_height, filter_width, _) = idx
if filter_height == 1 and filter_width == 1:
self.assertAllClose(value, 1)
else:
self.assertAllClose(value, 0)
it.iternext()
def lower_dist_cumsum(context, builder, sig, args):
dtype = sig.args[0].dtype
zero = dtype(0)
def cumsum_impl(in_arr, out_arr):
c = zero
for v in np.nditer(in_arr):
c += v.item()
prefix_var = distributed_api.dist_exscan(c)
for i in range(in_arr.size):
prefix_var += in_arr[i]
out_arr[i] = prefix_var
return 0
res = context.compile_internal(builder, cumsum_impl, sig, args,
locals=dict(c=dtype,
prefix_var=dtype))
return res
def sort_breakend_order(svs):
'''
    per SV, ensure chr1/chr2 and pos1/pos2 are in consistent order
'''
svs = svs.copy()
for sv in np.nditer(svs, op_flags=['readwrite']):
if sv['chr1'] == sv['chr2']:
if sv['pos1'] > sv['pos2']:
ts = sv.copy()
sv['pos1'], sv['dir1'] = ts['pos2'], ts['dir2']
sv['pos2'], sv['dir2'] = ts['pos1'], ts['dir1']
else:
chrs = [str(sv['chr1']), str(sv['chr2'])]
if not np.all(np.array(chrs) == np.array(nice_sort(chrs))):
ts = sv.copy()
sv['chr1'], sv['pos1'], sv['dir1'] = ts['chr2'], ts['pos2'], ts['dir2']
sv['chr2'], sv['pos2'], sv['dir2'] = ts['chr1'], ts['pos1'], ts['dir1']
return svs
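# Minimal check of the intra-chromosomal branch above (the nice_sort helper used
# by the inter-chromosomal branch is defined elsewhere and is not exercised here):
import numpy as np
sv_dtype = [('chr1', 'S5'), ('pos1', int), ('dir1', 'S1'),
            ('chr2', 'S5'), ('pos2', int), ('dir2', 'S1')]
example_svs = np.array([(b'chr1', 500, b'+', b'chr1', 100, b'-')], dtype=sv_dtype)
print(sort_breakend_order(example_svs))  # pos1/pos2 (and dir1/dir2) swapped into order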