def imin(arrays, axis=-1, ignore_nan=False):
"""
Minimum of a stream of arrays along an axis.
Parameters
----------
arrays : iterable
Arrays to be reduced.
axis : int or None, optional
Axis along which the minimum is found. The default
is to find the minimum along the 'stream axis', as if all arrays in ``array``
were stacked along a new dimension. If ``axis = None``, arrays in ``arrays`` are flattened
before reduction.
ignore_nan : bool, optional
If True, NaNs are ignored. Default is propagation of NaNs.
Yields
------
online_min : ndarray
Cumulative minimum.
"""
ufunc = np.fmin if ignore_nan else np.minimum
yield from ireduce_ufunc(arrays, ufunc, axis)
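For orientation, a minimal, self-contained sketch of the same idea (a cumulative element-wise minimum over a stream of arrays) written without ireduce_ufunc; the helper name running_min and the example stream are only for illustration:

import numpy as np

def running_min(arrays, ignore_nan=False):
    # Cumulative element-wise minimum over a stream of equally-shaped arrays.
    ufunc = np.fmin if ignore_nan else np.minimum
    acc = None
    for arr in arrays:
        acc = np.array(arr, dtype=float) if acc is None else ufunc(acc, arr)
        yield acc

stream = [np.array([3.0, np.nan, 2.0]), np.array([1.0, 5.0, 4.0])]
print(list(running_min(stream, ignore_nan=True))[-1])  # [1. 5. 2.]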
Python fmin() example source code
def test_reduce(self):
    dflt = np.typecodes['AllFloat']
    dint = np.typecodes['AllInteger']
    seq1 = np.arange(11)
    seq2 = seq1[::-1]
    func = np.fmin.reduce
    for dt in dint:
        tmp1 = seq1.astype(dt)
        tmp2 = seq2.astype(dt)
        assert_equal(func(tmp1), 0)
        assert_equal(func(tmp2), 0)
    for dt in dflt:
        tmp1 = seq1.astype(dt)
        tmp2 = seq2.astype(dt)
        assert_equal(func(tmp1), 0)
        assert_equal(func(tmp2), 0)
        tmp1[::2] = np.nan
        tmp2[::2] = np.nan
        assert_equal(func(tmp1), 1)
        assert_equal(func(tmp2), 1)
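The last four assertions depend on np.fmin ignoring NaNs: once the even-indexed entries (including the 0) are set to NaN, the smallest remaining finite value is 1. The behaviour can be seen directly, outside the test harness:

import numpy as np

a = np.arange(11, dtype=float)
a[::2] = np.nan                 # 0, 2, 4, ... become NaN
print(np.fmin.reduce(a))        # 1.0, NaNs are skipped
print(np.minimum.reduce(a))     # nan, np.minimum propagates NaNs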
def test_NotImplemented_not_returned(self):
    # See gh-5964 and gh-2091. Some of these functions are not operator
    # related and were fixed for other reasons in the past.
    binary_funcs = [
        np.power, np.add, np.subtract, np.multiply, np.divide,
        np.true_divide, np.floor_divide, np.bitwise_and, np.bitwise_or,
        np.bitwise_xor, np.left_shift, np.right_shift, np.fmax,
        np.fmin, np.fmod, np.hypot, np.logaddexp, np.logaddexp2,
        np.logical_and, np.logical_or, np.logical_xor, np.maximum,
        np.minimum, np.mod
    ]
    # These functions still return NotImplemented. Will be fixed in
    # future.
    # bad = [np.greater, np.greater_equal, np.less, np.less_equal, np.not_equal]
    a = np.array('1')
    b = 1
    for f in binary_funcs:
        assert_raises(TypeError, f, a, b)
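The point of this test is that a binary ufunc such as np.fmin, given operands it cannot handle, raises TypeError instead of returning NotImplemented. A quick standalone check:

import numpy as np

try:
    np.fmin(np.array('1'), 1)   # string array versus integer
except TypeError as exc:
    print('TypeError raised:', exc)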
def __init__(self, dim=3):
    assert dim == 3
    centers = numpy.array([
        [.1, .8, .3],
    ])
    e_mat = numpy.array([
        [5, 5, 5],
    ])
    coefs = numpy.array([-5])
    def kernel(x):
        r2 = self.dist_sq(x, centers, e_mat)
        return numpy.exp(-r2)
    super(McCourt14, self).__init__(dim, kernel, e_mat, coefs, centers)
    self.min_loc = [.1, .8, .3]
    self.fmin = -5
    self.fmax = 0.00030641748
    self.classifiers = ['boring', 'unimodal']
def __init__(self, dim=3):
    assert dim == 3
    centers = numpy.array([
        [.1, .8, .3],
    ])
    e_mat = numpy.array([
        [7, 7, 7],
    ])
    coefs = numpy.array([-5])
    def kernel(x):
        r = numpy.sqrt(self.dist_sq(x, centers, e_mat))
        return numpy.exp(-r)
    super(McCourt15, self).__init__(dim, kernel, e_mat, coefs, centers)
    self.min_loc = [.1, .8, .3]
    self.fmin = -5
    self.fmax = 0.00030641748
    self.classifiers = ['boring', 'unimodal', 'nonsmooth']
def __init__(self, dim=4):
    assert dim == 4
    centers = numpy.array([
        [.3, .8, .3, .6],
        [.4, .9, .4, .7],
    ])
    e_mat = numpy.array([
        [5, 5, 5, 5],
        [5, 5, 5, 5],
    ])
    coefs = numpy.array([-5, 5])
    def kernel(x):
        r2 = self.dist_sq(x, centers, e_mat)
        return 1 / numpy.sqrt(1 + r2)
    super(McCourt16, self).__init__(dim, kernel, e_mat, coefs, centers)
    self.min_loc = [.1858, .6858, .1858, .4858]
    self.fmin = -0.84221700966
    self.fmax = 0.84132432380
    self.classifiers = ['boring', 'unimodal']
def __init__(self, dim=7):
    assert dim == 7
    centers = numpy.array([
        [.3, .8, .3, .6, .2, .8, .5],
        [.8, .3, .8, .2, .5, .2, .8],
        [.2, .7, .2, .5, .4, .7, .3],
    ])
    e_mat = numpy.array([
        [4, 4, 4, 4, 4, 4, 4],
        [4, 4, 4, 4, 4, 4, 4],
        [4, 4, 4, 4, 4, 4, 4],
    ])
    coefs = numpy.array([-5, 5, 5])
    def kernel(x):
        r2 = self.dist_sq(x, centers, e_mat)
        return 1 / numpy.sqrt(1 + r2)
    super(McCourt17, self).__init__(dim, kernel, e_mat, coefs, centers)
    self.min_loc = [.3125, .9166, .3125, .7062, .0397, .9270, .5979]
    self.fmin = -0.47089199032
    self.fmax = 4.98733340158
    self.classifiers = ['boring', 'unimodal']
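Each of the McCourt constructors above relies on a dist_sq helper from the parent class that is not shown in these snippets. For radial test functions of this kind it is usually the per-center, ellipsoidally weighted squared distance; a hypothetical sketch of that behaviour (assumed, not taken from the source):

import numpy

def dist_sq(x, centers, e_mat):
    # Assumed behaviour: weighted squared distance from x to each center,
    # using the matching row of e_mat as per-dimension scales.
    x = numpy.asarray(x, dtype=float)
    return numpy.array([numpy.sum(e * (x - c) ** 2)
                        for c, e in zip(centers, e_mat)])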
def __init__(self, dim=2):
    full_min_loc_vec = [
        2.202905513296628, 1.570796322320509, 1.284991564577549, 1.923058467505610,
        1.720469766517768, 1.570796319218113, 1.454413962081172, 1.756086513575824,
        1.655717409323190, 1.570796319387859, 1.497728796097675, 1.923739461688219,
    ]
    full_fmin_vec = [
        0.8013034100985499, 1, 0.9590912698958649, 0.9384624184720668,
        0.9888010806214966, 1, 0.9932271353558245, 0.9828720362721659,
        0.9963943649250527, 1, 0.9973305415507061, 0.9383447102236013,
    ]
    assert dim <= len(full_min_loc_vec)
    super(Michalewicz, self).__init__(dim)
    self.bounds = lzip([0] * self.dim, [pi] * self.dim)
    self.min_loc = full_min_loc_vec[:dim]
    self.fmin = -sum(full_fmin_vec[:dim])
    self.fmax = 0.0
    self.classifiers = ['boring', 'complicated']
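For reference, the standard Michalewicz objective with steepness parameter m (commonly m=10) is f(x) = -sum_i sin(x_i) * sin(i * x_i^2 / pi)^(2m), which is consistent with fmin above being the negative sum of the entries of full_fmin_vec. A hypothetical standalone version (the class's own do_evaluate is not included in this snippet, and the do_evaluate below belongs to a different test function):

import numpy

def michalewicz(x, m=10):
    # Assumed form of the objective; the class's evaluation code may differ in detail.
    x = numpy.asarray(x, dtype=float)
    i = numpy.arange(1, len(x) + 1)
    return -numpy.sum(numpy.sin(x) * numpy.sin(i * x ** 2 / numpy.pi) ** (2 * m))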
def do_evaluate(self, x):
    zh1 = (lambda v: 9 - v[0] - v[1])
    zh2 = (lambda v: (v[0] - 3) ** 2 + (v[1] - 2) ** 2 - 16)
    zh3 = (lambda v: v[0] * v[1] - 14)
    zp = (lambda v: 100 * (1 + v))
    px = [
        zh1(x),
        zp(zh2(x)) * sign(zh2(x)),
        zp(zh3(x)) * sign(zh3(x)),
        zp(-x[0]) * sign(x[0]),
        zp(-x[1]) * sign(x[1])
    ]
    return numpy.fmin(max(px), self.fmax)
# Below are all 1D functions
def sample_v_given_h(self, h, eps=1e-5):
    mean_v = self.mean_v.eval(feed_dict={self.hidden: h})
    if not self.beta_sampling:
        rnds = np.random.randn(mean_v.shape[0], mean_v.shape[1]).astype(h.dtype)
        return np.clip(mean_v + rnds * self.sigma, eps, 1. - eps)
    # Otherwise, sample from a Beta distribution whose mean is mean_v and whose
    # variance is the Bernoulli variance capped at sigma**2.
    mvvm = mean_v * (1 - mean_v)
    var_v = np.fmin(mvvm, self.sigma**2)
    operand = (mvvm + 1.5 * eps) / (var_v + eps) - 1
    alpha = mean_v * operand + eps
    beta = (1 - mean_v) * operand + eps
    return np.random.beta(alpha, beta).astype(h.dtype)
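The alpha/beta construction above is Beta-distribution moment matching: with operand = m*(1-m)/var - 1, alpha = m*operand and beta = (1-m)*operand, the Beta(alpha, beta) distribution has mean m and variance var, and np.fmin caps that variance at sigma**2. A small numerical check of the identity (the constants here are arbitrary):

import numpy as np

m, sigma = 0.3, 0.1
var = np.fmin(m * (1 - m), sigma ** 2)    # variance capped at sigma**2
operand = m * (1 - m) / var - 1
alpha, beta = m * operand, (1 - m) * operand
samples = np.random.beta(alpha, beta, size=200000)
print(samples.mean(), samples.std())      # approximately 0.3 and 0.1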
def sample_h_given_v(self, v, eps=1e-5):
    mean_h = self.mean_h.eval(feed_dict={self.visible: v})
    if not self.beta_sampling:
        rnds = np.random.randn(mean_h.shape[0], mean_h.shape[1]).astype(v.dtype)
        return np.clip(mean_h + rnds * self.sigma, eps, 1. - eps)
    mhhm = mean_h * (1 - mean_h)
    # Handle the cases where h is close to 0.0 or 1.0.
    # Normally the beta distribution will give a sample close to 0.0 or 1.0,
    # breaking the requirement that there be some variation (sample dispersion
    # close to 0.0 when it ought to be close to self.sigma).
    small_h = self.sigma**2 > mhhm
    small_count = np.sum(small_h)
    if small_count:
        # We randomize these cases with probability self.sigma.
        switch = np.random.rand(small_count) < self.sigma
        if np.sum(switch):
            # Chained boolean indexing (mean_h[small_h][switch] = ...) assigns
            # into a temporary copy, so write the randomized values back explicitly.
            small_vals = mean_h[small_h]
            small_vals[switch] = np.random.rand(np.sum(switch))
            mean_h[small_h] = small_vals
            mhhm = mean_h * (1 - mean_h)
    var_h = np.fmin(mhhm, self.sigma**2)
    operand = (mhhm + 1.5 * eps) / (var_h + eps) - 1
    alpha = mean_h * operand + eps
    beta = (1 - mean_h) * operand + eps
    return np.random.beta(alpha, beta).astype(v.dtype)
def test_reduce_complex(self):
    assert_equal(np.fmin.reduce([1, 2j]), 2j)
    assert_equal(np.fmin.reduce([1+3j, 2j]), 2j)
def test_float_nans(self):
    nan = np.nan
    arg1 = np.array([0, nan, nan])
    arg2 = np.array([nan, 0, nan])
    out = np.array([0, 0, nan])
    assert_equal(np.fmin(arg1, arg2), out)
def test_complex_nans(self):
    nan = np.nan
    for cnan in [complex(nan, 0), complex(0, nan), complex(nan, nan)]:
        arg1 = np.array([0, cnan, cnan], dtype=complex)
        arg2 = np.array([cnan, 0, cnan], dtype=complex)
        out = np.array([0, 0, nan], dtype=complex)
        assert_equal(np.fmin(arg1, arg2), out)
def __init__(self, dim, verify=True):
    assert dim > 0
    self.dim = dim
    self.verify = verify
    self.num_evals = 0
    self.min_loc = None
    self.fmin = None
    self.local_fmin = []
    self.fmax = None
    self.bounds = None
    self.classifiers = []
    # Note(Mike) - Not using the records yet, but will be soon
    self.records = None
    self.reset_records()
def __init__(self, func, res, verify=True):
    assert isinstance(func, TestFunction)
    if res <= 0:
        raise ValueError('Resolution level must be positive, level={0}'.format(res))
    super(Discretizer, self).__init__(func.dim, verify)
    self.bounds, self.min_loc = func.bounds, func.min_loc
    self.res = res
    self.fmax = numpy.floor(self.res * func.fmax) / self.res
    self.fmin = numpy.floor(self.res * func.fmin) / self.res
    self.func = func
    self.classifiers = list(set(self.classifiers) | set(['discrete']))
def __init__(self, func, fail_indicator, return_nan=True, verify=True):
    assert isinstance(func, TestFunction)
    super(Failifier, self).__init__(func.dim, verify)
    self.bounds, self.min_loc, self.fmax, self.fmin = func.bounds, func.min_loc, func.fmax, func.fmin
    self.func = func
    self.fail_indicator = fail_indicator
    self.return_nan = return_nan
    self.classifiers = list(set(self.classifiers) | set(['failure']))