def imax(arrays, axis, ignore_nan=False):
    """
    Maximum of a stream of arrays along an axis.

    Parameters
    ----------
    arrays : iterable
        Arrays to be reduced.
    axis : int or None, optional
        Axis along which the maximum is found. The default is to find the
        maximum along the 'stream axis', as if all arrays in ``arrays`` were
        stacked along a new dimension. If ``axis = None``, arrays in ``arrays``
        are flattened before reduction.
    ignore_nan : bool, optional
        If True, NaNs are ignored. Default is propagation of NaNs.

    Yields
    ------
    online_max : ndarray
        Cumulative maximum.
    """
    ufunc = np.fmax if ignore_nan else np.maximum
    yield from ireduce_ufunc(arrays, ufunc, axis)
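A minimal usage sketch (hedged: `ireduce_ufunc` and the `npstreams` import path are assumptions about the surrounding library, not confirmed by the snippet); `imax` yields the running maximum after each array is consumed:

import numpy as np
from npstreams import imax  # hypothetical import path

stream = (np.array([1.0, 5.0]), np.array([3.0, 2.0]), np.array([np.nan, 7.0]))
# axis=-1 is used here as the 'stream axis' sentinel described in the docstring
for running_max in imax(stream, axis=-1, ignore_nan=True):
    print(running_max)  # cumulative elementwise maximum of the arrays seen so far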
def test_reduce(self):
    dflt = np.typecodes['AllFloat']
    dint = np.typecodes['AllInteger']
    seq1 = np.arange(11)
    seq2 = seq1[::-1]
    func = np.fmax.reduce
    for dt in dint:
        tmp1 = seq1.astype(dt)
        tmp2 = seq2.astype(dt)
        assert_equal(func(tmp1), 10)
        assert_equal(func(tmp2), 10)
    for dt in dflt:
        tmp1 = seq1.astype(dt)
        tmp2 = seq2.astype(dt)
        assert_equal(func(tmp1), 10)
        assert_equal(func(tmp2), 10)
        tmp1[::2] = np.nan
        tmp2[::2] = np.nan
        assert_equal(func(tmp1), 9)
        assert_equal(func(tmp2), 9)
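The behavior the test above exercises, in isolation: np.fmax.reduce skips NaNs (so half-NaN data still reduces to 9), while np.maximum.reduce propagates them:

import numpy as np

a = np.arange(11.0)
a[::2] = np.nan              # NaN at every even index; largest remaining value is 9
print(np.fmax.reduce(a))     # 9.0  -- fmax ignores a NaN when the other operand is valid
print(np.maximum.reduce(a))  # nan  -- maximum propagates NaNs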
def test_NotImplemented_not_returned(self):
    # See gh-5964 and gh-2091. Some of these functions are not operator
    # related and were fixed for other reasons in the past.
    binary_funcs = [
        np.power, np.add, np.subtract, np.multiply, np.divide,
        np.true_divide, np.floor_divide, np.bitwise_and, np.bitwise_or,
        np.bitwise_xor, np.left_shift, np.right_shift, np.fmax,
        np.fmin, np.fmod, np.hypot, np.logaddexp, np.logaddexp2,
        np.logical_and, np.logical_or, np.logical_xor, np.maximum,
        np.minimum, np.mod
    ]
    # These functions still return NotImplemented. Will be fixed in
    # future.
    # bad = [np.greater, np.greater_equal, np.less, np.less_equal, np.not_equal]
    a = np.array('1')
    b = 1
    for f in binary_funcs:
        assert_raises(TypeError, f, a, b)
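A standalone illustration of what the test asserts: mixing a string array with a number makes the ufunc raise TypeError rather than leak a NotImplemented value:

import numpy as np

a = np.array('1')  # zero-dimensional string array, as in the test
try:
    np.fmax(a, 1)
except TypeError as exc:
    print('TypeError raised as expected:', exc)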
def fea_plot(xg_model, feature, label, type='weight', max_num_features=None):
    fig, AX = plt.subplots(nrows=1, ncols=2)
    xgb.plot_importance(xg_model, xlabel=type, importance_type='weight',
                        ax=AX[0], max_num_features=max_num_features)
    fscore = xg_model.get_score(importance_type=type)
    fscore = sorted(fscore.items(), key=itemgetter(1), reverse=True)  # sort scores
    fea_index = get_fea_index(fscore, max_num_features)
    feature = feature[:, fea_index]
    dimension = len(fea_index)
    X = range(1, dimension + 1)
    Yp = np.mean(feature[np.where(label == 1)[0]], axis=0)
    Yn = np.mean(feature[np.where(label != 1)[0]], axis=0)
    for i in range(0, dimension):
        param = np.fmax(Yp[i], Yn[i])
        Yp[i] /= param
        Yn[i] /= param
    p1 = AX[1].bar(X, +Yp, facecolor='#ff9999', edgecolor='white')
    p2 = AX[1].bar(X, -Yn, facecolor='#9999ff', edgecolor='white')
    AX[1].legend((p1, p2), ('Malware', 'Normal'))
    AX[1].set_title('Comparison of selected features by their means')
    AX[1].set_xlabel('Feature Index')
    AX[1].set_ylabel('Mean Value')
    AX[1].set_ylim(-1.1, 1.1)
    plt.xticks(X, fea_index + 1, rotation=80)
    plt.suptitle('Feature Selection results')
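The per-feature normalization loop in fea_plot can also be written as a single vectorized step; a small sketch, assuming Yp and Yn are the 1-D per-class mean vectors computed above:

param = np.fmax(Yp, Yn)  # elementwise maximum of the two class means
Yp_scaled = Yp / param   # same scaling as the loop, one array operation per class
Yn_scaled = Yn / param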
def test_obj_value_points_correctly_class_far_from_hyperplane(self):
    bias = 0.0
    w = np.array([-1, 1, bias])
    l = self.svm.l2reg
    X = np.array([[5, 0.3], [1, -0.8], [1, 6], [-0.6, 3]])
    Y = np.array([-1, -1, 1, 1])
    # compute loss for all X -> 1-yi*(xi*w+b)
    out = np.fmax(0, 1 - Y * (X.dot(w[0:-1]) + w[-1]))
    expectedObj = 1.0
    result, _ = self.svm._obj_func(w, X, Y, out)
    self.assertAlmostEqual(expectedObj, result)
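Why expectedObj is 1.0 here: with w = (-1, 1) and b = 0, every sample satisfies yi * (xi.w + b) > 1, so np.fmax clamps all four hinge losses to zero and the objective reduces to the regularization term l2reg * ||w||^2 / 2 = l2reg * (1 + 1) / 2. The expected value of 1.0 therefore assumes the test fixture sets l2reg = 1, which is not shown in this snippet.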
def variability_prob(ndvi, ndsi, whiteness):
    """Use the probability of the spectral variability
    to identify clouds over land.

    Equation 15 (Zhu and Woodcock, 2012)

    Parameters
    ----------
    ndvi: ndarray
    ndsi: ndarray
    whiteness: ndarray

    Output
    ------
    ndarray :
        probability of cloud over land based on variability
    """
    ndi_max = np.fmax(np.absolute(ndvi), np.absolute(ndsi))
    f_max = 1.0 - np.fmax(ndi_max, whiteness)
    return f_max


# Eq 16, land_cloud_prob
# lCloud_Prob = lTemperature_Prob x Variability_Prob
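A toy evaluation of variability_prob, assuming ndvi, ndsi, and whiteness have already been computed from the reflectance bands:

import numpy as np

ndvi = np.array([0.1, -0.4])
ndsi = np.array([0.2, 0.3])
whiteness = np.array([0.05, 0.5])
ndi_max = np.fmax(np.absolute(ndvi), np.absolute(ndsi))  # [0.2, 0.4]
print(1.0 - np.fmax(ndi_max, whiteness))                 # [0.8, 0.5]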
def test_reduce_complex(self):
    assert_equal(np.fmax.reduce([1, 2j]), 1)
    assert_equal(np.fmax.reduce([1+3j, 2j]), 1+3j)

def test_float_nans(self):
    nan = np.nan
    arg1 = np.array([0, nan, nan])
    arg2 = np.array([nan, 0, nan])
    out = np.array([0, 0, nan])
    assert_equal(np.fmax(arg1, arg2), out)

def test_complex_nans(self):
    nan = np.nan
    for cnan in [complex(nan, 0), complex(0, nan), complex(nan, nan)]:
        arg1 = np.array([0, cnan, cnan], dtype=np.complex)
        arg2 = np.array([cnan, 0, cnan], dtype=np.complex)
        out = np.array([0, 0, nan], dtype=np.complex)
        assert_equal(np.fmax(arg1, arg2), out)
def _obj_func(self, w, X, Y, out):
    """
    Computes primal value and gradient.

    Parameters
    ----------
    w : {array-like} - hyperplane normal vector
    X : {array-like, sparse matrix}, shape = [n_samples, n_features]
        Training vector, where n_samples is the number of samples and
        n_features is the number of features.
    Y : array-like, shape = [n_samples]
        Target vector relative to X
    out: loss function values

    Returns
    -------
    (obj, grad) : tuple, obj - function value, grad - gradient
    """
    l2reg = self.l2reg
    # we remember bias, to recover it after gradient computation
    bias = w[-1]
    # set bias to zero, don't penalize b
    w[-1] = 0
    max_out = np.fmax(0, out)
    obj = np.sum(max_out ** 2) / 2 + l2reg * w.dot(w) / 2
    grad = l2reg * w - np.append([np.dot(max_out * Y, X)], [np.sum(max_out * Y)])
    w[-1] = bias
    return (obj, grad)
def test_obj_value_points_correctly_class_close_to_hyperplane(self):
    bias = 0.0
    w = np.array([-1, 1, bias])
    l = self.svm.l2reg
    X = np.array([[0.5, 0.3], [1, 0.8], [1, 1.4], [0.6, 0.9]])
    Y = np.array([-1, -1, 1, 1])
    # compute loss for all X -> 1-yi*(xi*w+b)
    out = np.fmax(0, 1 - Y * (X.dot(w[0:-1]) + w[-1]))
    expectedObj = 2.0650000000000004
    result, _ = self.svm._obj_func(w, X, Y, out)
    self.assertAlmostEqual(expectedObj, result)
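A worked check of expectedObj, assuming l2reg = 1 in the test fixture (not shown here): the four hinge losses are out = [0.8, 0.8, 0.6, 0.7], so sum(out**2)/2 = (0.64 + 0.64 + 0.36 + 0.49)/2 = 1.065, and the regularization term is 1 * (1 + 1)/2 = 1.0 because the bias is excluded, giving 1.065 + 1.0 = 2.065.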
def test_obj_grad_points(self):
    bias = 0.0
    w = np.array([-1, 1, bias])
    l = self.svm.l2reg
    X = np.array([[0.5, 0.3], [1, 0.8], [1, 1.4], [0.6, 0.9]])
    Y = np.array([-1, -1, 1, 1])
    # compute loss for all X -> 1-yi*(xi*w+b)
    out = np.fmax(0, 1 - Y * (X.dot(w[0:-1]) + w[-1]))
    expected = np.array([-0.82, 0.41, 0.3])
    (obj, grad) = self.svm._obj_func(w, X, Y, out)
    np.testing.assert_array_almost_equal(expected, grad)
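A worked check of the expected gradient, under the same l2reg = 1 assumption: out * Y = [-0.8, -0.8, 0.6, 0.7], so X.T.dot(out * Y) = [-0.18, 0.59] and sum(out * Y) = -0.3; grad = l2reg * [-1, 1, 0] - [-0.18, 0.59, -0.3] = [-0.82, 0.41, 0.3], matching the expected array.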
def _scale_cosine_similarity(x, metric='cosine', inverse=False):
    """Given a cosine similarity on L2-normalized data,
    approximately convert it to a Jaccard similarity, and/or
    normalize it to the [0, 1] interval.

    Parameters
    ----------
    x : {float, ndarray}
        the cosine similarity value
    metric : str
        the conversion to apply, one of ['cosine', 'jaccard']
    inverse : bool
        perform the inverse de-normalization operation
    """
    valid_metrics = ['cosine', 'jaccard', 'cosine_norm', 'jaccard_norm',
                     'cosine-positive']
    if metric not in valid_metrics:
        raise ValueError('metric {} not supported, must be in {}'
                         .format(metric, valid_metrics))
    if metric == 'cosine':
        return x
    elif metric == 'cosine-positive':
        if isinstance(x, (int, float)):
            return max(x, 0.0)
        else:
            return np.fmax(x, 0.0)
    if metric.startswith('jaccard'):
        if not inverse:
            x = cosine2jaccard_similarity(x)
        else:
            x = jaccard2cosine_similarity(x)
    if metric.endswith('norm'):
        x = _normalize_similarity(x, metric=metric.split('_')[0],
                                  inverse=inverse)
    return x
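A quick usage sketch of the np.fmax branch, which clips negative similarities to zero (the jaccard and norm branches rely on helpers not shown in this snippet):

import numpy as np

print(_scale_cosine_similarity(-0.2, metric='cosine-positive'))                    # 0.0
print(_scale_cosine_similarity(np.array([0.9, -0.1]), metric='cosine-positive'))  # [0.9 0. ]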
def forward_cpu(self, inputs):
    x, = inputs
    # y = log(1 + exp(beta * x)) / beta
    bx = self.beta * x
    y = (numpy.fmax(bx, 0) +
         numpy.log1p(numpy.exp(-numpy.fabs(bx)))) * self.beta_inv
    return utils.force_array(y.astype(x.dtype)),
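The same numerically stable softplus trick as a standalone sketch (hedged: beta and beta_inv belong to the surrounding function class, so this rewrites the formula rather than reusing that class):

import numpy as np

def softplus(x, beta=1.0):
    # log(1 + exp(beta * x)) / beta, computed without overflow for large beta * x:
    # max(bx, 0) + log1p(exp(-|bx|)) equals log(1 + exp(bx)) for any sign of bx
    bx = beta * x
    return (np.fmax(bx, 0) + np.log1p(np.exp(-np.fabs(bx)))) / beta

print(softplus(np.array([-50.0, 0.0, 50.0])))  # approximately [0.0, 0.6931, 50.0]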