def test_NotImplemented_not_returned(self):
# See gh-5964 and gh-2091. Some of these functions are not operator
# related and were fixed for other reasons in the past.
binary_funcs = [
np.power, np.add, np.subtract, np.multiply, np.divide,
np.true_divide, np.floor_divide, np.bitwise_and, np.bitwise_or,
np.bitwise_xor, np.left_shift, np.right_shift, np.fmax,
np.fmin, np.fmod, np.hypot, np.logaddexp, np.logaddexp2,
np.logical_and, np.logical_or, np.logical_xor, np.maximum,
np.minimum, np.mod
]
# These functions still return NotImplemented. Will be fixed in
# future.
# bad = [np.greater, np.greater_equal, np.less, np.less_equal, np.not_equal]
a = np.array('1')
b = 1
for f in binary_funcs:
assert_raises(TypeError, f, a, b)
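What the assertion above exercises: pairing a string-dtype array with a Python int leaves NumPy with no matching ufunc loop, so the call should raise TypeError instead of letting NotImplemented escape to the caller. A minimal standalone check, assuming a NumPy version that has no string loop for the ufunc used:

import numpy as np

a = np.array('1')          # zero-dimensional array of dtype '<U1'
try:
    np.power(a, 1)         # no loop for (str, int) inputs
except TypeError as exc:
    print('TypeError as expected:', exc)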
def get_collision_force(self, entity_a, entity_b):
if (not entity_a.collide) or (not entity_b.collide):
return [None, None] # not a collider
if (entity_a is entity_b):
return [None, None] # don't collide against itself
# compute actual distance between entities
delta_pos = entity_a.state.p_pos - entity_b.state.p_pos
dist = np.sqrt(np.sum(np.square(delta_pos)))
# minimum allowable distance
dist_min = entity_a.size + entity_b.size
# softmax penetration
k = self.contact_margin
penetration = np.logaddexp(0, -(dist - dist_min)/k)*k
force = self.contact_force * delta_pos / dist * penetration
force_a = +force if entity_a.movable else None
force_b = -force if entity_b.movable else None
return [force_a, force_b]
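The "softmax penetration" line is a softplus: np.logaddexp(0, z) equals log(1 + exp(z)), so k * logaddexp(0, -(dist - dist_min)/k) is a smooth stand-in for max(0, dist_min - dist) that sharpens as the contact margin k shrinks. A small sketch with made-up numbers illustrating the limit:

import numpy as np

dist_min = 1.0
dist = np.linspace(0.5, 1.5, 5)
hard = np.maximum(0.0, dist_min - dist)                # hinge penetration
for k in (0.25, 0.05, 0.01):
    soft = np.logaddexp(0, -(dist - dist_min) / k) * k
    print(k, np.max(np.abs(soft - hard)))              # gap shrinks toward 0 with k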
def increment(self,logL,nlive=None):
"""
Increment the state of the evidence integrator
Simply uses rectangle rule for initial estimate
"""
if(logL<=self.logLs[-1]):
print('WARNING: NS integrator received non-monotonic logL. {0:.3f} -> {1:.3f}'.format(self.logLs[-1],logL))
if nlive is None:
nlive = self.nlive
oldZ = self.logZ
logt=-1.0/nlive
Wt = self.logw + logL + logsubexp(0,logt)
self.logZ = logaddexp(self.logZ,Wt)
# Update information estimate
if np.isfinite(oldZ) and np.isfinite(self.logZ):
self.info = exp(Wt - self.logZ)*logL + exp(oldZ - self.logZ)*(self.info + oldZ) - self.logZ
# Update history
self.logw += logt
self.iteration += 1
self.logLs.append(logL)
self.log_vols.append(self.logw)
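The increment relies on a logsubexp helper that is not shown in this excerpt; logsubexp(0, logt) is log(1 - t) with t = exp(-1/nlive), the log of the fractional prior-volume shrinkage per iteration. A minimal sketch of such a helper, under the assumption that it computes log(exp(x) - exp(y)) for x >= y:

import numpy as np

def logsubexp(x, y):
    """log(exp(x) - exp(y)) for x >= y, evaluated without overflow."""
    # Factor out exp(x) so only exp(y - x) <= 1 is ever exponentiated.
    return x + np.log1p(-np.exp(y - x))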
def _expected_durations(self,
dur_potentials,cumulative_obs_potentials,
alphastarl,betal,normalizer):
if self.trunc is not None:
raise NotImplementedError("_expected_durations can't handle trunc")
T = self.T
logpmfs = -np.inf*np.ones_like(alphastarl)
errs = np.seterr(invalid='ignore')
for t in xrange(T):
cB, offset = cumulative_obs_potentials(t)
np.logaddexp(dur_potentials(t) + alphastarl[t] + betal[t:] +
cB - (normalizer + offset),
logpmfs[:T-t], out=logpmfs[:T-t])
np.seterr(**errs)
expected_durations = np.exp(logpmfs.T)
return expected_durations
# TODO call this 'time homog'
def messages_backwards(self):
# NOTE: np.maximum calls are because the C++ code doesn't do
# np.logaddexp(-inf,-inf) = -inf, it likes nans instead
from hsmm_messages_interface import messages_backwards_log
betal, betastarl = messages_backwards_log(
np.maximum(self.trans_matrix,1e-50),self.aBl,np.maximum(self.aDl,-1000000),
self.aDsl,np.empty_like(self.aBl),np.empty_like(self.aBl),
self.right_censoring,self.trunc if self.trunc is not None else self.T)
assert not np.isnan(betal).any()
assert not np.isnan(betastarl).any()
if not self.left_censoring:
self._normalizer = np.logaddexp.reduce(np.log(self.pi_0) + betastarl[0])
else:
raise NotImplementedError
return betal, betastarl
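The NOTE above concerns the C++ message-passing backend; NumPy's own ufunc does return -inf for logaddexp(-inf, -inf) rather than NaN, which is why the pure-NumPy reduction over np.log(self.pi_0) + betastarl[0] is safe. A quick check:

import numpy as np

print(np.logaddexp(-np.inf, -np.inf))                 # -inf, not nan
print(np.logaddexp.reduce([-np.inf, -np.inf, 0.0]))   # 0.0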
def _expected_durations(self,
dur_potentials,cumulative_obs_potentials,
alphastarl,betal,normalizer):
logpmfs = -np.inf*np.ones((self.Tfull,alphastarl.shape[1]))
errs = np.seterr(invalid='ignore') # logaddexp(-inf,-inf)
# TODO censoring not handled correctly here
for tblock in xrange(self.Tblock):
possible_durations = self.segmentlens[tblock:].cumsum()[:self.trunc]
cB, offset = cumulative_obs_potentials(tblock)
logpmfs[possible_durations -1] = np.logaddexp(
dur_potentials(tblock) + alphastarl[tblock]
+ betal[tblock:tblock+self.trunc if self.trunc is not None else None]
+ cB - (offset + normalizer),
logpmfs[possible_durations -1])
np.seterr(**errs)
return np.exp(logpmfs.T)
###################
# separate trans #
###################
def aBl_einsum(self):
if self._aBBl is None:
sigmas = np.array([[c.sigmas for c in d.components] for d in self.obs_distns])
Js = -1./(2*sigmas)
mus = np.array([[c.mu for c in d.components] for d in self.obs_distns])
# all_likes is T x Nstates x Ncomponents
all_likes = \
(np.einsum('td,td,nkd->tnk',self.data,self.data,Js)
- np.einsum('td,nkd,nkd->tnk',self.data,2*mus,Js))
all_likes += (mus**2*Js - 1./2*np.log(2*np.pi*sigmas)).sum(2)
# weights is Nstates x Ncomponents
weights = np.log(np.array([d.weights.weights for d in self.obs_distns]))
all_likes += weights[na,...]
# aBl is T x Nstates
aBl = self._aBl = np.logaddexp.reduce(all_likes, axis=2)
aBl[np.isnan(aBl).any(1)] = 0.
aBBl = self._aBBl = np.empty((self.Tblock,self.num_states))
for idx, (start,stop) in enumerate(self.changepoints):
aBBl[idx] = aBl[start:stop].sum(0)
return self._aBBl
def _expected_statistics_from_messages_slow(trans_potential,likelihood_log_potential,alphal,betal):
expected_states = alphal + betal
expected_states -= expected_states.max(1)[:,na]
np.exp(expected_states,out=expected_states)
expected_states /= expected_states.sum(1)[:,na]
Al = np.log(trans_potential)
log_joints = alphal[:-1,:,na] + (betal[1:,na,:] + likelihood_log_potential[1:,na,:]) + Al[na,...]
log_joints -= log_joints.max((1,2))[:,na,na]
joints = np.exp(log_joints)
joints /= joints.sum((1,2))[:,na,na] # NOTE: renormalizing each isnt really necessary
expected_transcounts = joints.sum(0)
normalizer = np.logaddexp.reduce(alphal[0] + betal[0])
return expected_states, expected_transcounts, normalizer
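Both normalisations above use the exp-normalize trick: subtracting the per-row maximum before exponentiating leaves the normalised posterior unchanged while keeping np.exp in range. A small illustration with made-up log potentials:

import numpy as np

logp = np.array([1000.0, 1001.0, 999.0])   # would overflow if exponentiated directly
stable = np.exp(logp - logp.max())
stable /= stable.sum()
print(stable, stable.sum())                # approx [0.2447, 0.6652, 0.0900], sums to 1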
### EM
def _messages_backwards_log_slow(trans_potential, init_potential, likelihood_log_potential,
feature_weights, window_data):
errs = np.seterr(over='ignore')
Al = np.log(trans_potential)
pil = np.log(init_potential)
aBl = likelihood_log_potential
nhs = trans_potential.shape[0]
sequence_length = aBl.shape[0]
betal = np.zeros((sequence_length, nhs * 2))
giant_Al_pil = np.tile(np.vstack((np.tile(pil, (nhs,1)), Al )), (1,2))
for t in xrange(betal.shape[0]-2,-1,-1):
temp_constant = np.sum(feature_weights[:-nhs-1] * window_data[t+1,:]) + feature_weights[-1]
temp_exp = temp_constant + feature_weights[-nhs-1:-1]
temp_logaddexp = np.logaddexp(0, temp_exp)
temp_log_linear = np.tile(temp_exp, 2) * np.repeat([0,1], nhs) - np.tile(temp_logaddexp, 2)
np.logaddexp.reduce( giant_Al_pil + betal[t+1] +
np.hstack((aBl[t+1], aBl[t+1])) +
temp_log_linear
,axis=1 ,out=(betal[t]))
np.seterr(**errs)
return betal
def _messages_backwards_log_fast(trans_potential, init_potential, likelihood_log_potential_llt):
errs = np.seterr(over='ignore')
Al = np.log(trans_potential)
pil = np.log(init_potential)
aBl = likelihood_log_potential_llt
nhs = trans_potential.shape[0]
sequence_length = aBl.shape[0]
betal = np.zeros((sequence_length, nhs * 2))
giant_Al_pil = np.tile(np.vstack((np.tile(pil, (nhs,1)), Al )), (1,2))
for t in xrange(betal.shape[0]-2,-1,-1):
np.logaddexp.reduce( giant_Al_pil + betal[t+1] + aBl[t+1], axis=1, out=(betal[t]))
np.seterr(**errs)
return betal
### Gibbs sampling
def _expected_segmentation_states(init_potential, expected_states, trans_potential, expected_joints,
feature_weights, window_data):
#log_q(s_t) for s_t = 1
data_length = window_data.shape[0]
mega_mat = np.hstack((window_data[:data_length - 1,:], expected_states[:data_length - 1,:]))
temp_1 = np.sum(feature_weights * mega_mat, axis=1)
with np.errstate(invalid='ignore'):
temp_2 = np.sum(np.sum(expected_joints[:data_length - 1,:] * np.log(trans_potential), axis = 1), axis = 1)
log_s_t_1 = temp_1 + temp_2
log_s_t_1 = np.append(log_s_t_1, -float("inf")) #the last state is always zero so the probability of s_t = 1 is zero
#log q(s_t) for s_t = 0
log_s_t_0 = np.sum(expected_states[1:, :] * np.log(init_potential), axis = 1)
log_s_t_0 = np.append(log_s_t_0, 0)
temp_stack = np.hstack((log_s_t_1[:, na], log_s_t_0[:, na])) #number of rows is the length of the sequence
expected_states = np.exp(temp_stack - np.logaddexp.reduce(temp_stack[:,:,na], axis = 1))
return expected_states
def _parallel_predict_log_proba(estimators, estimators_features, X, n_classes):
"""Private function used to compute log probabilities within a job."""
n_samples = X.shape[0]
log_proba = np.empty((n_samples, n_classes))
log_proba.fill(-np.inf)
all_classes = np.arange(n_classes, dtype=int)
for estimator, features in zip(estimators, estimators_features):
log_proba_estimator = estimator.predict_log_proba(X[:, features])
if n_classes == len(estimator.classes_):
log_proba = np.logaddexp(log_proba, log_proba_estimator)
else:
log_proba[:, estimator.classes_] = np.logaddexp(
log_proba[:, estimator.classes_],
log_proba_estimator[:, range(len(estimator.classes_))])
missing = np.setdiff1d(all_classes, estimator.classes_)
log_proba[:, missing] = np.logaddexp(log_proba[:, missing],
-np.inf)
return log_proba
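The repeated np.logaddexp calls accumulate log(sum of per-estimator probabilities) without leaving log space; the caller (not shown) typically subtracts log(n_estimators) afterwards to turn the sum into a mean. A toy illustration with two hypothetical estimators and one sample:

import numpy as np

lp1 = np.log(np.array([0.2, 0.8]))     # estimator 1 log-probabilities
lp2 = np.log(np.array([0.6, 0.4]))     # estimator 2 log-probabilities

acc = np.full(2, -np.inf)              # log(0) is the neutral element
for lp in (lp1, lp2):
    acc = np.logaddexp(acc, lp)

print(np.exp(acc - np.log(2)))         # [0.4 0.6], the averaged probabilities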
def _free_energy(self, v):
"""Computes the free energy F(v) = - log sum_h exp(-E(v,h)).
Parameters
----------
v : array-like, shape (n_samples, n_features)
Values of the visible layer.
Returns
-------
free_energy : array-like, shape (n_samples,)
The value of the free energy.
"""
return (- safe_sparse_dot(v, self.intercept_visible_)
- np.logaddexp(0, safe_sparse_dot(v, self.components_.T)
+ self.intercept_hidden_).sum(axis=1))
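The np.logaddexp(0, ...) term is the softplus log(1 + exp(...)), i.e. each binary hidden unit marginalised in closed form. A self-contained sanity check of that identity against brute-force summation over all hidden configurations, with toy sizes and made-up parameters rather than the class's own attributes:

import numpy as np

rng = np.random.default_rng(0)
n_vis, n_hid = 4, 3
W = rng.normal(size=(n_hid, n_vis))
b_vis = rng.normal(size=n_vis)
b_hid = rng.normal(size=n_hid)
v = rng.integers(0, 2, size=n_vis).astype(float)

# Closed form: F(v) = -v.b_vis - sum_j softplus(W_j.v + b_hid_j)
free_closed = -v @ b_vis - np.logaddexp(0, W @ v + b_hid).sum()

# Brute force: F(v) = -log sum_h exp(-E(v, h)) with E = -h.W.v - h.b_hid - v.b_vis
hs = np.array(np.meshgrid(*[[0, 1]] * n_hid)).reshape(n_hid, -1).T
energies = -(hs @ (W @ v)) - hs @ b_hid - v @ b_vis
free_brute = -np.logaddexp.reduce(-energies)

assert np.isclose(free_closed, free_brute)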
def logaddexp(arr):
"""Computes log(exp(arr[0]) + exp(arr[1]) + ...). """
assert len(arr) >= 2
res = np.logaddexp(arr[0], arr[1])
for i in arr[2:]:
res = np.logaddexp(res, i)
return res
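The loop is correct, but NumPy exposes the same fold directly as a ufunc reduction; for a 1-D array the following one-liner gives the same result and also handles the length-1 case:

import numpy as np

arr = np.log([0.1, 0.25, 0.65])
assert np.isclose(np.logaddexp.reduce(arr), 0.0)   # log(0.1 + 0.25 + 0.65) = log(1) = 0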
def test_logaddexp_values(self):
x = [1, 2, 3, 4, 5]
y = [5, 4, 3, 2, 1]
z = [6, 6, 6, 6, 6]
for dt, dec_ in zip(['f', 'd', 'g'], [6, 15, 15]):
xf = np.log(np.array(x, dtype=dt))
yf = np.log(np.array(y, dtype=dt))
zf = np.log(np.array(z, dtype=dt))
assert_almost_equal(np.logaddexp(xf, yf), zf, decimal=dec_)
def test_logaddexp_range(self):
x = [1000000, -1000000, 1000200, -1000200]
y = [1000200, -1000200, 1000000, -1000000]
z = [1000200, -1000000, 1000200, -1000000]
for dt in ['f', 'd', 'g']:
logxf = np.array(x, dtype=dt)
logyf = np.array(y, dtype=dt)
logzf = np.array(z, dtype=dt)
assert_almost_equal(np.logaddexp(logxf, logyf), logzf)
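What this range test guards against: evaluating log(exp(a) + exp(b)) literally overflows float64 once the arguments pass roughly 709, whereas logaddexp factors out the larger argument and stays finite. A quick demonstration:

import numpy as np

a, b = 1000.0, 999.0
with np.errstate(over='ignore'):
    naive = np.log(np.exp(a) + np.exp(b))   # inf: exp(1000) overflows float64
stable = np.logaddexp(a, b)                  # approx 1000.3133
print(naive, stable)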
def test_inf(self):
inf = np.inf
x = [inf, -inf, inf, -inf, inf, 1, -inf, 1]
y = [inf, inf, -inf, -inf, 1, inf, 1, -inf]
z = [inf, inf, inf, -inf, inf, inf, 1, 1]
with np.errstate(invalid='raise'):
for dt in ['f', 'd', 'g']:
logxf = np.array(x, dtype=dt)
logyf = np.array(y, dtype=dt)
logzf = np.array(z, dtype=dt)
assert_equal(np.logaddexp(logxf, logyf), logzf)
def test_nan(self):
assert_(np.isnan(np.logaddexp(np.nan, np.inf)))
assert_(np.isnan(np.logaddexp(np.inf, np.nan)))
assert_(np.isnan(np.logaddexp(np.nan, 0)))
assert_(np.isnan(np.logaddexp(0, np.nan)))
assert_(np.isnan(np.logaddexp(np.nan, np.nan)))
def sigmoid(x):
if x >= 0:
return math.exp(-np.logaddexp(0, -x))
else:
return math.exp(x - np.logaddexp(x, 0))
def sigmoid(x):
return math.exp(-np.logaddexp(0, -x))
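Both variants rest on the identity sigmoid(x) = exp(-log(1 + exp(-x))) = exp(-logaddexp(0, -x)); keeping the denominator in log form means extreme arguments underflow gracefully instead of overflowing the textbook 1/(1 + exp(-x)). A quick check of the identity and of the extremes:

import math
import numpy as np

def sigmoid(x):
    return math.exp(-np.logaddexp(0, -x))

for x in (-30.0, -1.0, 0.0, 1.0, 30.0):
    assert math.isclose(sigmoid(x), 1.0 / (1.0 + math.exp(-x)))
print(sigmoid(-1000.0), sigmoid(1000.0))     # 0.0 and 1.0, no overflow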
Source: test_softmax_cross_entropy.py (project: chainer-segnet, author: pfnet-research)
def check_forward(self, x_data, t_data, class_weight, use_cudnn=True):
x = chainer.Variable(x_data)
t = chainer.Variable(t_data)
loss = softmax_cross_entropy.softmax_cross_entropy(
x, t, use_cudnn=use_cudnn, normalize=self.normalize,
cache_score=self.cache_score, class_weight=class_weight)
self.assertEqual(loss.data.shape, ())
self.assertEqual(loss.data.dtype, self.dtype)
self.assertEqual(hasattr(loss.creator, 'y'), self.cache_score)
loss_value = float(cuda.to_cpu(loss.data))
# Compute expected value
loss_expect = 0.0
count = 0
x = numpy.rollaxis(self.x, 1, self.x.ndim).reshape(
(self.t.size, self.x.shape[1]))
t = self.t.ravel()
for xi, ti in six.moves.zip(x, t):
if ti == -1:
continue
log_z = numpy.ufunc.reduce(numpy.logaddexp, xi)
if class_weight is None:
loss_expect -= (xi - log_z)[ti]
else:
loss_expect -= (xi - log_z)[ti] * class_weight[ti]
count += 1
if self.normalize:
if count == 0:
loss_expect = 0.0
else:
loss_expect /= count
else:
loss_expect /= len(t_data)
testing.assert_allclose(
loss_expect, loss_value, **self.check_forward_options)
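The call numpy.ufunc.reduce(numpy.logaddexp, xi) is just an unbound spelling of numpy.logaddexp.reduce(xi), i.e. the log-partition log_z = log(sum_j exp(x_j)) of one softmax row, so (xi - log_z)[ti] is the log-probability of the target class. A short check with made-up logits:

import numpy as np

xi = np.array([1.5, -0.3, 2.1])
log_z = np.logaddexp.reduce(xi)
assert np.isclose(log_z, np.log(np.exp(xi).sum()))
ti = 2
loss = -(xi - log_z)[ti]                    # per-sample softmax cross-entropy
print(loss)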
def integrate_remainder(sampler, logwidth, logVolremaining, logZ, H, globalLmax):
# logwidth remains the same now for each sample
remainder = list(sampler.remainder())
logV = logwidth
L0 = remainder[-1][2]
L0 = globalLmax
logLs = [Li - L0 for ui, xi, Li in remainder]
Ls = numpy.exp(logLs)
LsMax = Ls.copy()
LsMax[-1] = numpy.exp(globalLmax - L0)
Lmax = LsMax[1:].sum(axis=0) + LsMax[-1]
#Lmax = Ls[1:].sum(axis=0) + Ls[-1]
Lmin = Ls[:-1].sum(axis=0) + Ls[0]
logLmid = log(Ls.sum(axis=0)) + L0
logZmid = logaddexp(logZ, logV + logLmid)
logZup = logaddexp(logZ, logV + log(Lmax) + L0)
logZlo = logaddexp(logZ, logV + log(Lmin) + L0)
logZerr = logZup - logZlo
assert numpy.isfinite(H).all()
assert numpy.isfinite(logZerr).all(), logZerr
for i in range(len(remainder)):
ui, xi, Li = remainder[i]
wi = logwidth + Li
logZnew = logaddexp(logZ, wi)
#Hprev = H
H = exp(wi - logZnew) * Li + exp(logZ - logZnew) * (H + logZ) - logZnew
H[H < 0] = 0
#assert (H>0).all(), (H, Hprev, wi, Li, logZ, logZnew)
logZ = logZnew
#assert numpy.isfinite(logZerr + (H / sampler.nlive_points)**0.5), (H, sampler.nlive_points, logZerr)
return logV + logLmid, logZerr, logZmid, logZerr + (H / sampler.nlive_points)**0.5, logZerr + (H / sampler.nlive_points)**0.5