def logpdf(self, samples):
    '''
    Calculates the log of the probability density function.

    Parameters
    ----------
    samples : array_like
        n-by-2 matrix of samples, where n is the number of samples.

    Returns
    -------
    vals : ndarray
        Log of the probability density function evaluated at `samples`.
    '''
    samples = np.copy(samples)
samples = self.__rotate_input(samples)
    inner = np.all(np.logical_and(samples > 0.0, samples < 1.0), axis=1)
outer = np.invert(inner)
vals = np.zeros(samples.shape[0])
vals[inner] = self._logpdf(samples[inner, :])
    # Samples on or outside the unit-square border get zero density (log-density -inf)
vals[outer] = -np.inf
return vals
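# Minimal standalone sketch (hypothetical, not the class above) of the masking
# idiom used in `logpdf`: density is evaluated only strictly inside the unit
# square, and everything on or outside the boundary gets log-density -inf.
import numpy as np

def logpdf_unit_square(samples, inner_logpdf):
    samples = np.asarray(samples, dtype=float)
    inner = np.all(np.logical_and(samples > 0.0, samples < 1.0), axis=1)
    vals = np.full(samples.shape[0], -np.inf)
    vals[inner] = inner_logpdf(samples[inner, :])
    return vals

# With a uniform density on the unit square (log-density 0 inside):
# logpdf_unit_square([[0.5, 0.5], [1.0, 0.2]], lambda s: np.zeros(len(s)))
# -> array([  0., -inf])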
def _logcdf(self, samples):
lower = np.full(2, -np.inf)
upper = norm.ppf(samples)
limit_flags = np.zeros(2)
if upper.shape[0] > 0:
def func1d(upper1d):
'''
Calculates the multivariate normal cumulative distribution
function of a single sample.
'''
return mvn.mvndst(lower, upper1d, limit_flags, self.theta)[1]
vals = np.apply_along_axis(func1d, -1, upper)
else:
vals = np.empty((0, ))
    with np.errstate(divide='ignore'):
        vals = np.log(vals)
vals[np.any(samples == 0.0, axis=1)] = -np.inf
vals[samples[:, 0] == 1.0] = np.log(samples[samples[:, 0] == 1.0, 1])
vals[samples[:, 1] == 1.0] = np.log(samples[samples[:, 1] == 1.0, 0])
return vals
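# Hedged, self-contained sketch of the same Gaussian-copula log-CDF using the
# public scipy API instead of the low-level `mvn.mvndst` routine; `theta`
# plays the role of the correlation parameter above.
import numpy as np
from scipy.stats import norm, multivariate_normal

def gaussian_copula_logcdf(samples, theta):
    samples = np.asarray(samples, dtype=float)
    upper = norm.ppf(samples)  # map uniforms to standard-normal quantiles
    cov = np.array([[1.0, theta], [theta, 1.0]])
    cdf = np.array([multivariate_normal.cdf(u, mean=np.zeros(2), cov=cov)
                    for u in upper])
    with np.errstate(divide='ignore'):
        return np.log(cdf)

# gaussian_copula_logcdf([[0.5, 0.5]], theta=0.3)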
def munch(self, data, panel_params):
ranges = self.range(panel_params)
data.loc[data['x'] == -np.inf, 'x'] = ranges.x[0]
data.loc[data['x'] == np.inf, 'x'] = ranges.x[1]
data.loc[data['y'] == -np.inf, 'y'] = ranges.y[0]
data.loc[data['y'] == np.inf, 'y'] = ranges.y[1]
dist = self.distance(data['x'], data['y'], panel_params)
    bool_idx = (data['group'].iloc[1:].values !=
                data['group'].iloc[:-1].values)
dist[bool_idx] = np.nan
# Munch
munched = munch_data(data, dist)
return munched
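# Minimal sketch of the infinity-clamping step in `munch` above: coordinates
# at +/-inf are snapped to the edges of a (hypothetical) panel range before
# any distance computation, so downstream geometry stays finite.
import numpy as np
import pandas as pd

df = pd.DataFrame({'x': [-np.inf, 0.5, np.inf], 'y': [0.0, np.inf, 1.0]})
x_range, y_range = (0.0, 1.0), (0.0, 1.0)
df.loc[df['x'] == -np.inf, 'x'] = x_range[0]
df.loc[df['x'] == np.inf, 'x'] = x_range[1]
df.loc[df['y'] == -np.inf, 'y'] = y_range[0]
df.loc[df['y'] == np.inf, 'y'] = y_range[1]
# df now holds only finite coordinates, clipped to the panel edges.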
def test_remove_missing():
    df = pd.DataFrame({'a': [1.0, np.nan, 3, np.inf],
'b': [1, 2, 3, 4]})
df2 = pd.DataFrame({'a': [1.0, 3, np.inf],
'b': [1, 3, 4]})
df3 = pd.DataFrame({'a': [1.0, 3],
'b': [1, 3]})
with warnings.catch_warnings(record=True) as w:
        res = remove_missing(df, na_rm=True, vars=['b'])
        assert res.equals(df)
        res = remove_missing(df)
        assert res.equals(df2)
        res = remove_missing(df, na_rm=True, finite=True)
        assert res.equals(df3)
assert len(w) == 1
def test_removes_infinite_values():
df = mtcars.copy()
df.loc[[0, 5], 'wt'] = [np.inf, -np.inf]
p = ggplot(df, aes(x='wt')) + geom_bar()
with pytest.warns(UserWarning) as record:
p._build()
def removed_2_row_with_infinites(record):
for item in record:
msg = str(item.message).lower()
if '2 rows' in msg and 'non-finite' in msg:
return True
return False
assert removed_2_row_with_infinites(record)
def process(self, **kwargs):
"""Process module."""
self._times = kwargs[self.key('dense_times')]
self._alpha = kwargs[self.key('alpha')]
self._beta = kwargs[self.key('beta')]
self._t_peak = kwargs[self.key('tpeak')]
self._lum_scale = kwargs[self.key('lumscale')]
self._rest_t_explosion = kwargs[self.key('resttexplosion')]
ts = [
np.inf
if self._rest_t_explosion > x else (x - self._rest_t_explosion)
for x in self._times
]
luminosities = [
self._lum_scale * (1.0 - np.exp(-t / self._t_peak)) **
self._alpha * (t / self._t_peak) ** (-self._beta) for t in ts
]
luminosities = [0.0 if isnan(x) else x for x in luminosities]
return {self.dense_key('luminosities'): luminosities}
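# Hedged numpy sketch of the rise/decay model in `process` above: times before
# explosion map to t = inf (exponential term -> 1, power-law term -> 0), and
# the t = 0 edge case that yields NaN is zeroed out, mirroring the `isnan`
# cleanup in the original.
import numpy as np

def luminosity(times, t_explosion, t_peak, alpha, beta, lum_scale):
    times = np.asarray(times, dtype=float)
    t = np.where(times >= t_explosion, times - t_explosion, np.inf)
    with np.errstate(invalid='ignore', divide='ignore'):
        lum = (lum_scale * (1.0 - np.exp(-t / t_peak)) ** alpha
               * (t / t_peak) ** (-beta))
    return np.nan_to_num(lum, nan=0.0)

# luminosity([0.5, 2.0, 10.0], t_explosion=1.0, t_peak=3.0,
#            alpha=2.0, beta=1.5, lum_scale=1e43)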
def process(self, **kwargs):
"""Process module."""
self._times = kwargs[self.key('dense_times')]
self._mnickel = kwargs[self.key('fnickel')] * kwargs[
self.key('mejecta')]
self._rest_t_explosion = kwargs[self.key('resttexplosion')]
# From 1994ApJS...92..527N
ts = [
np.inf
if self._rest_t_explosion > x else (x - self._rest_t_explosion)
for x in self._times
]
luminosities = [
self._mnickel * (self.NI56_LUM * np.exp(-t / self.NI56_LIFE) +
self.CO56_LUM * np.exp(-t / self.CO56_LIFE))
for t in ts
]
luminosities = [0.0 if isnan(x) else x for x in luminosities]
return {self.dense_key('luminosities'): luminosities}
def pick_n_hidden(data, repeat=1, verbose=False, **kwargs):
"""A helper function to pick the number of hidden factors / clusters to use."""
# TODO: Use an efficient search strategy
max_score = - np.inf
n = 1
all_scores = []
while True:
scores = []
for _ in range(repeat):
out = Corex(n_hidden=n, gpu=False, **kwargs).fit(data)
m = out.moments
scores.append(m["TC_no_overlap"])
score = max(scores)
if verbose:
print(("n: {}, score: {}".format(n, score)))
all_scores.append((score, n))
if score < 0.95 * max_score:
break
else:
n += 1
if score > max_score:
max_score = score
return all_scores
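# Hedged usage sketch for `pick_n_hidden` (names as assumed above: `Corex` is
# the CorEx estimator this module wraps, `data` an (n_samples, n_variables)
# array); the helper returns (score, n) pairs for every n it tried.
# import numpy as np
# data = np.random.randn(500, 20)
# all_scores = pick_n_hidden(data, repeat=3, verbose=True)
# best_score, best_n = max(all_scores)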
def help_generate_np_gives_adversarial_example(self, ord):
x_val = np.random.rand(100, 2)
x_val = np.array(x_val, dtype=np.float32)
x_adv = self.attack.generate_np(x_val, eps=.5, ord=ord,
clip_min=-5, clip_max=5)
if ord == np.inf:
delta = np.max(np.abs(x_adv - x_val), axis=1)
elif ord == 1:
delta = np.sum(np.abs(x_adv - x_val), axis=1)
    elif ord == 2:
        delta = np.sum(np.square(x_adv - x_val), axis=1)**.5
    else:
        raise ValueError("ord must be np.inf, 1, or 2")
self.assertClose(delta, 0.5)
orig_labs = np.argmax(self.sess.run(self.model(x_val)), axis=1)
new_labs = np.argmax(self.sess.run(self.model(x_adv)), axis=1)
self.assertTrue(np.mean(orig_labs == new_labs) < 0.5)
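# Standalone check of the three perturbation norms tested above: for a batch
# of difference vectors, L-inf is the largest absolute coordinate, L1 the sum
# of absolute coordinates, and L2 the Euclidean length.
import numpy as np

diff = np.array([[0.3, -0.4], [0.5, 0.0]])
linf = np.max(np.abs(diff), axis=1)          # [0.4, 0.5]
l1 = np.sum(np.abs(diff), axis=1)            # [0.7, 0.5]
l2 = np.sum(np.square(diff), axis=1) ** .5   # [0.5, 0.5]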
def test_attack_strength(self):
"""
If clipping is not done at each iteration (not passing clip_min and
clip_max to fgm), this attack fails by
np.mean(orig_labels == new_labels) == .39.
"""
x_val = np.random.rand(100, 2)
x_val = np.array(x_val, dtype=np.float32)
x_adv = self.attack.generate_np(x_val, eps=1.0, ord=np.inf,
clip_min=0.5, clip_max=0.7,
nb_iter=5)
orig_labs = np.argmax(self.sess.run(self.model(x_val)), axis=1)
new_labs = np.argmax(self.sess.run(self.model(x_adv)), axis=1)
self.assertTrue(np.mean(orig_labs == new_labs) < 0.1)
def ExpM(self):
"""
Approximate a signal via element-wise exponentiation. As appears in :
S.I. Mimilakis, K. Drossos, T. Virtanen, and G. Schuller,
"Deep Neural Networks for Dynamic Range Compression in Mastering Applications,"
in proc. of the 140th Audio Engineering Society Convention, Paris, 2016.
    Uses (instance attributes rather than arguments):
        self._sTarget: (2D ndarray) Magnitude spectrogram of the target component
        self._nResidual: (2D ndarray) Magnitude spectrogram of the residual component
    Returns:
        mask: (2D ndarray) Array of time-frequency gain values
"""
print('Exponential mask')
self._mask = np.divide(np.log(self._sTarget.clip(self._eps, np.inf)**self._alpha),\
np.log(self._nResidual.clip(self._eps, np.inf)**self._alpha))
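# Self-contained sketch of the exponential mask computed by `ExpM` above, with
# hypothetical eps/alpha values and random magnitude spectrograms standing in
# for the instance attributes: an elementwise ratio of logs, with both inputs
# clipped away from zero before the logarithm.
import numpy as np

eps, alpha = 1e-16, 1.2
s_target = np.abs(np.random.randn(257, 100))    # stand-in for self._sTarget
n_residual = np.abs(np.random.randn(257, 100))  # stand-in for self._nResidual
mask = np.divide(np.log(s_target.clip(eps, np.inf) ** alpha),
                 np.log(n_residual.clip(eps, np.inf) ** alpha))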
def prior_contribution_phylogeny_parameters(self, state):
""" Evaluate prior probability of phylogeny mean/std
Log scale.
"""
mean_prior = scipy.stats.norm.logpdf(
state.phylogeny_mean,
loc = self.phylogeny_lambda_l,
scale = np.sqrt(self.phylogeny_mean_hyperprior_variance)
)
if (0. <= state.phylogeny_std and
state.phylogeny_std <= self.phylogeny_std_upper_bound):
std_prior = -1.0*np.log(self.phylogeny_std_upper_bound)
else:
std_prior = -np.inf
return mean_prior + std_prior
def getError(self):
#Test function
fx = lambda x, y: np.sin(2*np.pi*x)
fy = lambda x, y: np.sin(2*np.pi*y)
sol = lambda x, y: 2*np.pi*(np.cos(2*np.pi*x)+np.cos(2*np.pi*y))
Fc = cartF2(self.M, fx, fy)
F = self.M.projectFaceVector(Fc)
divF = self.M.faceDiv.dot(F)
divF_ana = call2(sol, self.M.gridCC)
err = np.linalg.norm((divF-divF_ana), np.inf)
# self.M.plotImage(divF-divF_ana, showIt=True)
return err
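# Note on the error metric shared by these `getError` tests: for a 1-D
# residual vector, np.linalg.norm(v, np.inf) is the maximum absolute entry,
# i.e. the worst-case pointwise discretization error.
import numpy as np

v = np.array([0.1, -0.7, 0.3])
assert np.linalg.norm(v, np.inf) == 0.7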
def getError(self):
# fun: i (cos(y)) + j (cos(z)) + k (cos(x))
# sol: i (sin(z)) + j (sin(x)) + k (sin(y))
funX = lambda x, y, z: np.cos(2*np.pi*y)
funY = lambda x, y, z: np.cos(2*np.pi*z)
funZ = lambda x, y, z: np.cos(2*np.pi*x)
solX = lambda x, y, z: 2*np.pi*np.sin(2*np.pi*z)
solY = lambda x, y, z: 2*np.pi*np.sin(2*np.pi*x)
solZ = lambda x, y, z: 2*np.pi*np.sin(2*np.pi*y)
Ec = cartE3(self.M, funX, funY, funZ)
E = self.M.projectEdgeVector(Ec)
Fc = cartF3(self.M, solX, solY, solZ)
curlE_ana = self.M.projectFaceVector(Fc)
curlE = self.M.edgeCurl.dot(E)
err = np.linalg.norm((curlE - curlE_ana), np.inf)
# err = np.linalg.norm((curlE - curlE_ana)*self.M.area, 2)
return err
def getError(self):
#Test function
fun = lambda x, y, z: (np.cos(x)+np.cos(y)+np.cos(z))
# i (sin(x)) + j (sin(y)) + k (sin(z))
solX = lambda x, y, z: -np.sin(x)
solY = lambda x, y, z: -np.sin(y)
solZ = lambda x, y, z: -np.sin(z)
phi = call3(fun, self.M.gridN)
gradE = self.M.nodalGrad.dot(phi)
Ec = cartE3(self.M, solX, solY, solZ)
gradE_ana = self.M.projectEdgeVector(Ec)
err = np.linalg.norm((gradE-gradE_ana), np.inf)
return err
def getError(self):
#Test function
fun = lambda x, y: (np.cos(x)+np.cos(y))
# i (sin(x)) + j (sin(y)) + k (sin(z))
solX = lambda x, y: -np.sin(x)
solY = lambda x, y: -np.sin(y)
phi = call2(fun, self.M.gridN)
gradE = self.M.nodalGrad.dot(phi)
Ec = cartE2(self.M, solX, solY)
gradE_ana = self.M.projectEdgeVector(Ec)
err = np.linalg.norm((gradE-gradE_ana), np.inf)
return err
def getError(self):
funR = lambda r, z: np.sin(2.*np.pi*r)
funZ = lambda r, z: np.sin(2.*np.pi*z)
sol = lambda r, t, z: (2*np.pi*r*np.cos(2*np.pi*r) + np.sin(2*np.pi*r))/r + 2*np.pi*np.cos(2*np.pi*z)
Fc = cylF2(self.M, funR, funZ)
Fc = np.c_[Fc[:, 0], np.zeros(self.M.nF), Fc[:, 1]]
F = self.M.projectFaceVector(Fc)
divF = self.M.faceDiv.dot(F)
divF_ana = call3(sol, self.M.gridCC)
err = np.linalg.norm((divF-divF_ana), np.inf)
return err
def getError(self):
funR = lambda r, z: np.sin(2.*np.pi*z) * np.cos(np.pi*r)
funZ = lambda r, z: np.sin(3.*np.pi*z) * np.cos(2.*np.pi*r)
Fc = cylF2(self.M, funR, funZ)
Fc = np.c_[Fc[:, 0], np.zeros(self.M.nF), Fc[:, 1]]
F = self.M.projectFaceVector(Fc)
aveF = self.M.aveF2CCV * F
aveF_anaR = funR(self.M.gridCC[:, 0], self.M.gridCC[:, 2])
aveF_anaZ = funZ(self.M.gridCC[:, 0], self.M.gridCC[:, 2])
aveF_ana = np.hstack([aveF_anaR, aveF_anaZ])
err = np.linalg.norm((aveF-aveF_ana), np.inf)
return err
def getError(self):
#Test function
fx = lambda x: -2*np.pi*np.sin(2*np.pi*x)
sol = lambda x: np.cos(2*np.pi*x)
xc = sol(self.M.gridCC)
gradX_ana = fx(self.M.gridFx)
bc = np.array([1,1])
self.M.setCellGradBC('dirichlet')
gradX = self.M.cellGrad.dot(xc) + self.M.cellGradBC*bc
err = np.linalg.norm((gradX-gradX_ana), np.inf)
return err
def getError(self):
#Test function
fx = lambda x, y: 2*np.pi*np.cos(2*np.pi*x)*np.sin(2*np.pi*y)
fy = lambda x, y: 2*np.pi*np.cos(2*np.pi*y)*np.sin(2*np.pi*x)
sol = lambda x, y: np.sin(2*np.pi*x)*np.sin(2*np.pi*y)
xc = call2(sol, self.M.gridCC)
Fc = cartF2(self.M, fx, fy)
gradX_ana = self.M.projectFaceVector(Fc)
self.M.setCellGradBC('dirichlet')
gradX = self.M.cellGrad.dot(xc)
err = np.linalg.norm((gradX-gradX_ana), np.inf)
return err
def _ncc_c(x, y):
"""
>>> _ncc_c([1,2,3,4], [1,2,3,4])
array([ 0.13333333, 0.36666667, 0.66666667, 1. , 0.66666667,
0.36666667, 0.13333333])
>>> _ncc_c([1,1,1], [1,1,1])
array([ 0.33333333, 0.66666667, 1. , 0.66666667, 0.33333333])
>>> _ncc_c([1,2,3], [-1,-1,-1])
array([-0.15430335, -0.46291005, -0.9258201 , -0.77151675, -0.46291005])
"""
den = np.array(norm(x) * norm(y))
    den[den == 0] = np.inf
x_len = len(x)
fft_size = 1<<(2*x_len-1).bit_length()
cc = ifft(fft(x, fft_size) * np.conj(fft(y, fft_size)))
cc = np.concatenate((cc[-(x_len-1):], cc[:x_len]))
return np.real(cc) / den
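# Hedged usage sketch for `_ncc_c` (FFT-based normalized cross-correlation),
# assuming `norm` is numpy.linalg.norm and fft/ifft come from numpy.fft as in
# the doctest above: the argmax of the output gives the best alignment lag.
# import numpy as np
# cc = _ncc_c([1, 2, 3, 4], [1, 2, 3, 4])
# lag = np.argmax(cc) - (len([1, 2, 3, 4]) - 1)   # 0: already aligned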
def initialize(self, length=None):
"""see ``__init__``"""
if length is None:
length = len(self.bounds)
max_i = min((len(self.bounds) - 1, length - 1))
    self._lb = array([self.bounds[min((i, max_i))][0]
                      if self.bounds[min((i, max_i))][0] is not None
                      else -np.inf
                      for i in range(length)], copy=False)
    self._ub = array([self.bounds[min((i, max_i))][1]
                      if self.bounds[min((i, max_i))][1] is not None
                      else np.inf
                      for i in range(length)], copy=False)
lb = self._lb
ub = self._ub
# define added values for lower and upper bound
self._al = array([min([(ub[i] - lb[i]) / 2, (1 + np.abs(lb[i])) / 20])
if isfinite(lb[i]) else 1 for i in rglen(lb)], copy=False)
self._au = array([min([(ub[i] - lb[i]) / 2, (1 + np.abs(ub[i])) / 20])
if isfinite(ub[i]) else 1 for i in rglen(ub)], copy=False)
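# Compact standalone illustration of the bound-expansion idiom in `initialize`
# above: `None` entries become +/-inf, and the last bound pair is recycled to
# cover extra dimensions (here length=4 with two pairs given).
import numpy as np

bounds = [(0.0, None), (None, 5.0)]
length, max_i = 4, len(bounds) - 1
lb = np.array([bounds[min(i, max_i)][0]
               if bounds[min(i, max_i)][0] is not None else -np.inf
               for i in range(length)])
ub = np.array([bounds[min(i, max_i)][1]
               if bounds[min(i, max_i)][1] is not None else np.inf
               for i in range(length)])
# lb -> [0., -inf, -inf, -inf];  ub -> [inf, 5., 5., 5.]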
def SLcomputeSNR(X, Xnoisy):
"""
SLcomputeSNR Compute signal to noise ratio (SNR).
Usage:
SNR = SLcomputeSNR(X, Xnoisy)
Input:
X: 2D or 3D signal.
Xnoisy: 2D or 3D noisy signal.
Output:
SNR: The signal to noise ratio (in dB).
"""
    if np.linalg.norm(X - Xnoisy) == 0:
        return np.inf
    else:
        return 10 * np.log10(np.sum(np.power(X, 2)) /
                             np.sum(np.power(X - Xnoisy, 2)))
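# Hedged usage example for SLcomputeSNR: add white noise at one tenth of the
# signal scale to a toy image; the expected SNR is roughly 20 dB.
import numpy as np

rng = np.random.default_rng(0)
X = rng.standard_normal((64, 64))
Xnoisy = X + 0.1 * rng.standard_normal((64, 64))
# SLcomputeSNR(X, Xnoisy)  ->  about 20 dB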
def __init__(self, shape, z0rep_axes=(0,), z1rep_axes=(0,), map_est=False):
Estim.__init__(self)
self.shape = shape
ndim = len(shape)
if z0rep_axes == 'all':
z0rep_axes = tuple(range(ndim))
if z1rep_axes == 'all':
z1rep_axes = tuple(range(ndim))
self.z0rep_axes = z0rep_axes
self.z1rep_axes = z1rep_axes
self.cost_avail = True
self.map_est = map_est
# Initial variances
    self.zvar0_init = np.inf
    self.zvar1_init = np.inf
def __init__(self, y, shape, zrep_axes=(0,), thresh=0, perr=1e-6,
             var_init=np.inf):
Estim.__init__(self)
self.y = y
self.shape = shape
self.thresh = thresh
self.perr = perr
self.cost_avail = True
self.var_init = var_init
# Set the repetition axes
ndim = len(self.shape)
if zrep_axes == 'all':
zrep_axes = tuple(range(ndim))
self.zrep_axes = zrep_axes
def _reset(self):
"""Resets wait counter and cooldown counter.
"""
if self.mode not in ['auto', 'min', 'max']:
warnings.warn('Learning Rate Plateau Reducing mode %s is unknown, '
'fallback to auto mode.' % (self.mode),
RuntimeWarning)
self.mode = 'auto'
if (self.mode == 'min' or
(self.mode == 'auto' and 'acc' not in self.monitor)):
self.monitor_op = lambda a, b: np.less(a, b - self.epsilon)
        self.best = np.inf
else:
self.monitor_op = lambda a, b: np.greater(a, b + self.epsilon)
        self.best = -np.inf
self.cooldown_counter = 0
self.wait = 0
self.lr_epsilon = self.min_lr * 1e-4
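# Standalone sketch of the 'min'-mode comparison set up in `_reset` above:
# `best` starts at +inf so the first observed metric always counts as an
# improvement, and the epsilon margin keeps noise-level changes from
# registering as progress.
import numpy as np

epsilon = 1e-4
monitor_op = lambda a, b: np.less(a, b - epsilon)  # 'min' mode
best = np.inf
for metric in [0.90, 0.85, 0.84995]:
    if monitor_op(metric, best):
        best = metric  # updates to 0.90, then 0.85; 0.84995 is within epsilon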
def test_invalid_nbins():
with raises(ValueError):
        ew = graynet.extract(subject_id_list, fs_dir, num_bins=np.nan)
with raises(ValueError):
        ew = graynet.extract(subject_id_list, fs_dir, num_bins=np.inf)
with raises(ValueError):
ew = graynet.extract(subject_id_list, fs_dir, num_bins=2)
# test_multi_edge()
# test_multi_edge_CLI()
# test_empty_subject_list()
# test_run_no_IO()
# test_run_roi_stats_via_API()
# test_run_roi_stats_via_CLI()
# test_CLI_only_weight_or_stats()