import numpy

from cupy.linalg import decomposition  # assumed: the SVD provider used by this CuPy snippet


def matrix_rank(M, tol=None):
    """Return matrix rank of array using SVD method.

    Args:
        M (cupy.ndarray): Input array. Its `ndim` must be less than or
            equal to 2.
        tol (None or float): Threshold below which singular values are
            considered zero. When `tol` is `None`, and `eps` is the
            epsilon value for the datatype of `M`, then `tol` is set to
            `S.max() * max(M.shape) * eps`, where `S` holds the singular
            values of `M`. This follows :func:`numpy.linalg.matrix_rank`.

    Returns:
        cupy.ndarray: Rank of `M`.

    .. seealso:: :func:`numpy.linalg.matrix_rank`
    """
if M.ndim < 2:
return (M != 0).any().astype('l')
S = decomposition.svd(M, compute_uv=False)
if tol is None:
tol = (S.max(axis=-1, keepdims=True) * max(M.shape[-2:]) *
numpy.finfo(S.dtype).eps)
return (S > tol).sum(axis=-1)
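A minimal usage sketch (assumes CuPy is installed; the function mirrors numpy.linalg.matrix_rank):

import cupy as cp

A = cp.array([[1., 2.], [2., 4.]])  # second row is twice the first, so rank 1
print(matrix_rank(A))  # -> 1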
Python finfo() usage examples
def test_eos_masking(self):
probs = tf.constant([[-.2, -.2, -.2, -.2, -.2], [-.3, -.3, -.3, 3, 0],
[5, 6, 0, 0, 0]])
eos_token = 0
previously_finished = tf.constant([0, 1, 0], dtype=tf.float32)
masked = beam_search.mask_probs(probs, eos_token, previously_finished)
with self.test_session() as sess:
probs = sess.run(probs)
masked = sess.run(masked)
np.testing.assert_array_equal(probs[0], masked[0])
np.testing.assert_array_equal(probs[2], masked[2])
np.testing.assert_equal(masked[1][0], 0)
np.testing.assert_approx_equal(masked[1][1], np.finfo('float32').min)
np.testing.assert_approx_equal(masked[1][2], np.finfo('float32').min)
np.testing.assert_approx_equal(masked[1][3], np.finfo('float32').min)
np.testing.assert_approx_equal(masked[1][4], np.finfo('float32').min)
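The test relies on np.finfo('float32').min standing in for minus infinity when masking logits; the constant itself:

import numpy as np

print(np.finfo('float32').min)  # -3.4028235e+38, the most negative finite float32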
def EStep(self):
P = np.zeros((self.M, self.N))
for i in range(0, self.M):
diff = self.X - np.tile(self.TY[i, :], (self.N, 1))
diff = np.multiply(diff, diff)
P[i, :] = P[i, :] + np.sum(diff, axis=1)
c = (2 * np.pi * self.sigma2) ** (self.D / 2)
c = c * self.w / (1 - self.w)
c = c * self.M / self.N
P = np.exp(-P / (2 * self.sigma2))
den = np.sum(P, axis=0)
den = np.tile(den, (self.M, 1))
    den[den == 0] = np.finfo(float).eps
    den += c  # uniform-outlier term of the CPD posterior; without it, c above is unused
    self.P = np.divide(P, den)
self.Pt1 = np.sum(self.P, axis=0)
self.P1 = np.sum(self.P, axis=1)
self.Np = np.sum(self.P1)
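The den[den == 0] assignment above guards the division; the same pattern in isolation:

import numpy as np

den = np.array([0.0, 2.0])
den[den == 0] = np.finfo(float).eps  # replace exact zeros before dividing
print(1.0 / den)  # finite everywhere, no divide-by-zero warning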
import shutil

import h5py
import numpy as np


def mean_variance_normalisation(h5f, mvn_h5f, vad=None):
    """Do mean-variance normalization. Optionally use a VAD.

    Parameters
    ----------
    h5f: str, h5features input file name
    mvn_h5f: str, h5features output file name
    """
    dset = list(h5py.File(h5f, 'r').keys())[0]  # keys() is a view in Python 3
    if vad is not None:
        raise NotImplementedError
    else:
        data = h5py.File(h5f, 'r')[dset]['features'][:]
        features = data
    epsilon = np.finfo(data.dtype).eps
    mean = np.mean(data)
    std = np.std(data)
    mvn_features = (features - mean) / (std + epsilon)
    shutil.copy(h5f, mvn_h5f)
    # open writable: h5py 3 opens files read-only by default
    h5py.File(mvn_h5f, 'r+')[dset]['features'][:] = mvn_features
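Adding machine epsilon to the standard deviation keeps the normalization finite even for constant input; a toy check:

import numpy as np

data = np.ones(5)  # zero variance
eps = np.finfo(data.dtype).eps
print((data - data.mean()) / (data.std() + eps))  # [0. 0. 0. 0. 0.]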
def test_two_factors(self):
"""Tests the alias list for two identical factors."""
factor_data = [
[-1, -1],
[-1, -1],
[1, 1],
[1, 1],
[0, 0]
]
factor_names = design.get_factor_names(len(factor_data[0]))
factor_data = pd.DataFrame(factor_data, columns=factor_names)
aliases, alias_coefs = alias_list("X1+X2", factor_data)
answer = [[1, 0, 0], [0, 1, 1]]
np.testing.assert_allclose(alias_coefs,
answer,
rtol=1e-4,
atol=np.finfo(float).eps)
answer_list = ["X1 = X2"]
self.assertEqual(answer_list, aliases)
def __init__(self, mX, sTarget, nResidual, psTarget = [], pnResidual = [], alpha = 1.2, method = 'Wiener'):
self._mX = mX
    self._eps = np.finfo(float).eps  # np.float alias was removed in NumPy 1.24
self._sTarget = sTarget
self._nResidual = nResidual
self._pTarget = psTarget
self._pY = pnResidual
self._mask = []
self._Out = []
self._alpha = alpha
self._method = method
self._iterations = 200
    self._lr = 1.5e-3  # previously 2e-3
self._hetaplus = 1.1
self._hetaminus = 0.1
self._amountiter = 0
import numpy as np


def xover(rate):
    """
    This is a mimic of a fwdpp
    recombination policy.

    We return a sorted list of breakpoints
    on the interval [0,1). The list is capped
    with the max value of a float (C/C++ double),
    which is a trick fwdpp uses.

    It happens that we generate the exact same value
    from time to time. Internally, fwdpp doesn't care,
    and recognizes that as a "double x-over". However,
    msprime cares, because it results in an edge with
    left == right and an Exception gets raised. So,
    we purge double x-overs via np.unique.
    """
nbreaks = np.random.poisson(rate)
if nbreaks == 0:
        return np.empty([0], dtype=np.float64)  # np.float alias removed in NumPy 1.24
rv = np.random.random_sample(nbreaks)
rv = np.unique(rv)
    rv = np.insert(rv, len(rv), np.finfo(np.float64).max)
return rv
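A usage sketch; whenever breakpoints are generated, the sentinel cap is the last element:

bp = xover(2.0)
if bp.size:
    print(bp[-1] == np.finfo(np.float64).max)  # True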
def test_against_cmath(self):
import cmath
points = [-1-1j, -1+1j, +1-1j, +1+1j]
name_map = {'arcsin': 'asin', 'arccos': 'acos', 'arctan': 'atan',
'arcsinh': 'asinh', 'arccosh': 'acosh', 'arctanh': 'atanh'}
    atol = 4 * np.finfo(complex).eps  # np.complex alias removed in NumPy 1.24
for func in self.funcs:
fname = func.__name__.split('.')[-1]
cname = name_map.get(fname, fname)
try:
cfunc = getattr(cmath, cname)
except AttributeError:
continue
for p in points:
            a = complex(func(np.complex128(p)))  # np.complex_ removed in NumPy 2.0
b = cfunc(p)
assert_(abs(a - b) < atol, "%s %s: %s; cmath: %s" % (fname, p, a, b))
def _test_type_repr(self, t):
finfo = np.finfo(t)
last_fraction_bit_idx = finfo.nexp + finfo.nmant
last_exponent_bit_idx = finfo.nexp
storage_bytes = np.dtype(t).itemsize*8
# could add some more types to the list below
for which in ['small denorm', 'small norm']:
# Values from http://en.wikipedia.org/wiki/IEEE_754
constr = np.array([0x00]*storage_bytes, dtype=np.uint8)
if which == 'small denorm':
byte = last_fraction_bit_idx // 8
bytebit = 7-(last_fraction_bit_idx % 8)
constr[byte] = 1 << bytebit
elif which == 'small norm':
byte = last_exponent_bit_idx // 8
bytebit = 7-(last_exponent_bit_idx % 8)
constr[byte] = 1 << bytebit
else:
raise ValueError('hmm')
val = constr.view(t)[0]
val_repr = repr(val)
val2 = t(eval(val_repr))
if not (val2 == 0 and val < 1e-100):
assert_equal(val, val2)
def test_complex128_pass(self):
nulp = 5
x = np.linspace(-20, 20, 50, dtype=np.float64)
x = 10**x
x = np.r_[-x, x]
xi = x + x*1j
eps = np.finfo(x.dtype).eps
y = x + x*eps*nulp/2.
assert_array_almost_equal_nulp(xi, x + y*1j, nulp)
assert_array_almost_equal_nulp(xi, y + x*1j, nulp)
# The test condition needs to be at least a factor of sqrt(2) smaller
# because the real and imaginary parts both change
y = x + x*eps*nulp/4.
assert_array_almost_equal_nulp(xi, y + y*1j, nulp)
epsneg = np.finfo(x.dtype).epsneg
y = x - x*epsneg*nulp/2.
assert_array_almost_equal_nulp(xi, x + y*1j, nulp)
assert_array_almost_equal_nulp(xi, y + x*1j, nulp)
y = x - x*epsneg*nulp/4.
assert_array_almost_equal_nulp(xi, y + y*1j, nulp)
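The test uses both eps (spacing just above 1.0) and epsneg (spacing just below 1.0), which differ by a factor of two for binary floats:

import numpy as np

fi = np.finfo(np.float64)
print(fi.eps)     # 2.220446049250313e-16
print(fi.epsneg)  # 1.1102230246251565e-16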
def negativeLogLikelihoodWeighted(self, y, weightPerClass):
#Weighting the cost of the different classes in the cost-function, in order to counter class imbalance.
e1 = np.finfo(np.float32).tiny
addTinyProbMatrix = T.lt(self.p_y_given_x_train, 4*e1) * e1
weights = weightPerClass.dimshuffle('x', 0, 'x', 'x', 'x')
log_p_y_given_x_train = T.log(self.p_y_given_x_train + addTinyProbMatrix)
weighted_log_probs = log_p_y_given_x_train * weights
wShape = weighted_log_probs.shape
# Re-arrange
idx0 = T.arange( wShape[0] ).dimshuffle( 0, 'x','x','x')
idx2 = T.arange( wShape[2] ).dimshuffle('x', 0, 'x','x')
idx3 = T.arange( wShape[3] ).dimshuffle('x','x', 0, 'x')
idx4 = T.arange( wShape[4] ).dimshuffle('x','x','x', 0)
return -T.mean( weighted_log_probs[ idx0, y, idx2, idx3, idx4] )
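np.finfo(np.float32).tiny is the smallest positive normal float32; adding a multiple of it keeps the log finite when a probability underflows to zero:

import numpy as np

e1 = np.finfo(np.float32).tiny
print(e1)              # 1.1754944e-38
print(np.log(4 * e1))  # about -86.0, instead of -inf for log(0)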
cochleagram_extractor.py, from the project speech_feature_extractor by ZhihaoDU:
import numpy as np

# make_erb_filters and erb_frilter_bank are helper functions from the same
# project; they are assumed to be importable alongside this snippet.


def cochleagram_extractor(xx, sr, win_len, shift_len, channel_number, win_type):
    fcoefs, f = make_erb_filters(sr, channel_number, 50)
    fcoefs = np.flipud(fcoefs)
    xf = erb_frilter_bank(xx, fcoefs)
if win_type == 'hanning':
window = np.hanning(channel_number)
elif win_type == 'hamming':
window = np.hamming(channel_number)
elif win_type == 'triangle':
window = (1 - (np.abs(channel_number - 1 - 2 * np.arange(1, channel_number + 1, 1)) / (channel_number + 1)))
else:
window = np.ones(channel_number)
window = window.reshape((channel_number, 1))
xe = np.power(xf, 2.0)
frames = 1 + ((np.size(xe, 1)-win_len) // shift_len)
cochleagram = np.zeros((channel_number, frames))
for i in range(frames):
one_frame = np.multiply(xe[:, i*shift_len:i*shift_len+win_len], np.repeat(window, win_len, 1))
cochleagram[:, i] = np.sqrt(np.mean(one_frame, 1))
cochleagram = np.where(cochleagram == 0.0, np.finfo(float).eps, cochleagram)
return cochleagram
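The final np.where call floors exact zeros at machine epsilon so downstream logs stay finite; the pattern in isolation:

import numpy as np

x = np.array([0.0, 0.5, 2.0])
print(np.where(x == 0.0, np.finfo(float).eps, x))  # zeros become ~2.22e-16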
# log_power_spectrum_extractor, freq2bark, get_fft_bark_mat, rasta_filt,
# postaud and do_lpc are project-local helpers, assumed importable here.
def rasta_plp_extractor(x, sr, plp_order=0, do_rasta=True):
    spec = log_power_spectrum_extractor(x, int(sr * 0.02), int(sr * 0.01), 'hamming', False)
bark_filters = int(np.ceil(freq2bark(sr//2)))
wts = get_fft_bark_mat(sr, int(sr*0.02), bark_filters)
bark_spec = np.matmul(wts, spec)
if do_rasta:
bark_spec = np.where(bark_spec == 0.0, np.finfo(float).eps, bark_spec)
log_bark_spec = np.log(bark_spec)
rasta_log_bark_spec = rasta_filt(log_bark_spec)
bark_spec = np.exp(rasta_log_bark_spec)
post_spec = postaud(bark_spec, sr/2.)
if plp_order > 0:
lpcas = do_lpc(post_spec, plp_order)
else:
lpcas = post_spec
return lpcas
def __init__(self, agent, env, n_runs, eval_interval,
outdir, max_episode_len=None, explorer=None,
step_offset=0, logger=None):
self.agent = agent
self.env = env
self.max_score = np.finfo(np.float32).min
self.start_time = time.time()
self.n_runs = n_runs
self.eval_interval = eval_interval
self.outdir = outdir
self.max_episode_len = max_episode_len
self.explorer = explorer
self.step_offset = step_offset
self.prev_eval_t = (self.step_offset -
self.step_offset % self.eval_interval)
self.logger = logger or logging.getLogger(__name__)
# Write a header line first
with open(os.path.join(self.outdir, 'scores.txt'), 'w') as f:
custom_columns = tuple(t[0] for t in self.agent.get_statistics())
column_names = _basic_columns + custom_columns
print('\t'.join(column_names), file=f)
def __init__(self, n_runs, eval_interval,
outdir, max_episode_len=None, explorer=None,
step_offset=0, logger=None):
self.start_time = time.time()
self.n_runs = n_runs
self.eval_interval = eval_interval
self.outdir = outdir
self.max_episode_len = max_episode_len
self.explorer = explorer
self.step_offset = step_offset
self.logger = logger or logging.getLogger(__name__)
# Values below are shared among processes
self.prev_eval_t = mp.Value(
'l', self.step_offset - self.step_offset % self.eval_interval)
self._max_score = mp.Value('f', np.finfo(np.float32).min)
self.wrote_header = mp.Value('b', False)
# Create scores.txt
with open(os.path.join(self.outdir, 'scores.txt'), 'a'):
pass
def _calculate_new_weights(self, instance_probabilites, bag_probabilities):
weights = []
for p_ij, p_i, Y_i in zip(self._bag_split(instance_probabilites),
bag_probabilities,
self._bag_labels):
if Y_i > 0:
if p_i == 0.0:
p_i = np.finfo(float).resolution
term_1 = (2 * p_ij * (1 - p_ij)) / p_i
else:
if p_i == 1.0:
p_i = 1 - np.finfo(float).resolution
term_1 = -((2 * p_ij * (1 - p_ij)) / (1 - p_i))
weights += (term_1 * self.softmax_fcn.d_dt(p_ij)).tolist()
return np.array(weights) / np.sum(np.abs(weights))
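np.finfo(float).resolution (1e-15 for float64) is used here to clamp p_i away from exactly 0 or 1, keeping the division in term_1 finite:

import numpy as np

r = np.finfo(float).resolution
print(r)        # 1e-15
print(1.0 / r)  # 1e+15, finite where 1.0 / 0.0 would warn and give inf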
def randn_abs_clip(self, axes, clip_min=0, clip_max=0, dtype=None):
"""
Returns a tensor initialized with a absolute value of normal distribution
with mean 0 and std 1 clipped to given range
Arguments:
axes: The axes of the tensor.
clip_min: If supplied number below this value are clipped to this value
clip_max: If supplied number above this value are clipped to this value
dtype: If supplied, the type of the values.
Returns:
The initialized tensor.
"""
if dtype is None:
dtype = self.dtype
if clip_max == 0:
clip_max = np.finfo(dtype).max
return np.clip(np.absolute(np.random.randn(*axes.lengths)), clip_min, clip_max)
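Passing np.finfo(dtype).max as the ceiling makes np.clip effectively one-sided; a standalone sketch of that default:

import numpy as np

vals = np.abs(np.random.randn(4))
print(np.clip(vals, 0.5, np.finfo(np.float64).max))  # only the lower bound bites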
def __init__(
self, initial_params=None, variance=1.0, covariance=None,
n_samples_per_update=None, active=False, bounds=None, maximize=True,
        min_variance=2 * np.finfo(float).eps ** 2,  # np.float alias removed in NumPy 1.24
        min_fitness_dist=2 * np.finfo(float).eps, max_condition=1e7,
log_to_file=False, log_to_stdout=False, random_state=None):
self.initial_params = initial_params
self.variance = variance
self.covariance = covariance
self.n_samples_per_update = n_samples_per_update
self.active = active
self.bounds = bounds
self.maximize = maximize
self.min_variance = min_variance
self.min_fitness_dist = min_fitness_dist
self.max_condition = max_condition
self.log_to_file = log_to_file
self.log_to_stdout = log_to_stdout
self.random_state = random_state
import numpy as np
from scipy.special import expit  # assumed source of expit for this snippet


def logistic_regression_cost_gradient(parameters, input, output):
"""
Cost and gradient for logistic regression
:param parameters: weight vector
:param input: feature vector
:param output: binary label (0 or 1)
:return: cost and gradient for the input and output
"""
prediction = expit(np.dot(input, parameters))
if output:
inside_log = prediction
else:
inside_log = 1.0 - prediction
    if inside_log != 0.0:
        cost = -np.log(inside_log)
    else:
        # -log(0) diverges to +inf; cap at the largest float. The original
        # used np.finfo(float).min, which has the wrong sign for a cost.
        cost = np.finfo(float).max
gradient = (prediction - output) * input
return cost, gradient
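A quick check: with zero weights the prediction is 0.5, so the cost is -log(0.5):

import numpy as np

w = np.zeros(3)
x = np.array([1.0, -2.0, 0.5])
cost, grad = logistic_regression_cost_gradient(w, x, 1)
print(cost)  # ~0.6931
print(grad)  # (0.5 - 1) * x = [-0.5, 1.0, -0.25]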
import numpy as np


def compute_overlap(a, b):
"""
Parameters
----------
a: (N, 4) ndarray of float
b: (K, 4) ndarray of float
Returns
-------
overlaps: (N, K) ndarray of overlap between boxes and query_boxes
"""
area = (b[:, 2] - b[:, 0] + 1) * (b[:, 3] - b[:, 1] + 1)
iw = np.minimum(np.expand_dims(a[:, 2], axis=1), b[:, 2]) - np.maximum(np.expand_dims(a[:, 0], 1), b[:, 0]) + 1
ih = np.minimum(np.expand_dims(a[:, 3], axis=1), b[:, 3]) - np.maximum(np.expand_dims(a[:, 1], 1), b[:, 1]) + 1
iw = np.maximum(iw, 0)
ih = np.maximum(ih, 0)
ua = np.expand_dims((a[:, 2] - a[:, 0] + 1) * (a[:, 3] - a[:, 1] + 1), axis=1) + area - iw * ih
ua = np.maximum(ua, np.finfo(float).eps)
intersection = iw * ih
return intersection / ua
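A small IoU check (the coordinates are inclusive pixel boxes, hence the +1 terms):

import numpy as np

a = np.array([[0., 0., 10., 10.]])
b = np.array([[0., 0., 10., 10.], [5., 5., 15., 15.]])
print(compute_overlap(a, b))  # [[1.0, ~0.175]]: identical box, then partial overlap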
def configureActions(self, discrete_actions):
    # true if action space is discrete: 3 values (no push, left, right)
    # false if action space is continuous: fx in (-action_force, action_force)
    self.discrete_actions = discrete_actions
    # 3 discrete actions: no push, left, right
    # 1 continuous action element: fx
if self.discrete_actions:
self.action_space = spaces.Discrete(3)
else:
self.action_space = spaces.Box(-1.0, 1.0, shape=(1, 1))
# Our observations can be within this box
float_max = np.finfo(np.float32).max
self.observation_space = gym.spaces.Box(-float_max, float_max, self.state_shape)
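np.finfo(np.float32).max stands in for unbounded Box limits; the constant itself:

import numpy as np

print(np.finfo(np.float32).max)  # 3.4028235e+38, used as +/- 'infinity' bounds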
def configureActions(self, discrete_actions):
# true if action space is discrete; 5 values; no push, left, right, up & down
# false if action space is continuous; fx, fy both (-action_force, action_force)
self.discrete_actions = discrete_actions
    # 5 discrete actions: no push, left, right, up, down
# 2 continuous action elements; fx & fy
if self.discrete_actions:
self.action_space = spaces.Discrete(5)
else:
self.action_space = spaces.Box(-1.0, 1.0, shape=(2,))
# Our observations can be within this box
float_max = np.finfo(np.float32).max
self.observation_space = gym.spaces.Box(-float_max, float_max, self.state_shape)
def __init__(self, model_xml, robot_name, timestep, frame_skip, action_dim, obs_dim, repeats):
self.action_space = gym.spaces.Box(-1.0, 1.0, shape=(action_dim,))
float_max = np.finfo(np.float32).max
# obs space for problem is (R, obs_dim)
# R = number of repeats
# obs_dim d tuple
self.state_shape = (repeats, obs_dim)
self.observation_space = gym.spaces.Box(-float_max, float_max, shape=self.state_shape)
# no state until reset.
self.state = np.empty(self.state_shape, dtype=np.float32)
self.frame_skip = frame_skip
self.timestep = timestep
self.model_xml = model_xml
    self.parts, self.joints = self.getScene(p.loadMJCF(model_xml))
self.robot_name = robot_name
self.dt = timestep * frame_skip
self.metadata = {
'render.modes': ['human', 'rgb_array'],
'video.frames_per_second': int(np.round(1.0 / timestep / frame_skip))
}
self._seed()
def configureActions(self, discrete_actions):
# if it is possible to switch actions, do this here
# true if action space is discrete
# false if action space is continuous
self.discrete_actions = discrete_actions
# if self.discrete_actions:
# self.action_space = spaces.Discrete(3)
# else:
# self.action_space = spaces.Box(-1.0, 1.0, shape=(1, 1))
# # Our observations can be within this box
# float_max = np.finfo(np.float32).max
# self.observation_space = gym.spaces.Box(-float_max, float_max, self.state_shape)
def _ncrs_python(self, Delta, delta, d, R, G):
if R == 0 or R < np.finfo(float).eps:
return 0
GAMMA = 267.5987E6
alpha_roots = jnp_zeros(1, 20) / R
    total = 0  # renamed from `sum` to avoid shadowing the built-in
    for i in range(20):
        alpha = alpha_roots[i]
        num = (2 * d * alpha**2 * delta
               - 2
               + 2 * np.exp(-d * alpha**2 * delta)
               + 2 * np.exp(-d * alpha**2 * Delta)
               - np.exp(-d * alpha**2 * (Delta - delta))
               - np.exp(-d * alpha**2 * (Delta + delta)))
        dem = d**2 * alpha**6 * (R**2 * alpha**2 - 1)
        total += (num / dem)
    return -2 * GAMMA**2 * G**2 * total
import numpy as np


def _sample_discrete_actions(batch_probs):
"""Sample a batch of actions from a batch of action probabilities.
Args:
batch_probs (ndarray): batch of action probabilities BxA
Returns:
List consisting of sampled actions
"""
action_indices = []
    # Subtract a tiny value from the probabilities in order to avoid
    # "ValueError: sum(pvals[:-1]) > 1.0" in numpy.random.multinomial
    batch_probs = batch_probs - np.finfo(np.float32).epsneg
for i in range(batch_probs.shape[0]):
histogram = np.random.multinomial(1, batch_probs[i])
action_indices.append(int(np.nonzero(histogram)[0]))
return action_indices
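A usage sketch; each row of batch_probs is one action distribution:

import numpy as np

probs = np.array([[0.2, 0.3, 0.5],
                  [0.9, 0.05, 0.05]], dtype=np.float32)
print(_sample_discrete_actions(probs))  # e.g. [2, 0]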
def make_2d_gaussian(self, center=(0, 0)):
    '''Makes a 2D Gaussian filter with arbitrary mean and variance.
Args:
center (tuple): The coordinates of the center of the Gaussian,
specified as :data:`(row, col)`. The center of the image is
:data:`(0, 0)`.
Returns:
numpy array: The Gaussian mask.
'''
sigma = self.sigma
n_rows = (self.patch_size - 1.) / 2.
n_cols = (self.patch_size - 1.) / 2.
y, x = np.ogrid[-n_rows: n_rows + 1, -n_cols: n_cols + 1]
y0, x0 = center[1], center[0]
gaussian_mask = np.exp(-((x - x0) ** 2 + (y - y0) ** 2) /
(2. * sigma ** 2))
gaussian_mask[gaussian_mask <
np.finfo(gaussian_mask.dtype).eps *
gaussian_mask.max()] = 0
gaussian_mask = 1. / gaussian_mask.max() * gaussian_mask
return gaussian_mask