def test_out_parameter(self):
""" Test that the kwargs ``out`` is correctly passed to reduction function """
with self.subTest('axis = -1'):
not_out = last(ireduce_ufunc(self.source, np.add, axis = -1))
out = np.empty_like(self.source[0])
last(ireduce_ufunc(self.source, ufunc = np.add, out = out))
self.assertTrue(np.allclose(not_out, out))
with self.subTest('axis != -1'):
not_out = last(ireduce_ufunc(self.source, np.add, axis = 2))
out = np.empty_like(self.source[0])
from_out = last(ireduce_ufunc(self.source, ufunc = np.add, out = out, axis = 2))
self.assertTrue(np.allclose(not_out, from_out))
def log_loss_value(Z, weights, total_weights, rho):
"""
computes the value of the logistic loss in a numerically stable way
supports a non-negative sample weight for each example in the training data
see http://stackoverflow.com/questions/20085768/
Parameters
----------
Z numpy.array containing training data with shape = (n_rows, n_cols)
rho numpy.array of coefficients with shape = (n_cols,)
total_weights scalar = numpy.sum(weights) (passed in to avoid recomputing it)
weights numpy.array of sample weights with shape (n_rows,)
Returns
-------
loss_value scalar = 1/total_weights * sum(weights * log(1 + exp(-Z*rho)))
"""
scores = Z.dot(rho)
pos_idx = scores > 0
loss_value = np.empty_like(scores)
loss_value[pos_idx] = np.log1p(np.exp(-scores[pos_idx]))
loss_value[~pos_idx] = -scores[~pos_idx] + np.log1p(np.exp(scores[~pos_idx]))
loss_value = loss_value.dot(weights) / total_weights
return loss_value
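# Usage sketch (hypothetical data): the stable computation above should agree
# with the naive weighted formula whenever the naive one does not overflow.
import numpy as np
Z = np.array([[1.0, 2.0], [-1.0, 0.5], [0.3, -2.0]])
rho = np.array([0.5, -0.25])
weights = np.array([1.0, 2.0, 1.0])
total_weights = weights.sum()
stable = log_loss_value(Z, weights, total_weights, rho)
naive = np.log1p(np.exp(-Z.dot(rho))).dot(weights) / total_weights
assert np.isclose(stable, naive)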
def log_loss_value(Z, rho):
"""
computes the value of the logistic loss in a numerically stable way
see also: http://stackoverflow.com/questions/20085768/
Parameters
----------
Z numpy.array containing training data with shape = (n_rows, n_cols)
rho numpy.array of coefficients with shape = (n_cols,)
Returns
-------
loss_value scalar = 1/n_rows * sum(log(1 + exp(-Z*rho)))
"""
scores = Z.dot(rho)
pos_idx = scores > 0
loss_value = np.empty_like(scores)
loss_value[pos_idx] = np.log1p(np.exp(-scores[pos_idx]))
loss_value[~pos_idx] = -scores[~pos_idx] + np.log1p(np.exp(scores[~pos_idx]))
loss_value = loss_value.mean()
return loss_value
def log_probs(Z, rho):
"""
compute the probabilities of the logistic loss function in a way that is numerically stable
see also: http://stackoverflow.com/questions/20085768/
Parameters
----------
Z numpy.array containing training data with shape = (n_rows, n_cols)
rho numpy.array of coefficients with shape = (n_cols,)
Returns
-------
log_probs numpy.array of probabilities under the logit model (despite the name, these are probabilities, not their logarithms)
"""
scores = Z.dot(rho)
pos_idx = scores > 0
log_probs = np.empty_like(scores)
log_probs[pos_idx] = 1.0 / (1.0 + np.exp(-scores[pos_idx]))
log_probs[~pos_idx] = np.exp(scores[~pos_idx]) / (1.0 + np.exp(scores[~pos_idx]))
return log_probs
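# Sanity check (sketch): the function should agree with scipy.special.expit
# of the scores, including large magnitudes that exercise both branches.
import numpy as np
from scipy.special import expit
Z = np.array([[30.0], [-30.0], [0.0]])
rho = np.array([1.0])
assert np.allclose(log_probs(Z, rho), expit(Z.dot(rho)))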
def sample(self):
"""
Draws either a single sample (if alpha.dim() == 1), or one sample per param (if alpha.dim() == 2).
(Un-reparameterized).
:param torch.autograd.Variable alpha: Dirichlet concentration parameter(s)
"""
alpha_np = self.alpha.data.cpu().numpy()
if self.alpha.dim() == 1:
x_np = spr.dirichlet.rvs(alpha_np)[0]
else:
x_np = np.empty_like(alpha_np)
for i in range(alpha_np.shape[0]):
x_np[i, :] = spr.dirichlet.rvs(alpha_np[i, :])[0]
x = Variable(type(self.alpha.data)(x_np))
return x
def black_scholes_numba(stockPrice, optionStrike,
optionYears, Riskfree, Volatility):
callResult = np.empty_like(stockPrice)
putResult = np.empty_like(stockPrice)
S = stockPrice
X = optionStrike
T = optionYears
R = Riskfree
V = Volatility
for i in range(len(S)):
sqrtT = math.sqrt(T[i])
d1 = (math.log(S[i] / X[i]) + (R + 0.5 * V * V) * T[i]) / (V * sqrtT)
d2 = d1 - V * sqrtT
cndd1 = cnd_numba(d1)
cndd2 = cnd_numba(d2)
expRT = math.exp((-1. * R) * T[i])
callResult[i] = (S[i] * cndd1 - X[i] * expRT * cndd2)
putResult[i] = (X[i] * expRT * (1.0 - cndd2) - S[i] * (1.0 - cndd1))
return callResult, putResult
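# The snippet assumes a cnd_numba helper. A minimal sketch of one possible
# implementation, expressing the standard normal CDF through math.erf
# (the original example may use a polynomial approximation instead):
import math
from numba import jit

@jit(nopython=True)
def cnd_numba(d):
    return 0.5 * (1.0 + math.erf(d / math.sqrt(2.0)))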
def predict(self, t_new):
""" Use the segments in this model to predict the v value for new t values.
Params:
t_new (np.array): t values for which predictions should be made
Returns:
np.array of predictions
"""
v_hats = np.empty_like(t_new, dtype=float)
for idx, t in enumerate(t_new):
# Find the applicable segment.
seg_index = bisect.bisect_left(self._starts, t) - 1
seg = self.segments[max(0, seg_index)]
# Use it for prediction
v_hats[idx] = seg.predict(t)
return v_hats
## Data structures used during the fitting of the regression in `piecewise()`.
# Segment represents a time range and a linear regression fit through it.
def chrom_convert(arr):
#assert(arr.min()>=0 and arr.max()<=1)
opp = opp_convert(arr)
out = np.empty_like(opp[:,:,[0,1]])
rg = opp[:,:,0]
by = opp[:,:,1]
intensity = opp[:,:,2]
lowi = intensity < 0.1*intensity.max()
rg[lowi] = 0
by[lowi] = 0
denom = intensity
denom[denom==0] = 1
out[:,:,0] = rg / denom
out[:,:,1] = by / denom
return out
# ------------------------------------------------------------------------------
def rg2_convert(arr):
#assert(arr.min()>=0 and arr.max()<=1)
out = np.empty_like(arr[:,:,[0,1]])
red = arr[:,:,0]
green = arr[:,:,1]
#blue = arr[:,:,2]
intensity = arr.mean(2)
lowi = intensity < 0.1*intensity.max()
arr[lowi] = 0  # note: modifies the input array in place
denom = arr.sum(2)
denom[denom==0] = 1
out[:,:,0] = red / denom
out[:,:,1] = green / denom
return out
# ------------------------------------------------------------------------------
def ft_autocorrelation_function(self, k):
"""Compute the 3D Fourier transform of the isotropic correlation
function for an independent sphere for given magnitude k of the 3D wave vector
(float).
"""
X = self.radius * np.asarray(k)
volume_sphere = 4.0 / 3 * np.pi * self.radius**3
bessel_term = np.empty_like(X)
zero_X = np.isclose(X, 0)
non_zero_X = np.logical_not(zero_X)
X_non_zero = X[non_zero_X]
bessel_term[non_zero_X] = (9 * ((np.sin(X_non_zero) - X_non_zero * np.cos(X_non_zero))
/ X_non_zero**3)**2)
bessel_term[zero_X] = 1.0
return self.corr_func_at_origin * volume_sphere * bessel_term
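# Quick numerical check of the X -> 0 limit special-cased above: the term
# 9*((sin X - X cos X)/X**3)**2 tends to 1 for small X, which is why zero
# wave numbers are handled separately instead of dividing by zero.
import numpy as np
X = np.array([1e-4, 1e-3, 1e-2])
term = 9 * ((np.sin(X) - X * np.cos(X)) / X**3)**2
assert np.allclose(term, 1.0, atol=1e-4)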
def make_deepfool(sess, env, X_data, epochs=1, batch_size=128):
"""
Generate DeepFool adversarial examples by running env.xadv.
"""
print('\nMaking adversarials via DeepFool')
n_sample = X_data.shape[0]
n_batch = int((n_sample + batch_size - 1) / batch_size)
X_adv = np.empty_like(X_data)
for batch in range(n_batch):
print(' batch {0}/{1}'.format(batch + 1, n_batch), end='\r')
start = batch * batch_size
end = min(n_sample, start + batch_size)
adv = sess.run(env.xadv, feed_dict={env.x: X_data[start:end],
env.adv_epochs: epochs})
X_adv[start:end] = adv
print()
return X_adv
def make_jsma(sess, env, X_data, epochs=0.2, eps=1.0, batch_size=128):
"""
Generate JSMA by running env.x_jsma.
"""
print('\nMaking adversarials via JSMA')
n_sample = X_data.shape[0]
n_batch = int((n_sample + batch_size - 1) / batch_size)
X_adv = np.empty_like(X_data)
for batch in range(n_batch):
print(' batch {0}/{1}'.format(batch + 1, n_batch), end='\r')
start = batch * batch_size
end = min(n_sample, start + batch_size)
feed_dict = {
env.x: X_data[start:end],
env.target: np.random.choice(n_classes),
env.adv_epochs: epochs,
env.adv_eps: eps}
adv = sess.run(env.x_jsma, feed_dict=feed_dict)
X_adv[start:end] = adv
print()
return X_adv
def make_fgsm(sess, env, X_data, epochs=1, eps=0.01, batch_size=128):
"""
Generate FGSM by running env.x_fgsm.
"""
print('\nMaking adversarials via FGSM')
n_sample = X_data.shape[0]
n_batch = int((n_sample + batch_size - 1) / batch_size)
X_adv = np.empty_like(X_data)
for batch in range(n_batch):
print(' batch {0}/{1}'.format(batch + 1, n_batch), end='\r')
start = batch * batch_size
end = min(n_sample, start + batch_size)
adv = sess.run(env.x_fgsm, feed_dict={
env.x: X_data[start:end],
env.fgsm_eps: eps,
env.fgsm_epochs: epochs})
X_adv[start:end] = adv
print()
return X_adv
def make_deepfool(sess, env, X_data, epochs=1, eps=0.01, batch_size=128):
"""
Generate DeepFool adversarial examples by running env.xadv.
"""
print('\nMaking adversarials via DeepFool')
n_sample = X_data.shape[0]
n_batch = int((n_sample + batch_size - 1) / batch_size)
X_adv = np.empty_like(X_data)
for batch in range(n_batch):
print(' batch {0}/{1}'.format(batch + 1, n_batch), end='\r')
start = batch * batch_size
end = min(n_sample, start + batch_size)
adv = sess.run(env.xadv, feed_dict={env.x: X_data[start:end],
env.adv_epochs: epochs})
X_adv[start:end] = adv
print()
return X_adv
def make_jsma(sess, env, X_data, y, epochs=0.2, eps=1.0, batch_size=128):
"""
Generate JSMA by running env.x_jsma.
"""
print('\nMaking adversarials via JSMA')
n_sample = X_data.shape[0]
n_batch = int((n_sample + batch_size - 1) / batch_size)
X_adv = np.empty_like(X_data)
for batch in range(n_batch):
print(' batch {0}/{1}'.format(batch + 1, n_batch), end='\r')
start = batch * batch_size
end = min(n_sample, start + batch_size)
feed_dict = {
env.x: X_data[start:end],
env.target: y,
env.adv_epochs: epochs,
env.adv_eps: eps}
adv = sess.run(env.x_jsma, feed_dict=feed_dict)
X_adv[start:end] = adv
print()
return X_adv
def dihedral_transform_batch(x):
g = np.random.randint(low=0, high=8, size=x.shape[0])
h, w = x.shape[-2:]
hh = (h - 1) / 2.
hw = (w - 1) / 2.
I, J = np.meshgrid(np.linspace(-hh, hh, x.shape[-2]), np.linspace(-hw, hw, x.shape[-1]))
C = np.r_[[I, J]]
D4C = np.einsum('...ij,jkl->...ikl', D4, C)
D4C[:, 0] += hh
D4C[:, 1] += hw
D4C = D4C.astype(int)
x_out = np.empty_like(x)
for i in range(x.shape[0]):
I, J = D4C[g[i]]
x_out[i, :] = x[i][:, J, I]
return x_out
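# The snippet assumes a module-level D4 constant. A hypothetical construction:
# the eight 2x2 matrices of the dihedral group (four rotations, each with and
# without a reflection), which is what indexing D4[g[i]] above relies on.
import numpy as np
rot = np.array([[0, -1], [1, 0]])    # 90-degree rotation
flip = np.array([[1, 0], [0, -1]])   # reflection about the horizontal axis
D4 = np.array([np.linalg.matrix_power(rot, k) @ f
               for f in (np.eye(2, dtype=int), flip)
               for k in range(4)])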
# Source: wav2feature_python_speech_features.py (project: asr_preprocessing, author: hirofumi0810)
def _delta(feat, N):
"""Compute delta features from a feature vector sequence.
Args:
feat: A numpy array of size (NUMFRAMES by number of features)
containing features. Each row holds 1 feature vector.
N: For each frame, calculate delta features based on preceding and
following N frames
Returns:
A numpy array of size (NUMFRAMES by number of features) containing
delta features. Each row holds 1 delta feature vector.
"""
if N < 1:
raise ValueError('N must be an integer >= 1')
NUMFRAMES = len(feat)
denominator = 2 * sum([i**2 for i in range(1, N + 1)])
delta_feat = np.empty_like(feat)
# padded version of feat
padded = np.pad(feat, ((N, N), (0, 0)), mode='edge')
for t in range(NUMFRAMES):
# [t : t+2*N+1] == [(N+t)-N : (N+t)+N+1]
delta_feat[t] = np.dot(np.arange(-N, N + 1),
padded[t: t + 2 * N + 1]) / denominator
return delta_feat
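# Usage sketch: delta features of a constant feature sequence are zero,
# since the regression slope across identical frames vanishes.
import numpy as np
feat = np.ones((5, 3))
assert np.allclose(_delta(feat, N=2), 0.0)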
def test_polarisation_products(self):
n = 89
real = np.random.randint(-127, 128, size=(n,2)).astype(np.float32)
imag = np.random.randint(-127, 128, size=(n,2)).astype(np.float32)
a = real + 1j * imag
a_orig = a
a = bf.asarray(a, space='cuda')
b = bf.empty_like(a)
for _ in range(3):
bf.map('''
auto x = a(_,0);
auto y = a(_,1);
b(_,0).assign(x.mag2(), y.mag2());
b(_,1) = x*y.conj();
''', shape=b.shape[:-1], data={'a': a, 'b': b})
b = b.copy('system')
a = a_orig
gold = np.empty_like(a)
def mag2(x):
return x.real * x.real + x.imag * x.imag
gold[...,0] = mag2(a[...,0]) + 1j * mag2(a[...,1])
gold[...,1] = a[...,0] * a[...,1].conj()
np.testing.assert_equal(b, gold)
def delta(feat, N):
"""Compute delta features from a feature vector sequence.
:param feat: A numpy array of size (NUMFRAMES by number of features) containing features. Each row holds 1 feature vector.
:param N: For each frame, calculate delta features based on preceding and following N frames
:returns: A numpy array of size (NUMFRAMES by number of features) containing delta features. Each row holds 1 delta feature vector.
"""
if N < 1:
raise ValueError('N must be an integer >= 1')
NUMFRAMES = len(feat)
denominator = 2 * sum([i**2 for i in range(1, N+1)])
delta_feat = numpy.empty_like(feat)
padded = numpy.pad(feat, ((N, N), (0, 0)), mode='edge') # padded version of feat
for t in range(NUMFRAMES):
delta_feat[t] = numpy.dot(numpy.arange(-N, N+1), padded[t : t+2*N+1]) / denominator # [t : t+2*N+1] == [(N+t)-N : (N+t)+N+1]
return delta_feat
# Source: Evolution Strategy Basic.py (project: Evolutionary-Algorithm, author: MorvanZhou)
def make_kid(pop, n_kid):
# generate empty kid holder
kids = {'DNA': np.empty((n_kid, DNA_SIZE))}
kids['mut_strength'] = np.empty_like(kids['DNA'])
for kv, ks in zip(kids['DNA'], kids['mut_strength']):
# crossover (roughly half p1 and half p2)
p1, p2 = np.random.choice(np.arange(POP_SIZE), size=2, replace=False)
cp = np.random.randint(0, 2, DNA_SIZE, dtype=bool)  # crossover points
kv[cp] = pop['DNA'][p1, cp]
kv[~cp] = pop['DNA'][p2, ~cp]
ks[cp] = pop['mut_strength'][p1, cp]
ks[~cp] = pop['mut_strength'][p2, ~cp]
# mutate (change DNA based on normal distribution)
ks[:] = np.maximum(ks + (np.random.rand(*ks.shape) - 0.5), 0.)  # mutation strength must stay non-negative
kv += ks * np.random.randn(*kv.shape)
kv[:] = np.clip(kv, *DNA_BOUND) # clip the mutated value
return kids
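# A minimal set of the module-level constants the snippet assumes (values
# are hypothetical, chosen so the function runs end to end):
import numpy as np
DNA_SIZE = 1                 # number of genes per individual
DNA_BOUND = [0, 5]           # lower and upper clip bounds for DNA values
POP_SIZE = 100               # number of parents
pop = dict(DNA=5 * np.random.rand(POP_SIZE, DNA_SIZE),
           mut_strength=np.random.rand(POP_SIZE, DNA_SIZE))
kids = make_kid(pop, n_kid=50)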
def invert_map(x):
"""Generate an inverse map.
:param x: map, such as that generated by :func:`gray_code`
:returns: an inverse map y, such that ``y[x[j]] = j``
>>> import arlpy
>>> x = arlpy.comms.gray_code(8)
>>> y = arlpy.comms.invert_map(x)
>>> x[2]
3
>>> y[3]
2
"""
y = _np.empty_like(x)
y[x] = _np.arange(len(x))
return y
def _handle_empty_like(self, lhs, rhs, assign, call_table):
# B = empty_like(A) -> B = empty(len(A), dtype)
if (rhs.op == 'call'
and rhs.func.name in call_table
and call_table[rhs.func.name] == ['empty_like', np]):
in_arr = rhs.args[0]
def f(A):
c = len(A)
f_block = compile_to_numba_ir(f, {}, self.typingctx, (self.typemap[in_arr.name],),
self.typemap, self.calltypes).blocks.popitem()[1]
replace_arg_nodes(f_block, [in_arr])
nodes = f_block.body[:-3] # remove none return
size_var = nodes[-1].target
alloc_nodes = mk_alloc(self.typemap, self.calltypes, assign.target,
size_var,
self.typemap[in_arr.name].dtype, in_arr.scope, in_arr.loc)
return nodes + alloc_nodes
return None
def _handle_df_col_filter(self, lhs_name, rhs, assign):
# find df['col2'] = df['col1'][arr]
# since columns should have the same size, output is filled with NaNs
# TODO: check for float, make sure col1 and col2 are in the same df
if (rhs.op=='getitem'
and rhs.value.name in self.df_cols
and lhs_name in self.df_cols
and self.is_bool_arr(rhs.index.name)):
lhs = assign.target
in_arr = rhs.value
index_var = rhs.index
f_blocks = compile_to_numba_ir(_column_filter_impl_float,
{'numba': numba, 'np': np}, self.typingctx,
(self.typemap[lhs.name], self.typemap[in_arr.name],
self.typemap[index_var.name]),
self.typemap, self.calltypes).blocks
first_block = min(f_blocks.keys())
replace_arg_nodes(f_blocks[first_block], [lhs, in_arr, index_var])
alloc_nodes = gen_np_call('empty_like', np.empty_like, lhs, [in_arr],
self.typingctx, self.typemap, self.calltypes)
f_blocks[first_block].body = alloc_nodes + f_blocks[first_block].body
return f_blocks
# Source: feature_extraction.py (project: tensorflow_end2end_speech_recognition, author: hirofumi0810)
# (contains a _delta implementation identical to the one above)
# Source: RATSforClassification.py (project: python_scripting_for_spatial_data_processing, author: upsdeepak)
def classifyLevel1Assign(classLevel1Img):
# Create Output Array
level1 = numpy.empty_like(classLevel1Img, dtype = numpy.dtype('a255'))
level1[...] = "NA"
# Non Vegetated
level1 = numpy.where(numpy.logical_or(classLevel1Img == "NA",
numpy.logical_or(classLevel1Img == "Water",
classLevel1Img == "Urban")),
"Non Vegetated", level1)
# Vegetated
level1 = numpy.where(numpy.logical_or(classLevel1Img == "Photosynthetic Vegetated",
classLevel1Img == "Non Photosynthetic Vegetated",
classLevel1Img == "Non Submerged Aquatic Vegetated"),
"Vegetated", level1)
return level1
# A function for classifying level 2
def init_param(self, nodes_list):
if self.activation == 'logistic':
init_bound = lambda inb, outb: np.sqrt(2. / (inb + outb))
else:
init_bound = lambda inb, outb: np.sqrt(6. / (inb + outb))
self.ww = [self._random_state.uniform(-init_bound(nodes_list[i], nodes_list[i + 1]), init_bound(nodes_list[i], nodes_list[i + 1]), (nodes_list[i], nodes_list[i + 1]))
for i in range(self.layers_ - 1)]
self.th = [self._random_state.uniform(-init_bound(nodes_list[i], nodes_list[i + 1]), init_bound(nodes_list[i], nodes_list[i + 1]), (nodes_list[i + 1],))
for i in range(self.layers_ - 1)]
self.dww = [np.empty_like(w) for w in self.ww]
self.dww_last = [np.empty_like(w) for w in self.ww]
self.dth = [np.empty_like(th) for th in self.th]
self.z = [np.empty_like(th) for th in self.th]
self.a = [np.empty_like(th) for th in self.th]
self.ro = [np.empty_like(th) for th in self.th]
self.delta = [np.empty_like(th) for th in self.th]
def logistic(x, prime=0):
if prime == 0:
##v = np.empty_like(x)
##mask = x < 0.0
##zl = np.exp(x[mask])
##zl = 1.0 / (1.0 + zl)
##v[mask] = zl
##zh = np.exp(-x[~mask])
##zh = zh / (1.0 + zh)
##v[~mask] = zh
v = sps.expit(x)
return v
elif prime == 1:
return logistic(x) * (1.0 - logistic(x))
else:
raise NotImplementedError('%d order derivative not implemented.' % int(prime))
def exprect(x, prime=0):
#v = np.empty_like(x)
#mask = x >= 0.0
#nmask = ~mask
#if prime == 0:
# v[mask] = x[mask]
# v[nmask] = np.exp(x[nmask]) - 1.0
#elif prime == 1:
# v[mask] = 1.0
# v[nmask] = np.exp(x[nmask])
mask = x < 0.0
if prime == 0:
v = x.copy()
v[mask] = np.exp(v[mask]) - 1.0
elif prime == 1:
v = np.ones_like(x)
v[mask] = np.exp(x[mask])  # derivative of exp(x) - 1 is exp(x)
return v
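# Sanity check (sketch): the prime=1 branch should match a central finite
# difference of the prime=0 branch.
import numpy as np
x = np.linspace(-2.0, 2.0, 9)
eps = 1e-6
fd = (exprect(x + eps) - exprect(x - eps)) / (2 * eps)
assert np.allclose(exprect(x, prime=1), fd, atol=1e-5)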
def apply_mask_column(data, index, mask):
"""
Applies ``mask`` to the data values and returns the filtered data array together with an updated index
"""
new_data = data[mask]
new_index = np.empty_like(index)
data_cursor = 0
for i, idx in enumerate(index):
if idx:
if mask[data_cursor]:
new_index[i] = 1
else:
new_index[i] = 0
data_cursor += 1
else:
new_index[i] = 0
return new_data, new_index
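# A small worked example (hypothetical values): ``index`` marks which of the
# original positions hold data, and ``mask`` filters the data values themselves.
import numpy as np
data = np.array([10, 20, 30])
index = np.array([1, 0, 1, 1])         # positions 0, 2 and 3 hold data
mask = np.array([True, False, True])   # keep the 1st and 3rd data values
new_data, new_index = apply_mask_column(data, index, mask)
# new_data -> [10, 30]; new_index -> [1, 0, 0, 1]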
def predictiveQQ(simulations, targets, bands):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
bands = toCustomLogSpace(np.array(bands)[::-1])
pValues = np.empty_like(targets)
for i0 in range(pValues.shape[0]):
sims, idxs = np.unique(simulations[i0,:],return_index=True)
try:
pValues[i0] = interp1d(sims, bands[idxs], kind='linear', assume_sorted=True)(targets[i0])
except np.linalg.LinAlgError as ex:
pValues[i0] = np.nan
except ValueError as ex:
# TODO: handle extrapolation more robustly
if targets[i0]<sims[0]:
pValues[i0] = bands[0]+(bands[0]-bands[1])/(sims[0]-sims[1])*(targets[i0]-sims[0])
else:
pValues[i0] = bands[-1]+(bands[-1]-bands[-2])/(sims[-1]-sims[-2])*(targets[i0]-sims[-1])
pValues = fromCustomLogSpace(pValues)
pValues[pValues<0] = 0
pValues[pValues>1] = 1
pValues = np.sort(1-pValues[np.logical_not(np.isnan(pValues))])
return (np.linspace(0,1, pValues.shape[0]), pValues)