def __split_apply(self, func_y_lt_f, func_y_gt_f, func_f_lt_0, y, f):
    # Make sure all arrays are of a compatible type and shape
    y, f = np.broadcast_arrays(y, f)
    y = y.astype(float, copy=False)
    f = f.astype(float, copy=False)
    if np.any(y < 0):
        raise ValueError("y has to be >= 0!")
    # Get indicators of which likelihood branch to apply where
    y_lt_f = np.logical_and(y <= f, f > 0)
    y_gt_f = np.logical_and(y > f, f > 0)
    f_lt_0 = f <= 0
    result = np.zeros_like(y)
    result[y_lt_f] = func_y_lt_f(y[y_lt_f], f[y_lt_f])
    result[y_gt_f] = func_y_gt_f(y[y_gt_f], f[y_gt_f])
    result[f_lt_0] = func_f_lt_0(y[f_lt_0], f[f_lt_0])
    return result
Example source code for Python's broadcast_arrays()
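Before the snippets themselves, a minimal sketch of what np.broadcast_arrays does: it expands its inputs to a common shape and returns views (zero-stride repeats), not copies. The shapes below are illustrative:

import numpy as np

col = np.arange(3).reshape(3, 1)      # shape (3, 1)
row = np.arange(4).reshape(1, 4)      # shape (1, 4)
C, R = np.broadcast_arrays(col, row)  # both become shape (3, 4)
print(C.shape, R.shape)               # (3, 4) (3, 4)
print(R.strides)                      # (0, 8) on a 64-bit build: the row repeats via a 0 stride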
def compute(self, today, assets, out, dependent, independent):
    alpha = out.alpha
    beta = out.beta
    r_value = out.r_value
    p_value = out.p_value
    stderr = out.stderr

    def regress(y, x):
        regr_results = linregress(y=y, x=x)
        # `linregress` returns its results in the following order:
        # slope, intercept, r-value, p-value, stderr
        alpha[i] = regr_results[1]
        beta[i] = regr_results[0]
        r_value[i] = regr_results[2]
        p_value[i] = regr_results[3]
        stderr[i] = regr_results[4]

    # If `independent` is a Slice or single column of data, broadcast it
    # out to the same shape as `dependent`, then compute column-wise. This
    # is efficient because each column of the broadcasted array only refers
    # to a single memory location.
    independent = broadcast_arrays(independent, dependent)[0]
    for i in range(len(out)):
        regress(y=dependent[:, i], x=independent[:, i])
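The efficiency claim in the comment above comes from broadcasting producing zero-stride views; a quick illustration with made-up shapes:

import numpy as np

single_col = np.arange(5.0).reshape(5, 1)            # one column of data
dependent = np.zeros((5, 3))                         # three target columns
expanded = np.broadcast_arrays(single_col, dependent)[0]
print(expanded.shape)    # (5, 3)
print(expanded.strides)  # (8, 0) on a 64-bit build: all three columns alias the same memory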
def _cartesian_product(*arrays):
    """
    Get the Cartesian product of a number of arrays.

    Parameters
    ----------
    arrays : Iterable[np.ndarray]
        The arrays to take the Cartesian product of. The product iterates
        in the order the arrays are given.

    Returns
    -------
    out : np.ndarray
        The overall Cartesian product of all the input arrays, one row
        per input array.
    """
    broadcastable = np.ix_(*arrays)
    broadcasted = np.broadcast_arrays(*broadcastable)
    rows, cols = np.prod(broadcasted[0].shape), len(broadcasted)
    dtype = np.result_type(*arrays)
    out = np.empty(rows * cols, dtype=dtype)
    start, end = 0, rows
    for a in broadcasted:
        out[start:end] = a.reshape(-1)
        start, end = end, end + rows
    return out.reshape(cols, rows)
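A quick usage check of _cartesian_product as defined above, with made-up inputs:

import numpy as np

x = np.array([1, 2])
y = np.array([10, 20, 30])
print(_cartesian_product(x, y))
# [[ 1  1  1  2  2  2]
#  [10 20 30 10 20 30]]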
def test_broadcast_arrays():
    # Test user defined dtypes
    a = np.array([(1, 2, 3)], dtype='u4,u4,u4')
    b = np.array([(1, 2, 3), (4, 5, 6), (7, 8, 9)], dtype='u4,u4,u4')
    result = np.broadcast_arrays(a, b)
    assert_equal(result[0], np.array([(1, 2, 3), (1, 2, 3), (1, 2, 3)], dtype='u4,u4,u4'))
    assert_equal(result[1], np.array([(1, 2, 3), (4, 5, 6), (7, 8, 9)], dtype='u4,u4,u4'))
def Ey(self, f, var, z):
    f, z = np.broadcast_arrays(f, z)
    Ey = np.zeros_like(f)
    nz = ~z
    Ey[z] = self.gaus.Ey(f[z], var)
    Ey[nz] = self.unif.Ey(f[nz])
    return Ey
def dp(self, y, f, var, z):
    yz, fz = np.broadcast_arrays(y[z], f[:, z])
    dp = np.zeros_like(f)
    dp[:, z] = self.gaus.dp(yz, fz, var)
    return dp
def __split_on_z(self, func_unif, func_gaus, y, f, var, z):
    y, f, z = np.broadcast_arrays(y, f, z)
    val = np.zeros_like(f)
    nz = ~z
    val[z] = func_gaus(y[z], f[z], var)
    val[nz] = func_unif(y[nz], f[nz])
    return val
def compute(self, today, assets, out, base_data, target_data):
    # If `target_data` is a Slice or single column of data, broadcast it
    # out to the same shape as `base_data`, then compute column-wise. This
    # is efficient because each column of the broadcasted array only refers
    # to a single memory location.
    target_data = broadcast_arrays(target_data, base_data)[0]
    for i in range(len(out)):
        out[i] = pearsonr(base_data[:, i], target_data[:, i])[0]
def compute(self, today, assets, out, base_data, target_data):
    # If `target_data` is a Slice or single column of data, broadcast it
    # out to the same shape as `base_data`, then compute column-wise. This
    # is efficient because each column of the broadcasted array only refers
    # to a single memory location.
    target_data = broadcast_arrays(target_data, base_data)[0]
    for i in range(len(out)):
        out[i] = spearmanr(base_data[:, i], target_data[:, i])[0]
def world2image_uvMat(self, uv_mat):
    '''
    @param uv_mat: a 3 x n matrix of coordinates (a 2 x n matrix is homogenized below)
    '''
    if uv_mat.shape[0] == 2:
        if len(uv_mat.shape) == 1:
            uv_mat = uv_mat.reshape(uv_mat.shape + (1,))
        uv_mat = np.vstack((uv_mat, np.ones((1, uv_mat.shape[1]), uv_mat.dtype)))
    result = np.dot(self.Tr33, uv_mat)
    #w0 = -(uv_mat[0]* self.Tr_inv_33[1,0]+ uv_mat[1]* self.Tr_inv[1,1])/self.Tr_inv[1,2]
    resultB = np.broadcast_arrays(result, result[-1, :])
    return resultB[0] / resultB[1]
def image2world_uvMat(self, uv_mat):
    '''
    @param uv_mat: a 3 x n matrix of coordinates (a 2 x n matrix is homogenized below)
    '''
    #assert self.transformMode == 'kitti' or self.transformMode == 'angles'
    if uv_mat.shape[0] == 2:
        if len(uv_mat.shape) == 1:
            uv_mat = uv_mat.reshape(uv_mat.shape + (1,))
        uv_mat = np.vstack((uv_mat, np.ones((1, uv_mat.shape[1]), uv_mat.dtype)))
    result = np.dot(self.Tr33.I, uv_mat)
    #w0 = -(uv_mat[0]* self.Tr_inv_33[1,0]+ uv_mat[1]* self.Tr_inv[1,1])/self.Tr_inv[1,2]
    resultB = np.broadcast_arrays(result, result[-1, :])
    return resultB[0] / resultB[1]
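Both methods above finish with the same trick: normalizing homogeneous coordinates by broadcasting the last row under the whole matrix. A standalone sketch with made-up points:

import numpy as np

pts = np.array([[2., 6.],
                [4., 8.],
                [2., 2.]])                        # homogeneous points, one per column
num, den = np.broadcast_arrays(pts, pts[-1, :])   # den is the last row, repeated
print(num / den)                                  # [[1. 3.] [2. 4.] [1. 1.]]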
def __call__(self, t, tau, debug=False):
    """Calculate h(t, tau).

    Compute h(t, tau), which is the output at t subject to an impulse
    at time tau. Standard numpy broadcasting rules apply.

    Parameters
    ----------
    t : array-like
        the output time.
    tau : array-like
        the input impulse time.
    debug : bool
        True to print debug messages.

    Returns
    -------
    val : :class:`numpy.ndarray`
        the time-varying impulse response evaluated at the given coordinates.
    """
    # broadcast arguments to the same shape
    t, tau = np.broadcast_arrays(t, tau)
    # compute the impulse response using efficient matrix multiplies and numpy broadcasting
    dt = t - tau
    zero_indices = (dt < 0) | (dt > self.k * self.tper)
    t_row = t.reshape((1, -1))
    dt_row = dt.reshape((1, -1))
    tmp = np.dot(self.hmat, np.exp(np.dot(self.n_col, dt_row))) * np.exp(np.dot(self.m_col, t_row))
    result = np.sum(tmp, axis=0).reshape(dt.shape)
    # zero elements where dt < 0 or dt > k * T
    result[zero_indices] = 0.0
    if debug:
        self._print_debug_msg(result)
    # discard the imaginary part
    return np.real(result)
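The initial broadcast is what lets callers evaluate h on a full (t, tau) grid by passing a column of output times against a row of impulse times; a minimal shape sketch:

import numpy as np

t = np.linspace(0.0, 1.0, 4)[:, np.newaxis]    # output times as a column, shape (4, 1)
tau = np.linspace(0.0, 1.0, 3)[np.newaxis, :]  # impulse times as a row, shape (1, 3)
T, Tau = np.broadcast_arrays(t, tau)           # both shape (4, 3): every (t, tau) pair
print((T - Tau).shape)                         # (4, 3), the dt grid used above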
def max_pool_backward_reshape(dout, cache):
    """
    A fast implementation of the backward pass for the max pooling layer that
    uses some clever broadcasting and reshaping.

    This can only be used if the forward pass was computed using
    max_pool_forward_reshape.

    NOTE: If there are multiple argmaxes, this method will assign gradient to
    ALL argmax elements of the input rather than picking one. In this case the
    gradient will actually be incorrect. However this is unlikely to occur in
    practice, so it shouldn't matter much. One possible solution is to split the
    upstream gradient equally among all argmax elements; this should result in a
    valid subgradient. You can make this happen by uncommenting the line below;
    however this results in a significant performance penalty (about 40% slower)
    and is unlikely to matter in practice so we don't do it.
    """
    x, x_reshaped, out = cache
    dx_reshaped = np.zeros_like(x_reshaped)
    out_newaxis = out[:, :, :, np.newaxis, :, np.newaxis]
    mask = (x_reshaped == out_newaxis)
    dout_newaxis = dout[:, :, :, np.newaxis, :, np.newaxis]
    dout_broadcast, _ = np.broadcast_arrays(dout_newaxis, dx_reshaped)
    dx_reshaped[mask] = dout_broadcast[mask]
    # Uncomment to split the gradient equally among ties (see NOTE above):
    # dx_reshaped /= np.sum(mask, axis=(3, 5), keepdims=True)
    dx = dx_reshaped.reshape(x.shape)
    return dx
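A tiny 1-D analogue of the mask-and-broadcast trick above, small enough to check by hand (the arrays are made up). Note how the tied window receives gradient twice, exactly the caveat in the NOTE:

import numpy as np

x_reshaped = np.array([[1., 3.],
                       [2., 2.],    # a tie: both elements are the max
                       [5., 4.]])   # three pooling windows of width 2
out = x_reshaped.max(axis=1)                      # forward pass: [3., 2., 5.]
dout = np.array([10., 20., 30.])                  # upstream gradient, one per window
mask = (x_reshaped == out[:, np.newaxis])         # True at every argmax
dout_broadcast, _ = np.broadcast_arrays(dout[:, np.newaxis], x_reshaped)
dx = np.zeros_like(x_reshaped)
dx[mask] = dout_broadcast[mask]
print(dx)   # [[ 0. 10.] [20. 20.] [30.  0.]] -- the tied window got gradient twice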
test_api.py — source file from the project PyDataLondon29-EmbarrassinglyParallelDAWithAWSLambda (author: SignalMedia)
def cartesian_product(arrays):
    """Return the Cartesian product of the given arrays (x and y): cartesian_product([x, y])."""
    broadcastable = np.ix_(*arrays)
    broadcasted = np.broadcast_arrays(*broadcastable)
    # np.prod avoids the functools.reduce import the original relied on
    rows, cols = np.prod(broadcasted[0].shape), len(broadcasted)
    out = np.empty(rows * cols, dtype=broadcasted[0].dtype)
    start, end = 0, rows
    for a in broadcasted:
        out[start:end] = a.reshape(-1)
        start, end = end, end + rows
    # Return value(s): one point per row
    return out.reshape(cols, rows).T
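Unlike _cartesian_product earlier, this variant transposes the result so each row is one point of the product; a quick check with made-up inputs:

import numpy as np

print(cartesian_product([np.array([1, 2]), np.array([10, 20, 30])]))
# [[ 1 10]
#  [ 1 20]
#  [ 1 30]
#  [ 2 10]
#  [ 2 20]
#  [ 2 30]]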
def omega_output(self, fname, parameters):
    """This method writes the dispersion relation to the file fname.

    :param fname: Filename
    :type fname: string
    :param parameters: Array containing the parameters.
    :type parameters: numpy.ndarray
    """
    omega = self.omega(parameters)
    kappa = self.kappamesh
    kmesh = self.kmesh
    k = self.k
    #print(omega, self.dks)
    vgs = np.gradient(omega, *self.dks, edge_order=2)
    vg = np.sqrt(sum(vgc**2 for vgc in vgs))
    if self.dim == 2:
        vg[:3, :3] = 1
    elif self.dim == 3:
        vg[:3, :3, :3] = 1
    vph = omega / k
    vph.ravel()[0] = 1.
    data = [*np.broadcast_arrays(*kappa), k, omega, vph, vg, *vgs]
    data = np.broadcast_arrays(*data)
    blocks = zip(*map(lambda x: np.vsplit(x, x.shape[0]), data))
    with open(fname, 'wb') as f:
        kappanames = ' '.join(['kx', 'ky', 'kz'][:len(kappa)])
        vgsnames = ' '.join(['vgx', 'vgy', 'vgz'][:len(vgs)])
        f.write(('#' + kappanames + ' k omega vph vg ' + vgsnames + '\n').encode('us-ascii'))
        for block_columns in blocks:
            np.savetxt(f, np.vstack(map(np.ravel, block_columns)).T)
            f.write(b'\n')
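The np.broadcast_arrays(*kappa) call suggests self.kappamesh holds a sparse mesh (e.g. from np.meshgrid with sparse=True); under that assumption, a sketch of how broadcasting expands it to full coordinate arrays:

import numpy as np

# Hypothetical sparse mesh standing in for self.kappamesh
kx, ky = np.meshgrid(np.linspace(0, 1, 3), np.linspace(0, 1, 4),
                     indexing='ij', sparse=True)
print(kx.shape, ky.shape)               # (3, 1) (1, 4)
KX, KY = np.broadcast_arrays(kx, ky)    # full (3, 4) coordinate arrays
print(KX.shape, KY.shape)               # (3, 4) (3, 4)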
def _numpy_second(x, y):
    return numpy.broadcast_arrays(x, y)[1]
def __init__(self, *args, mask='B', **kwargs):
    super(MaskedConvolution2D, self).__init__(
        *args, **kwargs
    )
    Cout, Cin, kh, kw = self.W.shape
    pre_mask = self.xp.ones_like(self.W.data).astype('f')
    yc, xc = kh // 2, kw // 2

    # context masking - subsequent pixels won't have access to next pixels (spatial dim)
    pre_mask[:, :, yc+1:, :] = 0.0
    pre_mask[:, :, yc:, xc+1:] = 0.0

    # same pixel masking - pixel won't access next color (conv filter dim)
    def bmask(i_out, i_in):
        cout_idx = np.expand_dims(np.arange(Cout) % 3 == i_out, 1)
        cin_idx = np.expand_dims(np.arange(Cin) % 3 == i_in, 0)
        a1, a2 = np.broadcast_arrays(cout_idx, cin_idx)
        return a1 * a2

    for j in range(3):
        pre_mask[bmask(j, j), yc, xc] = 0.0 if mask == 'A' else 1.0

    pre_mask[bmask(0, 1), yc, xc] = 0.0
    pre_mask[bmask(0, 2), yc, xc] = 0.0
    pre_mask[bmask(1, 2), yc, xc] = 0.0

    self.mask = pre_mask
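How bmask builds its channel-pair mask via broadcasting, as a standalone sketch (the channel counts and group indices are made up):

import numpy as np

Cout, Cin = 6, 6
cout_idx = np.expand_dims(np.arange(Cout) % 3 == 0, 1)  # group-0 output channels, as a column
cin_idx = np.expand_dims(np.arange(Cin) % 3 == 1, 0)    # group-1 input channels, as a row
a1, a2 = np.broadcast_arrays(cout_idx, cin_idx)         # both (6, 6) boolean grids
print((a1 * a2).astype(int))
# 1 exactly where a group-0 output channel would read a group-1 input channel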
def min_traveltimes(modelname, source_depths, receiver_depths, distances, phases, callback=None):
    '''Computes the minimum travel times, using multiprocessing to speed up
    the calculations.

    min_traveltimes(modelname, ARRAY[N], ARRAY[N], SCALAR, ...) -> [N-length array]
    min_traveltimes(modelname, ARRAY[N], ARRAY[N], ARRAY[M]) -> [NxM] matrix
    min_traveltimes(modelname, SCALAR, SCALAR, ARRAY[M]) -> [M-length array]
    '''
    model = taumodel(modelname)
    source_depths, receiver_depths = np.broadcast_arrays(source_depths, receiver_depths)
    distances = np.asarray(distances)  # accept plain scalars/lists, as the docstring implies
    # assert we were passed arrays, not matrices:
    if len(source_depths.shape) > 1 or len(distances.shape) > 1:
        raise ValueError("Need to have arrays, not matrices")
    norowdim = source_depths.ndim == 0
    if norowdim:
        source_depths = np.array([source_depths])
        receiver_depths = np.array([receiver_depths])
    nocoldim = distances.ndim == 0
    if nocoldim:
        distances = np.array([distances])
    ttimes = np.full(shape=(source_depths.shape[0], distances.shape[0]), fill_value=np.nan)

    def mp_callback(index, array, _callback=None):
        def _(result):
            array[index] = result
            if _callback is not None:
                _callback()
        return _

    pool = Pool()
    for idx, sd, rd in zip(count(), source_depths, receiver_depths):
        tmp_ttimes = ttimes[idx]
        for i, d in enumerate(distances):
            pool.apply_async(min_traveltime, (model, sd, rd, d, phases),
                             callback=mp_callback(i, tmp_ttimes, callback))
    pool.close()
    pool.join()
    if norowdim or nocoldim:
        ttimes = ttimes.flatten()
    return ttimes
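The closure returned by mp_callback routes each async result into the right cell of the shared array; a self-contained sketch of that pattern with a made-up square job:

from multiprocessing import Pool
import numpy as np

def square(x):          # stand-in for min_traveltime
    return x * x

def store_at(index, array):
    def _(result):      # runs in the parent process when the job finishes
        array[index] = result
    return _

if __name__ == '__main__':
    results = np.full(5, np.nan)
    pool = Pool()
    for i in range(5):
        pool.apply_async(square, (i,), callback=store_at(i, results))
    pool.close()
    pool.join()
    print(results)      # [ 0.  1.  4.  9. 16.]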
def periodic_fit(t, y, dy, frequency, t_fit,
                 center_data=True, fit_bias=True, nterms=1):
    """Compute the Lomb-Scargle model fit at a given frequency

    Parameters
    ----------
    t, y, dy : float or array_like
        The times, observations, and uncertainties to fit
    frequency : float
        The frequency at which to compute the model
    t_fit : float or array_like
        The times at which the fit should be computed
    center_data : bool (default=True)
        If True, center the input data before applying the fit
    fit_bias : bool (default=True)
        If True, include the bias as part of the model
    nterms : int (default=1)
        The number of Fourier terms to include in the fit

    Returns
    -------
    y_fit : ndarray
        The model fit evaluated at each value of t_fit
    """
    if dy is None:
        dy = 1
    t, y, dy = np.broadcast_arrays(t, y, dy)
    t_fit = np.asarray(t_fit)

    assert t.ndim == 1
    assert t_fit.ndim == 1
    assert np.isscalar(frequency)

    if center_data:
        w = dy ** -2.0
        y_mean = np.dot(y, w) / w.sum()
        y = (y - y_mean)
    else:
        y_mean = 0

    X = design_matrix(t, frequency, dy=dy, bias=fit_bias, nterms=nterms)
    theta_MLE = np.linalg.solve(np.dot(X.T, X),
                                np.dot(X.T, y / dy))
    X_fit = design_matrix(t_fit, frequency, bias=fit_bias, nterms=nterms)
    return y_mean + np.dot(X_fit, theta_MLE)
def lombscargle_scipy(t, y, frequency, normalization='normalized',
                      center_data=True):
    """Lomb-Scargle Periodogram

    This is a wrapper of ``scipy.signal.lombscargle`` for computation of the
    Lomb-Scargle periodogram. This is a relatively fast version of the naive
    O[N^2] algorithm, but cannot handle heteroskedastic errors.

    Parameters
    ----------
    t, y : array_like
        times and values of the data points. These should be
        broadcastable to the same shape.
    frequency : array_like
        frequencies (not angular frequencies) at which to calculate the periodogram
    normalization : string (optional, default='normalized')
        Normalization to use for the periodogram
        TODO: figure out what options to use
    center_data : bool (optional, default=True)
        if True, pre-center the data by subtracting the weighted mean
        of the input data.

    Returns
    -------
    power : array_like
        Lomb-Scargle power associated with each frequency.
        Units of the result depend on the normalization.

    References
    ----------
    .. [1] M. Zechmeister and M. Kurster, A&A 496, 577-584 (2009)
    .. [2] W. Press et al, Numerical Recipes in C (2002)
    .. [3] Scargle, J.D. 1982, ApJ 263:835-853
    """
    if not HAS_SCIPY:
        raise ValueError("scipy must be installed to use lombscargle_scipy")

    t, y = np.broadcast_arrays(t, y)
    frequency = np.asarray(frequency)
    assert t.ndim == 1
    assert frequency.ndim == 1

    if center_data:
        y = y - y.mean()

    # Note: scipy's input is in angular frequencies
    p = signal.lombscargle(t, y, 2 * np.pi * frequency)

    if normalization == 'unnormalized':
        pass
    elif normalization == 'normalized':
        p *= 2 / (t.size * np.mean(y ** 2))
    else:
        raise ValueError("normalization='{0}' "
                         "not recognized".format(normalization))
    return p
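A hedged usage sketch mirroring the wrapper's internals directly against scipy.signal.lombscargle (the synthetic 1.5-cycle signal and sample sizes are made up; assumes scipy is installed):

import numpy as np
from scipy import signal

rng = np.random.default_rng(0)
t = np.sort(rng.uniform(0, 10, 200))                  # irregular sample times
y = np.sin(2 * np.pi * 1.5 * t) + 0.1 * rng.standard_normal(t.size)
freqs = np.linspace(0.1, 3.0, 500)                    # ordinary (not angular) frequencies

y_centered = y - y.mean()
p = signal.lombscargle(t, y_centered, 2 * np.pi * freqs)
p *= 2 / (t.size * np.mean(y_centered ** 2))          # the 'normalized' scaling above
print(round(freqs[p.argmax()], 2))                    # peaks near the true frequency, 1.5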