import numpy as np  # all snippets on this page assume numpy is imported as np

def remap_band1(layer_type, date_conf_array, lookup_dict):
    band1_array = np.copy(date_conf_array)

    # Remap the band1_array to reflect total_days / 255
    if layer_type == 'glad':
        # create a zero-filled array for our offsets so that values outside
        # the encoded ranges are left unchanged
        offset = np.zeros_like(band1_array)

        # populate it with the proper offset value depending on band1_array values
        offset[np.logical_and(band1_array >= 20000, band1_array < 30000)] = 20000
        offset[np.logical_and(band1_array >= 30000, band1_array < 40000)] = 30000

        # subtract the offset to remove the confidence digit (ten-thousands place)
        # and divide by 255
        band1_array = (band1_array - offset) / 255

    else:
        for k, v in lookup_dict.items():
            band1_array[band1_array == k] = v[0]

    return band1_array
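A minimal usage sketch for the GLAD branch (the input values below are made up for illustration; the 2 or 3 in the ten-thousands digit is the GLAD confidence code):

import numpy as np

date_conf = np.array([20100, 20510, 30255, 30001])
scaled = remap_band1('glad', date_conf, lookup_dict={})
# offsets 20000/30000 are stripped, leaving days / 255:
# approximately [0.392, 2.0, 1.0, 0.004]
print(scaled)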
Python empty_like() example source code
def __mul__(self, other):
    """Multiply with a single Gaussian."""
    assert isinstance(other, Gaussian)

    ys = [x * other for x in self.xs]

    lcs = np.empty_like(self.a)

    for i, (x, y) in enumerate(zip(self.xs, ys)):
        lcs[i] = x.logdetP + other.logdetP - y.logdetP
        lcs[i] -= np.dot(x.m, np.dot(x.P, x.m)) + np.dot(other.m, np.dot(other.P, other.m)) - np.dot(y.m, np.dot(y.P, y.m))
        lcs[i] *= 0.5

    la = np.log(self.a) + lcs
    la -= scipy.special.logsumexp(la)  # normalize mixture weights in log space
    a = np.exp(la)

    return MoG(a=a, xs=ys)
def __truediv__(self, other):  # `__div__` under Python 2
    """Divide by a single Gaussian."""
    assert isinstance(other, Gaussian)

    ys = [x / other for x in self.xs]

    lcs = np.empty_like(self.a)

    for i, (x, y) in enumerate(zip(self.xs, ys)):
        lcs[i] = x.logdetP - other.logdetP - y.logdetP
        lcs[i] -= np.dot(x.m, np.dot(x.P, x.m)) - np.dot(other.m, np.dot(other.P, other.m)) - np.dot(y.m, np.dot(y.P, y.m))
        lcs[i] *= 0.5

    la = np.log(self.a) + lcs
    la -= scipy.special.logsumexp(la)  # normalize mixture weights in log space
    a = np.exp(la)

    return MoG(a=a, xs=ys)
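Both methods normalize the updated mixture weights in log space, which avoids underflow when the unnormalized log-weights are very negative. A standalone sketch of that pattern:

import numpy as np
from scipy.special import logsumexp

log_w = np.array([-1000.0, -1001.0, -1002.0])  # np.exp would underflow to 0
log_w -= logsumexp(log_w)                      # normalize in log space first
w = np.exp(log_w)
print(w, w.sum())                              # valid weights that sum to 1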
def standardDeviation2d(img, ksize=5, blurred=None):
    '''
    calculate the spatially resolved standard deviation
    for a given 2d array

    ksize -> kernel size

    blurred (optional) -> image blurred with a Gaussian of the same ksize;
                          passing it in reduces processing time
    '''
    if not isinstance(ksize, (list, tuple)):
        ksize = (ksize, ksize)

    if blurred is None:
        blurred = gaussian_filter(img, ksize)
    else:
        assert blurred.shape == img.shape

    std = np.empty_like(img)
    # `_calc` is a compiled helper defined elsewhere in the module
    _calc(img, ksize[0], ksize[1], blurred, std)
    return std
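The `_calc` helper is not shown here. A pure-SciPy sketch of the same quantity uses the identity var(x) = E[x^2] - E[x]^2 with Gaussian-weighted local means (a rough equivalent, not the author's implementation):

import numpy as np
from scipy.ndimage import gaussian_filter

def standard_deviation_2d(img, sigma=5):
    img = img.astype(float)
    local_mean = gaussian_filter(img, sigma)
    local_mean_sq = gaussian_filter(img ** 2, sigma)
    var = np.clip(local_mean_sq - local_mean ** 2, 0, None)  # clip rounding noise
    return np.sqrt(var)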
def mask_randomly(self, imgs):
    y1 = np.random.randint(0, self.img_rows - self.mask_height, imgs.shape[0])
    y2 = y1 + self.mask_height
    # the x-range is bounded by the image width (img_cols), not its height
    x1 = np.random.randint(0, self.img_cols - self.mask_width, imgs.shape[0])
    x2 = x1 + self.mask_width

    masked_imgs = np.empty_like(imgs)
    missing_parts = np.empty((imgs.shape[0], self.mask_height, self.mask_width, self.channels))
    for i, img in enumerate(imgs):
        masked_img = img.copy()
        _y1, _y2, _x1, _x2 = y1[i], y2[i], x1[i], x2[i]
        missing_parts[i] = masked_img[_y1:_y2, _x1:_x2, :].copy()
        masked_img[_y1:_y2, _x1:_x2, :] = 0
        masked_imgs[i] = masked_img

    return masked_imgs, missing_parts, (y1, y2, x1, x2)
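A standalone sketch of the same masking logic for a single image, with illustrative sizes in place of the class attributes:

import numpy as np

img = np.random.rand(32, 32, 3)                 # hypothetical 32x32 RGB image
mask_h, mask_w = 8, 8
y1 = np.random.randint(0, img.shape[0] - mask_h)
x1 = np.random.randint(0, img.shape[1] - mask_w)
missing = img[y1:y1 + mask_h, x1:x1 + mask_w].copy()  # ground-truth patch
masked = img.copy()
masked[y1:y1 + mask_h, x1:x1 + mask_w] = 0            # zero out the hole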
frame.py source code (project: PyDataLondon29-EmbarrassinglyParallelDAWithAWSLambda, author: SignalMedia)
def _apply_broadcast(self, func, axis):
    if axis == 0:
        target = self
    elif axis == 1:
        target = self.T
    else:  # pragma: no cover
        raise AssertionError('Axis must be 0 or 1, got %s' % axis)

    result_values = np.empty_like(target.values)
    columns = target.columns
    for i, col in enumerate(columns):
        result_values[:, i] = func(target[col])

    result = self._constructor(result_values, index=target.index,
                               columns=target.columns)

    if axis == 1:
        result = result.T

    return result
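`_apply_broadcast` is a pandas internal; the public route to the same behavior in recent pandas versions is `DataFrame.apply` with `result_type='broadcast'` (a sketch, assuming pandas >= 0.23):

import numpy as np
import pandas as pd

df = pd.DataFrame(np.arange(6).reshape(3, 2), columns=['a', 'b'])
# each column's mean is broadcast back to the frame's original shape
out = df.apply(lambda col: col.mean(), axis=0, result_type='broadcast')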
def _from_rotvec_array(rv):
    norm = np.linalg.norm(rv, axis=1)
    norm2 = norm ** 2
    norm4 = norm2 ** 2

    # Use Taylor expansions of the Rodrigues coefficients for small rotations
    # to avoid dividing by a vanishing norm.
    k1 = np.empty_like(norm2)
    k2 = np.empty_like(norm2)

    small = norm2 < 1e-6
    k1[small] = 1 - norm2[small] / 6 + norm4[small] / 120
    k2[small] = 0.5 - norm2[small] / 24 + norm4[small] / 720

    big = ~small
    k1[big] = np.sin(norm[big]) / norm[big]
    k2[big] = (1 - np.cos(norm[big])) / norm2[big]

    skew = _skew_matrix_array(rv)
    skew_squared = np.einsum('...ij,...jk->...ik', skew, skew)

    identity = np.empty_like(skew)
    identity[:] = np.identity(3)

    return (identity +
            k1[:, np.newaxis, np.newaxis] * skew +
            k2[:, np.newaxis, np.newaxis] * skew_squared)
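A quick cross-check of this batched Rodrigues formula against SciPy's `Rotation` class (a sketch; assumes scipy >= 1.4 for `as_matrix` and that `_skew_matrix_array` from the surrounding module is available):

import numpy as np
from scipy.spatial.transform import Rotation

rv = np.random.randn(5, 3) * 0.3
expected = Rotation.from_rotvec(rv).as_matrix()
np.testing.assert_allclose(_from_rotvec_array(rv), expected, atol=1e-12)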
def _dtheta_from_omega_matrix(theta):
    norm = np.linalg.norm(theta, axis=1)
    k = np.empty_like(norm)

    mask = norm > 1e-4
    nm = norm[mask]
    k[mask] = (1 - 0.5 * nm / np.tan(0.5 * nm)) / nm**2
    mask = ~mask
    nm = norm[mask]
    k[mask] = 1/12 + 1/720 * nm**2

    A = np.empty((norm.shape[0], 3, 3))
    skew = _skew_matrix_array(theta)
    A[:] = np.identity(3)
    A[:] += 0.5 * skew
    A[:] += k[:, None, None] * util.mm_prod(skew, skew)

    return A
def _omega_from_dtheta_matrix(theta):
    norm = np.linalg.norm(theta, axis=1)
    k1 = np.empty_like(norm)
    k2 = np.empty_like(norm)

    mask = norm > 1e-4
    nm = norm[mask]
    k1[mask] = (1 - np.cos(nm)) / nm**2
    k2[mask] = (nm - np.sin(nm)) / nm**3
    mask = ~mask
    nm = norm[mask]
    k1[mask] = 0.5 - nm**2 / 24
    k2[mask] = 1/6 - nm**2 / 120

    A = np.empty((norm.shape[0], 3, 3))
    skew = _skew_matrix_array(theta)
    A[:] = np.identity(3)
    A[:] -= k1[:, None, None] * skew
    A[:] += k2[:, None, None] * util.mm_prod(skew, skew)

    return A
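These two matrices are inverse linear maps between the rotation-vector rate and the angular velocity, so their product should be the identity; a numeric sanity check (a sketch, assuming the surrounding module supplies `_skew_matrix_array` and `util.mm_prod`):

import numpy as np

theta = np.random.randn(4, 3) * 0.5
prod = np.einsum('nij,njk->nik',
                 _dtheta_from_omega_matrix(theta),
                 _omega_from_dtheta_matrix(theta))
np.testing.assert_allclose(prod, np.broadcast_to(np.identity(3), prod.shape),
                           atol=1e-9)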
def test_coning_sculling():
    # Basically a smoke test, because the function is quite simple.
    gyro = np.zeros((10, 3))
    gyro[:, 0] = 0.01
    gyro[:, 2] = -0.01

    accel = np.zeros((10, 3))
    accel[:, 2] = 0.1

    dv_true = np.empty_like(accel)
    dv_true[:, 0] = 0
    dv_true[:, 1] = -0.5e-3
    dv_true[:, 2] = 0.1

    theta, dv = coning_sculling(gyro, accel)
    assert_allclose(theta, gyro, rtol=1e-10)
    assert_allclose(dv, dv_true, rtol=1e-10)
def residual(r, theta, u, d):
    out = np.empty_like(u)
    out[0] = (2*np.sin(theta)*r*d(u[0], 1, 0)
              + r*r*np.sin(theta)*d(u[0], 2, 0)
              + np.cos(theta)*d(u[0], 0, 1)
              + np.sin(theta)*d(u[1], 0, 2))
    out[1] = (2*np.sin(theta)*r*d(u[1], 1, 0)
              + r*r*np.sin(theta)*d(u[1], 2, 0)
              + np.cos(theta)*d(u[1], 0, 1)
              + np.sin(theta)*d(u[1], 0, 2))
    return out
def DEFAULT_BDRY_THETA_MIN(r, u, d):
    ushape = u.shape
    num_vars = np.prod(ushape[:-2])
    uflat = u.reshape(tuple([num_vars]) + ushape[-2:])
    out = np.empty_like(uflat)
    for i in range(out.shape[0]):
        out[i] = d(uflat[i], 0, 1)
    # reshape returns a new view; the result must be assigned, not discarded
    out = out.reshape(ushape)
    return out
def DEFAULT_BDRY_THETA_MAX(r, u, d):
    ushape = u.shape
    num_vars = np.prod(ushape[:-2])
    uflat = u.reshape(tuple([num_vars]) + ushape[-2:])
    out = np.empty_like(uflat)
    for i in range(out.shape[0]):
        out[i] = d(uflat[i], 0, 1)
    # reshape returns a new view; the result must be assigned, not discarded
    out = out.reshape(ushape)
    return out
def dsdt(t, s, params):
    """Wrapper for system derivative with respect to time"""
    derivs = np.empty_like(s)
    eps1, eps2, perturb_params, p_lambda1, p_lambda2, p_k1, p_k2, p_f, \
        p_m1, p_m2, p_lambdaE, p_bE, p_Kb, p_d_E, p_Kd = params
    dsdt_(derivs, s, t, eps1, eps2, perturb_params, p_lambda1, p_lambda2, p_k1,
          p_k2, p_f, p_m1, p_m2, p_lambdaE, p_bE, p_Kb, p_d_E, p_Kd)
    return derivs
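This wrapper follows the `fun(t, y, *args)` convention used by SciPy's integrators; a self-contained toy example of the same shape (exponential decay with a made-up parameter tuple, not the model above):

import numpy as np
from scipy.integrate import solve_ivp

def toy_dsdt(t, s, params):
    decay_rate, = params
    derivs = np.empty_like(s)
    derivs[:] = -decay_rate * s
    return derivs

sol = solve_ivp(toy_dsdt, (0.0, 5.0), np.array([1.0, 2.0]), args=((0.7,),))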
def dsdt(self, s_augmented, t):
    derivs = np.empty_like(s_augmented)
    self._dsdt(derivs, s_augmented, t)
    return derivs
def log_loss_value_from_scores(weights, total_weights, scores):
    """
    computes the logistic loss value from a vector of scores in a numerically stable way
    where scores = Z.dot(rho)

    see also: http://stackoverflow.com/questions/20085768/

    this function is used for heuristics (discrete_descent, sequential_rounding).
    to save computation when running the heuristics, we store the scores and
    call this function to compute the loss directly from the scores,
    which avoids recomputing the dot product.

    Parameters
    ----------
    scores          numpy.array of scores = Z.dot(rho)
    total_weights   scalar = numpy.sum(weights) (passed in to avoid recomputation)
    weights         numpy.array of sample weights with shape (n_rows,)

    Returns
    -------
    loss_value      scalar = weighted mean of log(1 + exp(-scores))

    """
    pos_idx = scores > 0
    loss_value = np.empty_like(scores)
    loss_value[pos_idx] = np.log1p(np.exp(-scores[pos_idx]))
    loss_value[~pos_idx] = -scores[~pos_idx] + np.log1p(np.exp(scores[~pos_idx]))
    loss_value = loss_value.dot(weights) / total_weights
    return loss_value
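Why the branch on `pos_idx` matters: the naive formula overflows for large negative scores, while the split form stays finite (illustrative values):

import numpy as np

s = np.array([-800.0, 800.0])
naive = np.log(1 + np.exp(-s))  # exp(800) overflows: [inf, 0.]
stable = np.where(s > 0,
                  np.log1p(np.exp(-np.abs(s))),
                  -s + np.log1p(np.exp(-np.abs(s))))
print(naive, stable)            # the stable form gives [800., 0.]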
def log_loss_value_and_slope(Z, rho):
    """
    computes the value and slope of the logistic loss in a numerically stable way
    this function should only be used when generating cuts in cutting-plane algorithms
    (computing both the value and the slope at the same time is slightly cheaper)

    see also: http://stackoverflow.com/questions/20085768/

    Parameters
    ----------
    Z           numpy.array containing training data with shape = (n_rows, n_cols)
    rho         numpy.array of coefficients with shape = (n_cols,)

    Returns
    -------
    loss_value  scalar = (1/n_rows) * sum(log(1 + exp(-Z.dot(rho))))
    loss_slope  numpy.array of shape (n_cols,) = (1/n_rows) * Z.T.dot(sigmoid(Z.dot(rho)) - 1)

    """
    scores = Z.dot(rho)
    pos_idx = scores > 0
    exp_scores_pos = np.exp(-scores[pos_idx])
    exp_scores_neg = np.exp(scores[~pos_idx])

    # compute loss value
    loss_value = np.empty_like(scores)
    loss_value[pos_idx] = np.log1p(exp_scores_pos)
    loss_value[~pos_idx] = -scores[~pos_idx] + np.log1p(exp_scores_neg)
    loss_value = loss_value.mean()

    # compute loss slope (log_probs holds sigmoid(scores), i.e. probabilities)
    log_probs = np.empty_like(scores)
    log_probs[pos_idx] = 1.0 / (1.0 + exp_scores_pos)
    log_probs[~pos_idx] = exp_scores_neg / (1.0 + exp_scores_neg)
    loss_slope = Z.T.dot(log_probs - 1.0) / Z.shape[0]

    return loss_value, loss_slope
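A finite-difference check of the returned slope (a sketch with random data, reusing the function defined above):

import numpy as np

rng = np.random.default_rng(0)
Z = rng.normal(size=(50, 3))
rho = rng.normal(size=3)
_, slope = log_loss_value_and_slope(Z, rho)
eps = 1e-6
for j in range(3):
    e = np.zeros(3)
    e[j] = eps
    fd = (log_loss_value_and_slope(Z, rho + e)[0] -
          log_loss_value_and_slope(Z, rho - e)[0]) / (2 * eps)
    assert abs(fd - slope[j]) < 1e-6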
def _ecdf_formal(x, data):
    """
    Compute the values of the formal ECDF generated from `data` at x.
    I.e., if F is the ECDF, return F(x).

    Parameters
    ----------
    x : array_like
        Positions at which the formal ECDF is to be evaluated.
    data : array_like
        *Sorted* data set to use to generate the ECDF.

    Returns
    -------
    output : float or ndarray
        Value of the ECDF at `x`.
    """
    output = np.empty_like(x, dtype=float)
    for i, x_val in enumerate(x):
        # count how many of the sorted data points are <= x_val
        j = 0
        while j < len(data) and x_val >= data[j]:
            j += 1
        output[i] = j
    return output / len(data)
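The loop above is O(len(x) * len(data)). Since `data` is sorted, the same values come from one vectorized binary search (an equivalent sketch):

import numpy as np

def ecdf_formal_fast(x, data):
    # index of the first element > x equals the count of elements <= x
    return np.searchsorted(data, x, side='right') / len(data)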