# Shared imports for the snippets below (added).
import itertools

import numpy as np

def roll_zeropad(a, shift, axis=None):
    """Roll array elements along a given axis, filling the vacated
    positions with zeros instead of wrapping around (cf. np.roll)."""
    a = np.asanyarray(a)
    if shift == 0:
        return a
    if axis is None:
        n = a.size
        reshape = True
    else:
        n = a.shape[axis]
        reshape = False
    if np.abs(shift) > n:
        res = np.zeros_like(a)
    elif shift < 0:
        shift += n
        zeros = np.zeros_like(a.take(np.arange(n - shift), axis))
        res = np.concatenate((a.take(np.arange(n - shift, n), axis), zeros), axis)
    else:
        zeros = np.zeros_like(a.take(np.arange(n - shift, n), axis))
        res = np.concatenate((zeros, a.take(np.arange(n - shift), axis)), axis)
    if reshape:
        return res.reshape(a.shape)
    return res
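# Usage sketch (added; assumes only numpy): unlike np.roll, which wraps
# elements around, roll_zeropad fills the vacated slots with zeros.
demo = np.arange(1, 6)            # [1 2 3 4 5]
print(roll_zeropad(demo, 2))      # [0 0 1 2 3]
print(np.roll(demo, 2))           # [4 5 1 2 3]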
def test_quantize_from_probs2(size, resolution):
    # set_random_seed, make_seed and quantize_from_probs2 are defined elsewhere.
    set_random_seed(make_seed(size, resolution))
    probs = np.exp(np.random.random(size)).astype(np.float32)
    probs2 = probs.reshape((1, size))
    quantized = quantize_from_probs2(probs2, resolution)
    assert quantized.shape == probs2.shape
    assert quantized.dtype == np.int8
    assert np.all(quantized.sum(axis=1) == resolution)
    # Check that the quantized result is closer to the target than any
    # other 0/1 allocation of `resolution` units.
    quantized = quantized.reshape((size,))
    target = resolution * probs / probs.sum()
    distance = np.abs(quantized - target).sum()
    for combo in itertools.combinations(range(size), resolution):
        other = np.zeros(size, np.int8)
        for i in combo:
            other[i] += 1
        assert other.sum() == resolution
        other_distance = np.abs(other - target).sum()
        assert distance <= other_distance
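# The quantize_from_probs2 under test is defined elsewhere. The sketch below
# is only a hypothetical implementation consistent with the assertions above:
# a largest-remainder quantizer, which is L1-optimal among nonnegative integer
# vectors with the required row sum.
def quantize_from_probs2_sketch(probs2, resolution):
    probs2 = probs2 / probs2.sum(axis=1, keepdims=True)
    target = resolution * probs2
    out = np.floor(target).astype(np.int8)
    remainder = target - np.floor(target)
    for row in range(out.shape[0]):
        # Hand the remaining units to the bins with the largest remainders.
        short = int(resolution - out[row].sum())
        out[row, np.argsort(-remainder[row])[:short]] += 1
    return out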
def contest(self, b, g, r):
    """Search for a biased BGR value.

    Finds the closest neuron (min dist) and updates self.freq;
    finds the best neuron (min dist - self.bias) and returns its position.
    For frequently chosen neurons, self.freq[i] is high and self.bias[i]
    is negative: self.bias[i] = self.GAMMA * ((1 / self.NETSIZE) - self.freq[i]).
    """
    i, j = self.SPECIALS, self.NETSIZE
    dists = abs(self.network[i:j] - np.array([b, g, r])).sum(1)
    bestpos = i + np.argmin(dists)
    biasdists = dists - self.bias[i:j]
    bestbiaspos = i + np.argmin(biasdists)
    self.freq[i:j] *= (1 - self.BETA)
    self.bias[i:j] += self.BETAGAMMA * self.freq[i:j]
    self.freq[bestpos] += self.BETA
    self.bias[bestpos] -= self.BETAGAMMA
    return bestbiaspos
def Kdim(self, kdimParams):
    if (self.prevKdimParams is not None
            and np.max(np.abs(kdimParams - self.prevKdimParams)) < self.epsilon):
        return self.cache['Kdim']
    K = np.zeros((self.n, self.n, len(self.kernels)))
    params_ind = 0
    for k_i, k in enumerate(self.kernels):
        numHyp = k.getNumParams()
        kernelParams_range = np.arange(params_ind, params_ind + numHyp, dtype=int)
        kernel_params = kdimParams[kernelParams_range]
        # Reuse the cached slice when this kernel's parameters are unchanged.
        if ((numHyp == 0 and 'Kdim' in self.cache)
                or (numHyp > 0 and self.prevKdimParams is not None
                    and np.max(np.abs(kernel_params - self.prevKdimParams[kernelParams_range])) < self.epsilon)):
            K[:, :, k_i] = self.cache['Kdim'][:, :, k_i]
        else:
            K[:, :, k_i] = k.getTrainKernel(kernel_params)
        params_ind += numHyp
    self.prevKdimParams = kdimParams.copy()
    self.cache['Kdim'] = K
    return K
def mypsd(Rates, time_range, bin_w=5., nmax=4000):
    bins = np.arange(0, len(time_range), 1)
    a, b = np.histogram(Rates, bins)
    # Periodogram of the mean-subtracted rates.
    ff = (1. / len(bins)) * abs(np.fft.fft(Rates - np.mean(Rates))) ** 2
    Fs = 1. / (1 * 0.001)
    freq2 = np.fft.fftfreq(len(bins))[0:len(bins) // 2 + 1]  # d = dt
    freq = np.fft.fftfreq(len(bins))[:len(ff) // 2 + 1]
    px = ff[0:len(ff) // 2 + 1]
    max_px = np.max(px[1:])
    idx = px == max_px
    # np.flatnonzero replaces the removed pylab find helper.
    corr_freq = freq[np.flatnonzero(idx)]
    new_px = px
    max_pow = new_px[np.flatnonzero(idx)]
    return new_px, freq, corr_freq[0], freq2, max_pow
def scale_variance(Theta, eps):
    """Scale a precision matrix so that its corresponding
    covariance matrix has unit variances.

    Parameters
    ----------
    Theta : ndarray
        Precision matrix.
    eps : float
        Threshold below which entries are set to zero.

    Returns
    -------
    Theta : ndarray
        Precision of the rescaled Sigma.
    Sigma : ndarray
        Covariance with ones on the diagonal.
    """
    Sigma = np.linalg.inv(Theta)
    V = np.diag(np.sqrt(np.diag(Sigma) ** -1))
    Sigma = V.dot(Sigma).dot(V.T)  # = V Sigma V^T
    Theta = np.linalg.inv(Sigma)
    Theta[np.abs(Theta) <= eps] = 0.
    return Theta, Sigma
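# Quick sanity check (added) on a toy 2x2 precision matrix: after rescaling,
# the covariance diagonal is all ones by construction.
Theta_demo = np.array([[2.0, 0.6], [0.6, 1.0]])
Theta_r, Sigma_r = scale_variance(Theta_demo, eps=1e-12)
print(np.diag(Sigma_r))  # [1. 1.]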
def idamax(a):
    """Return the index of the entry with maximum absolute value
    (positive or negative) in the input array a.

    Note: loosely based on a GAMESS subroutine of the same name.

    Arguments:
    a -- a numpy array in which to find the entry of maximum
         absolute value

    Returns:
    the index in the array where that entry is.
    """
    idx = -1
    v = 0.0
    for i, value in enumerate(numpy.abs(a)):
        if value > v:
            idx = i
            v = value
    return idx
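# Usage sketch (added; numpy is assumed imported as in the snippet): the
# entry with the largest magnitude wins, regardless of sign.
print(idamax(numpy.array([1.0, -5.0, 3.0])))  # 1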
def idamin(a):
    """Return the index of the entry with minimum absolute value
    (positive or negative) in the input array a.

    Arguments:
    a -- a numpy array in which to find the entry of minimum
         absolute value

    Returns:
    the index in the array where that entry is.
    """
    idx = -1
    v = 1.0e30
    for i, value in enumerate(numpy.abs(a)):
        if value < v:
            idx = i
            v = value
    return idx
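# Note (added): for typical inputs both loops reduce to numpy one-liners,
#   idamax(a) ~ int(numpy.argmax(numpy.abs(a)))
#   idamin(a) ~ int(numpy.argmin(numpy.abs(a)))
# except that idamax returns -1 for an empty or all-zero array and idamin
# returns -1 for an empty one, while argmax/argmin would return 0 or raise.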
def fCauchy(ftrue, alpha, p):
    """Return a Cauchy-noise-perturbed value.

    Cauchy noise with median 1e3 * alpha, applied with probability p = 0.2,
    zero otherwise. P(Cauchy > 1, 10, 100, 1000) = 0.25, 0.032, 0.0032, 0.00032.
    """
    # expects ftrue to be a np.array
    popsi = np.shape(ftrue)
    fval = ftrue + alpha * np.maximum(0., 1e3 + (_rand(popsi) < p) *
                                      _randn(popsi) / (np.abs(_randn(popsi)) + 1e-199))
    tol = 1e-8
    fval = fval + 1.01 * tol
    idx = ftrue < tol
    try:
        fval[idx] = ftrue[idx]
    except IndexError:  # fval is a scalar
        if idx:
            fval = ftrue
    return fval
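# Empirical check (added) of the tail probabilities quoted in the docstring,
# using np.random in place of the module's _rand/_randn helpers: the ratio of
# two independent standard normals is standard Cauchy.
rng = np.random.default_rng(0)
cauchy = rng.standard_normal(10**6) / (np.abs(rng.standard_normal(10**6)) + 1e-199)
for t in (1, 10, 100, 1000):
    print(t, np.mean(cauchy > t))  # ~0.25, 0.032, 0.0032, 0.00032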
### CLASS DEFINITION ###
def initwithsize(self, curshape, dim):
    # DIM-dependent initialization
    if self.dim != dim:
        if self.zerox:
            self.xopt = zeros(dim)
        else:
            self.xopt = compute_xopt(self.rseed, dim)
        self.xopt[:min(dim, self.maxindex):2] = abs(self.xopt[:min(dim, self.maxindex):2])
        self.scales = (self.condition ** .5) ** linspace(0, 1, dim)
    # DIM- and POPSI-dependent initialisations of DIM*POPSI matrices
    if self.lastshape != curshape:
        self.dim = dim
        self.lastshape = curshape
        self.arrxopt = resize(self.xopt, curshape)
        self.arrscales = resize(self.scales, curshape)
def _evalfull(self, x):
    fadd = self.fopt
    curshape, dim = self.shape_(x)
    # it is assumed x are row vectors
    if self.lastshape != curshape:
        self.initwithsize(curshape, dim)
    # BOUNDARY HANDLING
    fadd = fadd + self.boundaryhandling(x)
    # TRANSFORMATION IN SEARCH SPACE
    x = x - self.arrxopt  # cannot be replaced with x -= arrxopt!
    x = dot(x, self.rotation)
    # COMPUTATION core
    ftrue = np.sqrt(np.sum(np.abs(x) ** self.arrexpo, -1))
    fval = self.noise(ftrue)
    # FINALIZE
    ftrue += fadd
    fval += fadd
    return fval, ftrue
def initwithsize(self, curshape, dim):
    # DIM-dependent initialization
    if self.dim != dim:
        if self.zerox:
            self.xopt = zeros(dim)
        else:
            self.xopt = 0.5 * sign(unif(dim, self.rseed) - 0.5) * 4.2096874633
        self.scales = (self.condition ** .5) ** np.linspace(0, 1, dim)
    # DIM- and POPSI-dependent initialisations of DIM*POPSI matrices
    if self.lastshape != curshape:
        self.dim = dim
        self.lastshape = curshape
        self.arrxopt = resize(2 * np.abs(self.xopt), curshape)
        self.arrscales = resize(self.scales, curshape)
        self.arrsigns = resize(sign(self.xopt), curshape)
def initialize(self, length=None):
    """see ``__init__``"""
    if length is None:
        length = len(self.bounds)
    max_i = min((len(self.bounds) - 1, length - 1))
    self._lb = array([self.bounds[min((i, max_i))][0]
                      if self.bounds[min((i, max_i))][0] is not None
                      else -np.inf
                      for i in range(length)], copy=False)
    self._ub = array([self.bounds[min((i, max_i))][1]
                      if self.bounds[min((i, max_i))][1] is not None
                      else np.inf
                      for i in range(length)], copy=False)
    lb = self._lb
    ub = self._ub
    # define added values for lower and upper bound
    self._al = array([min([(ub[i] - lb[i]) / 2, (1 + np.abs(lb[i])) / 20])
                      if isfinite(lb[i]) else 1 for i in rglen(lb)], copy=False)
    self._au = array([min([(ub[i] - lb[i]) / 2, (1 + np.abs(ub[i])) / 20])
                      if isfinite(ub[i]) else 1 for i in rglen(ub)], copy=False)
def update_measure(self):
    """Update the noise level measure using the two fitness lists
    ``self.fit`` and ``self.fitre``; return
    ``self.noiseS, all_individual_measures``.

    Assumes that ``self.idx`` contains the indices where the fitness
    lists differ.
    """
    lam = len(self.fit)
    idx = np.argsort(self.fit + self.fitre)
    ranks = np.argsort(idx).reshape((2, lam))
    rankDelta = ranks[0] - ranks[1] - np.sign(ranks[0] - ranks[1])
    # compute rank change limits using both ranks[0] and ranks[1]
    r = np.arange(1, 2 * lam)  # 2 * lam - 1 elements
    limits = [0.5 * (Mh.prctile(np.abs(r - (ranks[0, i] + 1 - (ranks[0, i] > ranks[1, i]))),
                                self.theta * 50) +
                     Mh.prctile(np.abs(r - (ranks[1, i] + 1 - (ranks[1, i] > ranks[0, i]))),
                                self.theta * 50))
              for i in self.idx]
    # compute measurement
    # max: 1 rank change in 2*lambda is always fine
    s = np.abs(rankDelta[self.idx]) - Mh.amax(limits, 1)  # lives roughly in 0..2*lambda
    self.noiseS += self.cum * (np.mean(s) - self.noiseS)
    return self.noiseS, s
def rho(self, points):
    """Compute the goodness of fit."""
    assert self._solved, 'you need to solve first.'
    m, n = self.A.shape
    # numer = [np.abs(np.dot(self.c, point) - np.dot(self.dual, self.b)) / np.abs(np.dot(self.dual, self.b)) for point in points]
    numer = [np.abs(np.dot(self.c, point) - 1) for point in points]
    numer = sum(numer)
    denom = 0
    for i in range(m):
        # denomTerm = [np.abs(np.dot(self.A[i], point) - self.b[i]) / np.abs(self.b[i]) for point in points]
        denomTerm = [
            np.abs(
                np.dot(self.A[i] / np.linalg.norm(
                    self.A[i].T, self.normalize_c), point) - 1)
            for point in points
        ]
        denom += sum(denomTerm)
    rho = 1 - numer / denom
    return rho[0, 0]
def rho(self, points):
    """Compute the goodness of fit."""
    assert self._solved, 'you need to solve first.'
    m, n = self.A.shape
    numer = [
        np.abs(np.dot(self.c, point) - np.dot(self.dual, self.b))
        for point in points
    ]
    numer = sum(numer)
    denom = 0
    for i in range(m):
        denomTerm = [
            np.abs(np.dot(self.A[i], point) - self.b[i]) / np.linalg.norm(
                self.A[i].T, self.normalize_c) for point in points
        ]
        denom += sum(denomTerm)
    rho = 1 - numer / denom
    return rho[0, 0]
def find_outliers(data):
    # Flag points whose absolute z-score exceeds 3.
    absolute_normalized = np.abs(zscore(data))
    return absolute_normalized > 3
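# Usage sketch (added; zscore is scipy.stats.zscore): a gross outlier in a
# largish sample is flagged. Note very small samples (n <= 10) can never
# reach |z| > 3, since max |z| is bounded by (n - 1) / sqrt(n).
rng = np.random.default_rng(0)
sample = np.concatenate([rng.standard_normal(99), [12.0]])
print(np.flatnonzero(find_outliers(sample)))  # [99]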
def genplot(x, y, fit, xdata=None, ydata=None, maxpts=10000):
    # xdata/ydata are required in practice despite the None defaults.
    bin_range = (0, 360)
    a = np.arange(*bin_range)
    f_a = nuth_func(a, fit[0], fit[1], fit[2])
    nuth_func_str = r'$y=%0.2f*cos(%0.2f-x)+%0.2f$' % tuple(fit)
    if xdata.size > maxpts:
        import random
        idx = random.sample(list(range(xdata.size)), maxpts)
    else:
        idx = np.arange(xdata.size)
    f, ax = plt.subplots()
    ax.set_xlabel('Aspect (deg)')
    ax.set_ylabel('dh/tan(slope) (m)')
    ax.plot(xdata[idx], ydata[idx], 'k.', label='Orig pixels')
    ax.plot(x, y, 'ro', label='Bin median')
    ax.axhline(color='k')
    ax.plot(a, f_a, 'b', label=nuth_func_str)
    ax.set_xlim(*bin_range)
    pad = 0.2 * np.max([np.abs(y.min()), np.abs(y.max())])
    ax.set_ylim(y.min() - pad, y.max() + pad)
    ax.legend(prop={'size': 8})
    return f
# Function copied from openPIV pyprocess
def update(self, params, grads):
    # init
    self.iterations += 1
    a_t = self.lr / (1 - np.power(self.beta1, self.iterations))  # bias-corrected step size
    if self.ms is None:
        self.ms = [_zero(p.shape) for p in params]
    if self.vs is None:
        self.vs = [_zero(p.shape) for p in params]
    # update parameters (Adamax rule: infinity-norm variant of Adam)
    for i, (m, v, p, g) in enumerate(zip(self.ms, self.vs, params, grads)):
        m = self.beta1 * m + (1 - self.beta1) * g
        v = np.maximum(self.beta2 * v, np.abs(g))
        p -= a_t * m / (v + self.epsilon)
        self.ms[i] = m
        self.vs[i] = v
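# Standalone sketch (added) of the same update rule, which is Adamax (the
# infinity-norm variant of Adam); the hyperparameter values are assumptions.
lr, beta1, beta2, epsilon = 0.05, 0.9, 0.999, 1e-8
p = np.array([1.0, -2.0]); m = np.zeros(2); v = np.zeros(2)
for t in range(1, 201):
    g = 2 * p                                   # gradient of f(p) = sum(p**2)
    m = beta1 * m + (1 - beta1) * g
    v = np.maximum(beta2 * v, np.abs(g))
    p -= (lr / (1 - beta1 ** t)) * m / (v + epsilon)
print(p)  # both coordinates head toward the minimum at 0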
def resize_image(image, target_shape, pad_value=0):
    assert isinstance(target_shape, (list, tuple))
    add_shape, subs_shape = [], []
    image_shape = image.shape
    shape_difference = np.asarray(target_shape, dtype=int) - np.asarray(image_shape, dtype=int)
    for diff in shape_difference:
        if diff < 0:
            # Crop: remove |diff| entries, split between both ends.
            subs_shape.append(np.s_[int(np.abs(np.ceil(diff / 2))):int(np.floor(diff / 2))])
            add_shape.append((0, 0))
        else:
            # Pad: add diff entries, split between both ends.
            subs_shape.append(np.s_[:])
            add_shape.append((int(np.ceil(1.0 * diff / 2)), int(np.floor(1.0 * diff / 2))))
    output = np.pad(image, tuple(add_shape), 'constant', constant_values=(pad_value, pad_value))
    output = output[tuple(subs_shape)]  # indexing with a list of slices is deprecated
    return output
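# Usage sketch (added): pad the rows and crop the columns of a 3x4 image
# to reach a (5, 2) target shape.
img = np.arange(12).reshape(3, 4)
out = resize_image(img, (5, 2))
print(out.shape)  # (5, 2)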
def __test_ks(self, x):
    x = x[~np.isnan(x)]
    n = x.size
    x.sort()
    yCDF = np.arange(1, n + 1) / float(n)
    # Keep only the last occurrence of duplicated values.
    notdup = np.hstack([np.diff(x, 1), [1]])
    notdup = notdup > 0
    x_expcdf = x[notdup]
    y_expcdf = np.hstack([[0], yCDF[notdup]])
    # Theoretical CDF of the fitted normal at the sample points.
    zScores = (x_expcdf - np.mean(x)) / np.std(x, ddof=1)
    mu = 0
    sigma = 1
    theocdf = 0.5 * erfc(-(zScores - mu) / (np.sqrt(2) * sigma))
    delta1 = y_expcdf[:-1] - theocdf
    delta2 = y_expcdf[1:] - theocdf
    deltacdf = np.abs(np.hstack([delta1, delta2]))
    KSmax = deltacdf.max()
    return KSmax
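# Cross-check sketch (added): the statistic above is a Lilliefors-style KS
# distance against a fitted normal, so on a z-scored sample it should roughly
# match scipy's one-sample KS statistic against the standard normal.
from scipy.stats import kstest
x_demo = np.random.randn(2000)
z_demo = (x_demo - x_demo.mean()) / x_demo.std(ddof=1)
print(kstest(z_demo, 'norm').statistic)  # small (around 0.01-0.03) for normal data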
def verify_result(result, expected, float_cmp, debug):
    if not isinstance(result, pd.core.series.Series):
        if result == expected:
            print('TEST OK.')
        else:
            print('TEST Failed.')
        return
    result = result.dropna()
    expected = expected.dropna()
    if debug:
        print('RESULT:')
        print(result)
        print('EXPECTED:')
        print(expected)
    if float_cmp:
        cmp = (np.abs(result - expected) < 2.631048e-06)
    else:
        cmp = (result == expected)
    if not cmp.all():
        print('TEST Failed.')
        return
    print('TEST OK.')
    return
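# Usage sketch (added): two Series that differ by less than the float tolerance.
import pandas as pd
verify_result(pd.Series([1.0, 2.0]), pd.Series([1.0, 2.0 + 1e-7]),
              float_cmp=True, debug=False)  # prints 'TEST OK.'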
def Saliency_map(image, model, preprocess, ground_truth, use_gpu=False, method=util.GradType.GUIDED):
    vis_param_dict['method'] = method
    img_tensor = preprocess(image)
    img_tensor.unsqueeze_(0)
    if use_gpu:
        img_tensor = img_tensor.cuda()
    input = Variable(img_tensor, requires_grad=True)
    if input.grad is not None:
        input.grad.data.zero_()
    model.zero_grad()
    output = model(input)
    ind = torch.LongTensor(1)
    if isinstance(ground_truth, np.int64):
        ground_truth = int(ground_truth)  # np.asscalar is deprecated
    ind[0] = ground_truth
    ind = Variable(ind)
    energy = output[0, ground_truth]
    energy.backward()
    grad = input.grad
    if use_gpu:
        return np.abs(grad.data.cpu().numpy()[0]).max(axis=0)
    return np.abs(grad.data.numpy()[0]).max(axis=0)
def nufft_scale1(N, K, alpha, beta, Nmid):
    """Calculate the image-space scaling factor."""
    alpha = numpy.real(alpha)  # a complex alpha may not work, but is tolerated
    L = len(alpha) - 1
    if L > 0:
        sn = numpy.zeros((N, 1))
        n = numpy.arange(0, N).reshape((N, 1), order='F')
        i_gam_n_n0 = 1j * (2 * numpy.pi / K) * (n - Nmid) * beta
        for l1 in range(-L, L + 1):
            alf = alpha[abs(l1)]
            if l1 < 0:
                alf = numpy.conj(alf)
            sn = sn + alf * numpy.exp(i_gam_n_n0 * l1)
    else:
        sn = numpy.dot(alpha, numpy.ones((N, 1), dtype=numpy.float32))
    return sn
def nufft_T(N, J, K, alpha, beta):
    """Equations (29) and (26) in Fessler's paper:
    create the overlapping matrix CSSC (a diagonally dominant matrix)
    of J points, then find the pseudo-inverse of CSSC."""
    L = numpy.size(alpha) - 1
    cssc = numpy.zeros((J, J))
    [j1, j2] = numpy.mgrid[1:J + 1, 1:J + 1]
    overlapping_mat = j2 - j1
    for l1 in range(-L, L + 1):
        for l2 in range(-L, L + 1):
            alf1 = alpha[abs(l1)]
            # if l1 < 0: alf1 = numpy.conj(alf1)
            alf2 = alpha[abs(l2)]
            # if l2 < 0: alf2 = numpy.conj(alf2)
            tmp = overlapping_mat + beta * (l1 - l2)
            tmp = dirichlet(1.0 * tmp / (1.0 * K / N))
            cssc = cssc + alf1 * numpy.conj(alf2) * tmp
    return mat_inv(cssc)
def fit(self, X, y=None):
    old_threshold = None
    threshold = None
    self.threshold_ = 0.0
    self._fit(X, y)
    count = 0
    # Iteratively refit on inliers until the contamination threshold stabilizes.
    while count < 100 and (old_threshold is None or abs(threshold - old_threshold) > 0.01):
        old_threshold = threshold
        ss = self.decision_function(X, y)
        threshold = percentile(ss, 100 * self.contamination)
        self._fit(X[ss > threshold], y[ss > threshold] if y is not None else None)
        count += 1
    self.threshold_ = threshold
    return self
def round_solution_pool(pool, constraints):
    pool.distinct().sort()
    P = pool.P
    L0_reg_ind = np.isnan(constraints['coef_set'].C_0j)
    L0_max = constraints['L0_max']
    rounded_pool = SolutionPool(P)
    for solution in pool.solutions:
        # sort from largest to smallest coefficients
        feature_order = np.argsort([-abs(x) for x in solution])
        rounded_solution = np.zeros(shape=(1, P))
        l0_norm_count = 0
        for k in range(0, P):
            j = feature_order[k]
            if not L0_reg_ind[j]:
                rounded_solution[0, j] = np.round(solution[j], 0)
            elif l0_norm_count < L0_max:
                rounded_solution[0, j] = np.round(solution[j], 0)
                l0_norm_count += L0_reg_ind[j]
        rounded_pool.add(objvals=np.nan, solutions=rounded_solution)
    rounded_pool.distinct().sort()
    return rounded_pool
def a_metric(solution, prediction, task='regression'):
    '''1 - mean absolute error divided by mean absolute deviation'''
    mae = mvmean(np.abs(solution - prediction))        # mean absolute error
    mad = mvmean(np.abs(solution - mvmean(solution)))  # mean absolute deviation
    score = 1 - mae / mad
    return mvmean(score)
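# Worked example (added), using plain numpy means in place of the project's
# mvmean helper:
sol, pred = np.array([1.0, 2.0, 3.0]), np.array([1.1, 1.9, 3.2])
mae = np.mean(np.abs(sol - pred))          # 0.1333...
mad = np.mean(np.abs(sol - np.mean(sol)))  # 0.6667...
print(1 - mae / mad)                       # 0.8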
### END REGRESSION METRICS
### CLASSIFICATION METRICS (work on solutions in {0, 1} and predictions in [0, 1])
# These can be computed for regression scores only after running normalize_array
def pac_metric(solution, prediction, task='binary.classification'):
    '''Probabilistic accuracy based on the log_loss metric.
    We assume the solution is in {0, 1} and the prediction in [0, 1].
    Otherwise, run normalize_array.'''
    debug_flag = False
    [sample_num, label_num] = solution.shape
    if label_num == 1:
        task = 'binary.classification'
    eps = 1e-15
    the_log_loss = log_loss(solution, prediction, task)
    # Compute the base log loss (using the prior probabilities)
    pos_num = 1. * sum(solution)  # float conversion!
    frac_pos = pos_num / sample_num  # prior probability of the positive class
    the_base_log_loss = prior_log_loss(frac_pos, task)
    # Alternative computation of the same thing (slower).
    # Should always return the same thing except in the multi-label case,
    # for which the analytic solution makes more sense.
    if debug_flag:
        base_prediction = np.empty(prediction.shape)
        for k in range(sample_num):
            base_prediction[k, :] = frac_pos
        base_log_loss = log_loss(solution, base_prediction, task)
        diff = np.array(abs(the_base_log_loss - base_log_loss))
        if len(diff.shape) > 0:
            diff = max(diff)
        if diff > 1e-10:
            print('Arrggh {} != {}'.format(the_base_log_loss, base_log_loss))
    # Exponentiate to turn into an accuracy-like score.
    # In the multi-label case, we need to average AFTER taking the exp
    # because it is a nonlinear operation.
    pac = mvmean(np.exp(-the_log_loss))
    base_pac = mvmean(np.exp(-the_base_log_loss))
    # Normalize: 0 for random, 1 for perfect
    score = (pac - base_pac) / sp.maximum(eps, (1 - base_pac))
    return score
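# Hand-rolled sketch (added) of the PAC normalization for the binary case,
# inlining the computations that log_loss and prior_log_loss perform (the
# base loss is the entropy of the positive-class prior, per the debug branch):
y = np.array([1, 0, 1, 1]); pr = np.array([0.9, 0.2, 0.8, 0.7])
eps = 1e-15
ll = -np.mean(y * np.log(np.maximum(pr, eps)) + (1 - y) * np.log(np.maximum(1 - pr, eps)))
frac = y.mean()
base_ll = -(frac * np.log(frac) + (1 - frac) * np.log(1 - frac))
pac, base_pac = np.exp(-ll), np.exp(-base_ll)
print((pac - base_pac) / max(eps, 1 - base_pac))  # ~0.53: better than chance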