import numpy as np

def fftfilt(b, x, *n):
    """Filter x with the FIR filter b using the overlap-add method."""
    N_x = len(x)
    N_b = len(b)
    # Choose the FFT length that minimizes the cost of the overlap-add filtering:
    N = 2**np.arange(np.ceil(np.log2(N_b)), np.floor(np.log2(N_x)))
    cost = np.ceil(N_x / (N - N_b + 1)) * N * (np.log2(N) + 1)
    N_fft = int(N[np.argmin(cost)])
    # Compute the block length:
    L = int(N_fft - N_b + 1)
    # Compute the transform of the filter:
    H = np.fft.fft(b, N_fft)
    y = np.zeros(N_x, x.dtype)
    i = 0
    while i <= N_x:
        il = np.min([i + L, N_x])
        k = np.min([i + N_fft, N_x])
        yt = np.fft.ifft(np.fft.fft(x[i:il], N_fft) * H, N_fft)  # Overlap..
        y[i:k] = y[i:k] + yt[:k - i].real                        # ..and add
        i += L
    return y
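A minimal usage sketch (not from the original source) that filters a noise signal with a moving-average FIR and checks the overlap-add result against np.convolve; the names sig and taps are made up for illustration:

rng = np.random.default_rng(0)
sig = rng.standard_normal(4096)
taps = np.ones(32) / 32.0                    # simple moving-average filter
y_fft = fftfilt(taps, sig)
y_ref = np.convolve(sig, taps)[:len(sig)]    # direct convolution, truncated to len(sig)
print(np.allclose(y_fft, y_ref))             # should print True (up to float error)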
Python argmin() — example source code
def contest(self, b, g, r):
    """ Search for biased BGR values
    Finds closest neuron (min dist) and updates self.freq
    finds best neuron (min dist-self.bias) and returns position
    for frequently chosen neurons, self.freq[i] is high and self.bias[i] is negative
    self.bias[i] = self.GAMMA*((1/self.NETSIZE)-self.freq[i])"""
    i, j = self.SPECIALS, self.NETSIZE
    dists = abs(self.network[i:j] - np.array([b, g, r])).sum(1)
    bestpos = i + np.argmin(dists)
    biasdists = dists - self.bias[i:j]
    bestbiaspos = i + np.argmin(biasdists)
    self.freq[i:j] *= (1 - self.BETA)
    self.bias[i:j] += self.BETAGAMMA * self.freq[i:j]
    self.freq[bestpos] += self.BETA
    self.bias[bestpos] -= self.BETAGAMMA
    return bestbiaspos
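The core lookup in contest() reduces to a nearest-colour search; a standalone sketch of just that step (palette and colour below are made-up names, not part of the original class):

import numpy as np
palette = np.random.randint(0, 256, size=(256, 3))         # hypothetical BGR "network"
colour = np.array([10, 200, 30])
nearest = np.argmin(np.abs(palette - colour).sum(axis=1))  # index of the closest entry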
def rasterMaskToGrid(rasterMask):
    grid = []
    mask = rasterMask['mask']
    for y in range(rasterMask['height']):
        for x in range(rasterMask['width']):
            if mask[y, x] == 0:
                grid.append([x, y])
    grid = np.array(grid, dtype=float)  # np.float is removed in recent NumPy; plain float is equivalent
    if rasterMask is not None and rasterMask['hex'] is True:
        f = math.sqrt(3.0) / 2.0
        offset = -0.5
        if np.argmin(rasterMask['mask'][0]) > np.argmin(rasterMask['mask'][1]):
            offset = 0.5
        for i in range(len(grid)):
            if grid[i][1] % 2.0 == 0.0:
                grid[i][0] -= offset
            grid[i][1] *= f
    return grid
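A hypothetical call for the function above, using the dictionary keys it reads ('mask', 'height', 'width', 'hex'); the toy 3x4 mask is made up for illustration:

import numpy as np
toy_mask = {
    'mask': np.array([[0, 0, 1, 1],
                      [0, 0, 1, 1],
                      [0, 0, 0, 0]]),
    'height': 3,
    'width': 4,
    'hex': False,          # rectangular grid; no hexagonal offsetting
}
print(rasterMaskToGrid(toy_mask))   # float array of the [x, y] cells where mask == 0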
def update_waveforms(self):
    self.waveforms_ax.clear()
    for idx, p in enumerate(self.to_consider[list(self.inspect_templates)]):
        tmp = self.templates[:, p]
        tmp = tmp.toarray().reshape(self.N_e, self.N_t)
        elec = numpy.argmin(numpy.min(tmp, 1))
        thr = self.thresholds[elec]
        if self.ui.show_peaks.isChecked():
            indices = [self.inv_nodes[self.nodes[elec]]]
        else:
            indices = self.inv_nodes[self.edges[self.nodes[elec]]]
        for sidx in indices:
            xaxis = numpy.linspace(self.x_position[sidx], self.x_position[sidx] + (self.N_t/(self.sampling_rate*1e-3)), self.N_t)
            self.waveforms_ax.plot(xaxis, self.y_position[sidx] + tmp[sidx], c=colorConverter.to_rgba(self.inspect_colors_templates[idx]))
            #self.waveforms_ax.plot([0, xaxis[-1]], [-thr, -thr], c=colorConverter.to_rgba(self.inspect_colors_templates[idx]), linestyle='--')
    self.waveforms_ax.set_xlabel('Probe Space')
    self.waveforms_ax.set_ylabel('Probe Space')
    for fig in [self.ui.waveforms]:
        fig.draw_idle()
def run_forests():
    print('random forest: \n')
    params = []
    scores = []
    for _ in range(5):
        max_features = np.random.randint(400, 800)
        max_depth = np.random.choice([None, None, None, None, 30, 40, 60])
        forest = RandomForestClassifier(n_estimators=50,
                                        max_features=max_features,
                                        max_depth=max_depth)
        forest_fit = forest.fit(X_train, Y_train)
        pred = forest_fit.predict(X_test)
        print('\n params:', dict(max_features=max_features, max_depth=max_depth))
        print('forest train: ', zero_one_score(Y_train, forest_fit.predict(X_train)), ' test: ',
              zero_one_score(Y_test, pred))
        params.append((max_features, max_depth))
        scores.append(zero_one_score(Y_test, pred))
    print('best:', params[np.argmin(scores)])
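The snippet above follows a common pattern: sample a few random configurations, record a per-configuration score, and pick the argmin. A generic standalone sketch of that pattern (all names below are illustrative; it assumes the objective returns an error where lower is better):

import numpy as np

def random_search(objective, sample_params, n_trials=5):
    params, errors = [], []
    for _ in range(n_trials):
        p = sample_params()
        params.append(p)
        errors.append(objective(p))   # objective returns an error: lower is better
    best = int(np.argmin(errors))
    return params[best], errors[best]

# e.g. minimise a toy quadratic over randomly sampled candidates:
best_p, best_err = random_search(lambda p: (p - 3.0) ** 2,
                                 lambda: np.random.uniform(0, 10))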
def _translate(seq, f_init, f_next, trg_eos_idx, src_sel, trg_sel,
               k, cond_init_trg, normalize, n_best, **kwargs):
    sample, score = gen_sample(
        f_init, f_next, x=numpy.array(seq).reshape([len(seq), 1]),
        eos_idx=trg_eos_idx, src_selector=src_sel, trg_selector=trg_sel,
        k=k, maxlen=3*len(seq), stochastic=False, argmax=False,
        cond_init_trg=cond_init_trg, **kwargs)
    if normalize:
        lengths = numpy.array([len(s) for s in sample])
        score = score / lengths
    if n_best == 1:
        sidx = numpy.argmin(score)
    elif n_best > 1:
        sidx = numpy.argsort(score)[:n_best]
    else:
        raise ValueError('n_best cannot be negative!')
    return sample[sidx], score[sidx]
def doesnt_match(self, words):
    """
    Which word from the given list doesn't go with the others?

    Example::

        >>> trained_model.doesnt_match("breakfast cereal dinner lunch".split())
        'cereal'
    """
    words = [word for word in words if word in self.vocab]  # filter out OOV words
    logger.debug("using words %s" % words)
    if not words:
        raise ValueError("cannot select a word from an empty list")
    # which word vector representation is furthest away from the mean?
    selection = self.syn0norm[[self.vocab[word].index for word in words]]
    mean = np.mean(selection, axis=0)
    sim = np.dot(selection, mean / np.linalg.norm(mean))
    return words[np.argmin(sim)]
def temporalize(x, smoothing_steps, distance='L1'):
    """
    :param x: An (n_samples, n_dims) dataset
    :return: A (n_samples, ) array of indexes that can be used to shuffle the input for temporal smoothness.
    """
    x_flat = x.reshape(x.shape[0], -1)
    index_buffer = np.arange(1, smoothing_steps + 1)
    next_sample_buffer = x_flat[1:smoothing_steps + 1].copy()
    # Technically, we could do this without a next_sample_buffer (and only an index_buffer), but it would require
    # repeatedly accessing a bunch of really scattered memory, so we do it this way.
    shuffling_indices = np.zeros(len(x), dtype=int)
    rectifier = np.abs if distance == 'L1' else np.square if distance == 'L2' else bad_value(distance)
    p = ProgressIndicator(len(x), name='Temporalize')
    current_index = 0
    for i in xrange(len(x)):
        shuffling_indices[i] = current_index
        closest = np.argmin(rectifier(x_flat[current_index] - next_sample_buffer).sum(axis=1))
        current_index = index_buffer[closest]
        weve_aint_done_yet = i + smoothing_steps + 1 < len(x)
        next_index = i + smoothing_steps + 1
        next_sample_buffer[closest] = x_flat[next_index] if weve_aint_done_yet else float('inf')
        index_buffer[closest] = next_index if weve_aint_done_yet else -1
        p()
    return shuffling_indices
def _solveHyperplaneProjection(self, points):
    m, n = self.A.shape
    errors = np.zeros(m)
    for i in range(m):
        if i in self.ban_constraints:
            errors[i] = 9999999
        else:
            ai = self.A[i] / np.linalg.norm(self.A[i].T, self.normalize_c)
            bi = self.b[i] / np.linalg.norm(self.A[i].T, self.normalize_c)
            errors[i] = np.sum([ai * pt - bi for pt in points])
    minInd = np.argmin(errors)
    self.c = self.A[minInd] / np.linalg.norm(self.A[minInd].T,
                                             self.normalize_c)
    self.c = self.c.tolist()[0]
    self.error = errors[minInd]
    self.dual = np.zeros(m)
    self.dual[minInd] = 1 / np.linalg.norm(self.A[minInd].T,
                                           self.normalize_c)
    self._solved = True
    return errors[minInd]
def get_min_pos_kinect():
    (depth, _) = get_depth()
    minVal = np.min(depth)        # This is the minimum value from the depth image
    minPos = np.argmin(depth)     # This is the raw index of the minimum value above
    xPos = np.mod(minPos, xSize)  # This is the x component of the raw index
    yPos = minPos // xSize        # This is the y component of the raw index
    xList.append(xPos)
    del xList[0]
    xPos = int(np.mean(xList))
    yList.append(yPos)
    del yList[0]
    yPos = int(np.mean(yList))
    return (xSize - xPos - 10, yPos, minVal)
def al_just_expert(adata, clf, thresh_random=3):
    n = len(adata.taken_items)  # examples taken
    m = adata.mat.shape[0]      # examples available
    if m < 1: return -1
    if n < thresh_random or not adata.taken_both_classes():
        i = random.randint(0, m-1)
        return adata.query_expert_direct(i)
    # uncertainty sampling
    # undersample:
    #mat, rel = undersam(adata.taken_mat.tocsr(), adata.taken_rel)
    #clf.fit(mat, rel)
    clf.fit(adata.taken_mat, adata.taken_rel)
    pp = clf.predict_proba(adata.mat)
    uncertain = np.abs(pp[:, 0] - 0.5)
    i = np.argmin(uncertain)
    j = np.argmin(pp[:, 0])
    #print pp[i,0]
    return adata.query_expert_direct(i)
def al_crowd_fin_expert(adata, clf, turk_uncer, crowd_budget=5*1500):
    if adata.spent_crowd < crowd_budget and len(adata.rel) > 0:
        res = al_just_crowd(adata, clf)
        if res != -1: return res
    print "q expert"
    n = len(adata.taken_items)
    crowd_prob = np.zeros(n)
    found = False
    for i in range(n):
        if not adata.expert_fixed[i]:
            found = True
            j = adata.taken_items[i]
            crowd_prob[i] = turk_uncer[j][0] * 1.0 / (turk_uncer[j][0] + turk_uncer[j][1])
        else:
            crowd_prob[i] = 100
    if not found: return -1
    uncertain = np.abs(crowd_prob - 0.5)
    i = np.argmin(uncertain)
    #print i, adata.expert_fixed[i]
    print "most", turk_uncer[adata.taken_items[i]]
    return adata.query_expert_fix(i)
def _spatial_sort(glyph):
    from scipy.spatial.distance import cdist
    from numpy import argsort
    from numpy import argmin

    curr = argmin(glyph[:, 0])
    visited = set([curr])
    order = [curr]

    dd = cdist(glyph, glyph)

    while len(visited) < len(glyph):
        row = dd[curr, :]
        for i in argsort(row):
            if row[i] <= 0.0 or i == curr or i in visited:
                continue
            order.append(i)
            visited.add(i)
            break
    glyph[:, :] = glyph[order, :]
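An illustrative call (the points are made up); the function reorders the array in place, starting from the left-most point:

import numpy as np
pts = np.random.rand(50, 2)
_spatial_sort(pts)    # pts is now reordered in place by the loop above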
def _hpd_interval(self, x, width):
    """
    Code adapted from pymc3.stats.calc_min_interval:
    https://github.com/pymc-devs/pymc3/blob/master/pymc3/stats.py
    """
    x = np.sort(x)
    n = len(x)

    interval_idx_inc = int(np.floor(width * n))
    n_intervals = n - interval_idx_inc
    interval_width = x[interval_idx_inc:] - x[:n_intervals]

    if len(interval_width) == 0:
        raise ValueError('Too few elements for interval calculation')

    min_idx = np.argmin(interval_width)
    hdi_min = x[min_idx]
    hdi_max = x[min_idx + interval_idx_inc]

    index = ['hpd{}_{}'.format(width, x) for x in ['lower', 'upper']]
    return pd.Series([hdi_min, hdi_max], index=index)
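A standalone sketch of the same HPD idea (assumed names, no pandas): among all windows that contain a `width` fraction of the sorted samples, np.argmin picks the narrowest one.

import numpy as np

def hpd_interval(samples, width=0.95):
    x = np.sort(np.asarray(samples))
    n = len(x)
    k = int(np.floor(width * n))      # number of samples inside the interval
    widths = x[k:] - x[:n - k]        # width of every candidate interval
    j = np.argmin(widths)             # narrowest one
    return x[j], x[j + k]

lo, hi = hpd_interval(np.random.normal(size=10_000), 0.9)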
def tune_tal(mono_phi_score, tal_list):
    errs = []
    tals = []
    for tal in tal_list:
        err = []
        for i in range(len(mono_phi_score)):
            mono_1 = numpy.delete(mono_phi_score, i, axis=0)
            dim_h = mono_phi_score[i][:-1]
            value_h, alpha = train_predict_regression(mono_1, dim_h, tal)
            err.append((value_h - mono_phi_score[i][-1])**2)
        err = numpy.mean(err)
        errs.append(err)
        tals.append(tal)
        print 'regression tal:', tal, 'err', err
    idx = numpy.argmin(errs)
    return tals[idx]
def find_min_phase(sdata, a, f, sr, phase):
    rms1 = 0
    rms2 = 0
    rms3 = 0
    samples = len(sdata)
    for i in xrange(samples):
        diff1 = (sdata[i] - a*cos(2*pi*i*f/sr + phase[0]))
        rms1 += diff1*diff1
        diff2 = (sdata[i] - a*cos(2*pi*i*f/sr + phase[1]))
        rms2 += diff2*diff2
        diff3 = (sdata[i] - a*cos(2*pi*i*f/sr + phase[2]))
        rms3 += diff3*diff3
    rms = numpy.zeros(3)
    rms[0] = rms1
    rms[1] = rms2
    rms[2] = rms3
    i = numpy.argmin(rms)
    p = phase[i]
    return i, p
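A vectorized sketch of the same search (names are assumptions, not from the original): build one cosine model per candidate phase, compute the summed squared residual for each, and take the argmin.

import numpy as np

def find_min_phase_vec(sdata, a, f, sr, phases):
    n = np.arange(len(sdata))
    # one model column per candidate phase
    models = a * np.cos(2 * np.pi * f / sr * n[:, None] + np.asarray(phases)[None, :])
    resid = ((np.asarray(sdata)[:, None] - models) ** 2).sum(axis=0)
    i = int(np.argmin(resid))
    return i, phases[i]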
def _center_mahalanobis(self, data):
    """
    Finds a point that is in the center of the data using Mahalanobis distance.

    Parameters
    ----------
    data: input data as numpy array

    Returns
    -------
    mean: numpy array
    """
    distances = cdist(data, data, metric='mahalanobis', VI=self._inv_covar_matrices)
    sum_distances = np.sum(distances, axis=0)
    center_idx = np.argmin(sum_distances)
    return data[center_idx]
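A standalone sketch of the same idea with a plain Euclidean metric (the Mahalanobis version above additionally needs the inverse covariance matrix): the medoid is the point whose summed distance to all other points is smallest.

import numpy as np
from scipy.spatial.distance import cdist

def medoid(data):
    d = cdist(data, data)                  # pairwise Euclidean distances
    return data[np.argmin(d.sum(axis=0))]  # point with the smallest total distance

center = medoid(np.random.rand(100, 3))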
def find_stable_a(self, A, cond_h, cond_s):
    # build the different combinations of
    # condition equations
    condeq = []
    if cond_h.size > 0:
        condeq.append(cond_h)
    condeq.append(cond_s)
    if cond_h.size > 0:
        condeq.append(numpy.row_stack((cond_s, cond_h)))
    # condition number of A alone and of A stacked with each candidate set
    condnum = []
    condnum.append(numpy.linalg.cond(A))
    for cond in condeq:
        condnum.append(numpy.linalg.cond(numpy.row_stack((A, cond))))
    i = numpy.argmin(numpy.array(condnum))
    if i == 0:
        return numpy.array([])
    else:
        return condeq[i - 1]
def nearest_cell(self, x, y, bbox=None, shape=None):
    """
    Returns the index of the cell (column, row) closest
    to a given geographical coordinate.

    Parameters
    ----------
    x : int or float
        x coordinate.
    y : int or float
        y coordinate.
    """
    if not bbox:
        bbox = self._bbox
    if not shape:
        shape = self.shape
    # Note: this speedup assumes grid cells are square
    y_ix, x_ix = self.bbox_indices(self._bbox, self.shape)
    y_ix += self.cellsize / 2.0
    x_ix += self.cellsize / 2.0
    desired_y = np.argmin(np.abs(y_ix - y))
    desired_x = np.argmin(np.abs(x_ix - x))
    return desired_x, desired_y
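A standalone sketch of the same lookup with hypothetical coordinate vectors: given one array of cell-centre coordinates per axis, the nearest cell is just the argmin of the absolute difference along each axis.

import numpy as np
x_centers = np.linspace(0.5, 99.5, 100)    # cell-centre x coordinates (made up)
y_centers = np.linspace(0.5, 49.5, 50)     # cell-centre y coordinates (made up)
col = np.argmin(np.abs(x_centers - 12.3))  # nearest column to x = 12.3
row = np.argmin(np.abs(y_centers - 7.8))   # nearest row to y = 7.8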
def autoencoder(args, model):
    latent_dim = args.latent_dim
    structures = read_smiles_data(args.data)
    datobj = SmilesDataGenerator(structures, 120)
    train_gen = datobj.generator(1)
    if os.path.isfile(args.model):
        model.load(datobj.chars, args.model, latent_rep_size=latent_dim)
    else:
        raise ValueError("Model file %s doesn't exist" % args.model)
    true_pred_gen = (((mat, weight, model.autoencoder.predict(mat))
                      for (mat, _, weight) in train_gen))
    text_gen = ((str.join('\n',
                          [str((datobj.table.decode(true_mat[vec_ix])[:np.argmin(weight[vec_ix])],
                                datobj.table.decode(vec)[:]))
                           for (vec_ix, vec) in enumerate(pred_mat)]))
                for (true_mat, weight, pred_mat) in true_pred_gen)
    for _ in range(args.sample):
        print(text_gen.next())
def test_output_shape(self):
    # see also gh-616
    a = np.ones((10, 5))
    # Check some simple shape mismatches
    out = np.ones(11, dtype=np.int_)
    assert_raises(ValueError, a.argmin, -1, out)

    out = np.ones((2, 5), dtype=np.int_)
    assert_raises(ValueError, a.argmin, -1, out)

    # these could be relaxed possibly (used to allow even the previous)
    out = np.ones((1, 10), dtype=np.int_)
    assert_raises(ValueError, a.argmin, -1, out)

    out = np.ones(10, dtype=np.int_)
    a.argmin(-1, out=out)
    assert_equal(out, a.argmin(-1))
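A quick illustration of the constraint the test above exercises: the `out` array passed to argmin must have exactly the result shape (here (10,)) and an integer index dtype.

import numpy as np
a = np.ones((10, 5))
out = np.zeros(10, dtype=np.intp)
a.argmin(axis=-1, out=out)    # fills `out` in place with the per-row argmin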
def get_closest_k(kpoint, ref_ks, return_diff=False):
    """
    Returns the k-point among ref_ks that is closest to the input kpoint.
    If return_diff is True, the difference vector (kpoint minus the closest
    reference k-point) is returned instead of the reference k-point itself.

    Args:
        kpoint (1x3 array): the coordinates of the input k-point
        ref_ks ([1x3 array]): list of reference k-points from which the
            distance with initial_ks are calculated
        return_diff (bool): if True, the difference vector is returned

    Returns (1x3 array):
        the closest reference k-point, or the difference vector if return_diff
    """
    min_dist_ik = np.array([norm(ki - kpoint) for ki in ref_ks]).argmin()
    if return_diff:
        return kpoint - ref_ks[min_dist_ik]
    else:
        return ref_ks[min_dist_ik]
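An illustrative call with made-up k-points; it assumes the snippet's module imports numpy as np and norm as np.linalg.norm:

import numpy as np
from numpy.linalg import norm
kpt = np.array([0.26, 0.0, 0.5])
refs = [np.array([0.25, 0.0, 0.5]), np.array([0.5, 0.5, 0.5])]
print(get_closest_k(kpt, refs))                    # closest reference k-point
print(get_closest_k(kpt, refs, return_diff=True))  # difference vector kpoint - closest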
def mergeLocation(tx0, sp0, tx1, sp1):
    """
    find merge location in (tx0, sp0)
    """
    if len(tx0) < 2:
        return 9999
    txmin = min(np.array(tx0)[:, 1])  # minimal line height, used as detection threshold
    txx = []
    for i in range(min(len(tx0) - 1, len(tx1))):
        tx = tx0[i][1] + sp0[i][1] + tx0[i+1][1]
        txx.append(tx - tx1[i][1])
    cc = np.argmin(txx)
    if txx[cc] < txmin / 3:  # expected to be near 0
        return cc
    else:
        return 9999