Example source code for Python's abs()

def a_score_(solution, prediction):
    # mvmean (a column-wise mean helper) and metrics (sklearn.metrics) are assumed to be in scope
    mad = float(mvmean(abs(solution - mvmean(solution))))
    return 1 - metrics.mean_absolute_error(solution, prediction) / mad
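A rough standalone check of the same ratio, with np.mean standing in for the external mvmean helper (an assumption) and sklearn providing mean_absolute_error; the arrays are made up.

import numpy as np
from sklearn import metrics

solution = np.array([1.0, 2.0, 3.0, 4.0])
prediction = np.array([1.1, 1.9, 3.2, 3.8])
mad = float(np.mean(np.abs(solution - np.mean(solution))))
print(1 - metrics.mean_absolute_error(solution, prediction) / mad)  # ~0.85; closer to 1 means a better prediction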
def build_data_auto_encoder(data, step, win_size):
    count = data.shape[1] // step
    docX = np.zeros((count, 3, win_size))
    # per window: |x0 - x1|, (x0 - x1)^2, and the element-wise product of the two channels' first differences
    for i in range(0, data.shape[1] - win_size, step):
        c = i // step
        docX[c][0] = np.abs(data[0, i:i + win_size] - data[1, i:i + win_size])
        docX[c][1] = np.power(data[0, i:i + win_size] - data[1, i:i + win_size], 2)
        docX[c][2] = np.pad(
            (data[0, i:i + win_size - 1] - data[0, i + 1:i + win_size]) * (data[1, i:i + win_size - 1] - data[1, i + 1:i + win_size]),
            (0, 1), 'constant', constant_values=0)
    data = np.dstack((docX[:, 0], docX[:, 1], docX[:, 2])).reshape(docX.shape[0], docX.shape[1] * docX.shape[2])
    return data
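A small usage sketch of the window builder above, on synthetic two-channel data; shapes and values are illustrative.

import numpy as np

rng = np.random.default_rng(0)
channels = np.vstack([rng.normal(size=100), rng.normal(size=100)])
features = build_data_auto_encoder(channels, step=10, win_size=20)
print(features.shape)  # (10, 60): one row of stacked window features per step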
def reject_outliers(data, m = 2.):
    # keep only points within m scaled median-absolute-deviations of the median
    d = np.abs(data - np.median(data))
    mdev = np.median(d)
    s = d/mdev if mdev else 0.
    return data[s<m]
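A quick usage example for reject_outliers with one obvious outlier; the sample values are made up.

import numpy as np

samples = np.array([9.8, 10.1, 10.0, 9.9, 55.0, 10.2])
print(reject_outliers(samples, m=2.0))  # the 55.0 reading is more than 2 scaled MADs from the median and is dropped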
def get_line_region(self, position, name=''):
    """Creates a line region at the given position (start_x, start_y, end_x, end_y),
    inclusive.

    Args:
        position: Position of the line region (start_x, start_y, end_x, end_y).
        name: Name of the region.

    Returns:
        Line region.
    """
    start_idx = self.get_index(position[:2])
    end_idx = self.get_index(position[2:])
    x_diff = start_idx % self.x.samples - end_idx % self.x.samples
    y_diff = int(start_idx / self.x.samples) - int(end_idx / self.x.samples)
    num_points = max(np.abs([x_diff, y_diff]))
    point_indices = []
    for ii in range(num_points + 1):
        x_position = start_idx % self.x.samples - np.round(ii / num_points * x_diff)
        y_position = int(start_idx / self.x.samples) - np.round(ii / num_points * y_diff)
        point_indices.append(int(x_position + self.x.samples * y_position))
    return reg.LineRegion(point_indices, position, name=name)
def get_index(self, value):
    """Returns the index of a given value.

    Args:
        value: Value the index is requested for.

    Returns:
        Index.
    """
    index, = np.where(np.abs(self.vector - value) <= self.snap_radius)
    assert len(index) < 2, "Multiple points found within snap radius of given value."
    assert len(index) > 0, "No point found within snap radius of given value."
    return int(index[0])
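A standalone sketch of the snap-radius lookup this method performs; vector, snap_radius, and the query value are illustrative.

import numpy as np

vector = np.linspace(0.0, 1.0, 11)   # grid coordinates 0.0, 0.1, ..., 1.0
snap_radius = 0.03
value = 0.41                         # query point close to the 0.4 grid line
index, = np.where(np.abs(vector - value) <= snap_radius)
print(int(index[0]))                 # 4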
def alterneigh(self, alpha, rad, i, b, g, r):
    if i-rad >= self.SPECIALS-1:
        lo = i-rad
        start = 0
    else:
        lo = self.SPECIALS-1
        start = (self.SPECIALS-1 - (i-rad))
    if i+rad <= self.NETSIZE:
        hi = i+rad
        end = rad*2-1
    else:
        hi = self.NETSIZE
        end = (self.NETSIZE - (i+rad))

    a = self.geta(alpha, rad)[start:end]

    p = self.network[lo+1:hi]
    p -= np.transpose(np.transpose(p - np.array([b, g, r])) * a)
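A standalone sketch of the transpose trick in the final line, which moves each neuron toward the target colour scaled by its own weight; the numbers are illustrative.

import numpy as np

p = np.array([[10.0, 20.0, 30.0], [40.0, 50.0, 60.0]])   # two neurons, (b, g, r) each
a = np.array([0.5, 0.25])                                 # per-neuron alpha weights
target = np.array([0.0, 0.0, 0.0])
p -= np.transpose(np.transpose(p - target) * a)
print(p)  # the first neuron moves halfway to the target, the second a quarter of the way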
#def contest(self, b, g, r):
# """ Search for biased BGR values
# Finds closest neuron (min dist) and updates self.freq
# finds best neuron (min dist-self.bias) and returns position
# for frequently chosen neurons, self.freq[i] is high and self.bias[i] is negative
# self.bias[i] = self.GAMMA*((1/self.NETSIZE)-self.freq[i])"""
#
# i, j = self.SPECIALS, self.NETSIZE
# dists = abs(self.network[i:j] - np.array([b,g,r])).sum(1)
# bestpos = i + np.argmin(dists)
# biasdists = dists - self.bias[i:j]
# bestbiaspos = i + np.argmin(biasdists)
# self.freq[i:j] -= self.BETA * self.freq[i:j]
# self.bias[i:j] += self.BETAGAMMA * self.freq[i:j]
# self.freq[bestpos] += self.BETA
# self.bias[bestpos] -= self.BETAGAMMA
# return bestbiaspos
def calc_scores(self, lag):
    data = self.raw_data[:, abs(self.raw_lags) <= lag]
    control = self.raw_control
    score = self.overlap[self.pairs[:, 0], self.pairs[:, 1]]
    score2 = control - data.mean(axis=1)
    score3 = control
    return score, score2, score3
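A standalone sketch of the lag-window masking used above; the array names and sizes are illustrative.

import numpy as np

raw_lags = np.arange(-5, 6)                  # lags -5 ... 5
raw_data = np.random.rand(3, raw_lags.size)  # 3 pairs, one column per lag
window = raw_data[:, np.abs(raw_lags) <= 2]  # keep only columns whose lag is within +/-2
print(window.shape)                          # (3, 5)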
def data_tooltip(self, x, y):
    row = int(y)
    if row >= 0 and row < len(self.raw_data):
        all_raw_data = self.raw_data
        data_idx = self.sort_idcs[row]
        lag_diff = np.abs(x - self.raw_lags)
        nearest_lag_idx = np.argmin(lag_diff)
        nearest_lag = self.raw_lags[nearest_lag_idx]
        value = all_raw_data[data_idx, nearest_lag_idx]
        return ('%.2f - lag: %.2fms (template similarity: %.2f '
                'CC metric %.2f)') % (value, nearest_lag,
                                      self.score_x[data_idx],
                                      self.score_y[data_idx])
    else:
        return ''
def update_statusbar(self, event):
    # Update information about the mouse position in the status bar
    status_bar = self.statusbar
    if event.inaxes == self.electrode_ax:
        status_bar.showMessage(u'x: %.0fµm y: %.0fµm' % (event.xdata, event.ydata))
    elif event.inaxes == self.data_x:
        yspacing = np.max(np.abs(self.data)) * 1.05
        if yspacing != 0:
            row = int((event.ydata + 0.5 * yspacing) / yspacing)
        else:
            row = int(event.ydata)
        if row < 0 or row >= len(self.inspect_points):
            status_bar.clearMessage()
        else:
            time_idx = np.argmin(np.abs(self.time - event.xdata))
            start_idx = np.argmin(np.abs(self.time - self.t_start))
            rel_time_idx = time_idx - start_idx
            electrode_idx = self.inspect_points[row]
            electrode_x, electrode_y = self.points[electrode_idx]
            data = self.data[rel_time_idx, electrode_idx]
            msg = '%.2f' % data
            if self.show_fit:
                fit = self.curve[electrode_idx, rel_time_idx]
                msg += ' (fit: %.2f)' % fit
            msg += ' t: %.2fs ' % self.time[time_idx]
            msg += u'(electrode %d at x: %.0fµm y: %.0fµm)' % (electrode_idx, electrode_x, electrode_y)
            status_bar.showMessage(msg)
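A standalone sketch of the nearest-sample lookup used for time_idx above; the time axis and cursor position are illustrative.

import numpy as np

time = np.linspace(0.0, 1.0, 201)              # 5 ms sampling over 1 s
cursor_x = 0.4231                              # mouse position in seconds
time_idx = np.argmin(np.abs(time - cursor_x))
print(time_idx, time[time_idx])                # 85 0.425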
def sameParams(self, params, i=None):
    if (self.prevParams is None): return False
    if (i is None): return (np.max(np.abs(params-self.prevParams)) < self.epsilon)
    return ((np.abs(params[i]-self.prevParams[i])) < self.epsilon)
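A tiny standalone sketch of the max-absolute-difference tolerance test used by sameParams; the parameter vectors are made up.

import numpy as np

params = np.array([0.5, 1.0, 2.0])
prev = np.array([0.5000004, 1.0, 2.0])
epsilon = 1e-5
print(np.max(np.abs(params - prev)) < epsilon)  # True: every parameter changed by less than epsilon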
def getEE(self, EEParams):
    if (self.prevEEParams is not None):
        if (EEParams.shape[0] == 0 or np.max(np.abs(EEParams-self.prevEEParams)) < self.epsilon): return self.cache['EE']
    Kd = self.Kdim(EEParams)
    EE = elsympol(Kd, len(self.kernels))
    self.prevEEParams = EEParams.copy()
    self.cache['EE'] = EE
    return EE
def getScaledE(self, params, i, E):
    if (self.prevHyp0Params is not None and np.abs(self.prevHyp0Params[i]-params[i]) < self.epsilon): return self.cache['E_scaled'][i]
    if ('E_scaled' not in self.cache.keys()): self.cache['E_scaled'] = [None for j in range(len(self.kernels))]
    for j in range(len(self.kernels)):
        if (self.prevHyp0Params is not None and np.abs(self.prevHyp0Params[j]-params[j]) < self.epsilon): continue
        E_scaled = E[:,:,j+1]*np.exp(2*params[j])
        self.cache['E_scaled'][j] = E_scaled
    self.prevHyp0Params = params.copy()
    return self.cache['E_scaled'][i]
def __init__(self, X, pos):
    Kernel.__init__(self)
    self.X_scaled = X/np.sqrt(X.shape[1])
    d = pos.shape[0]
    # pairwise |pos_i - pos_j| distance matrix, scaled by 1e5
    self.D = np.abs(np.tile(np.column_stack(pos).T, (1, d)) - np.tile(pos, (d, 1))) / 100000.0
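A standalone sketch of the pairwise-distance matrix built in self.D; the positions are made up.

import numpy as np

pos = np.array([0.0, 100000.0, 350000.0])
d = pos.shape[0]
D = np.abs(np.tile(np.column_stack(pos).T, (1, d)) - np.tile(pos, (d, 1))) / 100000.0
print(D)  # symmetric matrix of scaled |pos_i - pos_j| differences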
def AorthogonalityCheck(A, U, d):
    """
    Test the Frobenius norm of D^{-1}(U^T A U) - I_k.
    """
    V = np.zeros(U.shape)
    AV = np.zeros(U.shape)
    Av = Vector()
    v = Vector()
    A.init_vector(Av, 0)
    A.init_vector(v, 1)

    nvec = U.shape[1]
    for i in range(0, nvec):
        v.set_local(U[:, i])
        v *= 1./math.sqrt(d[i])
        A.mult(v, Av)
        AV[:, i] = Av.get_local()
        V[:, i] = v.get_local()

    VtAV = np.dot(V.T, AV)
    err = VtAV - np.eye(nvec, dtype=VtAV.dtype)

    # plt.imshow(np.abs(err))
    # plt.colorbar()
    # plt.show()

    print("i, ||Vt(i,:)AV(:,i) - I_i||_F, V[:,i] = 1/sqrt(lambda_i) U[:,i]")
    for i in range(1, nvec+1):
        print(i, np.linalg.norm(err[0:i, 0:i], 'fro'))
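A dense-NumPy sketch of the same A-orthogonality check, assuming A is available as an ordinary ndarray (the routine above works with a matrix-free operator and dolfin-style vectors).

import numpy as np

n, k = 6, 3
A = np.diag(np.arange(1.0, n + 1.0))       # a simple SPD test operator
d, U = np.linalg.eigh(A)                   # eigenpairs of A
V = U[:, :k] / np.sqrt(d[:k])              # scale columns by 1/sqrt(lambda_i)
err = V.T @ (A @ V) - np.eye(k)
print(np.linalg.norm(err, 'fro'))          # ~0, since the scaled eigenvectors are A-orthonormal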
def load_scan(path):
    slices = [dicom.read_file(path + '/' + s) for s in os.listdir(path)]
    #slices.sort(key = lambda x: int(x.InstanceNumber))
    acquisitions = [x.AcquisitionNumber for x in slices]
    vals, counts = np.unique(acquisitions, return_counts=True)
    vals = vals[::-1]  # reverse order so the later acquisitions come first (np.unique returns values in ascending order)
    counts = counts[::-1]
    ## take the acquisition that has more entries; if the counts are identical, take the later one
    acq_val_sel = vals[np.argmax(counts)]
    ##acquisitions = sorted(np.unique(acquisitions), reverse=True)
    if len(vals) > 1:
        print("WARNING ##########: MULTIPLE acquisitions & counts, acq_val_sel, path: ", vals, counts, acq_val_sel, path)
    slices2 = [x for x in slices if x.AcquisitionNumber == acq_val_sel]
    slices = slices2
    ## one path may include 2 acquisitions (2 sets); take only the latter acquisition, which is typically better than the earlier ones
    ## example: '../input/stage1/b8bb02d229361a623a4dc57aa0e5c485'
    #slices.sort(key = lambda x: int(x.ImagePositionPatient[2]))  # from v 8, BUG: should be float
    slices.sort(key = lambda x: float(x.ImagePositionPatient[2]))  # from v 9
    try:
        slice_thickness = np.abs(slices[0].ImagePositionPatient[2] - slices[1].ImagePositionPatient[2])
    except:
        slice_thickness = np.abs(slices[0].SliceLocation - slices[1].SliceLocation)
    for s in slices:
        s.SliceThickness = slice_thickness
    return slices
def load_scan(path):
    slices = [dicom.read_file(path + '/' + s) for s in os.listdir(path)]
    #slices.sort(key = lambda x: int(x.InstanceNumber))
    #slices.sort(key = lambda x: int(x.ImagePositionPatient[2]))  # from v 8 - BUGGY (should be float; the int cast caused issues with segmenting and rescaling)
    slices.sort(key = lambda x: float(x.ImagePositionPatient[2]))  # from v 8
    try:
        slice_thickness = np.abs(slices[0].ImagePositionPatient[2] - slices[1].ImagePositionPatient[2])
    except:
        slice_thickness = np.abs(slices[0].SliceLocation - slices[1].SliceLocation)
    for s in slices:
        s.SliceThickness = slice_thickness
    return slices
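A standalone sketch of the slice-thickness computation used by both loaders, with made-up z positions instead of real DICOM headers.

import numpy as np

z_positions = [-120.0, -122.5, -125.0]                    # ImagePositionPatient[2] per slice, already sorted
slice_thickness = np.abs(z_positions[0] - z_positions[1])
print(slice_thickness)                                    # 2.5 (spacing between adjacent slices)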
def soft_thresh(r, w):
    # element-wise soft thresholding: shrink each weight toward zero by r
    return np.sign(w) * np.maximum(np.abs(w) - r, 0)
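A quick usage check of the soft-thresholding operator; the weight vector is illustrative.

import numpy as np

w = np.array([-3.0, -0.5, 0.2, 1.5])
print(soft_thresh(1.0, w))  # entries shrink toward zero by 1.0, and the small ones become exactly zero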
def active_set_Lam(self, fixed, vary):
    grad = self.grad_wrt_Lam(fixed, vary)
    assert np.allclose(grad, grad.T, 1e-3)
    return np.where((np.abs(np.triu(grad)) > self.lamL) | (self.Lam != 0))
    # return np.where((np.abs(grad) > self.lamL) | (~np.isclose(self.Lam, 0)))
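A standalone sketch of the active-set selection rule above; the gradient and Lam matrices are made up.

import numpy as np

grad = np.array([[0.0, 0.8, 0.1],
                 [0.8, 0.0, 0.3],
                 [0.1, 0.3, 0.0]])
Lam = np.zeros((3, 3))
Lam[0, 2] = 0.05                      # one entry is already nonzero
lamL = 0.5
rows, cols = np.where((np.abs(np.triu(grad)) > lamL) | (Lam != 0))
print(rows, cols)                     # [0 0] [1 2]: (0, 1) passes the gradient test, (0, 2) is already active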