def compute(self, frame):
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
descriptor = []
dominantGradients = np.zeros_like(frame)
maxGradient = cv2.filter2D(frame, cv2.CV_32F, self.kernels[0])
maxGradient = np.absolute(maxGradient)
for k in range(1,len(self.kernels)):
kernel = self.kernels[k]
gradient = cv2.filter2D(frame, cv2.CV_32F, kernel)
gradient = np.absolute(gradient)
        # update the running max in place; pixels where this kernel's gradient
        # attains the (new) max are re-labelled with the kernel index k
        np.maximum(maxGradient, gradient, maxGradient)
        indices = (maxGradient == gradient)
        dominantGradients[indices] = k
frameH, frameW = frame.shape
for row in range(self.rows):
for col in range(self.cols):
mask = np.zeros_like(frame)
            # integer division so the slice bounds are ints (required in Python 3)
            mask[(frameH // self.rows) * row:(frameH // self.rows) * (row + 1),
                 (frameW // self.cols) * col:(frameW // self.cols) * (col + 1)] = 255
hist = cv2.calcHist([dominantGradients], [0], mask, self.bins, self.range)
hist = cv2.normalize(hist, None)
descriptor.append(hist)
    return np.concatenate(descriptor)
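# For context, a minimal sketch of a host object for the compute() method
# above. The attribute names (kernels, rows, cols, bins, range) come from the
# snippet itself; the particular kernel bank and 4x4 grid are assumptions.
import cv2
import numpy as np

class GradientGridDescriptor:
    def __init__(self):
        sobel_x = np.array([[-1, 0, 1],
                            [-2, 0, 2],
                            [-1, 0, 1]], dtype=np.float32)
        # bank of oriented derivative kernels (just 0 and 90 degrees here)
        self.kernels = [sobel_x, sobel_x.T]
        self.rows, self.cols = 4, 4          # spatial grid of histogram cells
        self.bins = [len(self.kernels)]      # histSize argument for cv2.calcHist
        self.range = [0, len(self.kernels)]  # ranges argument for cv2.calcHist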
def world_2_voxel(world_coordinates, origin, spacing):
stretched_voxel_coordinates = np.absolute(world_coordinates - origin)
voxel_coordinates = stretched_voxel_coordinates / spacing
return voxel_coordinates
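# A quick sanity check of the conversion; the origin and spacing values below
# are made up for illustration.
import numpy as np

origin = np.array([-195.5, -195.5, -378.0])   # scanner origin in mm (illustrative)
spacing = np.array([0.7, 0.7, 2.5])           # voxel spacing in mm (illustrative)
world = np.array([-160.5, -181.5, -353.0])
print(world_2_voxel(world, origin, spacing))  # -> [50. 20. 10.]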
def soft_threshold(X, thresh):
"""Proximal mapping of l1-norm results in soft-thresholding. Therefore, it is required
for the optimisation of the GFGL or IFGL.
Parameters
----------
X : ndarray
input data of arbitrary shape
thresh : float
threshold value
Returns
-------
ndarray soft threshold applied
"""
return (np.absolute(X) - thresh).clip(0) * np.sign(X)
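# Soft-thresholding shrinks every entry toward zero by `thresh` and zeroes out
# anything smaller in magnitude, for example:
import numpy as np

X = np.array([-3.0, -0.5, 0.0, 0.2, 2.0])
print(soft_threshold(X, 1.0))  # -> [-2. -0.  0.  0.  1.]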
def tensorize(sentence, max_length):
""" Input:
- sentence: The sentence is a tuple of lists (s1, s2, ..., sk)
s1 is always a sequence of word ids.
sk is always a sequence of label ids.
s2 ... sk-1 are sequences of feature ids,
such as predicate or supertag features.
        - max_length: The maximum length of sequences, used for padding.
    Returns:
        x (padded features), |y| (padded labels), the original sentence
        length, and 0/1 weights marking valid label positions.
    """
x = np.array([t for t in zip(*sentence[:-1])])
y = np.array(sentence[-1])
weights = (y >= 0).astype(float)
x.resize([max_length, x.shape[1]])
y.resize([max_length])
weights.resize([max_length])
return x, np.absolute(y), len(sentence[0]), weights
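# A toy call, assuming a two-list sentence (word_ids, label_ids) in which
# negative label ids mark positions to be ignored:
words = [4, 8, 15]
labels = [1, -1, 2]
x, y, length, weights = tensorize((words, labels), max_length=5)
# x.shape == (5, 1); y == [1, 1, 2, 0, 0]; length == 3;
# weights == [1., 0., 1., 0., 0.]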
# Source: two_sigma_financial_modelling.py (project: PortfolioTimeSeriesAnalysis, author: MizioAnd)
def outlier_identification(self, model, x_train, y_train):
    # Split the training data into an extra train/test split
x_train_split, x_test_split, y_train_split, y_test_split = train_test_split(x_train, y_train)
print('\nOutlier shapes')
print(np.shape(x_train_split), np.shape(x_test_split), np.shape(y_train_split), np.shape(y_test_split))
model.fit(x_train_split, y_train_split)
y_predicted = model.predict(x_test_split)
residuals = np.absolute(y_predicted - y_test_split)
rmse_pred_vs_actual = self.rmse(y_predicted, y_test_split)
outliers_mask = residuals >= rmse_pred_vs_actual
outliers_mask = np.concatenate([np.zeros((np.shape(y_train_split)[0],), dtype=bool), outliers_mask])
not_an_outlier = outliers_mask == 0
# Resample the training set from split, since the set was randomly split
x_out = np.insert(x_train_split, np.shape(x_train_split)[0], x_test_split, axis=0)
y_out = np.insert(y_train_split, np.shape(y_train_split)[0], y_test_split, axis=0)
    return x_out[not_an_outlier], y_out[not_an_outlier]
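# A hypothetical call: `analysis` stands in for an instance of the surrounding
# class (which must also provide the rmse() helper used above), with random
# data for illustration.
import numpy as np
from sklearn.linear_model import Lasso

x_train = np.random.rand(200, 5)
y_train = np.random.rand(200)
x_clean, y_clean = analysis.outlier_identification(Lasso(alpha=0.1), x_train, y_train)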
def edge_LoG(I, sigma):
LoG = laplace(gaussian(I, sigma=sigma), ksize=3)
thres = np.absolute(LoG).mean() * 1.0
    output = np.zeros(LoG.shape)
w = output.shape[1]
h = output.shape[0]
for y in range(1, h - 1):
for x in range(1, w - 1):
patch = LoG[y - 1:y + 2, x - 1:x + 2]
p = LoG[y, x]
maxP = patch.max()
minP = patch.min()
            if p > 0:
                zeroCross = minP < 0
            else:
                zeroCross = maxP > 0
if ((maxP - minP) > thres) and zeroCross:
output[y, x] = 1
    # FIXME: decide whether to return the binary closing of the output or just the output
#return binary_closing(output)
return output
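# A typical call, assuming the snippet's imports are skimage.filters.laplace
# and skimage.filters.gaussian (with scipy imported as sp):
from skimage import data

edge_map = edge_LoG(data.camera() / 255.0, sigma=2.0)  # binary zero-crossing map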
def fft(frames, nfft=512):
"""
???????
????????????????????????????????????
??????????????????????????????????????
??????????????????????????????????????
??????????????????????????????????????
????
:param frames:????????
:param nfft:fft???????
:return:???nfft//2+1?????????????
"""
complex_spec = np.fft.rfft(frames, nfft)
return np.absolute(complex_spec)
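# Example: a single 1 kHz frame sampled at 16 kHz peaks at FFT bin
# 1000 / 16000 * 512 = 32 (all values here are illustrative).
import numpy as np

fs = 16000
t = np.arange(512) / fs
frame = np.sin(2 * np.pi * 1000 * t)
spec = fft(frame[np.newaxis, :], nfft=512)
print(spec.shape, spec[0].argmax())  # (1, 257) 32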
def dir_threshold(img, sobel_kernel=3, thresh=(0, np.pi/2)):
# Apply the following steps to img
# 1) Convert to grayscale
gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
# 2) Take the gradient in x and y separately
sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel)
sobely = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel)
# 3) Take the absolute value of the x and y gradients
abs_sobelx = np.absolute(sobelx)
abs_sobely = np.absolute(sobely)
# 4) Use np.arctan2(abs_sobely, abs_sobelx) to calculate the direction of the gradient
absgraddir = np.arctan2(abs_sobely, abs_sobelx)
# 5) Create a binary mask where direction thresholds are met
binary_output = np.zeros_like(absgraddir)
binary_output[(absgraddir >= thresh[0]) & (absgraddir <= thresh[1])] = 1
# 6) Return this mask as your binary_output image
return binary_output
# Define a function that applies Sobel x and y,
# then computes the magnitude of the gradient
# and applies a threshold
def mag_thresh(img, sobel_kernel=3, mag_thresh=(0, 255)):
# Apply the following steps to img
# 1) Convert to grayscale
gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
# 2) Take the gradient in x and y separately
sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel)
sobely = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel)
# 3) Calculate the magnitude
gradmag = np.sqrt(sobelx**2 + sobely**2)
# 4) Scale to 8-bit (0 - 255) and convert to type = np.uint8
scale_factor = np.max(gradmag)/255
gradmag = (gradmag/scale_factor).astype(np.uint8)
# 5) Create a binary mask where mag thresholds are met
binary_output = np.zeros_like(gradmag)
binary_output[(gradmag >= mag_thresh[0]) & (gradmag <= mag_thresh[1])] = 1
# 6) Return this mask as your binary_output image
return binary_output
# Define a function that applies Sobel x or y,
# then takes an absolute value and applies a threshold.
# Note: calling your function with orient='x', thresh_min=5, thresh_max=100
# should produce output like the example image shown above this quiz.
def abs_sobel_thresh(img, orient='x', thresh_min=0, thresh_max=255):
# Apply the following steps to img
# 1) Convert to grayscale
gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
# 2) Take the derivative in x or y given orient = 'x' or 'y'
if orient == 'x':
sobel = cv2.Sobel(gray, cv2.CV_64F, 1, 0)
if orient == 'y':
sobel = cv2.Sobel(gray, cv2.CV_64F, 0, 1)
# 3) Take the absolute value of the derivative or gradient
abs_sobel = np.absolute(sobel)
# 4) Scale to 8-bit (0 - 255) then convert to type = np.uint8
scaled_sobel = np.uint8(255*abs_sobel/np.max(abs_sobel))
# 5) Create a mask of 1's where the scaled gradient magnitude
# is > thresh_min and < thresh_max
binary_output = np.zeros_like(scaled_sobel)
binary_output[(scaled_sobel >= thresh_min) & (scaled_sobel <= thresh_max)] = 1
# 6) Return this mask as your binary_output image
return binary_output
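# The three helpers above are typically combined into a single binary map;
# a sketch with illustrative thresholds and a made-up input path:
import cv2
import numpy as np

img = cv2.cvtColor(cv2.imread('road.jpg'), cv2.COLOR_BGR2RGB)
gradx = abs_sobel_thresh(img, orient='x', thresh_min=20, thresh_max=100)
grady = abs_sobel_thresh(img, orient='y', thresh_min=20, thresh_max=100)
mag_binary = mag_thresh(img, sobel_kernel=9, mag_thresh=(30, 100))
dir_binary = dir_threshold(img, sobel_kernel=15, thresh=(0.7, 1.3))
combined = np.zeros_like(dir_binary)
combined[((gradx == 1) & (grady == 1)) | ((mag_binary == 1) & (dir_binary == 1))] = 1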
def test_endian(self):
msg = "big endian"
a = np.arange(6, dtype='>i4').reshape((2, 3))
assert_array_equal(umt.inner1d(a, a), np.sum(a*a, axis=-1),
err_msg=msg)
msg = "little endian"
a = np.arange(6, dtype='<i4').reshape((2, 3))
assert_array_equal(umt.inner1d(a, a), np.sum(a*a, axis=-1),
err_msg=msg)
# Output should always be native-endian
Ba = np.arange(1, dtype='>f8')
La = np.arange(1, dtype='<f8')
assert_equal((Ba+Ba).dtype, np.dtype('f8'))
assert_equal((Ba+La).dtype, np.dtype('f8'))
assert_equal((La+Ba).dtype, np.dtype('f8'))
assert_equal((La+La).dtype, np.dtype('f8'))
assert_equal(np.absolute(La).dtype, np.dtype('f8'))
assert_equal(np.absolute(Ba).dtype, np.dtype('f8'))
assert_equal(np.negative(La).dtype, np.dtype('f8'))
assert_equal(np.negative(Ba).dtype, np.dtype('f8'))
def build_parser():
"""Build argument parser."""
parser = argparse.ArgumentParser(
description=__doc__, formatter_class=argparse.ArgumentDefaultsHelpFormatter)
# Required args
parser.add_argument("--in_gct_path", "-i", required=True,
help="filepath to input gct")
# Optional args
parser.add_argument("--out_name", "-o", default=None,
help="name of output file (default is <INPUT_GCT>.tear.processed.gct")
parser.add_argument("--divide_by_mad", "-dm", action="store_true", default=False,
help=("whether to divide by median absolute deviation " +
"in addition to subtracting the probe median"))
parser.add_argument("--ignore_subset_norm", "-ig", action="store_true", default=False,
help="whether to ignore subset-specific normalization")
parser.add_argument("-psp_config_path", type=str,
default="~/psp_production.cfg",
help="filepath to PSP config file")
parser.add_argument("-verbose", "-v", action="store_true", default=False,
help="increase the number of messages reported")
return parser
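# Example invocation with a made-up input filename:
args = build_parser().parse_args(["--in_gct_path", "my_data.gct", "--divide_by_mad"])
print(args.in_gct_path, args.divide_by_mad)  # my_data.gct True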
def flow(self, Kc, Ks, Kz, Ka, numexpr):
zeros = np.zeros
where = np.where
min = np.minimum
max = np.maximum
abs = np.absolute
arctan = np.arctan
sin = np.sin
    center = (slice(1, -1), slice(1, -1))
rock = self.center
ds = self.scour[center]
rcc = rock[center]
    eroded = rcc - ds * Kz
    # there isn't really a bottom to the rock, but negative values look ugly
    rock[center] = where(eroded < 0, 0, eroded)
# Source: preprocessing.py (project: Epileptic-Seizure-Prediction, author: cedricsimar)
def compute_spectrogram(self, sig, data_length_sec, sampling_frequency, nfreq_bands, win_length_sec, stride_sec):
n_channels = 16
n_timesteps = int((data_length_sec - win_length_sec) / stride_sec + 1)
n_fbins = nfreq_bands
sig = np.transpose(sig)
sig2 = np.zeros((n_channels, n_fbins, n_timesteps))
for i in range(n_channels):
sigc = np.zeros((n_fbins, n_timesteps))
for frame_num, w in enumerate(range(0, int(data_length_sec - win_length_sec + 1), stride_sec)):
sigw = sig[i, w * sampling_frequency: (w + win_length_sec) * sampling_frequency]
sigw = self.hanning(sigw)
fft = self.log10(np.absolute(np.fft.rfft(sigw)))
fft_freq = np.fft.rfftfreq(n=sigw.shape[-1], d=1.0 / sampling_frequency)
sigc[:nfreq_bands, frame_num] = self.group_into_bands(fft, fft_freq, nfreq_bands)
sig2[i, :, :] = sigc
return np.transpose(sig2, axes=(2,1,0))
def scrub(cls, image):
"""
Apply Stroke-Width Transform to image.
:param filepath: relative or absolute filepath to source image
:return: numpy array representing result of transform
"""
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
canny, sobelx, sobely, theta = cls._create_derivative(gray)
swt = cls._swt(theta, canny, sobelx, sobely)
shapes = cls._connect_components(swt)
swts, heights, widths, topleft_pts, images = cls._find_letters(swt, shapes)
    if len(swts) == 0:
        # didn't find any text, probably a bad face
return None
word_images = cls._find_words(swts, heights, widths, topleft_pts, images)
final_mask = np.zeros(swt.shape)
for word in word_images:
final_mask += word
return final_mask
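# Hypothetical usage; `SWTScrubber` names the (unshown) class that owns scrub():
import cv2

mask = SWTScrubber.scrub(cv2.imread('portrait.jpg'))  # None if no text was found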
def compute_ang_corr(self, input_frame, normed=True, ang_max=10):
"""Compute the angular correlation from the polar representation of given pattern
Arguments:
polar_arr (array) - Polar data array (usually output of convert())
normed (bool, optional) - Whether to normalize Fourier transform in each radial bin
ang_max (float, optional) - How many Fourier components to keep
Returns:
ang_corr (array) - Angular correlations for each input bin
"""
polar_arr = self.compute_polar(input_frame)
ang_corr = np.array([a - a.mean() for a in polar_arr])
temp = []
for a in ang_corr:
if normed:
la = np.linalg.norm(a)
if la > 0.:
temp.append(np.absolute(np.fft.fft(a/la))[1:ang_max])
else:
temp.append(np.zeros(ang_max-1))
else:
temp.append(np.absolute(np.fft.fft(a))[1:ang_max])
return np.array(temp)
def plot_gradients(self, foo=False):
'''
Shows the difference between the computed gradients in the ANN modul
and the numerically calculated gradients.
'''
fig = plt.gcf()
fig.canvas.set_window_title('Comparison of the computed gradients')
numgrad, grad, qua, ok = ngc.compare_gradients(self.Net,
self.inputdata_tr,
self.outputdata_tr)
print(qua, ok)
y = numgrad-grad
    plt.bar(np.arange(1, len(y) + 1), y)
    plt.grid(True)
plt.xlabel('Gradient')
plt.ylabel('Difference')
plt.show()
if foo:
print('numgrad: ', numgrad)
print('grad: ', grad)
print('difference: ', y)
# Source: LSFIR.py (project: Least-Squared-Error-Based-FIR-Filters, author: fourier-being)
def lpfls(N,wp,ws,W):
    M = (N - 1) // 2  # integer so it can be used as an index
nq = np.arange(0,2*M+1)
nb = np.arange(0,M+1)
q = (wp/np.pi)*np.sinc((wp/np.pi)*nq) - W*(ws/np.pi)*np.sinc((ws/np.pi)*nq)
b = (wp/np.pi)*np.sinc((wp/np.pi)*nb)
b[0] = wp/np.pi
q[0] = wp/np.pi + W*(1-ws/np.pi) # since sin(pi*n)/pi*n = 1, not 0
b = b.transpose()
Q1 = ln.toeplitz(q[0:M+1])
Q2 = ln.hankel(q[0:M+1],q[M:])
Q = Q1+Q2
a = ln.solve(Q,b)
h = list(nq)
for i in nb:
h[i] = 0.5*a[M-i]
h[N-1-i] = h[i]
h[M] = 2*h[M]
hmax = max(np.absolute(h))
for i in nq:
h[i] = (8191/hmax)*h[i]
return h
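# Example: design a 31-tap lowpass filter (this assumes the module-level alias
# `ln` is scipy.linalg). The taps come out scaled so the largest equals 8191,
# i.e. a 14-bit fixed-point peak.
import numpy as np

h = lpfls(N=31, wp=0.3 * np.pi, ws=0.5 * np.pi, W=1.0)
print(len(h), round(max(np.absolute(h))))  # 31 8191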
# Source: LSFIR.py (project: Least-Squared-Error-Based-FIR-Filters, author: fourier-being)
def bpfls(N,ws1,wp1,wp2,ws2,W):
    M = (N - 1) // 2  # integer so it can be used as an index
nq = np.arange(0,2*M+1)
nb = np.arange(0,M+1)
    q = (W * np.sinc(nq)
         - (W * ws2 / np.pi) * np.sinc(nq * (ws2 / np.pi))
         + (wp2 / np.pi) * np.sinc(nq * (wp2 / np.pi))
         - (wp1 / np.pi) * np.sinc(nq * (wp1 / np.pi))
         + (W * ws1 / np.pi) * np.sinc(nq * (ws1 / np.pi)))
b = (wp2/np.pi)*np.sinc((wp2/np.pi)*nb) - (wp1/np.pi)*np.sinc((wp1/np.pi)*nb)
b[0] = wp2/np.pi - wp1/np.pi
q[0] = W - W*ws2/np.pi + wp2/np.pi - wp1/np.pi + W*ws1/np.pi # since sin(pi*n)/pi*n = 1, not 0
b = b.transpose()
Q1 = ln.toeplitz(q[0:M+1])
Q2 = ln.hankel(q[0:M+1],q[M:])
Q = Q1+Q2
a = ln.solve(Q,b)
h = list(nq)
for i in nb:
h[i] = 0.5*a[M-i]
h[N-1-i] = h[i]
h[M] = 2*h[M]
hmax = max(np.absolute(h))
for i in nq:
h[i] = (8191/hmax)*h[i]
return h
# Source: LSFIR.py (project: Least-Squared-Error-Based-FIR-Filters, author: fourier-being)
def hpfls(N,ws,wp,W):
    M = (N - 1) // 2  # integer so it can be used as an index
nq = np.arange(0,2*M+1)
nb = np.arange(0,M+1)
b = 1 - (wp/np.pi)* np.sinc(nb * wp/np.pi)
b[0] = 1- wp/np.pi
q = 1 - (wp/np.pi)* np.sinc(nq * wp/np.pi) + W * (ws/np.pi) * np.sinc(nq * ws/np.pi) # since sin(pi*n)/pi*n = 1, not 0
q[0] = b[0] + W* ws/np.pi
b = b.transpose()
Q1 = ln.toeplitz(q[0:M+1])
Q2 = ln.hankel(q[0:M+1],q[M:])
Q = Q1+Q2
a = ln.solve(Q,b)
h = list(nq)
for i in nb:
h[i] = 0.5*a[M-i]
h[N-1-i] = h[i]
h[M] = 2*h[M]
hmax = max(np.absolute(h))
for i in nq:
h[i] = (8191/hmax)*h[i]
return h
def bias_var(true_preds, sum_preds, counts, n_replicas):
'''
compute bias and variance
@param true_preds: true labels
@param sum_preds: array of summation of the predictions of each sample
    @param counts: the times each sample is tested (predicted)
    @param n_replicas: number of replicas, used to weight each sample
    @return: squared bias, variance
'''
sample_bias = np.absolute(true_preds - sum_preds / counts)
sample_var = sample_bias * (1.0 - sample_bias)
weighted_sample_bias_2 = np.power(sample_bias, 2.0) * (counts / n_replicas)
weighted_sample_var = sample_var * (counts / n_replicas)
bias = np.mean(weighted_sample_bias_2)
var = np.mean(weighted_sample_var)
return bias, var
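# A worked toy example: three samples, each predicted 10 times across replicas.
import numpy as np

true_preds = np.array([1.0, 0.0, 1.0])   # true 0/1 labels (toy values)
sum_preds = np.array([8.0, 3.0, 10.0])   # summed binary predictions per sample
counts = np.array([10.0, 10.0, 10.0])    # times each sample was predicted
bias, var = bias_var(true_preds, sum_preds, counts, n_replicas=10)
print(round(bias, 4), round(var, 4))     # 0.0433 0.1233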
def Mesh_Theta(target_size,omega,mu_0,s_Ray,N_turns,l,electrodes_angle):
freq = omega/(2*numpy.pi)
lambda_approx = 1/(freq*numpy.mean(s_Ray))
R0 = target_size + omega*numpy.mean(mu_0)*lambda_approx/(2*numpy.pi)
if l==0:
Theta = []
phi_0 = 2*numpy.pi*R0/lambda_approx
for n in range(N_turns):
Theta_max = 2*numpy.pi
R_approx = R0+n*lambda_approx
dTheta = lambda_approx/(6*R_approx)
Theta.append(numpy.arange(electrodes_angle[0],Theta_max+electrodes_angle[0],dTheta))
else:
Theta_max = 2*numpy.pi*numpy.ceil(N_turns/numpy.absolute(l))
Theta = [electrodes_angle[0]]
while Theta[-1]<(electrodes_angle[0]+Theta_max):
R_approx = R0+numpy.abs(l)*Theta[-1]*lambda_approx/(2*numpy.pi)
dTheta = lambda_approx/(6*R_approx)
Theta.append(Theta[-1]+dTheta)
if l<0:
Theta = Theta[::-1]
phi_0 = numpy.abs(l)*(Theta[0]+2*numpy.pi*R0/lambda_approx)
else:
phi_0 = (2*numpy.pi*R0/lambda_approx)
return {'Theta':Theta,'phi_0':phi_0}
def transform_depth(numpy_array):
"""
Performs custom version of log transformation on a numpy array. Where each
value is processed to be equal to:
initial_sign * abs(log10(abs(value)))
Parameters
----------
numpy_array : numpy array
Array of values without NaNs
Returns
-------
numpy array
"""
    signs = np.sign(numpy_array)
    step1 = np.absolute(numpy_array)
    id_nonzero = step1 != 0
    # use an explicit out= buffer: with `where=` alone, the entries skipped by
    # the mask would be left uninitialized instead of zero
    step2 = np.zeros_like(step1)
    np.log10(step1, out=step2, where=id_nonzero)
    return signs * np.absolute(step2)
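# A quick check of the transform: zeros stay zero and signs are preserved.
import numpy as np

vals = np.array([-100.0, -0.1, 0.0, 1000.0])
print(transform_depth(vals))  # -> [-2. -1.  0.  3.]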
def __call__(self):
locations = LinearLocator.__call__(self)
new_locations = []
for location in locations:
if np.absolute(location) < 0.01:
new_locations.append(float("{:.1e}".format(location)))
else:
new_locations.append(np.round(location, 3))
if np.isclose(new_locations[-1], self.max_val) or new_locations[-1] >= self.max_val:
new_locations[-1] = self.max_val
if new_locations[0] <= self.min_val:
new_locations[0] = self.min_val
return new_locations
def test_slow_gridding_against_jvdp_nfft(self):
t, tsc, y, err = data()
nf = int(nfft_sigma * len(t))
gpu_grid = simple_gpu_nfft(t, y, nf, sigma=nfft_sigma, m=nfft_m,
just_return_gridded_data=True,
fast_grid=False,
minimum_frequency=-int(nf/2),
samples_per_peak=spp)
# get CPU grid
cpu_grid = get_cpu_grid(tsc, y, nf, sigma=nfft_sigma, m=nfft_m)
diffs = np.absolute(gpu_grid - cpu_grid)
inds = (np.argsort(diffs)[::-1])[:10]
for i, gpug, cpug, d in zip(inds, gpu_grid[inds],
cpu_grid[inds],
diffs[inds]):
print(i, gpug, cpug, d)
tols = dict(rtol=nfft_rtol, atol=nfft_atol)
assert_allclose(gpu_grid, cpu_grid, **tols)
def test_large_run(self, make_plot=False, **kwargs):
proc = ConditionalEntropyAsyncProcess(**kwargs)
t, y, dy = data(sigma=0.01, ndata=100, freq=4.)
df = 0.001
max_freq = 100.
min_freq = df
nf = int((max_freq - min_freq) / df)
freqs = min_freq + df * np.arange(nf)
r0 = proc.run([(t, y, dy)], freqs=freqs)
r1 = proc.large_run([(t, y, dy)], freqs=freqs, max_memory=1e7)
f0, p0 = r0[0]
f1, p1 = r1[0]
rel_err = max(np.absolute(p0 - p1)) / np.median(np.absolute(p0))
print(max(np.absolute(p0 - p1)), rel_err)
assert_allclose(p0, p1, rtol=1e-4, atol=1e-2)
def quasistable(self, quasi_stable_strain_ids=None, surviving_strain_ids=None):
"""
Stability check.
If stable return True, else return False
"""
if quasi_stable_strain_ids is not None:
i_1 = int(self.t / 3.)
i_2 = 2 * i_1
max_diff = n_max(absolute(
divide(
n_sum(self._counts_over_time[i_1:i_2], axis=0), n_sum(self._counts_over_time[i_2:], axis=0)
)[quasi_stable_strain_ids]
))
if abs(1 - max_diff) >= 0.02:
return False
else:
                print('quasistable at t =', self.t)
return True
if surviving_strain_ids is not None:
if not count_nonzero(self._counts_over_time[int(self.t)][surviving_strain_ids]):
                print('protected strain died out at t =', self.t)
return True
else:
return False
return False
def maxImagen(img, tamanyo):
    '''Estimate the blue and green background levels from a windowed max-channel map.'''
bOri, gOri, rOri = cv2.split(img)
filas,columnas,canales = img.shape
#pad_size = tamanyo/2
#padded_max = np.pad(img, (pad_size, pad_size),'constant',constant_values=np.inf)
max_channel = np.zeros((filas,columnas))
for r in range(1,filas):
for c in range(1,columnas):
window_b = bOri[r:r+tamanyo,c:c+tamanyo]
window_g = gOri[r:r+tamanyo,c:c+tamanyo]
window_r = rOri[r:r+tamanyo,c:c+tamanyo]
max_bg = np.max(window_b+window_g)
max_r = np.max(window_r)
max_ch = max_r-max_bg #(max_r-max_bg)+np.absolute(np.min(max_r-max_bg))
max_ch_array = np.array([max_ch])
max_channel[r,c] = max_ch_array
min_max_channel = np.min(max_channel)
background_bOri = np.mean(bOri*min_max_channel)
background_gOri = np.mean(gOri*min_max_channel)
BbOri = np.absolute(background_bOri)
BgOri = np.absolute(background_gOri)
return BbOri, BgOri #max_channel,
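# Example call with a made-up filename and a 15x15 search window:
import cv2

BbOri, BgOri = maxImagen(cv2.imread('underwater.jpg'), 15)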