def clipped_linscale_img(img_array,
                         cap=255.0,
                         lomult=2.0,
                         himult=2.0):
    '''
    This clips the image between the values:
    [median(img_array) - lomult*stdev(img_array),
     median(img_array) + himult*stdev(img_array)]
    and returns a linearly scaled image using the cap given.
    '''
    img_med, img_stdev = np.median(img_array), np.std(img_array)
    clipped_linear_img = np.clip(img_array,
                                 img_med - lomult*img_stdev,
                                 img_med + himult*img_stdev)
    return cap*clipped_linear_img/(img_med + himult*img_stdev)
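# A minimal usage sketch for clipped_linscale_img, assuming numpy is imported
# as np (the synthetic image below is illustrative, not from the source project):
import numpy as np

np.random.seed(0)
img = 100.0 + 10.0 * np.random.randn(64, 64)
scaled = clipped_linscale_img(img, cap=255.0)
print(scaled.min(), scaled.max())  # the upper clip bound maps exactly to cap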
def svgd_kernel(self, h=-1):
    sq_dist = pdist(self.theta)
    pairwise_dists = squareform(sq_dist)**2
    if h < 0:  # if h < 0, use the median trick
        h = np.median(pairwise_dists)
        h = np.sqrt(0.5 * h / np.log(self.theta.shape[0] + 1))
    # compute the RBF kernel
    Kxy = np.exp(-pairwise_dists / h**2 / 2)
    dxkxy = -np.matmul(Kxy, self.theta)
    sumkxy = np.sum(Kxy, axis=1)
    for i in range(self.theta.shape[1]):
        dxkxy[:, i] = dxkxy[:, i] + np.multiply(self.theta[:, i], sumkxy)
    dxkxy = dxkxy / (h**2)
    return (Kxy, dxkxy)
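# The bandwidth selection above is the SVGD "median trick". A standalone sketch
# of just that computation (hedged: random particles, not project data):
import numpy as np
from scipy.spatial.distance import pdist, squareform

np.random.seed(0)
theta = np.random.randn(50, 2)                     # 50 particles in 2-D
pairwise_sq = squareform(pdist(theta)) ** 2        # squared Euclidean distances
h = np.median(pairwise_sq)
h = np.sqrt(0.5 * h / np.log(theta.shape[0] + 1))  # RBF bandwidth
print(h)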
def reshape_array(array, newsize, pixcombine='sum'):
    """
    Reshape an array to a given size using either the sum, mean or median of the pixels binned.
    Note that the old array dimensions have to be multiples of the new array dimensions.
    --- INPUT ---
    array       Array to reshape (combine pixels)
    newsize     New size of array
    pixcombine  The method to combine the pixels with. Choices are sum, mean and median
    """
    sh = newsize[0], array.shape[0]//newsize[0], newsize[1], array.shape[1]//newsize[1]
    if pixcombine == 'sum':
        reshapedarray = array.reshape(sh).sum(-1).sum(1)
    elif pixcombine == 'mean':
        reshapedarray = array.reshape(sh).mean(-1).mean(1)
    elif pixcombine == 'median':
        # ndarrays have no .median() method; reduce each binning axis with np.median
        # (note this is a median of row-medians, mirroring the sequential reductions above)
        reshapedarray = np.median(np.median(array.reshape(sh), axis=-1), axis=1)
    else:
        raise ValueError("pixcombine must be 'sum', 'mean' or 'median'")
    return reshapedarray
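# Quick check of reshape_array: binning a 4x4 array down to 2x2 sums each
# 2x2 block (example data assumed, not from the source project):
import numpy as np

arr = np.arange(16.0).reshape(4, 4)
print(reshape_array(arr, (2, 2), pixcombine='sum'))
# [[10. 18.]
#  [42. 50.]]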
# = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
def get_normalized_dispersion(mat_mean, mat_var, nbins=20):
    mat_disp = (mat_var - mat_mean) / np.square(mat_mean)
    quantiles = np.percentile(mat_mean, np.arange(0, 100, 100 / nbins))
    quantiles = np.append(quantiles, mat_mean.max())
    # merge bins with no difference in value
    quantiles = np.unique(quantiles)
    if len(quantiles) <= 1:
        # pathological case: the means are all identical. just return raw dispersion.
        return mat_disp
    # calc median dispersion per bin
    (disp_meds, _, disp_bins) = scipy.stats.binned_statistic(mat_mean, mat_disp, statistic='median', bins=quantiles)
    # calc median absolute deviation of dispersion per bin
    disp_meds_arr = disp_meds[disp_bins - 1]  # 0th bin is empty since our quantiles start from 0
    disp_abs_dev = abs(mat_disp - disp_meds_arr)
    (disp_mads, _, disp_bins) = scipy.stats.binned_statistic(mat_mean, disp_abs_dev, statistic='median', bins=quantiles)
    # calculate normalized dispersion
    disp_mads_arr = disp_mads[disp_bins - 1]
    disp_norm = (mat_disp - disp_meds_arr) / disp_mads_arr
    return disp_norm
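# A hedged usage sketch: synthetic per-gene means with an assumed quadratic
# mean-variance trend (requires numpy as np and scipy.stats, as above):
import numpy as np
import scipy.stats

np.random.seed(0)
mat_mean = np.random.gamma(2.0, 1.0, 500)
mat_var = mat_mean + 0.5 * mat_mean ** 2
disp_norm = get_normalized_dispersion(mat_mean, mat_var)
print(np.median(disp_norm))  # near 0: dispersions are centered per mean-bin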
def summarize_subsampled_matrices_cb(self, filtered_mats, subsample_type, subsample_depth):
    """
    Computes simple summary metrics such as median genes detected and UMI counts on subsampled filtered matrices.
    Args:
        filtered_mats (GeneBCMatrices): subsampled and filtered GeneBCMatrices
        subsample_type (string): subsampling type
        subsample_depth (int): target depth per cell for subsampling
    """
    for genome in self.genomes:
        if filtered_mats is not None:
            matrix = filtered_mats.matrices[genome]
            genes_detected = np.median(matrix._sum(matrix.m >= cr_constants.MIN_READS_PER_GENE, axis=0))
            median_counts = np.median(matrix._sum(matrix.m, axis=0))
            subsampled_filtered_bc_median_unique_genes_detected = self._get_metric_attr('subsampled_filtered_bcs_median_unique_genes_detected', genome, subsample_type, subsample_depth)
            subsampled_filtered_bc_median_unique_genes_detected.set_value(genes_detected)
            subsampled_filtered_bcs_median_counts = self._get_metric_attr('subsampled_filtered_bcs_median_counts', genome, subsample_type, subsample_depth)
            subsampled_filtered_bcs_median_counts.set_value(median_counts)
def getMedianDistanceBetweenSamples(self, sampleSet=None):
    """
    Jaakkola's heuristic method for setting the width parameter of the Gaussian
    radial basis function kernel is to pick a quantile (usually the median) of
    the distribution of Euclidean distances between points having different
    labels.
    Reference:
    T. Jaakkola, M. Diekhans, and D. Haussler. Using the Fisher kernel method to detect
    remote protein homologies. In T. Lengauer, R. Schneider, P. Bork, D. Brutlag, J.
    Glasgow, H.-W. Mewes, and R. Zimmer, editors, Proceedings of the Seventh
    International Conference on Intelligent Systems for Molecular Biology.
    """
    numrows = sampleSet.shape[0]
    samples = sampleSet
    # per-sample squared norms (the builtin sum(..., 1) would treat 1 as a start value)
    G = numpy.sum(samples * samples, axis=1)
    Q = numpy.tile(G[:, None], (1, numrows))
    R = numpy.tile(G, (numrows, 1))
    distances = Q + R - 2 * numpy.dot(samples, samples.T)
    distances = distances - numpy.tril(distances)
    distances = distances.reshape(numrows**2, 1, order="F").copy()
    return numpy.sqrt(0.5 * numpy.median(distances[distances > 0]))
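# Usage sketch: `self` is unused in the body above, so the heuristic can be
# exercised unbound on random data (illustrative only):
import numpy

numpy.random.seed(0)
samples = numpy.random.randn(100, 5)
sigma = getMedianDistanceBetweenSamples(None, sampleSet=samples)
print(sigma)  # median-distance-based RBF width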
def read_tagger(alignment, method='median'):
    """ Tag a read alignment to a genomic locus.
    Args:
        alignment: aligned read (e.g. a pysam AlignedSegment)
        method (str): tagger function to use, 'median' or 'start'
    Returns:
        int: 1-based tagged position, or -1 if the read cannot be tagged
    """
    tagger_func = {
        # center of the read; must discard junction reads
        'median': lambda x: -1 if 'N' in x.cigarstring else int(np.median(x.positions)) + 1,
        # start site of the read; truncation in iCLIP/eCLIP
        'start': lambda x: x.positions[-1] if x.is_reverse else x.positions[0] + 1
    }
    try:
        tag = tagger_func[method](alignment)
    except Exception:
        # unknown method or a read without positions falls back to -1
        tag = -1
    return tag
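# A hedged demo with a stand-in object (the real caller would pass a pysam
# AlignedSegment; MockAlignment here is hypothetical):
import numpy as np

class MockAlignment(object):
    def __init__(self, cigarstring, positions, is_reverse=False):
        self.cigarstring = cigarstring
        self.positions = positions
        self.is_reverse = is_reverse

aln = MockAlignment('36M', list(range(100, 136)))
print(read_tagger(aln, method='median'))  # 118: 1-based center of the read
print(read_tagger(aln, method='start'))   # 101: 1-based start site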
def genplot(x, y, fit, xdata=None, ydata=None, maxpts=10000):
    bin_range = (0, 360)
    a = (np.arange(*bin_range))
    f_a = nuth_func(a, fit[0], fit[1], fit[2])
    nuth_func_str = r'$y=%0.2f*cos(%0.2f-x)+%0.2f$' % tuple(fit)
    if xdata.size > maxpts:
        import random
        # subsample down to maxpts points (was hardcoded to 10000, ignoring the parameter)
        idx = random.sample(list(range(xdata.size)), maxpts)
    else:
        idx = np.arange(xdata.size)
    f, ax = plt.subplots()
    ax.set_xlabel('Aspect (deg)')
    ax.set_ylabel('dh/tan(slope) (m)')
    ax.plot(xdata[idx], ydata[idx], 'k.', label='Orig pixels')
    ax.plot(x, y, 'ro', label='Bin median')
    ax.axhline(color='k')
    ax.plot(a, f_a, 'b', label=nuth_func_str)
    ax.set_xlim(*bin_range)
    pad = 0.2 * np.max([np.abs(y.min()), np.abs(y.max())])
    ax.set_ylim(y.min() - pad, y.max() + pad)
    ax.legend(prop={'size': 8})
    return f
#Function copied from openPIV pyprocess
def __load_page_data(self):
    self.__clearRows()
    if hasattr(self, "selectChan"):
        with hp.File(self.file_name, "r") as f:
            sampling_rate = f["analogs"][self.selectChan]["sampling_rate"].value
            start_time = f["analogs"][self.selectChan]["start_time"].value
            # cast to int so the values can be used as slice indices
            start_point = int(sampling_rate * self.row_num * self.current_page)
            end_point = int(sampling_rate * self.row_num * (self.current_page + 1))
            self.page_data = f["analogs"][self.selectChan]["data"][start_point:end_point]
            # robust noise estimate: median(|x|)/0.6745 approximates sigma for Gaussian noise
            self.sigma = np.median(np.abs(self.page_data) / 0.6745)
            Thr = self.thresholds[self.selectChan] * self.sigma
            self.sampling_rate = sampling_rate
            self.row_wins_rois = [0] * self.row_num
            for i in range(self.row_num):
                start_point = int(i * sampling_rate)
                end_point = int((i + 1) * sampling_rate)
                if self.page_data[start_point:end_point].size:
                    ys = self.page_data[start_point:end_point]
                    xs = np.arange(ys.size)
                    line = MultiLine(np.array([xs]), np.array([ys]), "w")
                    self.row_wins[i].addItem(line)
                    self.row_wins_rois[i] = pg.InfiniteLine(pos=Thr, angle=0, movable=False)
                    self.row_wins_rois[i].setZValue(10)
                    self.row_wins[i].addItem(self.row_wins_rois[i])
def svgd_kernel(self, theta, h=-1):
    sq_dist = pdist(theta)
    pairwise_dists = squareform(sq_dist)**2
    if h < 0:  # if h < 0, use the median trick
        h = np.median(pairwise_dists)
        h = np.sqrt(0.5 * h / np.log(theta.shape[0] + 1))
    # compute the RBF kernel
    Kxy = np.exp(-pairwise_dists / h**2 / 2)
    dxkxy = -np.matmul(Kxy, theta)
    sumkxy = np.sum(Kxy, axis=1)
    for i in range(theta.shape[1]):
        dxkxy[:, i] = dxkxy[:, i] + np.multiply(theta[:, i], sumkxy)
    dxkxy = dxkxy / (h**2)
    return (Kxy, dxkxy)
def get_ambient_temperature(self, n=5):
    '''
    Populates the self.ambient_temp property.
    The calculation is taken from Rs232_Comms_v100.pdf, section "Converting values
    sent by the device to meaningful units", item 5.
    '''
    self.logger.info('Getting ambient temperature')
    values = []
    for i in range(0, n):
        try:
            value = float(self.query('!T')[0]) / 100.
        except Exception:
            pass
        else:
            self.logger.debug(' Ambient Temperature Query = {:.1f}'.format(value))
            values.append(value)
    if len(values) >= n - 1:
        self.ambient_temp = np.median(values) * u.Celsius
        self.logger.info(' Ambient Temperature = {:.1f}'.format(self.ambient_temp))
    else:
        self.ambient_temp = None
        self.logger.info(' Failed to Read Ambient Temperature')
    return self.ambient_temp
def get_rain_frequency(self, n=5):
    '''
    Populates the self.rain_frequency property.
    '''
    self.logger.info('Getting rain frequency')
    values = []
    for i in range(0, n):
        try:
            value = float(self.query('!E')[0]) * 100. / 1023.
            self.logger.debug(' Rain Freq Query = {:.1f}'.format(value))
            values.append(value)
        except Exception:
            pass
    if len(values) >= n - 1:
        self.rain_frequency = np.median(values)
        self.logger.info(' Rain Frequency = {:.1f}'.format(self.rain_frequency))
    else:
        self.rain_frequency = None
        self.logger.info(' Failed to read Rain Frequency')
    return self.rain_frequency
def score_fusion_strategy(strategy_name='average'):
    """Returns a function to compute a fusion strategy between different scores.
    Different strategies are employed:
    * ``'average'`` : The averaged score is computed using the :py:func:`numpy.average` function.
    * ``'min'`` : The minimum score is computed using the :py:func:`min` function.
    * ``'max'`` : The maximum score is computed using the :py:func:`max` function.
    * ``'median'`` : The median score is computed using the :py:func:`numpy.median` function.
    * ``None`` is also accepted, in which case ``None`` is returned.
    """
    try:
        return {
            'average': numpy.average,
            'min': min,
            'max': max,
            'median': numpy.median,
            None: None
        }[strategy_name]
    except KeyError:
        # warn("score fusion strategy '%s' is unknown" % strategy_name)
        return None
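# Usage sketch (assumes `numpy` is imported, as the function above requires):
import numpy

fuse = score_fusion_strategy('median')
print(fuse([0.2, 0.5, 0.9, 0.4]))      # 0.45
print(score_fusion_strategy('bogus'))  # None (unknown strategy)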
def test_bootstrap_replicate_1d(data, seed):
    # np.isnan() takes no atol/equal_nan keywords; the NaN check is a plain isnan
    np.random.seed(seed)
    x = dcst.bootstrap_replicate_1d(data, np.mean)
    np.random.seed(seed)
    x_correct = original.bootstrap_replicate_1d(data[~np.isnan(data)], np.mean)
    assert (np.isnan(x) and np.isnan(x_correct)) \
        or np.isclose(x, x_correct, atol=atol, equal_nan=True)
    np.random.seed(seed)
    x = dcst.bootstrap_replicate_1d(data, np.median)
    np.random.seed(seed)
    x_correct = original.bootstrap_replicate_1d(data[~np.isnan(data)], np.median)
    assert (np.isnan(x) and np.isnan(x_correct)) \
        or np.isclose(x, x_correct, atol=atol, equal_nan=True)
    np.random.seed(seed)
    x = dcst.bootstrap_replicate_1d(data, np.std)
    np.random.seed(seed)
    x_correct = original.bootstrap_replicate_1d(data[~np.isnan(data)], np.std)
    assert (np.isnan(x) and np.isnan(x_correct)) \
        or np.isclose(x, x_correct, atol=atol, equal_nan=True)
def _draw_bs_reps_median(data, size=1):
    """
    Generate bootstrap replicates of the median out of `data`.
    Parameters
    ----------
    data : array_like
        One-dimensional array of data.
    size : int, default 1
        Number of bootstrap replicates to generate.
    Returns
    -------
    output : ndarray
        Bootstrap replicates of the median computed from `data`.
    """
    # Set up output array
    bs_reps = np.empty(size)
    # Draw replicates
    n = len(data)
    for i in range(size):
        bs_reps[i] = np.median(np.random.choice(data, size=n))
    return bs_reps
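# Usage sketch: a bootstrap confidence interval for the median of synthetic data:
import numpy as np

np.random.seed(42)
data = np.random.normal(10.0, 2.0, size=200)
reps = _draw_bs_reps_median(data, size=1000)
print(np.percentile(reps, [2.5, 97.5]))  # 95% bootstrap CI for the median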
def despike(df, window=31, l=6):
    """
    Remove outliers from the columns of :class:`DataFrame` by
    comparing the absolute deviation from the windowed median to the
    windowed robust standard deviation (see :func:`robust_std`). Use a
    centered window of length *window* (must be odd). Replace values
    whose absolute deviation from the windowed median exceeds *l*
    robust standard deviations with the windowed median.
    Reference: Hampel F. R., "The influence curve and its role in
    robust estimation," Journal of the American Statistical
    Association, 69, 382-393, 1974.
    """
    if window % 2 == 0:
        raise ValueError('window length must be odd')
    df_rolling = df.rolling(window, center=True)
    df_rolling_median = df_rolling.median()
    df_robust_std = df_rolling.apply(robust_std)
    I = (df - df_rolling_median).abs() > l * df_robust_std
    df_despike = df.copy()
    df_despike[I] = df_rolling_median
    return df_despike.iloc[(window-1):-(window-1)]
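# A hedged demo of despike. `robust_std` is referenced but not shown here; the
# definition below (1.4826 * MAD) is an assumption consistent with the
# "1.4826*MAD" note in the save() snippet further down:
import numpy as np
import pandas as pd

def robust_std(x):
    x = np.asarray(x)
    return 1.4826 * np.median(np.abs(x - np.median(x)))

s = pd.Series(np.sin(np.linspace(0, 10, 200)))
s.iloc[[50, 120]] += 5.0                 # inject two spikes
clean = despike(s.to_frame('x'), window=31, l=6)
print(clean['x'].abs().max())            # back near 1: the spikes are gone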
def save(self, outfile):
    data = self.psd1d
    header = [
        "pixel: %s [%s]" % self.pixel,
        "frequency: [%s^-1]" % self.pixel[1],
    ]
    if self.meanstd:
        header += [
            "psd1d: *mean* powers of radial averaging annuli",
            "psd1d_err: *standard deviation*",
        ]
    else:
        header += [
            "psd1d: *median* powers of radial averaging annuli",
            "psd1d_err: 1.4826*MAD (median absolute deviation)",
        ]
    header += [
        "n_cells: number of averaging cells",
        "",
        "frequency   psd1d   psd1d_err   n_cells"
    ]
    np.savetxt(outfile, data, header="\n".join(header))
    print("Saved PSD data to: %s" % outfile)
def get_series_median_peryear(word_time_series, i_year_words, one_minus=False, start_year=1900, end_year=2000, year_inc=10, exclude_partial_missing=False):
    """
    Return the array of yearly median values for the words specified per year in i_year_words, over the specified years.
    """
    medians = []
    r_word_time_series = {}
    if exclude_partial_missing:
        for word, time_series in word_time_series.items():
            if not np.isnan(np.sum(list(time_series.values()))):
                r_word_time_series[word] = time_series
    else:
        r_word_time_series = word_time_series
    for year in range(start_year, end_year + 1, year_inc):
        word_array = np.array([r_word_time_series[word][year] for word in i_year_words[year]
                               if word in r_word_time_series and not np.isnan(r_word_time_series[word][year]) and not r_word_time_series[word][year] == 0])
        if len(word_array) == 0:
            continue
        if one_minus:
            word_array = 1 - word_array
        medians.append(np.median(word_array))
    return np.array(medians)
def simple_slope_percentiles(res, df, target, varying, percs=[25, 50, 75]):
    exog = {}
    for param in res.fe_params.index:
        if len(param.split(":")) != 1:
            continue
        if param == "Intercept":
            exog[param] = 1.0
        else:
            exog[param] = np.median(df[param])
    ret_vals = collections.OrderedDict()
    for varying_perc in percs:
        exog[varying] = np.percentile(df[varying], varying_perc)
        ret_vals[exog[varying]] = collections.defaultdict(list)
        for target_perc in [25, 75]:
            exog[target] = np.percentile(df[target], target_perc)
            # interaction terms (a:b) are rebuilt as the product of their main effects
            exog_arr = np.array([exog[param] if len(param.split(":")) == 1
                                 else exog[param.split(":")[0]] * exog[param.split(":")[1]]
                                 for param in res.fe_params.index])
            ret_vals[exog[varying]]["endog"].append(res.model.predict(res.fe_params, exog=exog_arr))
            ret_vals[exog[varying]]["target"].append(exog[target])
    return ret_vals
def _gene_signature(self, wm, size, key):
    '''Generate a binary signature from the watermark image and the key.'''
    wm = cv2.resize(wm, (size, size))
    wU, _, wV = np.linalg.svd(np.mat(wm))
    sumU = np.sum(np.array(wU), axis=0)
    sumV = np.sum(np.array(wV), axis=0)
    sumU_mid = np.median(sumU)
    sumV_mid = np.median(sumV)
    sumU = np.array([1 if sumU[i] > sumU_mid else 0 for i in range(len(sumU))])
    sumV = np.array([1 if sumV[i] > sumV_mid else 0 for i in range(len(sumV))])
    uv_xor = np.logical_xor(sumU, sumV)
    np.random.seed(key)
    seq = np.random.randint(2, size=len(uv_xor))
    signature = np.logical_xor(uv_xor, seq)
    sqrts = int(np.sqrt(size))
    return np.array(signature, dtype=np.int8).reshape((sqrts, sqrts))
def _gene_signature(self, wm, key):
    '''Generate a binary signature from the watermark image and the key.'''
    wm = cv2.resize(wm, (256, 256))
    wU, _, wV = np.linalg.svd(np.mat(wm))
    sumU = np.sum(np.array(wU), axis=0)
    sumV = np.sum(np.array(wV), axis=0)
    sumU_mid = np.median(sumU)
    sumV_mid = np.median(sumV)
    sumU = np.array([1 if sumU[i] > sumU_mid else 0 for i in range(len(sumU))])
    sumV = np.array([1 if sumV[i] > sumV_mid else 0 for i in range(len(sumV))])
    uv_xor = np.logical_xor(sumU, sumV)
    np.random.seed(key)
    seq = np.random.randint(2, size=len(uv_xor))
    signature = np.logical_xor(uv_xor, seq)
    return np.array(signature, dtype=np.int8)
def _gene_signature(self, wU, wV, key):
    '''Generate a binary signature from the watermark SVD factors and the key.'''
    sumU = np.sum(wU, axis=0)
    sumV = np.sum(wV, axis=0)
    sumU_mid = np.median(sumU)
    sumV_mid = np.median(sumV)
    sumU = np.array([1 if sumU[i] > sumU_mid else 0 for i in range(len(sumU))])
    sumV = np.array([1 if sumV[i] > sumV_mid else 0 for i in range(len(sumV))])
    uv_xor = np.logical_xor(sumU, sumV)
    np.random.seed(key)
    seq = np.random.randint(2, size=len(uv_xor))
    signature = np.logical_xor(uv_xor, seq)
    return np.array(signature, dtype=np.int8)
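# A hedged demo of the signature generation above (`self` is unused, so the
# method can be exercised unbound; the SVD factors come from a random stand-in
# watermark rather than a real image):
import numpy as np

np.random.seed(0)
wU, _, wV = np.linalg.svd(np.random.rand(8, 8))
sig = _gene_signature(None, wU, wV, key=42)
print(sig)  # a length-8 binary signature, reproducible from the same key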
def get_Surface_Potentials(mtrue, survey, src, field_obj):
    phi = field_obj['phi']
    CCLoc = mesh.gridCC
    XLoc = np.unique(mesh.gridCC[:, 0])
    surfaceInd, zsurfaceLoc = get_Surface(mtrue, XLoc)
    phiSurface = phi[surfaceInd]
    phiScale = 0.
    if survey == "Pole-Dipole" or survey == "Pole-Pole":
        refInd = Utils.closestPoints(mesh, [xmax+60., 0.], gridLoc='CC')
        # refPoint = CCLoc[refInd]
        # refSurfaceInd = np.where(xSurface == refPoint[0])
        # phiScale = np.median(phiSurface)
        phiScale = phi[refInd]
        phiSurface = phiSurface - phiScale
    return XLoc, phiSurface, phiScale
def get_Surface_Potentials(survey, src, field_obj):
    phi = field_obj['phi']
    CCLoc = mesh.gridCC
    zsurfaceLoc = np.max(CCLoc[:, 1])
    surfaceInd = np.where(CCLoc[:, 1] == zsurfaceLoc)
    xSurface = CCLoc[surfaceInd, 0].T
    phiSurface = phi[surfaceInd]
    phiScale = 0.
    if survey == "Pole-Dipole" or survey == "Pole-Pole":
        refInd = Utils.closestPoints(mesh, [xmax+60., 0.], gridLoc='CC')
        # refPoint = CCLoc[refInd]
        # refSurfaceInd = np.where(xSurface == refPoint[0])
        # phiScale = np.median(phiSurface)
        phiScale = phi[refInd]
        phiSurface = phiSurface - phiScale
    return xSurface, phiSurface, phiScale
# Inline functions for computing apparent resistivity
#eps = 1e-9 #to stabilize division
#G = lambda A, B, M, N: 1. / ( 1./(np.abs(A-M)+eps) - 1./(np.abs(M-B)+eps) - 1./(np.abs(N-A)+eps) + 1./(np.abs(N-B)+eps) )
#rho_a = lambda VM,VN, A,B,M,N: (VM-VN)*2.*np.pi*G(A,B,M,N)
def get_Surface_Potentials(survey, src, field_obj):
    phi = field_obj[src, 'phi']
    CCLoc = mesh.gridCC
    zsurfaceLoc = np.max(CCLoc[:, 1])
    surfaceInd = np.where(CCLoc[:, 1] == zsurfaceLoc)
    xSurface = CCLoc[surfaceInd, 0].T
    phiSurface = phi[surfaceInd]
    phiScale = 0.
    if survey == "Pole-Dipole" or survey == "Pole-Pole":
        refInd = Utils.closestPoints(mesh, [xmax+60., 0.], gridLoc='CC')
        # refPoint = CCLoc[refInd]
        # refSurfaceInd = np.where(xSurface == refPoint[0])
        # phiScale = np.median(phiSurface)
        phiScale = phi[refInd]
        phiSurface = phiSurface - phiScale
    return xSurface, phiSurface, phiScale
def project_verteces(self, mesh, orientation):
    """Supplement the mesh array with scalars (max and median)
    for each face projected onto the orientation vector.
    Args:
        mesh (np.array): with format face_count x 6 x 3.
        orientation (np.array): a length-3 direction vector.
    Returns:
        adjusted mesh.
    """
    mesh[:, 4, 0] = np.inner(mesh[:, 1, :], orientation)
    mesh[:, 4, 1] = np.inner(mesh[:, 2, :], orientation)
    mesh[:, 4, 2] = np.inner(mesh[:, 3, :], orientation)
    mesh[:, 5, 1] = np.max(mesh[:, 4, :], axis=1)
    mesh[:, 5, 2] = np.median(mesh[:, 4, :], axis=1)
    sleep(0)  # Yield, so other threads get a bit of breathing space (requires `from time import sleep`).
    return mesh
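# A hedged demo of project_verteces on a single triangle (`self` is unused, so
# the method can be exercised unbound; `sleep` is imported here because the
# snippet above depends on it):
import numpy as np
from time import sleep

mesh = np.zeros((1, 6, 3))
mesh[0, 1] = [0.0, 0.0, 0.0]             # vertex 1
mesh[0, 2] = [1.0, 0.0, 1.0]             # vertex 2
mesh[0, 3] = [0.0, 1.0, 2.0]             # vertex 3
orientation = np.array([0.0, 0.0, 1.0])  # project onto the z-axis
mesh = project_verteces(None, mesh, orientation)
print(mesh[0, 5, 1], mesh[0, 5, 2])      # max 2.0 and median 1.0 of the projections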