import numpy as np

def combigauss(subtds, subtderrs, truetds, lensmodelsigma=0.0):
    """
    Give me submission delays and error bars, as well as the corresponding true delays, as numpy arrays.
    I compute the mean and sigma of the combined posterior on the fractional time delay distance error.
    """
    from scipy.stats import norm
    subtdoffs = subtds - truetds
    centers = subtdoffs / truetds
    sigmas = subtderrs / np.fabs(truetds)
    # We convolve with the lensmodelsigma:
    sigmas = np.sqrt(sigmas**2 + lensmodelsigma**2)
    sigma_combi = 1.0 / np.sqrt(np.sum(1.0 / (sigmas**2)))
    center_combi = sigma_combi**2 * np.sum(centers / sigmas**2)
    probazero = norm.pdf(0.0, center_combi, sigma_combi)
    return (center_combi, sigma_combi, probazero)
    # To plot this you could do:
    # plt.plot(x, norm.pdf(x, center_combi, sigma_combi), ls="--", color="black")
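# A minimal usage sketch with assumed toy numbers (not from any real
# submission): three measured delays with 1-sigma error bars vs. the truth.
truetds = np.array([-50.0, 30.0, 80.0])
subtds = np.array([-48.5, 31.0, 78.0])
subtderrs = np.array([2.0, 1.5, 3.0])
center_combi, sigma_combi, probazero = combigauss(subtds, subtderrs, truetds, lensmodelsigma=0.03)
print("combined fractional bias: %.4f +/- %.4f" % (center_combi, sigma_combi))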
def tv(self):
    """
    Returns the total variation of the spline. Simple!
    http://en.wikipedia.org/wiki/Total_variation
    """
    # Method 1 : linear approximation
    ptd = 5  # point density in days ... this is enough!
    a = self.t[0]
    b = self.t[-1]
    x = np.linspace(a, b, int((b - a) * ptd))
    y = self.eval(jds=x)
    tv1 = np.sum(np.fabs(y[1:] - y[:-1]))
    # print("TV1 : %f" % tv1)
    return tv1
    # Method 2 : integrating the absolute value of the derivative ... hmm, splint does not integrate derivatives ...
    # si.splev(jds, (self.t, self.c, self.k))
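# The same linear approximation on a bare array, for intuition (a standalone
# sketch, no spline object required): the total variation of a sampled curve
# is just the summed absolute differences between neighbouring samples.
import numpy as np

x = np.linspace(0.0, 2.0*np.pi, 1000)
y = np.sin(x)
print(np.sum(np.fabs(np.diff(y))))  # ~4.0, the exact TV of sin on [0, 2*pi]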
import numpy as np

def calMinTheta(M):
    vecContainZero = False
    zeroCount = 0
    Mtrans_T = np.dot(M.T, M)
    u, s, v = np.linalg.svd(Mtrans_T)
    eigenValue = s[-1]
    # Row of v belonging to the smallest singular value: the direction that
    # (approximately) spans the null space of M.T @ M.
    minVec = v[-1, :]
    for i in minVec:
        if np.fabs(i) < 1e-3:
            zeroCount += 1
    if zeroCount != 0:
        print("0 exists in the vector; flag it to be discarded:")
        vecContainZero = True
        print(minVec)
    return minVec.T, vecContainZero
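# A hypothetical usage sketch: recover the null-space direction of a
# rank-deficient matrix (toy numbers, not from the original project).
M = np.array([[1.0, 2.0, 3.0],
              [2.0, 4.0, 6.0],
              [1.0, 1.0, 1.0]])
theta, containsZero = calMinTheta(M)
print(theta, containsZero)  # M @ theta ~ 0, and no near-zero components here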
# Scale the returned float eigenvector into integers and check whether the scaled theta is valid (satisfies the threshold).
# If valid, return True and the valid eigenvector; if not valid, return False and an empty eigenvector.
import numpy as np
from scipy.special import gammaln  # needed below

def visit_fn(self, temperature):
    # Draw a step from the Tsallis visiting distribution (as used in
    # generalized simulated annealing), parametrized by self.qv.
    factor1 = np.exp(np.log(temperature) / (self.qv - 1.0))
    factor2 = np.exp((4.0 - self.qv) * np.log(self.qv - 1.0))
    factor3 = np.exp((2.0 - self.qv) * np.log(2.0) / (self.qv - 1.0))
    factor4 = np.sqrt(np.pi) * factor1 * factor2 / (factor3 * (3.0 - self.qv))
    factor5 = 1.0 / (self.qv - 1.0) - 0.5
    d1 = 2.0 - factor5
    factor6 = np.pi * (1.0 - factor5) / np.sin(np.pi * (1.0 - factor5)) / np.exp(gammaln(d1))
    sigmax = np.exp(-(self.qv - 1.0) * np.log(factor6 / factor4) / (3.0 - self.qv))
    x = sigmax * self.gaussian_fn(1)
    y = self.gaussian_fn(0)
    den = np.exp((self.qv - 1.0) * np.log(np.fabs(y)) / (3.0 - self.qv))
    return x / den
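# A minimal harness sketch for calling visit_fn outside its original class.
# _VisitSketch, the qv value, and the standard-normal gaussian_fn stand-in are
# all assumptions for illustration; qv must lie in (1, 3).
class _VisitSketch:
    qv = 2.62
    def gaussian_fn(self, flag):
        return np.random.standard_normal()

step = visit_fn(_VisitSketch(), temperature=100.0)
print(step)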
import numpy as np

def _em(X, eps=0.001):
    """EM algorithm, find weight.
    X : numpy two dim ndarray.
    return: weights
    usage:
        >>> X = np.array([[1, 2], [2, 4], [3, 1]])
        >>> print(_em(X))
        [ 0.33586597 0.66413403]
    """
    N, K = X.shape
    # init
    W = X.sum(axis=0) / float(X.sum())
    # solve
    while True:
        W0 = W
        # E step
        Y = np.tile(W, (N, 1)) * X
        Q = Y / np.tile(Y.sum(axis=1), (K, 1)).T
        # M step
        W = Q.sum(axis=0) / N
        # termination ?
        if np.fabs(W - W0).sum() < eps:
            break
    return W
import numpy as np

def is_coplanar(sample1, sample2):
    """
    Decide whether two clusters of pixels are coplanar.
    Args:
        sample1, sample2
    Returns:
        True or False
    """
    vec1 = fit_plane(sample1)
    vec2 = fit_plane(sample2)
    mag1 = np.sqrt(vec1.dot(vec1))
    mag2 = np.sqrt(vec2.dot(vec2))
    cosine = vec1.dot(vec2) / (mag1 * mag2)
    # Treat the planes as coplanar when the normals agree within ~18 degrees.
    return np.fabs(cosine) > 0.95
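# is_coplanar relies on an external fit_plane helper. A hypothetical stand-in
# (least-squares plane normal via SVD of the centered points) plus a toy check:
def fit_plane(sample):
    pts = np.asarray(sample, dtype=float)
    centered = pts - pts.mean(axis=0)
    # The normal is the right singular vector with the smallest singular value.
    return np.linalg.svd(centered)[2][-1]

flat1 = np.array([[0, 0, 0], [1, 0, 0], [0, 1, 0], [1, 1, 0]])
flat2 = np.array([[2, 2, 0], [3, 2, 0], [2, 3, 0]])
print(is_coplanar(flat1, flat2))  # True: both normals are ~(0, 0, 1)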
import math

def lksprob(alam):
    """
    Computes a Kolmogorov-Smirnov test significance level. Adapted from
    Numerical Recipes.
    Usage: lksprob(alam)
    """
    fac = 2.0
    sum = 0.0
    termbf = 0.0
    a2 = -2.0 * alam * alam
    for j in range(1, 201):
        term = fac * math.exp(a2 * j * j)
        sum = sum + term
        if math.fabs(term) <= (0.001 * termbf) or math.fabs(term) < (1.0e-8 * sum):
            return sum
        fac = -fac
        termbf = math.fabs(term)
    return 1.0  # Get here only if it fails to converge; was 0.0!!
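# Quick sanity checks against the underlying series
# Q_KS(lambda) = 2 * sum_j (-1)**(j-1) * exp(-2 * j**2 * lambda**2),
# which tends to 1 for small lambda and to 0 for large lambda:
print(lksprob(0.5))   # ~0.96
print(lksprob(1.0))   # ~0.27
print(lksprob(2.0))   # ~0.00067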
def __call__(self, individual):
    try:
        # Transform the tree expression into a callable function
        func = toolbox.compile(expr=individual)
        error = numpy.fabs(func(*self.inVarValues) - self.targetVarValues) / self.targetVarValues
        avgerror = numpy.average(error)
        return (avgerror,)
    except NameError as ne:
        print(ne)
    except Exception as e:
        return (sys.float_info.max,)

# This is a basic error function that finds the total of all the relative errors.
# Note that your data set should not contain any 0 values. That will cause a "divide by zero" error.
# At initialization time, it gets the data to compare against.
#
# The data is a list of lists, the labels give the names of interesting data.
# The config file defines which data lists are of use.
# All data lists are expected to be of equal length. Repeated values are perfectly OK.
# The config file defines a set of "inVars" which are the input variables (e.g. rho, T);
# it also defines a single "targetVar" which is the array of function values.
def __call__(self, individual):
    try:
        # Transform the tree expression into a callable function
        func = toolbox.compile(expr=individual)
        error = numpy.fabs(func(*self.inVarValues) - self.targetVarValues) / self.targetVarValues
        sumerror = numpy.sum(error)
        return (sumerror,)
    except NameError as ne:
        print(ne)
    except Exception as e:
        return (sys.float_info.max,)

# This is a basic error function that finds the maximum of all the relative errors.
# Note that your data set should not contain any 0 values. That will cause a "divide by zero" error.
# At initialization time, it gets the data to compare against.
#
# The data is a list of lists, the labels give the names of interesting data.
# The config file defines which data lists are of use.
# All data lists are expected to be of equal length. Repeated values are perfectly OK.
# The config file defines a set of "inVars" which are the input variables (e.g. rho, T);
# it also defines a single "targetVar" which is the array of function values.
def __call__(self, individual):
    try:
        # Transform the tree expression into a callable function
        func = toolbox.compile(expr=individual)
        error = numpy.fabs(func(*self.inVarValues) - self.targetVarValues) / self.targetVarValues
        maxerror = numpy.max(error)
        return (maxerror,)
    except NameError as ne:
        print(ne)
    except Exception as e:
        return (sys.float_info.max,)

# R^2 is a common regression measurement to find how much variance is explained by the approximation.
# It works well early on in the calculation, but loses precision as the approximation becomes close.
#
# The data is a list of lists, the labels give the names of interesting data.
# The config file defines which data lists are of use.
# All data lists are expected to be of equal length. Repeated values are perfectly OK.
# The config file defines a set of "inVars" which are the input variables (e.g. rho, T);
# it also defines a single "targetVar" which is the array of function values.
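# A hedged, self-contained sketch of the three relative-error fitness variants
# above, computed on plain arrays instead of a compiled DEAP individual
# (toy numbers, assumed for illustration only):
import numpy

pred = numpy.array([1.1, 1.9, 3.3])
target = numpy.array([1.0, 2.0, 3.0])
error = numpy.fabs(pred - target) / target
print(numpy.average(error))  # average relative error fitness
print(numpy.sum(error))      # total relative error fitness
print(numpy.max(error))      # maximum relative error fitness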
def _filter_streamlines(self, streamline, close_threshold=0.05,
                        loop_length_range: u.cm = [2.e+9, 5.e+10]*u.cm, **kwargs):
    """
    Check an extracted loop to make sure it fits the given criteria. Return True if it passes.

    Parameters
    ----------
    streamline : yt streamline object
    close_threshold : `float`
        percentage of domain width allowed between loop endpoints
    loop_length_range : `~astropy.units.Quantity`
        minimum and maximum allowed loop lengths (in centimeters)
    """
    streamline = streamline[np.all(streamline != 0.0, axis=1)]
    loop_length = np.sum(np.linalg.norm(np.diff(streamline, axis=0), axis=1))
    if np.fabs(streamline[0, 2] - streamline[-1, 2]) > close_threshold*self.extrapolated_3d_field.domain_width[2]:
        return False
    elif loop_length > loop_length_range[1].to(u.cm).value or loop_length < loop_length_range[0].to(u.cm).value:
        return False
    else:
        return True
def make_detector_array(self, field):
    """
    Construct bins based on the desired observing area.
    """
    delta_x = np.fabs(field.clipped_hmi_map.xrange[1] - field.clipped_hmi_map.xrange[0])
    delta_y = np.fabs(field.clipped_hmi_map.yrange[1] - field.clipped_hmi_map.yrange[0])
    min_z = min(field.extrapolated_3d_field.domain_left_edge[2].value,
                self.total_coordinates[:, 2].min().value)
    max_z = max(field.extrapolated_3d_field.domain_right_edge[2].value,
                self.total_coordinates[:, 2].max().value)
    delta_z = field._convert_angle_to_length(max(self.resolution.x, self.resolution.y)).value
    self.bins = Pair(int(np.ceil((delta_x/self.resolution.x).decompose()).value),
                     int(np.ceil((delta_y/self.resolution.y).decompose()).value),
                     int(np.ceil(np.fabs(max_z - min_z)/delta_z)))
    self.bin_range = Pair(field._convert_angle_to_length(field.clipped_hmi_map.xrange).value,
                          field._convert_angle_to_length(field.clipped_hmi_map.yrange).value,
                          np.array([min_z, max_z]))
def equilibrium_ionization(self, rate_matrix=None):
    """
    Calculate the ionization equilibrium for all ions of the element.
    In equilibrium the rate matrix A satisfies A x = 0, so the ion fractions x
    are the null-space vector of A, found here as the right singular vector
    belonging to the smallest singular value and normalized to sum to 1.
    """
    if rate_matrix is None:
        rate_matrix = self._rate_matrix()
    # Solve the system of equations using SVD and normalize
    _, _, V = np.linalg.svd(rate_matrix.value)
    # Select the row of V with the smallest singular value (singular values
    # are returned in descending order), i.e. the null-space direction.
    ioneq = np.fabs(V[:, -1, :])
    ioneq /= np.sum(ioneq, axis=1)[:, np.newaxis]
    return u.Quantity(ioneq)
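# A hedged toy check of the SVD null-space trick on a two-state rate matrix
# (hypothetical ionization rate i out of state 0, recombination rate r back):
import numpy as np

i_rate, r_rate = 2.0, 1.0
A = np.array([[-i_rate, r_rate],
              [i_rate, -r_rate]])
_, _, V = np.linalg.svd(A)
x = np.fabs(V[-1])
x /= x.sum()
print(x)  # -> [1/3, 2/3], i.e. r/(i+r) and i/(i+r), the equilibrium fractions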
def emissivity(self, density: u.cm**(-3), include_energy=False, **kwargs):
    """
    Calculate emissivity for all lines as a function of temperature and density.
    """
    populations = self.level_populations(density, include_protons=kwargs.get('include_protons', True))
    if populations is None:
        return (None, None)
    wavelengths = np.fabs(self._wgfa['wavelength'])
    # Exclude 0 wavelengths which correspond to two-photon decays
    upper_levels = self._wgfa['upper_level'][wavelengths != 0*u.angstrom]
    a_values = self._wgfa['A'][wavelengths != 0*u.angstrom]
    wavelengths = wavelengths[wavelengths != 0*u.angstrom]
    if include_energy:
        energy = const.h.cgs*const.c.cgs/wavelengths.to(u.cm)
    else:
        energy = 1.*u.photon
    emissivity = populations[:, :, upper_levels - 1]*(a_values*energy)
    return wavelengths, emissivity
def _get_cci(cls, df, n_days=None):
    """ Commodity Channel Index
    CCI = (Typical Price - N-period SMA of TP) / (.015 x Mean Deviation)
    Typical Price (TP) = (High + Low + Close) / 3
    TP is also implemented as 'middle'.
    :param df: data
    :param n_days: N days window (defaults to 14)
    :return: None (the 'cci' or 'cci_N' column is written into df)
    """
    if n_days is None:
        n_days = 14
        column_name = 'cci'
    else:
        n_days = int(n_days)
        column_name = 'cci_{}'.format(n_days)
    tp = df['middle']
    tp_sma = df['middle_{}_sma'.format(n_days)]
    md = df['middle'].rolling(
        min_periods=1, center=False, window=n_days).apply(
        lambda x: np.fabs(x - x.mean()).mean())
    df[column_name] = (tp - tp_sma) / (.015 * md)
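# A hypothetical usage sketch with pandas (the 'middle' and SMA columns are
# prepared elsewhere in the original library; here we build them by hand):
import numpy as np
import pandas as pd

df = pd.DataFrame({'high': [10., 11., 12., 13.],
                   'low': [8., 9., 9., 10.],
                   'close': [9., 10., 11., 12.]})
df['middle'] = (df['high'] + df['low'] + df['close']) / 3
df['middle_3_sma'] = df['middle'].rolling(3, min_periods=1).mean()
_get_cci(None, df, n_days=3)  # cls is unused, so None stands in here
print(df['cci_3'])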
def test_load_image(self):
    blob = sub_mean(self.image)
    image_mean = np.mean(np.mean(self.image, axis=0), axis=0)
    blob_mean = np.mean(np.mean(blob, axis=0), axis=0)
    assert (np.fabs(config.RGB_MEAN - (image_mean - blob_mean)) < 1e-9).all()
def equal(a, b):
    return np.fabs(a - b) < EPS

# Supported shape combinations:
# (4), (4)
# (4), (num, 4)
# (num, 4), (num, 4)
# (num, 4), (4)
def __init__(self, directory, is_2015=False, scale=1.0):
    """
    Ground truth dataset iterator
    """
    if is_2015:
        left_dir, right_dir = 'image_2', 'image_3'
        noc_dir, occ_dir = 'disp_noc_0', 'disp_occ_0'
        calib_left, calib_right = 'P2', 'P3'
    else:
        left_dir, right_dir = 'image_0', 'image_1'
        noc_dir, occ_dir = 'disp_noc', 'disp_occ'
        calib_left, calib_right = 'P0', 'P1'
    self.scale = scale

    # Stereo is only evaluated on the _10.png images
    self.stereo = StereoDatasetReader(os.path.expanduser(directory),
                                      left_template=''.join([left_dir, '/%06i_10.png']),
                                      right_template=''.join([right_dir, '/%06i_10.png']),
                                      scale=scale, grayscale=True)
    self.noc = ImageDatasetReader(template=os.path.join(os.path.expanduser(directory), noc_dir, '%06i_10.png'))
    self.occ = ImageDatasetReader(template=os.path.join(os.path.expanduser(directory), occ_dir, '%06i_10.png'))

    def calib_read(fn, scale):
        db = AttrDict.load_yaml(fn)
        P0 = np.float32(db[calib_left].split(' '))
        P1 = np.float32(db[calib_right].split(' '))
        fx, cx, cy = P0[0], P0[2], P0[6]
        baseline_px = np.fabs(P1[3])
        return StereoCamera.from_calib_params(fx, fx, cx, cy, baseline_px=baseline_px)

    self.calib = DatasetReader(template=os.path.join(os.path.expanduser(directory), 'calib/%06i.txt'),
                               process_cb=lambda fn: calib_read(fn, scale))
    self.poses = repeat(None)
import numpy as np
import cv2

def im_resize(im, shape=None, scale=0.5, interpolation=cv2.INTER_AREA):
    if shape is not None:
        return cv2.resize(im, dsize=shape, fx=0., fy=0., interpolation=interpolation)
    else:
        if np.fabs(scale - 1.0) < 1e-2:
            return im
        elif scale <= 1.0:
            return cv2.resize(im, None, fx=scale, fy=scale, interpolation=interpolation)
        else:
            shape = (int(im.shape[1]*scale), int(im.shape[0]*scale))
            return im_resize(im, shape)
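# A quick hedged usage sketch on a synthetic image (assumed toy input):
im = np.zeros((480, 640, 3), dtype=np.uint8)
print(im_resize(im, scale=0.5).shape)         # (240, 320, 3)
print(im_resize(im, scale=1.005).shape)       # scale within 1e-2 of 1.0: returned unchanged
print(im_resize(im, shape=(320, 240)).shape)  # dsize is (width, height) -> (240, 320, 3)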