def compute_beta_splines(TT, minT, splinesoptions={}):
"""
This function computes the volumetric thermal expansion as a numerical
derivative of the volume as a function of temperature V(T) given in the
    input array *minT*. This array can be obtained from the free energy
    minimization, which should be done beforehand.
This function uses splines for smoothing the derivative.
"""
betaT = np.zeros(len(TT))
x = np.array(TT)
y0 = np.array(minT)
if (splinesoptions == {}):
tck0 = interpolate.splrep(x, y0)
else:
tck0 = interpolate.splrep(x, y0, k=splinesoptions['k0'], s=splinesoptions['s0'])
    ynew0 = interpolate.splev(x, tck0, der=0)   # smoothed V(T) (not used further)
    betaT = interpolate.splev(x, tck0, der=1)   # dV/dT from the spline derivative
return betaT/minT
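# A minimal usage sketch (not part of the original module). It assumes the
# module-level imports that compute_beta_splines relies on and uses a made-up
# linear V(T), so the expected result is roughly 1e-5/(1 + 1e-5*T).
import numpy as np
from scipy import interpolate

TT = np.linspace(1.0, 1000.0, 200)            # temperatures (K)
minT = 270.0 * (1.0 + 1.0e-5 * TT)            # hypothetical minimized volumes V(T)
betaT = compute_beta_splines(TT, minT)        # volumetric thermal expansion beta(T)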
def fPoincare(s):
"""
Parametric interpolation to the Poincare section
Inputs:
s: Arc length which parametrizes the curve, a float or dx1-dim numpy
array
Outputs:
xy = x and y coordinates on the Poincare section, 2-dim numpy array
or (dx2)-dim numpy array
"""
    interpolation = interpolate.splev(s, tckPoincare)  # tckPoincare: module-level spline of the Poincare section
xy = np.array([interpolation[0], interpolation[1]], float).transpose()
return xy
#Compute the interpolation:
#Create an array of arc lengths for which the Poincare section will be
#interpolated:
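# A hedged sketch (not the original script's code) of how the module-level
# tckPoincare used by fPoincare could be built: parametrize placeholder section
# points by their cumulative arc length, so fPoincare(s) can be evaluated at any s.
import numpy as np
from scipy import interpolate

theta = np.linspace(0.0, 2.0*np.pi, 100, endpoint=False)       # placeholder section
SectionPoints = np.column_stack([np.cos(theta), np.sin(theta)])
ds = np.sqrt(np.sum(np.diff(SectionPoints, axis=0)**2, axis=1))
sn = np.concatenate(([0.0], np.cumsum(ds)))                    # cumulative arc lengths
tckPoincare, u = interpolate.splprep([SectionPoints[:, 0], SectionPoints[:, 1]],
                                     u=sn, s=0)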
def resample_photosphere(opacities, photosphere, opacity_index):
""" Resample photospheric quantities onto a new opacity scale. """
if opacity_index is None:
return photosphere
resampled_photosphere = np.zeros(photosphere.shape)
n_quantities = photosphere.shape[1]
for i in range(n_quantities):
if i == opacity_index: continue
# Create spline function.
tk = \
interpolate.splrep(photosphere[:, opacity_index], photosphere[:, i])
# Evaluate photospheric quantities at the new opacities
resampled_photosphere[:, i] = interpolate.splev(opacities.flatten(), tk)
# Update photosphere with new opacities
resampled_photosphere[:, opacity_index] = opacities
return resampled_photosphere
def resample_nd(curve, npoints, smooth = 0, periodic = False, derivative = 0):
"""Resample n points using n equidistant points along a curve
Arguments:
points (mxd array): coordinate of the reference points for the curve
npoints (int): number of resamples equidistant points
smooth (number): smoothness factor
periodic (bool): if True assumes the curve is a closed curve
Returns:
(nxd array): resampled equidistant points
"""
cinterp, u = splprep(curve.T, u = None, s = smooth, per = periodic);
if npoints is all:
npoints = curve.shape[0];
us = np.linspace(u.min(), u.max(), npoints)
curve = splev(us, cinterp, der = derivative);
return np.vstack(curve).T;
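# Usage sketch (hypothetical data). resample_nd assumes splprep/splev are
# imported at module level, e.g. `from scipy.interpolate import splprep, splev`.
import numpy as np
from scipy.interpolate import splprep, splev

xy = np.column_stack([np.linspace(0.0, 1.0, 10), np.linspace(0.0, 1.0, 10)**2])
xy_fine = resample_nd(xy, 50)                   # 50 points along the fitted curve
tangents = resample_nd(xy, 50, derivative = 1)  # d(curve)/du at the same parameters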
def resample_1d(data, npoints, smooth = 0, periodic = False, derivative = 0):
"""Resample 1d data using n equidistant points
Arguments:
data (array): data points
npoints (int): number of points in equidistant resampling
smooth (number): smoothness factor
periodic (bool): if True assumes the curve is a closed curve
Returns:
(array): resampled data points
"""
x = np.linspace(0, 1, data.shape[0]);
dinterp = splrep(x, data, s = smooth, per = periodic);
if npoints is all:
npoints = data.shape[0];
x2 = np.linspace(0, 1, npoints);
return splev(x2, dinterp, der = derivative)
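# Usage sketch (hypothetical data), with the same assumed module-level imports:
# upsample 20 samples of sin(x) to 100 points and take their spline derivative.
import numpy as np
from scipy.interpolate import splrep, splev

coarse = np.sin(np.linspace(0.0, np.pi, 20))
fine = resample_1d(coarse, 100)                    # 100 resampled values
slope = resample_1d(coarse, 100, derivative = 1)   # derivative w.r.t. the [0, 1] parameter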
def phi(self, points = None):
"""Returns a Spline representing the tangent angle along a 2d curve
Arguments:
points (int, array or None): sample points used to determine the phi
Returns:
Spline: spline of phi
"""
if self.ndim != 2:
raise RuntimeError('phi angle can only be computed for 2d curves');
points = self.get_points(points, error = 'cannot determine sample points needed for the calculation of phi');
#get the tangents and tangent angles
tgs = splev(points, self.tck(), der = 1);
tgs = np.vstack(tgs).T;
phi = np.arctan2(tgs[:,1], tgs[:,0]);
phi = np.mod(phi + np.pi, 2 * np.pi) - np.pi;
#return Spline(phi, points = self.points, knots = self.knots, degree = self.degree - 1);
tck = splrep(points, phi, s = 0.0, k = self.degree + 1);
return Spline(tck = tck, points = points);
def projection_matrix(self, points = None, derivative = 0, extrapolation = 1):
    """Projection matrix that maps the spline coefficients to values at the sample points via a dot product
    Arguments:
      points (array or None): sample points for the projection matrix (if None use internal points)
      derivative (int): if n>0 the projection matrix of the n-th derivative is returned
      extrapolation (int): passed to splev as ext: 0=extrapolate, 1=return 0, 3=return the boundary value
    """
    points = self.get_points(points, error = 'sample points need to be specified for the calculation of the projection matrix!')
    projection = np.zeros((self._nparameter, points.shape[0]));
    for i in range(self._nparameter):
        p = np.zeros(self._nparameter);
        p[i] = 1;
        pp = np.pad(p, (0, self.degree + 1), 'constant');  # pad coefficients to the length splev expects for this knot vector
        tck = (self.knots_all, pp, self.degree);
        projection[i] = splev(points, tck, der = derivative, ext = extrapolation);
    return projection.T;
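# Standalone illustration (hedged, independent of this class) of the trick used
# above: evaluating a single b-spline basis function by building a tck whose
# coefficient vector has a 1 in one slot and 0 elsewhere.
import numpy as np
from scipy.interpolate import splrep, splev

x = np.linspace(0.0, 1.0, 11)
t, c, k = splrep(x, np.sin(x))       # any fit provides a knot vector t and degree k
basis = np.zeros_like(c)
basis[2] = 1.0                       # select the third basis function
B2 = splev(x, (t, basis, k))         # B_2 evaluated at the sample points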
def tv(self):
"""
Returns the total variation of the spline. Simple !
http://en.wikipedia.org/wiki/Total_variation
"""
# Method 1 : linear approximation
ptd = 5 # point density in days ... this is enough !
a = self.t[0]
b = self.t[-1]
x = np.linspace(a, b, int((b-a) * ptd))
y = self.eval(jds = x)
tv1 = np.sum(np.fabs(y[1:] - y[:-1]))
#print "TV1 : %f" % (tv1)
return tv1
# Method 2 : integrating the absolute value of the derivative ... hmm, splint does not integrate derivatives ..
#si.splev(jds, (self.t, self.c, self.k))
def eval(self, jds = None, nostab = True):
"""
Evaluates the spline at jds, and returns the corresponding mags-like vector.
By default, we exclude the stabilization points !
    If jds is not None, we use them instead of our own jds (in this case nostab has no effect).
"""
if jds is None:
if nostab:
jds = self.datapoints.jds[self.datapoints.mask]
else:
jds = self.datapoints.jds
else:
# A minimal check for non-extrapolation condition should go here !
pass
fitmags = si.splev(jds, (self.t, self.c, self.k))
# By default ext=0 : we do return extrapolated values
return fitmags
def resample_spline(points, smooth=.001, count=None):
from scipy.interpolate import splprep, splev
if count is None:
count = len(points)
points = np.asanyarray(points)
    closed = np.linalg.norm(points[0] - points[-1]) < tol.merge  # tol.merge: module-level merge tolerance assumed to be defined elsewhere (e.g. trimesh's constants.tol)
tpl = splprep(points.T, s=smooth)[0]
i = np.linspace(0.0, 1.0, count)
resampled = np.column_stack(splev(i, tpl))
if closed:
shared = resampled[[0,-1]].mean(axis=0)
resampled[0] = shared
resampled[-1] = shared
return resampled
def klgfbInterp(self, wvl, n, l):
'''A Python version of the CHIANTI IDL procedure karzas_xs.
Interpolates free-bound gaunt factor of Karzas and Latter, (1961, Astrophysical Journal
Supplement Series, 6, 167) as a function of wavelength (wvl).
'''
try:
klgfb = self.Klgfb
    except AttributeError:
self.Klgfb = ch_io.klgfbRead()
klgfb = self.Klgfb
# get log of photon energy relative to the ionization potential
sclE = np.log(self.Ip/(wvl*ch_const.ev2ang))
thisGf = klgfb['klgfb'][n-1, l]
spl = splrep(klgfb['pe'], thisGf)
gf = splev(sclE, spl)
return gf
def ioneq_one(self, stage, **kwargs):
"""
Calculate the equilibrium fractional ionization of the ion as a function of temperature.
Uses the `ChiantiPy.core.ioneq` module and does a first-order spline interpolation to the data. An
ionization equilibrium file can be passed as a keyword argument, `ioneqfile`. This can
    be passed through as a keyword argument to any of the functions that use the
ionization equilibrium.
Parameters
----------
stage : int
Ionization stage, e.g. 25 for Fe XXV
"""
tmp = ioneq(self.Z)
tmp.load(ioneqName=kwargs.get('ioneqfile', None))
ionization_equilibrium = splev(self.Temperature,
splrep(tmp.Temperature, tmp.Ioneq[stage-1,:], k=1), ext=1)
return np.where(ionization_equilibrium < 0., 0., ionization_equilibrium)
# get_smooth_path.py (project: Mobile_Robotics_Platform, author: Ridhwanluthra)
def interpolate_path(list_of_xys):
x = list_of_xys[0]
y = list_of_xys[1]
#print x, y
#x = np.array([0,1,2,3,4,5,6,7,6,5,4,3,2,1,0])
#y = np.array([0,1,2,3,4,4,5,6,7,8,7,8,9,9,9])
tck, u = interpolate.splprep([x, y], s = 0.03)
    # Note: splprep normalizes its parameter u to [0, 1], so values of unew above 1
    # lie outside the spline and, with ext=1, evaluate to zero.
    unew = np.arange(0, 9, 0.03)
    #print unew
    interpolated_path = interpolate.splev(unew, tck, ext = 1)
return interpolated_path
#a = interpolate_path(array)
#plot_path(array, a)
def setStabilityData(self, heelAngles, GZ):
self.heelAngles = heelAngles
self.GZ = GZ
self.restoringMoment = self.GZ*self.Volume*self.rho*self.g
self.restoringMomentSpl = interpolate.splrep(self.heelAngles, self.restoringMoment)
# Calculate maximum heel angle
nTest = 100
heelAnglesTest = np.linspace(0, np.max(self.heelAngles), nTest)
restoringMomentTest = interpolate.splev(heelAnglesTest, self.restoringMomentSpl)
iMax = np.argmax(restoringMomentTest)
self.maxHeelAngle = heelAnglesTest[iMax]
def alt_sky_model(self):
fac = 10
mn = 1e9
mx = 0.
for fiber in self.good_fibers:
mn = np.min([mn, fiber.wavelength.min()])
mx = np.max([mx, fiber.wavelength.max()])
xs = np.linspace(0, 1, self.D*fac)
A = np.zeros((len(xs), len(self.good_fibers)))
for i, fiber in enumerate(self.good_fibers):
y = fiber.spectrum / fiber.fiber_to_fiber
xp = np.interp(fiber.wavelength, np.linspace(mn, mx, self.D*fac),
xs, left=0.0, right=0.0)
tck = splrep(xp, y)
A[:, i] = splev(xs, tck)
ys = biweight_location(A, axis=(1,))
self.masterwave = np.linspace(mn, mx, self.D*fac)
B, c = bspline_x0(self.masterwave, nknots=self.D)
sol = np.linalg.lstsq(c, ys)[0]
self.mastersky = np.dot(c, sol)
def derivatives(xs,ys,ders=(1,)):
'''
    Calculate the numerical derivatives of `ys` at the points `xs` using third-order (cubic) spline interpolation.
Parameters
----------
xs : 1d ndarray
The sample points of the argument.
    ys : 1d ndarray
The corresponding sample points of the function.
ders : tuple of integer
The derivatives to calculate.
Returns
-------
2d ndarray
The derivatives at the sample points of the argument.
'''
assert len(xs)==len(ys)
xs,ys=np.asarray(xs),np.asarray(ys)
result=np.zeros((len(ders),len(xs)),dtype=ys.dtype)
tck=ip.splrep(xs,ys,s=0)
for i,der in enumerate(ders):
result[i]=ip.splev(xs,tck,der=der)
return result
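# Worked example (hypothetical data). The function assumes module-level imports
# such as `import numpy as np` and `import scipy.interpolate as ip`.
import numpy as np
import scipy.interpolate as ip

xs = np.linspace(0.0, 2.0*np.pi, 200)
d1, d2 = derivatives(xs, np.sin(xs), ders=(1, 2))   # d1 ~ cos(xs), d2 ~ -sin(xs)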
def flatten_emissivities(channel, emission_model):
"""
Compute product between wavelength response and emissivity for all ions
"""
flattened_emissivities = []
for ion in emission_model:
wavelength, emissivity = emission_model.get_emissivity(ion)
if wavelength is None or emissivity is None:
flattened_emissivities.append(None)
continue
interpolated_response = splev(wavelength.value, channel['wavelength_response_spline'], ext=1)
em_summed = np.dot(emissivity.value, interpolated_response)
unit = emissivity.unit*u.count/u.photon*u.steradian/u.pixel*u.cm**2
flattened_emissivities.append(u.Quantity(em_summed, unit))
return flattened_emissivities
def simple_poly_fit(x , y , number_of_uni_point, smoothness):
number_of_point = len(x)
t = np.zeros_like(x)
for i in range(0 , number_of_point):
if i > 0:
t[i] = t[i - 1] + np.sqrt((x[i] - x[i - 1]) ** 2 + (y[i] - y[i - 1]) ** 2)
else:
t[i] = 0
    k = min(3, number_of_point - 1)   # spline order
    nest = -1                         # estimate of number of knots needed (-1 = maximal)
    tckp, u = splprep([x, y, t], s = smoothness, k = k, nest = nest)
    x_new, y_new, t_new = splev(linspace(0, 1, number_of_uni_point), tckp)
return x_new , y_new
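# Usage sketch (hypothetical data). simple_poly_fit relies on module-level
# imports such as `from numpy import linspace` and
# `from scipy.interpolate import splprep, splev`, included here so the sketch runs.
import numpy as np
from numpy import linspace
from scipy.interpolate import splprep, splev

xs = np.linspace(0.0, 1.0, 20)
ys = xs**2 + 0.01*np.random.randn(20)
x_smooth, y_smooth = simple_poly_fit(xs, ys, number_of_uni_point = 100, smoothness = 0.01)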
########################################################################
########################################################################
def getCoonsPatchPointBez(bounds, x, y, width, height, densities = None):
    """Evaluate the Coons patch spanned by the four boundary splines in *bounds*
    ('s_top', 's_bottom', 's_left', 's_right') at pixel (x, y) of a width x height grid,
    optionally warping the parameters by the per-row/column *densities*."""
p00 = np.array(interpolate.splev( 0.0,bounds['s_top'])).flatten()
p10 = np.array(interpolate.splev( 1.0,bounds['s_top'])).flatten()
p11 = np.array(interpolate.splev( 0.0,bounds['s_bottom'])).flatten()
p01 = np.array(interpolate.splev( 1.0,bounds['s_bottom'])).flatten()
u = 1.0 * x / (width-1)
v = 1.0 * y / (height-1)
iu = 1.0 - u
iv = 1.0 - v
if densities is None:
pu0 = np.array(interpolate.splev( u,bounds['s_top'])).flatten()
pu1 = np.array(interpolate.splev(iu,bounds['s_bottom'])).flatten()
pv0 = np.array(interpolate.splev(iv,bounds['s_left'])).flatten()
pv1 = np.array(interpolate.splev( v,bounds['s_right'])).flatten()
else:
ut = 0.0
ub = 0.0
for i in range(x):
ut+=densities['top'][i]
ub+=densities['bottom'][i]
vl = 0.0
vr = 0.0
for i in range(y):
vl+=densities['left'][i]
vr+=densities['right'][i]
pu0 = np.array(interpolate.splev( ut,bounds['s_top'])).flatten()
pu1 = np.array(interpolate.splev(1.0-ub,bounds['s_bottom'])).flatten()
        pv0 = np.array(interpolate.splev(1.0-vl,bounds['s_left'])).flatten()
pv1 = np.array(interpolate.splev( vr,bounds['s_right'])).flatten()
return iv * pu0 + v * pu1 + iu * pv0 + u * pv1 - iu * iv * p00 - u * iv * p10 - iu * v * p01 - u * v * p11
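# Hedged usage sketch: build four boundary splines of the unit square (the key
# names and the reversed bottom/left orientation follow the corner handling
# above) and evaluate the patch at one grid pixel; the result is ~ (3/9, 4/9).
import numpy as np
from scipy import interpolate

s = np.linspace(0.0, 1.0, 10)
mk = lambda xs, ys: interpolate.splprep([xs, ys], s=0, k=1)[0]
bounds = {'s_top':    mk(s, np.zeros(10)),
          's_bottom': mk(1.0 - s, np.ones(10)),
          's_left':   mk(np.zeros(10), 1.0 - s),
          's_right':  mk(np.ones(10), s)}
pt = getCoonsPatchPointBez(bounds, x = 3, y = 4, width = 10, height = 10)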
def _interpolate(xy, num_points):
tck,u = splprep([
xy[:,0],
xy[:,1]],
s=0
)
unew = linspace(0, 1, num_points)
out = splev(unew, tck)
return column_stack(out)
def _rnd_interpolate(xy, num_points, ordered=False):
tck,u = splprep([
xy[:,0],
xy[:,1]],
s=0
)
unew = random(num_points)
if ordered:
unew = sort(unew)
out = splev(unew, tck)
return column_stack(out)
def lininterp2(x1, y1, x):
"""Linear interpolation at points x between numpy arrays (x1, y1).
Only y1 is allowed to be two-dimensional. The x1 values should be sorted
from low to high. Returns a numpy.array of y values corresponding to
points x.
"""
return splev(x, splrep(x1, y1, s=0, k=1))
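# Worked one-liner (hypothetical values): piecewise-linear interpolation midway
# between the samples; assumes `from scipy.interpolate import splrep, splev`.
import numpy as np
from scipy.interpolate import splrep, splev

lininterp2(np.array([0.0, 1.0, 2.0]), np.array([0.0, 10.0, 20.0]), np.array([0.5, 1.5]))
# -> array([ 5., 15.])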
def compute_alpha_splines(TT,minT,ibrav,splinesoptions):
"""
    This function calculates the thermal expansions alphaT at different temperatures,
    as the previous function does, but using the spline interpolation implemented in
    scipy.interpolate.
"""
alphaT = np.zeros(len(TT)*6)
alphaT.shape = (len(TT),6)
x = np.array(TT)
y0 = np.array(minT[:,0])
y1 = np.array(minT[:,1])
y2 = np.array(minT[:,2])
if (splinesoptions=={}):
tck0 = interpolate.splrep(x, y0)
tck1 = interpolate.splrep(x, y1)
tck2 = interpolate.splrep(x, y2)
else:
tck0 = interpolate.splrep(x, y0, k=splinesoptions['k0'], s=splinesoptions['s0'])
tck1 = interpolate.splrep(x, y1, k=splinesoptions['k1'], s=splinesoptions['s1'])
tck2 = interpolate.splrep(x, y2, k=splinesoptions['k2'], s=splinesoptions['s2'])
ynew0 = interpolate.splev(x, tck0, der=0)
alphaT[:,0] = interpolate.splev(x, tck0, der=1)
ynew1 = interpolate.splev(x, tck1, der=0)
alphaT[:,1] = interpolate.splev(x, tck1, der=1)
ynew2 = interpolate.splev(x, tck2, der=0)
alphaT[:,2] = interpolate.splev(x, tck2, der=1)
# now normalize the alphaTs properly. It must be different for different ibrav
# to avoid a divide by 0 error (minT is zero for lattice parameters not defined
# in the system)
if ibrav==4:
alphaT[:,0] = alphaT[:,0]/minT[:,0]
alphaT[:,2] = alphaT[:,2]/minT[:,2]
return alphaT
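# Hedged usage sketch for a hexagonal (ibrav=4) cell with made-up lattice
# parameters a(T) and c(T); the remaining columns of minT stay zero and are not
# normalized, so no division by zero occurs.
import numpy as np
from scipy import interpolate

TT = np.linspace(1.0, 800.0, 80)
minT = np.zeros((len(TT), 6))
minT[:, 0] = 4.6 * (1.0 + 8.0e-6 * TT)     # hypothetical a(T)
minT[:, 2] = 7.4 * (1.0 + 5.0e-6 * TT)     # hypothetical c(T)
alphaT = compute_alpha_splines(TT, minT, ibrav=4, splinesoptions={})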
################################################################################
def getBoundaryPoints(x , y):
tck,u = interpolate.splprep([x, y], s=0, per=1)
unew = np.linspace(u.min(), u.max(), 10000)
xnew,ynew = interpolate.splev(unew, tck, der=0)
tup = c_[xnew.astype(int),ynew.astype(int)].tolist()
coord = list(set(tuple(map(tuple, tup))))
coord = np.array([list(elem) for elem in coord])
return coord[:,0],coord[:,1]
def getBoundaryPoints(x = [], y = []):
tck,u = interpolate.splprep([x, y], s=0, per=1)
unew = np.linspace(u.min(), u.max(), 1000)
xnew,ynew = interpolate.splev(unew, tck, der=0)
tup = c_[xnew.astype(int),ynew.astype(int)].tolist()
coord = list(set(tuple(map(tuple, tup))))
coord = np.array([list(elem) for elem in coord])
return coord[:,0],coord[:,1]
def plot_contour(self, xv, yv, cost_grid):
"""
Function constructs contour lines
"""
contour = Contour(xv, yv, cost_grid)
contour_lines = contour.contours(
np.linspace(np.min(cost_grid), np.max(cost_grid), 20))
series = []
count = 0
for key, value in contour_lines.items():
for line in value:
if len(line) > 3:
tck, u = splprep(np.array(line).T, u=None, s=0.0, per=0)
u_new = np.linspace(u.min(), u.max(), 100)
x_new, y_new = splev(u_new, tck, der=0)
interpol_line = np.c_[x_new, y_new]
else:
interpol_line = line
series.append(dict(data=interpol_line,
color=self.contour_color,
type="spline",
lineWidth=0.5,
marker=dict(enabled=False),
name="%g" % round(key, 2),
enableMouseTracking=False
))
count += 1
return series
def callable_from_trajectory(t, curves):
"""
Use scipy.interpolate splprep to build cubic b-spline interpolating
functions over a set of curves.
Parameters
----------
t : 1D array_like
Array of m time indices of trajectory
curves : 2D array_like
Array of m x n vector samples at the time indices. First dimension
indexes time, second dimension indexes vector components
Returns
-------
interpolated_callable : callable
Callable which interpolates the given curve/trajectories
"""
tck_splprep = interpolate.splprep(
x=[curves[:, i] for i in range(curves.shape[1])], u=t, s=0)
def interpolated_callable(t, *args):
return np.array(
interpolate.splev(t, tck_splprep[0], der=0)
).T.squeeze()
return interpolated_callable
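# Usage sketch (hypothetical trajectory): build the interpolant from 50 samples
# of (sin t, cos t) and evaluate it at intermediate times.
import numpy as np
from scipy import interpolate

t = np.linspace(0.0, 10.0, 50)
curves = np.column_stack([np.sin(t), np.cos(t)])
traj = callable_from_trajectory(t, curves)
traj(np.array([0.5, 1.5]))   # ~ [[sin(0.5), cos(0.5)], [sin(1.5), cos(1.5)]]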
def compute_blackbody_response(self, Teffs=None):
"""
Computes blackbody intensities across the entire range of
effective temperatures.
@Teffs: an array of effective temperatures. If None, a default
array from ~300K to ~500000K with 97 steps is used. The default
array is uniform in log10 scale.
Returns: n/a
"""
    if Teffs is None:
log10Teffs = np.linspace(2.5, 5.7, 97) # this corresponds to the 316K-501187K range.
Teffs = 10**log10Teffs
# Energy-weighted intensities:
log10ints_energy = np.array([np.log10(self._bb_intensity(Teff, photon_weighted=False)) for Teff in Teffs])
self._bb_func_energy = interpolate.splrep(Teffs, log10ints_energy, s=0)
self._log10_Inorm_bb_energy = lambda Teff: interpolate.splev(Teff, self._bb_func_energy)
# Photon-weighted intensities:
log10ints_photon = np.array([np.log10(self._bb_intensity(Teff, photon_weighted=True)) for Teff in Teffs])
self._bb_func_photon = interpolate.splrep(Teffs, log10ints_photon, s=0)
self._log10_Inorm_bb_photon = lambda Teff: interpolate.splev(Teff, self._bb_func_photon)
self.content.append('blackbody')
self.atmlist.append('blackbody')
def burgess_tully_descale(x, y, energy_ratio, c, scaling_type):
"""
Convert scaled Burgess-Tully parameters to physical quantities. For more details see
[1]_.
References
----------
.. [1] Burgess, A. and Tully, J. A., 1992, A&A, `254, 436 <http://adsabs.harvard.edu/abs/1992A%26A...254..436B>`_
"""
nots = splrep(x, y, s=0)
if scaling_type == 1:
x_new = 1.0 - np.log(c)/np.log(energy_ratio + c)
upsilon = splev(x_new, nots, der=0)*np.log(energy_ratio + np.e)
elif scaling_type == 2:
x_new = energy_ratio/(energy_ratio + c)
upsilon = splev(x_new, nots, der=0)
elif scaling_type == 3:
x_new = energy_ratio/(energy_ratio + c)
upsilon = splev(x_new, nots, der=0)/(energy_ratio + 1.0)
elif scaling_type == 4:
x_new = 1.0 - np.log(c)/np.log(energy_ratio + c)
upsilon = splev(x_new, nots, der=0)*np.log(energy_ratio + c)
elif scaling_type == 5:
# dielectronic
x_new = energy_ratio/(energy_ratio + c)
upsilon = splev(x_new, nots, der=0)/energy_ratio
elif scaling_type == 6:
# protons
x_new = energy_ratio/(energy_ratio + c)
upsilon = 10**splev(x_new, nots, der=0)
else:
raise ValueError('Unrecognized BT92 scaling option.')
return upsilon
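# Usage sketch with made-up scaled data: a flat upsilon of 1 under scaling type 1
# descales to log(energy_ratio + e), following the Burgess & Tully (1992) forms above.
import numpy as np
from scipy.interpolate import splrep, splev

x = np.linspace(0.0, 1.0, 9)
y = np.ones_like(x)                       # hypothetical scaled effective collision strengths
energy_ratio = np.linspace(0.0, 10.0, 50)
ups = burgess_tully_descale(x, y, energy_ratio, c=1.5, scaling_type=1)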