def __call__(self, vals, fill_value=np.nan):
"""
Evaluate interpolator for values given at the source points.
Parameters
----------
vals : ndarray of float, shape (numsourcepoints, ...)
        Values at the source points from which to interpolate
fill_value : float
        Value assigned where linear interpolation fails (e.g. outside the
        convex hull of the source points); defaults to np.nan
Returns
-------
output : ndarray of float with shape (numtargetpoints,...)
"""
self._check_shape(vals)
ip = LinearNDInterpolator(self.src, vals, fill_value=fill_value)
return ip(self.trg)
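# For reference, a minimal standalone sketch of the same pattern as the
# method above, on hypothetical random source/target points:
import numpy as np
from scipy.interpolate import LinearNDInterpolator

_src = np.random.rand(100, 2)             # source point coordinates
_trg = np.random.rand(20, 2)              # target point coordinates
_vals = np.sin(_src[:, 0])                # values observed at the source points
_ip = LinearNDInterpolator(_src, _vals, fill_value=np.nan)
_out = _ip(_trg)                          # shape (20,), NaN outside the convex hull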
# -----------------------------------------------------------------------------
# Covariance routines needed for Kriging
# -----------------------------------------------------------------------------
def apply_grouping(self, energy_channel, grouping, verbose=False):
"""
Group the ARF channels (INTERPOLATED with respect to the spectral
channels) by the supplied grouping specification.
Arguments:
* energy_channel: energies of the spectral channel
* grouping: spectral grouping specification
Return: `self.specresp_grp'
"""
if self.groupped:
return
if verbose:
print("INFO: Grouping ARF '%s' ..." % self.filename)
self.energy_channel = energy_channel
self.grouping = grouping
# interpolate the ARF w.r.t the spectral channel energies
arf_interp = self.interpolate(x=energy_channel, verbose=verbose)
self.specresp_grp = group_data(arf_interp, grouping)
self.groupped = True
# class ARF }}}
def getGriddata(x,y,z,extend):
    '''Interpolate scattered data (x, y, z) onto a regular grid over the
    bounding box extend = (xmin, xmax, ymin, ymax).'''
(xmin,xmax,ymin,ymax)=extend
    # note: points are stored as (y, x) pairs below, so the mgrid outputs are
    # deliberately named in swapped order
    grid_y, grid_x = np.mgrid[xmin:xmax:(xmax-xmin)*10j, ymin:ymax:(ymax-ymin)*10j]
points=[]
for i in range(x.shape[0]):
points.append([y[i],x[i]])
values=z
# see http://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.griddata.html
from scipy.interpolate import griddata
# grid_z0 = griddata(points, values, (grid_x, grid_y), method='nearest')
# grid_z1 = griddata(points, values, (grid_x, grid_y), method='linear')
    grid_z2 = griddata(points, values, (grid_x, grid_y), method='cubic')
return grid_z2
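# Hypothetical usage of getGriddata on 50 scattered samples; the mgrid step
# of (xmax - xmin) * 10j above yields a 100x100 grid for this bounding box:
import numpy as np

_x = np.random.rand(50) * 10
_y = np.random.rand(50) * 10
_z = np.sin(_x) + np.cos(_y)
_grid = getGriddata(_x, _y, _z, extend=(0, 10, 0, 10))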
def _prepare_interpolators(self):
"""
Updates the interpolator functions the user calls
to interpolate the current plan.
"""
if len(self.x_seq) == 1:
self.get_state = lambda t: self.x_seq[0]
self.get_effort = lambda t: np.zeros(self.ncontrols)
else:
self.get_state = interp1d(self.t_seq, np.array(self.x_seq), axis=0, assume_sorted=True,
bounds_error=False, fill_value=self.x_seq[-1][:])
self.get_effort = interp1d(self.t_seq, np.array(self.u_seq), axis=0, assume_sorted=True,
bounds_error=False, fill_value=self.u_seq[-1][:])
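# Standalone sketch of the same clamped-interpolation pattern used above
# (hypothetical data): past the end of the plan, the final state is held.
import numpy as np
from scipy.interpolate import interp1d

_t_seq = np.array([0.0, 1.0, 2.0])
_x_seq = np.array([[0.0, 0.0], [1.0, 0.5], [2.0, 1.0]])
_get_state = interp1d(_t_seq, _x_seq, axis=0, assume_sorted=True,
                      bounds_error=False, fill_value=_x_seq[-1])
_get_state(5.0)   # -> array([2., 1.]), i.e. the final state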
#################################################
def regrid(x,y,z,xnew,ynew,method='cubic'):
"""
Regrid 1D arrays (x,y,z) -- where z is some scalar field mapped at positions
x,y -- to a 2d array Z defined in the cartesian grids xnew,ynew (1D arrays with
new grid).
For the interpolation method, choose nearest, linear or cubic.
>>> rho=regrid(d.x,d.y,d.rho,xnew,ynew)
.. todo:: need to create a 3d version of this method, paving the road for the 3d simulations.
"""
import scipy.interpolate
# regrid the data to a nice cartesian grid
Z = scipy.interpolate.griddata((x, y), z, (xnew[None,:], ynew[:,None]), method=method)
# get rid of NaNs
return nanzero(Z)
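# Hypothetical usage of regrid (note: nanzero() is assumed to be defined
# elsewhere in this module; it zeroes the NaNs left by cubic griddata):
import numpy as np

_x = np.random.rand(500) * 2 - 1
_y = np.random.rand(500) * 2 - 1
_z = np.exp(-(_x**2 + _y**2))
_xnew = np.linspace(-1.0, 1.0, 128)
_ynew = np.linspace(-1.0, 1.0, 128)
_Z = regrid(_x, _y, _z, _xnew, _ynew, method='cubic')   # shape (128, 128)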
def lambda_interp(lambdau, s):
    # lambdau is the lambda sequence produced by the model;
    # s is the new vector at which evaluations are required.
    # The result is a vector of left and right indices, and a vector of
    # fractions. The new values are interpolated between the two using the
    # fraction. Note: lambda decreases, so you take:
    # sfrac*left + (1 - sfrac)*right
if len(lambdau) == 1:
nums = len(s)
left = scipy.zeros([nums, 1], dtype = scipy.integer)
right = left
sfrac = scipy.zeros([nums, 1], dtype = scipy.float64)
else:
s[s > scipy.amax(lambdau)] = scipy.amax(lambdau)
s[s < scipy.amin(lambdau)] = scipy.amin(lambdau)
k = len(lambdau)
sfrac = (lambdau[0] - s)/(lambdau[0] - lambdau[k - 1])
lambdau = (lambdau[0] - lambdau)/(lambdau[0] - lambdau[k - 1])
coord = scipy.interpolate.interp1d(lambdau, range(k))(sfrac)
left = scipy.floor(coord).astype(scipy.integer, copy = False)
right = scipy.ceil(coord).astype(scipy.integer, copy = False)
#
tf = left != right
sfrac[tf] = (sfrac[tf] - lambdau[right[tf]])/(lambdau[left[tf]] - lambdau[right[tf]])
sfrac[~tf] = 1.0
#if left != right:
# sfrac = (sfrac - lambdau[right])/(lambdau[left] - lambdau[right])
#else:
# sfrac[left == right] = 1.0
result = dict()
result['left'] = left
result['right'] = right
result['frac'] = sfrac
return(result)
# end of lambda_interp
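# Hedged sketch of how the result is typically consumed (this mirrors
# glmnet-style coefficient interpolation; names below are hypothetical):
import numpy as np

_lam = np.array([1.0, 0.5, 0.25, 0.1])        # decreasing lambda path
_coefs = np.random.rand(3, 4)                 # one column per lambda
_s = np.array([0.7, 0.2])                     # new lambdas to evaluate at
_r = lambda_interp(_lam, _s)
_coefs_s = (_coefs[:, _r['left']] * _r['frac']
            + _coefs[:, _r['right']] * (1 - _r['frac']))   # shape (3, 2)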
# =========================================
# source: test_generic.py, project PyDataLondon29-EmbarrassinglyParallelDAWithAWSLambda (author: SignalMedia)
def test_interpolate(self):
ts = Series(np.arange(len(self.ts), dtype=float), self.ts.index)
ts_copy = ts.copy()
    ts_copy[5:10] = np.nan
linear_interp = ts_copy.interpolate(method='linear')
self.assert_numpy_array_equal(linear_interp, ts)
ord_ts = Series([d.toordinal() for d in self.ts.index],
index=self.ts.index).astype(float)
ord_ts_copy = ord_ts.copy()
    ord_ts_copy[5:10] = np.nan
time_interp = ord_ts_copy.interpolate(method='time')
self.assert_numpy_array_equal(time_interp, ord_ts)
# try time interpolation on a non-TimeSeries
# Only raises ValueError if there are NaNs.
non_ts = self.series.copy()
    non_ts[0] = np.nan
self.assertRaises(ValueError, non_ts.interpolate, method='time')
def test_interpolate_index_values(self):
s = Series(np.nan, index=np.sort(np.random.rand(30)))
s[::3] = np.random.randn(10)
vals = s.index.values.astype(float)
result = s.interpolate(method='index')
expected = s.copy()
bad = isnull(expected.values)
good = ~bad
expected = Series(np.interp(vals[bad], vals[good],
s.values[good]),
index=s.index[bad])
assert_series_equal(result[bad], expected)
# 'values' is synonymous with 'index' for the method kwarg
other_result = s.interpolate(method='values')
assert_series_equal(other_result, result)
assert_series_equal(other_result[bad], expected)
def test_interp_limit_before_ends(self):
    # These tests are for issue #11115 -- limit ends properly.
s = Series([np.nan, np.nan, 5, 7, np.nan, np.nan])
expected = Series([np.nan, np.nan, 5., 7., 7., np.nan])
result = s.interpolate(method='linear', limit=1,
limit_direction='forward')
assert_series_equal(result, expected)
expected = Series([np.nan, 5., 5., 7., np.nan, np.nan])
result = s.interpolate(method='linear', limit=1,
limit_direction='backward')
assert_series_equal(result, expected)
expected = Series([np.nan, 5., 5., 7., 7., np.nan])
result = s.interpolate(method='linear', limit=1,
limit_direction='both')
assert_series_equal(result, expected)
def test_interp_basic(self):
df = DataFrame({'A': [1, 2, np.nan, 4],
'B': [1, 4, 9, np.nan],
'C': [1, 2, 3, 5],
'D': list('abcd')})
expected = DataFrame({'A': [1., 2., 3., 4.],
'B': [1., 4., 9., 9.],
'C': [1, 2, 3, 5],
'D': list('abcd')})
result = df.interpolate()
assert_frame_equal(result, expected)
result = df.set_index('C').interpolate()
expected = df.set_index('C')
expected.loc[3, 'A'] = 3
expected.loc[5, 'B'] = 9
assert_frame_equal(result, expected)
def test_interp_rowwise(self):
df = DataFrame({0: [1, 2, np.nan, 4],
1: [2, 3, 4, np.nan],
2: [np.nan, 4, 5, 6],
3: [4, np.nan, 6, 7],
4: [1, 2, 3, 4]})
result = df.interpolate(axis=1)
expected = df.copy()
expected.loc[3, 1] = 5
expected.loc[0, 2] = 3
expected.loc[1, 3] = 3
expected[4] = expected[4].astype(np.float64)
assert_frame_equal(result, expected)
# scipy route
tm._skip_if_no_scipy()
result = df.interpolate(axis=1, method='values')
assert_frame_equal(result, expected)
result = df.interpolate(axis=0)
expected = df.interpolate()
assert_frame_equal(result, expected)
def test_interp_ignore_all_good(self):
# GH
df = DataFrame({'A': [1, 2, np.nan, 4],
'B': [1, 2, 3, 4],
'C': [1., 2., np.nan, 4.],
'D': [1., 2., 3., 4.]})
expected = DataFrame({'A': np.array(
[1, 2, 3, 4], dtype='float64'),
'B': np.array(
[1, 2, 3, 4], dtype='int64'),
'C': np.array(
[1., 2., 3, 4.], dtype='float64'),
'D': np.array(
[1., 2., 3., 4.], dtype='float64')})
result = df.interpolate(downcast=None)
assert_frame_equal(result, expected)
# all good
result = df[['B', 'D']].interpolate(downcast=None)
assert_frame_equal(result, df[['B', 'D']])
def __call__(self, vals):
"""
Evaluate interpolator for values given at the source points.
Parameters
----------
    vals : ndarray of float, shape (numsources, ...)
        Values at the source points from which to interpolate
Returns
-------
output : None
"""
self._check_shape(vals)
return None
def __call__(self, vals, maxdist=None):
"""
Evaluate interpolator for values given at the source points.
Parameters
----------
    vals : ndarray of float, shape (numsourcepoints, ...)
        Values at the source points from which to interpolate
    maxdist : float, optional
        Maximum distance up to which an interpolated value is assigned;
        where the distance exceeds maxdist, np.nan is assigned.
        If maxdist is None, values are assigned everywhere.
Returns
-------
output : ndarray of float with shape (numtargetpoints,...)
"""
self._check_shape(vals)
out = vals[self.ix]
if maxdist is None:
return out
else:
return np.where(self.dists > maxdist, np.nan, out)
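# Minimal standalone sketch of the same nearest-neighbour pattern
# (hypothetical data; the class above precomputes self.ix and self.dists):
import numpy as np
from scipy.spatial import cKDTree

_src = np.random.rand(100, 2)
_trg = np.random.rand(20, 2)
_vals = np.sin(_src[:, 0])
_dists, _ix = cKDTree(_src).query(_trg, k=1)
_out = np.where(_dists > 0.1, np.nan, _vals[_ix])   # maxdist = 0.1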
def interpolate(self, x=None, verbose=False):
"""
Interpolate the ARF curve using `scipy.interpolate'
If the requested point is outside of the data range, the
fill value of *zero* is returned.
Arguments:
* x: points at which the interpolation to be calculated.
Return:
If x is None, then the interpolated function is returned,
otherwise, the interpolated data are returned.
"""
if not hasattr(self, "f_interp") or self.f_interp is None:
arf = self.get_data(copy=False)
if verbose:
print("INFO: Interpolating ARF '%s' (may take a while) ..." %
self.filename)
f_interp = scipy.interpolate.interp1d(
self.energy, arf, kind="quadratic", bounds_error=False,
fill_value=0.0, assume_sorted=True)
self.f_interp = f_interp
if x is not None:
return self.f_interp(x)
else:
return self.f_interp
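# Standalone sketch of the same cached-interpolation idea (hypothetical
# data; the real method reads the curve via self.get_data and caches the
# interpolator on self.f_interp):
import numpy as np
import scipy.interpolate

_energy = np.linspace(0.1, 10.0, 500)
_arf = np.exp(-_energy)                  # stand-in for the effective-area curve
_f = scipy.interpolate.interp1d(_energy, _arf, kind="quadratic",
                                bounds_error=False, fill_value=0.0,
                                assume_sorted=True)
_f(np.array([0.05, 1.0, 20.0]))          # out-of-range points filled with zero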
def interpolate(x, y, z, gridsize, mode='thin_plate', rbfmode=True, shape=None):
    grids = gridsize
    dx = np.max(x) - np.min(x)
    dy = np.max(y) - np.min(y)
    # keep the grid aspect ratio close to the data aspect ratio
    if dx > dy:
        gridx = grids
        gridy = int(round(dy/dx*grids))
    else:
        gridy = grids
        gridx = int(round(dx/dy*grids))
    if shape is not None:
        (gridy, gridx) = shape
    xi, yi = np.linspace(np.min(x), np.max(x), gridx), np.linspace(np.min(y), np.max(y), gridy)
    xi, yi = np.meshgrid(xi, yi)
    if rbfmode:
        rbf = scipy.interpolate.Rbf(x, y, z, function=mode)
        # a second RBF with swapped coordinates is evaluated on the (yi, xi) grid
        rbf2 = scipy.interpolate.Rbf(y, x, z, function=mode)
        zi = rbf2(yi, xi)
    else:
        print("interp2d mode is not implemented")
        rbf = scipy.interpolate.interp2d(x, y, z, kind=mode)
        zi = rbf(xi[0, :], yi[:, 0])
    return [rbf, xi, yi, zi]
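# Hypothetical usage: thin-plate RBF surface over a 50-point scatter on a
# roughly square grid (rbfmode=True is the default path above):
import numpy as np

_x = np.random.rand(50)
_y = np.random.rand(50)
_z = _x * _y
_rbf, _xi, _yi, _zi = interpolate(_x, _y, _z, gridsize=100, mode='thin_plate')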
def interpolate(coords, var, interp_coords, missing_value=None, fill=True, kind="linear"):
"""Interpolate globally defined data to a different (regular) grid.
Arguments:
coords: Tuple of coordinate arrays for each dimension.
var (:obj:`ndarray` of dim (nx1, ..., nxd)): Variable data to interpolate.
interp_coords: Tuple of coordinate arrays to interpolate to.
missing_value (optional): Value denoting cells of missing data in ``var``.
Is replaced by `NaN` before interpolating. Defaults to `None`, which means
no replacement is taking place.
fill (bool, optional): Whether `NaN` values should be replaced by the nearest
finite value after interpolating. Defaults to ``True``.
kind (str, optional): Order of interpolation. Supported are `nearest` and
`linear` (default).
Returns:
:obj:`ndarray` containing the interpolated values on the grid spanned by
``interp_coords``.
"""
if len(coords) != len(interp_coords) or len(coords) != var.ndim:
raise ValueError("Dimensions of coordinates and values do not match")
var = np.array(var)
if missing_value is not None:
invalid_mask = np.isclose(var, missing_value)
var[invalid_mask] = np.nan
if var.ndim > 1 and coords[0].ndim == 1:
interp_grid = np.rollaxis(np.array(np.meshgrid(
*interp_coords, indexing="ij", copy=False)), 0, len(interp_coords) + 1)
else:
interp_grid = coords
var = scipy.interpolate.interpn(coords, var, interp_grid,
bounds_error=False, fill_value=np.nan, method=kind)
if fill:
var = fill_holes(var)
return var
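# Hypothetical usage: regrid a coarse 2-D field onto a finer grid. fill=False
# avoids the fill_holes() dependency (defined elsewhere in this module);
# points outside the source grid would come back as NaN:
import numpy as np

_lon = np.linspace(0.0, 350.0, 36)
_lat = np.linspace(-80.0, 80.0, 20)
_field = np.random.rand(36, 20)
_lon_new = np.linspace(0.0, 350.0, 144)
_lat_new = np.linspace(-80.0, 80.0, 80)
_fine = interpolate((_lon, _lat), _field, (_lon_new, _lat_new),
                    kind="linear", fill=False)   # shape (144, 80)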
def get_periodic_interval(current_time, cycle_length, rec_spacing, n_rec):
"""Used for linear interpolation between periodic time intervals.
One common application is the interpolation of external forcings that are defined
at discrete times (e.g. one value per month of a standard year) to the current
time step.
Arguments:
current_time (float): Time to interpolate to.
cycle_length (float): Total length of one periodic cycle.
rec_spacing (float): Time spacing between each data record.
n_rec (int): Total number of records available.
Returns:
:obj:`tuple` containing (n1, f1), (n2, f2): Indices and weights for the interpolated
record array.
Example:
The following interpolates a record array ``data`` containing 12 monthly values
to the current time step:
>>> year_in_seconds = 60. * 60. * 24. * 365.
>>> current_time = 60. * 60. * 24. * 45. # mid-february
>>> print(data.shape)
(360, 180, 12)
>>> (n1, f1), (n2, f2) = get_periodic_interval(current_time, year_in_seconds, year_in_seconds / 12, 12)
>>> data_at_current_time = f1 * data[..., n1] + f2 * data[..., n2]
"""
locTime = current_time - rec_spacing * 0.5 + \
cycle_length * (2 - round(current_time / cycle_length))
tmpTime = locTime % cycle_length
tRec1 = 1 + int(tmpTime / rec_spacing)
tRec2 = 1 + tRec1 % int(n_rec)
wght2 = (tmpTime - rec_spacing * (tRec1 - 1)) / rec_spacing
wght1 = 1.0 - wght2
return (tRec1 - 1, wght1), (tRec2 - 1, wght2)
def map_profile_onto_turb_grid(self, profileTango):
"""Since Tango's domain is larger than GENE's in both directions, we can use a simple interpolating spline to
resample the profile on GENE's grid.
"""
interpolate = scipy.interpolate.InterpolatedUnivariateSpline(self.psiTango, profileTango)
profileGene = interpolate(self.psiGene)
return profileGene
def map_profile_onto_turb_grid(self, profileTango):
"""Since Tango's domain is larger than GENE's in both directions, we can use a simple interpolating spline to
resample the profile on GENE's grid.
"""
interpolate = scipy.interpolate.InterpolatedUnivariateSpline(self.xTango, profileTango)
profileTurb = interpolate(self.xTurb)
return profileTurb
def extend_with_zeros_both_sides(xSmall, fSmall, xLarge, enforcePositive=False):
"""Extending a function to a larger domain, with zeros where it was not originally defined.
The domain xSmall should be fully contained within xLarge. That is, xLarge extends farther outward
on both sides of the domain.
This function operates by resampling within the overlapping region xSmall, and then extending with zeros.
Sometimes, interpolation might produce negative values when zero is the minimum for physical reasons.
The diffusion coefficient is one example where one wants to maintain positivity. In this case, one
can optionally enforce positivity of the returned value by zeroing out negative values.
Inputs:
xSmall independent variable on the smaller domain (array)
fSmall dependent variable on the smaller domain (array)
xLarge independent variable on the larger domain (array)
enforcePositive (optional) If True, set any negative values to zero before returning (boolean)
Outputs:
fLarge dependent variable on the larger domain (array)
"""
assert xLarge[0] <= xSmall[0] and xLarge[-1] >= xSmall[-1]
# resample within the overlapping region
fLarge = np.zeros_like(xLarge) # initialize with zeros
ind = np.where(xLarge > xSmall[0])
indstart = ind[0][0]
ind = np.where(xLarge < xSmall[-1])
indfinal = ind[0][-1]
xLargeTemp = xLarge[indstart : indfinal + 1]
interpolate = scipy.interpolate.InterpolatedUnivariateSpline(xSmall, fSmall)
fLarge[indstart : indfinal+1] = interpolate(xLargeTemp)
# extend with zeros -- automatically performed because fLarge was initialized with zeros
    if enforcePositive:
ind = fLarge < 0
fLarge[ind] = 0
return fLarge
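# Hypothetical usage: extend a profile defined on [0, 1] onto [-0.5, 1.5];
# the region outside [0, 1] is filled with zeros:
import numpy as np

_xs = np.linspace(0.0, 1.0, 50)
_fs = np.sin(np.pi * _xs)
_xl = np.linspace(-0.5, 1.5, 200)
_fl = extend_with_zeros_both_sides(_xs, _fs, _xl, enforcePositive=True)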
def extend_with_zeros_left_side(xIn, fIn, xOut, enforcePositive=False):
"""Extending a function to another domain, with zeros where it was not originally defined.
The domains xIn and xOut should satsify xOut[0] < xIn[0] and xOut[-1] < xIn[0]. The output
domain is "to the left" of the input domain.
This function operates by resampling within the overlapping region, and then extending with zeros.
Sometimes, interpolation might produce negative values when zero is the minimum for physical reasons.
The diffusion coefficient is one example where one wants to maintain positivity. In this case, one
can optionally enforce positivity of the returned value by zeroing out negative values.
Inputs:
xIn independent variable on the input domain (array)
fIn dependent variable on the input domain (array)
xOut independent variable on the new domain (array)
enforcePositive (optional) If True, set any negative values to zero before returning (boolean)
Outputs:
fOut dependent variable on the new domain (array)
"""
assert xOut[0] <= xIn[0] and xOut[-1] <= xIn[-1]
fOut = np.zeros_like(xOut) # initialize with zeros
# resample within the overlapping region
ind = np.where(xOut > xIn[0])
indstart = ind[0][0]
xOutTemp = xOut[indstart:]
interpolate = scipy.interpolate.InterpolatedUnivariateSpline(xIn, fIn)
fOut[indstart:] = interpolate(xOutTemp)
# extend with zeros -- automatically performed because fOut was initialized with zeros
    if enforcePositive:
ind = fOut < 0
fOut[ind] = 0
return fOut
###################################################
#### Functions for extrapolation ####
def make_extrapolator_fixed_slope(xSmall, fSmall, outwardSlope):
"""Create an extrapolator that uses cubic interpolation within the given domain xSmall, and an
imposed linear fit with imposed slope outside the given domain xSmall. Data must be sorted.
Inputs:
xSmall independent variable on the smaller domain (array)
fSmall dependent variable on the smaller domain (array)
outwardSlope imposed slope outside the domain xSmall
Outputs:
extrapolator function that can be evaluated on a domain, like interpolators
"""
def extrapolator(xLarge):
        fLarge = np.zeros_like(xLarge, dtype=float)
# exterior region: left side
indLeftExterior = xLarge < xSmall[0]
fLarge[indLeftExterior] = outwardSlope * (xLarge[indLeftExterior] - xSmall[0]) + fSmall[0]
#exterior region: right side
indRightExterior = xLarge > xSmall[-1]
fLarge[indRightExterior] = outwardSlope * (xLarge[indRightExterior] - xSmall[-1]) + fSmall[-1]
# interpolated points in the interior using cubic interpolation
interpolatorInterior = scipy.interpolate.InterpolatedUnivariateSpline(xSmall, fSmall, k=3) # cubic
indInterior = (xLarge >= xSmall[0]) & (xLarge <= xSmall[-1])
fLarge[indInterior] = interpolatorInterior(xLarge[indInterior])
return fLarge
return extrapolator
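# Hypothetical usage: cubic interpolation inside [0, 1], flat (slope 0)
# continuation outside:
import numpy as np

_xs = np.linspace(0.0, 1.0, 20)
_fs = _xs ** 2
_extrap = make_extrapolator_fixed_slope(_xs, _fs, outwardSlope=0.0)
_fl = _extrap(np.linspace(-1.0, 2.0, 100))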
def interp1d(*args, **kwargs):
    # compatibility shim: drop 'assume_sorted' for older scipy versions that
    # do not accept it, then delegate to scipy.interpolate.interp1d
    kwargs.pop('assume_sorted', None)
    return scipy.interpolate.interp1d(*args, **kwargs)
def interpolate(self):
    # lazily import scipy.interpolate on first access; if scipy is not
    # installed, expose a NotAModule placeholder that raises on use
    if self._interpolate is None:
try:
import scipy.interpolate as interpolate
except ImportError:
interpolate = NotAModule(self._name)
self._interpolate = interpolate
return self._interpolate
def interpolate(self, stellar_photosphere, transitions):
"""
Interpolate non-LTE corrections to the equivalent widths of many
transitions for a single stellar photosphere.
:param stellar_photosphere:
A stellar atmosphere model.
:param transitions:
A table of atomic transitions.
"""
# A convenience function.
raise NotImplementedError
def __init__(self, cqt, Ls):
    from scipy.interpolate import interp1d
    # one 1-D interpolator per CQT row, each mapped onto the interval [0, Ls]
    self.intp = [interp1d(np.linspace(0, Ls, len(r)), r) for r in cqt]
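# Standalone sketch of the same idea: one interp1d per CQT row lets rows of
# different lengths be resampled onto a common axis (hypothetical data):
import numpy as np
from scipy.interpolate import interp1d

_Ls = 1000
_cqt = [np.random.rand(100), np.random.rand(250)]
_intp = [interp1d(np.linspace(0, _Ls, len(r)), r) for r in _cqt]
_t = np.arange(_Ls + 1)
_resampled = np.array([f(_t) for f in _intp])   # shape (2, 1001)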