def calibrate(self):
# generate sequence
self.set()
# run and load normalized data
data, _ = self.run(norm_pts = {self.qubit_names[0]: (0, 1), self.qubit_names[1]: (0, 2)})
# select target qubit
data_t = data[self.qubit_names[1]]
# fit
self.opt_par, all_params_0, all_params_1 = fit_CR([self.lengths, self.phases, self.amps], data_t, self.cal_type)
# plot the result
xaxis = self.lengths if self.cal_type==CR_cal_type.LENGTH else self.phases if self.cal_type==CR_cal_type.PHASE else self.amps
finer_xaxis = np.linspace(np.min(xaxis), np.max(xaxis), 4*len(xaxis))
self.plot["Data 0"] = (xaxis, data_t[:len(data_t)//2])
self.plot["Fit 0"] = (finer_xaxis, np.polyval(all_params_0, finer_xaxis) if self.cal_type == CR_cal_type.AMP else sinf(finer_xaxis, *all_params_0))
self.plot["Data 1"] = (xaxis, data_t[len(data_t)//2:])
self.plot["Fit 1"] = (finer_xaxis, np.polyval(all_params_1, finer_xaxis) if self.cal_type == CR_cal_type.AMP else sinf(finer_xaxis, *all_params_1))
return (self.cal_type.name.lower(), self.opt_par)
Python polyval() usage examples (source code)
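Throughout these examples, np.polyval(p, x) evaluates p[0]*x**(n-1) + ... + p[-1]: coefficients run from the highest degree down to the constant term. A minimal sketch:

import numpy as np

p = [2, 3, 1]              # represents 2*x**2 + 3*x + 1
print(np.polyval(p, 0))    # -> 1 (the constant term)
print(np.polyval(p, 2))    # -> 15 (2*4 + 3*2 + 1)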
def band_center(spectrum, low_endmember=None, high_endmember=None, degree=3):
x = spectrum.index
y = spectrum
if not low_endmember:
low_endmember = x[0]
if not high_endmember:
high_endmember = x[-1]
ny = y[low_endmember:high_endmember]
fit = np.polyfit(ny.index, ny, degree)
center_fit = Series(np.polyval(fit, ny.index), ny.index)
center = band_minima(center_fit)
return center, center_fit
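A usage sketch for band_center, assuming a pandas Series spectrum indexed by wavelength; band_minima is defined elsewhere in the project, so a plausible stand-in is shown:

import numpy as np
from pandas import Series

def band_minima(fit_series):
    # Stand-in for the project's band_minima: wavelength of the minimum value.
    return fit_series.idxmin()

# Hypothetical absorption band: a parabolic dip centred on 1000 nm.
wavelengths = np.linspace(900, 1100, 101)
spectrum = Series(0.5 + ((wavelengths - 1000) / 100) ** 2, index=wavelengths)

center, center_fit = band_center(spectrum, degree=3)
print(center)  # ~1000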
def plot(self, dataset, path, show=False):
with PdfPages(path) as pdf:
x_vals = dataset.data['T'].tolist()
y_vals = dataset.data[self.symbol].tolist()
plt.plot(x_vals, y_vals, 'ro', alpha=0.4, markersize=4)
x_vals2 = np.linspace(min(x_vals), max(x_vals), 80)
fx = np.polyval(self._coeffs, x_vals2)
plt.plot(x_vals2, fx, linewidth=0.3, label='')
plt.ticklabel_format(axis='y', style='sci', scilimits=(0, 4))
plt.legend(loc=3, bbox_to_anchor=(0, 0.8))
plt.title('$%s$ vs $T$' % self.display_symbol)
plt.xlabel('$T$ (K)')
plt.ylabel('$%s$ (%s)' % (self.display_symbol, self.units))
fig = plt.gcf()
pdf.savefig(fig)
plt.close()
if show:
webbrowser.open_new(path)
def fit_cubic(y0, y1, g0, g1):
"""Fit cubic polynomial to function values and derivatives at x = 0, 1.
Returns position and function value of minimum if fit succeeds. Fit does
not succeeds if
1. polynomial doesn't have extrema or
2. maximum is from (0,1) or
3. maximum is closer to 0.5 than minimum
"""
a = 2*(y0-y1)+g0+g1
b = -3*(y0-y1)-2*g0-g1
p = np.array([a, b, g0, y0])
r = np.roots(np.polyder(p))
if not np.isreal(r).all():
return None, None
r = sorted(x.real for x in r)
if p[0] > 0:
maxim, minim = r
else:
minim, maxim = r
if 0 < maxim < 1 and abs(minim-0.5) > abs(maxim-0.5):
return None, None
return minim, np.polyval(p, minim)
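A quick check of fit_cubic against a known cubic. For f(x) = x**3 - 0.45*x**2 - 0.3*x + 1 the local maximum is at x = -0.2 and the minimum at x = 0.5:

# Function values and derivatives at x = 0 and x = 1.
y0, g0 = 1.0, -0.3    # f(0), f'(0)
y1, g1 = 1.25, 1.8    # f(1), f'(1)
xmin, fmin = fit_cubic(y0, y1, g0, g1)
print(xmin, fmin)     # 0.5 0.8625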
def __call__(self, dispersion, *parameters):
"""
Generate data at the dispersion points, given the parameters.
:param dispersion:
An array of dispersion points to calculate the data for.
:param parameters:
The model parameter values: profile parameters first, then any trailing continuum coefficients.
"""
function, profile_parameters = self._profiles[self.metadata["profile"]]
N = len(profile_parameters)
y = 1.0 - function(dispersion, *parameters[:N])
# Assume rest of the parameters are continuum coefficients.
if parameters[N:]:
y *= np.polyval(parameters[N:], dispersion)
return y
def meyeraux(x):
"""meyer wavelet auxiliary function.
v(x) = 35*x^4 - 84*x^5 + 70*x^6 - 20*x^7.
Parameters
----------
x : array
grid points
Returns
-------
y : array
values at x
"""
# Polynomial part on [0, 1]; zero below, clamped to one above.
y = np.polyval([-20, 70, -84, 35, 0, 0, 0, 0], x) * (x >= 0) * (x <= 1)
y += (x > 1)
return y
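A sanity check: meyeraux rises from v(0) = 0 to v(1) = 1 and is clamped outside [0, 1]:

import numpy as np

x = np.array([-0.5, 0.0, 0.5, 1.0, 2.0])
print(meyeraux(x))  # [0.  0.  0.5  1.  1.]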
def get_bias_coeff_from_T(self, master_bias_temp, master_bias_level,
frame_temp, calibrated_params):
"""
:param master_bias_temp: Temperature of the master bias frame.
:param master_bias_level: Median of the master bias frame.
:param frame_temp: Temperature of the frame to correct.
:param calibrated_params: parameters [a, b] of the
function bias_level(T) = aT + b. T is in degrees C and
bias_level(T) is the median of the bias frame at the
given temperature.
"""
calib_master_bias_level = np.polyval(
calibrated_params, master_bias_temp)
calib_frame_bias_level = np.polyval(
calibrated_params, frame_temp)
return calib_frame_bias_level / calib_master_bias_level
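A usage sketch of the underlying calculation with hypothetical calibration parameters (the linear model below is illustrative only):

import numpy as np

calibrated_params = [2.0, 500.0]   # hypothetical bias_level(T) = 2.0*T + 500.0
master_bias_temp = -10.0           # degrees C
frame_temp = -5.0

# Same ratio the method returns: frame bias level over master bias level.
coeff = (np.polyval(calibrated_params, frame_temp)
         / np.polyval(calibrated_params, master_bias_temp))
print(coeff)  # 490/480, about 1.021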
def csalbr(tau):
# Previously three separate functions: csalbr, fintexp1, fintexp3.
a = [-.57721566, 0.99999193, -0.24991055, 0.05519968, -0.00976004,
0.00107857]
#xx = a[0] + a[1] * tau + a[2] * tau**2 + a[3] * tau**3 + a[4] * tau**4 + a[5] * tau**5
#xx = np.polyval(a[::-1], tau)
# xx = a[0]
# xftau = 1.0
# for i in xrange(5):
# xftau = xftau*tau
# xx = xx + a[i] * xftau
fintexp1 = np.polyval(a[::-1], tau) - np.log(tau)
fintexp3 = (np.exp(-tau) * (1.0 - tau) + tau**2 * fintexp1) / 2.0
return (3.0 * tau - fintexp3 *
(4.0 + 2.0 * tau) + 2.0 * np.exp(-tau)) / (4.0 + 3.0 * tau)
# From crefl.1.7.1
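csalbr appears to compute the spherical albedo of a Rayleigh layer from its optical depth tau; it vectorises over arrays:

import numpy as np

tau = np.array([0.05, 0.1, 0.2])
print(csalbr(tau))  # albedo increases with optical depth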
def fit_dimensions(self, data, fit_TTmax = True):
"""
If fit_TTmax is True, use the blade length profile to adjust TTmax.
"""
if (fit_TTmax and 'L_blade' in data.columns):
dat = data.dropna(subset=['L_blade'])
xn = self.xn(dat['rank'])
fit = numpy.polyfit(xn,dat['L_blade'],7)
x = numpy.linspace(min(xn), max(xn), 500)
y = numpy.polyval(fit,x)
self.xnmax = x[numpy.argmax(y)]
self.TTmax = self.TTxn(self.xnmax)
self.scale = {k: numpy.mean(data[k] / self.predict(k,data['rank'], data['nff'])) for k in data.columns if k in self.ref.columns}
return self.scale
def __init__(self, points, values, *args, **kwds):
"""
Parameters
----------
points : nd array (npoints, ndim)
values : 1d array (npoints,)
**kwds : keywords to [avg]polyfit()
"""
self.points = self._fix_shape_init(points)
assert self.points.ndim == 2, "points is not 2d array"
self.values = values
if self._has_keys(kwds, ['degrange', 'degmin', 'degmax', 'levels']):
self.fitfunc = avgpolyfit
self.evalfunc = avgpolyval
else:
self.fitfunc = polyfit
self.evalfunc = polyval
self.fit = self.fitfunc(self.points, self.values, *args, **kwds)
def vp2dp(vp,p=[],temp=[],enhance=False):
"""
Convert a volume mixing ratio to a dew point (and vapour pressure),
using the ITS-90 correction of Wexler's formula.
Optional enhancement factors account for non-ideal behaviour.
"""
vp=np.atleast_1d(vp)
c=np.array([-9.2288067e-06, 0.46778925, -20.156028, 207.98233],dtype='f8')
d=np.array([-7.5172865e-05, 0.0056577518, -0.13319669, 1],dtype='f8')
lnes=np.log(vp*1e2)
dp=np.polyval(c,lnes)/np.polyval(d,lnes)
if(enhance and len(p)>0) :
if(len(temp)==0):
temp=dp
A=np.array([8.5813609e-09, -6.7703064e-06, 0.001807157, -0.16302041],dtype='f8')
B=np.array([6.3405286e-07, -0.00077326396, 0.34378043, -59.890467],dtype='f8')
alpha=np.polyval(A,temp)
beta=np.exp(np.polyval(B,temp))
ef=np.exp(alpha*(1-vp/p)+beta*(p/vp-1))
vp=vp/ef
lnes=np.log(vp*1e2)
dp=np.polyval(c,lnes)/np.polyval(d,lnes)
return dp
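A minimal call, assuming vp is a vapour pressure in hPa (the factor of 1e2 converts to Pa before the log); 6.11 hPa is roughly the saturation pressure at 0 degrees C, so the returned dew point should be near 273 K:

import numpy as np

print(vp2dp(6.11))  # ~273.1 (K)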
def eval_trace_poly(self, use_poly=False, smoothing_length=25):
sel = self.trace_x != self.flag
if use_poly:
self.trace = np.polyval(self.trace_polyvals,
1. * np.arange(self.D) / self.D)
else:
self.trace = np.zeros((self.D,))
init_x = np.where(sel)[0][0]
fin_x = np.where(sel)[0][-1]
self.trace[init_x:fin_x] = np.interp(np.arange(init_x,fin_x),
self.trace_x[sel],
self.trace_y[sel])
self.trace[init_x:fin_x] = biweight_filter(self.trace[init_x:fin_x],
smoothing_length)
ix = int(init_x+smoothing_length/2+1)
fx = int(init_x+smoothing_length/2+1 + smoothing_length*2)
p1 = np.polyfit(np.arange(ix,fx), self.trace[ix:fx], 1)
self.trace[:ix] = np.polyval(p1, np.arange(ix))
ix = int(fin_x-smoothing_length/2-1 - smoothing_length*2)
fx = int(fin_x-smoothing_length/2)
pf = np.polyfit(np.arange(ix,fx), self.trace[ix:fx], 1)
self.trace[fx:self.D] = np.polyval(pf, np.arange(fx,self.D))
def calculate_wavelength_new(x, solar_spec, fibers, fibn, group,
smooth_length=21, init_lims=None, order=3,
init_sol=None, debug=False, interactive=False,
nbins=21, wavebuff=100, plotbuff=85,
fixscale=True, use_leastsq=False, res=1.9):
L = len(x)
if init_lims is None:
init_lims = [np.min(solar_spec[:,0]), np.max(solar_spec[:,0])]
if init_sol is not None:
init_wave_sol = np.polyval(init_sol, 1. * x / L)
y_sun = solar_spec[:,1]
lowfib = np.max([0,fibn-group])
highfib = np.min([len(fibers)-1,fibn+group])
y = np.array([biweight_filter(fibers[i].spectrum, smooth_length)
/ fibers[i].spectrum
for i in range(lowfib,highfib)])
y = biweight_location(y,axis=(0,))
bins = np.linspace(init_lims[0], init_lims[1], nbins)
bins = bins[1:-1]
scale = 1.*(init_lims[1] - init_lims[0])/L
wv0 = init_lims[0]
wave0_save = []
scale_save = []
x_save = []
wave_save = []
def calc_omega(cp):
a=[]
for i in range(len(cp)):
ptmp = []
tmp = 0
for j in range(len(cp)):
if j != i:
row = []
row.insert(0,1/(cp[i]-cp[j]))
row.insert(1,-cp[j]/(cp[i]-cp[j]))
ptmp.insert(tmp,row)
tmp += 1
p=[1]
for j in range(len(cp)-1):
p = conv(p,ptmp[j])
pint = numpy.polyint(p)
arow = []
for j in range(len(cp)):
arow.append(numpy.polyval(pint,cp[j]))
a.append(arow)
return a
def calc_adot(cp,order=1):
a=[]
for i in range(len(cp)):
ptmp = []
tmp = 0
for j in range(len(cp)):
if j != i:
row = []
row.insert(0,1/(cp[i]-cp[j]))
row.insert(1,-cp[j]/(cp[i]-cp[j]))
ptmp.insert(tmp,row)
tmp += 1
p=[1]
for j in range(len(cp)-1):
p = conv(p,ptmp[j])
pder = numpy.polyder(p,order)
arow = []
for j in range(len(cp)):
arow.append(numpy.polyval(pder,cp[j]))
a.append(arow)
return a
def calc_afinal(cp):
afinal=[]
for i in range(len(cp)):
ptmp = []
tmp = 0
for j in range(len(cp)):
if j != i:
row = []
row.insert(0,1/(cp[i]-cp[j]))
row.insert(1,-cp[j]/(cp[i]-cp[j]))
ptmp.insert(tmp,row)
tmp += 1
p=[1]
for j in range(len(cp)-1):
p = conv(p,ptmp[j])
afinal.append(numpy.polyval(p,1.0))
return afinal
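These three helpers build Lagrange-basis collocation matrices: integrals of the basis polynomials (calc_omega), their derivatives at the collocation points (calc_adot), and their values at t = 1 (calc_afinal). They rely on a conv helper for polynomial products, assumed here to behave like numpy.convolve:

import numpy

def conv(p, q):
    # Polynomial product via coefficient convolution (assumed helper).
    return numpy.convolve(p, q)

cp = [0.5, 1.0]
print(calc_omega(cp))   # integrals of each basis polynomial over [0, cp[j]]
print(calc_adot(cp))    # basis derivatives at the collocation points
print(calc_afinal(cp))  # basis values at t = 1.0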
def test_polyfit(self):
c = np.array([3., 2., 1.])
x = np.linspace(0, 2, 7)
y = np.polyval(c, x)
err = [1, -1, 1, -1, 1, -1, 1]
weights = np.arange(8, 1, -1)**2/7.0
# check 1D case
m, cov = np.polyfit(x, y+err, 2, cov=True)
est = [3.8571, 0.2857, 1.619]
assert_almost_equal(est, m, decimal=4)
val0 = [[2.9388, -5.8776, 1.6327],
[-5.8776, 12.7347, -4.2449],
[1.6327, -4.2449, 2.3220]]
assert_almost_equal(val0, cov, decimal=4)
m2, cov2 = np.polyfit(x, y+err, 2, w=weights, cov=True)
assert_almost_equal([4.8927, -1.0177, 1.7768], m2, decimal=4)
val = [[8.7929, -10.0103, 0.9756],
[-10.0103, 13.6134, -1.8178],
[0.9756, -1.8178, 0.6674]]
assert_almost_equal(val, cov2, decimal=4)
# check 2D (n,1) case
y = y[:, np.newaxis]
c = c[:, np.newaxis]
assert_almost_equal(c, np.polyfit(x, y, 2))
# check 2D (n,2) case
yy = np.concatenate((y, y), axis=1)
cc = np.concatenate((c, c), axis=1)
assert_almost_equal(cc, np.polyfit(x, yy, 2))
m, cov = np.polyfit(x, yy + np.array(err)[:, np.newaxis], 2, cov=True)
assert_almost_equal(est, m[:, 0], decimal=4)
assert_almost_equal(est, m[:, 1], decimal=4)
assert_almost_equal(val0, cov[:, :, 0], decimal=4)
assert_almost_equal(val0, cov[:, :, 1], decimal=4)
def var(x):
"""
Evaluates a polynomial that describes the kernel width.
"""
p=[0.2,0.02]
return np.polyval(p,x)
#x sampling:
def extrap1d_constrained_linear_regression(x, y, xEval, side='right', numPts=10):
"""Perform extrapolation using constrained linear regression on part of the data (x,y). Use numPts
from either the left or right side of the data (specified by input variable side) as the input data.
The linear regression is constrained to pass through the final point (x0, y0) (rightmost point if
side=='right', leftmost if side=='left'). Data MUST be sorted.
Inputs:
x independent variable on the smaller domain (array)
y dependent variable on the smaller domain (array)
xEval values of x at which to evaluate the linear regression model
side side of the data from which to perform linear regression ('left' or 'right')
numPts number of points to use in the linear regression (scalar)
Outputs:
yEval values of dependent variable in the linear regression model evaluated at xEval (array)
"""
assert side=='left' or side=='right'
if side=='left':
xSide = x[:numPts]
ySide = y[:numPts]
x0 = x[0]
y0 = y[0]
elif side=='right':
xSide = x[-numPts:]
ySide = y[-numPts:]
x0 = x[-1]
y0 = y[-1]
a = least_squares_slope(xSide, ySide, x0, y0) # determine model (a, x0, y0)
b = y0 - a*x0
#y_eval = scipy.polyval([a,b], x_eval)
yEval = a*(xEval - x0) + y0 # evaluate model on desired points
return yEval
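A usage sketch; least_squares_slope lives elsewhere in the project, so a stand-in solving the constrained problem (line forced through (x0, y0)) is assumed:

import numpy as np

def least_squares_slope(x, y, x0, y0):
    # Slope minimising sum((y - y0 - a*(x - x0))**2), i.e. the best-fit
    # line constrained to pass through (x0, y0).
    dx, dy = x - x0, y - y0
    return np.sum(dx * dy) / np.sum(dx * dx)

x = np.linspace(0, 10, 21)
y = 2.0 * x + 1.0
print(extrap1d_constrained_linear_regression(x, y, np.array([11.0, 12.0])))
# -> [23. 25.]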
operations.py (project: algorithm-reference-library, author: SKA-ScienceDataProcessor)
def remove_continuum_blockvisibility(vis: BlockVisibility, degree=1, mask=None) -> BlockVisibility:
""" Fit and remove continuum visibility
Fit a polynomial in frequency of the specified degree where mask is True
:param vis:
:param degree: Degree of polynomial
:param mask:
:return:
"""
assert isinstance(vis, Visibility) or isinstance(vis, BlockVisibility), vis
if mask is not None:
assert numpy.sum(mask) > 2 * degree, "Insufficient channels for fit"
nchan = len(vis.frequency)
x = (vis.frequency - vis.frequency[nchan // 2]) / (vis.frequency[0] - vis.frequency[nchan // 2])
for row in range(vis.nvis):
for ant2 in range(vis.nants):
for ant1 in range(vis.nants):
for pol in range(vis.polarisation_frame.npol):
wt = numpy.sqrt(vis.data['weight'][row, ant2, ant1, :, pol])
if mask is not None:
wt[mask] = 0.0
fit = numpy.polyfit(x, vis.data['vis'][row, ant2, ant1, :, pol], w=wt, deg=degree)
prediction = numpy.polyval(fit, x)
vis.data['vis'][row, ant2, ant1, :, pol] -= prediction
return vis
operations.py (project: algorithm-reference-library, author: SKA-ScienceDataProcessor)
def remove_continuum_image(im: Image, degree=1, mask=None):
""" Fit and remove continuum visibility in place
Fit a polynomial in frequency of the specified degree where mask is True
:param im:
:param degree: Degree of polynomial
:param mask:
:return:
"""
assert isinstance(im, Image)
if mask is not None:
assert numpy.sum(mask) > 2 * degree, "Insufficient channels for fit"
nchan, npol, ny, nx = im.shape
channels = numpy.arange(nchan)
with warnings.catch_warnings():
warnings.simplefilter('ignore')
frequency = im.wcs.sub(['spectral']).wcs_pix2world(channels, 0)[0]
frequency -= frequency[nchan // 2]
frequency /= numpy.max(frequency)
wt = numpy.ones_like(frequency)
if mask is not None:
wt[mask] = 0.0
for pol in range(npol):
for y in range(ny):
for x in range(nx):
fit = numpy.polyfit(frequency, im.data[:, pol, y, x], w=wt, deg=degree)
prediction = numpy.polyval(fit, frequency)
im.data[:, pol, y, x] -= prediction
return im
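The core pattern in both remove_continuum routines is a weighted polyfit along the spectral axis followed by subtraction of the evaluated polynomial. On a plain 1-D spectrum (synthetic data, not the library API) it reduces to:

import numpy

channels = numpy.linspace(-1.0, 1.0, 64)   # normalised frequency axis
spectrum = 3.0 + 0.5 * channels            # linear continuum...
spectrum[30:34] += 2.0                     # ...plus a narrow line
wt = numpy.ones_like(channels)
wt[30:34] = 0.0                            # mask the line out of the fit

fit = numpy.polyfit(channels, spectrum, deg=1, w=wt)
residual = spectrum - numpy.polyval(fit, channels)
# residual is ~0 off the line and ~2.0 on the masked channels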
def phaserate(plotar, ms2mappings):
if plotar.plotType!='phatime':
raise RuntimeError, "phaserate() cannot run on plot type {0}".format( plotar.plotType )
spm = ms2mappings.spectralMap
# iterate over all plots and all data sets within the plots
for k in plotar.keys():
for d in plotar[k].keys():
# get a reference to the data set
dsref = plotar[k][d]
# get the full data set label - we have access to all the data set's properties (FQ, SB, POL etc)
n = plots.join_label(k, d)
# fit a line through the unwrapped phase
unw = numpy.unwrap(numpy.deg2rad(dsref.yval))
coeffs = numpy.polyfit(dsref.xval, unw, 1)
# evaluate the fitted polynomial at the x-loci
extray = numpy.polyval(coeffs, dsref.xval)
# here we could compute the reliability of the fit
diff = unw - extray
ss_tot = numpy.sum(numpy.square(unw - unw.mean()))
ss_res = numpy.sum(numpy.square(diff))
r_sq = 1.0 - ss_res/ss_tot
# compare std deviation and variance in the residuals after fit
std_r = numpy.std(diff)
var_r = numpy.var(diff)
f = spm.frequencyOfFREQ_SB(n.FQ, n.SB)
rate = coeffs[0]
if var_r<std_r:
print "{0}: {1:.8f} ps/s @ {2:5.4f}MHz [R2={3:.3f}]".format(n, rate/(2.0*numpy.pi*f*1.0e-12), f/1.0e6, r_sq )
# before plotting wrap back to -pi,pi and transform to degrees
dsref.extra = [ drawline_fn(n, dsref.xval, numpy.rad2deg(do_wrap(extray))) ]
def phasedbg(plotar, ms2mappings):
global cache
if plotar.plotType!='phatime':
raise RuntimeError, "phasedbg() cannot run on plot type {0}".format( plotar.plotType )
store = len(cache)==0
# iterate over all plots and all data sets within the plots
for k in plotar.keys():
for d in plotar[k].keys():
# get a reference to the data set
dsref = plotar[k][d]
# get the full data set label - we have access to all the data set's properties (FQ, SB, POL etc)
n = plots.join_label(k, d)
# fit a line through the unwrapped phase
unw = numpy.unwrap(numpy.deg2rad(dsref.yval))
#coeffs = numpy.polyfit(dsref.xval, unw, 1)
coeffs = numpy.polyfit(range(len(dsref.yval)), unw, 1)
# evaluate the fitted polynomial at the x-loci
extray = numpy.polyval(coeffs, dsref.xval)
# here we could compute the reliability of the fit
diff = unw - extray
# compare std deviation and variance in the residuals after fit
std_r = numpy.std(diff)
var_r = numpy.var(diff)
coeffs = numpy.rad2deg(coeffs)
if var_r<std_r:
# decide what to do
if store:
cache[n] = coeffs
else:
# check if current key exists in cache; if so
# do differencing
otherCoeffs = cache.get(n, None)
if otherCoeffs is None:
print "{0}: not found in cache".format( n )
else:
delta = otherCoeffs - coeffs
print "{0.BL} {0.SB} {0.P}: dRate={1:5.4f} dOff={2:4.1f}".format(n, delta[0], delta[1]+360.0 if delta[1]<0.0 else delta[1])
# before plotting wrap back to -pi,pi and transform to degrees
#dsref.extra = [ drawline_fn(n, dsref.xval, numpy.rad2deg(do_wrap(extray))) ]
if not store:
cache = {}
def estimate_tau0(T0, atm):
T0_fit = np.array([600,1000])
tau0_fit = np.array([5,0.1])
x = 1000.0/T0_fit
y = 1.0*np.log10(tau0_fit)
coeff_low = np.polyfit(x,y,1)
T0_fit = np.array([1000,1600])
tau0_fit = np.array([0.1,1e-4])
x = 1000.0/T0_fit
y = 1.0*np.log10(tau0_fit)
coeff_high = np.polyfit(x,y,1)
atm_fit = 1.0*np.array([1, 10, 20])
mtp_fit = 1.0*np.array([1, 0.1, 0.02])
x = np.sqrt(atm_fit)
y = np.log10(mtp_fit)
coeff_p = np.polyfit(x,y,1)
if T0 < 1000:
tau_1atm = (10 ** np.polyval(coeff_low, 1000.0/T0))
else:
tau_1atm = (10 ** np.polyval(coeff_high, 1000.0/T0))
mtp = 10 ** np.polyval(coeff_p, np.sqrt(atm))
tau = tau_1atm * mtp
#print 'tau = '+str(tau)
tau_level = 10 ** (np.floor(np.log10(tau)))
#print 'tau_level = '+str(tau_level)
tau_rounded = tau_level * np.round(tau/tau_level * 1.0)/1.0
#print 'tau_rounded = '+str(tau_rounded)
tau_rounded = max(1e-2, tau_rounded)
return float(str(tau_rounded))
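A sample call; the numbers simply exercise the low- and high-temperature branches of the interpolation:

print(estimate_tau0(800.0, 1.0))    # low-T branch (T0 < 1000)
print(estimate_tau0(1200.0, 10.0))  # high-T branch, scaled by the pressure fit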
def compose_functions(cls,
x,
number_of_compositions=1,
functions=(np.sin, np.exp, np.square, np.polyval,
np.tan, ),
clip_big_values=True,
clip_value=1e6):
"""Compose functions from an iterable of functions.
This is a helper function to cover a more real life scenario of
plottings.
Arguments:
x (numpy.array): array for which composed values will be computed.
number_of_compositions (int): number of compositions of functions.
functions (tuple): an iterable of functions.
clip_big_values (bool): whether or not to limit function extremes.
clip_value (float): limit values for function composition.
Returns:
y (numpy.array): array of composed functions
"""
i = 0
y = x
while i < number_of_compositions:
func = np.random.choice(functions)
if func == np.polyval:
n_coefs = np.random.randint(0, 10)
coefs = np.random.randint(-50, 50, size=n_coefs)
y = func(coefs, x)
else:
y = func(y)
if clip_big_values:
y = np.clip(y, -clip_value, clip_value)
i += 1
return y
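compose_functions never uses cls, so as excerpted here it can be called with any placeholder first argument. A reproducible sketch:

import numpy as np

np.random.seed(0)                # make the random composition repeatable
x = np.linspace(-2.0, 2.0, 100)
y = compose_functions(None, x, number_of_compositions=3)
print(y.shape, np.abs(y).max() <= 1e6)  # (100,) True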
# Goes with 'fast' parameters by default.
def plot_fit(self):
"""Plot the training data in X array along with decision boundary
"""
from matplotlib import pyplot as plt
x1 = np.linspace(self.table.min(), self.table.max(), 100)
#reverse self.theta as it requires coeffs from highest degree to constant term
x2 = np.polyval(np.poly1d(self.theta[::-1]),x1)
plt.plot(x1, x2, color='r', label='decision boundary');
plt.scatter(self.X[:, 1], self.X[:, 2], s=40, c=self.y, cmap=plt.cm.Spectral)
plt.legend()
plt.show()
def ployfit( y, x=None, order = 20 ):
'''
Fit data (a one-d array) with a polynomial function.
Return the fitted one-d array.
'''
if x is None:
x = range(len(y))
pol = np.polyfit(x, y, order)
return np.polyval(pol, x)
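A quick smoothing example (the function name retains the original's spelling):

import numpy as np

y = np.sin(np.linspace(0, 2 * np.pi, 50)) + 0.1 * np.random.randn(50)
smoothed = ployfit(y, order=7)
print(smoothed.shape)  # (50,)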
def construct_lanes(data, pts_per_lane):
poly_x, poly_y = data[:,:4], data[:,4:]
lane_x = np.vstack(
[np.polyval(poly_x[t], np.linspace(0, 50, pts_per_lane))
for t in range(poly_x.shape[0])])
lane_y = np.vstack(
[np.polyval(poly_y[t], np.linspace(0, 50, pts_per_lane))
for t in range(poly_y.shape[0])])
lane = np.hstack([lane_x, lane_y])
nnz = lane_x.sum(axis=1) > 0.
return lane, nnz
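A usage sketch, assuming each row of data packs four polynomial coefficients for x followed by four for y (highest degree first, as np.polyval expects):

import numpy as np

data = np.array([[0.0, 0.0, 1.0, 0.0,   0.0, 0.0, 0.5, 1.0],   # straight lane
                 [0.0, 0.0, 0.0, 0.0,   0.0, 0.0, 0.0, 0.0]])  # empty slot
lane, nnz = construct_lanes(data, pts_per_lane=10)
print(lane.shape)  # (2, 20)
print(nnz)         # [ True False]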
def calculate(self, **state):
"""
Calculate the material physical property at the specified temperature
in the units specified by the object's 'property_units' property.
:param T: [K] temperature
:returns: physical property value
"""
super().calculate(**state)
return np.polyval(self._coeffs, state['T'])