def nan_helper(y):
    """
    Helper to handle indices and logical indices of NaNs.
    Input:
        - y, 1d numpy array with possible NaNs
    Output:
        - nans, logical indices of NaNs
        - index, a function, with signature indices = index(logical_indices),
          to convert logical indices of NaNs to 'equivalent' indices
    Example:
        >>> # linear interpolation of NaNs
        >>> nans, x = nan_helper(y)
        >>> y[nans] = np.interp(x(nans), x(~nans), y[~nans])
    """
    # Source: http://stackoverflow.com/questions/6518811/interpolate-nan-values-in-a-numpy-array
    return np.isnan(y), lambda z: z.nonzero()[0]
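A minimal usage sketch of the docstring example (assuming `import numpy as np`; the array values are illustrative):

y = np.array([1.0, np.nan, np.nan, 4.0, 5.0])
nans, x = nan_helper(y)
y[nans] = np.interp(x(nans), x(~nans), y[~nans])
# y is now [1., 2., 3., 4., 5.]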
def scale(value, src_min, src_max, dst_min, dst_max, round_=False):
"""
Scale a value from one range to another.
:param value: Input value
:param src_min: Min value of input range
:param src_max: Max value of input range
:param dst_min: Min value of output range
:param dst_max: Max value of output range
    :param round_: True if the scaled value should be rounded to an integer
:return: The scaled value
"""
scaled = interp(clamp(value, src_min, src_max), [src_min, src_max], [dst_min, dst_max])
if round_:
scaled = int(round(scaled))
return scaled
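`clamp` is not a numpy function; a minimal sketch of the helper this snippet presumably relies on (name and behaviour inferred from the call site), plus a usage check:

def clamp(value, lo, hi):
    # restrict value to the closed interval [lo, hi]
    return max(lo, min(value, hi))

# e.g. map a sensor reading of 512 from [0, 1023] to [0, 100]:
# scale(512, 0, 1023, 0, 100, round_=True)  -> 50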
def predict(self, x):
"""
Predict labels for provided features.
Using a piecewise linear function.
1) If x exactly matches a boundary then associated prediction
is returned. In case there are multiple predictions with the
same boundary then one of them is returned. Which one is
undefined (same as java.util.Arrays.binarySearch).
2) If x is lower or higher than all boundaries then first or
last prediction is returned respectively. In case there are
multiple predictions with the same boundary then the lowest
or highest is returned respectively.
3) If x falls between two values in boundary array then
prediction is treated as piecewise linear function and
interpolated value is returned. In case there are multiple
values with the same boundary then the same rules as in 2)
are used.
:param x:
Feature or RDD of Features to be labeled.
"""
if isinstance(x, RDD):
return x.map(lambda v: self.predict(v))
return np.interp(x, self.boundaries, self.predictions)
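The three rules above are exactly how np.interp behaves on the model's (boundaries, predictions) arrays; a quick scalar check with toy values:

import numpy as np
boundaries = np.array([0.0, 1.0, 2.0])
predictions = np.array([0.0, 10.0, 12.0])
np.interp(0.5, boundaries, predictions)   # -> 5.0  (rule 3: interpolated)
np.interp(-1.0, boundaries, predictions)  # -> 0.0  (rule 2: below all boundaries)
np.interp(3.0, boundaries, predictions)   # -> 12.0 (rule 2: above all boundaries)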
def quantile(x, q, weights=None):
"""
Like numpy.percentile, but:
* Values of q are quantiles [0., 1.] rather than percentiles [0., 100.]
* scalar q not supported (q must be iterable)
* optional weights on x
"""
if weights is None:
return np.percentile(x, [100. * qi for qi in q])
else:
idx = np.argsort(x)
xsorted = x[idx]
cdf = np.add.accumulate(weights[idx])
cdf /= cdf[-1]
return np.interp(q, cdf, xsorted).tolist()
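A quick check of the weighted branch (toy values):

x = np.array([1.0, 2.0, 3.0, 4.0])
w = np.array([0.1, 0.1, 0.1, 0.7])
quantile(x, [0.5], weights=w)   # -> [3.2857...]: the weighted CDF crosses 0.5 between 3 and 4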
def convolve(self, lam, flux):
'''
Convolve flux with normalized filter throughput.
:param array_like lam: High-res wavelength grid [:math:`\mu \mathrm{m}`]
:param array_like flux: High-res flux grid \
[:math:`\mathrm{W/m}^2 / \mu \mathrm{m}`]
:returns array_like F: Flux convolved with normalized throughput
'''
    # interpolate filter throughput onto the high-res grid
T = np.interp(lam, self.wl, self.throughput)
# Convolve with normalized throughput
F = np.sum(flux * T) / np.sum(T)
return F
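The same computation written standalone, for a flat spectrum through a toy triangular filter (all values illustrative; `wl` and `throughput` stand in for the instance attributes):

lam = np.linspace(0.4, 0.8, 401)        # high-res wavelength grid [micron]
flux = np.ones_like(lam)                # flat spectrum
wl = np.array([0.5, 0.55, 0.6])         # filter wavelength samples
throughput = np.array([0.0, 1.0, 0.0])  # triangular throughput curve
T = np.interp(lam, wl, throughput)      # throughput resampled to the high-res grid
F = np.sum(flux * T) / np.sum(T)        # band-averaged flux; 1.0 for a flat spectrum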
def compute_Cv(T,Vmin,V,Cvib):
    """
    This function computes the isochoric heat capacity as a function of temperature.
    From *Cvib*, a matrix with *Cvib(T,V)* from the harmonic calculations, it
    determines *Cv* at each temperature by linear interpolation over volume,
    evaluated at Vmin(T). Vmin(T) comes from the minimization of F(V,T) and
    *V* is the array of volumes used for it.
    Returns *Cv(T)*.
    Work in progress... for now it uses all volumes in the interpolation.
    """
Cv = np.zeros(len(T))
for iT in range(0,len(T)):
Cv_interpolated = np.interp(Vmin[iT], V, Cvib[iT,:])
Cv[iT] = Cv_interpolated
return Cv
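A minimal sketch with made-up numbers (2 temperatures, 3 volumes):

T = np.array([100.0, 200.0])
V = np.array([10.0, 11.0, 12.0])
Cvib = np.array([[1.0, 1.2, 1.4],
                 [2.0, 2.2, 2.4]])
Vmin = np.array([10.5, 11.5])
compute_Cv(T, Vmin, V, Cvib)   # -> array([1.1, 2.3])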
def dose_to_volume(dvh, volume, *roi_volume):
    # if an roi_volume is not given, volume is assumed to be fractional
    if roi_volume:
        if isinstance(roi_volume[0], str):  # was `basestring` (Python 2 only)
            return 0
        roi_volume = roi_volume[0]
    else:
        roi_volume = 1
    # first DVH bin that drops below the requested fractional volume
    dose_high = np.argmax(dvh < (volume / roi_volume))
    y = volume / roi_volume
    x_range = [dose_high - 1, dose_high]
    y_range = [dvh[dose_high - 1], dvh[dose_high]]
    # dvh decreases monotonically, so reverse both arrays to give np.interp
    # the increasing xp it requires
    dose = np.interp(y, y_range[::-1], x_range[::-1]) * 0.01
    return dose
def join_data(dates_list, data_list):
    """ This function aligns heterogeneous time series data
    onto one shared time axis.
    dates_list : list of date-lists
    data_list : list of data-lists
    Returns:
        dates and data, but this time the data shares the same
        date-points
    """
# first get all unique dates from every sublist and make one list out of them
rdates = sorted(list(set([date for sublist in dates_list for date in sublist])))
rdata = []
# go through each vector and interpolate data if necessary
for dates, data_vecs in zip(dates_list, data_list):
for data in data_vecs:
if len(data) > 0:
rdata.append(np.interp(rdates,dates, data).tolist())
else: # if data is empty, then just create a zero-length vector
rdata.append(np.zeros(len(rdates)))
return rdates, rdata
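A toy check with numeric date axes (np.interp needs numeric x-values, so real dates would first be converted to timestamps):

dates_list = [[1, 3, 5], [2, 4]]
data_list = [[[10.0, 30.0, 50.0]], [[20.0, 40.0]]]
rdates, rdata = join_data(dates_list, data_list)
# rdates -> [1, 2, 3, 4, 5]
# rdata  -> [[10, 20, 30, 40, 50], [20, 20, 30, 40, 40]] (end values are clamped)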
def power_curve_query(ws,TI,opt="normal_TI"):
import numpy as np
    hub_height_ws = np.arange(3,13.5,0.5)  # 3.0 to 13.0 m/s in 0.5 m/s steps (21 points, matching the power tables)
power_normal_TI = np.array([0,20,63,116,177,248,331,428,540,667,812,972,1141,1299,1448,1561,1633,1661,1677,1678,1680])
power_low_TI = np.array([0,18,61,114,174,244,325,421,532,657,801,961,1134,1304,1463,1585,1654,1675,1680,1680,1680])
power_high_TI = np.array([0,24,68,123,185,258,344,446,562,693,841,994,1148,1287,1419,1519,1589,1637,1665,1679,1680])
if "var_TI" not in opt:
if "normal_TI" in opt:
power = power_normal_TI
if "low_TI" in opt:
power = power_low_TI
if "high_TI" in opt:
power = power_high_TI
power_interp = np.interp(ws, hub_height_ws, power)
else:
from power_curve_query_func import power_curve_var_TI
power_interp = power_curve_var_TI(ws,TI)
return power_interp
def power_curve_var_TI(ws,TI):
import numpy as np
hub_height_ws = np.arange(3,13.5,0.5)
power_normal_TI = np.array([0,20,63,116,177,248,331,428,540,667,812,972,1141,1299,1448,1561,1633,1661,1677,1678,1680])
power_low_TI = np.array([0,18,61,114,174,244,325,421,532,657,801,961,1134,1304,1463,1585,1654,1675,1680,1680,1680])
power_high_TI = np.array([0,24,68,123,185,258,344,446,562,693,841,994,1148,1287,1419,1519,1589,1637,1665,1679,1680])
    power_interp = np.zeros(len(ws))
    power_interp[:] = np.nan  # entries with TI >= 20 are left as NaN (outside the curves)
    index = 0
    for i, j in zip(ws, TI):
        if j < 10:
            power_interp[index] = np.interp(i, hub_height_ws, power_low_TI)
        elif j < 15:
            power_interp[index] = np.interp(i, hub_height_ws, power_normal_TI)
        elif j < 20:
            power_interp[index] = np.interp(i, hub_height_ws, power_high_TI)
        index += 1
    return power_interp
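For example, an 8 m/s wind speed evaluated at three turbulence intensities picks the low-, normal- and high-TI curves respectively (8.0 m/s is a grid point, so the values are read straight off the tables):

ws = np.array([8.0, 8.0, 8.0])
TI = np.array([5.0, 12.0, 17.0])
power_curve_var_TI(ws, TI)   # -> array([801., 812., 841.])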
def _get_estimated_counts_all_names(self,
sex,
current_year=datetime.now().year,
minimum_age=0):
'''
:param sex: str, m or f for sex.
:param current_year: int, optional, defaults to current year
:param minimum_age: int, optional, defaults to 0
    :return: pd.DataFrame with 'first_name', 'year_of_birth', 'count',
        'prob_alive' and 'estimated_count' columns; the commented-out tail of
        the return statement would reduce it to a pd.Series of estimated
        counts indexed by year of birth
'''
sex = self._check_and_normalize_gender(sex)
cur_df = (self._year_of_birth_df[
self._birth_year_df_mask(current_year=current_year,
first_name=None, minimum_age=minimum_age, sex=sex)
][['first_name', 'year_of_birth', 'count']])
year_stats = (self._mortality_df[self._mortality_df.as_of_year == current_year]
[['year_of_birth', sex + '_prob_alive']])
cur_df['prob_alive'] = np.interp(cur_df.year_of_birth,
year_stats.year_of_birth,
year_stats[sex + '_prob_alive'])
cur_df['estimated_count'] = cur_df['prob_alive'] * cur_df['count']
return cur_df # .set_index('year_of_birth')['estimated_count']
def decide(self, img_arr):
if config.camera.crop_top or config.camera.crop_bottom:
h, w, _ = img_arr.shape
t = config.camera.crop_top
l = h - config.camera.crop_bottom
img_arr = img_arr[t:l, :]
    # rescale pixel values from the camera's output range to the model's input range
    img_arr = np.interp(img_arr, config.camera.output_range,
                        config.model.input_range)
img_arr = np.expand_dims(img_arr, axis=0)
prediction = self.model.predict(img_arr)
if len(prediction) == 2:
yaw = methods.angle_to_yaw(prediction[0][0])
throttle = prediction[1][0]
else:
yaw = methods.angle_to_yaw(prediction[0][0])
throttle = 0
avf = config.model.yaw_average_factor
yaw = avf * self.yaw + (1.0 - avf) * yaw
self.yaw = yaw
return methods.yaw_to_angle(yaw), throttle
def Pbias(self,TES):
'''
find the Pbias at 90% Rn
'''
filterinfo=self.filterinfo(TES)
    if filterinfo is None: return None
Rn_ratio=self.Rn_ratio(TES)
if not isinstance(Rn_ratio,np.ndarray):return None
istart,iend=self.selected_iv_curve(TES)
Rn_ratio=Rn_ratio[istart:iend]
Ptes=self.Ptes(TES)
Ptes=Ptes[istart:iend]
    # np.interp needs an increasing xp, so flip the arrays if Rn_ratio decreases
    increasing=np.diff(Rn_ratio).mean()
    if increasing<0:
Pbias=np.interp(90., np.flip(Rn_ratio,0), np.flip(Ptes,0))
else:
Pbias=np.interp(90., Rn_ratio, Ptes)
return Pbias
def VIsmooth_ref(x):
#the size of EVIgood is 92*5760000, the size of the reference data is 46*5760000
x[x == -9999] = np.nan
EVIgood = x[0:92]
reference = np.concatenate([x[115:], x[92:], x[92:115]])
if np.sum(np.isnan(EVIgood)) == 92:
return np.concatenate([x[92:], x[23:69], x[92:]])
    ############################
    # a more elaborate algorithm is required here:
    # first get the difference between the observed series and the reference
    diff = EVIgood - reference
#fun = cdll.LoadLibrary(os.getcwd() + '/bise.so')
#outdiff = (c_double * len(EVIgood))()
#nans, y = nan_helper(diff)
#diff[nans] = np.interp(y(nans), y(~nans), diff[~nans])
diff[reference == 0] = 0
diff = pd.Series(diff)
reconstructVI = reference+diff.interpolate()
SGVI = savgol_filter(np.array(reconstructVI[23:69]), window_length=5, polyorder=3)
SGVI[SGVI < 0] = 0
return np.concatenate([SGVI, x[23:69], x[92:]])
def spectrum_analysis(model:Model.fem_model,n,spec):
    """
    spectrum analysis\n
    n: number of modes to use\n
    spec: a list of tuples (period,acceleration response)
    """
    freq,mode=eigen_mode(model,n)
    # generalized (modal) mass, stiffness and damping matrices
    M_=np.dot(mode.T,model.M)
    M_=np.dot(M_,mode)
    K_=np.dot(mode.T,model.K)
    K_=np.dot(K_,mode)
    C_=np.dot(mode.T,model.C)
    C_=np.dot(C_,mode)
    periods,accels=np.array(spec).T  # unpack the (period, acceleration) tuples
    d_=[]
    for (m_,k_,c_) in zip(M_.diagonal(),K_.diagonal(),C_.diagonal()):  # ndarray has no .diag() method
        sdof=SDOFSystem(m_,k_)
        T=sdof.omega_d()
        d_.append(np.interp(T,periods,accels*m_))
    d=np.dot(d_,mode)
    # CQC combination of the modal responses still to be applied here
    return d
def __init__(self,alpha_max,Tg,xi):
    # damping-dependent correction factors: gamma (decay exponent),
    # eta1 (slope adjustment) and eta2 (amplitude adjustment), clamped at their lower bounds
    gamma=0.9+(0.05-xi)/(0.3+6*xi)
    eta1=0.02+(0.05-xi)/(4+32*xi)
    eta1=eta1 if eta1>0 else 0
    eta2=1+(0.05-xi)/(0.08+1.6*xi)
    eta2=eta2 if eta2>0.55 else 0.55
    T=np.linspace(0,6,601)  # periods from 0 to 6 s in 0.01 s steps
    alpha=[]
    for t in T:
        if t<0.1:  # rising branch
            alpha.append(np.interp(t,[0,0.1],[0.45*alpha_max,eta2*alpha_max]))
        elif t<Tg:  # plateau
            alpha.append(eta2*alpha_max)
        elif t<5*Tg:  # power-law decay
            alpha.append((Tg/t)**gamma*eta2*alpha_max)
        else:  # linear decay
            alpha.append((eta2*0.2**gamma-eta1*(t-5*Tg))*alpha_max)
    self.__spectrum={'T':T,'alpha':alpha}
def _update_tsg_metrics(self, y_true, y_pred, prob):
self.tsg_gene_pred = pd.Series(y_pred, self.y.index)
self.tsg_gene_score = pd.Series(prob, self.y.index)
# compute metrics for classification
self.tsg_gene_count[self.num_pred] = sum(y_pred)
prec, recall, fscore, support = metrics.precision_recall_fscore_support(y_true, y_pred)
    tsg_col = 1  # column for metrics related to tsg
self.tsg_precision[self.num_pred] = prec[tsg_col]
self.tsg_recall[self.num_pred] = recall[tsg_col]
self.tsg_f1_score[self.num_pred] = fscore[tsg_col]
    self.logger.debug('Tsg Iter %d: Precision=%s, Recall=%s, f1_score=%s' % (
        self.num_pred + 1, str(prec), str(recall), str(fscore)))
# compute ROC curve metrics
fpr, tpr, thresholds = metrics.roc_curve(y_true, prob)
self.tsg_tpr_array[self.num_pred, :] = interp(self.tsg_fpr_array, fpr, tpr)
#self.tsg_tpr_array[0] = 0.0
# compute Precision-Recall curve metrics
p, r, thresh = metrics.precision_recall_curve(y_true, prob)
p, r, thresh = p[::-1], r[::-1], thresh[::-1] # reverse order of results
self.tsg_precision_array[self.num_pred, :] = interp(self.tsg_recall_array, r, p)
def findlum(self,x):
"""
Given the input frequency in units of log(nu/Hz), returns the list [log(nu),
Lnu] with the frequency and luminosity closest to the specified value.
>>> lognu,lum=s.findlum(14)
will look for the freq. and lum. nearest nu=10^14 Hz
"""
# Performs interpolation before the search
if not hasattr(self, 'nlnui'): self.interp()
# Looks for the frequency
i=lsd.search(x,self.lognui) # index
return [self.lognui[i], self.nlnui[i]/self.nui[i]]
def ion(self):
"""
Calculates the rate of ionizing photons in the SED.
>>> q=s.ion()
"""
import scipy.integrate
import scipy.stats
h=6.62607e-27 # Planck constant in CGS
    # Performs interpolation before integrating. This is a precaution in
    # case the user specifies weird integration limits.
if not hasattr(self, 'nlnui'): self.interp()
# 13.6 eV - "infty"
xi, xf = 15.52, 22.
# Gets only the elements corresponding to ionizing frequencies
i=numpy.where((self.lognui>=xi) & (self.lognui<=xf))
x,y = self.lognui[i],self.lli[i] # easier notation
# Calculates ionizing rate using integration (trapezoidal rule)
q=scipy.integrate.trapz(self.nlnui[i]/self.nui[i]/(h*self.nui[i]), self.nui[i])
return q
def sum(seds):
"""
Given a list of SEDs previously interpolated in the same binning,
sums their luminosities and returns a SED object with the sum.
>>> ss=sum([s,s1,s2])
returns the SED ss <- [lognu, s+s1+s2],
where s+s1+s2 -> log10[nuLnu(s)+nuLnu(s1)+nuLnu(s2)], lognu being the common
units of frequency for the SEDs after interpolation.
The method is designed to automatically call the interp method for each SED
if needed.
"""
# Precaution in case the user did not use the interp method
seds[0].interp(seds)
sums=numpy.zeros_like(seds[0].lognui) # initializes the sum
for sed in seds:
sums=sums+sed.nlnui
return SED(lognu=seds[0].lognui, ll=numpy.log10(sums), logfmt=1)
def _soviet_summary(actual_yield, scale_range):
keys = list(_soviet_summary_x.keys())
keys.sort() # keys may be returned in arbitrary order
if keys[-1] < actual_yield or actual_yield < keys[0]:
raise ValueOutsideGraphError(actual_yield)
    for k in range(len(keys) - 1):  # keys[k + 1] is read below, so stop one early
k1 = keys[k]
k2 = keys[k + 1]
if k1 <= actual_yield <= k2:
xs1 = _soviet_summary_x[k1]
ys1 = _soviet_summary_y[k1]
xs2 = _soviet_summary_x[k2]
ys2 = _soviet_summary_y[k2]
if xs1[-1] < scale_range or scale_range < xs1[0] or xs2[-1] < scale_range or scale_range < xs2[0]:
raise ValueOutsideGraphError(scale_range)
y1 = np.interp(scale_range, xs1, ys1)
y2 = np.interp(scale_range, xs2, ys2)
return 10**np.interp(actual_yield, [k1, k2], [y1, y2])
def _soviet_neutron(actual_yield, scale_range):
keys = list(_soviet_neutron_x.keys())
keys.sort() # keys may be returned in arbitrary order
if keys[-1] < actual_yield or actual_yield < keys[0]:
raise ValueOutsideGraphError(actual_yield)
    for k in range(len(keys) - 1):  # keys[k + 1] is read below, so stop one early
k1 = keys[k]
k2 = keys[k + 1]
if k1 <= actual_yield <= k2:
xs1 = _soviet_neutron_x[k1]
ys1 = _soviet_neutron_y[k1]
xs2 = _soviet_neutron_x[k2]
ys2 = _soviet_neutron_y[k2]
if xs1[-1] < scale_range or scale_range < xs1[0] or xs2[-1] < scale_range or scale_range < xs2[0]:
raise ValueOutsideGraphError(scale_range)
y1 = np.interp(scale_range, xs1, ys1)
y2 = np.interp(scale_range, xs2, ys2)
return 10**np.interp(actual_yield, [k1, k2], [y1, y2])
def _soviet_gamma(actual_yield, scale_range):
keys = list(_soviet_gamma_x.keys())
keys.sort() # keys may be returned in arbitrary order
if keys[-1] < actual_yield or actual_yield < keys[0]:
raise ValueOutsideGraphError(actual_yield)
    for k in range(len(keys) - 1):  # keys[k + 1] is read below, so stop one early
k1 = keys[k]
k2 = keys[k + 1]
if k1 <= actual_yield <= k2:
xs1 = _soviet_gamma_x[k1]
ys1 = _soviet_gamma_y[k1]
xs2 = _soviet_gamma_x[k2]
ys2 = _soviet_gamma_y[k2]
if xs1[-1] < scale_range or scale_range < xs1[0] or xs2[-1] < scale_range or scale_range < xs2[0]:
raise ValueOutsideGraphError(scale_range)
y1 = np.interp(scale_range, xs1, ys1)
y2 = np.interp(scale_range, xs2, ys2)
return 10**np.interp(actual_yield, [k1, k2], [y1, y2])
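All three lookups share the same scheme: interpolate along each of the two bracketing yield curves, then interpolate between the curves and undo the log10 scaling. A condensed sketch of that core (hypothetical table arguments; assumes `import numpy as np`; bounds checks omitted):

def _bilinear_log_lookup(actual_yield, scale_range, xs_tab, ys_tab):
    # xs_tab/ys_tab: dicts keyed by yield, holding range samples and log10(dose) samples
    keys = sorted(xs_tab)
    k1 = max(k for k in keys if k <= actual_yield)
    k2 = min(k for k in keys if k >= actual_yield)
    y1 = np.interp(scale_range, xs_tab[k1], ys_tab[k1])
    y2 = np.interp(scale_range, xs_tab[k2], ys_tab[k2])
    if k1 == k2:  # exact key hit, nothing to blend
        return 10 ** y1
    return 10 ** np.interp(actual_yield, [k1, k2], [y1, y2])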
# These functions adjust doses on the basis of the season-dependent scales
# found in the graphs
def __call__(self):
# Read in the ds
ds = load(self.data_file)
ds.setup_deprecated_fields()
exact = self.get_analytical_solution()
ad = ds.all_data()
position = ad['x']
for k in self.fields:
field = ad[k].d
for xmin, xmax in zip(self.left_edges, self.right_edges):
mask = (position >= xmin)*(position <= xmax)
exact_field = np.interp(position[mask], exact['pos'], exact[k])
myname = "ShockTubeTest_%s" % k
# yield test vs analytical solution
yield AssertWrapper(myname, assert_allclose, field[mask],
exact_field, self.rtol, self.atol)
def interpolate_ages(data, file_stars, interp_tb=None, interp_ages=None,
current_time=None):
if interp_tb is None:
t_stars, a_stars = read_star_field(file_stars,
field="t_stars")
# timestamp of file should match amr timestamp
if current_time:
tdiff = YTQuantity(b2t(t_stars), 'Gyr') - current_time.in_units('Gyr')
if np.abs(tdiff) > 1e-4:
mylog.info("Timestamp mismatch in star " +
"particle header: %s", tdiff)
mylog.info("Interpolating ages")
interp_tb, interp_ages = b2t(data)
interp_tb = YTArray(interp_tb, 'Gyr')
interp_ages = YTArray(interp_ages, 'Gyr')
temp = np.interp(data, interp_tb, interp_ages)
return interp_tb, interp_ages, temp
def magic3(self):
time_end = 5123
    real_hofong_fix_pts = pd.read_csv('20161012_HoFong/control_points_coodination.csv').sort_index(ascending=False)  # DataFrame.sort() was removed from pandas; sort_index() is the modern equivalent
real_hofong_fix_pts['N'] = real_hofong_fix_pts['N'] - real_hofong_fix_pts['N'][129]
real_hofong_fix_pts['E'] = real_hofong_fix_pts['E'] - real_hofong_fix_pts['E'][129] # last data name=2717, index=129
N_diff = np.diff(real_hofong_fix_pts['N'])
E_diff = np.diff(real_hofong_fix_pts['E'])
hofong_deg = np.rad2deg(np.arctan2(N_diff, E_diff))
hofong_deg = hofong_deg - hofong_deg[0]
hofong_deg_diff = np.cumsum(np.diff(hofong_deg))
interp_hofong = np.interp(np.arange(100), np.arange(hofong_deg_diff.size), hofong_deg_diff)
#plt.plot(hofong_deg, label='hahaxd')
#plt.plot(hofong_deg_diff, label='hehexd')
    plt.plot(interp_hofong, label='interp_hofong')  # give the legend an entry
plt.legend()
plt.show()
def generate_dist_per_sec(self):
time_end= int(np.amax(self.raw_data['time']))
#===== acc =====
    # interpolate the x- and y-axis acceleration samples onto a uniform 10 ms grid and normalize them
ax_interp_10ms = self.acc_normalize(np.interp(np.arange(0.0,time_end,0.01), self.raw_data['time'], self.raw_data['ax']))
ay_interp_10ms = self.acc_normalize(np.interp(np.arange(0.0,time_end,0.01), self.raw_data['time'], self.raw_data['ay']))
rxy_interp_10ms = np.sqrt(ax_interp_10ms**2 + ay_interp_10ms**2)
plt.plot(ax_interp_10ms, c='b')
plt.plot(ay_interp_10ms, c='g')
plt.plot(self.detrend_1d(rxy_interp_10ms, time_lst=np.arange(0.0,time_end,0.01)), c='k')
plt.show()
axy, vxy, sxy = self.another_integral(rxy_interp_10ms, time_lst= np.arange(0.0,time_end,0.01))
return axy, vxy, sxy
def convolve(self, wavelengths, densities):
# define short names for the involved wavelength grids
wa = wavelengths
wb = self._Wavelengths
# create a combined wavelength grid, restricted to the overlapping interval
w1 = wa[ (wa>=wb[0]) & (wa<=wb[-1]) ]
w2 = wb[ (wb>=wa[0]) & (wb<=wa[-1]) ]
w = np.unique(np.hstack((w1,w2)))
if len(w) < 2: return 0
# log-log interpolate SED and transmission on the combined wavelength grid
# (use scipy interpolation function for SED because np.interp does not support broadcasting)
F = np.exp(interp1d(np.log(wa), _log(densities), copy=False, bounds_error=False, fill_value=0.)(np.log(w)))
T = np.exp(np.interp(np.log(w), np.log(wb), _log(self._Transmission), left=0., right=0.))
# perform the integration
if self._PhotonCounter:
return np.trapz(x=w, y=w*F*T) / self._IntegratedTransmission
else:
return np.trapz(x=w, y=F*T) / self._IntegratedTransmission
## This function calculates and returns the integrated value for a given spectral energy distribution over the
# filter's wavelength range.
def integrate(self, wavelengths, densities):
# define short names for the involved wavelength grids
wa = wavelengths
wb = self._Wavelengths
# create a combined wavelength grid, restricted to the overlapping interval
w1 = wa[(wa >= wb[0]) & (wa <= wb[-1])]
w2 = wb[(wb >= wa[0]) & (wb <= wa[-1])]
w = np.unique(np.hstack((w1, w2)))
if len(w) < 2: return 0
# log-log interpolate SED and transmission on the combined wavelength grid
# (use scipy interpolation function for SED because np.interp does not support broadcasting)
F = np.exp(interp1d(np.log(wa), _log(densities), copy=False, bounds_error=False, fill_value=0.)(np.log(w)))
T = np.exp(np.interp(np.log(w), np.log(wb), _log(self._Transmission), left=0., right=0.))
# perform the integration
if self._PhotonCounter: return np.trapz(x=w, y=w * F * T)
else: return np.trapz(x=w, y=F * T)
## This private helper function returns the natural logarithm for positive values, and a large negative number
# (but not infinity) for zero or negative values.
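The `_log` helper referenced above is not included in this excerpt; a minimal sketch consistent with the comment's description (assumes `import numpy as np` and an ndarray argument):

def _log(X):
    # natural log for positive entries; a large negative number (not -inf) elsewhere,
    # so that np.exp() of interpolated values cleanly underflows to zero
    zeromask = X <= 0
    logX = np.empty(X.shape)
    logX[zeromask] = -750.   # np.exp(-750.) underflows to exactly 0.0
    logX[~zeromask] = np.log(X[~zeromask])
    return logX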