def _integrate(self, y, f):
"""
Integrate `y` along axis=1, i.e. over freq axis for all T.
Parameters
----------
y : 2d array (nT, ndos) where nT = len(self.T), ndos = len(self.dos)
f : self.f, (len(self.dos),)
Returns
-------
array (nT,)
"""
mask = np.isnan(y)
if mask.any():
self._printwarn("HarmonicThermo._integrate: warning: "
" %i NaNs found in y!" %len(mask))
if self.fixnan:
self._printwarn("HarmonicThermo._integrate: warning: "
"fixing %i NaNs in y!" %len(mask))
y[mask] = self.nanfill
# this call signature works for scipy.integrate,{trapz,simps}
return self.integrator(y, x=f, axis=1)
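# --- Hedged usage sketch (not part of the class above) ---
# Standalone illustration of the row-wise Simpson integration that
# _integrate performs, without the HarmonicThermo plumbing; the shapes
# and values below are made up for demonstration.
import numpy as np
from scipy.integrate import simps  # named `simpson` in newer SciPy

f = np.linspace(0.0, 10.0, 201)        # frequency grid, shape (ndos,)
y = np.random.rand(5, f.size)          # one row per temperature, shape (nT, ndos)
y[np.isnan(y)] = 0.0                   # stand-in for the nanfill handling above
print(simps(y, x=f, axis=1).shape)     # (5,): one integral per temperature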
def calc_l1_norm_itae(meas_values, desired_values, step_width):
"""
Calculate the L1-Norm of the ITAE (Integral of Time-multiplied Absolute
value of Error).
Args:
step_width (float): Time difference between measurements.
desired_values (array-like): Desired values.
meas_values (array-like): Measured values.
"""
def e_func(_t):
_idx = np.floor_divide(_t, step_width).astype(int)
# weight the absolute error by time (_t), as the ITAE requires
e = _t * np.abs(desired_values[_idx, ..., 0]
- meas_values[_idx, ..., 0])
return e
t = np.array([x * step_width for x in range(len(desired_values))])
err = e_func(t)
l1norm_itae = simps(err, t)
return l1norm_itae
def calc_l1_norm_abs(meas_values, desired_values, step_width):
"""
Calculate the L1-Norm of the absolute error.
Args:
step_width (float): Time difference between measurements.
desired_values (array-like): Desired values.
meas_values (array-like): Measured values.
"""
def e_func(_t):
_idx = np.floor_divide(_t, step_width).astype(int)
e = np.abs(desired_values[_idx, ..., 0]
- meas_values[_idx, ..., 0])
return e
t = np.array([x * step_width for x in range(len(desired_values))])
err = e_func(t)
l1norm_abs = simps(err, t)
return l1norm_abs
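# --- Hedged usage sketch (assumed signal shapes) ---
# Exercises both error norms on a synthetic first-order step response;
# the arrays are shaped (N, 1) so the `[..., 0]` indexing above applies.
import numpy as np

dt = 0.01
t = np.arange(0.0, 5.0, dt)
desired = np.ones((t.size, 1))
measured = (1.0 - np.exp(-2.0 * t))[:, None]

print(calc_l1_norm_itae(measured, desired, dt))
print(calc_l1_norm_abs(measured, desired, dt))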
def cumulative_log_smooth(self, x_min, x_max, npoints=200):
"""
This function ...
:param x_min:
:param x_max:
:param npoints:
:return:
"""
x_smooth, y_smooth = self.smooth_values_log(x_min, x_max, npoints)
# Normalize y by calculating the integral
#total = simps(y_smooth, x_smooth)
# NO, by calculating the sum (why?)
total = np.sum(y_smooth)
# Now, y should be normalized within x_min:x_max
y_smooth /= total
# Return the cumulative distribution
return x_smooth, np.cumsum(y_smooth)
# -----------------------------------------------------------------
def delta_star(y, v):
"""Compute the displacement thickness using Simpson's method.
Parameters
----------
y : ndarray
The independent variable.
v : ndarray
The velocity values.
Returns
-------
float
The value of the displacement thickness.
"""
return simps((1-v/v[-1]), x=y)
def momentum_thickness(y, v):
"""Compute the momentum thickness using Simpson's method.
Parameters
----------
y : ndarray
The independent variable.
v : ndarray
The velocity values.
Returns
-------
float
The value of the momentum thickness.
"""
return simps(v/v[-1]*(1-v/v[-1]), x=y)
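# --- Hedged usage sketch (illustrative velocity profile) ---
# Evaluates both thicknesses for a 1/7th-power-law profile on a unit
# boundary layer; any measured y/v arrays would work the same way.
import numpy as np

y = np.linspace(0.0, 1.0, 101)     # wall-normal coordinate
v = y ** (1.0 / 7.0)               # u/U_e

print(delta_star(y, v))            # analytic value: 1/8
print(momentum_thickness(y, v))    # analytic value: 7/72 ~ 0.0972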
def integrate(self, wavelength_grid):
"""
Integrate the spectrum flux over the specified grid of wavelengths.
Parameters
----------
wavelength_grid : quantity_like
The wavelength grid over which to integrate the flux.
Returns
-------
integrated_flux : :class:`~astropy.units.Quantity`
"""
grid = u.Quantity(wavelength_grid)
grid = grid.to(self.wavelength.unit)
interpolator = interp1d(self.wavelength.value, self.flux.value,
kind='cubic')
new_flux = interpolator(grid.value)
return simps(new_flux, x=grid.value) * self.flux.unit * grid.unit
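# --- Hedged standalone sketch (unit handling omitted) ---
# The same interpolate-then-integrate pattern as above, but with plain
# float arrays instead of astropy Quantities; values are made up.
import numpy as np
from scipy.interpolate import interp1d
from scipy.integrate import simps

wavelength = np.linspace(4000.0, 7000.0, 50)
flux = np.exp(-0.5 * ((wavelength - 5500.0) / 300.0) ** 2)

grid = np.linspace(4500.0, 6500.0, 500)   # must lie inside the sampled range
new_flux = interp1d(wavelength, flux, kind='cubic')(grid)
print(simps(new_flux, x=grid))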
def blade_elt_area(self, leaf_key, Lshape, Lwshape, sr_base, sr_top):
""" surface of a blade element, positioned with two relative curvilinear absisca"""
S=0
sr_base = min([1,max([0,sr_base])])
sr_top = min([1,max([sr_base,sr_top])])
if leaf_key is not None:
s,r = self.get_sr(leaf_key)
sre = [sr for sr in zip(s,r) if (sr_base < sr[0] < sr_top)]
if len(sre) > 0:
se,re = zip(*sre)
snew = [sr_base] + list(se) + [sr_top]
rnew = [numpy.interp(sr_base,s,r)] + list(re) + [numpy.interp(sr_top,s,r)]
else:
snew = [sr_base, sr_top]
rnew = [numpy.interp(sr_base,s,r), numpy.interp(sr_top,s,r)]
S = simps(rnew,snew) * Lshape * Lwshape
return S
def calculate_auc(self, y, dx):
""" calculate the Area Under the Curve using the average of the
estimated areas by the two composites the Simpsons's and the
trapezoidal rules.
"""
AUC = (simps(y, dx=dx) + trapz(y, dx=dx)) / 2.
return AUC
def simpson_area(self, y, dx):
""" Calculate the area under a curve using the composite Simpson's rule.
"""
return simps(y, dx=dx)
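# --- Hedged usage sketch ---
# The averaged-rule AUC computed by calculate_auc, shown directly for
# y = x**2 on [0, 1] with uniform spacing dx (exact area: 1/3).
import numpy as np
from scipy.integrate import simps, trapz

x = np.linspace(0.0, 1.0, 101)
y = x ** 2
dx = x[1] - x[0]
print((simps(y, dx=dx) + trapz(y, dx=dx)) / 2.0)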
def _get_smoothed_histogram(self, chain, parameter):
data = chain.get_data(parameter)
smooth = chain.config["smooth"]
if chain.grid:
bins = get_grid_bins(data)
else:
bins = chain.config['bins']
bins, smooth = get_smoothed_bins(smooth, bins, data, chain.weights)
hist, edges = np.histogram(data, bins=bins, density=True, weights=chain.weights)  # 'density' replaces the removed 'normed' keyword
edge_centers = 0.5 * (edges[1:] + edges[:-1])
xs = np.linspace(edge_centers[0], edge_centers[-1], 10000)
if smooth:
hist = gaussian_filter(hist, smooth, mode=self.parent._gauss_mode)
kde = chain.config["kde"]
if kde:
kde_xs = np.linspace(edge_centers[0], edge_centers[-1], max(200, int(bins.max())))
ys = MegKDE(data, chain.weights, factor=kde).evaluate(kde_xs)
area = simps(ys, x=kde_xs)
ys = ys / area
ys = interp1d(kde_xs, ys, kind="linear")(xs)
else:
ys = interp1d(edge_centers, hist, kind="linear")(xs)
cs = ys.cumsum()
cs /= cs.max()
return xs, ys, cs
def cumulative_smooth(self, x_min, x_max, npoints=200):
"""
This function ...
:param x_min:
:param x_max:
:param npoints:
:return:
"""
if self._cum_smooth is None:
x_smooth, y_smooth = self.smooth_values(x_min, x_max, npoints)
# Set negative values to zero
y_smooth[y_smooth < 0.0] = 0.0
# Normalize y by calculating the integral
#total = simps(y_smooth, x_smooth)
# NO, by calculating the sum (why?)
total = np.sum(y_smooth)
# Now, y should be normalized within x_min:x_max
y_smooth /= total
# Return the cumulative distribution
#return x_smooth, np.cumsum(y_smooth)
self._cum_smooth = (x_smooth, np.cumsum(y_smooth))
return self._cum_smooth
# -----------------------------------------------------------------
def convolveFilter2D(cube,wl,filter):
xaxis = len(cube[0,0,0:])
yaxis = len(cube[0,0:,0])
zaxis = len(cube[0:,0,0])
input = np.loadtxt(filter)
response_wl = input[:,0] * 1.e-4
response_trans = input[:,1]
minwl = response_wl[0]
maxwl = response_wl[len(response_wl)-1]
intwl = np.copy(wl)
intwl[intwl > maxwl] = -1
intwl[intwl < minwl] = -1
wlrange = intwl > 0
intwl = intwl[wlrange]
transmission = np.zeros(len(wl))
interpfunc = interpolate.interp1d(response_wl,response_trans, kind='linear')
transmission[wlrange] = interpfunc(intwl)
tot_trans = integrate.simps(transmission,wl)
slice = np.zeros((yaxis,xaxis))
for i in range(0,yaxis):
for j in range(0,xaxis):
sed = cube[0:,i,j]
slice[i,j] = integrate.simps(transmission*sed,wl)
return slice/tot_trans
#plt.plot(wl,transmission,'bo')
#plt.plot(wl,transmission,'k-')
#plt.plot(response_wl,response_trans,'r+')
#plt.xscale('log')
#plt.show()
def integrateHeating(cube,dustwls):
xaxis = len(cube[0,0,0:])
yaxis = len(cube[0,0:,0])
zaxis = len(cube[0:,0,0])
slice = np.zeros((yaxis,xaxis))
for i in range(0,yaxis):
for j in range(0,xaxis):
sed = cube[0:,i,j]
slice[i,j] = integrate.simps(sed,dustwls)
return slice
def convolveFilter(flux,wl,filter):
input = np.loadtxt(filter)
response_wl = input[:,0] * 1.e-4
response_trans = input[:,1]
minwl = response_wl[0]
maxwl = response_wl[len(response_wl)-1]
intwl = np.copy(wl)
intwl[intwl > maxwl] = -1
intwl[intwl < minwl] = -1
wlrange = intwl > 0
intwl = intwl[wlrange]
transmission = np.zeros(len(flux))
interpfunc = interpolate.interp1d(response_wl,response_trans, kind='linear')
transmission[wlrange] = interpfunc(intwl)
tot_trans = integrate.simps(transmission,wl)
tot_flux = integrate.simps(transmission*flux,wl)
return tot_flux/tot_trans
#plt.plot(wl,transmission,'bo')
#plt.plot(wl,transmission,'k-')
#plt.plot(response_wl,response_trans,'r+')
#plt.xscale('log')
#plt.show()
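# --- Hedged standalone sketch (synthetic filter instead of a file) ---
# The band-averaging formula used by the two convolveFilter routines above:
# <F> = integral(T * F dwl) / integral(T dwl). A top-hat transmission curve
# stands in for one loaded from disk; all values are illustrative.
import numpy as np
from scipy import integrate

wl = np.linspace(0.1, 100.0, 2000)                             # wavelength grid
sed = wl ** -2.0                                               # synthetic SED
transmission = np.where((wl > 20.0) & (wl < 25.0), 1.0, 0.0)   # top-hat "filter"

tot_trans = integrate.simps(transmission, wl)
print(integrate.simps(transmission * sed, wl) / tot_trans)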
def NPV_prob(rho,g,eff,H,delta_t,p,Q,mean,std,a,b):
t = np.arange(0,100000,0.5)
PV = simps(np.exp(-t*r)*rho*g*eff*H*delta_t*p/1000*EV_flow(mean,std,Q), t)
NPVal = PV - cost(a, b, Q)
return -1*NPVal
def NPV_prob(rho,g,eff,H,delta_t,p,Q,mean,std,a,b):
t = np.arange(0,100000,0.5)
PV = simps(np.exp(-t*r)*rho*g*eff*H*delta_t*p/1000*EV_flow(mean,std,Q), t)
NPVal = PV - cost(a, b, Q)
return NPVal
# declare matrices to store data
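# --- Hedged sketch of the discounting integral (placeholder values) ---
# NPV_prob relies on module-level names (r, EV_flow, cost) that are not
# defined in these snippets; the standalone version below uses a made-up
# discount rate and a constant cash flow purely to illustrate the
# present-value integral PV = integral(exp(-r*t) * CF dt) that simps evaluates.
import numpy as np
from scipy.integrate import simps

r = 0.05                         # hypothetical discount rate
cashflow = 1000.0                # hypothetical constant annual cash flow

t = np.arange(0.0, 200.0, 0.5)   # years
print(simps(np.exp(-t * r) * cashflow, t))   # ~ cashflow / r = 20000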
def LMPressureGradientAvg(x_min,x_max,Ref,G,D,Tbubble,Tdew,C=None,satTransport=None):
"""
Returns the average pressure gradient between qualities of x_min and x_max.
To obtain the pressure gradient for a given value of x, pass it in as x_min and x_max
Required parameters:
* x_min : The minimum quality for the range [-]
* x_max : The maximum quality for the range [-]
* Ref : String with the refrigerant name
* G : Mass flux [kg/m^2/s]
* D : Diameter of tube [m]
* Tbubble : Bubblepoint temperature of refrigerant [K]
* Tdew : Dewpoint temperature of refrigerant [K]
Optional parameters:
* C : The coefficient in the pressure drop
* satTransport : A dictionary with the keys 'mu_f', 'mu_g', 'v_f', 'v_g' for the saturation properties, so they can be calculated once and passed in for a slight improvement in efficiency
"""
def LMFunc(x):
dpdz,alpha=LockhartMartinelli(Ref,G,D,x,Tbubble,Tdew,C,satTransport)
return dpdz
## Use Simpson's rule to calculate the average pressure gradient
## Can't use adaptive quadrature since the function is not sufficiently smooth
## Not clear why it is not sufficiently smooth at x>0.9
if x_min==x_max:
return LMFunc(x_min)
else:
#Calculate the transport properties once
satTransport={}
satTransport['v_f']=1/PropsSI('D','T',Tbubble,'Q',0.0,Ref)
satTransport['v_g']=1/PropsSI('D','T',Tdew,'Q',1.0,Ref)
satTransport['mu_f']=PropsSI('V','T',Tbubble,'Q',0.0,Ref)
satTransport['mu_g']=PropsSI('V','T',Tdew,'Q',1.0,Ref)
xx=np.linspace(x_min,x_max,30)
DP=np.zeros_like(xx)
for i in range(len(xx)):
DP[i]=LMFunc(xx[i])
return -simps(DP,xx)/(x_max-x_min)
def energy_radiated_approx2(self,trajectory, gamma, x, y, distance):
N = trajectory.nb_points()
n_chap = np.array([x - trajectory.x * codata.c, y - trajectory.y * codata.c, distance - trajectory.z * codata.c])
# R = np.zeros(n_chap.shape[1])
# for i in range(n_chap.shape[1]):
# R[i] = np.linalg.norm(n_chap[:, i])
# n_chap[:, i] /= R[i]
R = np.sqrt( n_chap[0]**2 + n_chap[1]**2 + n_chap[2]**2 )
n_chap[0,:] /= R
n_chap[1,:] /= R
n_chap[2,:] /= R
E = np.zeros((3,), dtype=complex)  # np.complex was removed from NumPy; the builtin complex is equivalent
integrand = np.zeros((3, N), dtype=complex)
A1 = (n_chap[0] * trajectory.a_x + n_chap[1] * trajectory.a_y + n_chap[2] * trajectory.a_z)
A2 = (n_chap[0] * (n_chap[0] - trajectory.v_x) + n_chap[1] * (n_chap[1] - trajectory.v_y)
+ n_chap[2] * (n_chap[2] - trajectory.v_z))
Alpha2 = np.exp(0. + 1j * self.photon_frequency * (trajectory.t + R / codata.c))
Alpha1 = (1.0 / (1.0 - n_chap[0] * trajectory.v_x
- n_chap[1] * trajectory.v_y - n_chap[2] * trajectory.v_z)) ** 2
integrand[0] += ((A1 * (n_chap[0] - trajectory.v_x) - A2 * trajectory.a_x)
) * Alpha2 * Alpha1
integrand[1] += ((A1 * (n_chap[1] - trajectory.v_y) - A2 * trajectory.a_y)
) * Alpha2 * Alpha1
integrand[2] += ((A1 * (n_chap[2] - trajectory.v_z) - A2 * trajectory.a_z)
) * Alpha2 * Alpha1
for k in range(3):
E[k] = np.trapz(integrand[k], trajectory.t)
#E[k] = integrate.simps(integrand[k], trajectory.t)
return E
def energy_radiated_approx(self, trajectory, gamma, x, y, distance):
N = trajectory.nb_points()
n_chap = np.array([x - trajectory.x * codata.c, y - trajectory.y * codata.c, distance - trajectory.z * codata.c])
R = np.sqrt( n_chap[0]**2 + n_chap[1]**2 + n_chap[2]**2 )
n_chap[0,:] /= R
n_chap[1,:] /= R
n_chap[2,:] /= R
E = np.zeros((3,), dtype=complex)
integrand = np.zeros((3, N), dtype=complex)
A1 = (n_chap[1] * trajectory.v_z - n_chap[2] * trajectory.v_y)
A2 = (-n_chap[0] * trajectory.v_z + n_chap[2] * trajectory.v_x)
A3 = (n_chap[0] * trajectory.v_y - n_chap[1] * trajectory.v_x)
Alpha2 = np.exp(0. + 1j * self.photon_frequency * (trajectory.t + R / codata.c))
integrand[0] -= (n_chap[1] * A3 - n_chap[2] * A2) * Alpha2
integrand[1] -= (- n_chap[0] * A3 + n_chap[2] * A1) * Alpha2
integrand[2] -= (n_chap[0] * A2 - n_chap[1] * A1) * Alpha2
for k in range(3):
E[k] = np.trapz(integrand[k], trajectory.t)
#E[k] = integrate.simps(integrand[k], trajectory.t)
E *= self.photon_frequency * 1j
terme_bord = np.full((3), 0. + 1j * 0., dtype=complex)
Alpha_1 = (1.0 / (1.0 - n_chap[0][-1] * trajectory.v_x[-1]
- n_chap[1][-1] * trajectory.v_y[-1] - n_chap[2][-1] * trajectory.v_z[-1]))
Alpha_0 = (1.0 / (1.0 - n_chap[0][0] * trajectory.v_x[0]
- n_chap[1][0] * trajectory.v_y[0] - n_chap[2][0] * trajectory.v_z[0]))
terme_bord += ((n_chap[1][-1] * A3[-1] - n_chap[2][-1] * A2[-1]) * Alpha_1 *
np.exp(1j * self.photon_frequency * (trajectory.t[-1] + R[-1] / codata.c)))
terme_bord -= ((n_chap[1][0] * A3[0] - n_chap[2][0] * A2[0]) * Alpha_0 *
np.exp(1j * self.photon_frequency * (trajectory.t[0] + R[0] / codata.c)))
E += terme_bord
return E
# energy radiated without the far-field approximation
def integration(self,is_quadrant=0,use_flux_per_mrad2_or_mm2=1):
if (self.X is None or self.Y is None):
raise Exception(" X and Y must be array for integration")
if self.X.shape != self.Y.shape:
raise Exception(" X and Y must have the same shape")
if len(self.X.shape)==2 :
X = np.linspace(self.X.min(), self.X.max(), self.X.shape[0])
Y = np.linspace(self.Y.min(), self.Y.max(), self.Y.shape[1])
res1=integrate.trapz(self.intensity, X)
res = integrate.trapz(res1, Y)
# res = integrate.simps(integrate.simps(self.intensity, X), Y)
# res = self.intensity.sum() * (self.X[1,0] - self.X[0,0]) * (self.Y[0,1] - self.Y[0,0]) # regular grid only
else : # X and Y are 1d array
if len(self.X) == 1:
res = self.intensity[0]
else: # arbitrary choice: integrate the intensity along the arc length of the (X, Y) path
XY = np.zeros_like(self.X)
for i in range(1, len(self.X)):
XY[i] = XY[i-1] + np.sqrt((self.X[i] - self.X[i-1]) ** 2 + (self.Y[i] - self.Y[i-1]) ** 2)
res = np.trapz(self.intensity, XY)
# Note that the value of flux is in phot/s/0.1%bw/mrad2 (or .../mm2) and
# our grid is in rad (or m), therefore we must account for this:
if use_flux_per_mrad2_or_mm2:
res *= 1e6
# in case the calculation is for a quadrant, the integral is four times the calculated value
if is_quadrant:
res *= 4
return res
def hist_with_err(x, xerr, bins=None, normed=False, step=False, **kwargs):
from scipy import integrate
# check inputs
assert( len(x) == len(xerr) ), 'data size mismatch'
_x = np.asarray(x).astype(float)
_xerr = np.asarray(xerr).astype(float)
# def the evaluation points
if (bins is None) | (not hasattr(bins, '__iter__')):
m = (_x - _xerr).min()
M = (_x + _xerr).max()
dx = M - m
m -= 0.2 * dx
M += 0.2 * dx
if bins is not None:
N = int(bins)
else:
N = 10
_xp = np.linspace(m, M, N)
else:
_xp = 0.5 * (bins[1:] + bins[:-1])
def normal(v, mu, sig):
# standard Gaussian: the exponent is -(v - mu)**2 / (2 * sig**2)
norm_pdf = 1. / (np.sqrt(2. * np.pi) * sig) * np.exp(-(v - mu) ** 2 / (2. * sig ** 2))
return norm_pdf / integrate.simps(norm_pdf, v)
_yp = np.array([normal(_xp, xk, xerrk) for xk, xerrk in zip(_x, _xerr) ]).sum(axis=0)
if normed:
_yp /= integrate.simps(_yp, _xp)
if step:
return steppify(_xp, _yp)
else:
return _xp, _yp
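# --- Hedged usage sketch (made-up measurements) ---
# Builds a smoothed, normalized density from ten noisy measurements by
# summing one Gaussian per point.
import numpy as np

rng = np.random.RandomState(0)
x = rng.normal(5.0, 1.0, size=10)     # measured values
xerr = np.full_like(x, 0.5)           # per-point uncertainties

xp, yp = hist_with_err(x, xerr, bins=50, normed=True)
# xp, yp sample the resulting density on a regular grid.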
def fit3(x, y, s, r, nb_points):
leaf, leaf_surface = fit2(x, y, s, r)
xn, yn, sn, rn = simplify(leaf, nb_points)
new_surface = simps(rn, sn)
scale_radius = leaf_surface / new_surface
rn *= scale_radius
return (xn, yn, sn, rn)
def form_factor(self):
"""
return form factor for each key in sr_db
"""
try:
return {k:simps(self.srdb[k]['r'], self.srdb[k]['s']) for k in self.srdb}
except TypeError:
return {k: simps(self.srdb[k][1], self.srdb[k][0]) for k in
self.srdb}
def integrate( self, age ):
# calculate range to integrate over
lower_bound = age - self.maxage
upper_bound = age - self.minage
# if upper_bound is negative then this is a later region and we are working on early ages
# If so, return zeros
if upper_bound < 0: return ( np.zeros( self.ls.size ), 0 )
# find things in the age range (include a little extra to account for rounding errors)
inds = np.where( (self.ages >= lower_bound-age*1e-5) & (self.ages <= upper_bound+age*1e-5) )[0]
# Simpson's rule is based on intervals, so include the SED one age lower if it exists;
# otherwise one interval will be missed at every boundary
if inds[0] > 0 and np.abs( self.ages[inds[0]] - lower_bound ) > 1e-5*age:
inds = np.append( inds[0]-1, inds )
weights = self.sfh_func( age-self.ages[inds] )
# if weights are all zero then there is no star formation in this region and therefore no need to integrate
if max( weights ) <= 0:
return ( np.zeros( self.ls.size ), 0 )
if self.has_dust:
# integrate weights*sed*dust
seds = integrate.simps( weights*self.seds[:,inds]*self.dust_func( self.ages[inds], self.ls ), x=self.ages[inds], even='avg' )
else:
# integrate weights*sed
seds = integrate.simps( weights*self.seds[:,inds], x=self.ages[inds], even='avg' )
# integrate weights*mass
mass = integrate.simps( weights*self.masses[inds], x=self.ages[inds], even='avg' ) if self.has_masses else 0
return ( seds, mass )
def norm_int(y, x, area=1.0, scale=True, func=simps):
"""Normalize integral area of y(x) to `area`.
Parameters
----------
x,y : numpy 1d arrays
area : float
scale : bool, optional
Scale x and y to the same order of magnitude before integration.
This may be necessary to avoid numerical trouble if x and y have very
different scales.
func : callable
Function to do integration (like scipy.integrate.{simps,trapz,...}).
Called as ``func(y,x)``. Default: simps
Returns
-------
scaled y
Notes
-----
The argument order y,x might be confusing. x,y would be more natural but we
stick to the order used in the scipy.integrate routines.
"""
if scale:
fx = np.abs(x).max()
fy = np.abs(y).max()
sx = x / fx
sy = y / fy
else:
fx = fy = 1.0
sx, sy = x, y
# Area under unscaled y(x).
_area = func(sy, sx) * fx * fy
return y*area/_area
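# --- Hedged usage sketch ---
# Rescale sin(x) on [0, pi] (exact area 2.0) so that its integral equals 5.0.
import numpy as np
from scipy.integrate import simps

x = np.linspace(0.0, np.pi, 201)
y = np.sin(x)
y2 = norm_int(y, x, area=5.0)
print(simps(y2, x))                 # ~ 5.0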