def min_on_call(F1, F2, dv1, dv2, rho, K1, K2, T):
    v1 = dv1 * np.sqrt(T)
    v2 = dv2 * np.sqrt(T)
    def int_func1(x):
        return scipy.stats.norm.cdf(((F1 - K1) - (F2 - K2) + (v1 * rho - v2) * x) / (v1 * np.sqrt(1 - rho**2))) \
            * (v2 * x + F2 - K2) * scipy.stats.norm.pdf(x)
    def int_func2(x):
        return scipy.stats.norm.cdf(((F2 - K2) - (F1 - K1) + (v2 * rho - v1) * x) / (v2 * np.sqrt(1 - rho**2))) \
            * (v1 * x + F1 - K1) * scipy.stats.norm.pdf(x)
    res1 = quad(int_func1, (K2 - F2) / v2, np.inf)
    res2 = quad(int_func2, (K1 - F1) / v1, np.inf)
    return res1[0] + res2[0]
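The snippets on this page assume the usual imports (numpy, scipy.stats, and quad from scipy.integrate). A minimal usage sketch for min_on_call, with purely illustrative parameter values that are not taken from the original source, might look like this:

import numpy as np
import scipy.stats
from scipy.integrate import quad

# Illustrative inputs: forwards F1/F2, volatility inputs dv1/dv2, correlation rho,
# strikes K1/K2, maturity T (all values below are assumptions for demonstration only).
price = min_on_call(F1=100.0, F2=105.0, dv1=15.0, dv2=20.0,
                    rho=0.5, K1=95.0, K2=100.0, T=1.0)
print(price)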
def __init__(self, mcmc_samples, bin_edges):
    """
    Histogram Inference a la Dan Foreman-Mackey
    Parameters:
    ===========
    - mcmc_samples: numpy array of shape (Nobs, Nsamples)
        MCMC samples for the thing you want to histogram
    - bin_edges: numpy.ndarray
        The edges of the histogram bins to use.
    """
    self.mcmc_samples = mcmc_samples
    self.bin_edges = bin_edges
    self.bin_centers = (self.bin_edges[:-1] + self.bin_edges[1:]) / 2
    self.bin_widths = np.diff(self.bin_edges)
    self.Nbins = self.bin_widths.size
    self.Nobs = self.mcmc_samples.shape[0]
    # Find which bin each sample falls in
    self.bin_idx = np.digitize(self.mcmc_samples, self.bin_edges) - 1
    # Determine the censoring function for each bin (used in the integral)
    self.censor_integrals = np.array([quad(func=self.censoring_fcn,
                                           a=left, b=right)[0] for (left, right) in
                                      zip(self.bin_edges[:-1], self.bin_edges[1:])])
    # Set values needed for multinest fitting
    self.n_params = self.Nbins
    self.param_names = [r'$\theta_{}$'.format(i) for i in range(self.Nbins)]
def integrate_gauss(x1, x2, amp, mean, sigma):
    """
    Integrate a gaussian between the points x1 and x2
    """
    gauss = lambda x, A, mu, sig: A * np.exp(-(x - mu)**2 / (2.0 * sig**2))
    if x1 < -1e6:
        x1 = -np.inf
    if x2 > 1e6:
        x2 = np.inf
    result = quad(gauss, x1, x2, args=(amp, mean, sigma))
    return result[0]
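As a quick sanity check (not part of the original source), integrating over the whole real line should reproduce the closed form A * sigma * sqrt(2*pi):

import numpy as np
from scipy.integrate import quad

amp, mean, sigma = 2.0, 1.0, 0.5
numeric = integrate_gauss(-np.inf, np.inf, amp, mean, sigma)
analytic = amp * sigma * np.sqrt(2.0 * np.pi)
print(numeric, analytic)   # the two values should agree to quad's tolerance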
def _sanity_test(self):
    # Marginal of x integrates to one.
    print(quad(lambda x: np.exp(self.logpdf_x(x)), self.D[0], self.D[1]))
    # Marginal of y integrates to one.
    print(quad(lambda y: np.exp(self.logpdf_y(y)), -1, 1))
    # Joint of x,y integrates to one; quadrature will fail for small noise.
    print(dblquad(
        lambda y, x: np.exp(self.logpdf_xy(x, y)), self.D[0], self.D[1],
        lambda x: -1, lambda x: 1))
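_sanity_test depends on the surrounding class's logpdf_x / logpdf_y / logpdf_xy and domain D, which are not shown here. A minimal self-contained analogue of the same check, assuming independent standard-normal marginals (which is not the original model), is:

import numpy as np
from scipy.stats import norm
from scipy.integrate import quad, dblquad

# Marginal should integrate to one.
print(quad(lambda x: norm.pdf(x), -np.inf, np.inf)[0])
# Joint of two independent marginals should integrate to approximately one
# (finite bounds cover essentially all of the mass).
print(dblquad(lambda y, x: norm.pdf(x) * norm.pdf(y),
              -5, 5, lambda x: -5, lambda x: 5)[0])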
def NPV(rho, g, eff, H, q_process, delta_t, p, Q, a, b):
    # r (discount rate), power() and cost() are assumed to be defined elsewhere in the module.
    integrand = lambda t: np.exp(-r * t) * power(rho, g, eff, H, q_process) * delta_t * p
    PV = quad(integrand, 0, np.inf)
    NPVal = PV[0] - cost(a, b, Q)
    return NPVal
# check the function, it does not look right!!!
def EV_flow(mean, std, Q):
    EV = quad(lambda x: lognorm_pdf(x, mean, std) * x, 0, Q)
    EV_2 = (1 - lognorm_cdf(Q, mean, std)) * Q
    EV_total = EV[0] + EV_2
    return EV_total
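EV_flow evaluates E[min(X, Q)] for a lognormally distributed flow X, split into the integral of x*f(x) over [0, Q] plus Q*(1 - F(Q)). The helpers lognorm_pdf / lognorm_cdf are not shown on this page; a sketch using scipy.stats.lognorm, under the assumption that `mean` and `std` are the log-space (underlying normal) parameters, would be:

import numpy as np
from scipy.stats import lognorm

def lognorm_pdf(x, mean, std):
    # Assumed parameterisation: mean/std of the underlying normal distribution.
    return lognorm.pdf(x, s=std, scale=np.exp(mean))

def lognorm_cdf(x, mean, std):
    return lognorm.cdf(x, s=std, scale=np.exp(mean))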
def Bertsch_MC_Average(x_min, x_max, Ref, G, Dh, q_flux, L, TsatL, TsatV):
    '''
    Returns the average heat transfer coefficient
    between qualities of x_min and x_max
    for Bertsch two-phase evaporation in micro-channel HX.
    '''
    if not x_min == x_max:
        # A proper range is given
        # (note: TsatL and TsatV are not forwarded through quad's args here)
        return quad(Bertsch_MC, x_min, x_max, args=(Ref, G, Dh, q_flux, L))[0] / (x_max - x_min)
    else:
        # A single value is given
        return Bertsch_MC(x_min, Ref, G, Dh, q_flux, L, TsatL, TsatV)
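The averaging pattern here is simply quad(f, a, b)[0] / (b - a). A tiny self-contained check of that pattern, unrelated to Bertsch_MC, against a function whose mean value is known:

from scipy.integrate import quad

f = lambda x: 3.0 * x**2          # mean value of 3x^2 on [0, 1] is exactly 1.0
avg = quad(f, 0.0, 1.0)[0] / (1.0 - 0.0)
print(avg)                        # ~1.0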
def consumer_surp(self):
    "Compute consumer surplus"
    # == Compute area under inverse demand function == #
    integrand = lambda x: (self.ad / self.bd) - (1 / self.bd) * x
    area, error = quad(integrand, 0, self.quantity())
    return area - self.price() * self.quantity()
def producer_surp(self):
    "Compute producer surplus"
    # == Compute area above inverse supply curve, excluding tax == #
    integrand = lambda x: -(self.az / self.bz) + (1 / self.bz) * x
    area, error = quad(integrand, 0, self.quantity())
    return (self.price() - self.tax) * self.quantity() - area
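With linear inverse demand p(q) = ad/bd - q/bd, consumer surplus is the area under the inverse demand curve up to the equilibrium quantity minus the spend p(Q)*Q. A standalone numeric check of the same quadrature, with illustrative values of ad, bd and Q (not taken from the class):

from scipy.integrate import quad

ad, bd = 10.0, 2.0                 # illustrative demand parameters
Q = 4.0                            # illustrative equilibrium quantity
p = ad / bd - Q / bd               # inverse demand evaluated at Q
area, _ = quad(lambda x: ad / bd - x / bd, 0.0, Q)
surplus = area - p * Q
print(surplus)                     # equals 0.5 * Q**2 / bd = 4.0 here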
def __init__(self, ap_paramList, ra1, dec1, ra2, dec2, background_density, z):
    '''
    @param ap_paramList[seed]: Seed for random number generator
    @param ra1: Left right ascension
    @param dec1: Bottom declination
    @param ra2: Right right ascension
    @param dec2: Top declination
    @param background_density: galaxy background density in galaxies/square degree
    @param z: Redshift of galaxy cluster
    '''
    self.ra1 = ra1
    self.dec1 = dec1
    self.ra2 = ra2
    self.dec2 = dec2
    self.background_density = background_density
    self.z = z
    self.__H0 = 70
    self.__Omega_m = 0.3
    self.__R0 = 2
    self.__Rs = 0.15 / 0.7
    self.__Rcore = 0.1 / 0.7
    self.__norm = 1 / quad(nfw, 0, self.__R0, args=(1, self.__Rs, self.__Rcore))[0]
    super(CatalogGenerator, self).__init__(ap_paramList)
def nfw_cumulative(self, R):
    '''
    Cumulative radial NFW distribution
    @param R: Radius
    @return float: Probability of being within R
    '''
    R0 = self.__R0
    norm = self.__norm
    Rs = self.__Rs
    Rcore = self.__Rcore
    def integrate(z):
        return quad(nfw, 0, z, args=(norm, Rs, Rcore))[0]
    if np.isscalar(R):
        R = np.array([float(R)])
    else:
        R = R.astype(float)  # np.float is deprecated/removed in recent NumPy
    res = np.piecewise(R, [R < 0.0, np.logical_and(R >= 0.0, R < R0), R >= R0],
                       [lambda z: z,
                        lambda z: np.fromiter(map(integrate, z), float),
                        lambda z: z / R0])
    if len(res) == 1:
        return res[0]
    else:
        return res
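Since nfw_cumulative rises monotonically from 0 at R = 0 to 1 at R = R0, one way to draw cluster-member radii from the profile is inverse-transform sampling with a root finder. The helper below is hypothetical (it is not part of the original class) and assumes it would be added as a method of CatalogGenerator, with scipy.optimize.brentq imported:

def sample_radius(self):
    # Hypothetical method: invert the cumulative NFW profile at a uniform random quantile.
    # Assumes placement inside CatalogGenerator so that self.__R0 and nfw_cumulative resolve.
    u = np.random.uniform(0.0, 1.0)
    return brentq(lambda R: self.nfw_cumulative(R) - u, 0.0, self.__R0)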
def get_Kstar_max(self):
    """ Value of K* (dimensionless vortex constant) for which the weight flow is maximum
    See also:
        NASA TN D-4421 eq. 23-25
    """
    # Find, by trial, the largest K* for which the integrand stays real (quad fails once it goes complex).
    max_val = 0
    while True:
        try:
            f = lambda Mstar: (1 - (max_val / self.Mlstar)**2 * Mstar**2) ** (1 / (self.gamma - 1))
            integrate.quad(f, self.Mlstar, self.Mustar)
        except:
            max_val -= 0.1
            break
        max_val += 0.1
    def func(Kstar_max):
        a1 = (1 - Kstar_max**2)**(1 / (self.gamma - 1))
        a2 = (1 - (Kstar_max**2) * (self.Mustar / self.Mlstar)**2)
        a3 = (1 / (self.gamma - 1))
        # Avoid raising a negative base to a fractional power (would yield a complex result)
        if a2 < 0:
            a2 = ((Kstar_max**2) * (self.Mustar / self.Mlstar)**2 - 1)
            a = a1 + a2**a3
        else:
            a = a1 - a2**a3
        f = lambda Mstar: (1 - (Kstar_max / self.Mlstar)**2 * Mstar**2) ** (1 / (self.gamma - 1))
        b = integrate.quad(f, self.Mlstar, self.Mustar)
        return a - b[0]
    Kstar_max = optimize.brentq(func, 0.1, max_val)
    return Kstar_max
def get_C(self):
    """ get reduction in maximum weight flow due to two-dimensional flow
    See also:
        NASA TN D-4421 eq. 34b
    """
    Kstar_max = self.get_Kstar_max()
    a = 1 - np.sqrt((self.gamma + 1) / (self.gamma - 1)) * ((self.gamma + 1) / 2)**(1 / (self.gamma - 1)) * (self.Mustar / (self.Mustar - self.Mlstar))
    f = lambda Mstar: Kstar_max * (1 - (Kstar_max / self.Mlstar)**2 * Mstar**2)**(1 / (self.gamma - 1))
    b = integrate.quad(f, self.Mlstar, self.Mustar)
    C = a * b[0]
    return C
def get_Q(self):
    """ get vortex flow parameter Q
    See also:
        NASA TN D-4421 eq. 34a
    """
    f = lambda Mstar: ((self.gamma + 1) / 2 - (self.gamma - 1) / 2 * Mstar**2)**(1 / (self.gamma - 1))
    a = integrate.quad(f, self.Mlstar, self.Mustar)
    Q = (self.Mlstar * self.Mustar) / (self.Mustar - self.Mlstar) * a[0]
    return Q
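Written out, the quadrature in get_Q evaluates (transcribed directly from the code above, not re-derived from NASA TN D-4421):

Q = \frac{M_l^{*} M_u^{*}}{M_u^{*} - M_l^{*}} \int_{M_l^{*}}^{M_u^{*}} \left( \frac{\gamma+1}{2} - \frac{\gamma-1}{2}\, M^{*\,2} \right)^{1/(\gamma-1)} dM^{*}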
def maximal_age(z):
    z = np.double(z)
    # Cosmological constants
    O_m = 0.266
    O_r = 0.
    O_k = 0.
    O_L = 1. - O_m
    H_0 = 74.3  # km/s/Mpc
    H_sec = H_0 / 3.0857e19
    secondsinyear = 31556926
    ageoftheuniverse = 13.798e9
    # Equation for the time elapsed between z and now
    # (the curvature term scales as (1+z)**2; O_k = 0 here anyway)
    a = 1 / (1 + z)
    E = O_m * (1 + z)**3 + O_r * (1 + z)**4 + O_k * (1 + z)**2 + O_L
    integrand = lambda z: 1 / (1 + z) / np.sqrt(O_m * (1 + z)**3 + O_r * (1 + z)**4 + O_k * (1 + z)**2 + O_L)
    # Integration
    z_obs = z
    z_cmb = 1089  # redshift of the CMB; an exaggerated upper bound on the possible formation redshift
    z_now = 0
    integral, error = quad(integrand, z_obs, z_cmb)
    #t = ageoftheuniverse - (integral * (1 / H_sec) / secondsinyear)
    t = (integral * (1 / H_sec)) / secondsinyear
    return t
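In formula form, the quantity being integrated is the look-back time between z_obs and z_cmb (transcribed from the code above; H_0 is converted to s^-1 as H_sec and the result is then converted to years):

t = \frac{1}{H_0} \int_{z_{\mathrm{obs}}}^{z_{\mathrm{cmb}}} \frac{dz'}{(1+z')\sqrt{\Omega_m (1+z')^3 + \Omega_r (1+z')^4 + \Omega_k (1+z')^2 + \Omega_\Lambda}}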
def z2Dlum(z):
    """
    Calculate luminosity distance from redshift.
    """
    # Cosmological constants
    O_m = 0.266
    O_r = 0.
    O_k = 0.
    O_L = 1. - O_m
    H_0 = 70.  # km/s/Mpc
    H_sec = H_0 / 3.0857e19
    # equation (the curvature term scales as (1+z)**2; O_k = 0 here anyway)
    a = 1 / (1 + z)
    E = O_m * (1 + z)**3 + O_r * (1 + z)**4 + O_k * (1 + z)**2 + O_L
    integrand = lambda z: 1 / np.sqrt(O_m * (1 + z)**3 + O_r * (1 + z)**4 + O_k * (1 + z)**2 + O_L)
    # integration
    z_obs = z
    z_now = 0
    c_cm = 2.997e10  # speed of light in cm/s
    integral = quad(integrand, z_now, z_obs)
    dlum_cm = (1 + z) * c_cm / H_sec * integral[0]
    dlum_Mpc = dlum_cm / 3.08567758e24
    return dlum_cm  # note: returns the distance in cm (dlum_Mpc is computed but not returned)
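Likewise, z2Dlum evaluates the standard luminosity distance as the comoving distance scaled by (1+z) (transcribed from the code above, in cm):

D_L = (1+z)\,\frac{c}{H_0} \int_{0}^{z} \frac{dz'}{\sqrt{\Omega_m (1+z')^3 + \Omega_r (1+z')^4 + \Omega_k (1+z')^2 + \Omega_\Lambda}}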