def compute_log_lik_exp(self, m, v, y):
    # Expected probit log-likelihood E_{t ~ N(m, v)}[log Phi(y * t)], approximated
    # with Gauss-Hermite quadrature, together with its gradients w.r.t. m and v.
    if m.ndim == 2:
        gh_x, gh_w = self._gh_points(GH_DEGREE)
        gh_x = gh_x[:, np.newaxis, np.newaxis]
        gh_w = gh_w[:, np.newaxis, np.newaxis] / np.sqrt(np.pi)
        v_expand = v[np.newaxis, :, :]
        m_expand = m[np.newaxis, :, :]
        # Quadrature nodes mapped to N(m, v): t = x * sqrt(2 v) + m
        ts = gh_x * np.sqrt(2 * v_expand) + m_expand
        logcdfs = norm.logcdf(ts * y)
        prods = gh_w * logcdfs
        loglik = np.sum(prods)
        pdfs = norm.pdf(ts * y)
        cdfs = norm.cdf(ts * y)
        grad_cdfs = y * gh_w * pdfs / cdfs
        dts_dm = 1
        dts_dv = 0.5 * gh_x * np.sqrt(2 / v_expand)
        dm = np.sum(grad_cdfs * dts_dm, axis=0)
        dv = np.sum(grad_cdfs * dts_dv, axis=0)
    else:
        gh_x, gh_w = self._gh_points(GH_DEGREE)
        gh_x = gh_x[:, np.newaxis, np.newaxis, np.newaxis]
        gh_w = gh_w[:, np.newaxis, np.newaxis, np.newaxis] / np.sqrt(np.pi)
        v_expand = v[np.newaxis, :, :, :]
        m_expand = m[np.newaxis, :, :, :]
        ts = gh_x * np.sqrt(2 * v_expand) + m_expand
        logcdfs = norm.logcdf(ts * y)
        prods = gh_w * logcdfs
        # Average over the extra leading (sample) dimension before summing
        prods_mean = np.mean(prods, axis=1)
        loglik = np.sum(prods_mean)
        pdfs = norm.pdf(ts * y)
        cdfs = norm.cdf(ts * y)
        grad_cdfs = y * gh_w * pdfs / cdfs
        dts_dm = 1
        dts_dv = 0.5 * gh_x * np.sqrt(2 / v_expand)
        dm = np.sum(grad_cdfs * dts_dm, axis=0) / m.shape[0]
        dv = np.sum(grad_cdfs * dts_dv, axis=0) / m.shape[0]
    return loglik, dm, dv
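The method above depends on class helpers (self._gh_points, GH_DEGREE) that are not shown here. As a minimal standalone sketch of the same Gauss-Hermite idea, cross-checked against Monte Carlo, one could write the following (gauss_hermite_expected_logcdf and the chosen degree are illustrative, not part of the original code):

import numpy as np
from scipy.stats import norm

def gauss_hermite_expected_logcdf(m, v, y, degree=20):
    # E_{t ~ N(m, v)}[log Phi(y * t)] via Gauss-Hermite quadrature,
    # using the change of variables t = x * sqrt(2 v) + m.
    gh_x, gh_w = np.polynomial.hermite.hermgauss(degree)
    ts = gh_x[:, None] * np.sqrt(2.0 * v) + m
    return np.sum(gh_w[:, None] / np.sqrt(np.pi) * norm.logcdf(y * ts), axis=0)

# Cross-check against a plain Monte Carlo estimate
m, v, y = np.array([0.3]), np.array([0.5]), 1.0
samples = np.random.randn(200000) * np.sqrt(v) + m
print(gauss_hermite_expected_logcdf(m, v, y))   # quadrature estimate
print(np.mean(norm.logcdf(y * samples)))        # Monte Carlo, should agree closely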
def __init__(self, K, Y, init=None, threshold=1e-9):
    # Laplace approximation for GP probit classification: Newton iterations for the
    # posterior mode, with the inner system B s = W^{1/2} K b solved by conjugate gradients.
    N = np.shape(K)[0]
    f = np.zeros((N, 1))
    converged = False
    k = 0
    innerC = 0
    for i in range(N):
        pdfDiff = norm.logpdf(f) - norm.logcdf(Y * f)
        W = np.exp(2 * pdfDiff) + Y * f * np.exp(pdfDiff)
        Wsqrt = np.sqrt(W)
        Wdiag = np.diag(Wsqrt.flatten())
        B = np.identity(N) + np.dot(Wdiag, np.dot(K, Wdiag))
        grad = Y * np.exp(pdfDiff)
        b = W * f + grad
        interim = np.dot(Wdiag, np.dot(K, b))
        cgRes = Cg(B, interim, threshold=threshold)
        s1 = cgRes.result
        innerC = innerC + cgRes.iterations
        a = b - Wsqrt * s1
        if converged:
            break
        f_prev = f
        f = np.dot(K, a)
        diff = f - f_prev
        if np.dot(diff.T, diff).flatten() < threshold * N or innerC > 15000:
            converged = True
        k = k + 1
    self.result = f
    self.iterations = k + innerC
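The Cg solver and the enclosing class are defined elsewhere. As a hedged, self-contained sketch, the same Newton update for the probit Laplace mode can be written with a dense solve in place of conjugate gradients (laplace_mode_probit and the toy RBF kernel are illustrative names and data, not part of the original source):

import numpy as np
from scipy.stats import norm

def laplace_mode_probit(K, Y, iters=50, tol=1e-9):
    # Newton iteration for the posterior mode of a GP probit model,
    # solving (I + W^{1/2} K W^{1/2}) s = W^{1/2} K b directly at each step.
    N = K.shape[0]
    f = np.zeros((N, 1))
    for _ in range(iters):
        pdfDiff = norm.logpdf(f) - norm.logcdf(Y * f)
        W = np.exp(2 * pdfDiff) + Y * f * np.exp(pdfDiff)   # -d^2/df^2 log Phi(y f)
        Wsqrt = np.sqrt(W)
        B = np.identity(N) + Wsqrt * K * Wsqrt.T            # I + W^{1/2} K W^{1/2}
        b = W * f + Y * np.exp(pdfDiff)
        s = np.linalg.solve(B, Wsqrt * (K @ b))
        a = b - Wsqrt * s
        f_new = K @ a
        if np.sum((f_new - f) ** 2) < tol * N:
            f = f_new
            break
        f = f_new
    return f

# Toy usage: RBF kernel on 1-D inputs with +/-1 labels
X = np.linspace(-2, 2, 30)[:, None]
K = np.exp(-0.5 * (X - X.T) ** 2) + 1e-8 * np.eye(30)
Y = np.sign(X)
f_mode = laplace_mode_probit(K, Y)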
def __init__(self, K, Y, P, init=None, threshold=1e-9, precon=None):
    # Same Laplace-mode Newton iteration as above, but the inner linear system
    # is solved with preconditioned conjugate gradients using P's inverse preconditioner.
    N = np.shape(K)[0]
    f = np.zeros((N, 1))
    converged = False
    k = 0
    innerC = 0
    for i in range(N):
        pdfDiff = norm.logpdf(f) - norm.logcdf(Y * f)
        W = np.exp(2 * pdfDiff) + Y * f * np.exp(pdfDiff)
        Wsqrt = np.sqrt(W)
        Wdiag = np.diag(Wsqrt.flatten())
        B = np.identity(N) + np.dot(Wdiag, np.dot(K, Wdiag))
        grad = Y * np.exp(pdfDiff)
        b = W * f + grad
        interim = np.dot(Wdiag, np.dot(K, b))
        pcgRes = RegularPcg(B, interim, None, threshold=threshold,
                            preconInv=P.get_laplace_inversion(W, Wsqrt))
        s1 = pcgRes.result
        innerC = innerC + pcgRes.iterations
        a = b - Wsqrt * s1
        if converged:
            break
        f_prev = f
        f = np.dot(K, a)
        diff = f - f_prev
        if np.dot(diff.T, diff).flatten() < threshold * N or innerC > 15000:
            converged = True
        k = k + 1
    self.result = f
    self.iterations = k + innerC
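The preconditioner object P is external to this snippet. A minimal sketch of one possible choice, a Jacobi (diagonal) preconditioner for B = I + W^{1/2} K W^{1/2}, is shown below; jacobi_precon_inverse is a hypothetical helper, not the actual P.get_laplace_inversion:

import numpy as np

def jacobi_precon_inverse(K, W):
    # Inverse of the diagonal (Jacobi) preconditioner of B = I + W^{1/2} K W^{1/2};
    # the diagonal entries of B are 1 + W_i * K_ii.
    d = 1.0 + W.flatten() * np.diag(K)
    return np.diag(1.0 / d)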
def integrateNormalDensity(lb, ub, mu=0, sigma=1):
    # Log-probability that a N(mu, sigma^2) variable falls in [lb, ub],
    # computed stably as log(Phi(ub) - Phi(lb)) via log1p.
    import math
    from scipy.stats import norm
    assert not (ub < lb)
    lessThanUpper = norm.logcdf(ub, loc=mu, scale=sigma)
    lessThanLower = norm.logcdf(lb, loc=mu, scale=sigma)
    return lessThanUpper + np.log1p(-math.exp(lessThanLower - lessThanUpper))
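A quick usage check (the expected value is a standard-normal fact, not taken from the original source):

import numpy as np
from scipy.stats import norm

# P(-1 <= Z <= 1) for a standard normal is about 0.6827
log_p = integrateNormalDensity(-1.0, 1.0)
print(np.exp(log_p))                       # ~0.6827
print(norm.cdf(1.0) - norm.cdf(-1.0))      # same quantity, computed directly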
def _scores(self, range_, k, cumsum, m_log_combs, n_log_combs):
    """Calculates the score function for all possible numbers of rows (or columns)."""
    avgs = cumsum / (range_ * k)
    log_probs = norm.logcdf(-avgs * np.sqrt(range_ * k))
    return -log_probs - m_log_combs - n_log_combs[k - 1]
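For intuition, a standalone illustration of the probability term the score uses (not part of the original class): the log-probability that the mean of n independent N(0, 1) values exceeds a threshold a is norm.logcdf(-a * sqrt(n)), since that mean is distributed as N(0, 1/n):

import numpy as np
from scipy.stats import norm

n, a = 50, 0.4                       # 50 standard-normal entries, average threshold 0.4
log_tail = norm.logcdf(-a * np.sqrt(n))
print(log_tail)                      # log P(mean of n N(0,1) draws exceeds a)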
def _setup(self):
    super(MinValueEntropySearch, self)._setup()
    # Apply Gumbel sampling
    m = self.models[0]
    valid = self.feasible_data_index()
    # Work with feasible data
    X = self.data[0][valid, :]
    N = np.shape(X)[0]
    Xrand = RandomDesign(self.gridsize, self._domain).generate()
    fmean, fvar = m.predict_f(np.vstack((X, Xrand)))
    idx = np.argmin(fmean[:N])
    right = fmean[idx].flatten()  # + 2*np.sqrt(fvar[idx]).flatten()
    left = right
    probf = lambda x: np.exp(np.sum(norm.logcdf(-(x - fmean) / np.sqrt(fvar)), axis=0))
    i = 0
    while probf(left) < 0.75:
        left = 2. ** i * np.min(fmean - 5. * np.sqrt(fvar)) + (1. - 2. ** i) * right
        i += 1
    # Binary search for 3 percentiles
    q1, med, q2 = map(lambda val: bisect(lambda x: probf(x) - val, left, right, maxiter=10000, xtol=0.01),
                      [0.25, 0.5, 0.75])
    beta = (q1 - q2) / (np.log(np.log(4. / 3.)) - np.log(np.log(4.)))
    alpha = med + beta * np.log(np.log(2.))
    # obtain samples from y*
    mins = -np.log(-np.log(np.random.rand(self.num_samples).astype(np_float_type))) * beta + alpha
    self.samples.set_data(mins)
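As a standalone illustration of the last step (assumed here, not taken from the original source): -np.log(-np.log(u)) with u ~ Uniform(0, 1) is an inverse-CDF draw from a standard Gumbel distribution, which the affine transform with alpha and beta then relocates and rescales:

import numpy as np

alpha, beta = 0.0, 1.0                        # illustrative location and scale
u = np.random.rand(100000)
gumbel_samples = -np.log(-np.log(u)) * beta + alpha
# For a standard Gumbel: mean ~ 0.5772 (Euler-Mascheroni), variance ~ pi^2 / 6
print(gumbel_samples.mean(), gumbel_samples.var())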