def likelihood(self, hyp, X_batch, y_batch, monitor=False):
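    """One parametric-GP training step on a mini-batch (TF 1.x graph ops).

    Builds ops that (i) refresh the inverse inducing-point covariance
    self.K_u_inv, (ii) update the inducing-point posterior (m, S) from the
    batch via a Gaussian-conditioning correction, (iii) evaluate the negative
    log marginal likelihood (NLML), and (iv) take one optimizer step on the
    kernel hyper-parameters hyp. Assumes module-level `import numpy as np`,
    `import tensorflow as tf` (TF 1.x API) and a `kernel_tf(X1, X2, hyp)`
    covariance helper.
    """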
    M = self.M
    Z = self.Z
    m = self.m
    S = self.S
    jitter = self.jitter
    jitter_cov = self.jitter_cov

    N = tf.shape(X_batch)[0]

    logsigma_n = hyp[-1]
    sigma_n = tf.exp(logsigma_n)

    # Compute K_u_inv
    K_u = kernel_tf(Z, Z, hyp[:-1])
    L = tf.cholesky(K_u + np.eye(M)*jitter_cov)
    K_u_inv = tf.matrix_triangular_solve(tf.transpose(L),
                  tf.matrix_triangular_solve(L, np.eye(M), lower=True), lower=False)
    K_u_inv_op = self.K_u_inv.assign(K_u_inv)

    # Compute mu
    psi = kernel_tf(Z, X_batch, hyp[:-1])
    K_u_inv_m = tf.matmul(K_u_inv, m)
    MU = tf.matmul(tf.transpose(psi), K_u_inv_m)

    # Compute cov
    Alpha = tf.matmul(K_u_inv, psi)
    COV = kernel_tf(X_batch, X_batch, hyp[:-1]) - tf.matmul(tf.transpose(psi), tf.matmul(K_u_inv, psi)) + \
          tf.matmul(tf.transpose(Alpha), tf.matmul(S, Alpha))

    # Compute COV_inv
    LL = tf.cholesky(COV + tf.eye(N, dtype=tf.float64)*sigma_n + tf.eye(N, dtype=tf.float64)*jitter)
    COV_inv = tf.matrix_triangular_solve(tf.transpose(LL),
                  tf.matrix_triangular_solve(LL, tf.eye(N, dtype=tf.float64), lower=True), lower=False)

    # Compute cov(Z, X)
    cov_ZX = tf.matmul(S, Alpha)

    # Update m and S
    alpha = tf.matmul(COV_inv, tf.transpose(cov_ZX))
    m_new = m + tf.matmul(cov_ZX, tf.matmul(COV_inv, y_batch - MU))
    S_new = S - tf.matmul(cov_ZX, alpha)

    if not monitor:
        m_op = self.m.assign(m_new)
        S_op = self.S.assign(S_new)
    else:
        # Monitoring only: leave (m, S) untouched, but keep the returned
        # op group below well-defined.
        m_op = tf.no_op()
        S_op = tf.no_op()

    # Compute NLML
    K_u_inv_m = tf.matmul(K_u_inv, m_new)
    NLML = 0.5*tf.matmul(tf.transpose(m_new), K_u_inv_m) + tf.reduce_sum(tf.log(tf.diag_part(L))) + \
           0.5*np.log(2.*np.pi)*tf.cast(M, tf.float64)

    train = self.optimizer.minimize(NLML)
    nlml_op = self.nlml.assign(NLML[0, 0])

    return tf.group(*[train, m_op, S_op, nlml_op, K_u_inv_op])
Source file: parametric_GP.py
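For reference, the two `tf.matrix_triangular_solve` calls above invert a matrix from its Cholesky factor: for K = L L^T, K^{-1} = L^{-T} (L^{-1} I). A minimal standalone NumPy check of that identity (illustrative only, not part of parametric_GP.py):

import numpy as np

rng = np.random.default_rng(0)
A = rng.standard_normal((5, 5))
K = A @ A.T + 5.0*np.eye(5)          # symmetric positive definite test matrix
L = np.linalg.cholesky(K)            # K = L @ L.T with L lower triangular

# Two triangular solves, mirroring the K_u_inv / COV_inv computation above
K_inv = np.linalg.solve(L.T, np.linalg.solve(L, np.eye(5)))

assert np.allclose(K_inv @ K, np.eye(5))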
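Similarly, the `m_new` / `S_new` update has the form of Gaussian conditioning: the new covariance is a Schur complement, S - C V^{-1} C^T, which equals the inverse of the corresponding block of the joint precision matrix. A standalone NumPy sketch of that identity, with arbitrary block sizes and matrices (not the model's actual quantities):

import numpy as np

rng = np.random.default_rng(1)
d_u, d_f = 3, 4
A = rng.standard_normal((d_u + d_f, d_u + d_f))
Sigma = A @ A.T + (d_u + d_f)*np.eye(d_u + d_f)      # joint covariance, SPD
S = Sigma[:d_u, :d_u]                                # cov of the latent block
C = Sigma[:d_u, d_u:]                                # cross-covariance (plays the role of cov_ZX)
V = Sigma[d_u:, d_u:]                                # cov of the observed block

S_new = S - C @ np.linalg.solve(V, C.T)              # same form as the S update above

Lambda = np.linalg.inv(Sigma)                        # joint precision matrix
assert np.allclose(S_new, np.linalg.inv(Lambda[:d_u, :d_u]))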