# From Bayesian_Source_Space_Connectivity_InvWishart_Prior.py
import numpy as np

def get_neg_log_post_grad_Qu(Phi, G, MMT, q, Sigma_E, L, Sigma_J, nu, V, GL, prior_on=False):
    """
    Gradient of the negative log posterior with respect to Phi, the
    lower-triangular Cholesky factor of Qu (Qu = Phi Phi^T).
    All other variables are held fixed.
    """
    p = Phi.shape[0]
    Qu = Phi.dot(Phi.T)
    # sensor covariance: Sigma_E + G diag(Sigma_J) G^T + GL Qu GL^T
    G_Sigma_G = np.zeros(MMT.shape)
    for i in range(Sigma_J.size):
        G_Sigma_G += Sigma_J[i] * np.outer(G[:, i], G[:, i])
    cov = Sigma_E + G_Sigma_G + GL.dot(Qu).dot(GL.T)
    inv_cov = np.linalg.inv(cov)
    GLT_inv_cov = np.dot(GL.T, inv_cov)
    invQ = np.linalg.inv(Qu)
    if prior_on:
        # likelihood term plus inverse-Wishart prior term:
        # d/dQu [(nu+p+1) log|Qu| + tr(V Qu^{-1})] = (nu+p+1) Qu^{-1} - Qu^{-1} V Qu^{-1}
        grad0 = (q * GL.T.dot(inv_cov).dot(GL) - GLT_inv_cov.dot(MMT).dot(GLT_inv_cov.T)
                 + invQ.dot((nu + p + 1) * np.eye(p) - V.dot(invQ)))
    else:
        # likelihood term only: q GL^T cov^{-1} GL - GL^T cov^{-1} MMT cov^{-1} GL
        grad0 = q * GL.T.dot(inv_cov).dot(GL) - GLT_inv_cov.dot(MMT).dot(GLT_inv_cov.T)
    # chain rule from Qu = Phi Phi^T to Phi: grad_Phi = 2 * grad_Qu * Phi
    grad1 = 2.0 * grad0.dot(Phi)
    # Phi is lower triangular, so only its lower triangle carries free parameters
    grad = np.tril(grad1)
    return grad
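#==============================================================================
# A minimal, hypothetical usage sketch (not part of the original file): the
# dimensions and random inputs below are made up purely to illustrate the
# expected shapes of the arguments and of the returned gradient.
def _example_grad_Qu_usage():
    rng = np.random.RandomState(0)
    n, m, p, q = 6, 10, 3, 20                        # sensors, sources, ROI dim, samples (hypothetical)
    G = rng.randn(n, m)                              # forward (gain) matrix
    L = rng.randn(m, p)                              # source-to-ROI mapping, so GL = G L
    GL = G.dot(L)
    M = rng.randn(n, q)                              # sensor data, q samples
    MMT = M.dot(M.T)
    Sigma_E = np.eye(n)                              # sensor noise covariance
    Sigma_J = np.ones(m)                             # per-source variances
    Phi = np.tril(rng.randn(p, p), -1) + np.eye(p)   # lower-triangular factor of Qu
    nu, V = p + 2, np.eye(p)                         # inverse-Wishart prior parameters
    grad = get_neg_log_post_grad_Qu(Phi, G, MMT, q, Sigma_E, L, Sigma_J,
                                    nu, V, GL, prior_on=True)
    assert grad.shape == (p, p)                      # lower-triangular gradient w.r.t. Phi
    return grad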
#==============================================================================
# gradient descent optimization, using back track
# only update Qu
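# The gradient-descent function that this banner introduces is not included in
# this excerpt. What follows is only a minimal sketch of such an update, under
# the assumption that a callable `neg_log_post(Phi)` returning the scalar
# negative log posterior is defined elsewhere; the name, defaults, and helper
# below are hypothetical.
def _backtrack_update_Phi(Phi, grad, neg_log_post, step=1.0, beta=0.5, c=1e-4, max_iter=50):
    """
    One backtracking (Armijo) gradient-descent step on the lower-triangular
    factor Phi; only Qu = Phi Phi^T is updated, all other variables stay fixed.
    """
    f0 = neg_log_post(Phi)
    g2 = np.sum(grad ** 2)                  # squared gradient norm for the Armijo test
    for _ in range(max_iter):
        Phi_new = Phi - step * grad
        # accept the step once it yields a sufficient decrease
        if neg_log_post(Phi_new) <= f0 - c * step * g2:
            return Phi_new, step
        step *= beta                        # otherwise shrink the step and retry
    return Phi, 0.0                         # no acceptable step found; keep the old Phi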