# Module-level imports required by this method (the surrounding file is
# matrix_factorization.py; the method belongs to its sampler class):
import math

import numpy as np
import scipy.linalg


def for_loop_update_row_param_blockwise(self, y_csr, phi_csr, mu0, c, v, r_prev, u_prev):
    """Blockwise update of the row biases r and row factors u.

    y_csr   : scipy.sparse CSR matrix of observations
    phi_csr : CSR matrix of observation precisions, assumed to share y_csr's
              sparsity pattern
    mu0, c  : global mean and column biases; v : column factor matrix
    r_prev, u_prev : previous draws, used by the relaxed update inside the loop
    """
    nrow = y_csr.shape[0]
    num_factor = v.shape[1]
    # Prior precision: one diagonal entry for the row bias, then num_factor
    # identical entries for the latent factors.
    prior_Phi = np.diag(np.hstack((self.prior_param['row_bias_scale'] ** -2,
                                   np.tile(self.prior_param['factor_scale'] ** -2, num_factor))))
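    # The loop below implements a conjugate Gaussian update (algebra
    # reconstructed from the code, not quoted from the source):
    #   Phi_i = prior_Phi + sum_{j in J_i} phi_ij * w_j w_j^T
    #   m_i   = Phi_i^{-1} * sum_{j in J_i} phi_ij * (y_ij - mu0 - c_j) * w_j
    # where J_i indexes the observed entries of row i and w_j = (1, v_j)
    # prepends a bias term to the column factors. Each row's draw is then
    # N(m_i, Phi_i^{-1}), combined with the relaxation step.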
    # Pre-allocate
    r = np.zeros(nrow)
    u = np.zeros((nrow, num_factor))
    # NOTE: The loop through 'i' is completely parallelizable.
    for i in range(nrow):
        j = y_csr[i, :].indices                   # columns observed in row i
        nnz_i = len(j)
        residual_i = y_csr[i, :].data - mu0 - c[j]
        phi_i = phi_csr[i, :].data.copy()
        # Per-row design matrix: a leading column of ones (bias) next to the factors.
        v_T = np.hstack((np.ones((nnz_i, 1)), v[j, :]))
        # Posterior precision: prior plus the precision-weighted sum of v_j * v_j.T.
        post_Phi_i = prior_Phi + np.dot(v_T.T, phi_i[:, np.newaxis] * v_T)
        post_mean_i = np.squeeze(np.dot(phi_i * residual_i, v_T))
        C, lower = scipy.linalg.cho_factor(post_Phi_i)
        post_mean_i = scipy.linalg.cho_solve((C, lower), post_mean_i)
        # Generate a Gaussian draw, recycling the Cholesky factorization from the
        # posterior mean computation.
        ru_i = math.sqrt(1 - self.relaxation ** 2) * scipy.linalg.solve_triangular(
            C, np.random.randn(len(post_mean_i)), lower=lower)
        # Relaxed update centered at the posterior mean.
        ru_i += post_mean_i + self.relaxation * (
            post_mean_i - np.concatenate(([r_prev[i]], u_prev[i, :])))
        r[i] = ru_i[0]
        u[i, :] = ru_i[1:]
    return r, u
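
# A minimal usage sketch. Everything below is illustrative, not from the
# source: the SimpleNamespace stands in for the real sampler class, and the
# synthetic data, prior scales, and relaxation value are assumptions. The
# method only requires an object carrying a prior_param dict (with
# 'row_bias_scale' and 'factor_scale') and a relaxation attribute, plus CSR
# observation and precision matrices on the same sparsity pattern.
if __name__ == '__main__':
    import scipy.sparse
    from types import SimpleNamespace

    rng = np.random.default_rng(0)
    nrow, ncol, num_factor = 100, 50, 3

    # Synthetic sparse observations with unit precisions on the same pattern.
    mask = scipy.sparse.random(nrow, ncol, density=0.05, format='csr', random_state=0)
    y_csr = mask.copy()
    y_csr.data = rng.normal(size=y_csr.nnz)
    phi_csr = mask.copy()
    phi_csr.data = np.ones(phi_csr.nnz)

    sampler = SimpleNamespace(
        prior_param={'row_bias_scale': 1.0, 'factor_scale': 1.0},
        relaxation=0.0)   # 0 reduces the relaxed draw to a plain Gibbs draw

    mu0 = 0.0
    c = np.zeros(ncol)
    v = rng.normal(size=(ncol, num_factor))
    r_prev = np.zeros(nrow)
    u_prev = np.zeros((nrow, num_factor))

    # Call the module-level function directly, passing the stand-in as self.
    r, u = for_loop_update_row_param_blockwise(
        sampler, y_csr, phi_csr, mu0, c, v, r_prev, u_prev)
    print(r.shape, u.shape)   # (100,) (100, 3)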