def train(X, y):
    """Fit a Bayesian linear-regression model by the evidence approximation.

    Iteratively re-estimates the hyper-parameters alpha (weight-prior
    precision) and beta (noise precision) as in Bishop, PRML section 3.5.2,
    then returns the posterior over the regression weights.

    Parameters
    ----------
    X : (N, M) array
        Measurement/design matrix Phi (rows = samples, columns = features).
    y : (N,) array
        Observations of the target variable t.

    Returns
    -------
    (w_opt, alpha, beta, S_N)
        w_opt : posterior mean m_N of the weights (the point estimate),
        alpha, beta : converged hyper-parameters,
        S_N : (M, M) posterior covariance of the weights — together these
        define the predictive distribution.
    """
    Phi = X  # the measurement matrix of the input variables x (i.e., features)
    t = y    # the vector of observations for the target variable
    (N, M) = np.shape(Phi)
    # Initial values for the hyper-parameters alpha, beta.
    alpha = 5e-3
    beta = 5.0
    max_iter = 100
    tolerance = 1e-3
    k = 0
    PhiT_Phi = np.dot(np.transpose(Phi), Phi)
    # PhiT_Phi is symmetric positive semi-definite, so its singular values
    # s[i] coincide with its eigenvalues (the eigenvalues of Phi^T Phi).
    s = np.linalg.svd(PhiT_Phi, compute_uv=False)
    ab_old = np.array([alpha, beta])
    # Same shape as ab_old so the convergence norm below is well-defined
    # (the original (1, 2) shape only worked via broadcasting).
    ab_new = np.zeros(2)
    while k < max_iter and np.linalg.norm(ab_old - ab_new) > tolerance:
        k += 1
        try:
            # Posterior covariance: S_N^-1 = alpha*I + beta*Phi^T Phi (PRML 3.54).
            S_N = np.linalg.inv(alpha * np.eye(M) + beta * PhiT_Phi)
        except np.linalg.LinAlgError:
            print("******************************************************************************************************")
            print(" ALERT: LinearAlgebra Error detected!")
            print(" CHECK if your measurement matrix is not leading to a singular alpha*np.eye(M) + beta*PhiT_Phi")
            print(" GOODBYE and see you later. Exiting ...")
            print("******************************************************************************************************")
            sys.exit(-1)
        # Posterior mean: m_N = beta * S_N * Phi^T t (PRML 3.53).
        m_N = beta * np.dot(S_N, np.dot(np.transpose(Phi), t))
        # Effective number of well-determined parameters (PRML 3.91):
        # gamma = sum_i lambda_i / (alpha + lambda_i), where lambda_i are the
        # eigenvalues of beta*Phi^T Phi, i.e. beta*s[i].
        # BUG FIX: the original used beta*s[i]**2, double-squaring — s[i] are
        # already the eigenvalues of Phi^T Phi, not singular values of Phi.
        gamma = sum(beta * s[i] / (alpha + beta * s[i]) for i in range(M))
        #
        # Re-estimate alpha and beta (PRML 3.92 and 3.95).
        #
        ab_old = np.array([alpha, beta])
        alpha = gamma / np.inner(m_N, m_N)
        one_over_beta = 1.0 / (N - gamma) * sum(
            (t[n] - np.inner(m_N, Phi[n])) ** 2 for n in range(N))
        beta = 1.0 / one_over_beta
        ab_new = np.array([alpha, beta])
    # Recompute the posterior with the final (converged) hyper-parameters,
    # since S_N/m_N inside the loop used the pre-update alpha and beta.
    S_N = np.linalg.inv(alpha * np.eye(M) + beta * PhiT_Phi)
    m_N = beta * np.dot(S_N, np.dot(np.transpose(Phi), t))
    w_opt = m_N
    return (w_opt, alpha, beta, S_N)
##################################################################################