import math
import numpy as np

def __log_likelihood_factor__(self, samples_q, v_noise, X, wb, y):
    # Handle the case where we are optimizing the latent weighting distributions
    # (wb arrives as a single row rather than one row per datum).
    if wb.shape[0] == 1:
        if wb.shape[1] > self.num_latent_params:
            # wb packs several latent parameter vectors: reshape it into a matrix with
            # one row per latent group, then look up each datum's row via the group
            # index stored in the last column of X.
            Wb = np.reshape(wb, [-1, self.num_latent_params])
            latent_weights = np.array([Wb[int(X[tt, -1]), :] for tt in range(X.shape[0])])
            outputs = self.__predict__(samples_q, np.hstack([X[:, :-1], latent_weights]))
        else:
            # A single latent parameter vector shared by all data: tile it across rows.
            outputs = self.__predict__(samples_q, np.hstack([X, np.tile(wb, (X.shape[0], 1))]))
    else:
        # One latent parameter vector per datum: concatenate directly.
        outputs = self.__predict__(samples_q, np.hstack([X, wb]))
    # Elementwise Gaussian log-density of y under each of the num_weight_samples
    # predictions, with observation noise variance v_noise.
    return (-0.5 * np.log(2 * math.pi * v_noise)) \
        - (0.5 * (np.tile(np.expand_dims(y, axis=0), (self.num_weight_samples, 1, 1)) - outputs) ** 2) / v_noise
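
The returned quantity is the elementwise Gaussian log-density log N(y | outputs, v_noise) = -0.5 * log(2 * pi * v_noise) - (y - outputs)^2 / (2 * v_noise), broadcast over the weight samples. Below is a minimal self-contained sketch of that broadcasting, with dummy arrays standing in for self.__predict__ and the class attributes; all sizes here are assumptions for illustration only:

import math
import numpy as np

num_weight_samples, N, D_out = 10, 32, 1                 # assumed sizes
outputs = np.random.randn(num_weight_samples, N, D_out)  # stand-in for self.__predict__(...)
y = np.random.randn(N, D_out)
v_noise = 0.1                                            # observation noise variance

# Tile y to (num_weight_samples, N, D_out) and evaluate the Gaussian log-density.
y_tiled = np.tile(np.expand_dims(y, axis=0), (num_weight_samples, 1, 1))
log_lik = -0.5 * np.log(2 * math.pi * v_noise) - 0.5 * (y_tiled - outputs) ** 2 / v_noise
print(log_lik.shape)  # (10, 32, 1): one factor per weight sample and data point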