def optimize_hyper_hdp(self):
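    """Resample the HDP concentration hyperparameters alpha_0 and gamma.

    Both updates follow the auxiliary-variable scheme commonly used for
    Dirichlet-process concentration parameters (in the spirit of Escobar &
    West 1995 and Teh et al. 2006, appendix A), assuming Gamma(a_alpha,
    b_alpha) and Gamma(a_gmma, b_gmma) priors. Relies on module-level
    imports of numpy (np), numpy.random's binomial/beta/gamma, and a
    logger named lgg.
    """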
    # Optimize \alpha_0 (concentration of the group-level DPs).
    m_dot = self.msampler.m_dotk.sum()  # total table count m.. from the CRF representation
    alpha_0 = self.zsampler.alpha_0
    # Per-group data sizes n_j. TODO (@debug): account for masked rows/columns in the counts.
    n_jdot = np.array(self.zsampler.data_dims, dtype=float)
    # Auxiliary Bernoulli variables, one per group.
    u_j = binomial(1, alpha_0 / (n_jdot + alpha_0))
    try:
        # Auxiliary Beta variables v_j ~ Beta(alpha_0 + 1, n_j); raises if some n_j == 0.
        v_j = beta(alpha_0 + 1, n_jdot)
    except ValueError:
        lgg.warning('Unable to optimize MMSB hyperparameters: a group has no observations (n_j == 0).')
        return
    # alpha_0 | ... ~ Gamma(a_alpha + m.. - sum_j u_j, scale = 1 / (b_alpha - sum_j log v_j))
    shape_a = self.a_alpha + m_dot - u_j.sum()
    if shape_a <= 0:
        lgg.warning('Unable to optimize MMSB hyperparameters: non-positive Gamma shape for alpha_0 (%s).', shape_a)
        return
    new_alpha0 = gamma(shape_a, 1 / (self.b_alpha - np.log(v_j).sum()), size=3).mean()
    self.zsampler.alpha_0 = new_alpha0
    # Optimize \gamma (concentration of the top-level DP); same auxiliary-variable
    # scheme, with the total table count m.. playing the role of n_j.
    K = self.zsampler._K
    gmma = self.betasampler.gmma
    u = binomial(1, gmma / (m_dot + gmma))
    v = beta(gmma + 1, m_dot)
    # gamma | ... ~ Gamma(a_gmma + K - 1 + u, scale = 1 / (b_gmma - log v))
    new_gmma = gamma(self.a_gmma + K - 1 + u, 1 / (self.b_gmma - np.log(v)), size=3).mean()
    self.betasampler.gmma = new_gmma
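    # Note: both updates average three Gamma draws (size=3) instead of taking a
    # single draw, presumably to damp the noise in the resampled hyperparameters.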
    lgg.debug('hyper sample: alpha_0: %s gamma: %s', new_alpha0, new_gmma)
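

# For reference, the same auxiliary-variable update in isolation: a minimal,
# self-contained sketch for a single DP concentration parameter following
# Escobar & West (1995). It is not part of the sampler above; the function
# name, arguments and defaults are illustrative only.
import numpy as np

def sample_dp_concentration(alpha, n_clusters, n_obs, a=1.0, b=1.0, rng=None):
    """One Gibbs update of a DP concentration alpha under a Gamma(a, b) prior
    (b is a rate), given n_clusters occupied clusters over n_obs observations."""
    rng = np.random.default_rng() if rng is None else rng
    eta = rng.beta(alpha + 1.0, n_obs)                         # auxiliary variable
    odds = (a + n_clusters - 1.0) / (n_obs * (b - np.log(eta)))
    pi_eta = odds / (1.0 + odds)                               # mixture weight
    shape = a + n_clusters if rng.random() < pi_eta else a + n_clusters - 1.0
    return rng.gamma(shape, 1.0 / (b - np.log(eta)))           # numpy gamma uses scale = 1/rate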