def loss(self, x, samples):
    """Return the negative log-likelihood of *samples* under the Beta proposal.

    The network output supplies, per sample, a location (column 0) and a
    certainty score (column 1).  The location is rescaled into [0, 1] using the
    sample's prior interval and interpreted as the Beta distribution's mode;
    the certainty (shifted by +2) acts as the concentration.  These map to the
    standard (alpha, beta) parameters via:

        alpha = mode * (concentration - 2) + 1
        beta  = (1 - mode) * (concentration - 2) + 1

    The per-sample log-pdf also subtracts log(prior_max - prior_min), the
    Jacobian of rescaling the value onto the unit interval.

    NOTE(review): assumes every sample's distribution exposes scalar
    ``prior_min`` / ``prior_max`` and that ``sample.value[0]`` lies inside
    that interval — confirm against the callers.
    """
    _, proposal_output = self.forward(x, samples)
    prior_mins = Variable(util.Tensor([s.distribution.prior_min for s in samples]), requires_grad=False)
    prior_maxs = Variable(util.Tensor([s.distribution.prior_max for s in samples]), requires_grad=False)
    batch_size = len(samples)
    # Rescale the proposed location into [0, 1]; treat it as the Beta mode.
    modes = (proposal_output[:, 0] - prior_mins) / (prior_maxs - prior_mins)
    certainties = proposal_output[:, 1] + 2
    # Mode/concentration -> (alpha, beta) parameterization.
    alphas = modes * (certainties - 2) + 1
    betas = (1 - modes) * (certainties - 2) + 1
    beta_funs = util.beta(alphas, betas)
    total_loss = 0
    for i in range(batch_size):
        dist = samples[i].distribution
        lo = dist.prior_min
        hi = dist.prior_max
        # Observed value mapped onto the unit interval of the Beta support.
        z = (samples[i].value[0] - lo) / (hi - lo)
        # log Beta pdf of z, including the change-of-variables term for the
        # rescaling from [lo, hi] to [0, 1]; util.epsilon guards log(0).
        log_pdf = ((alphas[i] - 1) * np.log(z + util.epsilon)
                   + (betas[i] - 1) * np.log(1 - z + util.epsilon)
                   - torch.log(beta_funs[i] + util.epsilon)
                   - np.log(hi - lo + util.epsilon))
        total_loss -= log_pdf
    return total_loss