def log_pdf(self, y, *args, **kwargs):
"""
:param y: a value sampled from the transformed distribution
:type y: torch.autograd.Variable
:returns: the score (the log pdf) of y
:rtype: torch.autograd.Variable
Scores the sample by inverting the bijector(s) and computing the score using the score
of the base distribution and the log det jacobian
"""
    # Invert the bijectors in reverse order of application, so that
    # inverses[-1] lies in the support of the base distribution.
    inverses = []
    next_to_invert = y
    for bijector in reversed(self.bijectors):
        inverse = bijector.inverse(next_to_invert)
        inverses.append(inverse)
        next_to_invert = inverse
    # Score the fully inverted sample under the base distribution.
    log_pdf_base = self.base_dist.log_pdf(inverses[-1], *args, **kwargs)
    # Accumulate the log det Jacobian of each bijector, evaluated at that
    # bijector's output: y for the last bijector, then each partial inverse.
    log_det_jacobian = self.bijectors[-1].log_det_jacobian(y, *args, **kwargs)
    for bijector, inverse in zip(list(reversed(self.bijectors))[1:], inverses[:-1]):
        log_det_jacobian += bijector.log_det_jacobian(inverse, *args, **kwargs)
    return log_pdf_base - log_det_jacobian
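
As a sanity check, here is a minimal standalone sketch of the same change-of-variables computation in plain PyTorch. The ExpBijector class is hypothetical (not part of the code above); it just matches the inverse / log_det_jacobian interface the method assumes. With a single exp bijector over a standard normal base, the result should agree with the closed-form log pdf of a log-normal distribution.

import torch
from torch.distributions import Normal, LogNormal

class ExpBijector:
    # Hypothetical bijector y = exp(x), matching the interface used above.
    def inverse(self, y):
        return torch.log(y)

    def log_det_jacobian(self, y):
        # dy/dx = exp(x) = y, so log |det J| = log(y) at the output y.
        return torch.log(y)

base = Normal(0.0, 1.0)
bijector = ExpBijector()

y = torch.tensor(2.5)
x = bijector.inverse(y)  # invert the (single) bijector
log_pdf = base.log_prob(x) - bijector.log_det_jacobian(y)

# Agrees with the log pdf of LogNormal(0, 1) at the same point.
assert torch.allclose(log_pdf, LogNormal(0.0, 1.0).log_prob(y))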