def batch_log_pdf(self, x):
    """
    Ref: :py:meth:`pyro.distributions.distribution.Distribution.batch_log_pdf`
    """
    mu = self.mu.expand(self.shape(x))
    sigma = self.sigma.expand(self.shape(x))
    ll_1 = Variable(torch.Tensor([-0.5 * np.log(2.0 * np.pi)]).type_as(mu.data).expand_as(x))
    ll_2 = -torch.log(sigma * x)
    ll_3 = -0.5 * torch.pow((torch.log(x) - mu) / sigma, 2.0)
    batch_log_pdf = torch.sum(ll_1 + ll_2 + ll_3, -1)
    batch_log_pdf_shape = self.batch_shape(x) + (1,)
    return batch_log_pdf.contiguous().view(batch_log_pdf_shape)
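# Sanity check (editor's sketch, not from the original source; assumes scipy is
# installed): the three-term sum above is the LogNormal(mu, sigma) log-density,
# which can be compared against scipy.stats.lognorm.logpdf.
import numpy as np
from scipy.stats import lognorm

mu, sigma, x = 0.3, 0.8, 2.5
ll = -0.5 * np.log(2.0 * np.pi) - np.log(sigma * x) \
    - 0.5 * ((np.log(x) - mu) / sigma) ** 2
# scipy parameterizes LogNormal(mu, sigma) as lognorm(s=sigma, scale=exp(mu)).
assert np.isclose(ll, lognorm.logpdf(x, s=sigma, scale=np.exp(mu)))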
def analytic_mean(self):
    """
    Ref: :py:meth:`pyro.distributions.distribution.Distribution.analytic_mean`
    """
    return torch.exp(self.mu + 0.5 * torch.pow(self.sigma, 2.0))
def analytic_var(self):
    """
    Ref: :py:meth:`pyro.distributions.distribution.Distribution.analytic_var`
    """
    return (torch.exp(torch.pow(self.sigma, 2.0)) - Variable(torch.ones(1))) * \
        torch.pow(self.analytic_mean(), 2)
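# Editor's sketch (not from the original source; uses the modern tensor API
# rather than Variables): a Monte-Carlo check of the LogNormal formulas
# E[X] = exp(mu + sigma^2 / 2) and Var[X] = (exp(sigma^2) - 1) * E[X]^2.
import math
import torch

mu, sigma = 0.3, 0.8
samples = torch.exp(mu + sigma * torch.randn(1000000))
mean = math.exp(mu + 0.5 * sigma ** 2)
var = (math.exp(sigma ** 2) - 1) * mean ** 2
print(samples.mean().item(), mean)  # both roughly 1.86
print(samples.var().item(), var)    # both roughly 3.10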
def analytic_var(self):
    """
    Ref: :py:meth:`pyro.distributions.distribution.Distribution.analytic_var`
    """
    return torch.pow(self.b - self.a, 2) / 12
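# Editor's sketch (not from the original source): Var[Uniform(a, b)] = (b - a)^2 / 12,
# checked by simulation.
import torch

a, b = 2.0, 5.0
samples = a + (b - a) * torch.rand(1000000)
print(samples.var().item())   # roughly 0.75
print((b - a) ** 2 / 12)      # exactly 0.75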
def analytic_mean(self):
    """
    Ref: :py:meth:`pyro.distributions.distribution.Distribution.analytic_mean`
    """
    return torch.pow(self.lam, -1.0)
def analytic_var(self):
    """
    Ref: :py:meth:`pyro.distributions.distribution.Distribution.analytic_var`
    """
    return torch.pow(self.lam, -2.0)
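# Editor's sketch (not from the original source): for Exponential(lam),
# E[X] = 1 / lam and Var[X] = 1 / lam^2. Sampling by inverse CDF
# (X = -ln(U) / lam) keeps the check self-contained.
import torch

lam = 2.0
samples = -torch.log(torch.rand(1000000)) / lam
print(samples.mean().item(), 1 / lam)      # both roughly 0.5
print(samples.var().item(), 1 / lam ** 2)  # both roughly 0.25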
def _k(x, y, s):
    "Returns the matrix of k(x_i, y_j)."
    sq = _squared_distances(x, y) / (s ** 2)
    return torch.exp(-sq)  # torch.pow(1. / (1. + sq), .25)
def _k(x, y, s):
    "Returns the matrix of k(x_i, y_j)."
    sq = _squared_distances(x, y) / (s ** 2)
    # return torch.exp(-sq)
    return torch.pow(1. / (1. + sq), .25)
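# Both kernels call a _squared_distances helper that is not shown in this
# listing. A plausible reconstruction (editor's sketch, assuming x is (N, D)
# and y is (M, D)) computes the matrix of ||x_i - y_j||^2 via the usual
# expansion:
import torch

def _squared_distances(x, y):
    x2 = (x ** 2).sum(1).view(-1, 1)   # (N, 1)
    y2 = (y ** 2).sum(1).view(1, -1)   # (1, M)
    # |x|^2 - 2<x, y> + |y|^2, clamped so rounding never yields negatives.
    return (x2 - 2.0 * x @ y.t() + y2).clamp(min=0.0)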
def lp_pool2d(input, norm_type, kernel_size, stride=None, ceil_mode=False):
    kw, kh = utils._pair(kernel_size)
    # avg_pool2d divides by the window area, so multiply it back to recover
    # the windowed sum of x^p before taking the 1/p root.
    out = avg_pool2d(input.pow(norm_type), kernel_size, stride, 0, ceil_mode)
    return out.mul(kw * kh).pow(1. / norm_type)
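# Editor's sketch (not from the original source): LP pooling computes
# (sum over window of x^p)^(1/p). Check against the public API for p = 2
# on a single 2x2 window.
import torch
import torch.nn.functional as F

x = torch.rand(1, 1, 2, 2)
out = F.lp_pool2d(x, norm_type=2, kernel_size=2)
assert torch.isclose(out[0, 0, 0, 0], (x ** 2).sum().sqrt())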
def pairwise_distance(x1, x2, p=2, eps=1e-6):
    r"""
    Computes the batchwise pairwise distance between vectors x1, x2:

    .. math::
        \Vert x \Vert _p := \left( \sum_{i=1}^n \vert x_i \vert ^ p \right) ^ {1/p}

    Args:
        x1: first input tensor
        x2: second input tensor
        p: the norm degree. Default: 2
        eps: small value added for numerical stability. Default: 1e-6

    Shape:
        - Input: :math:`(N, D)` where `D = vector dimension`
        - Output: :math:`(N, 1)`

    >>> input1 = autograd.Variable(torch.randn(100, 128))
    >>> input2 = autograd.Variable(torch.randn(100, 128))
    >>> output = F.pairwise_distance(input1, input2, p=2)
    >>> output.backward()
    """
    assert x1.size() == x2.size(), "Input sizes must be equal."
    assert x1.dim() == 2, "Input must be a 2D matrix."
    diff = torch.abs(x1 - x2)
    out = torch.pow(diff + eps, p).sum(dim=1, keepdim=True)
    return torch.pow(out, 1. / p)
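# Editor's check (not from the original source): up to the eps term, this is
# just the p-norm of the difference, so it should agree with torch.norm.
import torch

x1, x2 = torch.randn(100, 128), torch.randn(100, 128)
d = torch.pow(torch.pow(torch.abs(x1 - x2) + 1e-6, 2).sum(dim=1, keepdim=True), 0.5)
ref = (x1 - x2).norm(p=2, dim=1, keepdim=True)
print((d - ref).abs().max().item())  # tiny, on the order of eps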
def forward(ctx, a, b):
    ctx.b_size = b.size()
    ctx.save_for_backward(a, b)
    return a.pow(b)
def backward(ctx, grad_output):
    a, b = ctx.saved_variables
    grad_a = grad_output.mul(b).mul(a.pow(b - 1))    # d(a^b)/da = b * a^(b-1)
    grad_b = grad_output.mul(a.pow(b)).mul(a.log())  # d(a^b)/db = a^b * ln(a)
    return grad_a, maybe_view(grad_b, ctx.b_size)
def backward(ctx, grad_output):
    if ctx.tensor_first:
        var, = ctx.saved_variables
        # d(var^c)/dvar = c * var^(c-1)
        return grad_output.mul(ctx.constant).mul(var.pow(ctx.constant - 1)), None
    else:
        var_result, = ctx.saved_variables
        # d(c^var)/dvar = c^var * ln(c); c^var was saved as the forward result.
        return None, grad_output.mul(var_result).mul_(math.log(ctx.constant))
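# Editor's sketch (not from the original source; modern autograd API): the
# closed-form gradients above can be validated numerically with gradcheck,
# keeping the base positive so the derivative in the exponent stays real.
import torch
from torch.autograd import gradcheck

a = (torch.rand(5, dtype=torch.double) + 0.1).requires_grad_()
b = torch.randn(5, dtype=torch.double, requires_grad=True)
assert gradcheck(torch.pow, (a, b))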
def updateOutput(self, input):
    assert input.dim() == 2
    input_size = input.size()
    if self._output is None:
        self._output = input.new()
    if self.norm is None:
        self.norm = input.new()
    if self.buffer is None:
        self.buffer = input.new()
    self._output.resize_as_(input)
    # specialization for the infinity norm
    if self.p == float('inf'):
        if self._indices is None:
            self._indices = torch.cuda.LongTensor() if input.is_cuda else torch.LongTensor()
        torch.abs(input, out=self.buffer)
        torch.max(self.buffer, 1, out=(self.norm, self._indices))
        self.norm.add_(self.eps)
    else:
        if self.normp is None:
            self.normp = input.new()
        if self.p % 2 != 0:
            torch.abs(input, out=self.buffer).pow_(self.p)
        else:
            torch.pow(input, self.p, out=self.buffer)
        torch.sum(self.buffer, 1, out=self.normp).add_(self.eps)
        torch.pow(self.normp, 1. / self.p, out=self.norm)
    torch.div(input, self.norm.view(-1, 1).expand_as(input), out=self._output)
    self.output = self._output.view(input_size)
    return self.output
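# Editor's sketch (not from the original source): for finite p, the modern
# equivalent of this legacy module is torch.nn.functional.normalize, which
# divides each row by its p-norm (clamped below by eps).
import torch
import torch.nn.functional as F

x = torch.randn(8, 16)
out = F.normalize(x, p=3.0, dim=1)
manual = x / x.abs().pow(3).sum(1, keepdim=True).pow(1. / 3)
print((out - manual).abs().max().item())  # ~0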
def test_reciprocal(self):
    a = torch.randn(100, 89)
    res_pow = torch.pow(a, -1)
    res_reciprocal = a.clone()
    res_reciprocal.reciprocal_()
    self.assertEqual(res_reciprocal, res_pow)
def test_cpow(self):
    self._test_cop(torch.pow, lambda x, y: float('nan') if x < 0 else math.pow(x, y))
# TODO: these tests only check if it's possible to pass a return value
# it'd be good to expand them
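# Editor's note (not from the original source): the reference lambda above
# treats any negative base as NaN, matching the backend this test targeted;
# current builds return a real result when the exponent is a mathematical
# integer.
import torch

print(torch.pow(torch.tensor(-2.0), torch.tensor(0.5)))  # tensor(nan)
print(torch.pow(torch.tensor(-2.0), torch.tensor(2.0)))  # tensor(4.)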