Python log() usage examples

autoencoder.py (project: ladder, author: abhiskk)
def train_ae(self, train_X, optimizer, epochs, verbose=True):
        N = train_X.data.size()[0]
        num_batches = N // self.batch_size  # floor division: range() below needs an int
        for e in range(epochs):
            agg_cost = 0.
            for k in range(num_batches):
                start, end = k * self.batch_size, (k + 1) * self.batch_size
                bX = train_X[start:end]
                optimizer.zero_grad()
                Z = self.forward(bX)
                Z = self.decode(Z)
                # binary cross-entropy; assumes Z is a sigmoid output strictly inside (0, 1)
                loss = -torch.sum(bX * torch.log(Z) + (1.0 - bX) * torch.log(1.0 - Z), 1)
                cost = torch.mean(loss)
                cost.backward()
                optimizer.step()
                # accumulate the python float, not the Variable, so each
                # batch's graph can be freed
                agg_cost += cost.data[0]
            agg_cost /= num_batches
            if verbose:
                print("Epoch:", e, "cost:", agg_cost)
box_utils.py (project: ssd.pytorch, author: amdegroot)
def encode(matched, priors, variances):
    """Encode the variances from the priorbox layers into the ground truth boxes
    we have matched (based on jaccard overlap) with the prior boxes.
    Args:
        matched: (tensor) Coords of ground truth for each prior in point-form
            Shape: [num_priors, 4].
        priors: (tensor) Prior boxes in center-offset form
            Shape: [num_priors,4].
        variances: (list[float]) Variances of priorboxes
    Return:
        encoded boxes (tensor), Shape: [num_priors, 4]
    """

    # dist b/t match center and prior's center
    g_cxcy = (matched[:, :2] + matched[:, 2:])/2 - priors[:, :2]
    # encode variance
    g_cxcy /= (variances[0] * priors[:, 2:])
    # match wh / prior wh
    g_wh = (matched[:, 2:] - matched[:, :2]) / priors[:, 2:]
    g_wh = torch.log(g_wh) / variances[1]
    # return target for smooth_l1_loss
    return torch.cat([g_cxcy, g_wh], 1)  # [num_priors,4]
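
A quick made-up usage check for encode; matched boxes are point-form (xmin, ymin, xmax, ymax) and priors are center-offset form (cx, cy, w, h). A prior that coincides exactly with its matched box should encode to all zeros.

import torch

matched = torch.Tensor([[0.1, 0.1, 0.5, 0.5]])  # ground truth, point form
priors = torch.Tensor([[0.3, 0.3, 0.4, 0.4]])   # same box in center-offset form
variances = [0.1, 0.2]
print(encode(matched, priors, variances))       # -> all-zero [1, 4] regression target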


# Adapted from https://github.com/Hakuyume/chainer-ssd
model.py (project: torch_light, author: ne7ermore)
def _gaussian(self, enc_output):
        def latent_loss(mu, sigma):
            pow_mu = mu * mu
            pow_sigma = sigma * sigma
            return 0.5 * torch.mean(pow_mu + pow_sigma - torch.log(pow_sigma) - 1)

        mu = self._enc_mu(enc_output)
        sigma = torch.exp(.5 * self._enc_log_sigma(enc_output))
        self.latent_loss = latent_loss(mu, sigma)

        # reparameterization trick: z = mu + sigma * eps, with eps ~ N(0, I)
        # allocated with the same tensor type/device as the model parameters
        weight = next(self.parameters()).data
        std_z = Variable(weight.new(*sigma.size()), requires_grad=False)
        std_z.data.copy_(torch.from_numpy(
                np.random.normal(size=sigma.size())))

        return mu + sigma * std_z
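
For reference, latent_loss above is the closed-form KL divergence KL(N(mu, sigma^2) || N(0, 1)) averaged over dimensions; a standalone check with made-up values:

import torch

mu = torch.Tensor([0.5, -0.3])
sigma = torch.Tensor([1.2, 0.8])
kl = 0.5 * torch.mean(mu * mu + sigma * sigma - torch.log(sigma * sigma) - 1)
print(kl)  # same value latent_loss(mu, sigma) returns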
test_inference.py (project: pyro, author: uber)
def setUp(self):
        # normal-normal; known covariance
        self.lam0 = Variable(torch.Tensor([0.1, 0.1]))   # precision of prior
        self.mu0 = Variable(torch.Tensor([0.0, 0.5]))   # prior mean
        # known precision of observation noise
        self.lam = Variable(torch.Tensor([6.0, 4.0]))
        self.data = []
        self.data.append(Variable(torch.Tensor([-0.1, 0.3])))
        self.data.append(Variable(torch.Tensor([0.00, 0.4])))
        self.data.append(Variable(torch.Tensor([0.20, 0.5])))
        self.data.append(Variable(torch.Tensor([0.10, 0.7])))
        self.n_data = Variable(torch.Tensor([len(self.data)]))
        self.sum_data = self.data[0] + \
            self.data[1] + self.data[2] + self.data[3]
        self.analytic_lam_n = self.lam0 + \
            self.n_data.expand_as(self.lam) * self.lam
        self.analytic_log_sig_n = -0.5 * torch.log(self.analytic_lam_n)
        self.analytic_mu_n = self.sum_data * (self.lam / self.analytic_lam_n) +\
            self.mu0 * (self.lam0 / self.analytic_lam_n)
        self.batch_size = 4
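
The analytic quantities above follow the conjugate update for a normal likelihood with known precision: lam_n = lam0 + n * lam and mu_n = (lam * sum(x) + lam0 * mu0) / lam_n. A plain-float check of the first coordinate, using the numbers from this fixture:

lam0, mu0, lam = 0.1, 0.0, 6.0
data = [-0.1, 0.00, 0.20, 0.10]    # first coordinate of each observation
lam_n = lam0 + len(data) * lam     # 24.1, matches analytic_lam_n[0]
mu_n = (lam * sum(data) + lam0 * mu0) / lam_n
print(lam_n, mu_n)                 # 24.1 0.0497...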
test_inference.py (project: pyro, author: uber)
def setUp(self):
        # poisson-gamma model
        # gamma prior hyperparameter
        self.alpha0 = Variable(torch.Tensor([1.0]))
        # gamma prior hyperparameter
        self.beta0 = Variable(torch.Tensor([1.0]))
        self.data = []
        self.data.append(Variable(torch.Tensor([1.0])))
        self.data.append(Variable(torch.Tensor([2.0])))
        self.data.append(Variable(torch.Tensor([3.0])))
        self.n_data = len(self.data)
        sum_data = self.data[0] + self.data[1] + self.data[2]
        self.alpha_n = self.alpha0 + sum_data  # posterior alpha
        self.beta_n = self.beta0 + \
            Variable(torch.Tensor([self.n_data]))  # posterior beta
        self.log_alpha_n = torch.log(self.alpha_n)
        self.log_beta_n = torch.log(self.beta_n)
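
This is the gamma-Poisson conjugate update, alpha_n = alpha0 + sum(x) and beta_n = beta0 + n; a plain-float check:

alpha0, beta0 = 1.0, 1.0
data = [1.0, 2.0, 3.0]
print(alpha0 + sum(data), beta0 + len(data))  # posterior alpha 7.0, posterior beta 4.0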
test_inference.py (project: pyro, author: uber)
def setUp(self):
        # bernoulli-beta model
        # beta prior hyperparameter
        self.alpha0 = Variable(torch.Tensor([1.0]))
        self.beta0 = Variable(torch.Tensor([1.0]))  # beta prior hyperparameter
        self.data = []
        self.data.append(Variable(torch.Tensor([0.0])))
        self.data.append(Variable(torch.Tensor([1.0])))
        self.data.append(Variable(torch.Tensor([1.0])))
        self.data.append(Variable(torch.Tensor([1.0])))
        self.n_data = len(self.data)
        self.batch_size = None
        data_sum = self.data[0] + self.data[1] + self.data[2] + self.data[3]
        self.alpha_n = self.alpha0 + data_sum  # posterior alpha
        self.beta_n = self.beta0 - data_sum + \
            Variable(torch.Tensor([self.n_data]))
        # posterior beta
        self.log_alpha_n = torch.log(self.alpha_n)
        self.log_beta_n = torch.log(self.beta_n)
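
Likewise, the beta-Bernoulli update is alpha_n = alpha0 + (successes) and beta_n = beta0 + (failures):

alpha0, beta0 = 1.0, 1.0
data = [0.0, 1.0, 1.0, 1.0]
alpha_n = alpha0 + sum(data)            # 1 + 3 successes = 4.0
beta_n = beta0 + len(data) - sum(data)  # 1 + 1 failure   = 2.0
print(alpha_n, beta_n)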
test_inference.py (project: pyro, author: uber)
def setUp(self):
        # lognormal-normal model
        # putting some of the parameters inside of a torch module to
        # make sure that functionality is ok (XXX: do this somewhere else in the future)
        self.mu0 = Variable(torch.Tensor([1.0]))  # normal prior hyperparameter
        # normal prior hyperparameter
        self.tau0 = Variable(torch.Tensor([1.0]))
        # known precision for observation likelihood
        self.tau = Variable(torch.Tensor([2.5]))
        self.n_data = 2
        self.data = Variable(torch.Tensor([[1.5], [2.2]]))  # two observations
        self.tau_n = self.tau0 + \
            Variable(torch.Tensor([self.n_data])) * self.tau  # posterior tau
        mu_numerator = self.mu0 * self.tau0 + \
            self.tau * torch.sum(torch.log(self.data))
        self.mu_n = mu_numerator / self.tau_n  # posterior mu
        self.log_mu_n = torch.log(self.mu_n)
        self.log_tau_n = torch.log(self.tau_n)
test_tracegraph_elbo.py (project: pyro, author: uber)
def setUp(self):
        # normal-normal; known covariance
        self.lam0 = Variable(torch.Tensor([0.1, 0.1]))   # precision of prior
        self.mu0 = Variable(torch.Tensor([0.0, 0.5]))   # prior mean
        # known precision of observation noise
        self.lam = Variable(torch.Tensor([6.0, 4.0]))
        self.data = []
        self.data.append(Variable(torch.Tensor([-0.1, 0.3])))
        self.data.append(Variable(torch.Tensor([0.00, 0.4])))
        self.data.append(Variable(torch.Tensor([0.20, 0.5])))
        self.data.append(Variable(torch.Tensor([0.10, 0.7])))
        self.n_data = Variable(torch.Tensor([len(self.data)]))
        self.sum_data = self.data[0] + \
            self.data[1] + self.data[2] + self.data[3]
        self.analytic_lam_n = self.lam0 + \
            self.n_data.expand_as(self.lam) * self.lam
        self.analytic_log_sig_n = -0.5 * torch.log(self.analytic_lam_n)
        self.analytic_mu_n = self.sum_data * (self.lam / self.analytic_lam_n) +\
            self.mu0 * (self.lam0 / self.analytic_lam_n)
        self.verbose = True
test_tracegraph_elbo.py (project: pyro, author: uber)
def setUp(self):
        # normal-normal-normal; known covariance
        self.lam0 = Variable(torch.Tensor([0.1, 0.1]))  # precision of prior
        self.mu0 = Variable(torch.Tensor([0.0, 0.5]))   # prior mean
        # known precision of observation noise
        self.lam = Variable(torch.Tensor([6.0, 4.0]))
        self.data = []
        self.data.append(Variable(torch.Tensor([-0.1, 0.3])))
        self.data.append(Variable(torch.Tensor([0.00, 0.4])))
        self.data.append(Variable(torch.Tensor([0.20, 0.5])))
        self.data.append(Variable(torch.Tensor([0.10, 0.7])))
        self.n_data = Variable(torch.Tensor([len(self.data)]))
        self.sum_data = self.data[0] + \
            self.data[1] + self.data[2] + self.data[3]
        self.analytic_lam_n = self.lam0 + \
            self.n_data.expand_as(self.lam) * self.lam
        self.analytic_log_sig_n = -0.5 * torch.log(self.analytic_lam_n)
        self.analytic_mu_n = self.sum_data * (self.lam / self.analytic_lam_n) +\
            self.mu0 * (self.lam0 / self.analytic_lam_n)
        self.verbose = True
test_tracegraph_elbo.py (project: pyro, author: uber)
def setUp(self):
        # bernoulli-beta model
        # beta prior hyperparameter
        self.alpha0 = Variable(torch.Tensor([1.0]))
        self.beta0 = Variable(torch.Tensor([1.0]))  # beta prior hyperparameter
        self.data = []
        self.data.append(Variable(torch.Tensor([0.0])))
        self.data.append(Variable(torch.Tensor([1.0])))
        self.data.append(Variable(torch.Tensor([1.0])))
        self.data.append(Variable(torch.Tensor([1.0])))
        self.n_data = len(self.data)
        data_sum = self.data[0] + self.data[1] + self.data[2] + self.data[3]
        self.alpha_n = self.alpha0 + data_sum  # posterior alpha
        self.beta_n = self.beta0 - data_sum + \
            Variable(torch.Tensor([self.n_data]))
        # posterior beta
        self.log_alpha_n = torch.log(self.alpha_n)
        self.log_beta_n = torch.log(self.beta_n)
        self.verbose = True
test_tracegraph_elbo.py (project: pyro, author: uber)
def setUp(self):
        # lognormal-normal model
        # putting some of the parameters inside of a torch module to
        # make sure that functionality is ok (XXX: do this somewhere else in the future)
        self.mu0 = Variable(torch.Tensor([1.0]))  # normal prior hyperparameter
        # normal prior hyperparameter
        self.tau0 = Variable(torch.Tensor([1.0]))
        # known precision for observation likelihood
        self.tau = Variable(torch.Tensor([2.5]))
        self.n_data = 2
        self.data = Variable(torch.Tensor([[1.5], [2.2]]))  # two observations
        self.tau_n = self.tau0 + \
            Variable(torch.Tensor([self.n_data])) * self.tau  # posterior tau
        mu_numerator = self.mu0 * self.tau0 + \
            self.tau * torch.sum(torch.log(self.data))
        self.mu_n = mu_numerator / self.tau_n  # posterior mu
        self.log_mu_n = torch.log(self.mu_n)
        self.log_tau_n = torch.log(self.tau_n)
        self.verbose = True
test_tracegraph_elbo.py (project: pyro, author: uber)
def setUp(self):
        # normal-normal; known covariance
        self.lam0 = Variable(torch.Tensor([0.1, 0.1]))   # precision of prior
        self.mu0 = Variable(torch.Tensor([0.0, 0.5]))   # prior mean
        # known precision of observation noise
        self.lam = Variable(torch.Tensor([6.0, 4.0]))
        self.n_outer = 3
        self.n_inner = 3
        self.n_data = Variable(torch.Tensor([self.n_outer * self.n_inner]))
        self.data = []
        self.sum_data = ng_zeros(2)
        for _out in range(self.n_outer):
            data_in = []
            for _in in range(self.n_inner):
                data_in.append(Variable(torch.Tensor([-0.1, 0.3]) + torch.randn(2) / torch.sqrt(self.lam.data)))
                self.sum_data += data_in[-1]
            self.data.append(data_in)
        self.analytic_lam_n = self.lam0 + self.n_data.expand_as(self.lam) * self.lam
        self.analytic_log_sig_n = -0.5 * torch.log(self.analytic_lam_n)
        self.analytic_mu_n = self.sum_data * (self.lam / self.analytic_lam_n) +\
            self.mu0 * (self.lam0 / self.analytic_lam_n)
        self.verbose = True

    # this tests rao-blackwellization in elbo for nested list map_datas
normal.py (project: pyro, author: uber)
def batch_log_pdf(self, x):
        """
        Diagonal Normal log-likelihood

        Ref: :py:meth:`pyro.distributions.distribution.Distribution.batch_log_pdf`
        """
        # expand to batch size of input
        mu = self.mu.expand(self.shape(x))
        sigma = self.sigma.expand(self.shape(x))
        log_pxs = -1 * (torch.log(sigma) + 0.5 * np.log(2.0 * np.pi) + 0.5 * torch.pow((x - mu) / sigma, 2))
        # XXX this allows for the user to mask out certain parts of the score, for example
        # when the data is a ragged tensor. also useful for KL annealing. this entire logic
        # will likely be done in a better/cleaner way in the future
        if self.log_pdf_mask is not None:
            log_pxs = log_pxs * self.log_pdf_mask
        batch_log_pdf = torch.sum(log_pxs, -1)
        batch_log_pdf_shape = self.batch_shape(x) + (1,)
        return batch_log_pdf.contiguous().view(batch_log_pdf_shape)
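
The per-dimension term above is the univariate normal log-density, log N(x; mu, sigma) = -log(sigma) - 0.5 * log(2 * pi) - 0.5 * ((x - mu) / sigma)^2; a standalone spot check with made-up numbers:

import numpy as np
import torch

x, mu, sigma = torch.Tensor([0.3]), torch.Tensor([0.1]), torch.Tensor([0.5])
log_px = -1 * (torch.log(sigma) + 0.5 * np.log(2.0 * np.pi)
               + 0.5 * torch.pow((x - mu) / sigma, 2))
print(log_px)  # ~ -0.306, agrees with scipy.stats.norm(0.1, 0.5).logpdf(0.3)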
transformed_distribution.py (project: pyro, author: uber)
def log_pdf(self, y, *args, **kwargs):
        """
        :param y: a value sampled from the transformed distribution
        :type y: torch.autograd.Variable

        :returns: the score (the log pdf) of y
        :rtype: torch.autograd.Variable

        Scores the sample by inverting the bijector(s) and computing the score using the score
        of the base distribution and the log det jacobian
        """
        inverses = []
        next_to_invert = y
        for bijector in reversed(self.bijectors):
            inverse = bijector.inverse(next_to_invert)
            inverses.append(inverse)
            next_to_invert = inverse
        log_pdf_base = self.base_dist.log_pdf(inverses[-1], *args, **kwargs)
        log_det_jacobian = self.bijectors[-1].log_det_jacobian(y, *args, **kwargs)
        for bijector, inverse in zip(list(reversed(self.bijectors))[1:], inverses[:-1]):
            log_det_jacobian += bijector.log_det_jacobian(inverse, *args, **kwargs)
        return log_pdf_base - log_det_jacobian
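
What log_pdf computes is the change-of-variables formula log p_Y(y) = log p_X(f^-1(y)) - log|det J_f|, accumulated over the chain of bijectors. A minimal standalone demonstration with a single exp bijector over a standard normal base (plain torch, no pyro types):

import numpy as np
import torch

y = torch.Tensor([2.0])
x = torch.log(y)                                         # invert y = exp(x)
log_pdf_base = -0.5 * np.log(2.0 * np.pi) - 0.5 * x * x  # log N(x; 0, 1)
log_det_jacobian = x                                     # log|d exp(x)/dx| = x
print(log_pdf_base - log_det_jacobian)                   # log-normal log-density at 2.0, ~ -1.852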
util.py (project: pyro, author: uber)
def log_gamma(xx):
    if isinstance(xx, torch.Tensor):
        xx = Variable(xx)
    ttype = xx.data.type()
    gamma_coeff = [
        76.18009172947146,
        -86.50532032941677,
        24.01409824083091,
        -1.231739572450155,
        0.1208650973866179e-2,
        -0.5395239384953e-5,
    ]
    magic1 = 1.000000000190015
    magic2 = 2.5066282746310005
    x = xx - 1.0
    t = x + 5.5
    t = t - (x + 0.5) * torch.log(t)
    ser = Variable(torch.ones(x.size()).type(ttype)) * magic1
    for c in gamma_coeff:
        x = x + 1.0
        ser = ser + torch.pow(x / c, -1)
    return torch.log(ser * magic2) - t
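
A quick sanity check of this Lanczos approximation against the standard library (assumes log_gamma above is in scope):

import math
import torch

vals = [0.5, 1.0, 3.7, 10.0]
approx = log_gamma(torch.Tensor(vals))
exact = [math.lgamma(v) for v in vals]
print(approx.data.tolist(), exact)  # should agree to many significant digits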
util.py (project: pyro, author: uber)
def log_beta(t):
    """
    Computes log Beta function.

    :param t:
    :type t: torch.autograd.Variable of dimension 1 or 2
    :rtype: torch.autograd.Variable of float (if t.dim() == 1) or torch.Tensor (if t.dim() == 2)
    """
    assert t.dim() in (1, 2)
    if t.dim() == 1:
        numer = torch.sum(log_gamma(t))
        denom = log_gamma(torch.sum(t))
    else:
        numer = torch.sum(log_gamma(t), 1)
        denom = log_gamma(torch.sum(t, 1))
    return numer - denom
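
log_beta relies on the identity log B(a, b) = log Gamma(a) + log Gamma(b) - log Gamma(a + b); a plain-Python cross-check:

import math

a, b = 2.0, 3.0
print(math.lgamma(a) + math.lgamma(b) - math.lgamma(a + b))  # log B(2, 3) = log(1/12), ~ -2.4849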
dirichlet.py (project: pyro, author: uber)
def batch_log_pdf(self, x):
        """
        Evaluates log probability density over one or a batch of samples.

        Each of alpha and x can be either a single value or a batch of values batched along dimension 0.
        If they are both batches, their batch sizes must agree.
        In any case, the rightmost size must agree.

        :param torch.autograd.Variable x: A value (if x.dim() == 1) or a batch of values (if x.dim() == 2).
        :param alpha: A vector of concentration parameters.
        :type alpha: torch.autograd.Variable or None.
        :return: log probability densities of each element in the batch.
        :rtype: torch.autograd.Variable of torch.Tensor of dimension 1.
        """
        alpha = self.alpha.expand(self.shape(x))
        x_sum = torch.sum(torch.mul(alpha - 1, torch.log(x)), -1)
        beta = log_beta(alpha)
        batch_log_pdf_shape = self.batch_shape(x) + (1,)
        return (x_sum - beta).contiguous().view(batch_log_pdf_shape)
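
For a two-component alpha the Dirichlet density reduces to a Beta density, which makes the formula above easy to spot-check in plain Python:

import math

alpha = [2.0, 3.0]
x = [0.4, 0.6]  # a point on the simplex
log_B = sum(math.lgamma(a) for a in alpha) - math.lgamma(sum(alpha))
log_pdf = sum((a - 1) * math.log(xi) for a, xi in zip(alpha, x)) - log_B
print(log_pdf)  # ~ 0.547, equals scipy.stats.beta(2, 3).logpdf(0.4)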
box_utils.py (project: textobjdetection, author: andfoy)
def encode(matched, priors, variances):
    """Encode the variances from the priorbox layers into the ground truth boxes
    we have matched (based on jaccard overlap) with the prior boxes.
    Args:
        matched: (tensor) Coords of ground truth for each prior in point-form
            Shape: [num_priors, 4].
        priors: (tensor) Prior boxes in center-offset form
            Shape: [num_priors,4].
        variances: (list[float]) Variances of priorboxes
    Return:
        encoded boxes (tensor), Shape: [num_priors, 4]
    """

    # dist b/t match center and prior's center
    g_cxcy = (matched[:, :2] + matched[:, 2:])/2 - priors[:, :2]
    # encode variance
    g_cxcy /= (variances[0] * priors[:, 2:])
    # match wh / prior wh
    g_wh = (matched[:, 2:] - matched[:, :2]) / priors[:, 2:]
    g_wh = torch.log(g_wh) / variances[1]
    # return target for smooth_l1_loss
    return torch.cat([g_cxcy, g_wh], 1)  # [num_priors,4]


# Adapted from https://github.com/Hakuyume/chainer-ssd
test_oim.py (project: open-reid, author: Cysu)
def test_forward_backward(self):
        import torch
        import torch.nn.functional as F
        from torch.autograd import Variable
        from reid.loss import OIMLoss
        criterion = OIMLoss(3, 3, scalar=1.0, size_average=False)
        criterion.lut = torch.eye(3)
        x = Variable(torch.randn(3, 3), requires_grad=True)
        y = Variable(torch.range(0, 2).long())
        loss = criterion(x, y)
        loss.backward()
        probs = F.softmax(x)
        grads = probs.data - torch.eye(3)
        abs_diff = torch.abs(grads - x.grad.data)
        self.assertEquals(torch.log(probs).diag().sum(), -loss)
        self.assertTrue(torch.max(abs_diff) < 1e-6)
box_utils.py (project: realtime-action-detection, author: gurkirt)
def encode(matched, priors, variances):
    """Encode the variances from the priorbox layers into the ground truth boxes
    we have matched (based on jaccard overlap) with the prior boxes.
    Args:
        matched: (tensor) Coords of ground truth for each prior in point-form
            Shape: [num_priors, 4].
        priors: (tensor) Prior boxes in center-offset form
            Shape: [num_priors,4].
        variances: (list[float]) Variances of priorboxes
    Return:
        encoded boxes (tensor), Shape: [num_priors, 4]
    """

    # dist b/t match center and prior's center
    g_cxcy = (matched[:, :2] + matched[:, 2:])/2 - priors[:, :2]
    # encode variance
    g_cxcy /= (variances[0] * priors[:, 2:])
    # match wh / prior wh
    g_wh = (matched[:, 2:] - matched[:, :2]) / priors[:, 2:]
    g_wh = torch.log(g_wh) / variances[1]
    # return target for smooth_l1_loss
    return torch.cat([g_cxcy, g_wh], 1)  # [num_priors,4]


# Adapted from https://github.com/Hakuyume/chainer-ssd
model.py (project: pytorch-nlp, author: endymecy)
def _forward_alg(self, feats):
        # calculate in log domain
        # feats is len(sentence) * tagset_size
        # initialize alpha with a Tensor with values all equal to -10000.
        init_alphas = torch.Tensor(1, self.tagset_size).fill_(-10000.)
        init_alphas[0][self.tag_to_ix[START_TAG]] = 0.
        forward_var = autograd.Variable(init_alphas)
        if self.use_gpu:
            forward_var = forward_var.cuda()
        for feat in feats:
            emit_score = feat.view(-1, 1)
            tag_var = forward_var + self.transitions + emit_score
            max_tag_var, _ = torch.max(tag_var, dim=1)
            tag_var = tag_var - max_tag_var.view(-1, 1)
            forward_var = max_tag_var + torch.log(torch.sum(torch.exp(tag_var), dim=1)).view(1, -1)
        terminal_var = (forward_var + self.transitions[self.tag_to_ix[STOP_TAG]]).view(1, -1)
        alpha = log_sum_exp(terminal_var)
        # Z(x)
        return alpha
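
The max-shift inside the loop is the numerically stable log-sum-exp: log sum_i exp(v_i) = max(v) + log sum_i exp(v_i - max(v)). A minimal standalone version:

import torch

def stable_log_sum_exp(v):
    m = torch.max(v)  # shifting by the max keeps torch.exp from overflowing
    return m + torch.log(torch.sum(torch.exp(v - m)))

v = torch.Tensor([1000.0, 1001.0, 999.0])
print(stable_log_sum_exp(v))  # ~1001.41; naive torch.log(torch.sum(torch.exp(v))) overflows to inf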
nn.py (project: pyprob, author: probprog)
def __init__(self, traces, sort=True):
        self.batch = traces
        self.length = len(traces)
        self.traces_lengths = []
        self.traces_max_length = 0
        self.observes_max_length = 0
        sb = {}
        for trace in traces:
            if trace.length is None:
                util.logger.log('Batch: Received a trace of length zero.')
                continue  # skip it; comparing None against ints below would raise
            if trace.length > self.traces_max_length:
                self.traces_max_length = trace.length
            if trace.observes_tensor.size(0) > self.observes_max_length:
                self.observes_max_length = trace.observes_tensor.size(0)
            h = hash(trace.addresses_suffixed())
            if h not in sb:
                sb[h] = []
            sb[h].append(trace)
        self.sub_batches = []
        for _, t in sb.items():
            self.sub_batches.append(t)
        if sort:
            # Sort the batch in decreasing trace length.
            self.batch = sorted(self.batch, reverse=True, key=lambda t: t.length)
        self.traces_lengths = [t.length for t in self.batch]
nn.py (project: pyprob, author: probprog)
def loss(self, x, samples):
        _, proposal_output = self.forward(x, samples)
        batch_size = len(samples)
        means = proposal_output[:, 0]
        stds = proposal_output[:, 1]
        two_std_squares = 2 * stds * stds + util.epsilon
        two_pi_std_squares = math.pi * two_std_squares
        half_log_two_pi_std_squares = 0.5 * torch.log(two_pi_std_squares + util.epsilon)
        l = 0
        for b in range(batch_size):
            value = samples[b].value[0]
            mean = means[b]
            two_std_square = two_std_squares[b]
            half_log_two_pi_std_square = half_log_two_pi_std_squares[b]
            l += half_log_two_pi_std_square + ((value - mean)**2) / two_std_square
        return l
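
The loop accumulates the Gaussian negative log-likelihood 0.5 * log(2 * pi * sigma^2) + (x - mu)^2 / (2 * sigma^2) per sample; a vectorized equivalent sketch with made-up numbers:

import math
import torch

values = torch.Tensor([0.2, -0.1, 0.5])
means = torch.Tensor([0.0, 0.0, 0.3])
stds = torch.Tensor([1.0, 0.5, 2.0])
nll = 0.5 * torch.log(2 * math.pi * stds * stds) + torch.pow(values - means, 2) / (2 * stds * stds)
print(torch.sum(nll))  # same total the per-sample loop above produces for these numbers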
nn.py (project: pyprob, author: probprog)
def loss(self, x, samples):
        _, proposal_output = self.forward(x, samples)
        prior_mins = Variable(util.Tensor([s.distribution.prior_min for s in samples]), requires_grad=False)
        prior_maxs = Variable(util.Tensor([s.distribution.prior_max for s in samples]), requires_grad=False)
        batch_size = len(samples)
        modes = (proposal_output[:, 0] - prior_mins) / (prior_maxs - prior_mins)
        certainties = proposal_output[:, 1] + 2
        alphas = modes * (certainties - 2) + 1
        betas = (1 - modes) * (certainties - 2) + 1
        beta_funs = util.beta(alphas, betas)
        l = 0
        for b in range(batch_size):
            value = samples[b].value[0]
            prior_min = samples[b].distribution.prior_min
            prior_max = samples[b].distribution.prior_max
            normalized_value = (value - prior_min) / (prior_max - prior_min)
            alpha = alphas[b]
            beta = betas[b]
            beta_fun = beta_funs[b]
            l -= ((alpha - 1) * np.log(normalized_value + util.epsilon)
                  + (beta - 1) * np.log(1 - normalized_value + util.epsilon)
                  - torch.log(beta_fun + util.epsilon)
                  - np.log(prior_max - prior_min + util.epsilon))
        return l
nn.py (project: pyprob, author: probprog)
def loss(self, x, samples):
        _, proposal_output = self.forward(x, samples)
        batch_size = len(samples)
        means = proposal_output[:, 0:self.mixture_components]
        stds = proposal_output[:, self.mixture_components:2 * self.mixture_components]
        coeffs = proposal_output[:, 2 * self.mixture_components:3 * self.mixture_components]
        l = 0
        for b in range(batch_size):
            value = samples[b].value[0]
            prior_min = samples[b].distribution.prior_min
            prior_max = samples[b].distribution.prior_max
            ll = 0
            for c in range(self.mixture_components):
                mean = means[b,c]
                std = stds[b,c]
                coeff = coeffs[b,c]
                xi = (value - mean) / std
                phi_min = 0.5 * (1 + util.erf(((prior_min - mean) / std) * util.one_over_sqrt_two))
                phi_max = 0.5 * (1 + util.erf(((prior_max - mean) / std) * util.one_over_sqrt_two))
                ll += coeff * util.one_over_sqrt_two_pi * torch.exp(-0.5 * xi * xi) / (std * (phi_max - phi_min))
            l -= torch.log(ll + util.epsilon)
        return l
nn.py (project: pyprob, author: probprog)
def loss(self, x, samples):
        # FoldedNormal logpdf
        # https://en.wikipedia.org/wiki/Folded_normal_distribution
        _, proposal_output = self.forward(x, samples)
        batch_size = len(samples)
        locations = proposal_output[:, 0]
        scales = proposal_output[:, 1]
        two_scales = 2 * scales + util.epsilon
        half_log_two_pi_scales = 0.5 * torch.log(math.pi * two_scales + util.epsilon)
        l = 0
        for b in range(batch_size):
            value = samples[b].value[0]
            if value < 0:
                l -= 0  # folded normal has no mass below zero, so this sample adds nothing
            else:
                location = locations[b]
                two_scale = two_scales[b]
                half_log_two_pi_scale = half_log_two_pi_scales[b]
                logpdf_1 = -half_log_two_pi_scale - ((value - location)**2) / two_scale
                logpdf_2 = -half_log_two_pi_scale - ((value + location)**2) / two_scale
                l -= util.logsumexp(torch.cat([logpdf_1, logpdf_2]))
        return l
nn.py (project: pyprob, author: probprog)
def loss(self, x, samples):
        _, proposal_output = self.forward(x, samples)
        batch_size = len(samples)
        modes = proposal_output[:, 0]
        certainties = proposal_output[:, 1] + 2
        alphas = modes * (certainties - 2) + 1
        betas = (1 - modes) * (certainties - 2) + 1
        beta_funs = util.beta(alphas, betas)
        l = 0
        for b in range(batch_size):
            value = samples[b].value[0]
            alpha = alphas[b]
            beta = betas[b]
            beta_fun = beta_funs[b]
            l -= ((alpha - 1) * np.log(value + util.epsilon)
                  + (beta - 1) * np.log(1 - value + util.epsilon)
                  - torch.log(beta_fun + util.epsilon))
        return l
nn.py (project: pyprob, author: probprog)
def __init__(self, input_example_non_batch, output_dim, reshape=None, dropout=0):
        super(ObserveEmbeddingCNN2D6C, self).__init__()
        self.reshape = reshape
        if self.reshape is not None:
            input_example_non_batch = input_example_non_batch.view(self.reshape)
            self.reshape.insert(0, -1) # For correct handling of the batch dimension in self.forward
        if input_example_non_batch.dim() == 2:
            self.input_sample = input_example_non_batch.unsqueeze(0).cpu()
        elif input_example_non_batch.dim() == 3:
            self.input_sample = input_example_non_batch.cpu()
        else:
            util.logger.log('ObserveEmbeddingCNN2D6C: Expecting a 3d input_example_non_batch (num_channels x height x width) or a 2d input_example_non_batch (height x width). Received: {0}'.format(input_example_non_batch.size()))
        self.input_channels = self.input_sample.size(0)
        self.output_dim = output_dim
        self.conv1 = nn.Conv2d(self.input_channels, 64, 3)
        self.conv2 = nn.Conv2d(64, 64, 3)
        self.conv3 = nn.Conv2d(64, 128, 3)
        self.conv4 = nn.Conv2d(128, 128, 3)
        self.conv5 = nn.Conv2d(128, 128, 3)
        self.conv6 = nn.Conv2d(128, 128, 3)
        self.drop = nn.Dropout(dropout)
nn.py (project: pyprob, author: probprog)
def __init__(self, input_example_non_batch, output_dim, reshape=None, dropout=0):
        super(ObserveEmbeddingCNN3D4C, self).__init__()
        self.reshape = reshape
        if self.reshape is not None:
            input_example_non_batch = input_example_non_batch.view(self.reshape)
            self.reshape.insert(0, -1) # For correct handling of the batch dimension in self.forward
        if input_example_non_batch.dim() == 3:
            self.input_sample = input_example_non_batch.unsqueeze(0).cpu()
        elif input_example_non_batch.dim() == 4:
            self.input_sample = input_example_non_batch.cpu()
        else:
            util.logger.log('ObserveEmbeddingCNN3D4C: Expecting a 4d input_example_non_batch (num_channels x depth x height x width) or a 3d input_example_non_batch (depth x height x width). Received: {0}'.format(input_example_non_batch.size()))
        self.input_channels = self.input_sample.size(0)
        self.output_dim = output_dim
        self.conv1 = nn.Conv3d(self.input_channels, 64, 3)
        self.conv2 = nn.Conv3d(64, 64, 3)
        self.conv3 = nn.Conv3d(64, 128, 3)
        self.conv4 = nn.Conv3d(128, 128, 3)
        self.drop = nn.Dropout(dropout)
nn.py (project: pyprob, author: probprog)
def set_observe_embedding(self, example_observes, obs_emb, obs_emb_dim, obs_reshape=None):
        self.obs_emb = obs_emb
        self.obs_emb_dim = obs_emb_dim
        if obs_emb == 'fc':
            observe_layer = ObserveEmbeddingFC(Variable(example_observes), obs_emb_dim, dropout=self.dropout)
        elif obs_emb == 'cnn1d2c':
            observe_layer = ObserveEmbeddingCNN1D2C(Variable(example_observes), obs_emb_dim, dropout=self.dropout)
            observe_layer.configure()
        elif obs_emb == 'cnn2d6c':
            observe_layer = ObserveEmbeddingCNN2D6C(Variable(example_observes), obs_emb_dim, obs_reshape, dropout=self.dropout)
            observe_layer.configure()
        elif obs_emb == 'cnn3d4c':
            observe_layer = ObserveEmbeddingCNN3D4C(Variable(example_observes), obs_emb_dim, obs_reshape, dropout=self.dropout)
            observe_layer.configure()
        elif obs_emb == 'lstm':
            observe_layer = ObserveEmbeddingLSTM(Variable(example_observes), obs_emb_dim, dropout=self.dropout)
        else:
            util.logger.log('set_observe_embedding: Unsupported observation embedding: ' + obs_emb)
            return  # otherwise observe_layer would be unbound below

        self.observe_layer = observe_layer

