Example source code for Python's exp()

Source file: nonlinearity.py · project: paysage · author: drckf
def softmax(x: T.Tensor) -> T.Tensor:
    """
    Softmax function on a tensor.
    Exponentiates the tensor elementwise and divides
        by the sum along axis=1.

    Args:
        x: A tensor.

    Returns:
        tensor: Softmax of the tensor.

    """
    xreg = matrix.subtract(matrix.tmax(x, axis=1, keepdims=True), x)
    y = torch.exp(xreg)
    return matrix.divide(matrix.tsum(y, axis=1, keepdims=True), y)
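For reference, a standalone sketch (not part of the paysage source) of the same max-shift trick in plain PyTorch: subtracting the row-wise max before exponentiating keeps exp() from overflowing, and the constant shift cancels in the normalized ratio.

import torch

def softmax_rows(x: torch.Tensor) -> torch.Tensor:
    # subtract the row-wise max so exp() cannot overflow;
    # the shift cancels when dividing by the row sum
    shifted = x - x.max(dim=1, keepdim=True)[0]
    y = torch.exp(shifted)
    return y / y.sum(dim=1, keepdim=True)

x = torch.tensor([[1000.0, 1001.0], [0.0, 1.0]])
print(softmax_rows(x))  # finite values; a naive exp(x) would overflow to inf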
Source file: nonlinearity.py · project: paysage · author: drckf
def logaddexp(x1: T.FloatTensor, x2: T.FloatTensor) -> T.FloatTensor:
    """
    Elementwise logaddexp function: log(exp(x1) + exp(x2))

    Args:
        x1: A tensor.
        x2: A tensor.

    Returns:
        tensor: Elementwise logaddexp.

    """
    # log(exp(x1) + exp(x2))
    # = log( exp(x1) (1 + exp(x2 - x1))) = x1 + log(1 + exp(x2 - x1))
    # = log( exp(x2) (exp(x1 - x2) + 1)) = x2 + log(1 + exp(x1 - x2))
    diff = torch.min(x2 - x1, x1 - x2)
    return torch.max(x1, x2) + torch.log1p(torch.exp(diff))
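A quick numerical check of the identity in the comments (my own example, not from the repo): the stable form stays finite where the naive log(exp(x1) + exp(x2)) overflows.

import torch

x1 = torch.tensor([1000.0, -2.0])
x2 = torch.tensor([1000.5, 3.0])
diff = torch.min(x2 - x1, x1 - x2)          # -|x1 - x2|, always <= 0
stable = torch.max(x1, x2) + torch.log1p(torch.exp(diff))
naive = torch.log(torch.exp(x1) + torch.exp(x2))
print(stable)  # tensor([1000.9741, 3.0067])
print(naive)   # tensor([inf, 3.0067]) -- exp(1000.) overflows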
Source file: multibox_loss.py · project: DSOD-Pytorch-Implementation · author: Ellinier
def cross_entropy_loss(self, x, y):
        '''Cross entropy loss w/o averaging across all samples.

        Args:
          x: (tensor) sized [N,D].
          y: (tensor) sized [N,].

        Return:
          (tensor) cross entropy loss, sized [N,].
        '''
        # print(x.size()) # [8732, 16]
        xmax = x.data.max()
        # print(x.data.size()) # [8732, 16]
        # print(xmax.size()) # max--float object
        log_sum_exp = torch.log(torch.sum(torch.exp(x-xmax), 1)) + xmax
        # print(log_sum_exp.size()) # [8732,]
        # print(x.gather(1, y.view(-1,1)).size()) # [8732, 1]
        # print((log_sum_exp.view(-1, 1) - x.gather(1, y.view(-1,1))).size())
        return log_sum_exp.view(-1, 1) - x.gather(1, y.view(-1,1))
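The expression above is the identity cross_entropy(x, y) = logsumexp(x) - x[y]. As a sanity check (assuming a recent PyTorch where F.cross_entropy accepts reduction='none'), it matches the built-in unreduced loss:

import torch
import torch.nn.functional as F

x = torch.randn(4, 16)          # [N, D] logits
y = torch.randint(0, 16, (4,))  # [N] target class indices
xmax = x.max()
lse = torch.log(torch.sum(torch.exp(x - xmax), 1)) + xmax
loss = lse.view(-1, 1) - x.gather(1, y.view(-1, 1))
print(torch.allclose(loss.squeeze(1), F.cross_entropy(x, y, reduction='none')))  # True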
Source file: encoder.py · project: DSOD-Pytorch-Implementation · author: Ellinier
def decode(self, loc, conf):
        '''Transform predicted loc/conf back to real bbox locations and class labels.

        Args:
          loc: (tensor) predicted loc, sized [8732,4].
          conf: (tensor) predicted conf, sized [8732,21].

        Returns:
          boxes: (tensor) bbox locations, sized [#obj, 4].
          labels: (tensor) class labels, sized [#obj,1].
          scores: (tensor) max confidence of each kept box, sized [#obj,1].
        '''
        variances = self.variances
        wh = torch.exp(loc[:,2:]*variances[1]) * self.default_boxes[:,2:]
        cxcy = loc[:,:2] * variances[0] * self.default_boxes[:,2:] + self.default_boxes[:,:2]
        boxes = torch.cat([cxcy-wh/2, cxcy+wh/2], 1)  # [8732,4]

        max_conf, labels = conf.max(1)  # [8732,1]
        ids = labels.squeeze(1).nonzero()
        if ids.numel() == 0:
            return None, None, None
        ids.squeeze_(1)  # [#boxes,]

        keep = self.nms(boxes[ids], max_conf[ids].squeeze(1), threshold=0.3)
        return boxes[ids][keep], labels[ids][keep]-1, max_conf[ids][keep]
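To make the decode arithmetic concrete, here is a single-box sketch with made-up numbers (the default box, offsets, and variances below are hypothetical): the center offsets are scaled by variances[0] relative to the default box size, while width/height go through exp() scaled by variances[1].

import torch

default_box = torch.tensor([0.5, 0.5, 0.2, 0.2])  # (cx, cy, w, h), hypothetical
loc = torch.tensor([0.4, -0.2, 0.1, 0.3])         # predicted offsets, hypothetical
variances = (0.1, 0.2)

cxcy = loc[:2] * variances[0] * default_box[2:] + default_box[:2]
wh = torch.exp(loc[2:] * variances[1]) * default_box[2:]
box = torch.cat([cxcy - wh / 2, cxcy + wh / 2])   # (xmin, ymin, xmax, ymax)
print(box)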
Source file: trainer.py · project: TreeLSTMSentiment · author: ttpro1995
def test(self, dataset):
        self.model.eval()
        self.embedding_model.eval()
        loss = 0
        predictions = torch.zeros(len(dataset))
        indices = torch.range(1,dataset.num_classes)
        for idx in tqdm(xrange(len(dataset)),desc='Testing epoch  '+str(self.epoch)+''):
            tree, sent, label = dataset[idx]
            input = Var(sent, volatile=True)
            target = Var(map_label_to_target_sentiment(label,dataset.num_classes, fine_grain=self.args.fine_grain), volatile=True)
            if self.args.cuda:
                input = input.cuda()
                target = target.cuda()
            emb = F.torch.unsqueeze(self.embedding_model(input),1)
            output, _ = self.model(tree, emb) # size(1,5)
            err = self.criterion(output, target)
            loss += err.data[0]
            output[:,1] = -9999 # exclude the middle (neutral) class
            val, pred = torch.max(output, 1)
            predictions[idx] = pred.data.cpu()[0][0]
            # predictions[idx] = torch.dot(indices,torch.exp(output.data.cpu()))
        return loss/len(dataset), predictions
Source file: trainer.py · project: TreeLSTMSentiment · author: ttpro1995
def test(self, dataset):
        self.model.eval()
        loss = 0
        predictions = torch.zeros(len(dataset))
        indices = torch.range(1,dataset.num_classes)
        for idx in tqdm(xrange(len(dataset)),desc='Testing epoch  '+str(self.epoch)+''):
            ltree,lsent,rtree,rsent,label = dataset[idx]
            linput, rinput = Var(lsent, volatile=True), Var(rsent, volatile=True)
            target = Var(map_label_to_target(label,dataset.num_classes), volatile=True)
            if self.args.cuda:
                linput, rinput = linput.cuda(), rinput.cuda()
                target = target.cuda()
            output = self.model(ltree,linput,rtree,rinput)
            err = self.criterion(output, target)
            loss += err.data[0]
            predictions[idx] = torch.dot(indices,torch.exp(output.data.cpu()))
        return loss/len(dataset), predictions
Source file: algos_utils.py · project: drl.pth · author: seba-1511
def gauss_log_prob(means, logstds, x):
    var = th.exp(2 * logstds)
    # log N(x; mu, sigma) = -(x - mu)^2 / (2*sigma^2) - 0.5*log(2*pi) - log(sigma)
    gp = -(x - means)**2 / (2 * var) - 0.5 * LOG2PI - logstds
    return th.sum(gp, dim=1)
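A cross-check of the corrected formula against torch.distributions (a modern PyTorch API, not part of the original repo):

import math
import torch

LOG2PI = math.log(2 * math.pi)

means = torch.zeros(2, 3)
logstds = torch.full((2, 3), -0.5)
x = torch.randn(2, 3)

gp = -(x - means)**2 / (2 * torch.exp(2 * logstds)) - 0.5 * LOG2PI - logstds
ref = torch.distributions.Normal(means, torch.exp(logstds)).log_prob(x)
print(torch.allclose(gp, ref))  # True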
Source file: actor_critic.py · project: pytorch.rl.learning · author: moskomule
def softmax(self):
        numers = torch.exp(Tensor([self.weight @ self.feature(self.state, a) for a in range(self.action_size)]))
        return numers / sum(numers)
Source file: REINFORCE.py · project: pytorch.rl.learning · author: moskomule
def softmax(self):
        numers = torch.exp(Tensor([self.weight @ self.feature(self.state, a) for a in range(self.action_size)]))
        return numers / sum(numers)
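Both snippets implement a linear-softmax policy, pi(a|s) proportional to exp(w . phi(s, a)). A self-contained sketch of the same idea (weight, features, and sizes below are stand-ins for the class attributes):

import torch

action_size, feat_dim = 4, 8
weight = torch.randn(feat_dim)
features = torch.randn(action_size, feat_dim)  # one feature row per action

logits = features @ weight                # w . phi(s, a) for every action a
probs = torch.exp(logits - logits.max())  # max-shift for numerical stability
probs = probs / probs.sum()
print(probs, probs.sum())                 # a valid distribution over the actions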
Source file: Exp.py · project: pytorch-dist · author: apaszke
def updateOutput(self, input):
        # destination-first call from the early PyTorch API: writes exp(input) into self.output
        return torch.exp(self.output, input)
Source file: box_utils.py · project: ssd.pytorch · author: amdegroot
def log_sum_exp(x):
    """Utility function for computing log_sum_exp while determining
    This will be used to determine unaveraged confidence loss across
    all examples in a batch.
    Args:
        x (Variable(tensor)): conf_preds from conf layers
    """
    x_max = x.data.max()
    return torch.log(torch.sum(torch.exp(x-x_max), 1, keepdim=True)) + x_max


# Original author: Francisco Massa:
# https://github.com/fmassa/object-detection.torch
# Ported to PyTorch by Max deGroot (02/01/2017)
Source file: kernel.py · project: sef · author: passalis
def symbolic_kernel(self, X):
        if self.kernel_type == 'linear':
            K = self.alpha * torch.mm(X, self.X_kernel.transpose(0, 1)) + self.c
        elif self.kernel_type == 'poly':
            K = (self.alpha * torch.mm(X, self.X_kernel.transpose(0, 1)) + self.c) ** self.degree
        elif self.kernel_type == 'rbf':
            D = sym_distance_matrix(X, self.X_kernel, self_similarity=False)
            K = torch.exp(-D ** 2 / (self.sigma_kernel ** 2))
        else:
            raise Exception('Unknown kernel type: ', self.kernel_type)
        return K
Source file: similarity.py · project: sef · author: passalis
def sym_heat_similarity_matrix(X, sigma):
    """
    Defines the self-similarity matrix using the heat kernel
    :param X: the input data matrix
    :param sigma: the bandwidth of the heat kernel
    :return: the similarity matrix
    """
    D = sym_distance_matrix(X, X, self_similarity=True)
    return torch.exp(-D ** 2 / (sigma ** 2))
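A standalone illustration (my own sketch; torch.cdist stands in for sym_distance_matrix, which is an assumption about what that helper computes): the heat kernel maps distance 0 to similarity 1 and decays with distance.

import torch

X = torch.randn(5, 3)
D = torch.cdist(X, X)         # pairwise Euclidean distances
sigma = 1.0
K = torch.exp(-D ** 2 / sigma ** 2)
print(K.shape, K.diagonal())  # torch.Size([5, 5]); ones on the diagonal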
Source file: model.py · project: torch_light · author: ne7ermore
def log_sum_exp(input, keepdim=False):
    assert input.dim() == 2
    max_scores, _ = input.max(dim=-1, keepdim=True)
    output = input - max_scores.expand_as(input)
    return max_scores + torch.log(torch.sum(torch.exp(output), dim=-1, keepdim=keepdim))
Source file: sketch_rnn.py · project: Pytorch-Sketch-RNN · author: alexis-jacq
def forward(self, inputs, z, hidden_cell=None):
        if hidden_cell is None:
            # then we must init from z
            hidden,cell = torch.split(F.tanh(self.fc_hc(z)),hp.dec_hidden_size,1)
            hidden_cell = (hidden.unsqueeze(0).contiguous(), cell.unsqueeze(0).contiguous())
        outputs,(hidden,cell) = self.lstm(inputs, hidden_cell)
        # in training we feed the lstm with the whole input in one shot
        # and use all outputs contained in 'outputs', while in generate
        # mode we just feed with the last generated sample:
        if self.training:
            y = self.fc_params(outputs.view(-1, hp.dec_hidden_size))
        else:
            y = self.fc_params(hidden.view(-1, hp.dec_hidden_size))
        # separate pen and mixture params:
        params = torch.split(y,6,1)
        params_mixture = torch.stack(params[:-1]) # trajectory
        params_pen = params[-1] # pen up/down
        # identify mixture params:
        pi,mu_x,mu_y,sigma_x,sigma_y,rho_xy = torch.split(params_mixture,1,2)
        # preprocess params:
        if self.training:
            len_out = Nmax+1
        else:
            len_out = 1
        pi = F.softmax(pi.t().squeeze()).view(len_out,-1,hp.M)
        sigma_x = torch.exp(sigma_x.t().squeeze()).view(len_out,-1,hp.M)
        sigma_y = torch.exp(sigma_y.t().squeeze()).view(len_out,-1,hp.M)
        rho_xy = torch.tanh(rho_xy.t().squeeze()).view(len_out,-1,hp.M)
        mu_x = mu_x.t().squeeze().contiguous().view(len_out,-1,hp.M)
        mu_y = mu_y.t().squeeze().contiguous().view(len_out,-1,hp.M)
        q = F.softmax(params_pen).view(len_out,-1,3)
        return pi,mu_x,mu_y,sigma_x,sigma_y,rho_xy,q,hidden,cell
Source file: sketch_rnn.py · project: Pytorch-Sketch-RNN · author: alexis-jacq
def bivariate_normal_pdf(self, dx, dy):
        z_x = ((dx-self.mu_x)/self.sigma_x)**2
        z_y = ((dy-self.mu_y)/self.sigma_y)**2
        z_xy = (dx-self.mu_x)*(dy-self.mu_y)/(self.sigma_x*self.sigma_y)
        z = z_x + z_y -2*self.rho_xy*z_xy
        exp = torch.exp(-z/(2*(1-self.rho_xy**2)))
        norm = 2*np.pi*self.sigma_x*self.sigma_y*torch.sqrt(1-self.rho_xy**2)
        return exp/norm
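A sanity check of the density (my own, not from the repo): with rho_xy = 0 the bivariate normal must factor into the product of two univariate normal densities.

import math

def normal_pdf(v, mu, sigma):
    # univariate normal density
    return math.exp(-((v - mu) ** 2) / (2 * sigma ** 2)) / (sigma * math.sqrt(2 * math.pi))

dx, dy, mu_x, mu_y, sx, sy = 0.3, -0.1, 0.0, 0.0, 1.5, 0.7
z = ((dx - mu_x) / sx) ** 2 + ((dy - mu_y) / sy) ** 2   # rho_xy = 0
pdf = math.exp(-z / 2) / (2 * math.pi * sx * sy)
print(abs(pdf - normal_pdf(dx, mu_x, sx) * normal_pdf(dy, mu_y, sy)) < 1e-12)  # True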
Source file: sketch_rnn.py · project: Pytorch-Sketch-RNN · author: alexis-jacq
def kullback_leibler_loss(self):
        LKL = -0.5*torch.sum(1+self.sigma-self.mu**2-torch.exp(self.sigma))\
            /float(hp.Nz*hp.batch_size)
        if use_cuda:
            KL_min = Variable(torch.Tensor([hp.KL_min]).cuda()).detach()
        else:
            KL_min = Variable(torch.Tensor([hp.KL_min])).detach()
        return hp.wKL*self.eta_step * torch.max(LKL,KL_min)
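Note that `self.sigma` here holds log(sigma^2), which is why it appears once as-is and once inside torch.exp: the closed-form KL(N(mu, sigma^2) || N(0, 1)) summed over dimensions is -0.5 * sum(1 + log_var - mu^2 - exp(log_var)). A minimal standalone version:

import torch

mu = torch.randn(4)       # posterior means, stand-in values
log_var = torch.randn(4)  # posterior log-variances, stand-in values
kl = -0.5 * torch.sum(1 + log_var - mu ** 2 - torch.exp(log_var))
print(kl)  # nonnegative up to numerical noise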
Source file: sketch_rnn.py · project: Pytorch-Sketch-RNN · author: alexis-jacq
def sample_next_state(self):

        def adjust_temp(pi_pdf):
            pi_pdf = np.log(pi_pdf)/hp.temperature
            pi_pdf -= pi_pdf.max()
            pi_pdf = np.exp(pi_pdf)
            pi_pdf /= pi_pdf.sum()
            return pi_pdf

        # get mixture index:
        pi = self.pi.data[0,0,:].cpu().numpy()
        pi = adjust_temp(pi)
        pi_idx = np.random.choice(hp.M, p=pi)
        # get pen state:
        q = self.q.data[0,0,:].cpu().numpy()
        q = adjust_temp(q)
        q_idx = np.random.choice(3, p=q)
        # get mixture params:
        mu_x = self.mu_x.data[0,0,pi_idx]
        mu_y = self.mu_y.data[0,0,pi_idx]
        sigma_x = self.sigma_x.data[0,0,pi_idx]
        sigma_y = self.sigma_y.data[0,0,pi_idx]
        rho_xy = self.rho_xy.data[0,0,pi_idx]
        x,y = sample_bivariate_normal(mu_x,mu_y,sigma_x,sigma_y,rho_xy,greedy=False)
        next_state = torch.zeros(5)
        next_state[0] = x
        next_state[1] = y
        next_state[q_idx+2] = 1
        if use_cuda:
            return Variable(next_state.cuda()).view(1,1,-1),x,y,q_idx==1,q_idx==2
        else:
            return Variable(next_state).view(1,1,-1),x,y,q_idx==1,q_idx==2
Source file: vae.py · project: pyro · author: uber
def forward(self, x):
        # define the forward computation on the image x
        # first shape the mini-batch to have pixels in the rightmost dimension
        x = x.view(-1, 784)
        # then compute the hidden units
        hidden = self.softplus(self.fc1(x))
        # then return a mean vector and a (positive) square root covariance
        # each of size batch_size x z_dim
        z_mu = self.fc21(hidden)
        z_sigma = torch.exp(self.fc22(hidden))
        return z_mu, z_sigma
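The (z_mu, z_sigma) pair is typically consumed by a reparameterized sample; a generic sketch (not pyro's actual guide code) of drawing z = mu + sigma * eps with eps ~ N(0, I):

import torch

z_mu = torch.zeros(2, 10)    # stand-in encoder outputs
z_sigma = torch.ones(2, 10)
eps = torch.randn_like(z_mu)
z = z_mu + z_sigma * eps     # differentiable w.r.t. z_mu and z_sigma
print(z.shape)  # torch.Size([2, 10])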


# define the PyTorch module that parameterizes the
# observation likelihood p(x|z)
Source file: vae_comparison.py · project: pyro · author: uber
def forward(self, x):
        x = x.view(-1, 784)
        h1 = self.relu(self.fc1(x))
        return self.fc21(h1), torch.exp(self.fc22(h1))


# VAE Decoder network

