Python sqrt() usage examples from open-source projects

circle.py (project: MIT-CS-lectures, author: William-Python-King)
def moveOrigin(self, newOrigin, dist=1):
    # Total distance between the current origin and the new origin.
    distance = math.sqrt((self.origin.x - newOrigin.x)**2 +
                         (self.origin.y - newOrigin.y)**2)
    xDist = self.origin.x - newOrigin.x  # horizontal separation
    yDist = self.origin.y - newOrigin.y  # vertical separation
    # Move `dist` units from the current origin toward the new one.
    ratio = dist / distance
    xMove = abs(xDist) * ratio
    yMove = abs(yDist) * ratio
    if xDist > 0:
        newX = self.origin.x - xMove
    else:
        newX = self.origin.x + xMove
    if yDist > 0:
        newY = self.origin.y - yMove
    else:
        newY = self.origin.y + yMove
    return (newX, newY)
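The same geometry fits in a standalone helper; a minimal sketch, assuming plain (x, y) tuples instead of the snippet's point objects:

import math

def move_toward(origin, target, dist=1):
    """Return the point `dist` units from origin along the line to target."""
    ox, oy = origin
    tx, ty = target
    distance = math.hypot(tx - ox, ty - oy)  # same as sqrt(dx**2 + dy**2)
    ratio = dist / distance
    return (ox + (tx - ox) * ratio, oy + (ty - oy) * ratio)

print(move_toward((0, 0), (3, 4), dist=5))  # (3.0, 4.0)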
init.py (project: cnn-graph-classification, author: giannisnik)
def xavier_uniform(tensor, gain=1):
    """Fills the input Tensor or Variable with values according to the method
    described in "Understanding the difficulty of training deep feedforward
    neural networks" - Glorot, X. & Bengio, Y. (2010), using a uniform
    distribution. The resulting tensor will have values sampled from
    :math:`U(-a, a)` where
    :math:`a = gain \\times \\sqrt{2 / (fan\\_in + fan\\_out)} \\times \\sqrt{3}`.
    Also known as Glorot initialisation.

    Args:
        tensor: an n-dimensional torch.Tensor or autograd.Variable
        gain: an optional scaling factor

    Examples:
        >>> w = torch.Tensor(3, 5)
        >>> nn.init.xavier_uniform(w, gain=nn.init.calculate_gain('relu'))
    """
    if isinstance(tensor, Variable):
        xavier_uniform(tensor.data, gain=gain)
        return tensor

    fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor)
    std = gain * math.sqrt(2.0 / (fan_in + fan_out))
    a = math.sqrt(3.0) * std  # Calculate uniform bounds from standard deviation
    return tensor.uniform_(-a, a)
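Outside torch, the bound a can be computed directly from the fan values; a minimal NumPy sketch of the same rule, assuming a plain 2-D (fan_in, fan_out) weight matrix:

import numpy as np

def xavier_uniform_np(shape, gain=1.0):
    fan_in, fan_out = shape                  # simple 2-D case only
    std = gain * np.sqrt(2.0 / (fan_in + fan_out))
    a = np.sqrt(3.0) * std                   # uniform bound matching that std
    return np.random.uniform(-a, a, size=shape)

w = xavier_uniform_np((3, 5))                # values all lie in [-a, a]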
init.py (project: cnn-graph-classification, author: giannisnik)
def xavier_normal(tensor, gain=1):
    """Fills the input Tensor or Variable with values according to the method
    described in "Understanding the difficulty of training deep feedforward
    neural networks" - Glorot, X. & Bengio, Y. (2010), using a normal
    distribution. The resulting tensor will have values sampled from
    :math:`N(0, std)` where
    :math:`std = gain \\times \\sqrt{2 / (fan\\_in + fan\\_out)}`.
    Also known as Glorot initialisation.

    Args:
        tensor: an n-dimensional torch.Tensor or autograd.Variable
        gain: an optional scaling factor

    Examples:
        >>> w = torch.Tensor(3, 5)
        >>> nn.init.xavier_normal(w)
    """
    if isinstance(tensor, Variable):
        xavier_normal(tensor.data, gain=gain)
        return tensor

    fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor)
    std = gain * math.sqrt(2.0 / (fan_in + fan_out))
    return tensor.normal_(0, std)
random.py (project: python-, author: secondtonone1)
def _test_generator(n, func, args):
    import time
    from math import sqrt as _sqrt  # module-level alias in the original random.py
    print(n, 'times', func.__name__)
    total = 0.0
    sqsum = 0.0
    smallest = 1e10
    largest = -1e10
    t0 = time.time()
    for i in range(n):
        x = func(*args)
        total += x
        sqsum = sqsum + x*x
        smallest = min(x, smallest)
        largest = max(x, largest)
    t1 = time.time()
    print(round(t1-t0, 3), 'sec,', end=' ')
    avg = total/n
    stddev = _sqrt(sqsum/n - avg*avg)
    print('avg %g, stddev %g, min %g, max %g\n' % \
              (avg, stddev, smallest, largest))
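CPython's random module drives this helper from its module-level _test() function; a typical invocation looks like:

import random
_test_generator(2000, random.random, ())
_test_generator(2000, random.gauss, (0.0, 1.0))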
crystal.py (project: lammps-data-file, author: kbsezginel)
def unit_cell_volume(self):
        """
        Calculates unit cell volume of a given MOF object.
        """
        a = self.uc_size[0]
        b = self.uc_size[1]
        c = self.uc_size[2]
        alp = math.radians(self.uc_angle[0])
        bet = math.radians(self.uc_angle[1])
        gam = math.radians(self.uc_angle[2])

        volume = 1 - math.cos(alp)**2 - math.cos(bet)**2 - math.cos(gam)**2
        volume += 2 * math.cos(alp) * math.cos(bet) * math.cos(gam)
        volume = a * b * c * math.sqrt(volume)
        frac_volume = volume / (a * b * c)

        self.ucv = volume
        self.frac_ucv = frac_volume
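As a sanity check, when all three angles are 90 degrees the cosine terms vanish and the expression reduces to V = a*b*c (so frac_ucv is 1); a quick standalone check:

import math

a, b, c = 10.0, 12.0, 15.0
alp = bet = gam = math.radians(90.0)
v = 1 - math.cos(alp)**2 - math.cos(bet)**2 - math.cos(gam)**2
v += 2 * math.cos(alp) * math.cos(bet) * math.cos(gam)
print(a * b * c * math.sqrt(v))  # 1800.0, up to floating-point error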
yellowfin.py (project: YellowFin_Pytorch, author: JianGoForIt)
def dist_to_opt(self):
    global_state = self._global_state
    beta = self._beta
    if self._iter == 0:
      global_state["grad_norm_avg"] = 0.0
      global_state["dist_to_opt_avg"] = 0.0
    global_state["grad_norm_avg"] = \
      global_state["grad_norm_avg"] * beta + (1 - beta) * math.sqrt(global_state["grad_norm_squared"] )
    global_state["dist_to_opt_avg"] = \
      global_state["dist_to_opt_avg"] * beta \
      + (1 - beta) * global_state["grad_norm_avg"] / (global_state['grad_norm_squared_avg'] + eps)
    if self._zero_debias:
      debias_factor = self.zero_debias_factor()
      self._dist_to_opt = global_state["dist_to_opt_avg"] / debias_factor
    else:
      self._dist_to_opt = global_state["dist_to_opt_avg"]
    if self._sparsity_debias:
      self._dist_to_opt /= (np.sqrt(self._sparsity_avg) + eps)
    return
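Both running averages follow the standard exponential-moving-average update avg = beta * avg + (1 - beta) * value, and zero_debias_factor() corrects for the zero initialization, as in Adam. A standalone sketch of that correction (not YellowFin's actual method):

def ema_zero_debiased(values, beta):
    # An EMA started at 0.0 underestimates early on; dividing by
    # (1 - beta**t) removes that bias.
    avg = 0.0
    for t, v in enumerate(values, start=1):
        avg = beta * avg + (1 - beta) * v
    return avg / (1 - beta ** t)

print(ema_zero_debiased([5.0, 5.0, 5.0], beta=0.9))  # ≈ 5.0 after only 3 steps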
yellowfin.py (project: YellowFin_Pytorch, author: JianGoForIt)
def lr_grad_norm_avg(self):
    # Enforce that lr * grad_norm does not increase dramatically
    # in case of instability. Not necessary for basic use.
    global_state = self._global_state
    beta = self._beta
    if "lr_grad_norm_avg" not in global_state:
      global_state['grad_norm_squared_avg_log'] = 0.0
    global_state['grad_norm_squared_avg_log'] = \
      global_state['grad_norm_squared_avg_log'] * beta \
      + (1 - beta) * np.log(global_state['grad_norm_squared'] + eps)
    if "lr_grad_norm_avg" not in global_state:
      global_state["lr_grad_norm_avg"] = \
        0.0 * beta + (1 - beta) * np.log(self._lr * np.sqrt(global_state['grad_norm_squared'] ) + eps)
      # we monitor the minimal smoothed ||lr * grad||
      global_state["lr_grad_norm_avg_min"] = \
        np.exp(global_state["lr_grad_norm_avg"] / self.zero_debias_factor() )
    else:
      global_state["lr_grad_norm_avg"] = global_state["lr_grad_norm_avg"] * beta \
        + (1 - beta) * np.log(self._lr * np.sqrt(global_state['grad_norm_squared'] ) + eps)
      global_state["lr_grad_norm_avg_min"] = \
        min(global_state["lr_grad_norm_avg_min"], 
            np.exp(global_state["lr_grad_norm_avg"] / self.zero_debias_factor() ) )
yellowfin_backup.py (project: YellowFin_Pytorch, author: JianGoForIt)
def get_cubic_root(self):
    # We have the equation x^2 D^2 + (1-x)^4 * C / h_min^2
    # where x = sqrt(mu).
    # We substitute x, which is sqrt(mu), with x = y + 1.
    # It gives y^3 + py = q
    # where p = (D^2 h_min^2)/(2*C) and q = -p.
    # We use Vieta's substitution to compute the root.
    # There is only one real solution y (which is in [0, 1] ).
    # http://mathworld.wolfram.com/VietasSubstitution.html
    # eps in the numerator is to prevent momentum = 1 in case of zero gradient
    p = (self._dist_to_opt + eps)**2 * (self._h_min + eps)**2 / 2 / (self._grad_var + eps)
    w3 = (-math.sqrt(p**2 + 4.0 / 27.0 * p**3) - p) / 2.0
    w = math.copysign(1.0, w3) * math.pow(math.fabs(w3), 1.0/3.0)
    y = w - p / 3.0 / (w + eps)
    x = y + 1

    if DEBUG:
      logging.debug("p %f, den %f", p, self._grad_var + eps)
      logging.debug("w3 %f ", w3)
      logging.debug("y %f, den %f", y, w + eps)

    return x
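The closed form can be cross-checked numerically: y = w - p/(3w) should match the single real root of y^3 + p*y + p = 0. A small verification sketch with a hypothetical p:

import math
import numpy as np

p = 0.37  # hypothetical value of (D^2 h_min^2) / (2 C)
w3 = (-math.sqrt(p**2 + 4.0 / 27.0 * p**3) - p) / 2.0
w = math.copysign(1.0, w3) * math.pow(math.fabs(w3), 1.0 / 3.0)
y = w - p / 3.0 / w
roots = np.roots([1.0, 0.0, p, p])      # coefficients of y^3 + 0*y^2 + p*y + p
print(y, roots[np.isreal(roots)].real)  # the closed form matches the real root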
yellowfin_backup.py (project: YellowFin_Pytorch, author: JianGoForIt)
def update_hyper_param(self):
    for group in self._optimizer.param_groups:
      group['momentum'] = self._mu
      if not self._force_non_inc_step:
        group['lr'] = min(self._lr * self._lr_factor, 
          self._lr_grad_norm_thresh / (math.sqrt(self._global_state["grad_norm_squared"] ) + eps) )
      elif self._iter > self._curv_win_width:
        # Force a guarantee that lr * grad_norm does not increase
        # dramatically. Not necessary for basic use. Please refer to
        # the comments in YFOptimizer.__init__ for more details.
        self.lr_grad_norm_avg()
        debias_factor = self.zero_debias_factor()
        group['lr'] = min(self._lr * self._lr_factor,
          2.0 * self._global_state["lr_grad_norm_avg_min"] \
          / (np.sqrt(np.exp(self._global_state['grad_norm_squared_avg_log'] / debias_factor) ) + eps) )
    return
layers.py (project: fxnn, author: khaotik)
def lyr_linear(
        self, name_,
        s_x_,
        idim_, odim_,
        init_=None, bias_=0., params_di_='params'):
    '''
    Dense matrix multiplication, optionally adding a bias vector.
    '''
    name_W = name_ + '_w'
    name_B = name_ + '_b'
    self.set_vars(params_di_)
    if init_ is None:
        # Xavier-style scale from combined fan-in and fan-out
        init_ = dict(init_=[1.4 / sqrt(idim_ + odim_)])
    v_W = self.get_variable(name_W, (idim_, odim_), **init_)
    if bias_ is None:
        s_ret = T.dot(s_x_, v_W)
    else:
        v_B = self.get_variable(name_B, (odim_,), bias_)
        s_ret = T.dot(s_x_, v_W) + v_B
    return s_ret
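Stripped of the framework plumbing (set_vars, get_variable, Theano's T.dot), the layer computes x @ W + b with a Xavier-style uniform scale; a framework-free NumPy sketch with illustrative names:

import numpy as np

def linear_layer(x, idim, odim, seed=0):
    rng = np.random.default_rng(seed)
    scale = 1.4 / np.sqrt(idim + odim)   # same scale as init_ above
    W = rng.uniform(-scale, scale, size=(idim, odim))
    b = np.zeros(odim)
    return x @ W + b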
rasterfairy.py (project: RasterFairy, author: Quasimondo)
def rasterMaskToGrid(rasterMask):
    grid = []
    mask = rasterMask['mask']
    for y in range(rasterMask['height']):
        for x in range(rasterMask['width']):
            if mask[y, x] == 0:
                grid.append([x, y])

    grid = np.array(grid, dtype=float)
    if rasterMask['hex'] is True:
        # Hexagonal packing: shift every other row by half a cell and
        # compress rows vertically by sqrt(3)/2.
        f = math.sqrt(3.0) / 2.0
        offset = -0.5
        if np.argmin(rasterMask['mask'][0]) > np.argmin(rasterMask['mask'][1]):
            offset = 0.5
        for i in range(len(grid)):
            if grid[i][1] % 2.0 == 0.0:
                grid[i][0] -= offset
            grid[i][1] *= f
    return grid
rasterfairy.py (project: RasterFairy, author: Quasimondo)
def getBestCircularMatch(n):
    bestc = n*2
    bestr = 0
    bestrp = 0.0

    minr = int(math.sqrt(n / math.pi))
    for rp in range(0,10):
        rpf = float(rp)/10.0
        for r in range(minr,minr+3):
            rlim = (r+rpf)*(r+rpf)
            c = 0
            for y in range(-r,r+1):
                yy = y*y
                for x in range(-r,r+1):
                    if x*x+yy<rlim:
                        c+=1
            if c == n:
                return r,rpf,c

            if c>n and c < bestc:
                bestrp = rpf
                bestr = r
                bestc = c
    return bestr,bestrp,bestc
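A quick call sketch: the search starts from the radius of a circle whose area is n and widens until the cell count reaches or passes n:

r, rpf, c = getBestCircularMatch(100)
print(r, rpf, c)  # integer radius, fractional radius adjustment, cell count c >= 100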
utils.py (project: spyking-circus, author: spyking-circus)
def fit_transform(self, X, y=None):
        """Fit the model with X and apply the dimensionality reduction on X.
        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples
            and n_features is the number of features.
        Returns
        -------
        X_new : array-like, shape (n_samples, n_components)
        """
        U, S, V = self._fit(X)
        U = U[:, :int(self.n_components_)]

        if self.whiten:
            # X_new = X * V / S * sqrt(n_samples) = U * sqrt(n_samples)
            U *= sqrt(X.shape[0])
        else:
            # X_new = X * V = U * S * V^T * V = U * S
            U *= S[:int(self.n_components_)]

        return U
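The whitening branch rescales components to unit variance; a small NumPy check of the identity in the comment (this mirrors the formula, not the class's full pipeline):

import numpy as np

X = np.random.randn(200, 5)
X -= X.mean(axis=0)                        # PCA assumes centered data
U, S, Vt = np.linalg.svd(X, full_matrices=False)
X_white = U * np.sqrt(X.shape[0])          # X_new = U * sqrt(n_samples)
print(X_white.std(axis=0))                 # each component has ~unit variance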
utils.py (project: spyking-circus, author: spyking-circus)
def get_covariance(self):
        """Compute data covariance with the generative model.
        ``cov = components_.T * S**2 * components_ + sigma2 * eye(n_features)``
        where  S**2 contains the explained variances.
        Returns
        -------
        cov : array, shape=(n_features, n_features)
            Estimated covariance of data.
        """
        components_ = self.components_
        exp_var = self.explained_variance_
        if self.whiten:
            components_ = components_ * np.sqrt(exp_var[:, np.newaxis])
        exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.)
        cov = np.dot(components_.T * exp_var_diff, components_)
        cov.flat[::len(cov) + 1] += self.noise_variance_  # modify diag inplace
        return cov
utils.py (project: spyking-circus, author: spyking-circus)
def transform(self, X):
        """Apply the dimensionality reduction on X.
        X is projected on the first principal components previous extracted
        from a training set.
        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            New data, where n_samples is the number of samples
            and n_features is the number of features.
        Returns
        -------
        X_new : array-like, shape (n_samples, n_components)
        """
        check_is_fitted(self, 'mean_')

        X = check_array(X)
        if self.mean_ is not None:
            X = X - self.mean_
        X_transformed = np.dot(X, self.components_.T)
        if self.whiten:
            X_transformed /= np.sqrt(self.explained_variance_)
        return X_transformed
labels_rbm.py (project: youtube-8m, author: wangheda)
def create_model(self, model_input, vocab_size, l2_penalty=1e-8, **unused_params):
        """Creates a logistic model.

        Args:
          model_input: 'batch' x 'num_features' matrix of input features.
          vocab_size: The number of classes in the dataset.

        Returns:
          A dictionary with a tensor containing the probability predictions of the
          model in the 'predictions' key. The dimensions of the tensor are
          batch_size x num_classes."""

        input_size = vocab_size
        output_size = FLAGS.hidden_size
        with tf.name_scope("rbm"):
            self.weights = tf.Variable(tf.truncated_normal([input_size, output_size],
                                    stddev=1.0 / math.sqrt(float(input_size))), name="weights")
            self.v_bias = tf.Variable(tf.zeros([input_size]), name="v_bias")
            self.h_bias = tf.Variable(tf.zeros([output_size]), name="h_bias")
            tf.add_to_collection(name=tf.GraphKeys.REGULARIZATION_LOSSES, value=l2_penalty*tf.nn.l2_loss(self.weights))
            tf.add_to_collection(name=tf.GraphKeys.REGULARIZATION_LOSSES, value=l2_penalty*tf.nn.l2_loss(self.v_bias))
            tf.add_to_collection(name=tf.GraphKeys.REGULARIZATION_LOSSES, value=l2_penalty*tf.nn.l2_loss(self.h_bias))
model.py (project: hippylib, author: hippylib)
def evalGradientParameter(self,x, mg):
        """
        Evaluate the gradient for the variational parameter equation at the point x=[u,a,p].
        Parameters:
        - x = [u,a,p] the point at which to evaluate the gradient.
        - mg the variational gradient (g, atest) being atest a test function in the parameter space
          (Output parameter)

        Returns the norm of the gradient in the correct inner product g_norm = sqrt(g,g)
        """ 
        self.prior.grad(x[PARAMETER], mg)
        tmp = self.generate_vector(PARAMETER)
        self.problem.eval_da(x, tmp)
        mg.axpy(1., tmp)
        self.misfit.grad(PARAMETER,x,tmp)
        mg.axpy(1., tmp)
        self.prior.Msolver.solve(tmp, mg)
        #self.prior.Rsolver.solve(tmp, mg)
        return math.sqrt(mg.inner(tmp))
cumulative.py (project: zipline-chinese, author: zhanghan1990)
def information_ratio(algo_volatility, algorithm_return, benchmark_return):
    """
    http://en.wikipedia.org/wiki/Information_ratio

    Args:
        algo_volatility (float):
            Annualized volatility of the algorithm's returns.
        algorithm_return (float):
            Return of the algorithm (annualized).
        benchmark_return (float):
            Return of the benchmark (annualized).

    Returns:
        float. Information ratio.
    """
    if zp_math.tolerant_equals(algo_volatility, 0):
        return np.nan

    # The square of the annualization factor is in the volatility,
    # because the volatility is also annualized, i.e. the
    # sqrt(annual factor) is in the volatility's numerator. So to get
    # the correct annualization factor for the Sharpe value's
    # numerator, which should be the sqrt(annual factor), the annual
    # factor itself is needed in the numerator to factor out the
    # division by its square root.
    return (algorithm_return - benchmark_return) / algo_volatility
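A hypothetical call (numpy and zipline's zp_math must be in scope): with a 12% algorithm return, an 8% benchmark return, and 15% volatility, the ratio is (0.12 - 0.08) / 0.15:

print(information_ratio(0.15, 0.12, 0.08))  # ≈ 0.2667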
random.py (project: kinect-2-libras, author: inessadl)
def _test_generator(n, func, args):
    import time
    from math import sqrt as _sqrt  # module-level alias in the original random.py
    print n, 'times', func.__name__
    total = 0.0
    sqsum = 0.0
    smallest = 1e10
    largest = -1e10
    t0 = time.time()
    for i in range(n):
        x = func(*args)
        total += x
        sqsum = sqsum + x*x
        smallest = min(x, smallest)
        largest = max(x, largest)
    t1 = time.time()
    print round(t1-t0, 3), 'sec,',
    avg = total/n
    stddev = _sqrt(sqsum/n - avg*avg)
    print 'avg %g, stddev %g, min %g, max %g' % \
              (avg, stddev, smallest, largest)
recommendations.py (project: Programming-Collective-Intelligence, author: clyyuanzi)
def sim_distance(prefs, person1, person2):
    # Collect the set of items rated by both people.
    si = {}
    for item in prefs[person1]:
        if item in prefs[person2]:
            si[item] = 1

    # If they have no ratings in common, return 0.
    if len(si) == 0:
        return 0

    # Sum the squared differences over the shared items.
    sum_of_squares = sum([pow(prefs[person1][item] - prefs[person2][item], 2)
                          for item in si])

    return 1 / (1 + sqrt(sum_of_squares))
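A quick usage sketch with a toy preference dictionary (the data is made up for illustration):

from math import sqrt

prefs = {
    'Alice': {'A': 4.0, 'B': 3.0, 'C': 5.0},
    'Bob':   {'A': 4.5, 'B': 2.5, 'C': 4.0},
}
print(sim_distance(prefs, 'Alice', 'Bob'))  # 1 / (1 + sqrt(1.5)) ≈ 0.45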
clusters.py (project: Programming-Collective-Intelligence, author: clyyuanzi)
def pearson(v1, v2):
    sum1 = sum(v1)
    sum2 = sum(v2)

    sum1Sq = sum([pow(v, 2) for v in v1])
    sum2Sq = sum([pow(v, 2) for v in v2])

    length = len(v1) if len(v1) < len(v2) else len(v2)
    pSum = sum([v1[i] * v2[i] for i in range(length)])

    # Pearson score: covariance over the product of standard deviations.
    numerator = pSum - sum1 * sum2 / len(v1)
    denominator = sqrt((sum1Sq - pow(sum1, 2) / len(v1)) *
                       (sum2Sq - pow(sum2, 2) / len(v1)))
    if denominator == 0:
        return 0

    # Return a distance (0 = perfectly correlated) rather than the raw score.
    return 1 - numerator / denominator
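Because the function returns a distance rather than the raw Pearson score, identical vectors give 0 and perfectly anti-correlated vectors give 2:

from math import sqrt

print(pearson([1, 2, 3, 4], [1, 2, 3, 4]))  # 0.0: perfect correlation
print(pearson([1, 2, 3, 4], [4, 3, 2, 1]))  # 2.0: perfect anti-correlation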
bezierTools.py (project: otRebuilder, author: Pal3love)
def approximateQuadraticArcLengthC(pt1, pt2, pt3):
    # Approximate length of quadratic Bezier curve using Gauss-Legendre quadrature
    # with n=3 points for complex points.
    #
    # This, essentially, approximates the length-of-derivative function
    # to be integrated with the best-matching fifth-degree polynomial
    # approximation of it.
    #
    #https://en.wikipedia.org/wiki/Gaussian_quadrature#Gauss.E2.80.93Legendre_quadrature

    # abs(BezierCurveC[2].diff(t).subs({t:T})) for T in sorted(.5, .5±sqrt(3/5)/2),
    # weighted 5/18, 8/18, 5/18 respectively.
    v0 = abs(-0.492943519233745*pt1 + 0.430331482911935*pt2 + 0.0626120363218102*pt3)
    v1 = abs(pt3-pt1)*0.4444444444444444
    v2 = abs(-0.0626120363218102*pt1 - 0.430331482911935*pt2 + 0.492943519233745*pt3)

    return v0 + v1 + v2
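When the three control points are collinear and evenly spaced, the curve degenerates to a straight segment and the three weighted samples sum to its exact length; a quick check with complex points on the real line:

print(approximateQuadraticArcLengthC(0j, 0.5 + 0j, 1 + 0j))  # 1.0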
restricted_gaussian_sampler.py (project: pycma, author: CMA-ES)
def __init__(self, dimension, randn=np.random.randn, debug=False):
        """pass dimension of the underlying sample space
        """
        try:
            self.N = len(dimension)
            std_vec = np.array(dimension, copy=True)
        except TypeError:
            self.N = dimension
            std_vec = np.ones(self.N)
        if self.N < 10:
            print('Warning: Not advised to use VD-CMA for dimension < 10.')
        self.randn = randn
        self.dvec = std_vec
        self.vvec = self.randn(self.N) / math.sqrt(self.N)
        self.norm_v2 = np.dot(self.vvec, self.vvec)
        self.norm_v = np.sqrt(self.norm_v2)
        self.vn = self.vvec / self.norm_v
        self.vnn = self.vn**2
        self.pc = np.zeros(self.N)
        self._debug = debug  # plot covariance matrix
shape.py (project: sc8pr, author: dmaccarthy)
def intersectCircle(self, other):
        "Find the intersection(s) of two circles as list of points"
        R = self.radius
        r = other.radius
        d = dist(self.pos, other.pos)
        if d > r + R or d == 0 or d < abs(r - R): return []
        r2 = r * r
        x = (d*d + r2 - R*R) / (2*d)
        ux, uy = delta(self.pos, other.pos, 1)
        x0, y0 = other.pos
        x0 += x * ux
        y0 += x * uy
        if x < r:
            y = sqrt(r2 - x*x)
            return [(x0 - y * uy, y0 + y * ux), (x0 + y * uy, y0 - y * ux)]
        else: return [(x0, y0)]
effect.py (project: sc8pr, author: dmaccarthy)
def eqn(self, x, n, size):
        "Calculate paint boundary"
        if not self.side: n = 1 - n
        w, h = size
        y = 0
        xc = 0
        for d in self.drops:
            r = d[0] * w / 2
            R = 1.1 * r
            xc += r
            dx = abs(x - xc)
            if dx <= R:
                dy = sqrt(R * R - dx * dx)
                Y = (h + R) * self.posn(n, *d[1:]) + dy - R
                if Y > y: y = Y
            xc += r
        return y, self.side
recovery.py (project: astrobase, author: waqasbhatti)
def matthews_correl_coeff(ntp, ntn, nfp, nfn):
    '''
    This calculates the Matthews correlation coefficent.

    https://en.wikipedia.org/wiki/Matthews_correlation_coefficient

    '''

    mcc_top = (ntp*ntn - nfp*nfn)
    mcc_bot = msqrt((ntp + nfp)*(ntp + nfn)*(ntn + nfp)*(ntn + nfn))

    if mcc_bot > 0:
        return mcc_top/mcc_bot
    else:
        return np.nan
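A hypothetical confusion matrix with 90 true positives, 80 true negatives, 20 false positives, and 10 false negatives (msqrt is assumed to alias math.sqrt, as in the source module):

print(matthews_correl_coeff(90, 80, 20, 10))  # ≈ 0.70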
utils.py (project: DeepWorks, author: daigo0927)
def combine_images(generated_images):
    # Tile a batch of images into one roughly square grid image.
    total, height, width, ch = generated_images.shape
    cols = int(math.sqrt(total))
    rows = math.ceil(float(total) / cols)

    combined_image = np.zeros((height * rows, width * cols, ch),
                              dtype=generated_images.dtype)

    for index, image in enumerate(generated_images):
        i = index // cols
        j = index % cols
        combined_image[height * i:height * (i + 1),
                       width * j:width * (j + 1), :] = image

    return combined_image
transformations.py (project: Neural-Networks-for-Inverse-Kinematics, author: paramrajpura)
def quaternion_matrix(quaternion):
    """Return homogeneous rotation matrix from quaternion.

    >>> M = quaternion_matrix([0.99810947, 0.06146124, 0, 0])
    >>> numpy.allclose(M, rotation_matrix(0.123, [1, 0, 0]))
    True
    >>> M = quaternion_matrix([1, 0, 0, 0])
    >>> numpy.allclose(M, numpy.identity(4))
    True
    >>> M = quaternion_matrix([0, 1, 0, 0])
    >>> numpy.allclose(M, numpy.diag([1, -1, -1, 1]))
    True

    """
    q = numpy.array(quaternion, dtype=numpy.float64, copy=True)
    n = numpy.dot(q, q)
    if n < _EPS:
        return numpy.identity(4)
    q *= math.sqrt(2.0 / n)
    q = numpy.outer(q, q)
    return numpy.array([
        [1.0-q[2, 2]-q[3, 3],     q[1, 2]-q[3, 0],     q[1, 3]+q[2, 0], 0.0],
        [    q[1, 2]+q[3, 0], 1.0-q[1, 1]-q[3, 3],     q[2, 3]-q[1, 0], 0.0],
        [    q[1, 3]-q[2, 0],     q[2, 3]+q[1, 0], 1.0-q[1, 1]-q[2, 2], 0.0],
        [                0.0,                 0.0,                 0.0, 1.0]])

