Example source code for Python sqrt()

recognition_utils.py (project: pybot, author: spillai)
def im_detect_and_describe(img, mask=None, detector='dense', descriptor='SIFT', colorspace='gray',
                           step=4, levels=7, scale=np.sqrt(2)): 
    """ 
    Describe image using dense sampling / specific detector-descriptor combination. 
    """
    detector = get_detector(detector=detector, step=step, levels=levels, scale=scale)
    extractor = cv2.DescriptorExtractor_create(descriptor)

    try:     
        kpts = detector.detect(img, mask=mask)
        kpts, desc = extractor.compute(img, kpts)

        if descriptor == 'SIFT': 
            kpts, desc = root_sift(kpts, desc)

        pts = np.vstack([kp.pt for kp in kpts]).astype(np.int32)
        return pts, desc

    except Exception as e: 
        print('im_detect_and_describe', e)
        return None, None
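The `root_sift` helper applied to SIFT descriptors above is not shown on this page. A minimal sketch of the usual RootSIFT transform (L1-normalize each descriptor, then take the element-wise square root); this is an assumption about its behavior, not necessarily the pybot implementation:

import numpy as np

def root_sift(kpts, desc, eps=1e-7):
    # L1-normalize each descriptor row, then take the element-wise square root
    desc = desc / (np.abs(desc).sum(axis=1, keepdims=True) + eps)
    return kpts, np.sqrt(desc)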
data_preparation.py (project: sea-lion-counter, author: rdinse)
def compHistDistance(h1, h2):
    def normalize(h):
        if np.sum(h) == 0:
            return h
        else:
            return h / np.sum(h)

    def smoothstep(x, x_min=0., x_max=1., k=2.):
        m = 1. / (x_max - x_min)
        b = - m * x_min
        x = m * x + b
        return betainc(k, k, np.clip(x, 0., 1.))

    def fn(X, Y, k):
        return 4. * (1. - smoothstep(Y, 0, (1 - Y) * X + Y + .1)) \
            * np.sqrt(2 * X) * smoothstep(X, 0., 1. / k, 2) \
            + 2. * smoothstep(Y, 0, (1 - Y) * X + Y + .1) \
            * (1. - 2. * np.sqrt(2 * X) * smoothstep(X, 0., 1. / k, 2) - 0.5)

    h1 = normalize(h1)
    h2 = normalize(h2)

    return max(0, np.sum(fn(h2, h1, len(h1))))
    # return np.sum(np.where(h2 != 0, h2 * np.log10(h2 / (h1 + 1e-10)), 0))  # KL divergence
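A hedged usage sketch: the function needs numpy and scipy.special.betainc in scope, and the histogram values below are made up purely for illustration.

import numpy as np
from scipy.special import betainc

h_pred = np.array([3., 1., 0., 2.])
h_true = np.array([2., 2., 1., 1.])
d = compHistDistance(h_pred, h_true)  # smoothed, non-negative histogram distance
print(d)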
mog6.py (project: a-nice-mc, author: ermongroup)
def __call__(self, z):
        z1 = tf.reshape(tf.slice(z, [0, 0], [-1, 1]), [-1])
        z2 = tf.reshape(tf.slice(z, [0, 1], [-1, 1]), [-1])
        v1 = tf.sqrt((z1 - 5) * (z1 - 5) + z2 * z2) * 2
        v2 = tf.sqrt((z1 + 5) * (z1 + 5) + z2 * z2) * 2
        v3 = tf.sqrt((z1 - 2.5) * (z1 - 2.5) + (z2 - 2.5 * np.sqrt(3)) * (z2 - 2.5 * np.sqrt(3))) * 2
        v4 = tf.sqrt((z1 + 2.5) * (z1 + 2.5) + (z2 + 2.5 * np.sqrt(3)) * (z2 + 2.5 * np.sqrt(3))) * 2
        v5 = tf.sqrt((z1 - 2.5) * (z1 - 2.5) + (z2 + 2.5 * np.sqrt(3)) * (z2 + 2.5 * np.sqrt(3))) * 2
        v6 = tf.sqrt((z1 + 2.5) * (z1 + 2.5) + (z2 - 2.5 * np.sqrt(3)) * (z2 - 2.5 * np.sqrt(3))) * 2
        pdf1 = tf.exp(-0.5 * v1 * v1) / tf.sqrt(2 * np.pi * 0.25)
        pdf2 = tf.exp(-0.5 * v2 * v2) / tf.sqrt(2 * np.pi * 0.25)
        pdf3 = tf.exp(-0.5 * v3 * v3) / tf.sqrt(2 * np.pi * 0.25)
        pdf4 = tf.exp(-0.5 * v4 * v4) / tf.sqrt(2 * np.pi * 0.25)
        pdf5 = tf.exp(-0.5 * v5 * v5) / tf.sqrt(2 * np.pi * 0.25)
        pdf6 = tf.exp(-0.5 * v6 * v6) / tf.sqrt(2 * np.pi * 0.25)
        return -tf.log((pdf1 + pdf2 + pdf3 + pdf4 + pdf5 + pdf6) / 6)
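The six modes are Gaussians with standard deviation 0.5, evenly spaced on a circle of radius 5. As a sanity check, a NumPy-only rewrite of the same energy (my own sketch, not part of the a-nice-mc repo) could look like this:

import numpy as np

def mog6_energy(z):
    # z: (batch, 2); six Gaussian modes (std 0.5) evenly spaced on a circle of radius 5
    angles = np.pi / 3 * np.arange(6)
    centers = 5.0 * np.stack([np.cos(angles), np.sin(angles)], axis=1)  # (6, 2)
    d = np.linalg.norm(z[:, None, :] - centers[None, :, :], axis=-1)    # (batch, 6)
    pdfs = np.exp(-0.5 * (2 * d) ** 2) / np.sqrt(2 * np.pi * 0.25)
    return -np.log(pdfs.mean(axis=1))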
LinUCBAgentClass.py (project: simple_rl, author: david-abel)
def _compute_score(self, context):
        '''
        Args:
            context (list)

        Returns:
            (dict):
                K (str): action
                V (float): score
        '''

        a_inv = self.model['act_inv']
        theta = self.model['theta']

        estimated_reward = {}
        uncertainty = {}
        score_dict = {}
        max_score = 0
        for action_id in range(len(self.actions)):
            action_context = np.reshape(context[action_id], (-1, 1))
            estimated_reward[action_id] = float(theta[action_id].T.dot(action_context))
            uncertainty[action_id] = float(self.alpha * np.sqrt(action_context.T.dot(a_inv[action_id]).dot(action_context)))
            score_dict[action_id] = estimated_reward[action_id] + uncertainty[action_id]

        return score_dict
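The per-action score is the standard LinUCB upper confidence bound: theta^T x plus alpha * sqrt(x^T A_inv x). A small self-contained sketch of the same computation for one action (names and numbers are illustrative, not the simple_rl API):

import numpy as np

alpha = 1.0
x = np.array([[0.2], [0.5], [0.3]])      # context for one action, column vector
A_inv = np.eye(3)                        # inverse design matrix for that action
theta = np.array([[0.1], [0.4], [0.2]])  # ridge-regression weights

reward = float(theta.T.dot(x))
bonus = float(alpha * np.sqrt(x.T.dot(A_inv).dot(x)))
score = reward + bonus                   # upper confidence bound for this action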
rulsif.py (project: shift-detect, author: paolodedios)
def getMedianDistanceBetweenSamples(self, sampleSet=None) :
        """
        Jaakkola's heuristic method for setting the width parameter of the Gaussian
        radial basis function kernel is to pick a quantile (usually the median) of
        the distribution of Euclidean distances between points having different
        labels.

        Reference:
        T. Jaakkola, M. Diekhans, and D. Haussler. Using the Fisher kernel method to detect
        remote protein homologies. In T. Lengauer, R. Schneider, P. Bork, D. Brutlag, J.
        Glasgow, H.-W. Mewes, and R. Zimmer, editors, Proceedings of the Seventh
        International Conference on Intelligent Systems for Molecular Biology.
        """
        numrows = sampleSet.shape[0]
        samples = sampleSet

        G = numpy.sum(samples * samples, axis=1)
        Q = numpy.tile(G[:, None], (1, numrows))
        R = numpy.tile(G, (numrows, 1))

        distances = Q + R - 2 * numpy.dot(samples, samples.T)
        distances = distances - numpy.tril(distances)
        distances = distances.reshape(numrows**2, 1, order="F").copy()

        return numpy.sqrt(0.5 * numpy.median(distances[distances > 0]))
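A hedged usage sketch of the median heuristic as an RBF kernel width; `estimator` stands in for an instance of the surrounding RuLSIF-style class (assumed, not shown on this page), and the data is random:

import numpy
X = numpy.random.randn(200, 5)
sigma = estimator.getMedianDistanceBetweenSamples(sampleSet=X)
# Gaussian RBF kernel with the median-heuristic width
sqdist = ((X[:, None, :] - X[None, :, :]) ** 2).sum(-1)
K = numpy.exp(-sqdist / (2 * sigma ** 2))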
datasets.py (project: deep_architect, author: negrinho)
def per_image_whiten(X):
    """ Subtracts the mean of each image in X and renormalizes them to unit norm.

    """
    num_examples, height, width, depth = X.shape

    X_flat = X.reshape((num_examples, -1))
    X_mean = X_flat.mean(axis=1)
    X_cent = X_flat - X_mean[:, None]
    X_norm = np.sqrt( np.sum( X_cent * X_cent, axis=1) ) 
    X_out = X_cent / X_norm[:, None]
    X_out = X_out.reshape(X.shape) 

    return X_out

# Assumes the following ordering for X: (num_images, height, width, num_channels)
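Example use on a random batch with the ordering noted in the comment above; after whitening, each flattened image has zero mean and unit L2 norm:

import numpy as np

X = np.random.rand(8, 32, 32, 3).astype(np.float32)  # (num_images, height, width, num_channels)
X_white = per_image_whiten(X)
norms = np.linalg.norm(X_white.reshape(8, -1), axis=1)
print(np.allclose(norms, 1.0, atol=1e-4))             # each flattened image has unit L2 norm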
modules.py (project: deep_architect, author: negrinho)
def compile(self, in_x, train_feed, eval_feed):
        n = np.product(self.in_d)
        m, param_init_fn = [dom[i] for (dom, i) in zip(self.domains, self.chosen)]

        #sc = np.sqrt(6.0) / np.sqrt(m + n)
        #W = tf.Variable(tf.random_uniform([n, m], -sc, sc))
        W = tf.Variable( param_init_fn( [n, m] ) )
        b = tf.Variable(tf.zeros([m]))

        # if the number of input dimensions is larger than one, flatten the 
        # input and apply the affine transformation. 
        if len(self.in_d) > 1:
            in_x_flat = tf.reshape(in_x, shape=[-1, n])
            out_y = tf.add(tf.matmul(in_x_flat, W), b)
        else:
            out_y = tf.add(tf.matmul(in_x, W), b)
        return out_y
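The commented-out lines correspond to the Glorot/Xavier uniform initialization. A hedged sketch of a param_init_fn in that style (an assumption for illustration, not necessarily what deep_architect's search space provides), using the same TF1-style API as the snippet:

import numpy as np
import tensorflow as tf

def glorot_uniform_init(shape):
    # shape = [n, m]; scale follows sc = sqrt(6) / sqrt(m + n) as in the comment above
    n, m = shape
    sc = np.sqrt(6.0) / np.sqrt(m + n)
    return tf.random_uniform(shape, -sc, sc)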

# computes the output dimension based on the padding scheme used.
# this comes from the tensorflow documentation
sigma_adaptation.py (project: pycma, author: CMA-ES)
def _update_ps(self, es):
        if not self.is_initialized:
            self.initialize(es)
        if self._ps_updated_iteration == es.countiter:
            return
        z = es.sm.transform_inverse((es.mean - es.mean_old) / es.sigma_vec.scaling)
        # works unless a re-parametrisation has been done
        # assert Mh.vequals_approximately(z, np.dot(es.B, (1. / es.D) *
        #         np.dot(es.B.T, (es.mean - es.mean_old) / es.sigma_vec)))
        z *= es.sp.weights.mueff**0.5 / es.sigma / es.sp.cmean
        # zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz
        if es.opts['CSA_clip_length_value'] is not None:
            vals = es.opts['CSA_clip_length_value']
            min_len = es.N**0.5 + vals[0] * es.N / (es.N + 2)
            max_len = es.N**0.5 + vals[1] * es.N / (es.N + 2)
            act_len = sum(z**2)**0.5
            new_len = Mh.minmax(act_len, min_len, max_len)
            if new_len != act_len:
                z *= new_len / act_len
                # z *= (es.N / sum(z**2))**0.5  # ==> sum(z**2) == es.N
                # z *= es.const.chiN / sum(z**2)**0.5
        self.ps = (1 - self.cs) * self.ps + np.sqrt(self.cs * (2 - self.cs)) * z
        self._ps_updated_iteration = es.countiter
evolution_strategy.py (project: pycma, author: CMA-ES)
def result_pretty(self, number_of_runs=0, time_str=None,
                      fbestever=None):
        """pretty print result.

        Returns `result` of ``self``.

        """
        if fbestever is None:
            fbestever = self.best.f
        s = (' after %i restart' + ('s' if number_of_runs > 1 else '')) \
            % number_of_runs if number_of_runs else ''
        for k, v in self.stop().items():
            print('termination on %s=%s%s' % (k, str(v), s +
                  (' (%s)' % time_str if time_str else '')))

        print('final/bestever f-value = %e %e' % (self.best.last.f,
                                                  fbestever))
        if self.N < 9:
            print('incumbent solution: ' + str(list(self.gp.pheno(self.mean, into_bounds=self.boundary_handler.repair))))
            print('std deviation: ' + str(list(self.sigma * self.sigma_vec.scaling * np.sqrt(self.dC) * self.gp.scales)))
        else:
            print('incumbent solution: %s ...]' % (str(self.gp.pheno(self.mean, into_bounds=self.boundary_handler.repair)[:8])[:-1]))
            print('std deviations: %s ...]' % (str((self.sigma * self.sigma_vec.scaling * np.sqrt(self.dC) * self.gp.scales)[:8])[:-1]))
        return self.result
evolution_strategy.py (project: pycma, author: CMA-ES)
def isotropic_mean_shift(self):
        """normalized last mean shift, under random selection N(0,I)

        distributed.

        Caveat: while it is finite and close to sqrt(n) under random
        selection, the length of the normalized mean shift under
        *systematic* selection (e.g. on a linear function) tends to
        infinity for mueff -> infty. Hence it must be used with great
        care for large mueff.
        """
        z = self.sm.transform_inverse((self.mean - self.mean_old) /
                                      self.sigma_vec.scaling)
        # works unless a re-parametrisation has been done
        # assert Mh.vequals_approximately(z, np.dot(es.B, (1. / es.D) *
        #         np.dot(es.B.T, (es.mean - es.mean_old) / es.sigma_vec)))
        z /= self.sigma * self.sp.cmean
        z *= self.sp.weights.mueff**0.5
        return z
restricted_gaussian_sampler.py (project: pycma, author: CMA-ES)
def __init__(self, dimension, randn=np.random.randn, debug=False):
        """pass dimension of the underlying sample space
        """
        try:
            self.N = len(dimension)
            std_vec = np.array(dimension, copy=True)
        except TypeError:
            self.N = dimension
            std_vec = np.ones(self.N)
        if self.N < 10:
            print('Warning: Not advised to use VD-CMA for dimension < 10.')
        self.randn = randn
        self.dvec = std_vec
        self.vvec = self.randn(self.N) / math.sqrt(self.N)
        self.norm_v2 = np.dot(self.vvec, self.vvec)
        self.norm_v = np.sqrt(self.norm_v2)
        self.vn = self.vvec / self.norm_v
        self.vnn = self.vn**2
        self.pc = np.zeros(self.N)
        self._debug = debug  # plot covariance matrix
bbobbenchmarks.py (project: pycma, author: CMA-ES)
def _evalfull(self, x):
        fadd = self.fopt
        curshape, dim = self.shape_(x)
        # it is assumed x are row vectors

        if self.lastshape != curshape:
            self.initwithsize(curshape, dim)

        # BOUNDARY HANDLING

        # TRANSFORMATION IN SEARCH SPACE
        x = x - self.arrxopt
        x = monotoneTFosc(x)
        idx = (x > 0)
        x[idx] = x[idx] ** (1 + self.arrexpo[idx] * np.sqrt(x[idx]))
        x = self.arrscales * x

        # COMPUTATION core
        ftrue = 10 * (self.dim - np.sum(np.cos(2 * np.pi * x), -1)) + np.sum(x ** 2, -1)
        fval = self.noise(ftrue) # without noise

        # FINALIZE
        ftrue += fadd
        fval += fadd
        return fval, ftrue
tdose_utilities.py (project: TDOSE, author: kasperschmidt)
def normalize_2D_cov_matrix(covmatrix,verbose=True):
    """
    Calculate the normalization factor for a multivariate gaussian from its covariance matrix.
    Note, however, that the gaussian returned by tu.gen_2Dgauss() is normalized for scale=1.

    --- INPUT ---
    covmatrix       covariance matrix to normalize
    verbose         Toggle verbosity

    """
    detcov  = np.linalg.det(covmatrix)
    normfac = 1.0 / (2.0 * np.pi * np.sqrt(detcov) )

    return normfac
# = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
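Quick check of the formula: for an isotropic covariance sigma^2 * I the determinant is sigma^4, so the factor reduces to 1 / (2 * pi * sigma^2).

import numpy as np

sigma = 2.0
cov = np.diag([sigma**2, sigma**2])
print(normalize_2D_cov_matrix(cov, verbose=False))  # ~ 1 / (2*pi*sigma**2) ≈ 0.0398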
domain.py (project: pyballd, author: Yurlungur)
def get_dXdr(self,X):
        "Derivative of compactified coordinate with respect to radial"
        L = self.L
        r_h = self.r_h
        num = ((X-1)**2)*np.sqrt((r_h*(X-1))**2 + (L*(X+1))**2)
        denom = 2*L*L*(1+X)
        dXdr = num/denom
        return dXdr
domain.py (project: pyballd, author: Yurlungur)
def get_x_from_r(self,r):
        "x = 0 when r = rh"
        r_h = self.r_h
        x = np.sqrt(r**2 - r_h**2)
        return x
domain.py (project: pyballd, author: Yurlungur)
def get_r_from_x(self,x):
        "x = 0 when r = rh"
        r_h = self.r_h
        r = np.sqrt(x**2 + r_h**2)
        return r
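The two maps above are inverses of each other for r >= r_h. A small round-trip check, assuming a pyballd domain instance `dom` with `r_h` set (the instance name is hypothetical):

import numpy as np

r = np.linspace(dom.r_h, 10.0, 50)
x = dom.get_x_from_r(r)
assert np.allclose(dom.get_r_from_x(x), r)  # r -> x -> r is the identity for r >= r_h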
orthopoly.py (project: pyballd, author: Yurlungur)
def get_norm2_difference(foo,bar,xmin,xmax):
    """
    Returns sqrt(integral((foo-bar)**2) / (xmax-xmin)) on the interval [xmin,xmax]
    """
    out = integrator(lambda x: (foo(x)-bar(x))**2,xmin,xmax)[0]
    out /= float(xmax-xmin)
    out = np.sqrt(out)
    return out
# ======================================================================


# ======================================================================
# Nodal and Modal Details
# ======================================================================
orthopoly.py (project: pyballd, author: Yurlungur)
def norm2(self,grid_func):
        """Calculates the 2norm of grid_func"""
        factor = np.prod([(s.xmax-s.xmin) for s in self.stencils])
        integral = self.inner_product(grid_func,grid_func) / factor
        norm2 = np.sqrt(integral)
        return norm2
nn.py (project: GELUs, author: hendrycks)
def fit(self, x):
        s = x.shape
        x = x.copy().reshape((s[0],np.prod(s[1:])))
        m = np.mean(x, axis=0)
        x -= m
        sigma = np.dot(x.T,x) / x.shape[0]
        U, S, V = linalg.svd(sigma)
        tmp = np.dot(U, np.diag(1./np.sqrt(S+self.regularization)))
        tmp2 = np.dot(U, np.diag(np.sqrt(S+self.regularization)))
        self.ZCA_mat = th.shared(np.dot(tmp, U.T).astype(th.config.floatX))
        self.inv_ZCA_mat = th.shared(np.dot(tmp2, U.T).astype(th.config.floatX))
        self.mean = th.shared(m.astype(th.config.floatX))
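Because `inv_ZCA_mat` is built with the reciprocal scaling of `ZCA_mat`, the two matrices should multiply to (approximately) the identity. A hedged check, assuming a fitted instance `zca` of this class (the instance name is hypothetical; Theano shared variables expose get_value()):

import numpy as np

W = zca.ZCA_mat.get_value()
W_inv = zca.inv_ZCA_mat.get_value()
print(np.allclose(W.dot(W_inv), np.eye(W.shape[0]), atol=1e-4))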
pylspm.py (project: pylspm, author: lseman)
def normaliza(self, X):
        correction = np.sqrt((len(X) - 1) / len(X))  # std factor correction
        mean_ = np.mean(X, 0)
        scale_ = np.std(X, 0)
        X = X - mean_
        X = X / (scale_ * correction)
        return X

