Python cdist() usage examples (source code)
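The snippets below collect real-world uses of scipy.spatial.distance.cdist, which computes the matrix of pairwise distances between two sets of points. As a quick reference, here is a minimal sketch of the basic call (array shapes are illustrative):

import numpy as np
from scipy.spatial.distance import cdist

A = np.random.rand(5, 3)              # 5 points in 3-D
B = np.random.rand(8, 3)              # 8 points in 3-D
D = cdist(A, B, metric='euclidean')   # shape (5, 8): D[i, j] is the distance from A[i] to B[j]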

random_layer.py (project: SVM-CNN, author: dlmacedo)
def _compute_input_activations(self, X):
        """Compute input activations given X"""

        n_samples = X.shape[0]

        mlp_acts = np.zeros((n_samples, self.n_hidden))
        if (self._use_mlp_input):
            b = self.components_['biases']
            w = self.components_['weights']
            mlp_acts = self.alpha * (safe_sparse_dot(X, w) + b)

        rbf_acts = np.zeros((n_samples, self.n_hidden))
        if (self._use_rbf_input):
            radii = self.components_['radii']
            centers = self.components_['centers']
            scale = self.rbf_width * (1.0 - self.alpha)
            rbf_acts = scale * cdist(X, centers)/radii

        self.input_activations_ = mlp_acts + rbf_acts
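For reference, here is a standalone sketch of the RBF branch above, with plain NumPy arrays standing in for the class attributes (all values are illustrative):

import numpy as np
from scipy.spatial.distance import cdist

X = np.random.rand(10, 3)             # input samples
centers = np.random.rand(5, 3)        # one RBF center per hidden unit
radii = np.ones(5)
alpha, rbf_width = 0.5, 1.0
scale = rbf_width * (1.0 - alpha)
rbf_acts = scale * cdist(X, centers) / radii   # shape (10, 5)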
Utility.py (project: fuku-ml, author: fukuball)
def kernel_matrix_xX(svm_model, original_x, original_X):

        if (svm_model.svm_kernel == 'polynomial_kernel' or svm_model.svm_kernel == 'soft_polynomial_kernel'):
            K = (svm_model.zeta + svm_model.gamma * np.dot(original_x, original_X.T)) ** svm_model.Q
        elif (svm_model.svm_kernel == 'gaussian_kernel' or svm_model.svm_kernel == 'soft_gaussian_kernel'):
            K = np.exp(-svm_model.gamma * (cdist(original_X, np.atleast_2d(original_x), 'euclidean').T ** 2)).ravel()

        '''
        K = np.zeros((svm_model.data_num, svm_model.data_num))

        for i in range(svm_model.data_num):
            for j in range(svm_model.data_num):
                if (svm_model.svm_kernel == 'polynomial_kernel' or svm_model.svm_kernel == 'soft_polynomial_kernel'):
                    K[i, j] = Kernel.polynomial_kernel(svm_model, original_x, original_X[j])
                elif (svm_model.svm_kernel == 'gaussian_kernel' or svm_model.svm_kernel == 'soft_gaussian_kernel'):
                    K[i, j] = Kernel.gaussian_kernel(svm_model, original_x, original_X[j])
        '''

        return K
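A standalone sketch of the Gaussian-kernel row above (gamma and the data are illustrative):

import numpy as np
from scipy.spatial.distance import cdist

gamma = 1.0
x = np.random.rand(2)            # single query point
X = np.random.rand(10, 2)        # training points
K = np.exp(-gamma * (cdist(X, np.atleast_2d(x), 'euclidean').T ** 2)).ravel()   # shape (10,)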
coreg.py (project: decoding_challenge_cortana_2016_3rd, author: kingjr)
def _point_cloud_error(src_pts, tgt_pts):
    """Find the distance from each source point to its closest target point

    Parameters
    ----------
    src_pts : array, shape = (n, 3)
        Source points.
    tgt_pts : array, shape = (m, 3)
        Target points.

    Returns
    -------
    dist : array, shape = (n, )
        For each point in ``src_pts``, the distance to the closest point in
        ``tgt_pts``.
    """
    from scipy.spatial.distance import cdist
    Y = cdist(src_pts, tgt_pts, 'euclidean')
    dist = Y.min(axis=1)
    return dist
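A minimal usage sketch with random point clouds (purely illustrative):

import numpy as np

src = np.random.rand(5, 3)
tgt = np.random.rand(20, 3)
d = _point_cloud_error(src, tgt)   # shape (5,): distance from each source point to its nearest target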
ezdtw.py (project: ezdtw, author: kylerbrown)
def dtw(a, b, distance_metric='euclidean'):
    '''perform dynamic time warping on two matrices a and b
    first dimension must be time, second dimension shapes must be equal

    distance_metric: a string that matches a valid option for the 'metric' argument in
            scipy.spatial.distance.cdist, such as 'euclidean' 'cosine' 'correlation'

    returns:
        trace_x, trace_y -- the warp path as two lists of indices. Suitable for use in
        an interpolation function such as numpy.interp

        to warp values from a to b, use: numpy.interp(warpable_values, trace_x, trace_y)
        to warp values from b to a, use: numpy.interp(warpable_values, trace_y, trace_x)
    '''
    distance = cdist(a, b, distance_metric)
    cum_min_dist = dtw_distance(distance)
    trace_x, trace_y = backtrack(cum_min_dist)
    return trace_x, trace_y
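The warping step described in the docstring, sketched with a hypothetical warp path (dtw_distance and backtrack are not shown here, so dtw itself is not called):

import numpy as np

trace_x = np.array([0, 1, 2, 3, 4])   # hypothetical indices into a
trace_y = np.array([0, 0, 1, 2, 4])   # matched indices into b
a_positions = np.array([0.5, 2.5])
b_positions = np.interp(a_positions, trace_x, trace_y)   # warp positions on a's time axis onto b's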
_fixes.py (project: pyAFQ, author: yeatmanlab)
def _orient_generator(out, roi1, roi2):
    """
    Helper function to `orient_by_rois`

    Performs the inner loop separately. This is needed, because functions with
    `yield` always return a generator
    """
    for idx, sl in enumerate(out):
        dist1 = cdist(sl, roi1, 'euclidean')
        dist2 = cdist(sl, roi2, 'euclidean')
        min1 = np.argmin(dist1, 0)
        min2 = np.argmin(dist2, 0)
        if min1[0] > min2[0]:
            yield sl[::-1]
        else:
            yield sl
_fixes.py (project: pyAFQ, author: yeatmanlab)
def _orient_list(out, roi1, roi2):
    """
    Helper function to `orient_by_rois`

    Performs the inner loop separately. This is needed, because functions with
    `yield` always return a generator.

    Flips the streamlines in place (as needed) and returns a reference to the
    updated list.
    """
    for idx, sl in enumerate(out):
        dist1 = cdist(sl, roi1, 'euclidean')
        dist2 = cdist(sl, roi2, 'euclidean')
        min1 = np.argmin(dist1, 0)
        min2 = np.argmin(dist2, 0)
        if min1[0] > min2[0]:
            out[idx] = sl[::-1]
    return out
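A toy usage sketch (numpy and cdist are assumed to be imported at module level, as in the original file): a streamline stored tail-first gets flipped so that it starts nearest roi1.

import numpy as np

roi1 = np.array([[0., 0., 0.]])
roi2 = np.array([[5., 0., 0.]])
sl = np.array([[5., 0., 0.], [0., 0., 0.]])   # currently starts near roi2
out = _orient_list([sl], roi1, roi2)
# out[0] now runs from (0, 0, 0) to (5, 0, 0)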
kernel_utils.py (project: product-taz, author: TheAnomalieZ)
def dist2(ls, x1, x2=None):
    # Assumes NxD and MxD matrices.
    # Compute the squared distance matrix, given length scales.

    if x2 is None:
        # Find distance with self for x1.

        # Rescale.
        xx1 = x1 / ls        
        xx2 = xx1

    else:
        # Rescale.
        xx1 = x1 / ls
        xx2 = x2 / ls

    r2 = cdist(xx1,xx2,'sqeuclidean')

    return r2
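Usage sketch with illustrative length scales (cdist is assumed to be imported at module level, as in the original file):

import numpy as np

ls = np.array([1.0, 2.0])    # one length scale per dimension
x1 = np.random.rand(5, 2)
x2 = np.random.rand(3, 2)
r2 = dist2(ls, x1, x2)       # shape (5, 3): squared distances after rescaling
r2_self = dist2(ls, x1)      # shape (5, 5): x2 defaults to x1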
refstruct.py (project: capriqorn, author: bio-phys)
def queryDistance_legacy(xyz, ref, R):
    """Check which atoms in xyz lie within a radius R of any reference
    atom.

    Original implementation, expensive in terms of memory and CPU time
    at large problem sizes.

    Parameters
    ----------
    xyz : array_like (n_atoms, n_dim)
        atoms positions
    ref : array_like (n_atoms, n_dim)
        Reference atoms positions
    R : float
        distance to any atoms

    Returns
    -------
    query : ndarray (n_atoms)
        boolean array showing which particles are close to ref
    """
    xyz = np.asanyarray(xyz)
    ref = np.asanyarray(ref)
    return (cdist(xyz, ref) < R).sum(1).astype(bool)
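Usage sketch with random coordinates (R = 0.2 is illustrative; numpy and cdist are assumed to be imported at module level, as in the original file):

import numpy as np

xyz = np.random.rand(100, 3)
ref = np.random.rand(10, 3)
mask = queryDistance_legacy(xyz, ref, R=0.2)   # boolean array, shape (100,)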
refstruct.py (project: capriqorn, author: bio-phys)
def maxInnerDistance(xyz):
    """max distance between atoms in ``xyz``

    Parameters
    ----------
    xyz : array_like
        array of atoms positions

    Returns
    -------
    float
        maximal distance
    """
    return cdist(xyz, xyz).max()


# --- cell list implementation by Juergen Koefinger below ---
algorithms.py (project: scikit-cmeans, author: bm424)
def distances(self, x):
        """Calculates the distance between data x and the centers.

        The distance, by default, is calculated according to `metric`, but this
        method should be overridden by subclasses if required.

        Parameters
        ----------
        x : :obj:`np.ndarray`
            (n_samples, n_features)
            The original data.

        Returns
        -------
        :obj:`np.ndarray`
            (n_samples, n_clusters)
            Each entry (i, j) is the distance between sample i and cluster
            center j.

        """
        return cdist(x, self.centers, metric=self.metric)
design.py (project: GPflowOpt, author: GPflow)
def _shrink(X, npoints):
        """
        When designs are generated that are larger than the requested number of points (N* > N), resize them.
        If the size was correct all along, the LHD is returned unchanged.

        :param X: Generated LHD, size N* x D, with N* >= N
        :param npoints: What size to resize to (N)
        :return: LHD data matrix, size N x D
        """
        npStar, nv = X.shape

        # Pick N samples nearest to centre of X
        centre = npStar * np.ones((1, nv)) / 2.
        distances = cdist(X, centre).ravel()
        idx = np.argsort(distances)
        X = X[idx[:npoints], :]

        # Translate to origin
        X -= np.min(X, axis=0) - 1

        # Collapse gaps in the design to assure all cell projections onto axes have 1 sample
        Xs = np.argsort(X, axis=0)
        X[Xs, np.arange(nv)] = np.tile(np.arange(1, npoints + 1), (nv, 1)).T
        assert (X.shape[0] == npoints)
        return X
sudoku_steps.py (project: pyku, author: dubvulture)
def extract_digits(self, image):
        """
        Extract digits from a binary image representing a sudoku
        :param image: binary image/sudoku
        :return: array of digits and their probabilities
        """
        prob = np.zeros(4, dtype=np.float32)
        digits = np.zeros((4, 9, 9), dtype=object)
        for i in range(4):
            labeled, features = label(image, structure=CROSS)
            objs = find_objects(labeled)
            for obj in objs:
                roi = image[obj]
                # center of bounding box
                cy = (obj[0].stop + obj[0].start) / 2
                cx = (obj[1].stop + obj[1].start) / 2
                dists = cdist([[cy, cx]], CENTROIDS, 'euclidean')
                pos = np.argmin(dists)
                cy, cx = pos % 9, pos // 9  # integer division: cy, cx index the 9x9 grid
                # 28x28 image, center relative to sudoku
                prediction = self.classifier.classify(morph(roi))
                if digits[i, cy, cx] is 0:
                    # Newly found digit
                    digits[i, cy, cx] = prediction
                    prob[i] += prediction[0, 0]
                elif prediction[0, 0] > digits[i, cy, cx][0, 0]:
                    # Overlapping! (noise), choose the most probable prediction
                    prob[i] -= digits[i, cy, cx][0, 0]
                    digits[i, cy, cx] = prediction
                    prob[i] += prediction[0, 0]
            image = np.rot90(image)
        logging.info(prob)
        return digits[np.argmax(prob)]
sudoku.py (project: pyku, author: dubvulture)
def extract_digits(self, image):
        """
        Extract digits from a binary image representing a sudoku
        :param image: binary image/sudoku
        :return: array of digits and their probabilities
        """
        prob = np.zeros(4, dtype=np.float32)
        digits = np.zeros((4, 9, 9), dtype=object)
        for i in range(4):
            labeled, features = label(image, structure=CROSS)
            objs = find_objects(labeled)
            for obj in objs:
                roi = image[obj]
                # center of bounding box
                cy = (obj[0].stop + obj[0].start) / 2
                cx = (obj[1].stop + obj[1].start) / 2
                dists = cdist([[cy, cx]], CENTROIDS, 'euclidean')
                pos = np.argmin(dists)
                cy, cx = pos % 9, pos // 9  # integer division: cy, cx index the 9x9 grid
                # 28x28 image, center relative to sudoku
                prediction = self.classifier.classify(morph(roi))
                if digits[i, cy, cx] is 0:
                    # Newly found digit
                    digits[i, cy, cx] = prediction
                    prob[i] += prediction[0, 0]
                elif prediction[0, 0] > digits[i, cy, cx][0, 0]:
                    # Overlapping! (noise), choose the most probable prediction
                    prob[i] -= digits[i, cy, cx][0, 0]
                    digits[i, cy, cx] = prediction
                    prob[i] += prediction[0, 0]
            image = np.rot90(image)
        logging.info(prob)
        return digits[np.argmax(prob)]
sf_kmeans.py (project: kmeans-service, author: MAYHEM-Lab)
def _initial_farthest_traversal(self, data, seed=None):
        """ Find the initial set of cluster centers using Farthest Traversal strategy """
        # Pick first at random
        np.random.seed(seed)
        centers = data[np.random.randint(low=0, high=data.shape[0], size=1)]
        for _ in range(self.n_clusters - 1):
            dist = cdist(data, centers)
            dist = dist.sum(axis=1)
            assert dist.shape[0] == data.shape[0]  # making sure that axis=1 is correct
            # point with max. dist from all centers becomes a new center
            centers = np.append(centers, [data[np.argmax(dist)]], axis=0)
        return centers
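A standalone sketch of the same farthest-traversal idea outside the class (k and the data are illustrative; the method above reads the number of clusters from self.n_clusters):

import numpy as np
from scipy.spatial.distance import cdist

def farthest_traversal(data, k, seed=None):
    rng = np.random.RandomState(seed)
    centers = data[rng.randint(data.shape[0], size=1)]    # first center picked at random
    for _ in range(k - 1):
        total_dist = cdist(data, centers).sum(axis=1)     # summed distance to all current centers
        centers = np.append(centers, [data[np.argmax(total_dist)]], axis=0)
    return centers

centers = farthest_traversal(np.random.rand(50, 2), k=3, seed=0)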
sf_kmeans.py (project: kmeans-service, author: MAYHEM-Lab)
def _inertia(self, data):
        """ Sum of distances of all data points from their cluster centers """
        distances = np.zeros((data.shape[0], self.n_clusters))
        covar_matrices = self.covariances(self.labels_, cluster_centers=self.cluster_centers_, data=data)
        self._inv_covar_matrices = self._matrix_inverses(covar_matrices)
        for k in range(self.n_clusters):
            k_dist = cdist(data, np.array([self.cluster_centers_[k]]), metric=self.metric,
                           VI=self._inv_covar_matrices[k])
            k_dist = k_dist.reshape((data.shape[0],))
            distances[:, k] = k_dist
        distances = distances.min(axis=1)
        assert distances.shape[0] == data.shape[0]
        return distances.sum()
sf_kmeans.py (project: kmeans-service, author: MAYHEM-Lab)
def _rss(self, data):
        """ Residual Sum of Square distances of all data points from their cluster centers """
        if self.metric == 'euclidean':
            distances = cdist(data, self.cluster_centers_, metric='euclidean')
        elif self.metric == 'mahalanobis':
            #covar_matrix = self.covariance(labels=self.labels_, cluster_centers=self.cluster_centers_, data=data)
            covar_matrices = self.covariances(self.labels_,
                                            cluster_centers=self.cluster_centers_, data=data)[0]
            self._inv_covar_matrices = self._matrix_inverses(covar_matrices)
            distances = cdist(data, self.cluster_centers_, metric='mahalanobis', VI=self._inv_covar_matrices)
        distances = distances.min(axis=1)
        distances = distances ** 2
        assert distances.shape[0] == data.shape[0]
        return distances.sum()
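A minimal sketch of the mahalanobis branch: cdist takes the inverse covariance through the VI keyword (the data and centers here are random and purely illustrative):

import numpy as np
from scipy.spatial.distance import cdist

data = np.random.rand(100, 2)
centers = np.random.rand(3, 2)
VI = np.linalg.inv(np.cov(data, rowvar=False))
d = cdist(data, centers, metric='mahalanobis', VI=VI)   # shape (100, 3)
rss = (d.min(axis=1) ** 2).sum()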
test.py (project: tensorflow_ocr, author: BowieHsu)
def order_points(pts):
    x_sorted = pts[np.argsort(pts[:,0]),:]
    left_most = x_sorted[:2,:]
    right_most = x_sorted[2:,:]

    left_most = left_most[np.argsort(left_most[:,1]), :]
    (tl, bl) = left_most

    D = dist.cdist(tl[np.newaxis], right_most, 'euclidean')[0]
    (br, tr) = right_most[np.argsort(D)[::-1],:]

    return np.array([tl, tr, br, bl], dtype='int32')
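Usage sketch in image coordinates, where y grows downward (this assumes the module imports numpy as np and scipy.spatial.distance as dist):

import numpy as np

pts = np.array([[0, 0], [10, 0], [10, 10], [0, 10]])
print(order_points(pts))
# [[ 0  0]    top-left
#  [10  0]    top-right
#  [10 10]    bottom-right
#  [ 0 10]]   bottom-left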
utils.py (project: POT, author: rflamary)
def dist(x1, x2=None, metric='sqeuclidean'):
    """Compute distance between samples in x1 and x2 using function scipy.spatial.distance.cdist

    Parameters
    ----------

    x1 : np.array (n1,d)
        matrix with n1 samples of size d
    x2 : np.array (n2,d), optional
        matrix with n2 samples of size d (if None then x2=x1)
    metric : str, fun, optional
        name of the metric to be computed (full list in the doc of scipy),  If a string,
        the distance function can be 'braycurtis', 'canberra', 'chebyshev', 'cityblock',
        'correlation', 'cosine', 'dice', 'euclidean', 'hamming', 'jaccard', 'kulsinski',
        'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto', 'russellrao', 'seuclidean',
        'sokalmichener', 'sokalsneath', 'sqeuclidean', 'wminkowski', 'yule'.


    Returns
    -------

    M : np.array (n1,n2)
        distance matrix computed with given metric

    """
    if x2 is None:
        x2 = x1

    return cdist(x1, x2, metric=metric)
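Usage sketch with random samples (shapes are illustrative; numpy and cdist come from the module-level imports of the original file):

import numpy as np

xs = np.random.rand(4, 2)
xt = np.random.rand(6, 2)
M = dist(xs, xt)                          # (4, 6) squared Euclidean costs (the default metric)
M_euc = dist(xs, xt, metric='euclidean')  # same pairs, plain Euclidean distance
M_self = dist(xs)                         # (4, 4): x2 defaults to x1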
htfa.py (project: brainiak, author: brainiak)
def _assign_posterior(self):
        """assign posterior to the right prior based on
           Hungarian algorithm

        Returns
        -------
        HTFA
            Returns the instance itself.
        """

        prior_centers = self.get_centers(self.global_prior_)
        posterior_centers = self.get_centers(self.global_posterior_)
        posterior_widths = self.get_widths(self.global_posterior_)
        posterior_centers_mean_cov =\
            self.get_centers_mean_cov(self.global_posterior_)
        posterior_widths_mean_var =\
            self.get_widths_mean_var(self.global_posterior_)
        # linear assignment on centers
        cost = distance.cdist(prior_centers, posterior_centers, 'euclidean')
        _, col_ind = linear_sum_assignment(cost)
        # reorder centers/widths based on cost assignment
        self.set_centers(self.global_posterior_, posterior_centers[col_ind])
        self.set_widths(self.global_posterior_, posterior_widths[col_ind])
        # reorder cov/var based on cost assignment
        self.set_centers_mean_cov(
            self.global_posterior_,
            posterior_centers_mean_cov[col_ind])
        self.set_widths_mean_var(
            self.global_posterior_,
            posterior_widths_mean_var[col_ind])
        return self
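A standalone sketch of the matching step above: pair each prior center with a posterior center via the Hungarian algorithm (the random centers are purely illustrative):

import numpy as np
from scipy.optimize import linear_sum_assignment
from scipy.spatial.distance import cdist

prior_centers = np.random.rand(5, 3)
posterior_centers = np.random.rand(5, 3)
cost = cdist(prior_centers, posterior_centers, 'euclidean')
_, col_ind = linear_sum_assignment(cost)
matched = posterior_centers[col_ind]   # posterior centers reordered to match the priors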

