Python numpy.unique() usage examples (source snippets)

recognition_utils.py (project: pybot, author: spillai)
def __init__(self, filename, target_map, classifier='svm'): 

        self.seed_ = 0
        self.filename_ = filename
        self.target_map_ = target_map
        self.target_ids_ = np.unique(list(target_map.keys())).astype(np.int32)
        self.epoch_no_ = 0
        self.st_time_ = time.time()

        # Setup classifier
        print('-------------------------------')        
        print('====> Building Classifier, setting class weights') 
        if classifier == 'svm': 
            self.clf_hyparams_ = {'C':[0.01, 0.1, 1.0, 10.0, 100.0], 'class_weight': ['balanced']}
            self.clf_base_ = LinearSVC(random_state=self.seed_)
        elif classifier == 'sgd': 
            self.clf_hyparams_ = {'alpha':[0.0001, 0.001, 0.01, 0.1, 1.0, 10.0], 'class_weight': ['balanced']} # 'loss':['hinge'],
            self.clf_ = SGDClassifier(loss='log_loss', penalty='l2', shuffle=False, random_state=self.seed_,
                                      warm_start=True, n_jobs=-1, max_iter=1, verbose=4)
        else: 
            raise Exception('Unknown classifier type %s. Choose from [sgd, svm, gradient-boosting, extra-trees]' 
                            % classifier)
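A quick sanity check of the np.unique call above (a minimal sketch; this target_map is an invented stand-in for the constructor argument):

import numpy as np

# hypothetical stand-in for the constructor's target_map argument
target_map = {3: 'bowl', 1: 'mug', 2: 'cap'}
# dict.keys() is a view under Python 3; materialize it so numpy sees a 1-D sequence
target_ids = np.unique(list(target_map.keys())).astype(np.int32)
print(target_ids)  # [1 2 3] -- deduplicated and sorted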
cluster.py (project: rca-evaluation, author: sieve-microservices)
def silhouette_score(series, clusters):
    # pairwise shape-based distances (_sbd) between all series
    distances = np.zeros((series.shape[0], series.shape[0]))
    for idx_a, metric_a in enumerate(series):
        for idx_b, metric_b in enumerate(series):
            distances[idx_a, idx_b] = _sbd(metric_a, metric_b)[0]
    labels = np.zeros(series.shape[0])
    for i, (cluster, indices) in enumerate(clusters):
        for index in indices:
            labels[index] = i

    # the silhouette score is only defined for at least 2 and at most
    # n_samples - 1 distinct labels
    if len(np.unique(labels)) == 1 or (len(np.unique(labels)) >= distances.shape[0]):
        return labels, -1
    else:
        return labels, _silhouette_score(distances, labels, metric='precomputed')
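The guard above is easy to exercise directly with sklearn's silhouette implementation (a minimal sketch; the labels and distance matrix are invented toy values):

import numpy as np
from sklearn.metrics import silhouette_score as _silhouette_score

labels = np.array([0, 0, 1, 1])
distances = np.array([[0., 1., 5., 6.],
                      [1., 0., 5., 6.],
                      [5., 5., 0., 1.],
                      [6., 6., 1., 0.]])
# same guard as above: 2 <= n_labels <= n_samples - 1
if 2 <= len(np.unique(labels)) <= distances.shape[0] - 1:
    print(_silhouette_score(distances, labels, metric='precomputed'))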
ade20k_loader.py (project: pytorch-semseg, author: meetshah1995)
def transform(self, img, lbl):
        img = img[:, :, ::-1]
        img = img.astype(np.float64)
        img -= self.mean
        img = m.imresize(img, (self.img_size[0], self.img_size[1]))
        # Resize scales images from 0 to 255, thus we need
        # to divide by 255.0
        img = img.astype(float) / 255.0
        # HWC -> CHW
        img = img.transpose(2, 0, 1)

        lbl = self.encode_segmap(lbl)
        classes = np.unique(lbl)
        lbl = lbl.astype(float)
        lbl = m.imresize(lbl, (self.img_size[0], self.img_size[1]), 'nearest', mode='F')
        lbl = lbl.astype(int)
        assert(np.all(classes == np.unique(lbl)))

        img = torch.from_numpy(img).float()
        lbl = torch.from_numpy(lbl).long()
        return img, lbl
pca.py (project: cellranger, author: 10XGenomics)
def get_normalized_dispersion(mat_mean, mat_var, nbins=20):
    mat_disp = (mat_var - mat_mean) / np.square(mat_mean)

    quantiles = np.percentile(mat_mean, np.arange(0, 100, 100 / nbins))
    quantiles = np.append(quantiles, mat_mean.max())

    # merge bins with no difference in value
    quantiles = np.unique(quantiles)

    if len(quantiles) <= 1:
        # pathological case: the means are all identical. just return raw dispersion.
        return mat_disp

    # calc median dispersion per bin
    (disp_meds, _, disp_bins) = scipy.stats.binned_statistic(mat_mean, mat_disp, statistic='median', bins=quantiles)

    # calc median absolute deviation of dispersion per bin
    disp_meds_arr = disp_meds[disp_bins-1] # 0th bin is empty since our quantiles start from 0
    disp_abs_dev = abs(mat_disp - disp_meds_arr)
    (disp_mads, _, disp_bins) = scipy.stats.binned_statistic(mat_mean, disp_abs_dev, statistic='median', bins=quantiles)

    # calculate normalized dispersion
    disp_mads_arr = disp_mads[disp_bins-1]
    disp_norm = (mat_disp - disp_meds_arr) / disp_mads_arr
    return disp_norm
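What the np.unique(quantiles) line protects against, in isolation (a sketch with invented means): repeated percentile edges would break binned_statistic, and np.unique collapses them into strictly increasing bin edges.

import numpy as np

mat_mean = np.array([1., 1., 1., 2., 3.])
quantiles = np.percentile(mat_mean, np.arange(0, 100, 25))
print(quantiles)             # [1. 1. 1. 2.] -- repeated edges
print(np.unique(quantiles))  # [1. 2.] -- valid, strictly increasing edges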
shp1.py (project: j3dview, author: blank63)
def gl_init(self, array_table):
        self.gl_hide = False

        self.gl_vertex_array = gl.VertexArray()
        glBindVertexArray(self.gl_vertex_array)

        self.gl_vertex_buffer = gl.Buffer()
        glBindBuffer(GL_ARRAY_BUFFER, self.gl_vertex_buffer)

        self.gl_element_count = 3 * gl_count_triangles(self)
        self.gl_element_buffer = gl.Buffer()
        glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, self.gl_element_buffer)

        vertex_type = numpy.dtype([array_table[attribute].field() for attribute in self.attributes])
        vertex_count = sum(len(primitive.vertices) for primitive in self.primitives)
        vertex_array = numpy.empty(vertex_count, vertex_type)

        for attribute in self.attributes:
            array_table[attribute].load(self, vertex_array)

        # deduplicate vertices; element_map re-expresses every original vertex
        # as an index into the unique vertex array
        vertex_array, element_map = numpy.unique(vertex_array, return_inverse=True)
        element_array = gl_create_element_array(self, element_map, self.gl_element_count)

        glBufferData(GL_ARRAY_BUFFER, vertex_array.nbytes, vertex_array, GL_STATIC_DRAW)
        glBufferData(GL_ELEMENT_ARRAY_BUFFER, element_array.nbytes, element_array, GL_STATIC_DRAW)
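The deduplicate-and-remap idiom on its own (a sketch on a plain 2-D array; axis=0 needs numpy >= 1.13, while the structured array above is deduplicated row-wise without it):

import numpy as np

raw = np.array([[0., 0.], [1., 0.], [0., 0.], [1., 1.]])
# unique rows plus, for every original row, its index into the unique set
verts, inverse = np.unique(raw, axis=0, return_inverse=True)
print(verts)    # [[0. 0.] [1. 0.] [1. 1.]]
print(inverse)  # [0 1 0 2] -- usable as an element/index buffer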
decision_tree_submit.py (project: Python-Machine-Learning-By-Example, author: PacktPublishing)
def get_best_split(X, y, criterion):
    """ Obtain the best splitting point and resulting children for the data set X, y
    Args:
        X, y (numpy.ndarray, data set)
        criterion (gini or entropy)
    Returns:
        dict {index: index of the feature, value: feature value, children: left and right children}
    """
    best_index, best_value, best_score, children = None, None, 1, None
    for index in range(len(X[0])):
        for value in np.sort(np.unique(X[:, index])):
            groups = split_node(X, y, index, value)
            impurity = weighted_impurity([groups[0][1], groups[1][1]], criterion)
            if impurity < best_score:
                best_index, best_value, best_score, children = index, value, impurity, groups
    return {'index': best_index, 'value': best_value, 'children': children}
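One small observation about the candidate-split loop (sketch with invented data): np.unique already returns its result sorted, so the surrounding np.sort is redundant but harmless.

import numpy as np

X = np.array([[2.0], [1.0], [2.0], [3.0]])
# candidate split values for feature 0: the distinct values, already in order
print(np.unique(X[:, 0]))  # [1. 2. 3.]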
spatial_image_analysis.py (project: tissue_analysis, author: VirtualPlants)
def consideronlylabels(self, list2consider, verbose=False):
        """
        Keep only the labels in `list2consider`: every other label found in the
        image is added to the ignoredlabels set, and the self._labels cache is updated.
        """
        if isinstance(list2consider, int):
            list2consider = [list2consider]

        toignore = set(np.unique(self.image)) - set(list2consider)
        toignore = [int(x) for x in toignore]

        if verbose: print('Adding labels', toignore, 'to the list of labels to ignore...')
        self._ignoredlabels.update(toignore)
        if verbose: print('Updating labels list...')
        self._labels = self.__labels()
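The set-complement trick in isolation (minimal sketch with an invented label image):

import numpy as np

image = np.array([[0, 1, 2],
                  [2, 3, 3]])
keep = {1, 3}
# everything present in the image but not explicitly kept gets ignored
to_ignore = set(np.unique(image)) - keep
print(sorted(int(v) for v in to_ignore))  # [0, 2]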
mlp-digits.py (project: NumpyDL, author: oujago)
def main(max_iter):
    # prepare
    npdl.utils.random.set_seed(1234)

    # data
    digits = load_digits()

    X_train = digits.data
    X_train /= np.max(X_train)

    Y_train = digits.target
    n_classes = np.unique(Y_train).size

    # model
    model = npdl.model.Model()
    model.add(npdl.layers.Dense(n_out=500, n_in=64, activation=npdl.activations.ReLU()))
    model.add(npdl.layers.Dense(n_out=n_classes, activation=npdl.activations.Softmax()))
    model.compile(loss=npdl.objectives.SCCE(), optimizer=npdl.optimizers.SGD(lr=0.005))

    # train
    model.fit(X_train, npdl.utils.data.one_hot(Y_train), max_iter=max_iter, validation_split=0.1)
sampling.py (project: segmentation_DLMI, author: imatge-upc)
def get_weighted_mask(self, image_shape, mask_shape, ROI_mask=None, labels_mask=None):

        if labels_mask is None:
            raise ValueError('SamplingScheme error: please specify a labels_mask for this sampling scheme')
        print(np.unique(labels_mask))
        mask_boundaries = self.get_mask_boundaries(image_shape, mask_shape, ROI_mask)

        final_mask = np.zeros((self.n_categories,) + labels_mask.shape, dtype="int16")
        for index_cat in range(self.n_categories):
            # binary mask for this category, restricted to the valid boundaries
            final_mask[index_cat] = (labels_mask == index_cat) * mask_boundaries

        # normalize each category plane so its weights sum to 1
        final_mask = 1.0 * final_mask / np.reshape(np.sum(np.reshape(final_mask, (self.n_categories, -1)), axis=1), (self.n_categories,) + (1,) * len(image_shape))

        print(np.sum(np.reshape(final_mask, (self.n_categories, -1)), axis=1))
        return final_mask
neuralynxio.py (project: NeoAnalysis, author: neoanalysis)
def get_channel_id_by_file_name(self, filename):
        """
        Check the parameters of the NCS, NSE and NTT files for the given
        filename and return the channel_id if the result is consistent.
        :param filename:
        :return:
        """
        channel_ids = []
        channel_ids += [k for k in self.parameters_ncs if
                        self.parameters_ncs[k]['filename'] == filename]
        channel_ids += [k for k in self.parameters_nse if
                        self.parameters_nse[k]['filename'] == filename]
        channel_ids += [k for k in self.parameters_ntt if
                        self.parameters_ntt[k]['filename'] == filename]
        if len(np.unique(np.asarray(channel_ids))) == 1:
            return channel_ids[0]
        elif len(channel_ids) > 1:
            raise ValueError(
                    'Ambiguous channel ids detected. Filename %s is associated'
                    ' to different channels of NCS and NSE and NTT %s'
                    '' % (filename, channel_ids))
        else:  # if filename was not detected
            return None
blackrockio.py (project: NeoAnalysis, author: neoanalysis)
def __read_unit(self, unit_id, channel_idx):
        """
        Creates unit with unit id for given channel id.
        """
        # define a name for spiketrain
        # (unique identifier: 1000 * elid + unit_nb)
        name = "Unit {0}".format(1000 * channel_idx + unit_id)
        # define description for spiketrain
        desc = 'Unit from channel: {0}, id: {1}'.format(
            channel_idx, self.__get_unit_classification(unit_id))

        un = Unit(
            name=name,
            description=desc,
            file_origin='.'.join([self._filenames['nev'], 'nev']))

        # add additional annotations
        un.annotate(ch_idx=int(channel_idx))
        un.annotate(unit_id=int(unit_id))

        return un
spikesorting.py (project: NeoAnalysis, author: neoanalysis)
def __draw_pk2(self):
        self.__cleanPk2()
        if self.units is not None:
            unique_units = np.unique(self.units)
            unique_units = unique_units.tolist()
            pca_1,pca_2 = self.PCAusedList.currentText().split("-")
            pca_1 = int(pca_1) - 1
            pca_2 = int(pca_2) - 1
            if self.wavePCAs[0].shape[0]>2:
                xs = self.wavePCAs[:,pca_1]
                ys = self.wavePCAs[:,pca_2]
                self.PcaScatterItem = []
                seg_num = 5000
                for i,ite_unit in enumerate(unique_units):
                    mask = self.units==ite_unit
                    temp_xs = xs[mask]
                    temp_ys = ys[mask]
                    segs = int(ceil(temp_xs.shape[0]/float(seg_num)))
                    for j in range(segs):
                        temp_xs_j = temp_xs[j*seg_num:(j+1)*seg_num]
                        temp_ys_j = temp_ys[j*seg_num:(j+1)*seg_num]
                        self.PcaScatterItem.append(pg.ScatterPlotItem(temp_xs_j,temp_ys_j,pen=self.colors[ite_unit],brush=self.colors[ite_unit],size=3,symbol="o"))
                for i in range(len(self.PcaScatterItem)):
                    self.pk2.addItem(self.PcaScatterItem[i])
dvsproc.py (project: spikefuel, author: duguyue100)
def cal_event_count(timestamps):
    """Calculate event counts based on timestamps.

    Parameters
    ----------
    timestamps : numpy.ndarray
        1-D array of timestamps

    Returns
    -------
    event_arr : numpy.ndarray
        array with 2 rows: the first row contains the distinct timestamps,
        the second row the corresponding event count at each timestep
    """
    event_ts, event_count = np.unique(timestamps, return_counts=True)

    return np.asarray((event_ts, event_count))
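What return_counts produces, on invented timestamps (the keyword needs numpy >= 1.9):

import numpy as np

timestamps = np.array([1, 1, 2, 5, 5, 5])
event_ts, event_count = np.unique(timestamps, return_counts=True)
print(np.asarray((event_ts, event_count)))
# [[1 2 5]
#  [2 1 3]]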
match.py (project: pscore_match, author: kellieotto)
def recode_groups(groups, propensity):
    # Code groups as 0 and 1
    groups = (groups == groups.unique()[0])
    N = len(groups)
    N1 = groups[groups == 1].index
    N2 = groups[groups == 0].index
    g1 = propensity[groups == 1]
    g2 = propensity[groups == 0]
    # Check if treatment groups got flipped - the smaller should correspond to N1/g1
    if len(N1) > len(N2):
        N1, N2, g1, g2 = N2, N1, g2, g1
    return groups, N1, N2, g1, g2

################################################################################
############################# Base Matching Class ##############################
################################################################################
gps.py (project: PyGPS, author: gregstarr)
def minScalErr(stec,el,z,thisBias):
    """
    this determines the slope of the vTEC vs. Elevation line, which
    should be minimized in the minimum scalloping technique for
    receiver bias removal
    inputs:
        stec - time indexed Series of slant TEC values
        el - corresponding elevation values, also Series
        z - mapping function values to convert to vTEC from entire file, may
            contain nans, Series
        thisBias - the bias to be tested and minimized
    """

    intel = np.asarray(el[stec.index], int)  # bin the elevation values into ints
    sTEC = np.asarray(stec, float)
    zmap = z[stec.index]
    # average de-biased vTEC per integer elevation bin above 30 degrees
    c = np.array([(i, np.average((sTEC[intel == i] - thisBias)
                                 / zmap[intel == i])) for i in np.unique(intel) if i > 30])

    return np.polyfit(c[:, 0], c[:, 1], 1)[0]
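The per-bin averaging pattern on its own (minimal sketch with invented elevations and values):

import numpy as np

intel = np.array([31, 31, 45, 45, 60])
vals = np.array([1., 3., 2., 2., 5.])
# one averaged value per distinct elevation bin above 30 degrees
c = np.array([(i, np.average(vals[intel == i])) for i in np.unique(intel) if i > 30])
print(c)  # [[31. 2.] [45. 2.] [60. 5.]]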
solution_classes.py (project: risk-slim, author: ustunb)
def filter_sort_unique(self, max_objval=float('Inf')):
        # filter
        if max_objval < float('inf'):
            good_idx = self.objvals <= max_objval
            self.objvals = self.objvals[good_idx]
            self.solutions = self.solutions[good_idx]

        if len(self.objvals) > 0:
            sort_idx = np.argsort(self.objvals)
            self.objvals = self.objvals[sort_idx]
            self.solutions = self.solutions[sort_idx]

            # unique: view each solution row as a single void record so that
            # np.unique deduplicates whole rows at once
            b = np.ascontiguousarray(self.solutions).view(
                np.dtype((np.void, self.solutions.dtype.itemsize * self.P)))
            _, unique_idx = np.unique(b, return_index=True)
            self.objvals = self.objvals[unique_idx]
            self.solutions = self.solutions[unique_idx]
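On numpy >= 1.13 the same row deduplication can skip the void view entirely (a sketch, not the project's code):

import numpy as np

solutions = np.array([[0, 1], [1, 0], [0, 1]])
# axis=0 deduplicates rows directly; return_index gives first occurrences
_, unique_idx = np.unique(solutions, axis=0, return_index=True)
print(solutions[np.sort(unique_idx)])  # [[0 1] [1 0]]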
keras_utils.py (project: AutoSleepScorerDev, author: skjerns)
def reset(self):
        """ Resets the state of the generator"""
        self.step = 0
        Y = np.argmax(self.Y,1)
        labels = np.unique(Y)
        idx = []
        smallest = len(Y)
        for i,label in enumerate(labels):
            where = np.where(Y==label)[0]
            if smallest > len(where): 
                self.slabel = i
                smallest = len(where)
            idx.append(where)
        self.idx = idx
        self.labels = labels
        self.n_per_class = int(self.batch_size // len(labels))
        self.n_batches = int(np.ceil((smallest//self.n_per_class)))+1
        self.update_probabilities()
keras_utils.py (project: AutoSleepScorerDev, author: skjerns)
def __init__(self, X, Y, batch_size,cropsize=0, truncate=False, sequential=False,
                 random=True, val=False, class_weights=None):

        assert len(X) == len(Y), 'X and Y must be the same length {}!={}'.format(len(X),len(Y))
        if sequential: print('Using sequential mode')
        print('starting normal generator')
        self.X = X
        self.Y = Y
        self.rnd_idx = np.arange(len(Y))
        self.Y_last_epoch = []
        self.val = val
        self.step = 0
        self.i = 0
        self.cropsize=cropsize
        self.truncate = truncate
        self.random = False if sequential or val else random
        self.batch_size = int(batch_size)
        self.sequential = sequential
        self.c_weights = class_weights if class_weights else dict(
            zip(np.unique(np.argmax(Y, 1)), np.ones(len(np.argmax(Y, 1)))))
        assert set(np.argmax(Y,1)) == set([int(x) for x in self.c_weights.keys()]), 'not all labels in class weights'
        self.n_batches = int(len(X)//batch_size if truncate else np.ceil(len(X)/batch_size))
        if self.random: self.randomize()
keras_utils.py (project: AutoSleepScorerDev, author: skjerns)
def next_normal(self):
        x_batch = self.X[self.step*self.batch_size:(self.step+1)*self.batch_size]
        y_batch = self.Y[self.step*self.batch_size:(self.step+1)*self.batch_size]

        diff = len(x_batch[0]) - self.cropsize
        if self.cropsize!=0 and not self.val:
            start = np.random.choice(np.arange(0,diff+5,5), len(x_batch))
            x_batch = [x[start[i]:start[i]+self.cropsize,:] for i,x in enumerate(x_batch)]
        elif self.cropsize !=0 and self.val:
            x_batch = [x[diff//2:diff//2+self.cropsize] for i,x in enumerate(x_batch)]

        x_batch = np.array(x_batch, dtype=np.float32)
        y_batch = np.array(y_batch, dtype=np.int32)
        self.step += 1
        if self.val:
            # validation generator: remember the labels for later evaluation
            self.Y_last_epoch.extend(y_batch)
            return x_batch
        else:
            # per-sample weights looked up from the class-weight dict
            weights = np.ones(len(y_batch))
            for t in np.unique(np.argmax(y_batch, 1)):
                weights[np.argmax(y_batch, 1) == t] = self.c_weights[t]
            return (x_batch, y_batch, weights)  # hand the per-sample weights along with x and y
generic_wrapper.py (project: PersonalizedMultitaskLearning, author: mitmedialab)
def get_preds_true_for_task(self,train_tasks, test_tasks, param_dict):
        t = param_dict['task_num']
        X = train_tasks[t]['X']
        y = train_tasks[t]['Y']

        test_X = test_tasks[t]['X']
        true_y = list(test_tasks[t]['Y'].flatten())

        if len(y)==0 or len(X)==0 or len(test_X) == 0 or len(true_y)==0:
            return None, None

        if self.cant_train_with_one_class and len(np.unique(y))==1:
            preds = list(np.unique(y)[0]*np.ones(len(true_y)))
        else:
            preds = self.train_and_predict_task(t, X, y, test_X, param_dict)

        return preds, true_y
Utilities.py (project: a-cadmci, author: florez87)
def getClasses(labels):
        """
        Get unique values from a column of labels.

        Parameters
        ----------
        labels: array-like of shape = [number_samples] or [number_samples, number_outputs]
            The target values (class labels in classification).

        Return
        ----------
        classes: ndarray
            The sorted unique labels

        ids: ndarray
            The indices into `classes` that reconstruct the original array
            (numpy.unique's return_inverse output).
        """
        uniques, ids = numpy.unique(labels, return_inverse=True)
        return uniques, ids
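What return_inverse gives back, on an invented label column: the indices satisfy uniques[ids] == labels.

import numpy as np

labels = np.array(['cat', 'dog', 'cat', 'bird'])
uniques, ids = np.unique(labels, return_inverse=True)
print(uniques)       # ['bird' 'cat' 'dog']
print(ids)           # [1 2 1 0]
print(uniques[ids])  # the original labels, reconstructed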
test_gridsearch_optimizer.py (project: OptML, author: johannespetrat)
def grid_spacing(self):
        interval = [1,10]
        p1 = Parameter('A', 'integer', lower=interval[0], upper=interval[1])
        p2 = Parameter('B', 'continuous', lower=interval[0], upper=interval[1])
        p3 = Parameter('C', 'categorical', possible_values=['Bla1', 'Bla2'])
        p4 = Parameter('D', 'boolean')
        grid_sizes = {'A': 5, 'B': 6}
        grid_search = GridSearchOptimizer(model, [p1, p2, p3, p4], clf_score, grid_sizes)
        grid = grid_search.grid
        for params in grid:
            self.assertIn(params['A'], range(interval[0], interval[1] + 1))
            self.assertGreaterEqual(params['B'], interval[0])
            self.assertLessEqual(params['B'], interval[1])
            self.assertIn(params['C'], ['Bla1', 'Bla2'])
            self.assertIn(params['D'], ['True', 'False'])
        lenA = len(np.unique([params['A'] for params in grid]))
        lenB = len(np.unique([params['B'] for params in grid]))
        lenC = len(np.unique([params['C'] for params in grid]))
        lenD = len(np.unique([params['D'] for params in grid]))
        self.assertTrue((lenA == grid_sizes['A']) or (lenA == grid_sizes['A'] + 1))
        self.assertTrue((lenB == grid_sizes['B']) or (lenB == grid_sizes['B'] + 1))
        # the categorical and boolean parameters each take exactly two values
        self.assertEqual(lenC, 2)
        self.assertEqual(lenD, 2)
spectrogram_main.py (project: audio_scripts, author: audiofilter)
def logscale_spec(spec, sr=44100, factor=20.):
    timebins, freqbins = np.shape(spec)

    scale = np.linspace(0, 1, freqbins) ** factor
    scale *= (freqbins-1)/max(scale)
    scale = np.unique(np.round(scale)).astype(int)  # merge duplicate bins; int indices for slicing below

    # create spectrogram with new freq bins
    newspec = np.complex128(np.zeros([timebins, len(scale)]))
    for i in range(0, len(scale)):
        if i == len(scale)-1:
            newspec[:,i] = np.sum(spec[:,scale[i]:], axis=1)
        else:        
            newspec[:,i] = np.sum(spec[:,scale[i]:scale[i+1]], axis=1)

    # list center freq of bins
    allfreqs = np.abs(np.fft.fftfreq(freqbins*2, 1./sr)[:freqbins+1])
    freqs = []
    for i in range(0, len(scale)):
        if i == len(scale)-1:
            freqs += [np.mean(allfreqs[scale[i]:])]
        else:
            freqs += [np.mean(allfreqs[scale[i]:scale[i+1]])]

    return newspec, freqs
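Why the np.unique(np.round(...)) step matters (minimal sketch with a small bin count): a strongly non-linear scale maps many low bins onto the same rounded index, and np.unique collapses the repeats into valid bin edges.

import numpy as np

freqbins, factor = 8, 20.
scale = np.linspace(0, 1, freqbins) ** factor
scale *= (freqbins - 1) / max(scale)
print(np.unique(np.round(scale)).astype(int))  # [0 7] -- the low bins collapse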
sf_kmeans.py (project: kmeans-service, author: MAYHEM-Lab)
def free_parameters(self, data):
        """
        Compute free parameters for the model fit using K-Means
        """
        K = np.unique(self.labels_).shape[0]  # number of clusters
        n, d = data.shape
        r = (K - 1) + (K * d)
        if self.metric == 'euclidean':
            r += 1  # one parameter for variance
        elif self.metric == 'mahalanobis':
            if self.covar_type == 'full' and self.covar_tied:
                r += (d * (d + 1) * 0.5)  # half of the elements (including diagonal) in the matrix
            if self.covar_type == 'full' and not self.covar_tied:
                r += (d * (d + 1) * 0.5 * K)  # half of the elements (including diagonal) in the matrix
            if self.covar_type == 'diag' and self.covar_tied:
                r += d  # diagonal elements of the matrix
            if self.covar_type == 'diag' and not self.covar_tied:
                r += (d * K)  # diagonal elements of the matrix
            if self.covar_type == 'spher' and self.covar_tied:
                r += 1  # all diagonal elements are equal
            if self.covar_type == 'spher' and not self.covar_tied:
                r += K  # all diagonal elements are equal
        return r
custom_dr.py (project: sef, author: passalis)
def sim_target_supervised(target_data, target_labels, sigma, idx, target_params):
    cur_labels = target_labels[idx]
    N = cur_labels.shape[0]

    N_labels = len(np.unique(cur_labels))

    Gt, mask = np.zeros((N, N)), np.zeros((N, N))

    for i in range(N):
        for j in range(N):
            if cur_labels[i] == cur_labels[j]:
                Gt[i, j] = 0.8
                mask[i, j] = 1
            else:
                Gt[i, j] = 0.1
                mask[i, j] = 0.8 / (N_labels - 1)

    return np.float32(Gt), np.float32(mask)
DCIP_overburden_PseudoSection.py (project: em_examples, author: geoscixyz)
def get_Surface_Potentials(mtrue, survey, src, field_obj):

    phi = field_obj['phi']
    CCLoc = mesh.gridCC
    XLoc = np.unique(mesh.gridCC[:, 0])
    surfaceInd, zsurfaceLoc = get_Surface(mtrue, XLoc)
    phiSurface = phi[surfaceInd]
    phiScale = 0.

    if(survey == "Pole-Dipole" or survey == "Pole-Pole"):
        refInd = Utils.closestPoints(mesh, [xmax+60., 0.], gridLoc='CC')
        # refPoint =  CCLoc[refInd]
        # refSurfaceInd = np.where(xSurface == refPoint[0])
        # phiScale = np.median(phiSurface)
        phiScale = phi[refInd]
        phiSurface = phiSurface - phiScale

    return XLoc, phiSurface, phiScale
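The np.unique(mesh.gridCC[:, 0]) call simply extracts the sorted, distinct x locations of the cell centers; a stand-alone sketch with an invented gridCC:

import numpy as np

gridCC = np.array([[0., 0.], [1., 0.],
                   [0., 1.], [1., 1.]])
# one entry per vertical column of cells, in ascending order
print(np.unique(gridCC[:, 0]))  # [0. 1.]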
sphereElectrostatic_example.py (project: em_examples, author: geoscixyz)
def Plot_ChargesDensity(XYZ, sig0, sig1, R, E0, ax):

    xr, yr, zr = np.unique(XYZ[:, 0]), np.unique(XYZ[:, 1]), np.unique(XYZ[:, 2])
    xcirc = xr[np.abs(xr) <= R]

    Et, Ep, Es = get_ElectricField(XYZ, sig0, sig1, R, E0)
    rho = get_ChargesDensity(XYZ, sig0, sig1, R, Et, Ep)

    ax.set_xlim([xr.min(), xr.max()])
    ax.set_ylim([yr.min(), yr.max()])
    ax.set_aspect('equal')
    Cplot = ax.pcolor(xr, yr, rho.reshape(xr.size, yr.size))
    cb1 = plt.colorbar(Cplot, ax=ax)
    cb1.set_label(label='Charge Density ($C/m^2$)', size=ftsize_label)
    cb1.ax.tick_params(labelsize=ftsize_axis)
    ax.plot(xcirc, np.sqrt(R**2-xcirc**2), '--k', xcirc, -np.sqrt(R**2-xcirc**2), '--k')
    ax.set_ylabel('Y coordinate ($m$)', fontsize=ftsize_label)
    ax.set_xlabel('X coordinate ($m$)', fontsize=ftsize_label)
    ax.tick_params(labelsize=ftsize_axis)
    ax.set_title('Charges Density', fontsize=ftsize_title)

    return ax

