Python examples: numpy flatnonzero()

np.flatnonzero(a) returns the indices of the non-zero elements of a, flattened; it is equivalent to np.nonzero(np.ravel(a))[0]. The snippets below are collected from open-source projects and show common usage patterns. All of them assume NumPy is imported as np (the SOTA-Py example imports it as numpy).

From utils.py in project vae-style-transfer (author: sunsided):
def imcrop_tosquare(img):
    """Make any image a square image.

    Parameters
    ----------
    img : np.ndarray
        Input image to crop, assumed at least 2d.

    Returns
    -------
    crop : np.ndarray
        Cropped image.
    """
    size = np.min(img.shape[:2])
    extra = img.shape[:2] - size
    crop = img
    for i in np.flatnonzero(extra):
        crop = np.take(crop, extra[i] // 2 + np.r_[:size], axis=i)
    return crop
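A quick check of the cropping logic (assuming imcrop_tosquare from the snippet above is in scope): np.flatnonzero(extra) picks out only the axes that actually exceed the square size, so square axes are left untouched.

import numpy as np

# Toy 4x6 "image"; only axis 1 has extra pixels to trim.
img = np.arange(24).reshape(4, 6)
size = np.min(img.shape[:2])        # 4
extra = img.shape[:2] - size        # array([0, 2])
print(np.flatnonzero(extra))        # [1] -> crop along axis 1 only
print(imcrop_tosquare(img).shape)   # (4, 4)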
From node.py in project edm2016 (author: Knewton):
def get_data_by_id(self, ids):
        """  Helper for getting current data values from stored identifiers
        :param float|list ids: ids for which data are requested
        :return: the stored data values corresponding to the given ids
        :rtype: np.ndarray
        """
        if self.ids is None:
            raise ValueError("IDs not stored in node {}".format(self.name))
        if self.data is None:
            raise ValueError("No data in node {}".format(self.name))
        ids = np.array(ids, ndmin=1, copy=False)
        found_items = np.in1d(ids, self.ids)
        if not np.all(found_items):
            raise ValueError("Cannot find {} among {}".format(ids[np.logical_not(found_items)],
                                                              self.name))
        idx = np.empty(len(ids), dtype='int')
        for k, this_id in enumerate(ids):
            if self.ids.ndim > 1:
                idx[k] = np.flatnonzero(np.all(self.ids == this_id, axis=1))[0]
            else:
                idx[k] = np.flatnonzero(self.ids == this_id)[0]
        return np.array(self.data, ndmin=1)[idx]
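Stripped of the class machinery, the id-to-row lookup is one flatnonzero call per id; a minimal sketch with made-up ids and data:

import numpy as np

ids = np.array([10, 20, 30, 40])
data = np.array([0.1, 0.2, 0.3, 0.4])
wanted = [30, 10]
idx = np.array([np.flatnonzero(ids == w)[0] for w in wanted])
print(data[idx])  # [0.3 0.1]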
From test_linear_operators.py in project edm2016 (author: Knewton):
def subset_test(lin_op):
        """ Test that subsetting a linear operator produces the correct outputs.
        :param LinearOperator lin_op: the linear operator
        """
        sub_idx = np.random.rand(lin_op.shape[0], 1) > 0.5
        # make sure at least one element included
        sub_idx[np.random.randint(0, len(sub_idx))] = True
        sub_idx = np.flatnonzero(sub_idx)
        sub_lin_op = undertest.get_subset_lin_op(lin_op, sub_idx)

        # test projection to subset of indices
        x = np.random.randn(lin_op.shape[1], np.random.randint(1, 3))
        np.testing.assert_array_almost_equal(sub_lin_op * x, (lin_op * x)[sub_idx, :])

        # test back projection from subset of indices
        y = np.random.randn(len(sub_idx), np.random.randint(1, 3))
        z = np.zeros((lin_op.shape[0], y.shape[1]))
        z[sub_idx] = y
        np.testing.assert_array_almost_equal(sub_lin_op.rmatvec(y), lin_op.rmatvec(z))
From test_node.py in project edm2016 (author: Knewton):
def test_get_data_by_id(self):
        dim, data, cpd, ids = self.gen_data()
        node = undertest.Node(name='test node', data=data, cpd=cpd, ids=ids)
        # test setting of ids
        np.testing.assert_array_equal(node.ids, ids)
        # test for one id
        idx = np.random.randint(0, dim)
        np.testing.assert_array_equal(node.get_data_by_id(ids[idx]).ravel(), node.data[idx])
        # test for a random set of ids
        ids_subset = np.random.choice(ids, dim, replace=True)
        np.testing.assert_array_equal(node.get_data_by_id(ids_subset),
                                      [node.data[np.flatnonzero(ids == x)[0]] for x in ids_subset])
        # test for all ids
        self.assertEqual(node.get_all_data_and_ids(), {x: node.get_data_by_id(x) for x in ids})
        # test when data are singleton
        dim, _, cpd, ids = self.gen_data(dim=1)
        node = undertest.Node(name='test node', data=1, cpd=cpd, ids=ids)
        self.assertEqual(node.get_all_data_and_ids(), {x: node.get_data_by_id(x) for x in ids})
From owhyper.py in project orange-infrared (author: markotoplak):
def migrate_settings(cls, settings_, version):
        if version < 2:
            # delete the saved attr_value to prevent crashes
            try:
                del settings_["context_settings"][0].values["attr_value"]
            except:
                pass

        # migrate selection
        if version <= 2:
            try:
                current_context = settings_["context_settings"][0]
                selection = getattr(current_context, "selection", None)
                if selection is not None:
                    selection = [(i, 1) for i in np.flatnonzero(np.array(selection))]
                    settings_.setdefault("imageplot", {})["selection_group_saved"] = selection
            except:
                pass
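The conversion at the heart of this migration maps a saved per-point selection vector to (index, group) pairs; a standalone sketch:

import numpy as np

selection = [0, 1, 0, 1, 1]  # old per-point flags
pairs = [(i, 1) for i in np.flatnonzero(np.array(selection))]
print(pairs)                 # [(1, 1), (3, 1), (4, 1)]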
From owintegrate.py in project orange-infrared (author: markotoplak):
def redraw_integral(self):
        dis = []
        if np.any(self.curveplot.selection_group) and self.curveplot.data:
            # select data
            ind = np.flatnonzero(self.curveplot.selection_group)[0]
            show = self.curveplot.data[ind:ind+1]

            previews = self.flow_view.preview_n()
            for i in range(self.preprocessormodel.rowCount()):
                if i in previews:
                    item = self.preprocessormodel.item(i)
                    desc = item.data(DescriptionRole)
                    params = item.data(ParametersRole)
                    if not isinstance(params, dict):
                        params = {}
                    preproc = desc.viewclass.createinstance(params)
                    preproc.metas = False
                    datai = preproc(show)
                    di = datai.domain.attributes[0].compute_value.draw_info(show)
                    color = self.flow_view.preview_color(i)
                    dis.append({"draw": di, "color": color})
        refresh_integral_markings(dis, self.markings_list, self.curveplot)
From diffexp.py in project cellranger (author: 10XGenomics):
def run_differential_expression(matrix, clusters, sseq_params=None):
    """ Compute differential expression for each cluster vs all other cells
        Args: matrix      - GeneBCMatrix  :  gene expression data
              clusters    - np.array(int) :  1-based cluster labels
              sseq_params - dict          :  params from compute_sseq_params """

    n_clusters = np.max(clusters)

    if sseq_params is None:
        print "Computing params..."
        sys.stdout.flush()
        sseq_params = compute_sseq_params(matrix.m)

    # Create a numpy array with 3*K columns;
    # each group of 3 columns is mean, log2, pvalue for cluster i
    all_de_results = np.zeros((matrix.genes_dim, 3*n_clusters))

    for cluster in xrange(1, 1+n_clusters):
        in_cluster = clusters == cluster
        group_a = np.flatnonzero(in_cluster)
        group_b = np.flatnonzero(np.logical_not(in_cluster))
        print 'Computing DE for cluster %d...' % cluster
        sys.stdout.flush()

        de_result = sseq_differential_expression(
            matrix.m, group_a, group_b, sseq_params)
        all_de_results[:, 0+3*(cluster-1)] = de_result['norm_mean_a']
        all_de_results[:, 1+3*(cluster-1)] = de_result['log2_fold_change']
        all_de_results[:, 2+3*(cluster-1)] = de_result['adjusted_p_value']

    return DIFFERENTIAL_EXPRESSION(all_de_results)
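This snippet is Python 2 (print statements, xrange). The cluster-vs-rest split itself is version-agnostic; a minimal Python 3 sketch with toy labels:

import numpy as np

clusters = np.array([1, 2, 1, 3, 2, 1])  # 1-based cluster labels
in_cluster = clusters == 1
group_a = np.flatnonzero(in_cluster)                  # [0 2 5]
group_b = np.flatnonzero(np.logical_not(in_cluster))  # [1 3 4]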
From matrix.py in project cellranger (author: 10XGenomics):
def select_nonzero_axes(self):
        new_mat = GeneBCMatrix(list(self.genes), list(self.bcs))
        new_mat.m = self.m

        nonzero_bcs = np.flatnonzero(new_mat.get_reads_per_bc())
        if new_mat.bcs_dim > len(nonzero_bcs):
            new_mat = new_mat.select_barcodes(nonzero_bcs)

        nonzero_genes = np.flatnonzero(new_mat.get_reads_per_gene())
        if new_mat.genes_dim > len(nonzero_genes):
            new_mat = new_mat.select_genes(nonzero_genes)

        return new_mat, nonzero_bcs, nonzero_genes
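The same idea works on any plain count matrix: sum along one axis, then keep only the rows and columns whose totals are non-zero. A sketch with a dense array standing in for GeneBCMatrix:

import numpy as np

m = np.array([[0, 3, 0],
              [0, 0, 0],
              [1, 2, 0]])
nonzero_rows = np.flatnonzero(m.sum(axis=1))  # [0 2]
nonzero_cols = np.flatnonzero(m.sum(axis=0))  # [0 1]
print(m[np.ix_(nonzero_rows, nonzero_cols)])  # [[0 3], [1 2]]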
From rfclass.py in project astrobase (author: waqasbhatti):
def gridsearch_report(results, n_top=3):
    for i in range(1, n_top + 1):
        candidates = np.flatnonzero(results['rank_test_score'] == i)
        for candidate in candidates:
            LOGINFO("Model with rank: {0}".format(i))
            LOGINFO("Mean validation score: {0:.3f} (std: {1:.3f})".format(
                  results['mean_test_score'][candidate],
                  results['std_test_score'][candidate]))
            LOGINFO("Parameters: {0}".format(results['params'][candidate]))
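The same rank lookup works directly on scikit-learn's cv_results_; a toy sketch with a hand-built results dict:

import numpy as np

results = {
    'rank_test_score': np.array([2, 1, 2, 1]),
    'mean_test_score': np.array([0.81, 0.93, 0.80, 0.93]),
}
best = np.flatnonzero(results['rank_test_score'] == 1)
print(best, results['mean_test_score'][best])  # [1 3] [0.93 0.93]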



From solution_classes.py in project risk-slim (author: ustunb):
def computeObjvals(self, getObjval):
        new = self.copy()
        compute_ind = np.flatnonzero(np.isnan(new.objvals))
        new.objvals[compute_ind] = map(getObjval, new.solutions[compute_ind])
        return new
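Note this snippet is Python 2 (there, map returns a list that can be assigned into the array slice). A Python 3 sketch of the same fill-the-missing-entries pattern, with a stand-in objective function:

import numpy as np

objvals = np.array([1.0, np.nan, 9.0, np.nan])
solutions = np.array([1.0, 2.0, 3.0, 4.0])
getObjval = lambda s: s ** 2  # stand-in objective
compute_ind = np.flatnonzero(np.isnan(objvals))
objvals[compute_ind] = [getObjval(s) for s in solutions[compute_ind]]
print(objvals)  # [ 1.  4.  9. 16.]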
From ShuffleLabelsOut.py in project SourceFilterContoursMelody (author: juanjobosch):
def _iter_indices(self):

        for y_train, y_test in super(ShuffleLabelsOut, self)._iter_indices():
            # these are the indices of classes in the partition
            # invert them into data indices

            train = np.flatnonzero(np.in1d(self.y_indices, y_train))
            test = np.flatnonzero(np.in1d(self.y_indices, y_test))

            yield train, test
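The class-to-sample mapping is np.in1d followed by flatnonzero; a self-contained sketch with toy label assignments:

import numpy as np

y_indices = np.array([0, 0, 1, 2, 1, 2])  # label id per sample
train_labels = np.array([0, 2])
train = np.flatnonzero(np.in1d(y_indices, train_labels))
print(train)  # [0 1 3 5]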
From utils.py in project higlass-server (author: hms-dbmi):
def get_chrom(abs_pos, chr_info=None, c=None):
    if chr_info is None:
        try:
            chr_info = get_chrom_names_cumul_len(c)
        except:
            return None

    try:
        chr_id = np.flatnonzero(chr_info[2] > abs_pos)[0] - 1
    except IndexError:
        return None

    return chr_info[0][chr_id]
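The lookup is a first-crossing search over cumulative chromosome lengths; a self-contained sketch with invented coordinates, where cumul plays the role of chr_info[2]:

import numpy as np

names = ['chr1', 'chr2', 'chr3']
cumul = np.array([0, 100, 250, 400])  # cumulative start positions
abs_pos = 130
chr_id = np.flatnonzero(cumul > abs_pos)[0] - 1
print(names[chr_id], abs_pos - cumul[chr_id])  # chr2 30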
From utils.py in project higlass-server (author: hms-dbmi):
def abs_coord_2_bin(c, pos, chr_info):
    try:
        chr_id = np.flatnonzero(chr_info[2] > pos)[0] - 1
    except IndexError:
        return c.info['nbins']

    chrom = chr_info[0][chr_id]
    relPos = pos - chr_info[2][chr_id]

    return c.offset((chrom, relPos, chr_info[1][chrom]))
From SOTA.py in project SOTA-Py (author: mehrdadn):
def prepare(self, isrc, tbudget, preallocate_aggressively, prediscretize, stderr=None):
        # preallocate_aggressively = {-1: minimize dynamic memory usage, 0: minimize initialization latency, 1: maximize speed}
        network = self.network
        if stderr is not None: tprev = timeit.default_timer(); print_("Computing optimal update order...", end=' ', file=stderr)
        ibudget = int(discretize_up(numpy.asarray([tbudget], float), self.discretization))
        (stack, _, visited_iedges, end_itimes) = dijkstra(network, False, isrc, self.timins, True, ibudget, self.min_itimes_to_dest)
        eused = numpy.flatnonzero((0 <= visited_iedges) & (visited_iedges <= ibudget)).tolist()
        self.end_itimes = end_itimes.tolist()
        if stderr is not None: print_(int((timeit.default_timer() - tprev) * 1000), "ms", file=stderr); del tprev

        if prediscretize:
            if stderr is not None: tprev = timeit.default_timer(); print_("Discretizing edges...", end=' ', file=stderr)
            for eiused, tidist in zip(eused, network.discretize_edges(
                list(map(network.edges.hmm.__getitem__, eused)),
                list(map(network.edges.tmin.__getitem__, eused)),
                self.discretization,
                suppress_calculation=self.suppress_calculation
            )):
                self.cached_edges_tidist[eiused] = tidist
            if stderr is not None: print_(int((timeit.default_timer() - tprev) * 1000), "ms", file=stderr); del tprev

        if preallocate_aggressively >= 0:
            # uv[i][t] should be the probability of reaching the destination from node i in <= t steps (so for T = 0 we get uv[idst] == [1.0])
            # Rationale: uv[i] should be the convolution of edge[i,j] with uv[j], with no elements missing.
            for i in xrange(len(self.min_itimes_to_dest)):
                m = max(self.end_itimes[i] - self.min_itimes_to_dest[i], 0)
                self.uv[i].ensure_size(m, preallocate_aggressively > 0)
            for eij in eused:
                m = max(self.end_itimes[network.edges.begin[eij]] - (self.timins[eij] + self.min_itimes_to_dest[network.edges.end[eij]]), 0)
                self.ue[eij].ensure_size(m, preallocate_aggressively > 0)
                self.we[eij].ensure_size(m, preallocate_aggressively > 0)
        return (stack, eused, ibudget)
From monitors.py in project braindecode (author: robintibor):
def _compute_trial_pred_labels_from_cnt_y(self, dataset, all_preds, ):
        # Todo: please test this
        # we only want the preds that are for the same labels as the last label in y
        # (there might be parts of other class-data at start, for trialwise misclass we assume
        # they are contained in other trials at the end...)
        preds_per_trial = compute_preds_per_trial_for_set(
            all_preds, self.input_time_length, dataset)
        trial_labels = []
        trial_pred_labels = []
        for trial_pred, trial_y in zip(preds_per_trial, dataset.y):
            # first cut to the part actually having predictions
            trial_y = trial_y[-trial_pred.shape[1]:]
            wanted_class = trial_y[-1]
            trial_labels.append(wanted_class)
            # extract the first marker different from the wanted class
            # by starting from the back of the trial
            i_last_sample = np.flatnonzero(trial_y[::-1] != wanted_class)
            if len(i_last_sample) > 0:
                i_last_sample = i_last_sample[0]
                # remember last sample is now from back
                trial_pred = trial_pred[:, -i_last_sample:]
            trial_pred_label = np.argmax(np.mean(trial_pred, axis=1))
            trial_pred_labels.append(trial_pred_label)
        trial_labels = np.array(trial_labels)
        trial_pred_labels = np.array(trial_pred_labels)
        return trial_labels, trial_pred_labels
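The trailing-run extraction (scan from the back for the first label that differs from the final one) can be tested in isolation; a sketch with a toy label sequence:

import numpy as np

trial_y = np.array([0, 0, 1, 1, 1])
wanted_class = trial_y[-1]                              # 1
i_last = np.flatnonzero(trial_y[::-1] != wanted_class)  # [3 4], counted from the back
run_len = i_last[0] if len(i_last) else len(trial_y)    # trailing-run length: 3
print(trial_y[-run_len:])                               # [1 1 1]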
From optimize.py in project AlphaPy (author: ScottFreeLLC):
def grid_report(results, n_top=3):
    r"""Report the top grid search scores.

    Parameters
    ----------
    results : dict of numpy arrays
        Mean test scores for each grid search iteration.
    n_top : int, optional
        The number of grid search results to report.

    Returns
    -------
    None : None

    """
    for i in range(1, n_top + 1):
        candidates = np.flatnonzero(results['rank_test_score'] == i)
        for candidate in candidates:
            logger.info("Model with rank: {0}".format(i))
            logger.info("Mean validation score: {0:.3f} (std: {1:.3f})".format(
                        results['mean_test_score'][candidate],
                        results['std_test_score'][candidate]))
            logger.info("Parameters: {0}".format(results['params'][candidate]))


From utils.py in project radwatch-analysis (author: bearing):
def peak_finder(spectrum, energy):
    '''
    PEAK_FINDER will search for peaks within a certain range determined by the
    Energy given. It takes a spectrum object and an Energy value as input. The
    energy range to look in is given by the Full-Width-Half-Maximum (FWHM).
    If more than one peak is found in the given range, the peak with the
    highest amount of counts will be used.
    '''
    e0 = spectrum.energy_cal[0]
    eslope = spectrum.energy_cal[1]
    energy_axis = e0 + eslope*spectrum.channel

    peak_energy = []
    # rough estimate of fwhm.
    fwhm = 0.05*energy**0.5
    fwhm_range = 1

    # peak search area
    start_region = np.flatnonzero(energy_axis > energy - fwhm_range * fwhm)[0]
    end_region = np.flatnonzero(energy_axis > energy + fwhm_range * fwhm)[0]
    y = spectrum.data[start_region:end_region]
    indexes = peakutils.indexes(y, thres=0.5, min_dist=4)
    tallest_peak = []
    if indexes.size == 0:
        peak_energy.append(int((end_region - start_region) / 2) + start_region)
    else:
        for i in range(indexes.size):
            spot = spectrum.data[indexes[i]+start_region]
            tallest_peak.append(spot)
        indexes = indexes[np.argmax(tallest_peak)]
        peak_energy.append(int(indexes+start_region))
    peak_energy = float(energy_axis[peak_energy])
    return(peak_energy)
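The search window is built from two first-crossing lookups on the energy axis; a minimal sketch with a linear axis and an invented half-width:

import numpy as np

energy_axis = np.linspace(0, 100, 11)  # 0, 10, ..., 100
energy, half_width = 42.0, 15.0
start = np.flatnonzero(energy_axis > energy - half_width)[0]
end = np.flatnonzero(energy_axis > energy + half_width)[0]
print(start, end, energy_axis[start:end])  # 3 6 [30. 40. 50.]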
From Gamma_Analysis.py in project radwatch-analysis (author: bearing):
def peak_finder(spectrum, energy):
    '''
    PEAK_FINDER will search for peaks within a certain range determined by the
    Energy given. It takes a Spectra file and an Energy value as input. The
    energy range to look in is given by the Full-Width-Half-Maximum (FWHM).
    If more than one peak is found in the given range, the peak with the
    highest amount of counts will be used.
    '''
    e0 = spectrum.energy_cal[0]
    eslope = spectrum.energy_cal[1]
    energy_axis = e0 + eslope*spectrum.channel

    peak_energy = []
    # rough estimate of fwhm.
    fwhm = 0.05*energy**0.5
    fwhm_range = 1

    # peak search area
    start_region = np.flatnonzero(energy_axis > energy - fwhm_range * fwhm)[0]
    end_region = np.flatnonzero(energy_axis > energy + fwhm_range * fwhm)[0]
    y = spectrum.data[start_region:end_region]
    indexes = peakutils.indexes(y, thres=0.5, min_dist=4)
    tallest_peak = []
    if indexes.size == 0:
        peak_energy.append(int((end_region - start_region) / 2) + start_region)
    else:
        for i in range(indexes.size):
            spot = spectrum.data[indexes[i]+start_region]
            tallest_peak.append(spot)
        indexes = indexes[np.argmax(tallest_peak)]
        peak_energy.append(int(indexes+start_region))
    peak_energy = float(energy_axis[peak_energy])
    return(peak_energy)
From quaternion.py in project cvcalib (author: Algomorph):
def _transform2quat(self):
        """Construct quaternion from the transform/rotation matrix 

        :returns: quaternion formed from transform matrix
        :rtype: numpy array
        """

        # Code was copied from perl PDL code that uses backwards index ordering
        T = self.transform.transpose()  
        den = np.array([ 1.0 + T[0, 0] - T[1, 1] - T[2, 2],
                              1.0 - T[0, 0] + T[1, 1] - T[2, 2],
                              1.0 - T[0, 0] - T[1, 1] + T[2, 2],
                              1.0 + T[0, 0] + T[1, 1] + T[2, 2]])

        max_idx = np.flatnonzero(den == max(den))[0]

        q = np.zeros(4)
        q[max_idx] = 0.5 * sqrt(max(den))
        denom = 4.0 * q[max_idx]
        if (max_idx == 0):
            q[1] = (T[1, 0] + T[0, 1]) / denom 
            q[2] = (T[2, 0] + T[0, 2]) / denom 
            q[3] = -(T[2, 1] - T[1, 2]) / denom 
        if (max_idx == 1):
            q[0] = (T[1, 0] + T[0, 1]) / denom 
            q[2] = (T[2, 1] + T[1, 2]) / denom 
            q[3] = -(T[0, 2] - T[2, 0]) / denom 
        if (max_idx == 2):
            q[0] = (T[2, 0] + T[0, 2]) / denom 
            q[1] = (T[2, 1] + T[1, 2]) / denom 
            q[3] = -(T[1, 0] - T[0, 1]) / denom 
        if (max_idx == 3):
            q[0] = -(T[2, 1] - T[1, 2]) / denom 
            q[1] = -(T[0, 2] - T[2, 0]) / denom 
            q[2] = -(T[1, 0] - T[0, 1]) / denom 

        return q
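The snippet assumes from math import sqrt. Note that np.flatnonzero(den == max(den))[0] is simply the index of the first maximum; a sketch showing the equivalence with np.argmax:

import numpy as np

den = np.array([0.2, 1.8, 1.8, 0.5])
print(np.flatnonzero(den == den.max())[0])  # 1
print(np.argmax(den))                       # 1, the same first-maximum index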
From check_dataset.py in project pytorch_fnet (author: AllenCellModeling):
def check_blank_slices(volume, slice_dim='z'):
    idx_dim = 'zyx'.find(slice_dim)
    axes_other = tuple(i for i in range(3) if (i != idx_dim))
    assert idx_dim >= 0

    means = np.mean(volume, axis=axes_other)
    assert means.ndim == 1
    threshold = 10
    median_of_means = np.median(means)
    mask_bads = np.logical_or(means < threshold, means < 0.5*median_of_means)
    if np.count_nonzero(mask_bads):
        idx_bads = np.flatnonzero(mask_bads)
        msg = 'bad {:s}: {:s}'.format(slice_dim, str(tuple(idx_bads)))
        return False, msg
    return True, 'okay'

