Python examples of itertools.combinations_with_replacement()
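itertools.combinations_with_replacement(iterable, r) yields every r-length tuple of elements drawn from the iterable, allowing an element to be repeated but treating different orderings as the same tuple; results come out in lexicographic order with respect to the input. A minimal illustration before the project snippets below:

from itertools import combinations_with_replacement

# Three symbols taken two at a time, repeats allowed, order ignored:
print(list(combinations_with_replacement('ABC', 2)))
# [('A', 'A'), ('A', 'B'), ('A', 'C'), ('B', 'B'), ('B', 'C'), ('C', 'C')]
# The count is C(n + r - 1, r); here C(4, 2) = 6.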

metrics.py (project: community-detection, author: msionkin)
def corr_matr():
    louv = communities_to_array_from_file("./results/you_lovain.txt")
    slm = communities_to_array_from_file("./results/you_slm.txt")
    walk = prepare_comms("./results/you_walktrap.txt")
    label = prepare_comms("./results/you_label_propagation_reduced.txt")
    info = prepare_comms("./results/you_infomap.txt")
    fast = comms_from_file_fast_greedy("./results/com_youtube_ungraph_zero_start_reduced-fc_a.groups")
    fast = communities_to_array(fast)

    data = {'louv': louv, 'slm': slm, 'walk': walk, 'label': label, 'info': info, 'fast': fast}
    #data = {'louv': louv, 'slm': slm, 'walk': walk}
    columns = data.keys()
    df = pd.DataFrame(columns=columns, index=columns)
    for col_a, col_b in itertools.combinations_with_replacement(columns, 2):
        print(col_a, col_b)
        df[col_a][col_b] = calc(data[col_a], data[col_b])
    print(df)
    return df
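
The loop above visits each unordered pair of detection results exactly once, self-pairs included, so every comparison is computed a single time; note that only the (col_a, col_b) orientation of the frame is written. A minimal, self-contained sketch of the same pattern with hypothetical labels and a placeholder metric, mirroring the value so the matrix comes out symmetric:

import itertools
import pandas as pd

labels = ['louv', 'slm', 'walk']              # hypothetical method names
def calc(a, b):                               # placeholder for the real comparison metric
    return 1.0 if a == b else 0.5

df = pd.DataFrame(index=labels, columns=labels, dtype=float)
for a, b in itertools.combinations_with_replacement(labels, 2):
    df.loc[a, b] = df.loc[b, a] = calc(a, b)  # each pair computed once, stored twice
print(df)
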
LongerExplorationPolicy.py (project: deer, author: VinF)
def sampleUniformActionSequence(self):
        if ( isinstance(self.n_actions,int)):
            """ Sample an action sequence of length self._l, where the unordered sequences have uniform probabilities"""
            actions_list = range(self.n_actions)
        else:   
            """For N exploration steps, the goal is to have actions such that their sum spans quite uniformly 
            the whole range of possibilities. Among those possibilities, random choice/order of actions. """

            possible_actions=[]
            # Add for all actions N random element between min and max
            N=3
            for i,a in enumerate(self.n_actions):
                possible_actions.append([])
                for j in range(N):
                    possible_actions[i].append( self.random_state.uniform(self.n_actions[i][0],self.n_actions[i][1]) )
            actions_list = list(itertools.product(*possible_actions))

        sequences_with_replacement = list(itertools.combinations_with_replacement(actions_list, self._l))
        index_pick = self.random_state.randint(0, len(sequences_with_replacement))
        sequence = list(sequences_with_replacement[index_pick])
        self.random_state.shuffle(sequence)

        return sequence
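
The point of using combinations_with_replacement here is that every unordered sequence of the requested length appears exactly once in the enumeration, so picking a single index uniformly and then shuffling gives each multiset of actions the same probability regardless of how many orderings it has. A small standalone sketch of that sampling pattern, with hypothetical values:

import itertools
import numpy as np

rng = np.random.RandomState(0)
actions, length = [0, 1, 2], 2                            # hypothetical action set and sequence length
multisets = list(itertools.combinations_with_replacement(actions, length))
sequence = list(multisets[rng.randint(len(multisets))])   # uniform over unordered sequences
rng.shuffle(sequence)                                     # then randomize the order
print(sequence)
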
wordlist_gen.py (project: hakkuframework, author: 4shadoww)
def run(self):
        try:
            f = open(variables["output"][0], "a")
        except Exception as error:
            printError(error)
            return ModuleError(error)

        for L in range(self.lenmin, self.lenmax):
            for word in itertools.combinations_with_replacement(self.chars, L):
                if self.sh.kill == True:
                    f.close()
                    return
                word = ''.join(word)
                f.write(word+"\n")

        f.close()
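
Worth noting: combinations_with_replacement only yields tuples whose elements appear in non-decreasing input order, so a wordlist built this way contains 'abc' but never 'cba'. When every ordering of every length is wanted, itertools.product is the usual alternative. A quick comparison:

import itertools

chars, length = 'abc', 2
cwr = [''.join(w) for w in itertools.combinations_with_replacement(chars, length)]
prod = [''.join(w) for w in itertools.product(chars, repeat=length)]
print(cwr)   # ['aa', 'ab', 'ac', 'bb', 'bc', 'cc']                   6 order-insensitive picks
print(prod)  # ['aa', 'ab', 'ac', 'ba', 'bb', 'bc', 'ca', 'cb', 'cc'] all 9 strings
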
analyze_metaclusters.py (project: word2vec_pipeline, author: NIHOPA)
def _compute_dispersion_matrix(X, labels):
    n = len(np.unique(labels))
    dist = np.zeros((n, n))
    ITR = list(itertools.combinations_with_replacement(range(n), 2))
    for i, j in tqdm(ITR):

        if i == j:
            d = pdist(X[labels == i], metric='cosine')
        else:
            d = cdist(X[labels == i], X[labels == j], metric='cosine')
            # Only take upper diagonal (+diagonal elements)
            d = d[np.triu_indices(n=d.shape[0], m=d.shape[1], k=0)]

        dist[i, j] = dist[j, i] = d.mean()

    return dist
data_manipulation.py (project: ML-From-Scratch, author: eriklindernoren)
def polynomial_features(X, degree):
    n_samples, n_features = np.shape(X)

    def index_combinations():
        combs = [combinations_with_replacement(range(n_features), i) for i in range(0, degree + 1)]
        flat_combs = [item for sublist in combs for item in sublist]
        return flat_combs

    combinations = index_combinations()
    n_output_features = len(combinations)
    X_new = np.empty((n_samples, n_output_features))

    for i, index_combs in enumerate(combinations):  
        X_new[:, i] = np.prod(X[:, index_combs], axis=1)

    return X_new
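
Each index tuple from combinations_with_replacement becomes one output column, the product of the selected input features, and the empty tuple at degree 0 yields the bias column of ones. A worked sketch of the expansion for two features at degree 2, reproducing the index logic above so it runs on its own:

import numpy as np
from itertools import combinations_with_replacement

X = np.array([[2.0, 3.0]])
degree, n_features = 2, X.shape[1]
combs = [c for d in range(degree + 1)
         for c in combinations_with_replacement(range(n_features), d)]
print(combs)  # [(), (0,), (1,), (0, 0), (0, 1), (1, 1)]
# Columns are 1, x0, x1, x0**2, x0*x1, x1**2:
print(np.column_stack([np.prod(X[:, list(c)], axis=1) for c in combs]))  # [[1. 2. 3. 4. 6. 9.]]
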
_expr_utils.py (project: kdotp-symmetry, author: greschd)
def monomial_basis(*degrees):
    """
    Returns the product basis of (kx, ky, kz), with monomials of the given degrees.

    :param degrees: Degree of the monomials. Multiple degrees can be given, in which case the basis consists of the monomials of all given degrees.
    :type degrees: int

    Example:

        >>> import kdotp_symmetry as kp
        >>> kp.monomial_basis(*range(3))
        [1, kx, ky, kz, kx**2, kx*ky, kx*kz, ky**2, ky*kz, kz**2]
    """
    if any(deg < 0 for deg in degrees):
        raise ValueError('Degrees must be non-negative integers')
    basis = []
    for deg in sorted(degrees):
        monomial_tuples = combinations_with_replacement(K_VEC, deg)
        basis.extend(
            reduce(operator.mul, m, sp.Integer(1)) for m in monomial_tuples
        )
    return basis
2016_QR_C_CoinJam.py (project: algorithmStudyDayByDay, author: Eunsol-Lee)
def solve(N, J):
    pairs = 8
    zeros = N - 4 - pairs
    locations = combinations_with_replacement('01234', zeros)

    count = 0

    for result in locations:
        now = 0
        string = ''
        for i in range(zeros):
            while (now != int(result[i])):
                string += '11'
                now += 1
            string += '0'
        while (now < pairs / 2):
            now += 1
            string += '11'
        print ('11%s11 3 2 3 2 7 2 3 2 3' % (string))
        count += 1
        if (count == J):
            return
campus_topo_gen.py (project: ride, author: KyleBenson)
def add_inter_building_links(self):
        """Adds links between buildings randomly for additional
        redundancy and topology diversity."""

        try:
            endpoints = random.sample(self.major_building_routers, self.inter_building_links * 2)
            endpoints = zip(endpoints[:len(endpoints)/2], endpoints[len(endpoints)/2:])
        except ValueError as e:
            raise ValueError("NOTE: requested more inter_building_links "
                             "than can be placed without repeating (major) buildings!")
            # XXX: this doesn't seem to work for 3 buildings and 2 inter-building links: fuhgedaboudit
            if self.inter_building_links > 400:
                print "Requested a lot of inter-building links.  This may take a while to generate all combinations without repeat..."
            endpoints = list(itertools.combinations_with_replacement(self.major_building_routers, 2))
            random.shuffle(endpoints)
            endpoints = endpoints[:self.inter_building_links]

        for src, dst in endpoints:
            self.add_link(src, dst)
fixes.py (project: Parallel-SGD, author: angadgill)
def combinations_with_replacement(iterable, r):
        # combinations_with_replacement('ABC', 2) --> AA AB AC BB BC CC
        pool = tuple(iterable)
        n = len(pool)
        if not n and r:
            return
        indices = [0] * r
        yield tuple(pool[i] for i in indices)
        while True:
            for i in reversed(range(r)):
                if indices[i] != n - 1:
                    break
            else:
                return
            indices[i:] = [indices[i] + 1] * (r - i)
            yield tuple(pool[i] for i in indices)
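
This is the pure-Python recipe from the itertools documentation, kept as a fallback for environments where itertools lacks the function. It walks an index vector through all non-decreasing index tuples: find the rightmost index that can still be incremented, bump it, and reset everything to its right to that same value. A tiny standalone trace of that index walk for a pool of size 2 and r = 2:

n, r = 2, 2
indices = [0] * r
out = [tuple(indices)]
while True:
    for i in reversed(range(r)):
        if indices[i] != n - 1:
            break
    else:
        break                                   # every index is maxed out: done
    indices[i:] = [indices[i] + 1] * (r - i)    # bump and reset the tail
    out.append(tuple(indices))
print(out)  # [(0, 0), (0, 1), (1, 1)], i.e. AA, AB, BB for pool 'AB'
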
nodereport.py (project: CPU-Manager-for-Kubernetes, author: Intel-Corp)
def check_cmk_config(report, conf_dir):
    check_conf = report.add_check("configDirectory")

    # Verify we can read the config directory
    try:
        c = config.Config(conf_dir)
    except Exception:
        check_conf.add_error("Unable to read the CMK configuration directory")
        return  # Nothing more we can check for now

    # Ensure pool cpu lists are disjoint
    with c.lock():
        cpu_lists = [
            {
                "pool": p,
                "list": cl,
                "cpus": proc.unfold_cpu_list(cl)
            }
            for p in c.pools()
            for cl in c.pool(p).cpu_lists()
        ]

    # Subset of cartesian product without self-maplets:
    # If a -> b is in the result then b -> a is not.
    # Search the filtered product for overlapping CPU lists.
    def same_list(a, b):
        return a["pool"] is b["pool"] and a["list"] is b["list"]

    def disjoint(a, b):
        return not set(a["cpus"]).intersection(set(b["cpus"]))

    for (a, b) in itertools.combinations_with_replacement(cpu_lists, 2):
        if not same_list(a, b) and not disjoint(a, b):
            check_conf.add_error(
                    "CPU list overlap detected in "
                    "{}:{} and {}:{} (in both: {})".format(
                        a["pool"], a["list"],
                        b["pool"], b["list"],
                        b["cpus"]))
crackingPassword.py (project: codefights, author: emirot)
def crackingPassword(digits, k, d):
    def createNumber(digs):
        return "".join(map(str, digs))

    return [i for i in sorted([createNumber(i) for i in set(
        # list(combinations_with_replacement(digits,k)) + 
        # list(permutations(digits,k)) + 
        list(product(createNumber(digits), repeat=k)) ) ]) if ( 
        (int(i) % d == 0) or (int(i.lstrip("0")) % d == 0)
    ) ]
compatibility.py (project: zippy, author: securesystemslab)
def combinations_with_replacement(iterable, r):
        """Return r length subsequences of elements from the input iterable
        allowing individual elements to be repeated more than once.

        Combinations are emitted in lexicographic sort order. So, if the
        input iterable is sorted, the combination tuples will be produced
        in sorted order.

        Elements are treated as unique based on their position, not on their
        value. So if the input elements are unique, the generated combinations
        will also be unique.

        See also: combinations

        Examples
        ========

        >>> from sympy.core.compatibility import combinations_with_replacement
        >>> list(combinations_with_replacement('AB', 2))
        [('A', 'A'), ('A', 'B'), ('B', 'B')]
        """
        pool = tuple(iterable)
        n = len(pool)
        if not n and r:
            return
        indices = [0] * r
        yield tuple(pool[i] for i in indices)
        while True:
            for i in reversed(range(r)):
                if indices[i] != n - 1:
                    break
            else:
                return
            indices[i:] = [indices[i] + 1] * (r - i)
            yield tuple(pool[i] for i in indices)
preprocessing.py (project: ottertune, author: cmu-db)
def _combinations(n_features, degree, interaction_only, include_bias):
        comb = (combinations if interaction_only else combinations_with_replacement)
        start = int(not include_bias)
        return chain.from_iterable(comb(range(n_features), i)
                                   for i in range(start, degree + 1))
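
The helper switches between combinations (interaction-only terms, no feature repeated within a term) and combinations_with_replacement (full polynomial terms, powers allowed), and the start offset drops the empty tuple that stands for the bias column. A standalone sketch mirroring the helper, showing the index tuples produced for two features at degree 2:

from itertools import chain, combinations, combinations_with_replacement

def index_tuples(n_features, degree, interaction_only, include_bias):
    comb = combinations if interaction_only else combinations_with_replacement
    start = int(not include_bias)
    return list(chain.from_iterable(comb(range(n_features), i)
                                    for i in range(start, degree + 1)))

print(index_tuples(2, 2, interaction_only=False, include_bias=True))
# [(), (0,), (1,), (0, 0), (0, 1), (1, 1)]  -> 1, x0, x1, x0**2, x0*x1, x1**2
print(index_tuples(2, 2, interaction_only=True, include_bias=True))
# [(), (0,), (1,), (0, 1)]                  -> squared terms dropped
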
generator.py (project: rexploit, author: DaniLabs)
def combinationRange(self, pattern, lengths, ranges):
        """
        Recursive function that create all possibilities
        :param pattern: the pattern
        :param lengths: the pattern's lengths
        :param ranges:
        :return: all patterns
        """
        if not pattern:
            return None

        if lengths and ranges:
            newPattern = []

            # Get first range and first length
            r = ranges.pop(0)
            l = int(lengths.pop(0))

            myRange = self.splitRange(r)
            if not myRange:
                return None

            comb = combinations_with_replacement(myRange, l)

            try:
                i = 0
                while i <= self.__maximum:
                    result = [''.join(comb.next())][0]
                    replace = "[" + r + "]"
                    for p in pattern:
                        newPattern.append(p.replace(replace, result, 1))
                    i += 1
            except StopIteration:
                pass

            return self.combinationRange(newPattern, lengths, ranges)
        else:
            return pattern
ultimate_question.py (project: HackerRank, author: xkal36)
def is_ultimate_question(x, y, z):
    operators = ['+', '*']
    for i in itertools.combinations_with_replacement(operators, 2):
        expression1 = (str(x) + '%s' + str(y) + '%s' + str(z)) % (i[0], i[1])
        expression2 = (str(x) + '%s' + str(y) + '%s' + str(z)) % (i[1], i[0])
        if eval(expression1) == 42:
            return expression1
        elif eval(expression2) == 42:
            return expression2
    return "This is not the ultimate question"
discriminator.py (project: plda, author: RaviSoji)
def get_unique_idx_pairs(self, idxs):
        idx_pairs = []
        for pair in combinations_with_replacement(idxs, 2):
            idx_pairs.append(list(pair))

        return np.asarray(idx_pairs)
pairs_of_integers_from_0_to_n.py (project: codewars, author: AlekseiAQ)
def generate_pairs(n):
    return [list(x) for x in combinations_with_replacement(range(n+1), 2)]
__main__.py (project: CNValloc, author: m1m0r1)
def combinations_with_replacement(iterable, r):
        """
        >>> list(combinations_with_replacement('AT', 2))
        [('A', 'A'), ('A', 'T'), ('T', 'T')]
        """
        assert isinstance(r, (int, long))
        assert r >= 0

        if r == 0:
            yield ()
        else:
            alls = list(iterable)
            for (i, el) in enumerate(alls):
                for els in combinations_with_replacement(alls[i:], r - 1):
                    yield (el,) + els
utils.py (project: ecml17, author: gmum)
def must_consistent(part):
    must = []
    for i in range(len(part)):
        must += list(comb(part[i], 2))

    must += [(j, i) for i, j in must if i != j]
    return must
putDown.py (project: AAAI, author: vignesh0710)
def combo_generator(l,numberofStacks,numberofBlocks):

    combos = []
    for each in combinations_with_replacement(l, numberofStacks):
        if sum(list(map(int, each))) == numberofBlocks:
            # print (list(map(int, each)))
            combos.append(list(map(int, each)))
    #print (combos)
    return combos
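
Here combinations_with_replacement enumerates the unordered choices of one size per stack, and the sum filter keeps those that use exactly numberofBlocks blocks, i.e. the ways to split the blocks over the stacks ignoring stack order. A standalone rerun of the idea with hypothetical small numbers:

from itertools import combinations_with_replacement

number_of_stacks, number_of_blocks = 2, 3        # hypothetical sizes
sizes = range(number_of_blocks + 1)              # a stack may hold 0..3 blocks
combos = [list(c) for c in combinations_with_replacement(sizes, number_of_stacks)
          if sum(c) == number_of_blocks]
print(combos)  # [[0, 3], [1, 2]], the unordered ways to split 3 blocks over 2 stacks
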
forced_sampler.py (project: CANToolz, author: CANToolz)
def process(self, message) -> Iterable:
        stream = str(message)

        if stream not in self._streams:
            self._streams.add(stream)

            for bind in combinations(self._streams, self._power):
                if stream in bind:
                    self._sampler.bind(sorted(bind), self._joiner)

        yield from self._sampler.process(message)
phasing.py (project: phasm, author: AbeelLab)
def generate_new_hsets(self, haplotype_set: HaplotypeSet,
                           possible_paths: List[Tuple[OrientedDNASegment]],
                           relevant_reads: RelevantReads
                           ) -> Iterable[HaplotypeSet]:
        """For a given haplotype set, generate all possible extensions at the
        current bubble. Only yield the new haplotype sets that have a relative
        likelihood above a given threshold."""

        if callable(self.threshold):
            threshold = self.threshold(len(relevant_reads))
        else:
            threshold = self.threshold

        if threshold == 0.0:
            threshold = float('-inf')
        else:
            threshold = math.log10(threshold)

        if self.start_of_block:
            # For the first bubble the order does not matter, as a permutation
            # in a different order will in the end result in the same haplotype
            # set.
            extension_iter = iter(combinations_with_replacement(possible_paths,
                                                                self.ploidy))
        else:
            # Otherwise all possible k-tuples of possible paths, because now
            # order does matter
            extension_iter = iter(product(possible_paths, repeat=self.ploidy))

        num_possible_sets = len(possible_paths)**self.ploidy

        for extension in extension_iter:
            ext_read_sets = []
            # Get graph reads of the extension
            for hap_ext in extension:
                # We index with [1:-1] to ignore the entrance and exit of the
                # bubble
                ext_read_sets.append(set(self.get_all_reads(hap_ext[1:-1])))

            rl = self.calculate_rl(haplotype_set, extension, ext_read_sets,
                                   relevant_reads, num_possible_sets)

            if rl >= threshold:
                new_set = haplotype_set.extend(extension, ext_read_sets)
                new_set.log_rl = rl
                yield new_set
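
The distinction drawn in the comments above: at the first bubble a haplotype set is order-insensitive, so enumerating multisets of paths with combinations_with_replacement is enough, while later bubbles need itertools.product because each slot extends one specific haplotype. The difference in how many candidate extensions get enumerated, for hypothetical small numbers:

from itertools import combinations_with_replacement, product
from math import comb

paths, ploidy = ['p0', 'p1', 'p2'], 2            # hypothetical bubble paths and ploidy
unordered = list(combinations_with_replacement(paths, ploidy))
ordered = list(product(paths, repeat=ploidy))
print(len(unordered), len(ordered))              # 6 9
assert len(unordered) == comb(len(paths) + ploidy - 1, ploidy)   # C(n + k - 1, k)
assert len(ordered) == len(paths) ** ploidy                      # n ** k
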
gridspec.py (project: xbpch, author: darothen)
def _get_model_info(model_name):
    """
    Get the grid specifications for a given model.

    Parameters
    ----------
    model_name : string
        Name of the model. Supports multiple formats
        (e.g., 'GEOS5', 'GEOS-5' or 'GEOS_5').

    Returns
    -------
    specifications : dict
        Grid specifications as a dictionary.

    Raises
    ------
    ValueError
        If the model is not supported (see `models`) or if the given
        `model_name` corresponds to several entries in the list of
        supported models.

    """
    # trying to get as much as possible a valid model name from the given
    # `model_name`, using regular expressions.
    split_name = re.split(r'[\-_\s]', model_name.strip().upper())
    sep_chars = ('', ' ', '-', '_')
    gen_seps = itertools.combinations_with_replacement(
        sep_chars, len(split_name) - 1
    )
    test_names = ("".join((n for n in itertools.chain(*list(zip(split_name,
                                                           s + ('',))))))
                  for s in gen_seps)
    match_names = list([name for name in test_names if name
                        in _get_supported_models()])

    if not len(match_names):
        raise ValueError("Model '{0}' is not supported".format(model_name))
    elif len(match_names) > 1:
        raise ValueError("Multiple matched models for given model name '{0}'"
                         .format(model_name))

    valid_model_name = match_names[0]
    parent_models = _find_references(valid_model_name)

    model_spec = dict()
    for m in parent_models:
        model_spec.update(MODELS[m])
    model_spec.pop('reference')
    model_spec['model_family'] = parent_models[0]
    model_spec['model_name'] = valid_model_name

    return model_spec
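
The normalization trick above: split the user-supplied name on '-', '_' or whitespace, then re-join the pieces with every combination of separator characters and keep whichever candidate matches a supported model name. A standalone sketch of the candidate generation for a hypothetical two-part name:

import itertools

split_name = ['GEOS', '5']                 # e.g. from re.split(r'[\-_\s]', 'geos-5'.upper())
sep_chars = ('', ' ', '-', '_')
candidates = []
for seps in itertools.combinations_with_replacement(sep_chars, len(split_name) - 1):
    # interleave the name pieces with the chosen separators
    pieces = itertools.chain(*zip(split_name, seps + ('',)))
    candidates.append(''.join(pieces))
print(candidates)  # ['GEOS5', 'GEOS 5', 'GEOS-5', 'GEOS_5']
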
run.py (project: pongr, author: wseaton)
def ratings():
    s = '''
    select
        first_name,
        last_name,
        alias,
        rating,
        sigma,
        trueskill
    from ratings
    left join player using (alias)
    order by 3 desc
    '''

    s_team = '''
        select
            player1,
            player2,
            rating,
            sigma,
            trueskill
        from team_doubles_ratings
        --left join player using (alias)
        order by 3 desc
        '''

    s_rating_df = pd.read_sql(s, con=engine)
    d_rating_df = pd.read_sql(s.replace('ratings', 'doubles_ratings'), con=engine)
    t_rating_df = pd.read_sql(s_team, con=engine)


    chart = dist_plot(s_rating_df)

    singles_rating_df_4_template = s_rating_df.copy()

    s_rating_df = s_rating_df.to_dict('records')
    d_rating_df = d_rating_df.to_dict('records')
    t_rating_df = t_rating_df.to_dict('records')
    # top is for the data table as records, bottom is TrueSkill objects
    s_r_dict = rating_df_to_dict(singles_rating_df_4_template)

    rdo = OrderedDict(sorted(s_r_dict.items(), key=lambda x: x[1].mu, reverse=True))

    percent_df = pd.DataFrame()

    for pair in list(itertools.combinations_with_replacement(rdo, 2)):
        prob = win_probability(rdo[pair[0]], rdo[pair[1]])
        percent_df.loc[pair[0], pair[1]] = prob
        percent_df.loc[pair[1], pair[0]] = 1 - prob

    matrix = win_probability_matrix(percent_df)

    return render_template('ratings.html', singles_ratings=s_rating_df,
                           doubles_ratings=d_rating_df, team_df=t_rating_df,
                           dist=chart, matrix=matrix)
enrichment.py (project: gamtools, author: pombo-lab)
def get_feature_summary(pairwise_interactions, window_classes):
    """
    Count the number of pairwise interactions between different window classes.

    :param pairwise_interactions: Table of pairwise interactions.
    :type pairwise_interactions: :class:`pandas.DataFrame`
    :param window_classes: Table of windows and their classifications
    :type window_classes: :class:`pandas.DataFrame`
    :returns: A list of tuples of the form (first class, second class, count).

    >>> pairwise_interactions = pd.DataFrame([('chr1', 10, 20, 0.75),
    ...                                        ('chr2', 10, 20, 0.5),
    ...                                        ('chr1', 10, 30, 0.4)],
    ...                                      columns=['chrom', 'Pos_A', 'Pos_B',
    ...                                               'interaction'])
    >>> pairwise_interactions
      chrom  Pos_A  Pos_B  interaction
    0  chr1     10     20         0.75
    1  chr2     10     20         0.50
    2  chr1     10     30         0.40
    >>> window_classification = pd.DataFrame([('chr1', 10, True, False),
    ...                                       ('chr1', 20, False, True),
    ...                                       ('chr2', 20, True, False),
    ...                                       ('chr2', 30, False, True)],
    ...                                      columns=['chrom', 'i', 'Enhancer', 'Gene'])
    >>> window_classification
      chrom   i Enhancer   Gene
    0  chr1  10     True  False
    1  chr1  20    False   True
    2  chr2  20     True  False
    3  chr2  30    False   True
    >>> enrichment.get_feature_summary(pairwise_interactions, window_classification)
    [('Enhancer', 'Enhancer', 0), ('Enhancer', 'Gene', 1), ('Gene', 'Gene', 0)]
    """

    results = []
    feature_classes = [col for col in window_classes.columns
                       if not col in ['chrom', 'start', 'stop', 'i']]

    for feat1, feat2 in itertools.combinations_with_replacement(
            feature_classes, 2):

        feat_values = feature_pair_values(
            pairwise_interactions, window_classes, feat1, feat2)

        results.append((feat1, feat2, len(feat_values)))

    return results

