Example source code for Python's combinations()

diagnostics.py (project: elfi, author: elfi-dev)
def _combine_ss(self, list_ss, max_cardinality):
        """Create all combinations of the initialised summary statistics up till the maximum cardinality.

        Parameters
        ----------
        list_ss : List of callable functions
            List of candidate summary statistics.
        max_cardinality : int
            Maximum cardinality of a candidate summary-statistics combination.

        Returns
        -------
        List
            Combinations of candidate summary statistics.

        """
        if max_cardinality > len(list_ss):
            max_cardinality = len(list_ss)

        # Combine the candidate summary statistics.
        combinations_ss = []
        for i in range(max_cardinality):
            for combination in combinations(list_ss, i + 1):
                combinations_ss.append(combination)
        return combinations_ss
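
As a standalone illustration of the combination step (a minimal sketch; the summary-statistic callables below are stand-ins, not part of elfi):

from itertools import combinations

import numpy as np

list_ss = [np.mean, np.var, len]   # three hypothetical candidate summary statistics
max_cardinality = 2

combinations_ss = []
for i in range(max_cardinality):
    combinations_ss.extend(combinations(list_ss, i + 1))

# 3 singletons + 3 pairs = 6 candidate combinations
assert len(combinations_ss) == 6
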
exchange_rate_manager.py (project: gold-digger, author: business-factory)
def pick_the_best(rates_records):
        """
        Compare rates to each other and group them by absolute difference.
        If the group with the minimal difference contains exactly two rates, choose one of them according to the order of providers.
        If the group with the minimal difference contains more than two rates, choose the rate in the middle, i.e. the most common rate in the list.

        :type rates_records: list[gold_digger.database.db_model.ExchangeRate]
        :rtype: gold_digger.database.db_model.ExchangeRate
        """
        if len(rates_records) in (1, 2):
            return rates_records[0]

        differences = defaultdict(list)
        for a, b in combinations(rates_records, 2):
            differences[abs(a.rate - b.rate)].extend((a, b))  # if |a-b| == 1 and |b-c| == 1 then differences[1] == [a, b, b, c]

        minimal_difference, rates = min(differences.items())
        if len(rates) == 2:
            return rates[0]
        else:
            return Counter(rates).most_common(1)[0][0]  # [(ExchangeRate, occurrences)]
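
To trace the grouping logic, here is a toy walk-through with a hypothetical Rate stand-in for gold_digger's ExchangeRate model (integer rates keep the difference keys exact):

from collections import Counter, defaultdict, namedtuple
from itertools import combinations

Rate = namedtuple("Rate", ["provider", "rate"])  # hypothetical stand-in
records = [Rate("a", 100), Rate("b", 101), Rate("c", 102)]

differences = defaultdict(list)
for a, b in combinations(records, 2):
    differences[abs(a.rate - b.rate)].extend((a, b))

# differences == {1: [a, b, b, c], 2: [a, c]}
minimal_difference, rates = min(differences.items())
print(Counter(rates).most_common(1)[0][0])  # Rate(provider='b', rate=101), the middle rate
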
models.py (project: pyprocessmacro, author: QuentinAndre)
def _simple_ind_effects_wrapper(self):
        """
        A wrapper for the indirect effects (and for total/contrast effects if specified)
        :return: pd.DataFrame
            A DataFrame of effects, se, llci, and ulci, for the simple/total/contrast indirect effects.
        """
        symb_to_var = self._symb_to_var
        results = self.estimation_results
        rows_stats = np.array([results["effect"], results["se"], results["llci"], results["ulci"]]).T

        med_names = [symb_to_var.get('m{}'.format(i + 1), 'm{}'.format(i + 1)) for i in range(self._n_meds)]
        rows_levels = []
        if self._options["total"]:
            rows_levels += ["TOTAL"]
        rows_levels += med_names
        if self._options["contrast"]:
            contrasts = ["Contrast: {} vs. {}".format(a, b) for a, b in combinations(med_names, 2)]
            rows_levels += contrasts
        rows_levels = np.array(rows_levels).reshape(-1, 1)

        rows = np.concatenate([rows_levels, rows_stats], axis=1)
        cols = ["", "Effect", "Boot SE", "BootLLCI", "BootULCI"]
        df = pd.DataFrame(rows, columns=cols, index=[""] * rows.shape[0])
        return df.apply(pd.to_numeric, args=["ignore"])
models.py (project: frisbeer-backend, author: Moetto)
def create_teams(self):
        def calculate_team_elo(team):
            return int(sum([player.elo for player in team]) / len(team))

        elo_list = []
        players = set(self.players.all())
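        # NOTE: choosing 3 of the players for team 1 (the rest form team 2) assumes a
        # 3v3 game; with six players, each split is enumerated twice with teams swapped.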
        possibilities = itertools.combinations(players, 3)
        for possibility in possibilities:
            team1 = possibility
            team2 = players - set(team1)
            elo1 = calculate_team_elo(team1)
            elo2 = calculate_team_elo(team2)
            elo_list.append((abs(elo1 - elo2), team1, team2))
        ideal_teams = min(elo_list, key=itemgetter(0))
        self.gameplayerrelation_set\
            .filter(player__id__in=[player.id for player in ideal_teams[1]]).update(team=GamePlayerRelation.Team1)
        self.gameplayerrelation_set \
            .filter(player__id__in=[player.id for player in ideal_teams[2]]).update(team=GamePlayerRelation.Team2)
        print(ideal_teams[0])
        self.save()
sampling.py (project: leven-squash, author: dwcoates)
def read_files(directory, length, num):
    files = [join(directory, f)
             for f in listdir(directory) if isfile(join(directory, f))]

    # make sure there are enough text chunks to make num combinations
    # of them.
    txt = ""
    count = 0
    for f in files:
        print("reading %s..." % (f))
        txt += read(f)
        num_chunks = len(txt) // length  # integer number of chunks
        count = count + 1
        if num < nCr(num_chunks, 2):
            break

    print("Read %s/%s files in '%s'" % (count, len(files), directory))

    chunks = [txt[x:x + length] for x in range(0, len(txt), length)]

    print("Calculating distance average of %s measurements of text " +
          "strings length %s...") % (num, length)

    return list(islice(combinations(chunks, 2), 0, num))
penalty.py (project: dalila, author: slipguru)
def apply_prox_operator(self, x, gamma):
        if self._lambda < 0:
            logging.error("A negative regularisation parameter was used")
            raise ValueError("A negative regularization parameter was used")

        covered = sorted(set().union(*self._groups))
        if covered != list(np.arange(x.shape[1])):
            logging.error("The groups in group lasso must cover all the "
                          "features")
            raise ValueError("The groups in group lasso must cover all the "
                             "features")

        for pair in combinations(self._groups, r=2):
            if len(set(pair[0]) & set(pair[1])) > 0:
                logging.error("There are overlapping groups")
                raise ValueError("There are overlapping groups")

        new_x = np.zeros_like(x)
        for r in range(0, x.shape[0]):
            for g in self._groups:
                new_x[r, g] = self.prox_operator(x[r, g], gamma)
        return new_x
match_test.py (project: beans, author: Yelp)
def test_no_re_matches(minimal_database):
    pref_1 = SubscriptionDateTime(datetime=datetime.now() - timedelta(weeks=MEETING_COOLDOWN_WEEKS - 1)).put()
    subscription = MeetingSubscription(title='all engineering weekly', datetime=[pref_1]).put()
    user_pref = UserSubscriptionPreferences(preference=pref_1, subscription=subscription).put()
    meeting_spec = MeetingSpec(meeting_subscription=subscription, datetime=pref_1.get().datetime)
    meeting_spec.put()

    users = []
    num_users = 20
    for i in range(0, num_users):
        user = User(email='{}@yelp.com'.format(i), metadata={
                    'department': 'dept{}'.format(i)}, subscription_preferences=[user_pref])
        user.put()
        MeetingRequest(user=user.key, meeting_spec=meeting_spec.key).put()
        users.append(user)

    previous_meetings = {pair for pair in itertools.combinations([user.key.id() for user in users], 2)}
    previous_meetings = previous_meetings - {(users[0].key.id(), users[1].key.id())}
    matches, unmatched = generate_meetings(users, meeting_spec, previous_meetings)
    assert len(unmatched) == num_users - 2
    assert [(match[0].key.id(), match[1].key.id()) for match in matches] == [(users[0].key.id(), users[1].key.id())]
pair_match.py (project: beans, author: Yelp)
def get_disallowed_meetings(users, prev_meeting_tuples, spec):
    """Returns set of matches that are not allowed
    Returns:
        Set of tuples
    """
    # don't match users with previous meetings
    pairs = prev_meeting_tuples

    userids = sorted([user.key.id() for user in users])
    id_to_user = {user.key.id(): user for user in users}
    all_pairs = {pair for pair in itertools.combinations(userids, 2)}

    for rule in spec.meeting_subscription.get().dept_rules:
        rule = rule.get()
        pairs = pairs.union({pair for pair in all_pairs if is_same(rule.name, pair, id_to_user)})
    return pairs
orient_edges.py (project: pyBN, author: ncullen93)
def orient_edges_gs2(edge_dict, Mb, data, alpha):
    """
    Similar algorithm to the one above, but slightly modified for speed.
    Needs testing.
    """
    d_edge_dict = dict([(rv,[]) for rv in edge_dict])
    for X in edge_dict.keys():
        for Y in edge_dict[X]:
            nxy = set(edge_dict[X]) - set(edge_dict[Y]) - {Y}
            for Z in nxy:
                if Y not in d_edge_dict[X]:
                    d_edge_dict[X].append(Y) # SET Y -> X
                B = min(set(Mb[Y]) - {X} - {Z}, set(Mb[Z]) - {X} - {Y}, key=len)  # smaller of the two candidate sets
                for i in range(len(B)):
                    for S in itertools.combinations(B,i):
                        cols = (Y,Z,X) + tuple(S)
                        pval = mi_test(data[:,cols])
                        if pval < alpha and X in d_edge_dict[Y]: # Y IS independent of Z given S+X
                            d_edge_dict[Y].remove(X)
                if X in d_edge_dict[Y]:
                    break
    return d_edge_dict
__init__.py (project: Splipy, author: sintefmath)
def sections(src_dim, tgt_dim):
    """Generate all boundary sections from a source dimension to a target
    dimension. For example, `sections(3,1)` generates all edges on a volume.

    The return values are lists of length `src_dim` with each element either 0,
    -1 or `None`, which are suitable for passing to
    :func:`splipy.SplineObject.section`.
    """
    # Enumerate all combinations of fixed directions
    nfixed = src_dim - tgt_dim
    for fixed in combinations(range(src_dim), r=nfixed):
        # Enumerate all {0,-1}^n over the fixed directions
        for indices in product([0, -1], repeat=nfixed):
            args = [None] * src_dim
            for f, i in zip(fixed, indices[::-1]):
                args[f] = i
            yield args
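
For example, the edge case mentioned in the docstring (a quick usage sketch of the generator above):

edges = list(sections(3, 1))

# 3 pairs of fixed directions x 4 corner assignments = 12 edges of a volume
assert len(edges) == 12
assert edges[0] == [0, 0, None]  # the edge running along the third axis at the (0, 0) corner
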
causal_grammar.py (project: Causality, author: vcla)
def powerset(iterable):
    "powerset([1,2,3]) --> () (1,) (2,) (3,) (1,2) (1,3) (2,3) (1,2,3)"
    s = list(iterable)
    return itertools.chain.from_iterable(itertools.combinations(s, r) for r in range(len(s)+1))
password.py (project: NS_Proj, author: drstarry)
def get_count(l, comb):
    result = 0
    for n in range(1, comb + 1):
        # Consider every subset of n character classes...
        for sub_char_set in itertools.combinations(char_set, n):
            # ...and count, by inclusion-exclusion, the length-l strings over the
            # union of the subset that use each of its n classes at least once.
            for m in range(n, 0, -1):
                sign = (-1) ** (n - m)
                for sets in itertools.combinations(sub_char_set, m):
                    result += sign * sum(sets) ** l
    return result
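
get_count relies on a module-level char_set listing the sizes of the available character classes. A hedged usage sketch (the char_set values here are assumptions, not taken from the original module):

char_set = [26, 26, 10, 32]  # assumed sizes: lowercase, uppercase, digits, symbols

# Length-8 passwords confined to a single character class:
assert get_count(8, 1) == 26 ** 8 + 26 ** 8 + 10 ** 8 + 32 ** 8
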
postulates.py (project: zellij, author: nedbat)
def all_pairs(seq):
    """Produce all pairs from seq, but not (a, a)"""
    return itertools.combinations(seq, 2)
tetrahedron_fractal.py (project: blender-scripting, author: njanakiev)
def recursiveTetrahedron(bm, points, level=0):
    subTetras = []
    for i in range(len(points)):
        p0 = points[i]
        pK = points[:i] + points[i + 1:]
        subTetras.append([p0] + [(p0 + p)/2 for p in pK])

    if 0 < level:
        for subTetra in subTetras:
            recursiveTetrahedron(bm, subTetra, level-1)
    else:
        for subTetra in subTetras:
            verts = [bm.verts.new(p) for p in subTetra]
            faces = [bm.faces.new(face) for face in itertools.combinations(verts, 3)]
            bmesh.ops.recalc_face_normals(bm, faces=faces)
grace_util.py (project: scikit-dataaccess, author: MITHaystack)
def dateMismatch(dates, days=10):
    '''
    Check if any two dates are not within a certain number of days of each other

    @param dates: Iterable container of pandas timestamps
    @param days: Number of days

    @return: True if some pair of dates is more than `days` days apart, False otherwise
    '''
    for combo in combinations(dates, 2):
        if np.abs(combo[0] - combo[1]) > pd.to_timedelta(days, 'D'):
            return True
    return False
default_strategies.py (project: speccer, author: bensimner)
def generate(self, depth, t, *args, **kwargs):
            alls = list(Strategy[t](depth, *args, **kwargs))

            for n in range(depth):
                for p in itertools.combinations(alls, n):
                    yield set(p)
foobar_4-1_free_the_bunny_prisoners.py (project: Google-FooBar, author: FoxHub)
def answer(num_buns, num_required):
    """
    To start this problem, we calculate the number of distinct keys to mint. Since each key is shared
    by bunnies_per_key = num_buns - num_required + 1 bunnies, this is:
        num_buns choose bunnies_per_key (equivalently, num_buns choose (num_required - 1))

    We then need to lexicographically hand off keys to each bunny and deputize them with the powers
    to open locks.

    At that point, this problem comes down to deciding how many keys to hand to each bunny. I had to think about that
    by hand.

    :param num_buns: The number of bunnies to distribute keys to.
    :param num_required: The "choose" part of our combinatorial.
    :return: bunny_keyrings, the enumerated keys belonging to each bunny in num_buns.
    """
    # One keyring per bunny.
    bunny_keyrings = [[] for num in range(num_buns)]
    # The number of keys each bunny requires is described by this formula, which I noticed by doing some napkin math.
    # If N == 4 and M == 4, bunnies_per_key == 1.
    # If N == 2 and M == 1, bunnies_per_key == 2.
    # If N == 5 and M == 3, bunnies_per_key == 3.
    # This generally fit the formula bunnies_per_key = N - M + 1.
    bunnies_per_key = num_buns - num_required + 1

    # Python's built-in enumerate function is equivalent to:
    # def enumerate(sequence, start=0):
    #     n = start
    #     for elem in sequence:
    #         yield n, elem
    #         n += 1
    # combinations() returns a generator! So by producing combinations one at a time, we get
    # (num_buns choose bunnies_per_key) distinct keys, each held by one combination of bunnies.
    # Since this covers all combinations, it assures that any num_required bunnies picked together
    # hold enough keys to open a cell.
    for keynum, keyholders in enumerate(combinations(range(num_buns), bunnies_per_key)):
        # print(keynum, keyholders)
        for index in keyholders:
            bunny_keyrings[index].append(keynum)

    return bunny_keyrings
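
A quick sanity check of the property argued for in the docstring (a sketch using only the function above):

from itertools import combinations

keyrings = answer(5, 3)
all_keys = set().union(*(set(kr) for kr in keyrings))

# Any 3 of the 5 bunnies together hold every key...
for trio in combinations(keyrings, 3):
    assert set().union(*(set(kr) for kr in trio)) == all_keys

# ...while any 2 bunnies are always missing at least one key.
for pair in combinations(keyrings, 2):
    assert set().union(*(set(kr) for kr in pair)) != all_keys
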
traveled_speeds_classifier.py (project: rosie, author: datasciencebr)
def __calculate_sum_distances(self, X):
        coordinate_list = X[['latitude', 'longitude']].values
        edges = list(combinations(coordinate_list, 2))
        return np.sum([distance(edge[0], edge[1]).km for edge in edges])  # pass full (lat, lon) pairs to distance()
fatcat.py (project: ssbio, author: SBRG)
def run_fatcat_all_by_all(list_of_structure_paths, fatcat_sh, outdir='', silent=True, force_rerun=False):
    """Run FATCAT on all pairs of structures given a list of structures.

    Args:
        list_of_structure_paths (list): List of PDB file paths 
        fatcat_sh (str): Path to "runFATCAT.sh" executable script
        outdir (str): Path to where FATCAT XML output files will be saved 
        silent (bool): If command to run FATCAT should be printed to stdout
        force_rerun (bool): If FATCAT should be run even if XML output files already exist 

    Returns:
        Pandas DataFrame: TM-scores (similarity) between all structures

    """
    structure_ids = {x: i for i, x in enumerate(list_of_structure_paths)}

    comps = itertools.combinations(list_of_structure_paths, 2)
    tm_score_matrix = np.eye(len(list_of_structure_paths))

    for pdb1, pdb2 in tqdm(comps):
        fatcat_file = run_fatcat(pdb1, pdb2, fatcat_sh, outdir=outdir, silent=silent, force_rerun=force_rerun)
        tm_score = parse_fatcat(fatcat_file)['tm_score']  # parse once, fill both symmetric entries
        tm_score_matrix[structure_ids[pdb1], structure_ids[pdb2]] = tm_score
        tm_score_matrix[structure_ids[pdb2], structure_ids[pdb1]] = tm_score

    # Convert to dataframe with filenames
    filenames = [op.splitext(op.basename(x))[0] for x in list_of_structure_paths]
    tm_score_matrix_annotated = pd.DataFrame(data=tm_score_matrix, columns=filenames, index=filenames)

    return tm_score_matrix_annotated
MAUC.py (project: TADPOLE, author: noxtoby)
def MAUC(data, num_classes):
    """
    Calculates the MAUC over a set of multi-class probabilities and
    their labels. This is equation 7 in Hand and Till's 2001 paper.
    NB: The class labels should be in the set [0,n-1] where n = # of classes.
    The class probability should be at the index of its label in the
    probability list.
    I.e. With 3 classes the labels should be 0, 1, 2. The class probability
    for class '1' will be found in index 1 in the class probability list
    wrapped inside the zipped list with the labels.
    Args:
        data (list): A zipped list (NOT A GENERATOR) of the labels and the
            class probabilities in the form (m = # data instances):
             [(label1, [p(x1c1), p(x1c2), ... p(x1cn)]),
              (label2, [p(x2c1), p(x2c2), ... p(x2cn)])
                             ...
              (labelm, [p(xmc1), p(xmc2), ... (pxmcn)])
             ]
        num_classes (int): The number of classes in the dataset.
    Returns:
        The MAUC as a floating point value.
    """
    # Find all pairwise comparisons of labels
    class_pairs = list(itertools.combinations(range(num_classes), 2))

    # Have to take average of A value with both classes acting as label 0 as this
    # gives different outputs for more than 2 classes
    sum_avals = 0
    for pairing in class_pairs:
        sum_avals += (a_value(data, zero_label=pairing[0], one_label=pairing[1]) +
                      a_value(data, zero_label=pairing[1], one_label=pairing[0])) / 2.0

    return sum_avals * (2 / float(num_classes * (num_classes-1)))  # Eqn 7
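
For reference, the quantity computed above corresponds to equation 7 of Hand and Till (2001), with c = num_classes and A(i|j) the pairwise value computed by a_value:

M = \frac{2}{c(c-1)} \sum_{i < j} \frac{\hat{A}(i \mid j) + \hat{A}(j \mid i)}{2}
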

