def test_quantize_from_probs2(size, resolution):
set_random_seed(make_seed(size, resolution))
probs = np.exp(np.random.random(size)).astype(np.float32)
probs2 = probs.reshape((1, size))
quantized = quantize_from_probs2(probs2, resolution)
assert quantized.shape == probs2.shape
assert quantized.dtype == np.int8
assert np.all(quantized.sum(axis=1) == resolution)
# Check that quantized result is closer to target than any other value.
quantized = quantized.reshape((size, ))
target = resolution * probs / probs.sum()
distance = np.abs(quantized - target).sum()
for combo in itertools.combinations(range(size), resolution):
other = np.zeros(size, np.int8)
for i in combo:
other[i] += 1
assert other.sum() == resolution
other_distance = np.abs(other - target).sum()
assert distance <= other_distance
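The quantize_from_probs2 under test is not shown on this page. A minimal sketch that satisfies the assertions above, using largest-remainder rounding (an illustration, not necessarily the tested project's implementation):

import numpy as np

def quantize_from_probs2(probs2, resolution):
    """Distribute `resolution` integer counts over each row of `probs2`,
    proportionally to the (unnormalized) probabilities."""
    targets = resolution * probs2 / probs2.sum(axis=1, keepdims=True)
    quantized = np.floor(targets).astype(np.int8)
    remainders = targets - quantized
    for row in range(probs2.shape[0]):
        # Hand the leftover counts to the cells with the largest remainders.
        shortfall = resolution - int(quantized[row].sum())
        top = np.argsort(-remainders[row])[:shortfall]
        quantized[row, top] += 1
    return quantized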
def _most_similar_pair(clusters, distance_function):
result = []
for c1, c2 in combinations(clusters, 2):
if not c1.can_merge(c2):
continue
if c1.must_merge(c2):
logger.info("External IDs Match.\n%s\n%s\nMust Merge" % (c1, c2))
return c1, c2, 1.0
sim_score = distance_function(c1, c2)
result.append((c1, c2, sim_score))
    if result:
        # Highest similarity score wins (implicitly returns None if nothing can merge).
        return max(result, key=lambda t: t[2])
def _default_lines(self):
    'Return the largest set of lines that can be reconciled'
currency = self.show.account.company.currency
chunk = config.getint('account', 'reconciliation_chunk', default=10)
    # The number of combinations grows exponentially, so keep each chunk small
default = []
for lines in grouped_slice(self._all_lines(), chunk):
lines = list(lines)
best = None
        for n in range(len(lines), 1, -1):
for comb_lines in combinations(lines, n):
amount = sum((l.debit - l.credit) for l in comb_lines)
if currency.is_zero(amount):
best = [l.id for l in comb_lines]
break
if best:
break
if best:
default.extend(best)
return default
def size(self):
"""Computes the hypervolume of this concept."""
hypervolume = 0.0
num_cuboids = len(self._core._cuboids)
# use the inclusion-exclusion formula over all the cuboids
for l in range(1, num_cuboids + 1):
inner_sum = 0.0
subsets = list(itertools.combinations(self._core._cuboids, l))
for subset in subsets:
intersection = subset[0]
for cuboid in subset:
intersection = intersection.intersect_with(cuboid)
inner_sum += self._hypervolume_couboid(intersection)
hypervolume += inner_sum * (-1.0)**(l+1)
return hypervolume
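The (-1.0)**(l+1) factor implements the inclusion-exclusion principle: the measure of a union is the sum of the single measures, minus the pairwise intersections, plus the triple intersections, and so on. A self-contained illustration on 1-D intervals (a hypothetical helper, not the project's cuboid classes):

import itertools

def union_length(intervals):
    """Length of a union of closed intervals, via inclusion-exclusion."""
    def intersect(a, b):
        lo, hi = max(a[0], b[0]), min(a[1], b[1])
        return (lo, hi) if lo <= hi else None

    total = 0.0
    for l in range(1, len(intervals) + 1):
        for subset in itertools.combinations(intervals, l):
            common = subset[0]
            for iv in subset[1:]:
                common = intersect(common, iv)
                if common is None:
                    break
            if common is not None:
                total += (-1.0) ** (l + 1) * (common[1] - common[0])
    return total

assert union_length([(0, 2), (1, 3)]) == 3.0  # 2 + 2 - 1 (the overlap [1, 2])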
def GenerateCollisionTree(k, stateLen):
nodesByLevel = collections.defaultdict(list)
allBytes = range(2 ** 8)
allPossibleBlocks = []
    # NB: combinations() yields only blocks whose byte values strictly increase,
    # a much smaller subset than literally all possible blocks.
    for comb in itertools.combinations(allBytes, stateLen):
byteString = b''.join(x.to_bytes(1, 'little') for x in comb)
allPossibleBlocks.append(byteString)
for level in range(k+1):
nodes = []
if level == 0:
nodes = RandomUniqueStates(2 ** k, stateLen)
else:
nodes = FindCollisions(nodesByLevel[level-1], allPossibleBlocks, stateLen)
print('Generated nodes for level %d: %s' % (level, str(nodes)))
nodesByLevel[level] = nodes
return nodesByLevel
# Now I generate a meaningful message, crafted to be exactly one block
# long, though there is no need to do so. Nobody should be seeing this
# message, only the hash.
def scoreGeneSet(geneT,scoresO,geneNames,scoreType):
'''Given a tuple of genes in geneT, calculate the average score
    between them. If some gene pairs lack a score, we assign them the
    minimum possible score. The kind of score we use is indicated by
scoreType, which corresponds to the type of score in the scores
graph.
'''
geneNumsL = [geneNames.nameToNum(name) for name in geneT]
edgeL = getInternalEdges(geneNumsL,scoresO)
scSum = sum((scoresO.getScoreByEdge(edge,scoreType) for edge in edgeL))
# if there was an edge between every node, there would be
# len(geneT) choose 2.
maxPossibleNumEdges = len(list(itertools.combinations(geneT,2)))
actualNumEdges = len(edgeL)
numMissEdge=maxPossibleNumEdges-actualNumEdges
scSum += numMissEdge * min(scoresO.scoreD[scoreType])
avSc = scSum / maxPossibleNumEdges
return avSc,maxPossibleNumEdges,actualNumEdges
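Side note: the combinations() call above is used only for its length. The count has a closed form, so the list need not be materialized (an equivalent simplification):

    n = len(geneT)
    maxPossibleNumEdges = n * (n - 1) // 2  # n choose 2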
def tenx_diff_exp_all(tenx_data, communities):
diff_expr_dfs = []
for c1, c2 in itertools.combinations(np.unique(communities), 2):
group1 = (communities == c1)
group2 = (communities == c2)
diff_expr_df = sparse_diff_exp(tenx_data.genes.matrix,
group1, group2, tenx_data.genes.columns).sort_values('p')
diff_expr_df['community1'] = c1
diff_expr_df['community2'] = c2
diff_expr_dfs.append(diff_expr_df)
diff_expr = pd.concat(diff_expr_dfs)
print(diff_expr.shape)
print(diff_expr.head())
return diff_expr
def process(self, element):
content_value = element.properties.get('text', None)
text_line = ''
if content_value:
text_line = content_value.string_value
words = set([x.lower() for x in re.findall(r'[A-Za-z\']+', text_line)])
# You can add more stopwords if you want. These words are not included
# in the analysis.
stopwords = [
'a', 'amp', 'an', 'and', 'are', 'as', 'at', 'be', 'been',
'but', 'by', 'co', 'do', 'for', 'has', 'have', 'he', 'her', 'his',
'https', 'if', 'in', 'is', 'it', 'me', 'my', 'no', 'not', 'of', 'on',
'or', 'rt', 's', 'she', 'so', 't', 'than', 'that', 'the', 'they',
'this', 'to', 'us', 'was', 'we', 'what', 'with', 'you', 'your',
'who', 'when', 'via']
    stopwords += list(map(chr, range(97, 123)))  # also exclude all single letters a-z
pruned_words = list(words - set(stopwords))
pruned_words.sort()
import itertools
return list(itertools.combinations(pruned_words, 2))
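A concrete trace of the output, given the stopword list above:

# text_line = "The quick brown fox"
# words -> {'the', 'quick', 'brown', 'fox'}; 'the' is a stopword
# pruned_words -> ['brown', 'fox', 'quick']
# returns [('brown', 'fox'), ('brown', 'quick'), ('fox', 'quick')]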
def get_ones_max_pattern(self, player):
""" ?????5-7???????? """
temp_cards = []
temp_cards.extend(self.public_pot_cards)
temp_cards.extend(player.hands)
print('temp_cards: ', temp_cards)
if len(temp_cards) < 5:
raise LookupError('Cards < 5')
all_cards = list(combinations(
iterable=temp_cards,
r=5
))
max_cards = all_cards[0]
for cards in all_cards:
        # Compare this 5-card hand with the current best; the dealer decides which is stronger.
self.dealer.get(
five_cards_A=cards,
five_cards_B=max_cards
)
if self.dealer.A_stronger_than_B:
max_cards = cards
return max_cards
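Brute force is affordable here: with at most 7 cards in play there are at most C(7, 5) = 21 candidate hands per player.

import math
assert math.comb(7, 5) == 21  # worst-case number of 5-card hands compared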
def _gen_dup_trinary_alloy(self, sp1, n1, sp2, n2):
init_numbers = self.init_cell.numbers
isp1 = sp1.Z
isp2 = sp2.Z
    sp_ind_origin = list(range(len(init_numbers)))
for sp1_comb_index in combinations(sp_ind_origin, n1):
sp_ind_bin = [x for x in sp_ind_origin if x not in sp1_comb_index]
for sp2_comb_index in combinations(sp_ind_bin, n2):
numbers = init_numbers.copy()
for i1, i2 in zip_longest(sp1_comb_index, sp2_comb_index):
if i1 is not None:
numbers[i1] = isp1
if i2 is not None:
numbers[i2] = isp2
yield GeneralCell(self.lattice, self.positions, numbers)
def test_equivalent_any_addresses(self):
from certbot_nginx.obj import Addr
any_addresses = ("0.0.0.0:80 default_server ssl",
"80 default_server ssl",
"*:80 default_server ssl")
for first, second in itertools.combinations(any_addresses, 2):
self.assertEqual(Addr.fromstring(first), Addr.fromstring(second))
# Also, make sure ports are checked.
self.assertNotEqual(Addr.fromstring(any_addresses[0]),
Addr.fromstring("0.0.0.0:443 default_server ssl"))
# And they aren't equivalent to a specified address.
for any_address in any_addresses:
self.assertNotEqual(
Addr.fromstring("192.168.1.2:80 default_server ssl"),
Addr.fromstring(any_address))
Source of the next snippet: egocentric_network_1_5.py, from the project Visualization-of-popular-algorithms-in-Python by MUSoC.
def EgocentricNetwork(G,v):
egocentric_network_edge_list = []
egocentric_network_node_list = [v]
for i in G.neighbors(v):
egocentric_network_node_list.append(i)
egocentric_network_edge_list.append((v,i))
egocentric_network_node_list.sort()
egocentric_network_edge_list = list(tuple(sorted(p)) for p in egocentric_network_edge_list)
for i in list(itertools.combinations(egocentric_network_node_list, 2)): #generates all possible pairs of nodes
if i in G.edges() and i not in egocentric_network_edge_list:
egocentric_network_edge_list.append(i)
return egocentric_network_edge_list,egocentric_network_node_list
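A quick usage sketch, assuming G is a networkx.Graph (toy graph made up for illustration):

import networkx as nx

G = nx.Graph([(1, 2), (1, 3), (2, 3), (3, 4)])
edges, nodes = EgocentricNetwork(G, 1)
print(nodes)  # [1, 2, 3]
print(edges)  # [(1, 2), (1, 3), (2, 3)]; (2, 3) is added by the pair scan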
def generate_feature_group_combinations(self, feature_groups):
    # Build every subset of the feature groups, from the empty set up to all of them.
    combinations_unflattened = sum(
        [list(map(list, combinations(feature_groups, i)))
         for i in range(len(feature_groups) + 1)],
        [])
    combinations_flattened = []
    for combination in combinations_unflattened:
flattened_combination = {'feature_column_names':[],
'feature_groups': []}
for feature_group in combination:
flattened_combination['feature_column_names'].extend(
feature_group['feature_column_names'])
flattened_combination['feature_groups'].extend(
feature_group['feature_groups'])
combinations_flattened.append(flattened_combination)
    return combinations_flattened[1:]  # drop the empty combination
def normalize_language_tag(tag):
"""Return a list of normalized combinations for a `BCP 47` language tag.
Example:
>>> from docutils.utils import normalize_language_tag
>>> normalize_language_tag('de_AT-1901')
['de-at-1901', 'de-at', 'de-1901', 'de']
>>> normalize_language_tag('de-CH-x_altquot')
['de-ch-x-altquot', 'de-ch', 'de-x-altquot', 'de']
"""
# normalize:
tag = tag.lower().replace('-','_')
# split (except singletons, which mark the following tag as non-standard):
tag = re.sub(r'_([a-zA-Z0-9])_', r'_\1-', tag)
subtags = [subtag for subtag in tag.split('_')]
base_tag = [subtags.pop(0)]
# find all combinations of subtags
taglist = []
for n in range(len(subtags), 0, -1):
for tags in unique_combinations(subtags, n):
taglist.append('-'.join(base_tag+tags))
taglist += base_tag
return taglist
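unique_combinations is, in docutils, essentially a thin wrapper around itertools.combinations; for reading this snippet the following stand-in behaves the same:

from itertools import combinations as unique_combinations

# normalize_language_tag('de_AT-1901'): base_tag is ['de'], subtags ['at', '1901'];
# n=2 yields 'de-at-1901', n=1 yields 'de-at' and 'de-1901', plus the base 'de'.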
def test_channelmanager_graph_building(
raiden_network,
token_addresses,
settle_timeout):
token_address = token_addresses[0]
total_pairs = 0
pairs = itertools.combinations(raiden_network, 2)
for app0, app1 in pairs:
manager = app0.raiden.default_registry.manager_by_token(token_address)
manager.new_netting_channel(
app0.raiden.address,
app1.raiden.address,
settle_timeout,
)
total_pairs += 1
assert total_pairs == len(manager.channels_addresses())
def get_ref_centre(nodelist):
"""Choose a node as the diagram centre and find the unit distance.
Find the shortest distance between two nodes and use it as the unit distance of the
diagram grid. Choose one of these two nodes as the diagram centre.
Args:
nodelist (list): A list of nodes
Returns:
The absolute position of the centre node and the unit distance of the diagram.
"""
centres = [node.centre for node in nodelist]
    min_d = sp.dist(centres[0], centres[1])
    min_p0, min_p1 = centres[0], centres[1]
    for p0, p1 in itertools.combinations(centres, 2):
        d = sp.dist(p0, p1)
        if d < min_d:
            min_d = d
            min_p0, min_p1 = p0, p1
    min_x = abs(min_p0[0] - min_p1[0])
    min_y = abs(min_p0[1] - min_p1[1])
    unit_d = max(min_x, min_y)
    return [min_p0, unit_d]
def _GivePropertiesFromGeneralToSpecific(handler_list):
"""Makes sure that handlers have all properties of more general ones.
Ex. Since "*" matches everything "admin/*" matches, we want everything
matched by "admin/*" to have all the properties specified to "*".
Therefore we give properties from the "*" handler to the "admin/*" handler.
If the "*" handler is a SimpleHandler, it carries its own properties, so it
becomes a child of the "admin/*" handler. Otherwise, its properties are
define by its children, so its children are copied to the "admin/*"
handler.
This is an in-place mutation of the list.
Args:
handler_list: List of ordered Handlers.
"""
  for i, j in itertools.combinations(range(len(handler_list)), 2):
if handler_list[j].MatchesAll(handler_list[i]):
if isinstance(handler_list[i], SimpleHandler):
handler_list[i] = handler_list[i].CreateOverlappedHandler()
handler_list[i].AddMatchingHandler(handler_list[j])
def _getCombi(stuff, grplen=[], kind=int, sep=''):
"""Function which return combinations of string/integer objects.
-> (start, stop) : number of combine elements from initial list
"""
allComb = []
    if not grplen:
        # Default to every group size, from 1 up to the full list length.
        grplen = list(range(1, len(stuff) + 1))
for L in grplen:
for subset in combinations(stuff, L):
if kind == str:
allComb.extend([sep.join(subset)])
elif kind == int:
            t = []
            for k in subset:
                t.extend(k)
            allComb.append(t)
return allComb
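Both modes illustrated (inputs made up for the example):

# kind=str joins each combination with `sep`:
#   _getCombi(['a', 'b', 'c'], grplen=[2], kind=str) -> ['ab', 'ac', 'bc']
# kind=int flattens combinations of index groups:
#   _getCombi([[0, 1], [2]], grplen=[2], kind=int) -> [[0, 1, 2]]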
def _seqcombination(cst, dyn, direction, grp):
"""Generate combi for forward/backward/exhaustive sequence.
cst : list containing the index of all the features
dyn : features to add or remove
direction : direction of the sequence
grp : group features
"""
if direction == 'forward':
combi = [cst + [y]
for y in dyn if not list(set(cst).intersection([y]))]
elif direction == 'backward':
combi = [list(set(cst).difference([x])) for x in dyn]
elif direction == 'exhaustive':
combi = [list(k) for i in range(1, len(cst)+1)
for k in combinations(cst, i)]
return [[j for i in k for j in grp['g'+str(i)]] for k in combi]
def random_combinations(points_in_class):
n_cl = len(points_in_class)
    max_points = 2 * n_cl  # as used by Orriols-Puig et al., 2010
    all_combinations = list(itertools.combinations(points_in_class, r=2))
points_i = 0
n = len(all_combinations)
for i in range(n):
point = np.random.choice(len(all_combinations), 1)[0]
yield all_combinations[point]
del all_combinations[point]
if points_i > max_points or len(all_combinations) == 0:
break
points_i = points_i + 1
def get_similar_ssid_sets(ssid_sets, threshold):
"""Return a mapping of ssid set to similar ssid sets.
:param ssid_sets: Iterable of SSID sets
:param threshold: Minimum Jaccard index for two sets to be matched as similar.
"""
ssid_set_to_matches = defaultdict(set)
ssid_pairs = combinations(ssid_sets, r=2)
    # Distribute calculations to worker processes
# Significant speed-up over single process
with multiprocessing.Pool() as pool:
task = partial(jaccard_worker, threshold=threshold)
# Immediately returns an iterable
similar_ssids = pool.imap_unordered(task, ssid_pairs, chunksize=300000)
# Consumes the iterable whenever a worker process yields
for match in similar_ssids:
if match:
ssid_set_to_matches[match[0]].add(match[1])
ssid_set_to_matches[match[1]].add(match[0])
return ssid_set_to_matches
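jaccard_worker is not shown on this page; a plausible implementation consistent with the calls above (assuming each SSID set is a frozenset, so it can serve as a dict key afterwards):

def jaccard_worker(ssid_pair, threshold):
    """Return the pair when its Jaccard index clears the threshold, else None."""
    first, second = ssid_pair
    jaccard = len(first & second) / len(first | second)
    return ssid_pair if jaccard >= threshold else None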
def gen_ordered_statistics(transaction_manager, record):
"""
Returns a generator of ordered statistics as OrderedStatistic instances.
Arguments:
transaction_manager -- Transactions as a TransactionManager instance.
record -- A support record as a SupportRecord instance.
"""
items = record.items
for combination_set in combinations(sorted(items), len(items) - 1):
items_base = frozenset(combination_set)
items_add = frozenset(items.difference(items_base))
confidence = (
record.support / transaction_manager.calc_support(items_base))
lift = confidence / transaction_manager.calc_support(items_add)
yield OrderedStatistic(
frozenset(items_base), frozenset(items_add), confidence, lift)
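For a rule X => Y built here (X = items_base, Y = items_add), these are the standard association-rule measures: confidence(X => Y) = support(X ∪ Y) / support(X), and lift(X => Y) = confidence(X => Y) / support(Y). With made-up numbers: if support({bread, butter}) = 0.2, support({bread}) = 0.4 and support({butter}) = 0.25, then bread => butter has confidence 0.2 / 0.4 = 0.5 and lift 0.5 / 0.25 = 2.0.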
def compute_similarity_matrix(self, parent=None):
clusters = list(self._models)
n_clusters = len(clusters)
X = np.vstack([self[cluster][0] for cluster in clusters])
nX = l2_normalize(X)
    similarities = -squareform(pdist(nX, metric=self.distance))  # negate distances so larger means more similar
matrix = ValueSortedDict()
for i, j in itertools.combinations(range(n_clusters), 2):
matrix[clusters[i], clusters[j]] = similarities[i, j]
matrix[clusters[j], clusters[i]] = similarities[j, i]
return matrix
def ksubsets(superset, k):
"""
Finds the subsets of size k in lexicographic order.
This uses the itertools generator.
Examples
========
>>> from sympy.combinatorics.subsets import ksubsets
>>> list(ksubsets([1,2,3], 2))
[(1, 2), (1, 3), (2, 3)]
>>> list(ksubsets([1,2,3,4,5], 2))
[(1, 2), (1, 3), (1, 4), (1, 5), (2, 3), (2, 4), \
(2, 5), (3, 4), (3, 5), (4, 5)]
See Also
========
    Subset
"""
return combinations(superset, k)
def combinations(s_data, subset_size, total_size=None, name=None):
assert isinstance(subset_size, int)
assert subset_size > 0
if total_size is None:
total_size = s_data.get_shape().as_list()[0]
if total_size is None:
raise ValueError(
"tensor size on axis 0 is unknown,"
" please supply 'total_size'")
else:
assert isinstance(total_size, int)
assert subset_size <= total_size
c_combs = tf.constant(
list(itertools.combinations(range(total_size), subset_size)),
dtype=hparams.INTX,
name=('combs' if name is None else name))
return tf.gather(s_data, c_combs)
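A usage sketch in the same TF 1.x graph style as the snippet (hparams.INTX is that project's integer dtype; shapes are assumed for illustration):

# s_data: a (4, d) tensor; C(4, 2) = 6 unordered row pairs.
s_pairs = combinations(s_data, 2, total_size=4, name='row_pairs')
# s_pairs has shape (6, 2, d): one (2, d) slice per pair of rows.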
def get_bettingest_couples():
# get list of users
users = User.objects.values('id', 'first_name', 'last_name')
# use itertools to get all combinations
global_bettingest_couples = []
for combo in combinations(users, 2):
# for each combo, check how many bets they have with eachother
num_bets = get_couple_bet_number(combo[0]['id'], combo[1]['id'])
# append to list of dictionaries
# e.g. L = [{num_bets: 5, users: ['John Doe', 'Jane Doe']}]
user1_name = combo[0]['first_name'] + ' ' + combo[0]['last_name']
user2_name = combo[1]['first_name'] + ' ' + combo[1]['last_name']
users_names = [user1_name, user2_name]
entry = {'num_bets': num_bets, 'users': users_names}
global_bettingest_couples.append(entry)
    # pare down to the top 10
    pared_global_bettingest_couples = sorted(global_bettingest_couples, key=lambda k: k['num_bets'], reverse=True)[:10]
return pared_global_bettingest_couples
def find_4_cycles(edges):
"""return all unique four-cycles in graph"""
# for each node, add a list of reachable nodes
# for all pairs of reachable node test if they share a reachable node -> cycle
reachables = defaultdict(set)
for edge in edges:
reachables[edge[0]].add(edge[1])
reachables[edge[1]].add(edge[0])
loops = {}
    for a, reachable in reachables.items():
for b, c in itertools.combinations(reachable, 2):
for d in reachables[b].intersection(reachables[c]).difference(set([a])):
loops[tuple(sorted([a, b, d, c]))] = [a, b, d, c]
    return list(loops.values())
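A small check (assuming the Python 3 fixes above); a square with one diagonal contains exactly one 4-cycle, since the diagonal only creates 3-cycles:

edges = [(0, 1), (1, 2), (2, 3), (3, 0), (0, 2)]
assert len(find_4_cycles(edges)) == 1  # the square 0-1-2-3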
def gen_cls_combs(labels):
"""Generate exhaustive label pairs.
Args:
labels: a set of labels.
Returns:
a list of unique label pairs.
"""
unique_labels = list(set(labels))
    # generate every unique unordered pair of classes
cls_combs = []
for idx in range(len(unique_labels)):
cls1 = unique_labels[idx]
for id2 in range(idx + 1, len(unique_labels)):
cls2 = unique_labels[id2]
cls_combs.append((cls1, cls2))
return cls_combs
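Because every unordered pair is produced, the loop is equivalent to a direct call (pair ordering may differ, since set() is unordered):

import itertools
cls_combs = list(itertools.combinations(set(labels), 2))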