def phase_starts(self):
    # Cumulative start offset of each phase, beginning at 0
    # (phase lengths [3, 5, 2] -> [0, 3, 8, 10]); requires itertools.
    return list(itertools.accumulate([0] + [phase.length for phase in self.phase_list]))
def foo3(s):
    # Accumulate dotted prefixes: "a.b.c" -> "a", "a.b", "a.b.c" (requires itertools).
    join_2_strings = lambda x, y: '.'.join((x, y))
    return itertools.accumulate(s.split('.'), join_2_strings)
def running(lst, fn):
    # Running reduction of lst under fn, e.g. running([1, 2, 3], lambda a, b: a + b) -> [1, 3, 6]
    # (requires `from itertools import accumulate`).
    return list(accumulate(lst, fn))
def _ts(self):
    # Normalized breakpoints (fractions of total length) at the end of each
    # curve, the last always being 1.  Each curve's req_length is set to its
    # own length, except the last, which gets whatever of self.req_length
    # remains after the earlier curves (clamped at 0).
    lengths = [c.length for c in self._curves]
    length = sum(lengths)
    out = []
    for i, j in enumerate(accumulate(lengths[:-1])):
        self._curves[i].req_length = lengths[i]
        out.append(j / length)
    self._curves[-1].req_length = max(
        0,
        lengths[-1] - (length - self.req_length),
    )
    out.append(1)
    return out
def choose_weighted(self, *weighted_choices):
    # Weighted random selection: build the cumulative weight distribution and
    # bisect into it with a uniform draw (requires itertools and bisect).
    choices, weights = zip(*weighted_choices)
    cumdist = list(itertools.accumulate(weights))
    x = self.random() * cumdist[-1]
    return choices[bisect.bisect(cumdist, x)]
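The same cumulative-weights-plus-bisect technique works outside the class; the sketch below is a hypothetical standalone variant (the function name and sample weights are mine), and on Python 3.6+ it is equivalent to random.choices(choices, weights=weights)[0].

import bisect
import itertools
import random

def choose_weighted_standalone(*weighted_choices):
    # Hypothetical standalone version of choose_weighted above.
    choices, weights = zip(*weighted_choices)
    cumdist = list(itertools.accumulate(weights))   # e.g. weights (7, 2, 1) -> [7, 9, 10]
    x = random.random() * cumdist[-1]               # uniform draw in [0, total)
    return choices[bisect.bisect(cumdist, x)]

print(choose_weighted_standalone(('red', 7), ('green', 2), ('blue', 1)))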
def _split(self, obj):
# "hello there world" -> ["hello", "hello there", "hello there world"]
from itertools import accumulate
return list(accumulate(obj.split(), lambda x, y: f'{x} {y}'))
def cumsum(self): return Vec(itertools.accumulate(self))  # running sum, returned as a new Vec
def is_valid_program(p):
    """ Checks that the accumulated program length is always greater than the
    accumulated arities, indicating that the appropriate number of arguments is
    always present for functions. It then checks that the sum of arities + 1
    exactly equals the length of the stack, indicating that there are no
    missing arguments. """
    # requires numpy (np) and itertools.accumulate
    # print("p:",p)
    arities = list(a.arity[a.in_type] for a in p)
    accu_arities = list(accumulate(arities))
    accu_len = list(np.arange(len(p)) + 1)
    check = list(a < b for a, b in zip(accu_arities, accu_len))
    # print("accu_arities:",accu_arities)
    # print("accu_len:",accu_len)
    # print("accu_arities < accu_len:",accu_arities<accu_len)
    return all(check) and sum([a.arity[a.in_type] for a in p]) + 1 == len(p) and len(p) > 0
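A minimal sketch of how is_valid_program might be exercised; the Node type below is a made-up stand-in that only mimics the a.arity[a.in_type] access the function relies on, so the exact attribute layout is an assumption.

import numpy as np
from itertools import accumulate
from collections import namedtuple

# Hypothetical program elements: a terminal takes 0 arguments,
# a binary function such as 'add' takes 2 (arity keyed by in_type).
Node = namedtuple('Node', ['name', 'arity', 'in_type'])
x = Node('x', {'f': 0}, 'f')
add = Node('add', {'f': 2}, 'f')

print(is_valid_program([x, x, add]))   # postfix "x x add" -> True
print(is_valid_program([x, add]))      # an argument is missing -> False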
def generate_sentence(cfdist, word, num=15):
    # Requires random, bisect and itertools; note that `num` is accepted but never used.
    sentence = []
    # Generate words until we hit a period
    while word != '.':
        sentence.append(word)
        # Pick the next word by a weighted random draw over its observed successors
        choices, weights = zip(*cfdist[word].items())
        cumdist = list(itertools.accumulate(weights))
        x = random.random() * cumdist[-1]
        word = choices[bisect.bisect(cumdist, x)]
    return ' '.join(sentence)
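A small driver for generate_sentence, under the assumption that cfdist behaves like nltk's ConditionalFreqDist, i.e. cfdist[word] maps successor words to counts; here a dict of Counters built from bigrams of a toy corpus stands in for it.

import bisect
import itertools
import random
from collections import Counter, defaultdict

corpus = "the cat sat . the cat ran . the dog sat .".split()

# cfdist[w1][w2] = number of times w2 followed w1 in the corpus.
cfdist = defaultdict(Counter)
for w1, w2 in zip(corpus, corpus[1:]):
    cfdist[w1][w2] += 1

print(generate_sentence(cfdist, 'the'))   # e.g. "the cat sat"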
def accumulate(iterable):
    " Super simple 'accumulate' implementation (running sums, addition only). "
    total = 0
    for item in iterable:
        total += item
        yield total
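For numeric input this produces the same values as the standard-library version; a quick check (the sample data is mine):

import itertools

data = [1, 2, 3, 4]
assert list(accumulate(data)) == list(itertools.accumulate(data))  # both yield [1, 3, 6, 10]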
def get_interval_offsets_txt(lines: List[str]) -> Iterator[Tuple[int, int]]:
    """Return all the intervals corresponding to the ``lines``
    passed as parameter:
        [(0, n), (n, m), …]
    where the values are the character positions of the beginning and end of
    each line, counted from the first character of the file (starting at 0).
    Requires ``itertools`` plus ``List``, ``Tuple`` and ``Iterator`` from ``typing``."""
    idx_first_char = 0
    cumulative_lines_length = list(itertools.accumulate(map(len, lines)))
    return zip([idx_first_char] + cumulative_lines_length,
               cumulative_lines_length)
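A quick illustration of the intervals it produces (the sample lines are mine):

import itertools
from typing import Iterator, List, Tuple

lines = ["ab\n", "cde\n", "f"]                     # lengths 3, 4, 1
print(list(get_interval_offsets_txt(lines)))       # [(0, 3), (3, 7), (7, 8)]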
def from_pydata(self, vertices, edges, faces):
"""
Make a mesh from a list of vertices/edges/faces
Until we have a nicer way to make geometry, use this.
:arg vertices:
float triplets each representing (X, Y, Z)
eg: [(0.0, 1.0, 0.5), ...].
:type vertices: iterable object
:arg edges:
int pairs, each pair contains two indices to the
*vertices* argument. eg: [(1, 2), ...]
:type edges: iterable object
:arg faces:
iterator of faces, each faces contains three or more indices to
the *vertices* argument. eg: [(5, 6, 8, 9), (1, 2, 3), ...]
:type faces: iterable object
.. warning::
Invalid mesh data
*(out of range indices, edges with matching indices,
2 sided faces... etc)* are **not** prevented.
If the data used for mesh creation isn't known to be valid,
run :class:`Mesh.validate` after this function.
"""
from itertools import chain, islice, accumulate
face_lengths = tuple(map(len, faces))
self.vertices.add(len(vertices))
self.edges.add(len(edges))
self.loops.add(sum(face_lengths))
self.polygons.add(len(faces))
self.vertices.foreach_set("co", tuple(chain.from_iterable(vertices)))
self.edges.foreach_set("vertices", tuple(chain.from_iterable(edges)))
vertex_indices = tuple(chain.from_iterable(faces))
    # Loop start offsets: 0 followed by the running sum of face lengths,
    # truncated to one entry per face.
    loop_starts = tuple(islice(chain([0], accumulate(face_lengths)), len(faces)))
self.polygons.foreach_set("loop_total", face_lengths)
self.polygons.foreach_set("loop_start", loop_starts)
self.polygons.foreach_set("vertices", vertex_indices)
# if no edges - calculate them
if faces and (not edges):
self.update(calc_edges=True)
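Outside of Blender, the loop bookkeeping above is just an accumulate pattern; a standalone sketch with made-up faces:

from itertools import accumulate, chain, islice

faces = [(5, 6, 8, 9), (1, 2, 3)]        # hypothetical sample faces
face_lengths = tuple(map(len, faces))    # (4, 3) -> the polygons' loop_total
loop_starts = tuple(islice(chain([0], accumulate(face_lengths)), len(faces)))
print(face_lengths, loop_starts)         # (4, 3) (0, 4)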
def plackett_luce(rankings, tolerance=1e-9, check_assumption=True, normalize=True, verbose=False):
    '''This algorithm returns the MLE of the Plackett-Luce ranking parameters
    over a given set of rankings. It requires that the set of players cannot
    be split into two disjoint sets A and B such that no player in A has
    beaten any player in B. If this assumption fails, the algorithm will
    diverge; if the assumption is checked and fails, the algorithm
    short-circuits and returns None.

    Input is a list of dictionaries, where each dictionary corresponds to an
    individual ranking and maps player -> finish position for that ranking.
    Output is a dictionary mapping player -> Plackett-Luce parameter.

    Relies on module-level imports/helpers: itertools.accumulate and
    combinations, collections.Counter, math.sqrt, time, and an scc()
    strongly-connected-components helper.
    '''
    players = set(key for ranking in rankings for key in ranking.keys())
    # Convert each ranking dict into a list of players ordered by finish position.
    rankings = [sorted(ranking.keys(), key=ranking.get) for ranking in rankings]
if verbose:
print('Using native Python implementation of Plackett-Luce.')
print('{:,} unique players found.'.format(len(players)))
print('{:,} rankings found.'.format(len(rankings)))
if check_assumption:
edges = [(source, dest) for ranking in rankings for source, dest in combinations(ranking, 2)]
scc_count = len(set(scc(edges).values()))
if verbose:
if scc_count == 1:
                print('No disjoint sets found. Algorithm convergence conditions are met.')
else:
print('{:,} disjoint sets found. Algorithm will diverge.'.format(scc_count))
if scc_count != 1:
return None
    # How many times each player finishes anywhere but last (their 'win' count).
    ws = Counter(name for ranking in rankings for name in ranking[:-1])
    # Start from uniform parameters.
    gammas = {player: 1.0 / len(players) for player in players}
gdiff = float('inf')
iteration = 0
start = time.perf_counter()
while gdiff > tolerance:
_gammas = gammas
        # For each ranking, entry j of its gamma_sums list is the sum over
        # positions k <= j of 1 / (total gamma of the players finishing k-th
        # or worse); these are the denominators used in the gamma update below.
        gamma_sums = [
            list(accumulate(
                1 / s
                for s in reversed(list(accumulate(
                    gammas[finisher] for finisher in reversed(ranking))))))
            for ranking in rankings
        ]
gammas = {player : ws[player] / sum(gamma_sum[min(ranking.index(player), len(ranking) - 2)]
for ranking, gamma_sum in zip(rankings, gamma_sums) if player in ranking)
for player in players}
if normalize:
gammas = {player : gamma / sum(gammas.values()) for player, gamma in gammas.items()}
pgdiff = gdiff
gdiff = sqrt(sum((gamma - _gammas[player]) ** 2 for player, gamma in gammas.items()))
iteration += 1
if verbose:
now = time.perf_counter()
print("%d %.2f seconds L2=%.2e" % (iteration, now-start, gdiff))
if gdiff > pgdiff:
print("Gamma difference increased, %.4e %.4e" % (gdiff, pgdiff))
start = now
return gammas
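A hypothetical call, assuming the imports/helpers listed in the docstring are available at module level (check_assumption=False sidesteps the scc() helper entirely); the rankings are made up.

from collections import Counter
from itertools import accumulate, combinations
from math import sqrt
import time

# Three made-up rankings: lower number = better finish.
rankings = [
    {'ann': 1, 'bob': 2, 'cal': 3},
    {'bob': 1, 'ann': 2, 'cal': 3},
    {'ann': 1, 'cal': 2, 'bob': 3},
]
gammas = plackett_luce(rankings, check_assumption=False)
# Parameters are normalized to sum to 1; ann should get the largest share
# and cal the smallest.
print(gammas)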