from heapq import heapify, heappop, heapreplace

def imerge(iterables):
    """Lazily merge already-sorted iterables into a single sorted stream."""
    _hpop, _hreplace, _Stop = (heappop, heapreplace, StopIteration)
h = []
h_append = h.append
for itnum, it in enumerate(map(iter, iterables)):
try:
            nx = it.__next__  # bound "next" method of the iterator (Python 3)
h_append([nx(), itnum, nx])
except _Stop:
pass
heapify(h)
while 1:
try:
while 1:
v, itnum, nx = s = h[0]
yield v
s[0] = nx()
_hreplace(h, s)
except _Stop:
_hpop(h)
except IndexError:
return
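
# --- Usage sketch (not part of the original recipe): assumes the heapq imports
# --- above and a Python 3 interpreter; each input iterable is already sorted.
streams = [[1, 4, 7], [2, 5, 8], [0, 3, 6, 9]]
print(list(imerge(streams)))        # -> [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
# The standard library's heapq.merge(*streams) yields the same result.
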
def get_skyline(LRH):
"""
Wortst Time Complexity: O(NlogN)
:type buildings: List[List[int]]
:rtype: List[List[int]]
"""
skyline, live = [], []
i, n = 0, len(LRH)
while i < n or live:
        if not live or (i < n and LRH[i][0] <= -live[0][1]):
x = LRH[i][0]
while i < n and LRH[i][0] == x:
heapq.heappush(live, (-LRH[i][2], -LRH[i][1]))
i += 1
else:
x = -live[0][1]
while live and -live[0][1] <= x:
heapq.heappop(live)
height = len(live) and -live[0][0]
if not skyline or height != skyline[-1][1]:
            skyline.append([x, height])
return skyline
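
# --- Usage sketch (not part of the original snippet): the classic skyline example,
# --- assuming `import heapq` is in scope and the buildings are sorted by left edge.
buildings = [[2, 9, 10], [3, 7, 15], [5, 12, 12], [15, 20, 10], [19, 24, 8]]
print(get_skyline(buildings))
# -> [[2, 10], [3, 15], [7, 12], [12, 0], [15, 10], [20, 8], [24, 0]]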
def _timer(self):
if not self._queue or not self._waiters:
return
ts = heapq.nsmallest(1, self._queue)[0][0]
t = ts - time.time()
if t < 0:
score, f = self._waiters.pop(0)
ts, val = heapq.heappop(self._queue)
if score:
f.set_result((val, ts))
else:
f.set_result(val)
else:
await asyncio.sleep(t, loop=self.loop)
self._future = self.loop.create_task(self._timer())
def explore_frontier(sig, data_type, metric, max_size):
# closure for binding sig and rules easily
expand = producers.generate_expander(sig)
# generate appropriate starting node based on type
start_node = producers.e.Un(producers.parse_type(data_type))
frontier = [(metric(start_node), start_node)]
# go hog wild
while True:
# pull expression off heap, check expansions
m, expr = heapq.heappop(frontier)
expansions = expand(expr)
# if there are expansions, just add them back in
if expansions:
for new_expr in expansions:
heapq.heappush(frontier, (metric(new_expr), new_expr) )
# if there aren't any expansions and the program is closed, we're done
elif len(expr._values_from_kind("un")) == 0:
yield m[0], expr
#------------------------------------------------------------------------------
# now we treat this module as a script - time to execute!
#------------------------------------------------------------------------------
def run(self):
self._initialize_kernel()
while len(self._candidate_pieces) > 0:
_, (position, piece_id), relative_piece = heapq.heappop(self._candidate_pieces)
if position in self._taken_positions:
continue
# If piece is already placed, find new piece candidate and put it back to
# priority queue
if piece_id in self._kernel:
self.add_piece_candidate(relative_piece[0], relative_piece[1], position)
continue
self._put_piece_to_kernel(piece_id, position)
def create_beam_items(self, beam):
beam_items = []
for b in xrange(len(beam)):
config = beam[b]
prevScore = config.score
dense_feats = self.template.feat_template(config.nodes,
config.stack,
config.b0)
pr_scores = self.clf.predict_proba(dense_feats)[0]
pr_scores = np.log(pr_scores)
predictions = zip(pr_scores, self.clf.classes_)
valid_trans = self.get_valid_transitions(config)
for score, (action, label) in predictions:
if self.transitions[action] in valid_trans:
next_transition = valid_trans[self.transitions[action]]
heapq.heappush(
beam_items,
(prevScore + score, b, next_transition, label))
if len(beam_items) > self.beamwidth:
heapq.heappop(beam_items)
return beam_items
def find_k_nearest(self, k, s_query):
'''
Gets KNN using heap sort
'''
neighbors = []
for n in self.nodes:
dist = np.linalg.norm(s_query - n.state)
if dist > 0:
hn = HeapNode(n, dist)
heapq.heappush(neighbors, hn)
#Check if we have <= k to choose from
#in which case, simply return that list
ret_list = []
if len(neighbors) <= k:
        for hn in neighbors:        # use a distinct name so the parameter k is not shadowed
            ret_list.append(hn.node)
return ret_list
#get the k nearest neighbors
for i in xrange(k):
t = heapq.heappop(neighbors)
ret_list.append(t.node)
return ret_list
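
# --- Side note (illustrative, not from the original class): for a plain list of
# --- state vectors, the same k-nearest selection can be written with heapq.nsmallest.
import heapq
import numpy as np

def k_nearest(states, s_query, k):
    """Return the k states closest to s_query by Euclidean distance."""
    return heapq.nsmallest(k, states, key=lambda s: np.linalg.norm(s_query - s))

pts = [np.array([0.0, 0.0]), np.array([1.0, 1.0]), np.array([3.0, 0.0])]
print(k_nearest(pts, np.array([0.5, 0.5]), 2))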
def __iter__(self):
if not self.intervals:
return
while True:
e = heappop(self.intervals)
next_run = e.point[0]
heappush(self.intervals, e.shift())
yield next_run, e.action
def accumulate(self, predictions, actuals, num_positives=None):
"""Accumulate the predictions and their ground truth labels.
After the function call, we may call peek_ap_at_n to actually calculate
the average precision.
Note predictions and actuals must have the same shape.
Args:
predictions: a list storing the prediction scores.
actuals: a list storing the ground truth labels. Any value
larger than 0 will be treated as positives, otherwise as negatives.
      num_positives: If the 'predictions' and 'actuals' inputs aren't complete,
        then it's possible some true positives were missed in them. In that case,
        you can provide 'num_positives' in order to accurately track recall.
Raises:
ValueError: An error occurred when the format of the input is not the
numpy 1-D array or the shape of predictions and actuals does not match.
"""
    if len(predictions) != len(actuals):
        raise ValueError("the shape of predictions and actuals does not match.")
    if num_positives is not None:
        if not isinstance(num_positives, numbers.Number) or num_positives < 0:
            raise ValueError("'num_positives' was provided but it wasn't a nonnegative number.")
        self._total_positives += num_positives
    else:
        self._total_positives += numpy.size(numpy.where(actuals > 0))
    topk = self._top_n
    heap = self._heap
    for i in range(numpy.size(predictions)):
        if topk is None or len(heap) < topk:
            heapq.heappush(heap, (predictions[i], actuals[i]))
        elif predictions[i] > heap[0][0]:  # heap[0] is the smallest score kept
            heapq.heappop(heap)
            heapq.heappush(heap, (predictions[i], actuals[i]))
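
# --- Illustration (not from the original class): the loop above is the standard
# --- "keep only the top-k items" pattern; heap[0] is always the smallest kept value.
import heapq

def top_k(values, k):
    heap = []
    for v in values:
        if len(heap) < k:
            heapq.heappush(heap, v)
        elif v > heap[0]:                 # better than the worst item kept so far
            heapq.heapreplace(heap, v)    # pop the smallest and push v in one step
    return sorted(heap, reverse=True)

print(top_k([5, 1, 9, 3, 7, 2, 8], 3))    # -> [9, 8, 7]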
def merge_by_key(bam_filenames, key_func, bam_out):
file_cache = tk_cache.FileHandleCache(mode='rb', open_func=pysam.Samfile)
total_reads = 0
heap = []
for bam_filename in bam_filenames:
try:
bam = file_cache.get(bam_filename)
first_read = bam.next()
heapq.heappush(heap, (key_func(first_read), first_read, bam_filename))
except StopIteration:
pass
while len(heap) > 0:
# Get the minimum item and write it to the bam.
key, read, bam_filename = heapq.heappop(heap)
bam = file_cache.get(bam_filename)
bam_out.write(read)
total_reads += 1
# Get the next read from the source bam we just wrote from
# If that BAM file is out of reads, then we leave that one out
try:
next_read = bam.next()
heapq.heappush(heap, (key_func(next_read), next_read, bam_filename))
except StopIteration:
pass
return total_reads
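
# --- Side note (assumption, not from the original module): on Python 3.5+ the same
# --- key-driven k-way merge shape can be expressed with heapq.merge and its key= argument.
import heapq

runs = [[(1, "a"), (4, "b")], [(2, "c"), (3, "d")]]   # each run already sorted by key
for key, payload in heapq.merge(*runs, key=lambda item: item[0]):
    print(key, payload)                               # 1 a, 2 c, 3 d, 4 b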
def run(self):
"""Execute events until the queue is empty.
When there is a positive delay until the first event, the
delay function is called and the event is left in the queue;
otherwise, the event is removed from the queue and executed
(its action function is called, passing it the argument). If
the delay function returns prematurely, it is simply
restarted.
        It is legal for both the delay function and the action
        function to modify the queue or to raise an exception;
exceptions are not caught but the scheduler's state remains
well-defined so run() may be called again.
A questionable hack is added to allow other threads to run:
just after an event is executed, a delay of 0 is executed, to
avoid monopolizing the CPU when other threads are also
runnable.
"""
# localize variable access to minimize overhead
# and to improve thread safety
q = self._queue
delayfunc = self.delayfunc
timefunc = self.timefunc
pop = heapq.heappop
while q:
time, priority, action, argument = checked_event = q[0]
now = timefunc()
if now < time:
delayfunc(time - now)
else:
event = pop(q)
# Verify that the event was not removed or altered
# by another thread after we last looked at q[0].
if event is checked_event:
action(*argument)
delayfunc(0) # Let other threads run
else:
heapq.heappush(q, event)
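
# --- Usage sketch (standard-library sched API, not part of the snippet above):
# --- schedule two callbacks and run them in time order.
import sched, time

s = sched.scheduler(time.time, time.sleep)
s.enter(0.2, 1, print, argument=("second",))   # delay, priority, action, argument tuple
s.enter(0.1, 1, print, argument=("first",))
s.run()                                        # prints "first" then "second"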
def queue(self):
"""An ordered list of upcoming events.
Events are named tuples with fields for:
time, priority, action, arguments
"""
# Use heapq to sort the queue rather than using 'sorted(self._queue)'.
# With heapq, two events scheduled at the same time will show in
# the actual order they would be retrieved.
events = self._queue[:]
return map(heapq.heappop, [events]*len(events))
def _get(self, heappop=heapq.heappop):
return heappop(self.queue)
def _create_binary_tree(self):
"""
Create a binary Huffman tree using stored vocabulary word counts. Frequent words
will have shorter binary codes. Called internally from `build_vocab()`.
"""
vocab_size = len(self.vocab)
logger.info("constructing a huffman tree from %i words" % vocab_size)
# build the huffman tree
heap = list(self.vocab.values())
heapq.heapify(heap)
for i in range(vocab_size - 1):
min1, min2 = heapq.heappop(heap), heapq.heappop(heap)
heapq.heappush(heap, Vocab(count=min1.count + min2.count, index=i + vocab_size, left=min1, right=min2))
# recurse over the tree, assigning a binary code to each vocabulary word
if heap:
max_depth, stack = 0, [(heap[0], [], [])]
while stack:
node, codes, points = stack.pop()
if node.index < vocab_size:
# leaf node => store its path from the root
node.code, node.point = codes, points
max_depth = max(len(codes), max_depth)
else:
# inner node => continue recursion
points = np.array(list(points) + [node.index - vocab_size], dtype=int)
stack.append((node.left, np.array(list(codes) + [0], dtype=int), points))
stack.append((node.right, np.array(list(codes) + [1], dtype=int), points))
logger.info("built huffman tree with maximum node depth %i" % max_depth)
def add_starts(self, bap):
syms = []
for line in bap.syms:
heappush(syms, int(line, 16))
for i in range(len(syms)):
idaapi.add_func(heappop(syms), idaapi.BADADDR)
idc.Refresh()
idaapi.refresh_idaview_anyway()
def _expire_timers(self, now):
while (self._timers_heap and
self._timers_heap[0] <= now):
deadline = heapq.heappop(self._timers_heap)
callbacks = self._timers.get(deadline)
while callbacks:
callbacks.pop()()
del self._timers[deadline]
return self._timers_heap[0] if self._timers_heap else 0
# Proton's event model was changed after 0.7
def _results(self):
with self._condition:
while self._current < self._exec_count:
while not self._results_queue or self._results_queue[0][0] != self._current:
self._condition.wait()
while self._results_queue and self._results_queue[0][0] == self._current:
_, res = heappop(self._results_queue)
try:
self._condition.release()
if self._fail_fast and not res[0]:
self._raise(res[1])
yield res
finally:
self._condition.acquire()
self._current += 1