import logging
from queue import Empty  # raised when the input queue drains

def terminate(self):
  """Terminate data feeding early.

  Since TensorFlow applications often terminate on conditions unrelated to the
  training data (e.g. step count or target accuracy), this method signals the
  data feeding process to ignore any further incoming data. Note that Spark
  itself does not have a mechanism to terminate an RDD operation early, so the
  remaining partitions will still be sent to the executors (but will be
  ignored). Because of this, you should size your input data accordingly to
  avoid excessive overhead.
  """
  logging.info("terminate() invoked")
  # signal the feeding process to stop accepting new data
  self.mgr.set('state', 'terminating')

  # drain and discard any items remaining in the input queue
  queue = self.mgr.get_queue(self.qname_in)
  count = 0
  done = False
  while not done:
    try:
      queue.get(block=True, timeout=5)
      queue.task_done()
      count += 1
    except Empty:
      logging.info("dropped {0} items from queue".format(count))
      done = True
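This method comes from TensorFlowOnSpark's DataFeed helper. As a minimal usage sketch, assuming the DataFeed API from tensorflowonspark.TFNode (DataFeed(ctx.mgr), next_batch(), should_stop(), and the terminate() shown above), a training map function might stop on its own step count and then call terminate() to drop the unread partitions; args.max_steps and args.batch_size below are hypothetical placeholders:

# A minimal sketch, assuming the TensorFlowOnSpark DataFeed API
# (tensorflowonspark.TFNode.DataFeed); `args.max_steps` and
# `args.batch_size` are hypothetical placeholders.
from tensorflowonspark import TFNode

def map_fun(args, ctx):
  # one DataFeed per executor, backed by the same multiprocessing
  # manager (ctx.mgr) that terminate() signals through
  feed = TFNode.DataFeed(ctx.mgr)
  step = 0
  while not feed.should_stop() and step < args.max_steps:
    batch = feed.next_batch(args.batch_size)
    # ... run one training step on `batch` ...
    step += 1
  # the stopping condition was unrelated to the data (a step count here),
  # so tell the feeder to discard any partitions still in flight
  feed.terminate()

Because Spark still pushes the extra partitions to the executors after terminate() is called, the drain loop above simply consumes and counts them; sizing the input RDD close to the expected number of training steps keeps that wasted work small.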