def __init__(self, n_consumers, initializer = None, initargs = ()):
self._tasks = mp.JoinableQueue()
self._ntasks = 0
self._results = mp.Queue()
self._state = "OPEN"
self._done = False
self._consumers = [ ConnectedConsumer(self._tasks, self._results,
initializer = initializer, initargs = initargs) \
for i in xrange(n_consumers) ]
for consumer in self._consumers:
consumer.daemon = True
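The ConnectedConsumer class is not shown in this snippet. A minimal sketch of what such a worker might look like, assuming each task is a callable and a None sentinel signals shutdown (the real class may differ):

import multiprocessing as mp

class SketchConsumer(mp.Process):
    # Hypothetical stand-in for ConnectedConsumer: drain the shared JoinableQueue,
    # run per-process initialization once, and stop on a None poison pill.
    def __init__(self, tasks, results, initializer=None, initargs=()):
        super(SketchConsumer, self).__init__()
        self._tasks = tasks
        self._results = results
        self._initializer = initializer
        self._initargs = initargs

    def run(self):
        if self._initializer is not None:
            self._initializer(*self._initargs)   # per-process setup
        while True:
            task = self._tasks.get()             # block until work arrives
            if task is None:                     # poison pill: shut down
                self._tasks.task_done()
                break
            try:
                self._results.put(task())        # assumes tasks are callables
            finally:
                self._tasks.task_done()          # keep JoinableQueue.join() accurate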
def __init__(self):
self.args = pms
self.tasks = multiprocessing.JoinableQueue()
self.results = multiprocessing.Queue()
self.actors = []
self.actors.append(Actor(self.args, self.tasks, self.results, 9999, self.args.record_movie))
for i in xrange(self.args.jobs-1):
self.actors.append(Actor(self.args, self.tasks, self.results, 37*(i+3), False))
for a in self.actors:
a.start()
# we will start by running 20,000 / 1000 = 20 episodes for the first iteration
self.average_timesteps_in_episode = 1000
def refactor(self, items, write=False, doctests_only=False,
num_processes=1):
if num_processes == 1:
return super(MultiprocessRefactoringTool, self).refactor(
items, write, doctests_only)
try:
import multiprocessing
except ImportError:
raise MultiprocessingUnsupported
if self.queue is not None:
raise RuntimeError("already doing multiple processes")
self.queue = multiprocessing.JoinableQueue()
self.output_lock = multiprocessing.Lock()
processes = [multiprocessing.Process(target=self._child)
for i in xrange(num_processes)]
try:
for p in processes:
p.start()
super(MultiprocessRefactoringTool, self).refactor(items, write,
doctests_only)
finally:
self.queue.join()
for i in xrange(num_processes):
self.queue.put(None)
for p in processes:
if p.is_alive():
p.join()
self.queue = None
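The queue built here is drained by the _child target handed to each Process. That method is not included in this snippet; a sketch of the consumer loop implied by the code above (one None sentinel per worker, one task_done() per get()):

def _child(self):
    # Pull (args, kwargs) work items until the None sentinel arrives, so the
    # queue.join() in the finally block above can eventually return.
    task = self.queue.get(block=True)
    while task is not None:
        args, kwargs = task
        try:
            self.refactor_file(*args, **kwargs)
        finally:
            self.queue.task_done()               # pair every get() with task_done()
        task = self.queue.get(block=True)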
def __init__(self,
functions,
output_names=['val'],
initializator=None,
initializator_args={},
max_size=1024,
threads_num=8,
update=False,
resources_demanded=1,
mode="consumer",
counter=None):
self.functions = functions
self.output_names = output_names
self.initializator = initializator
self.initializator_args = initializator_args
self.threads_num = threads_num
self.update = update
self.resources_demanded = resources_demanded
self.mode = mode
self.counter = counter
self.out_queue = JoinableQueue(max_size)
self.in_queue = None
self.runners = None
self.runners_events = None
def init_input_queue(self, func, max_size=0):
self.in_queue = JoinableQueue(max_size)
func(self.in_queue)
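init_input_queue hands the freshly created JoinableQueue to a caller-supplied filler. A hypothetical usage sketch (fill_with_paths and worker are illustrative names, not from the source):

def fill_with_paths(queue):
    # Example filler: enqueue whatever the runners are expected to consume.
    for path in ["a.txt", "b.txt", "c.txt"]:
        queue.put(path)

worker.init_input_queue(fill_with_paths, max_size=256)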
def __init__(self, num_processes, handle_queue_size, output_queue_size=0):
self.__workers = []
self.__num_processes = num_processes
self.__queue = multiprocessing.JoinableQueue(handle_queue_size)
if output_queue_size != 0:
self.__output = multiprocessing.JoinableQueue(output_queue_size)
else:
self.__output = None
def __init__(self, opts):
self.opts = opts
self.mode = None
self.dic_stats = {}
self.debug = False
self.labels = self.opts['labels']
self.colorsM = self.opts['colormap']
self.LUT = fromHEX2RGB(self.colorsM)
self.L = len(self.labels)
self.last_net = []
self.listManagers = self.opts['bmanagers']
self.num_random_imgs = self.opts['samples.vis']
self.policy = self.opts['policy']
self.listSampleImages = []
if(self.policy == 'fixed'):
for i in range(len(self.listManagers)):
self.listSampleImages.append(self.listManagers[i].getRandomSamples(self.num_random_imgs[i]))
self.messages_queue = JoinableQueue()
self.results_queue = JoinableQueue()
self.inner_process = Process(target=self.dummy, args=(self.messages_queue, self.results_queue))
self.inner_process.start()
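The dummy method targeted by inner_process is not part of this snippet. A plausible shape for it, assuming a request/response loop over the two JoinableQueues and a None shutdown sentinel (self.process is a placeholder for the real per-message work):

def dummy(self, messages_queue, results_queue):
    while True:
        msg = messages_queue.get()
        if msg is None:                          # assumed shutdown sentinel
            messages_queue.task_done()
            break
        results_queue.put(self.process(msg))     # placeholder for real handling
        messages_queue.task_done()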
# --------------------------------:
def main(unused_argv):
"""Dump stats about all the actions that are in use in a set of replays."""
run_config = run_configs.get()
if not gfile.Exists(FLAGS.replays):
sys.exit("{} doesn't exist.".format(FLAGS.replays))
stats_queue = multiprocessing.Queue()
stats_thread = threading.Thread(target=stats_printer, args=(stats_queue,))
stats_thread.start()
try:
# For some reason buffering everything into a JoinableQueue makes the
# program not exit, so save it into a list then slowly fill it into the
# queue in a separate thread. Grab the list synchronously so we know there
# is work in the queue before the SC2 processes actually run, otherwise
# the replay_queue.join below succeeds without doing any work, and exits.
print("Getting replay list:", FLAGS.replays)
replay_list = sorted(run_config.replay_paths(FLAGS.replays))
print(len(replay_list), "replays found.\n")
replay_queue = multiprocessing.JoinableQueue(FLAGS.parallel * 10)
replay_queue_thread = threading.Thread(target=replay_queue_filler,
args=(replay_queue, replay_list))
replay_queue_thread.daemon = True
replay_queue_thread.start()
for i in range(FLAGS.parallel):
p = ReplayProcessor(i, run_config, replay_queue, stats_queue)
p.daemon = True
p.start()
time.sleep(1) # Stagger startups, otherwise they seem to conflict somehow
replay_queue.join() # Wait for the queue to empty.
except KeyboardInterrupt:
print("Caught KeyboardInterrupt, exiting.")
finally:
stats_queue.put(None) # Tell the stats_thread to print and exit.
stats_thread.join()
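The replay_queue_filler thread target referenced above is not shown here. A plausible sketch, assuming it simply trickles the pre-built list into the bounded JoinableQueue so the SC2 worker processes always find work without the whole list sitting in the queue:

def replay_queue_filler(replay_queue, replay_list):
    # Blocks whenever the bounded queue is full, which keeps memory use flat
    # and avoids the exit problem described in the comment above.
    for replay_path in replay_list:
        replay_queue.put(replay_path)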
def parallel_for(a, cls, args=[], kwargs={}, num_processes=None):
from multiprocessing import Process, JoinableQueue, cpu_count, Pipe
if num_processes is None:
num_processes = cpu_count()
# Note that JoinableQueue uses an integer for tracking locations in the queue.
# Because it's using shared memory it's not terribly flexible and gives annoyingly
# unclear errors if you go over the limit. We'd like the queue to be as large as
# possible so that we can avoid contention, but without allocating a max possible
# size queue unless we need it, thus the calculation below. 32767 is a hard limit.
q = JoinableQueue(maxsize=min(len(a)+num_processes, 2**15 - 1))
output_pipes = [Pipe(duplex=False) for _ in range(num_processes)]
send_pipes = [p for _, p in output_pipes]
recv_pipes = [p for p, _ in output_pipes]
pool = [Process(target=_parallel_for, args=(q, cls, pipe) + tuple(args), kwargs=kwargs)
for pipe in send_pipes]
output_watcher = MultiPipeWatcher(recv_pipes)
try:
for p in pool:
p.start()
output_watcher.start()
for x in a:
q.put(x)
for _ in range(num_processes):
q.put(None) # End markers
q.close()
q.join_thread()
q.join()
for p in pool:
p.join()
output_watcher.flush()
output_watcher.join()
combined_output = output_watcher.merged
return combined_output
except KeyboardInterrupt:
print "Interrupted -- terminating worker processes"
for p in pool:
p.terminate()
for p in pool:
p.join()
raise
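The _parallel_for worker target is not included in this snippet. A hypothetical sketch consistent with how it is launched above (consume items until the None end marker, stream results back through the send end of the Pipe; treating cls instances as callables is an assumption):

def _parallel_for(q, cls, send_pipe, *args, **kwargs):
    worker = cls(*args, **kwargs)                # assumed per-process instance
    while True:
        item = q.get()
        if item is None:                         # end marker put by parallel_for
            q.task_done()
            break
        send_pipe.send(worker(item))             # assumes instances are callable
        q.task_done()
    send_pipe.close()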
def _kill_workers(task_queue, num_workers):
'''
Sends a None poison pill to all active workers.
Args:
    task_queue (JoinableQueue): The task queue upon which
        to put the poison pills
    num_workers (int): The number of workers, which translates
        to the number of poison pills to put in the queue
'''
for w in range(num_workers): task_queue.put(None)
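A hedged example of the shutdown ordering this helper implies, assuming each worker calls task_done() for every item including its poison pill (worker_loop and jobs are illustrative placeholders):

import multiprocessing

task_queue = multiprocessing.JoinableQueue()
workers = [multiprocessing.Process(target=worker_loop, args=(task_queue,))
           for _ in range(4)]                    # worker_loop is hypothetical
for w in workers:
    w.start()
for job in jobs:                                 # enqueue the real work first
    task_queue.put(job)
_kill_workers(task_queue, len(workers))          # one None pill per worker
task_queue.join()                                # returns once everything is task_done()
for w in workers:
    w.join()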
def __init__(self, num_workers = 20):
self.queue = Queue()
self.pool = []
self._setup_workers(num_workers)
def refactor(self, items, write=False, doctests_only=False,
num_processes=1):
if num_processes == 1:
return super(MultiprocessRefactoringTool, self).refactor(
items, write, doctests_only)
try:
import multiprocessing
except ImportError:
raise MultiprocessingUnsupported
if self.queue is not None:
raise RuntimeError("already doing multiple processes")
self.queue = multiprocessing.JoinableQueue()
self.output_lock = multiprocessing.Lock()
processes = [multiprocessing.Process(target=self._child)
for i in range(num_processes)]
try:
for p in processes:
p.start()
super(MultiprocessRefactoringTool, self).refactor(items, write,
doctests_only)
finally:
self.queue.join()
for i in range(num_processes):
self.queue.put(None)
for p in processes:
if p.is_alive():
p.join()
self.queue = None
def __init__(self, count, func, end_clause='end', **func_kwargs):
self.count = count
self.func = func
self.queue = multiprocessing.JoinableQueue()
self.end_clause = end_clause
self.func_kwargs = func_kwargs
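How the pool consumes the queue is not shown. One guess at the intended contract, assuming self.count worker processes run self.func on each item until the configured end_clause sentinel appears (this is an illustration, not code from the source):

def _worker(self):
    while True:
        item = self.queue.get()
        if item == self.end_clause:              # default sentinel is the string 'end'
            self.queue.task_done()
            break
        self.func(item, **self.func_kwargs)
        self.queue.task_done()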
def __init__(self, *args, **kwargs):
super(StreamingReplicatedPostgresqlContainer, self).__init__(
*args, **kwargs)
self.master_obtained = multiprocessing.Event()
self.master_lock = multiprocessing.Lock()
self.multiprocessing_data = data = multiprocessing.Manager().Namespace()
data.db_exists = False
data.exception = None
self.instances = multiprocessing.JoinableQueue()
def __iter__(self):
queue = JoinableQueue(maxsize=self.max_queue_size)
n_batches, job_queue = self._start_producers(queue)
# Run as consumer (read items from queue, in current thread)
for x in xrange(n_batches):
item = queue.get()
#print queue.qsize(), "GET"
yield item # Yield the item to the consumer (user)
queue.task_done()
queue.close()
job_queue.close()
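_start_producers is not part of this snippet. A hypothetical sketch of one producer it might launch, pushing batches into the bounded JoinableQueue that __iter__ drains above (load_batch is a placeholder for the real batch-building work):

def _produce(jobs, queue):
    for job in jobs:
        queue.put(load_batch(job))               # blocks when max_queue_size is reached
    queue.join()                                 # wait until the consumer marked all items done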