def test_messages_published_without_flush(self, message, producer_instance):
    with capture_new_messages(
        message.topic
    ) as get_messages, producer_instance as producer:
        producer.publish(message)

    assert len(multiprocessing.active_children()) == 0
    assert len(get_messages()) == 1
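The assertion on `multiprocessing.active_children()` relies on the fact that it only reports children that are still alive (and reaps finished ones as a side effect). A minimal stand-alone sketch of that behaviour, with names of our own choosing:

import multiprocessing

def _wait_for(event):
    event.wait()

if __name__ == '__main__':
    event = multiprocessing.Event()
    p = multiprocessing.Process(target=_wait_for, args=(event,))
    p.start()
    assert p in multiprocessing.active_children()       # child still running
    event.set()
    p.join()
    assert p not in multiprocessing.active_children()   # joined children are reaped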
def close(self):
    """Closes the producer, flushing all buffered messages into Kafka.

    Calling this method directly is not recommended; instead, use the
    producer as a context manager::

        with Producer() as producer:
            producer.publish(message)
            ...
            producer.publish(message)
    """
    self.registrar.stop()
    self.monitor.close()
    self._kafka_producer.close()
    assert len(multiprocessing.active_children()) == 0
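The context-manager usage the docstring recommends typically amounts to `__exit__` delegating to `close()`. A minimal sketch of that protocol, as our own illustration rather than the producer's actual implementation:

class ClosingProducer:
    """Hypothetical wrapper showing the context-manager protocol."""

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.close()      # flush buffered messages even on error
        return False      # never swallow exceptions

    def close(self):
        pass              # flush and release resources here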
def shutdown(self, c):
    '''
    Shutdown this process
    '''
    try:
        try:
            util.debug('manager received shutdown message')
            c.send(('#RETURN', None))

            if sys.stdout != sys.__stdout__:
                util.debug('resetting stdout, stderr')
                sys.stdout = sys.__stdout__
                sys.stderr = sys.__stderr__

            util._run_finalizers(0)

            for p in active_children():
                util.debug('terminating a child process of manager')
                p.terminate()

            for p in active_children():
                util.debug('joining a child process of manager')
                p.join()

            util._run_finalizers()
            util.info('manager exiting with exitcode 0')
        except:
            import traceback
            traceback.print_exc()
        finally:
            exit(0)
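Note the two separate passes above: every child is asked to terminate first, and only then are they joined, so the shutdown does not wait serially on one child while the others keep running. The same pattern as a stand-alone helper (a sketch; the function name is ours):

from multiprocessing import active_children

def reap_children():
    for p in active_children():
        p.terminate()   # signal every child to exit first...
    for p in active_children():
        p.join()        # ...then wait for each one in turn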
def test_server_multiproc(set_timeout, restore_signal):
    started = mp.Value('i', 0)
    terminated = mp.Value('i', 0)
    proc_idxs = mp.Array('i', 3)

    @aiotools.actxmgr
    async def myserver(loop, proc_idx, args):
        started, terminated, proc_idxs = args
        await asyncio.sleep(0)
        with started.get_lock():
            started.value += 1
            proc_idxs[proc_idx] = proc_idx
        yield
        await asyncio.sleep(0)
        with terminated.get_lock():
            terminated.value += 1

    def interrupt():
        os.kill(0, signal.SIGINT)

    set_timeout(0.2, interrupt)
    aiotools.start_server(myserver, num_workers=3,
                          args=(started, terminated, proc_idxs))

    assert started.value == 3
    assert terminated.value == 3
    assert list(proc_idxs) == [0, 1, 2]
    assert len(mp.active_children()) == 0
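The counters in this test stay consistent across worker processes because every increment happens under `Value.get_lock()`. A minimal stand-alone sketch of that idiom (names and numbers here are illustrative):

import multiprocessing as mp

def bump(counter, n):
    for _ in range(n):
        with counter.get_lock():    # serialize the read-modify-write
            counter.value += 1

if __name__ == '__main__':
    counter = mp.Value('i', 0)
    procs = [mp.Process(target=bump, args=(counter, 1000)) for _ in range(3)]
    for p in procs:
        p.start()
    for p in procs:
        p.join()
    assert counter.value == 3000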
def test_process(self):
    q = self.Queue(1)
    e = self.Event()
    args = (q, 1, 2)
    kwargs = {'hello': 23, 'bye': 2.54}
    name = 'SomeProcess'
    p = self.Process(
        target=self._test, args=args, kwargs=kwargs, name=name
    )
    p.daemon = True
    current = self.current_process()

    if self.TYPE != 'threads':
        self.assertEqual(p.authkey, current.authkey)
    self.assertEqual(p.is_alive(), False)
    self.assertEqual(p.daemon, True)
    self.assertNotIn(p, self.active_children())
    self.assertTrue(type(self.active_children()) is list)
    self.assertEqual(p.exitcode, None)

    p.start()

    self.assertEqual(p.exitcode, None)
    self.assertEqual(p.is_alive(), True)
    self.assertIn(p, self.active_children())

    self.assertEqual(q.get(), args[1:])
    self.assertEqual(q.get(), kwargs)
    self.assertEqual(q.get(), p.name)
    if self.TYPE != 'threads':
        self.assertEqual(q.get(), current.authkey)
        self.assertEqual(q.get(), p.pid)

    p.join()

    self.assertEqual(p.exitcode, 0)
    self.assertEqual(p.is_alive(), False)
    self.assertNotIn(p, self.active_children())
def start(debug=False):
    # check that the master is not already running
    if master_pid("c") is True:
        cmsg("Service master is running..., start action exit.", "error")
        sys.exit(0)
    try:
        worker_list, worker_config_list = enabled_worker()
        process_num = multiprocessing.cpu_count() * 2
        pid_list = []
        pool = multiprocessing.Pool(processes=process_num)
        for w in worker_list:
            # cap each worker type's process count at the pool size
            configured = int(worker_config_list[w]["setup"]["process_num"])
            max_worker = min(configured, process_num)
            for i in range(max_worker):
                pool.apply_async(exec_worker, args=(w,))
        # record the PIDs of the pool's child processes plus the master
        for i in multiprocessing.active_children():
            pid_list.append(i.pid)
        pid_list.append(os.getpid())
        write_master_pid = master_pid("w", os.getpid())
        write_subproc_pid = processors_list("w", pid_list)
        if (write_master_pid is None) or (write_subproc_pid is False):
            print("Error: writing master/subproc pid failed!")
            processors_list("k", pid_list)
            master_pid("k", os.getpid())
        else:
            pool.close()
            pool.join()
    except Exception as ex:
        print(ex)
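The PID bookkeeping in `start()` works because `multiprocessing.Pool` launches its workers as ordinary child processes, so they show up in `active_children()`. A minimal illustration of that, assuming nothing from the snippet above:

import multiprocessing
import os
import time

def work(_):
    time.sleep(0.1)

if __name__ == '__main__':
    pool = multiprocessing.Pool(processes=2)
    pool.map_async(work, range(4))
    worker_pids = [p.pid for p in multiprocessing.active_children()]
    print('master:', os.getpid(), 'workers:', worker_pids)
    pool.close()
    pool.join()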
def test_terminate(self):
    if self.TYPE == 'threads':
        self.skipTest('test not appropriate for {}'.format(self.TYPE))

    p = self.Process(target=self._test_terminate)
    p.daemon = True
    p.start()

    self.assertEqual(p.is_alive(), True)
    self.assertIn(p, self.active_children())
    self.assertEqual(p.exitcode, None)

    join = TimingWrapper(p.join)

    self.assertEqual(join(0), None)
    self.assertTimingAlmostEqual(join.elapsed, 0.0)
    self.assertEqual(p.is_alive(), True)

    self.assertEqual(join(-1), None)
    self.assertTimingAlmostEqual(join.elapsed, 0.0)
    self.assertEqual(p.is_alive(), True)

    # XXX maybe terminating too soon causes the problems on Gentoo...
    time.sleep(1)

    p.terminate()

    if hasattr(signal, 'alarm'):
        # On the Gentoo buildbot waitpid() often seems to block forever.
        # We use alarm() to interrupt it if it blocks for too long.
        def handler(*args):
            raise RuntimeError('join took too long: %s' % p)
        old_handler = signal.signal(signal.SIGALRM, handler)
        try:
            signal.alarm(10)
            self.assertEqual(join(), None)
        finally:
            signal.alarm(0)
            signal.signal(signal.SIGALRM, old_handler)
    else:
        self.assertEqual(join(), None)

    self.assertTimingAlmostEqual(join.elapsed, 0.0)
    self.assertEqual(p.is_alive(), False)
    self.assertNotIn(p, self.active_children())

    p.join()

    # XXX sometimes get p.exitcode == 0 on Windows ...
    #self.assertEqual(p.exitcode, -signal.SIGTERM)
def trainer(model, fifos, shared_buffer, args):
    iteration = 0
    episode_rewards = []
    episode_lengths = []
    # keep training as long as any runner process is still alive
    while len(multiprocessing.active_children()) > 0:
        batch_observations = []
        batch_actions = []
        batch_returns = []
        batch_advantages = []

        # loop over fifos from all runners
        for fifo in fifos:
            try:
                # wait for a new trajectory
                observations, actions, returns, advantages, rewards, lengths = \
                    fifo.get(timeout=args.queue_timeout)

                # add to batch
                batch_observations.append(observations)
                batch_actions.append(actions)
                batch_returns.append(returns)
                batch_advantages.append(advantages)

                # log statistics
                episode_rewards += rewards
                episode_lengths += lengths
            except Empty:
                # just ignore empty fifos, the batch will be smaller
                pass

        # if any of the runners produced trajectories
        if len(batch_observations) > 0:
            # form training data from observations, actions and returns
            x = np.array(batch_observations)
            p = np.array(batch_actions)
            R = np.array(batch_returns)
            A = np.array(batch_advantages)
            R = R[..., np.newaxis]

            # train the model
            total_loss, policy_loss, baseline_loss = model.train_on_batch([x, A], [p, R])

            # share model parameters with the runners
            shared_buffer.raw = pickle.dumps(model.get_weights(), pickle.HIGHEST_PROTOCOL)

            iteration += 1
            if iteration % args.stats_interval == 0:
                print("Iter %d: episodes %d, mean episode reward %.2f, mean episode length %.2f." %
                      (iteration, len(episode_rewards), np.mean(episode_rewards), np.mean(episode_lengths)))
                episode_rewards = []
                episode_lengths = []
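For context, `trainer()` keeps looping only while its runner child processes are alive, and each runner is expected to push `(observations, actions, returns, advantages, rewards, lengths)` tuples into its fifo. A hypothetical wiring sketch (the runner body below just emits dummy trajectories; it is our illustration, not code from the source):

import multiprocessing
import numpy as np

def runner(fifo, num_episodes):
    for _ in range(num_episodes):
        # observations, actions, returns, advantages, rewards, lengths
        fifo.put((np.zeros((5, 4)), np.zeros((5,)), np.zeros((5,)),
                  np.zeros((5,)), [1.0], [5]))

if __name__ == '__main__':
    fifos = [multiprocessing.Queue() for _ in range(2)]
    runners = [multiprocessing.Process(target=runner, args=(f, 10))
               for f in fifos]
    for r in runners:
        r.start()
    # trainer(model, fifos, shared_buffer, args) would consume here
    for r in runners:
        r.join()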