def __init__(self, maxsize=0):
    if maxsize <= 0:
        maxsize = _multiprocessing.SemLock.SEM_VALUE_MAX
    self._maxsize = maxsize
    self._reader, self._writer = Pipe(duplex=False)
    self._rlock = Lock()
    self._opid = os.getpid()
    if sys.platform == 'win32':
        self._wlock = None
    else:
        self._wlock = Lock()
    self._sem = BoundedSemaphore(maxsize)

    self._after_fork()

    if sys.platform != 'win32':
        register_after_fork(self, Queue._after_fork)
Python Pipe() usage examples (source code)
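All of the snippets below build on the same primitive: multiprocessing.Pipe() returns a pair of Connection objects, duplex by default; with duplex=False the first end is read-only and the second write-only. A minimal sketch using only the standard library:

from multiprocessing import Pipe, Process

def child(conn):
    conn.send('pong')
    conn.close()

if __name__ == '__main__':
    parent_conn, child_conn = Pipe()          # duplex by default
    p = Process(target=child, args=(child_conn,))
    p.start()
    print(parent_conn.recv())                 # -> 'pong'
    p.join()

    reader, writer = Pipe(duplex=False)       # reader can only recv(), writer can only send()
    writer.send(42)
    print(reader.recv())                      # -> 42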
def _construct_pipes(core_classes_map):
    """Creates all the pipes needed to connect the cores."""
    # Create the first pipe
    receiver, sender = Pipe(duplex=False)
    # The input pipe of the pipeline is the sender end (it feeds packages to the first core)
    input_pipe = sender
    for core_class in core_classes_map:
        # If no kwargs were passed, initialize them as an empty dict
        if Pipeline.KEY_KWARGS not in core_class:
            core_class[Pipeline.KEY_KWARGS] = {}
        # The input pipe of a core is the end that receives packages
        core_class[Pipeline.KEY_KWARGS]['pipe_in'] = receiver
        # Create the inter-core pipe
        receiver, sender = Pipe(duplex=False)
        # The output pipe of a core is the end that sends the result
        core_class[Pipeline.KEY_KWARGS]['pipe_out'] = sender
    # The output pipe of the pipeline is the receiver end of the last core (it yields the final result)
    output_pipe = receiver
    return input_pipe, output_pipe
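The chaining idea above can be reproduced with plain Pipe and Process objects. The sketch below is illustrative (the stage function and the lambdas are not from the project) and assumes a fork-based start method, since lambdas are not picklable under spawn:

from multiprocessing import Pipe, Process

def stage(pipe_in, pipe_out, func):
    # Read packages from pipe_in, transform them, forward to pipe_out.
    for package in iter(pipe_in.recv, None):   # None acts as the stop sentinel
        pipe_out.send(func(package))
    pipe_out.send(None)                        # propagate the sentinel downstream

if __name__ == '__main__':
    # Same topology as _construct_pipes: send1 feeds the first stage, an
    # inter-stage pipe links the stages, recv3 yields the final result.
    recv1, send1 = Pipe(duplex=False)
    recv2, send2 = Pipe(duplex=False)
    recv3, send3 = Pipe(duplex=False)
    Process(target=stage, args=(recv1, send2, lambda x: x + 1)).start()
    Process(target=stage, args=(recv2, send3, lambda x: x * 2)).start()
    for item in (1, 2, 3):
        send1.send(item)
    send1.send(None)
    for result in iter(recv3.recv, None):
        print(result)                          # 4, 6, 8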
def __init__(self, env_fns):
    """
    envs: list of gym environments to run in subprocesses
    """
    self.closed = False
    nenvs = len(env_fns)
    self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(nenvs)])
    self.ps = [Process(target=worker, args=(work_remote, remote, CloudpickleWrapper(env_fn)))
               for (work_remote, remote, env_fn) in zip(self.work_remotes, self.remotes, env_fns)]
    for p in self.ps:
        p.daemon = True  # if the main process crashes, we should not cause things to hang
        p.start()
    for remote in self.work_remotes:
        remote.close()

    self.remotes[0].send(('get_spaces', None))
    self.action_space, self.observation_space = self.remotes[0].recv()
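The worker function and CloudpickleWrapper are defined elsewhere in that project. A minimal sketch of the command loop such a worker typically runs; the command names follow from the 'get_spaces' message above, and the .x attribute on the wrapper is an assumption:

def worker(remote, parent_remote, env_fn_wrapper):
    # The child closes its copy of the parent's end so EOF propagates correctly.
    parent_remote.close()
    env = env_fn_wrapper.x()   # assumed: CloudpickleWrapper stores the env factory in .x
    while True:
        cmd, data = remote.recv()
        if cmd == 'step':
            ob, reward, done, info = env.step(data)
            if done:
                ob = env.reset()
            remote.send((ob, reward, done, info))
        elif cmd == 'reset':
            remote.send(env.reset())
        elif cmd == 'get_spaces':
            remote.send((env.action_space, env.observation_space))
        elif cmd == 'close':
            remote.close()
            break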
def main():
    jobs = []
    pipe_list = []
    for i in range(5):
        # Create a one-way pipe: recv_end receives, send_end sends
        recv_end, send_end = multiprocessing.Pipe(False)
        p = multiprocessing.Process(target=worker, args=(i, send_end))
        jobs.append(p)
        pipe_list.append(recv_end)
        p.start()

    for proc in jobs:
        proc.join()
    result_list = [x.recv() for x in pipe_list]
    print(result_list)
    for x in pipe_list:
        x.close()
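The worker function is not shown in the snippet. A minimal sketch consistent with the call above; only the signature is implied by the snippet, the body is an assumption:

def worker(i, send_end):
    # Compute something from the worker index and ship the result
    # back through the write end of the pipe.
    result = i * i
    send_end.send(result)
    send_end.close()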
def __init__(self, env_m, worker_idx):
    # These are instantiated in the *parent* process
    # currently. Probably will want to change this. The parent
    # does need to obtain the relevant Spaces at some stage, but
    # that's doable.
    self.worker_idx = worker_idx
    self.env_m = env_m
    self.m = len(env_m)
    self.parent_conn, self.child_conn = multiprocessing.Pipe()
    self.joiner = multiprocessing.Process(target=self.run)
    self._clear_state()
    self.start()

    # Parent only!
    self.child_conn.close()
def test_job_wrapper_fills_pipe_with_exception_info(self):
    module_name = self.test_jobs_module.constants.THROW_AN_EXCEPTION_JOB_NAME
    config = {}
    to_job, to_self = Pipe()
    job_wrapper(to_self, module_name, config, MockStatusUpdater(), entry_point_group_name="hoplite.test_jobs")
    exec_info = to_job.recv()
    # Walk down to the bottom level of the exception information
    # (check for None first to avoid a membership test on None)
    while (exec_info is not None) and ('type' not in exec_info):
        exec_info = exec_info.get('previous_exception', None)
    try:
        self.assertEqual(exec_info['type'], str(TypeError))
        self.assertEqual(exec_info['message'], "THE SKY IS FALLING!!")
    except Exception as e:
        raise e
    finally:
        to_job.close()
        to_self.close()
def test_job_wrapper_fills_pipe_with_exception_info_bubble_up(self):
    module_name = self.test_jobs_module.constants.JOB_FAILED_EXCEPTION_JOB_NAME
    config = {}
    to_job, to_self = Pipe()
    job_wrapper(to_self, module_name, config, MockStatusUpdater(), entry_point_group_name="hoplite.test_jobs")
    exec_info = to_job.recv()
    exec_info = exec_info.get('previous_exception', None)
    try:
        self.assertEqual(exec_info['address'], "10.2.1.1")
        self.assertEqual(exec_info['uuid'], "5")
        self.assertIsInstance(exec_info['traceback'], types.TracebackType)
        # Get to the very bottom level of the exception information
        exec_info = exec_info.get('previous_exception', None)
        self.assertEqual(exec_info['message'], "Test Message")
        self.assertEqual(exec_info['type'], "Test Type String")
        self.assertEqual(exec_info['exception_object'], "pickled_string")
    except Exception as e:
        raise e
    finally:
        to_job.close()
        to_self.close()
def __init__(self, host='192.168.1.1'):
    self.host = host
    self.sequence = 1
    self.timer = 0.2
    self.com_watchdog_timer = threading.Timer(self.timer, self.commwdg)
    self.lock = threading.Lock()
    self.speed = 0.2
    self.at(ardrone.at.config, 'general:navdata_demo', 'TRUE')
    self.at(ardrone.at.config, 'control:altitude_max', '20000')
    self.video_pipe, video_pipe_other = multiprocessing.Pipe()
    self.nav_pipe, nav_pipe_other = multiprocessing.Pipe()
    self.com_pipe, com_pipe_other = multiprocessing.Pipe()
    self.network_process = ardrone.network.ARDroneNetworkProcess(self.host, nav_pipe_other, video_pipe_other, com_pipe_other)
    self.network_process.start()
    self.ipc_thread = ardrone.network.IPCThread(self)
    self.ipc_thread.start()
    self.image = PIL.Image.new('RGB', (640, 360))
    self.navdata = dict()
    self.time = 0
multiprocessing_pipes.py (project: Expert-Python-Programming_Second-Edition, author: PacktPublishing)
def main():
    parent_conn, child_conn = Pipe()
    child = Process(target=work, args=(child_conn,))

    for item in (
        42,
        'some string',
        {'one': 1},
        CustomClass(),
        None,
    ):
        print(
            "PRNT: send: {}".format(item)
        )
        parent_conn.send(item)

    child.start()
    child.join()
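The receiving work function and CustomClass are defined elsewhere in that file. A sketch consistent with the None sentinel sent above; the body is an assumption:

def work(connection):
    # Drain the pipe until the None sentinel arrives, echoing what was received.
    while True:
        item = connection.recv()
        print("CHLD: recv: {}".format(item))
        if item is None:
            break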
nutszebra_ilsvrc_object_localization_with_multi_gpus.py (project: trainer, author: nutszebra)
def setup_workers(self):
    # work only once
    if self._initialized:
        return
    self._initialized = True

    self.model.cleargrads()
    for i in six.moves.range(1, len(self.gpus)):
        pipe, worker_end = multiprocessing.Pipe()
        worker = _Worker(i, worker_end, self.model, self.gpus, self.da, int(float(self.batch) / len(self.gpus) / self.train_batch_divide), self)
        worker.start()
        self._workers.append(worker)
        self._pipes.append(pipe)

    with cuda.Device(self.gpus[0]):
        self.model.to_gpu(self.gpus[0])
        if len(self.gpus) > 1:
            communication_id = nccl.get_unique_id()
            self._send_message(("set comm_id", communication_id))
            self.communication = nccl.NcclCommunicator(len(self.gpus),
                                                       communication_id,
                                                       0)
def setup_workers(self):
    # work only once
    if self._initialized:
        return
    self._initialized = True

    self.model.zerograds()
    for i in six.moves.range(1, len(self.gpus)):
        pipe, worker_end = multiprocessing.Pipe()
        worker = _Worker(i, worker_end, self.model, self.gpus, self.da, int(self.batch / len(self.gpus) / self.train_batch_divide), self)
        worker.start()
        self._workers.append(worker)
        self._pipes.append(pipe)

    with cuda.Device(self.gpus[0]):
        self.model.to_gpu(self.gpus[0])
        if len(self.gpus) > 1:
            communication_id = nccl.get_unique_id()
            self._send_message(("set comm_id", communication_id))
            self.communication = nccl.NcclCommunicator(len(self.gpus),
                                                       communication_id,
                                                       0)
def __init__(self):
    testIfName()
    self.stop = mp.Event()
    self.plot = False
    self.plotFunct = None
    self.plotHistory = 100000
    self.samplerate = 0
    self.nChannelsInData = 1
    self.saveData = False
    self.saveDataFormat = "csv"
    self.saveDataFilename = "data"
    self.configDone = False
    self.inputToPlot_write_end, self.inputToPlot_read_end = mp.Pipe()
    self.inputToFile_write_end, self.inputToFile_read_end = mp.Pipe()
    self.output_write_end, self.output_read_end = mp.Pipe()
    self.processes = {}
    self.rdy = {}
    self.inputChannels = []
    self.activeChannels = {}
def __call__(self, *args, **kargs):
    def function_process(pipe, function, args, kargs):
        pipe.send(function(*args, **kargs))  # A: result got from function process
    p_pipe, c_pipe = multiprocessing.Pipe()
    p = Process(target=function_process, args=(c_pipe, self.function, args, kargs))
    p.start()
    p.join(self._timeout_threshold)  # Wait up to the timeout threshold
    if p.exception:
        # The target raised some other error. Note that p.exception is not an
        # attribute of the stock multiprocessing.Process; this relies on a
        # custom Process subclass defined in the same project.
        raise RuntimeError
    elif p.is_alive():
        # The timeout threshold passed; terminate the function process
        p.terminate()
        raise TimeoutError('Timeout')
    else:
        return p_pipe.recv()  # return the result from A
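The constructor of this wrapper is not shown; assuming it stores the wrapped callable and a timeout, usage would look roughly like the following. TimeoutWrapper and its constructor arguments are illustrative names, not the project's actual API:

import time

# Hypothetical construction; only __call__ appears in the snippet above.
slow_with_timeout = TimeoutWrapper(function=time.sleep, timeout_threshold=1.0)
try:
    slow_with_timeout(5)   # sleeps longer than the 1 s threshold
except TimeoutError:
    print('terminated after exceeding the timeout threshold')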
def test_recursion(self):
    rconn, wconn = self.Pipe(duplex=False)
    self._test_recursion(wconn, [])

    time.sleep(DELTA)
    result = []
    while rconn.poll():
        result.append(rconn.recv())

    expected = [
        [],
        [0],
        [0, 0],
        [0, 1],
        [1],
        [1, 0],
        [1, 1]
    ]
    self.assertEqual(result, expected)
def test_spawn_close(self):
    # We test that a pipe connection can be closed by parent
    # process immediately after child is spawned.  On Windows this
    # would have sometimes failed on old versions because
    # child_conn would be closed before the child got a chance to
    # duplicate it.
    conn, child_conn = self.Pipe()

    p = self.Process(target=self._echo, args=(child_conn,))
    p.daemon = True
    p.start()
    child_conn.close()    # this might complete before child initializes

    msg = latin('hello')
    conn.send_bytes(msg)
    self.assertEqual(conn.recv_bytes(), msg)

    conn.send_bytes(SENTINEL)
    conn.close()
    p.join()
def test_large_fd_transfer(self):
    # With fd > 256 (issue #11657)
    if self.TYPE != 'processes':
        self.skipTest("only makes sense with processes")
    conn, child_conn = self.Pipe(duplex=True)

    p = self.Process(target=self._writefd, args=(child_conn, b"bar", True))
    p.daemon = True
    p.start()
    with open(test_support.TESTFN, "wb") as f:
        fd = f.fileno()
        for newfd in range(256, MAXFD):
            if not self._is_fd_assigned(newfd):
                break
        else:
            self.fail("could not find an unassigned large file descriptor")
        os.dup2(fd, newfd)
        try:
            reduction.send_handle(conn, newfd, p.pid)
        finally:
            os.close(newfd)
    p.join()
    with open(test_support.TESTFN, "rb") as f:
        self.assertEqual(f.read(), b"bar")
def test_timeout(self):
    old_timeout = socket.getdefaulttimeout()
    try:
        socket.setdefaulttimeout(0.1)
        parent, child = multiprocessing.Pipe(duplex=True)
        l = multiprocessing.connection.Listener(family='AF_INET')
        p = multiprocessing.Process(target=self._test_timeout,
                                    args=(child, l.address))
        p.start()
        child.close()
        self.assertEqual(parent.recv(), 123)
        parent.close()
        conn = l.accept()
        self.assertEqual(conn.recv(), 456)
        conn.close()
        l.close()
        p.join(10)
    finally:
        socket.setdefaulttimeout(old_timeout)

#
# Test what happens with no "if __name__ == '__main__'"
#
def test_ignore(self):
    conn, child_conn = multiprocessing.Pipe()
    try:
        p = multiprocessing.Process(target=self._test_ignore,
                                    args=(child_conn,))
        p.daemon = True
        p.start()
        child_conn.close()
        self.assertEqual(conn.recv(), 'ready')
        time.sleep(0.1)
        os.kill(p.pid, signal.SIGUSR1)
        time.sleep(0.1)
        conn.send(1234)
        self.assertEqual(conn.recv(), 1234)
        time.sleep(0.1)
        os.kill(p.pid, signal.SIGUSR1)
        self.assertEqual(conn.recv_bytes(), b'x' * (1024 * 1024))
        time.sleep(0.1)
        p.join()
    finally:
        conn.close()
def test_ignore_listener(self):
    conn, child_conn = multiprocessing.Pipe()
    try:
        p = multiprocessing.Process(target=self._test_ignore_listener,
                                    args=(child_conn,))
        p.daemon = True
        p.start()
        child_conn.close()
        address = conn.recv()
        time.sleep(0.1)
        os.kill(p.pid, signal.SIGUSR1)
        time.sleep(0.1)
        client = multiprocessing.connection.Client(address)
        self.assertEqual(client.recv(), 'welcome')
        p.join()
    finally:
        conn.close()
def start_pollers(configs):
    """start_pollers starts a set of pollers for specified configurations.

    Args:
        configs: Configurations for the pollers.

    Returns:
        Array of poller info (process, comm pipe).
    """
    pollers = []
    for config in configs:
        parent_pipe, child_pipe = Pipe()
        p = Process(target=poll_scm, args=(config, child_pipe,))
        pollers.append(PollerInfo(
            process=p,
            pipe=parent_pipe))
        l.info('Starting poller for {}.'.format(config.name))
        p.start()
    return pollers
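PollerInfo is constructed with keyword arguments but is not defined in the snippet; a namedtuple matching that usage would suffice. This is an assumption, not the project's actual definition:

from collections import namedtuple

# Hypothetical definition consistent with PollerInfo(process=..., pipe=...) above.
PollerInfo = namedtuple('PollerInfo', ['process', 'pipe'])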
def __init__(self, nenvs, map_name):
    """
    Spawn nenvs environments for map_name in subprocesses.
    """
    self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(nenvs)])
    # One worker process per work-side pipe end, each tagged with its index
    self.ps = [Process(target=worker, args=(work_remote, map_name, i))
               for i, work_remote in enumerate(self.work_remotes)]
    for p in self.ps:
        p.start()

    self.remotes[0].send(('get_spaces', 1))
    self.action_space, self.observation_space = self.remotes[0].recv()
    #print("action_space: ", self.action_space, " observation_space: ", self.observation_space)