def __init__(self, data_queue, data_paths, repeat=True):
    '''
    data_queue : multiprocessing queue used to hand off loaded mini-batches
    data_paths : list of (data, label) pairs used to load data
    repeat : if True, keep producing data until the exit event is set
    '''
super(DataProcess, self).__init__()
# Queue to transfer the loaded mini batches
self.data_queue = data_queue
self.data_paths = data_paths
self.num_data = len(data_paths)
self.repeat = repeat
    # Mini-batch size from the global config
    self.batch_size = cfg.CONST.BATCH_SIZE
self.exit = Event()
self.shuffle_db_inds()
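The matching run() method is not shown; a minimal sketch of the producer loop this constructor sets up, assuming a load_data(path) helper and a get_next_minibatch() index generator (both hypothetical here):

def run(self):
    # Hypothetical producer loop: push mini-batches until exit is signalled.
    while not self.exit.is_set():
        inds = self.get_next_minibatch()              # assumed index helper
        batch = [load_data(self.data_paths[i]) for i in inds]
        self.data_queue.put(batch)                    # blocks when the queue is full
        if not self.repeat:
            break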
def run(self, started_event: Event) -> Process:
p = Process(target=_sub_process_main, args=(
started_event,
self._channel_name,
self._connection,
self._consumer_configuration,
self._consumer_factory,
self._command_processor_factory,
self._mapper_func))
self._logger.debug("Starting worker process for channel: %s on exchange %s on server %s",
self._channel_name, self._connection.exchange, self._connection.amqp_uri)
p.start()
started_event.wait(timeout=1)
return p
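The child-side entry point _sub_process_main is expected to call started_event.set() once its consumer is wired up, which is what releases the parent's one-second wait above. A hedged sketch with the real setup elided:

def _sub_process_main(started_event, channel_name, connection,
                      consumer_configuration, consumer_factory,
                      command_processor_factory, mapper_func):
    # ... build the consumer and command processor from the factories ...
    started_event.set()  # releases started_event.wait(timeout=1) in the parent
    # ... run the message pump until told to stop ...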
def receive(self):
def _receive(dispatcher: Dispatcher, initialized: Event) -> None:
for k, v in self._performers.items():
event = Event()
dispatcher._running_performers[k] = v.run(event)
event.wait(3) # TODO: Do we want to configure this polling interval?
initialized.set()
while self._state == DispatcherState.ds_running:
time.sleep(5) # yield to avoid spinning, between checking for changes to state
if self._state == DispatcherState.ds_awaiting:
        initialized = Event()
        self._supervisor = Thread(target=_receive, args=(self, initialized))
        self._state = DispatcherState.ds_running
        self._supervisor.start()  # start before waiting, or the wait always times out
        initialized.wait(5)  # TODO: should this scale with the number of performers?
def __init__(self, *args, **kwargs):
test_notes = global_vars['test_notes']
pause_reporting = global_vars['pause_reporting']
def wrapper(func, test_notes, pause_reporting, **kwargs):
"""
:param func: function to pass to multiprocessing.Process.
        :param test_notes: multiprocessing Queue() instance; lets the wrapped function add notes to the test report.
        :param pause_reporting: multiprocessing Event() instance; pauses reporting to the terminal when input is needed.
:param kwargs: dictionary that contains all args and kwargs being sent to wrapped function.
:return:
"""
global_vars['test_notes'] = test_notes
global_vars['pause_reporting'] = pause_reporting
args_ = kwargs['args'] if 'args' in kwargs else ()
kwargs_ = kwargs['kwargs'] if 'kwargs' in kwargs else {}
return func(*args_, **kwargs_)
wrapper_args = [kwargs['target'], test_notes, pause_reporting]
wrapper_kwargs = kwargs
multiprocessing.Process.__init__(self, target=wrapper, args=wrapper_args, kwargs=wrapper_kwargs)
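Construction then looks like stock multiprocessing.Process; the wrapper re-injects the two shared handles into global_vars inside the child before calling the real target. A hedged usage example, with the subclass name and my_test assumed:

# Hypothetical names: ReportingProcess is this subclass, my_test the target.
p = ReportingProcess(target=my_test, args=(1, 2), kwargs={'verbose': True})
p.start()
p.join()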
def __init__(self, name, nsaddr=None, addr=None, serializer=None,
transport=None, base=Agent, attributes=None):
super().__init__()
self.name = name
self._daemon = None
self.host, self.port = address_to_host_port(addr)
if self.port is None:
self.port = 0
self.nsaddr = nsaddr
self.serializer = serializer
self.transport = transport
self.base = base
self.shutdown_event = multiprocessing.Event()
self.queue = multiprocessing.Queue()
self.sigint = False
self.attributes = attributes
def test_writer_blocks_reader(self):
with self.lock.for_write():
event = multiprocessing.Event()
def test():
self.assert_writer()
# Caller will block until this event is released.
event.set()
with self.lock.for_read():
self.assert_readers(1)
return 'read'
r = self.async(test)
# Wait until we can confirm that the reader is locked out.
event.wait()
self.assert_writer()
self.assertEqual('read', self.get_result(r))
self.assert_unlocked()
def test_wait_for_write(self):
event = multiprocessing.Event()
wait_count = 0
with self.lock.for_read():
def test():
with self.lock.for_write():
self.assert_writer()
event.set()
return 'written'
writer = self.async(test)
while not event.is_set():
self.assert_readers(1)
wait_count += 1
self.lock.wait_for_write()
self.assert_readers(1)
self.assertEqual('written', self.get_result(writer))
self.assert_unlocked()
self.assertLessEqual(wait_count, 2)
def test_wait_for_write__writer_already_waiting_for_reader(self):
event = multiprocessing.Event()
with self.lock.for_read():
def test():
event.set()
with self.lock.for_write():
self.assert_writer()
event.set()
return 'written'
writer = self.async(test)
event.wait()
# Force a context switch so the writer is waiting
time.sleep(0.1)
self.lock.wait_for_write()
self.assert_readers(1)
self.assertEqual('written', self.get_result(writer))
self.assert_unlocked()
def __init__(self, runner_class, path):
logger.info('Runner UI init')
urwid.set_encoding("UTF-8")
self.runner_class = runner_class
self.path = path
self.store = Store(self)
self.main_loop = None
self.w_main = None
self._first_failed_focused = False
# process comm
self.child_pipe = None
self.pipe_size = multiprocessing.Value('i', 0)
self.pipe_semaphore = multiprocessing.Event()
self.receive_buffer = ''
self.runner_process = None
self.init_main_screen()
def _setup_wsgi_server():
stop_event = multiprocessing.Event()
process = multiprocessing.Process(
target=_run_server,
args=(stop_event,)
)
process.start()
# NOTE(kgriffs): Let the server start up
time.sleep(0.2)
yield
stop_event.set()
# NOTE(kgriffs): Pump the request handler loop in case execution
# made it to the next server.handle_request() before we sent the
# event.
try:
requests.get(_SERVER_BASE_URL)
except Exception:
        pass  # server already exited
process.join()
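_run_server is not shown; the pumping request above only helps if the server loop re-checks stop_event between single-request iterations. A minimal sketch using wsgiref, with the app and address assumed:

from wsgiref.simple_server import make_server

def _app(environ, start_response):
    start_response('200 OK', [('Content-Length', '0')])
    return [b'']

def _run_server(stop_event):
    # Hypothetical address; the real fixture derives it from _SERVER_BASE_URL.
    server = make_server('127.0.0.1', 8000, _app)
    while not stop_event.is_set():
        server.handle_request()  # one request per loop, then re-check the event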
def run_mission(agents_def):
assert len(agents_def) == 2, 'Incompatible number of agents (required: 2, got: %d)' % len(agents_def)
quit = Event()
processes = []
for agent in agents_def:
agent['quit'] = quit
p = Process(target=agent_factory, kwargs=agent)
p.daemon = True
p.start()
if agent['role'] == 0:
            sleep(1)  # give the server time to start
processes.append(p)
quit.wait()
for process in processes:
process.terminate()
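agent_factory is expected to set the shared quit event when its mission finishes; that is what releases quit.wait() above. A hedged sketch of the contract:

def agent_factory(role=0, quit=None, **kwargs):
    try:
        pass  # ... connect to the server and run the agent's mission ...
    finally:
        if quit is not None:
            quit.set()  # releases quit.wait() so run_mission can clean up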
def __init__(self, cfg, mrt_bgp4mp_queue, log_queue):
""" Constructor
:param cfg: Configuration dictionary
:param mrt_bgp4mp_queue: Output for BMP raw message forwarding
:param log_queue: Logging queue - sync logging
"""
multiprocessing.Process.__init__(self)
self._stop = multiprocessing.Event()
self._cfg = cfg
self._bgp4mp_queue = mrt_bgp4mp_queue
self._log_queue = log_queue
self.LOG = None
self._sock = None
def __init__(self, cfg, log_queue):
""" Constructor
:param cfg: Configuration dictionary
:param log_queue: Logging queue - sync logging
"""
multiprocessing.Process.__init__(self)
self._stop = multiprocessing.Event()
self._thr_list = {}
self._db_conn = None
self._cfg = cfg
self._log_queue = log_queue
self.LOG = None
self._interval = int(cfg['table_dump']['interval']['minutes'])
    if self._interval < 15:
        self._interval = 900  # enforce a 15-minute minimum (in seconds)
else:
self._interval *= 60 # to seconds
def wait(seconds,event=True,hook=None):
"""
:param seconds: seconds to wait for
:param event: if True (default) it uses a dummy Event, if False it uses time.sleep, if Event is passed then it calls event.wait(seconds)
"""
r = 0
try:
if hook and isCallable(hook):
Catched(hook)()
r+=1
if not event:
time.sleep(seconds)
elif hasattr(event,'wait'):
try:
event.wait(seconds)
            except Exception as e:
                raise e
else:
_EVENT and _EVENT.wait(seconds)
r+=2
    except Exception as e:
## This method triggers unexpected exceptions on ipython exit
print('wait.hook failed!: %s,%s,%s,%s'%(event,event.wait,r,e))
if time: time.sleep(seconds)
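Typical calls, assuming the module-level _EVENT fallback exists and my_event is a caller-owned Event (hypothetical name):

wait(0.5)                    # interruptible sleep via the module's dummy Event
wait(0.5, event=False)       # plain time.sleep
wait(2.0, event=my_event)    # returns early if my_event.set() fires elsewhere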
def timed_range(seconds,period,event=None):
"""
Method used to execute the content of a for loop at periodic intervals.
For X seconds, this method will return each period fragment.
event can be used to pass a threading.Event to abort the loop if needed.
Usage:
        for t in timed_range(15, 0.1):
method_executed_at_10Hz_for_15s()
"""
t0 = time.time()
diff = 0
e = event or threading.Event()
while diff<seconds and not e.is_set():
e.wait(period)
diff = time.time()-t0
        if not e.is_set():
yield diff
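An abort example, assuming a do_periodic_work/should_abort pair (both hypothetical):

stop = threading.Event()
for elapsed in timed_range(15, 0.1, event=stop):
    do_periodic_work()   # runs at roughly 10 Hz for up to 15 s
    if should_abort():
        stop.set()       # is_set() ends the loop at the next period boundary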
def __init__(self,line='',task=None,start=False,process=False,keep=10,trace=False):
if line: self.load(line)
if task is not None: self.task = task
self.last_match = 0
self.trace = trace
self.keep = keep
self.THREAD_CLASS = threading.Thread if not process else multiprocessing.Process
self.QUEUE_CLASS = Queue.Queue if not process else multiprocessing.Queue
self.EVENT_CLASS = threading.Event if not process else multiprocessing.Event
self.LOCK_CLASS = threading.RLock if not process else multiprocessing.RLock
self._thread = None
self.event = None
self._queue = self.QUEUE_CLASS(maxsize=int(self.keep or 10))
if start: self.start()
def __init__(self,name='',process=False,wait=.01,target=None,hook=None,trace=False):
self._name = name
self.wait = wait
self._process = process
self._trace = trace
self.hook=hook
self.THREAD_CLASS = threading.Thread if not process else multiprocessing.Process
self.QUEUE_CLASS = Queue.Queue if not process else multiprocessing.Queue
self.EVENT_CLASS = threading.Event if not process else multiprocessing.Event
self.LOCK_CLASS = threading.RLock if not process else multiprocessing.RLock
self.inQueue = self.QUEUE_CLASS()
self.outQueue = self.QUEUE_CLASS()
self.errorQueue = self.QUEUE_CLASS()
self.stopEvent = self.EVENT_CLASS()
if target is not None:
self.put(target)
self._thread = self.THREAD_CLASS(name='Worker',target=self.run)
self._thread.daemon = True
def __init__(self,action=None,max_threads=5,start=False,mp=False):
import threading
    if mp:
import multiprocessing
self._myThread = multiprocessing.Process
self._myQueue = multiprocessing.Queue
else:
import Queue
self._myThread = threading.Thread
self._myQueue = Queue.Queue
self._action = action
self._max_threads = max_threads
self._threads = []
self._pending = []
self._stop = threading.Event()
self._lock = threading.Lock()
self._locked = partial(locked,_lock=self._lock)
self._started = start
self._queue = self._myQueue()
def __init__(self, platform_index, device_index, ip, port):
Process.__init__(self)
Logger.__init__(self)
# self.logger_level ^= Logger.MSG_VERBOSE
self.daemon = True
self.exit_evt = Event()
self.running = Value('i', 0)
self.platform_index = platform_index
self.device_index = device_index
self.ip = ip
self.port = port
self.uuid = uuid.uuid1().hex
self.ocl_ga = None
## Terminate the worker process; this should only be called when OpenCLGAClient
#  is shutting down. The exit_evt will be set to break the wait in the
#  process's run.
def __init__(self):
testIfName()
self.stop = mp.Event()
self.plot = False
self.plotFunct = None
self.plotHistory = 100000
self.samplerate = 0
self.nChannelsInData = 1
self.saveData = False
self.saveDataFormat = "csv"
self.saveDataFilename = "data"
self.configDone = False
self.inputToPlot_write_end, self.inputToPlot_read_end = mp.Pipe()
self.inputToFile_write_end, self.inputToFile_read_end = mp.Pipe()
self.output_write_end, self.output_read_end = mp.Pipe()
self.processes = {}
self.rdy = {}
self.inputChannels = []
self.activeChannels = {}
def __init__(self):
'''Execute a function asynchronously in another thread.'''
# management of execution queue
res = multiprocessing.Lock()
self.queue = multiprocessing.Condition(res)
self.state = []
# results
self.result = Queue.Queue()
# thread management
self.ev_unpaused = multiprocessing.Event()
self.ev_terminating = multiprocessing.Event()
self.thread = threading.Thread(target=self.__run__, name="Thread-{:s}-{:x}".format(self.__class__.__name__, id(self)))
# FIXME: we can support multiple threads, but since this is
# being bound by a single lock due to my distrust for IDA
# and race-conditions...we only use one.
self.lock = multiprocessing.Lock()
    self.__start()  # __init__ must not return a value
def __init__(self, in_queue, out_queue, conf, conf_lock):
Process.__init__(self)
self._in_queue = in_queue
self._out_queue = out_queue
self._stop = Event()
self._stop.set()
self._new_conf = Event()
self._new_conf.clear()
self._conf_lock = conf_lock
self._conf = conf
self._jpg_buffer = deque([])
self._client = None
self._error_time = None
def __init__(self, queue, kill_switch, directory=None, permit_nonzero=False):
"""Instantiate a new worker
Parameters
----------
queue : obj
An instance of a :obj:`Queue <multiprocessing.Queue>`
kill_switch : obj
An instance of a :obj:`Event <multiprocessing.Event>`
directory : str, optional
The directory to execute the jobs in
permit_nonzero : bool, optional
Allow non-zero return codes [default: False]
"""
super(Worker, self).__init__()
self.queue = queue
self.kill_switch = kill_switch
self.directory = directory
self.permit_nonzero = permit_nonzero
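The run() that pairs with this constructor is not shown; a minimal sketch of the consume-until-killed pattern, assuming queued jobs are callables:

import queue  # Queue on Python 2

def run(self):
    # Hypothetical worker loop: drain jobs until the kill switch fires.
    while not self.kill_switch.is_set():
        try:
            job = self.queue.get(timeout=1)  # timeout so the switch is re-checked
        except queue.Empty:
            continue
        job(directory=self.directory, permit_nonzero=self.permit_nonzero)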
def __init__(self, config):
default_config = Config(proc_count = 4, limit_batch_count = None)
self.config = default_config(**config)
self.exit = Event()
self.batch_queue = Queue(maxsize = 10)
if self.config.limit_batch_count is None:
self.limited = False
else:
self.limited = True
self.batch_list = []
self.index = -1
self.workers = []
for _ in range(self.config.proc_count):
self.workers.append(Process(target = config.worker, args = (self,)))
for w in self.workers:
w.daemon = True
w.start()
def __init__(self, config):
default_config = Config(proc_count = 4)
self.config = default_config(**config)
self.exit = Event()
self.task_list = config.task_list
self.task_queue = Queue(maxsize = 10)
self.batch_queue = Queue(maxsize = 10)
self.workers = []
self.distributor = Process(target = task_distributor, args = (self,))
for _ in range(self.config.proc_count):
self.workers.append(Process(target = config.worker, args = (self,)))
self.distributor.daemon = True
self.distributor.start()
for w in self.workers:
w.daemon = True
w.start()
def serve_many(workers=1):
# thank you sanic
workers = min(workers, multiprocessing.cpu_count())
event = multiprocessing.Event()
signal(SIGINT, lambda *_: event.set())
signal(SIGTERM, lambda *_: event.set())
processes = []
kwargs = dict(reuse_port=True)
for _ in range(workers):
# noinspection PyArgumentList
process = multiprocessing.Process(target=serve, kwargs=kwargs,
daemon=True)
process.start()
print('Started subprocess:', process.name, process.pid)
processes.append(process)
with contextlib.suppress(Exception):
while not event.is_set():
time.sleep(0.5)
    for process in processes:
        process.terminate()
    for process in processes:
        process.join()
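A usage sketch; serve() here is the single-process entry point assumed by the worker loop above:

if __name__ == '__main__':
    # Each worker binds the same port via reuse_port=True; SIGINT/SIGTERM set
    # the shared Event, after which all workers are terminated and joined.
    serve_many(workers=4)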
def test_sentinel(self):
if self.TYPE == "threads":
self.skipTest('test not appropriate for {}'.format(self.TYPE))
event = self.Event()
p = self.Process(target=self._test_sentinel, args=(event,))
with self.assertRaises(ValueError):
p.sentinel
p.start()
self.addCleanup(p.join)
sentinel = p.sentinel
self.assertIsInstance(sentinel, int)
self.assertFalse(wait_for_handle(sentinel, timeout=0.0))
event.set()
p.join()
self.assertTrue(wait_for_handle(sentinel, timeout=1))
def __init__(self, vnf_list):
# host cpu query
self.host_cpu_query = compute2vnfquery['host_cpu'].query_template.format('')
self.host_cpu_values = deque(maxlen=10)
self.vnf_list = vnf_list
# query the number of available cores
host_num_cpu_query = compute2vnfquery['num_cores'].query_template.format('')
ret = query_Prometheus(host_num_cpu_query)
self.num_cores = int(ret[1])
# cpu skewness query
self.skew_query_dict = {}
self.skew_value_dict = {}
for vnf_name in vnf_list:
skew_query = compute2vnfquery['skew_cpu'].query_template.format(vnf_name)
self.skew_query_dict[vnf_name] = skew_query
self.skew_value_dict[vnf_name] = deque(maxlen=5)
self.monitor = None
self.stop_event = threading.Event()
self.overload_flag = threading.Event()
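The monitor thread these events control is not shown; a sketch of the polling loop, with the stop event doubling as an interruptible sleep (interval assumed):

def _monitor_loop(self):
    # Hypothetical loop: sample each VNF's skew metric until stopped.
    while not self.stop_event.is_set():
        for vnf_name, query in self.skew_query_dict.items():
            ret = query_Prometheus(query)
            self.skew_value_dict[vnf_name].append(float(ret[1]))
        self.stop_event.wait(2)  # assumed poll interval; returns early on stop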
def __init__(self, simulation, queue):
"""MultiAgentProcess
Examples:
>>> process = MultiAgentProcess(simulation, queue)
>>> process.start() # Starts the simulation
>>> ...
>>> process.stop() # Stops the simulation
Args:
simulation (MultiAgentSimulation):
queue (multiprocessing.Queue):
"""
super(MultiAgentProcess, self).__init__()
self.simulation = simulation
self.exit = Event()
self.queue = queue
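The stop() mentioned in the docstring presumably just sets self.exit; a minimal run/stop sketch, assuming the simulation exposes a stepping method:

def run(self):
    # Hypothetical loop: advance the simulation until stop() is called.
    while not self.exit.is_set():
        self.simulation.update()              # assumed stepping API
        self.queue.put(self.simulation.data)  # assumed snapshot to report

def stop(self):
    self.exit.set()  # releases the loop in run(); the process then exits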