def run_mission(agents_def):
    assert len(agents_def) == 2, 'Incompatible number of agents (required: 2, got: %d)' % len(agents_def)
    quit = Event()
    processes = []
    for agent in agents_def:
        agent['quit'] = quit
        p = Process(target=agent_factory, kwargs=agent)
        p.daemon = True
        p.start()

        if agent['role'] == 0:
            sleep(1)  # Give the server a moment to start
        processes.append(p)
    quit.wait()
    for process in processes:
        process.terminate()
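For context, agent_factory is not shown on this page; a minimal hedged sketch of a worker that honours the shared quit event might look like the following (the loop body, the step count and the "role 0 finishes first" choice are placeholders, not part of the original project):

from time import sleep

def agent_factory(role=0, quit=None, **kwargs):
    # Hypothetical worker body: run until any participant sets the shared event.
    steps = 0
    while not quit.is_set():
        sleep(0.1)        # stand-in for one unit of agent work
        steps += 1
        if role == 0 and steps >= 10:
            quit.set()    # the server-side agent decides the mission is over,
                          # which releases quit.wait() in run_mission()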
def test_sentinel(self):
    if self.TYPE == "threads":
        self.skipTest('test not appropriate for {}'.format(self.TYPE))
    event = self.Event()
    p = self.Process(target=self._test_sentinel, args=(event,))
    with self.assertRaises(ValueError):
        p.sentinel
    p.start()
    self.addCleanup(p.join)
    sentinel = p.sentinel
    self.assertIsInstance(sentinel, int)
    self.assertFalse(wait_for_handle(sentinel, timeout=0.0))
    event.set()
    p.join()
    self.assertTrue(wait_for_handle(sentinel, timeout=1))
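Outside the test suite, Process.sentinel is typically used with multiprocessing.connection.wait() to detect process exit; a small self-contained example:

import multiprocessing
from multiprocessing.connection import wait

def _worker(event):
    event.wait()    # block until the parent signals us, then exit

if __name__ == '__main__':
    event = multiprocessing.Event()
    p = multiprocessing.Process(target=_worker, args=(event,))
    p.start()
    # The child is still waiting on the event, so its sentinel is not ready yet.
    assert wait([p.sentinel], timeout=0.0) == []
    event.set()
    p.join()
    # Once the child has exited, waiting on its sentinel returns immediately.
    assert wait([p.sentinel], timeout=1.0) == [p.sentinel]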
#
#
#
def __init__(self, resources_path, wallet_file_path, wallet_password, app, log_level=2):
    self.user_agent = str(uuid4().hex)
    wallet_log_path = os.path.join(os.path.dirname(wallet_file_path), "sumo-wallet-rpc.log")
    wallet_rpc_args = u'%s/bin/sumo-wallet-rpc --wallet-file %s --log-file %s --rpc-bind-port 19736 --user-agent %s --log-level %d' \
                      % (resources_path, wallet_file_path, wallet_log_path, self.user_agent, log_level)
    ProcessManager.__init__(self, wallet_rpc_args, "sumo-wallet-rpc")
    sleep(0.2)
    self.send_command(wallet_password)
    self.rpc_request = WalletRPCRequest(app, self.user_agent)
    # self.rpc_request.start()
    self.ready = False
    self.block_hex = None
    self.block_height = 0
    self.is_password_invalid = Event()
def __init__(self, var_name, port=PORT, **init_kwargs):
    super().__init__()
    self._exit = Event()
    self.var_name = var_name
    self.port = port
    self.entity_name = None
    self.socket = None
    self.fig = None
    self.plt = None
    self.init_kwargs = init_kwargs
def __init__(self, name, pipe_instance):
    multiprocessing.Process.__init__(self, name=name)
    self.pipe = pipe_instance
    self._output_complete_event = multiprocessing.Event()

def __init__(self, name, pipe_instance):
    threading.Thread.__init__(self, name=name)
    self.pipe = pipe_instance
    self._output_complete_event = threading.Event()

def __init__(self, name, pipe_instance):
    self.pipe = pipe_instance
    self._output_complete_event = threading.Event()
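The three variants above differ only in which Event implementation backs _output_complete_event; multiprocessing.Event and threading.Event expose the same set/is_set/wait interface, so the completion signal is handled identically either way. A small illustration (drain_stage is a hypothetical stand-in for the pipe-draining work):

import multiprocessing
import threading

def drain_stage(done_event):
    # ... consume items from the pipe here (omitted) ...
    done_event.set()    # works unchanged for either Event flavour

for make_event in (multiprocessing.Event, threading.Event):
    done = make_event()
    drain_stage(done)
    assert done.wait(timeout=1) is True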
def _process_worker(call_queue, result_queue):
    """Evaluates calls from call_queue and places the results in result_queue.

    This worker is run in a separate process.

    Args:
        call_queue: A multiprocessing.Queue of _CallItems that will be read and
            evaluated by the worker.
        result_queue: A multiprocessing.Queue of _ResultItems that will be written
            to by the worker.
    """
    while True:
        call_item = call_queue.get(block=True)
        if call_item is None:
            # Wake up the queue management thread
            result_queue.put(None)
            return
        try:
            r = call_item.fn(*call_item.args, **call_item.kwargs)
        except BaseException:
            e = sys.exc_info()[1]
            result_queue.put(_ResultItem(call_item.work_id,
                                         exception=e))
        else:
            result_queue.put(_ResultItem(call_item.work_id,
                                         result=r))
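_CallItem and _ResultItem are private helpers of the futures backport; to show the protocol end to end, here is a self-contained sketch with stand-in classes (CallItem, ResultItem and worker are illustrative names, not the library's own):

import multiprocessing
from collections import namedtuple

# Stand-ins for the private _CallItem / _ResultItem classes.
CallItem = namedtuple('CallItem', 'work_id fn args kwargs')
ResultItem = namedtuple('ResultItem', 'work_id result exception')

def worker(call_queue, result_queue):
    while True:
        call_item = call_queue.get(block=True)
        if call_item is None:            # shutdown sentinel from the parent
            result_queue.put(None)
            return
        try:
            r = call_item.fn(*call_item.args, **call_item.kwargs)
        except BaseException as e:
            result_queue.put(ResultItem(call_item.work_id, None, e))
        else:
            result_queue.put(ResultItem(call_item.work_id, r, None))

if __name__ == '__main__':
    call_q, result_q = multiprocessing.Queue(), multiprocessing.Queue()
    p = multiprocessing.Process(target=worker, args=(call_q, result_q))
    p.start()
    call_q.put(CallItem(1, pow, (2, 10), {}))
    print(result_q.get().result)         # 1024
    call_q.put(None)                     # ask the worker to exit
    p.join()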
def run(self):
    if not self.enabled():
        logging.info("Oplog tailer is disabled, skipping")
        return
    logging.info("Starting oplog tailers on all replica sets (options: compression=%s, status_secs=%i)" % (
        self.compression(), self.status_secs))
    self.timer.start(self.timer_name)

    for shard in self.replsets:
        tail_stop = Event()
        secondary = self.replsets[shard].find_secondary()
        mongo_uri = secondary['uri']
        shard_name = mongo_uri.replset

        oplog_file = self.prepare_oplog_files(shard_name)
        oplog_state = OplogState(self.manager, mongo_uri, oplog_file)
        thread = TailThread(
            self.backup_stop,
            tail_stop,
            mongo_uri,
            self.config,
            self.timer,
            oplog_file,
            oplog_state,
            self.do_gzip()
        )
        self.shards[shard] = {
            'stop': tail_stop,
            'thread': thread,
            'state': oplog_state
        }
        self.shards[shard]['thread'].start()

        while not oplog_state.get('running'):
            if self.shards[shard]['thread'].exitcode:
                raise OperationError("Oplog tailer for %s failed with exit code %i!" % (
                    mongo_uri, self.shards[shard]['thread'].exitcode))
            sleep(0.5)
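TailThread, OplogState and OperationError are specific to the backup tool; the coordination pattern itself is small: each tailer flips a shared 'running' flag once it is up and keeps working until its stop event is set. A generic sketch of that handshake, with hypothetical names:

import multiprocessing
from time import sleep

def tailer(stop_event, state):
    state['running'] = True          # report startup to the parent
    while not stop_event.is_set():
        sleep(0.5)                   # stand-in for tailing the oplog
    state['running'] = False

if __name__ == '__main__':
    manager = multiprocessing.Manager()
    state = manager.dict({'running': False})
    stop = multiprocessing.Event()
    proc = multiprocessing.Process(target=tailer, args=(stop, state))
    proc.start()
    while not state['running']:      # same wait-for-startup loop as in run() above
        if proc.exitcode:
            raise RuntimeError("tailer failed with exit code %i" % proc.exitcode)
        sleep(0.5)
    stop.set()                       # later: ask the tailer to finish
    proc.join()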
def test_stop_performer(self):
    """
    Given that I have started a performer
    When I stop the performer
    Then it should terminate the pump
    :return:
    """
    request = MyCommand()
    pipeline = Queue()
    connection = Connection(config.broker_uri, "examples.perfomer.exchange")
    configuration = BrightsideConsumerConfiguration(pipeline, "performer.test.queue", "examples.tests.mycommand")
    performer = Performer("test_channel", connection, configuration, mock_consumer_factory,
                          mock_command_processor_factory, map_my_command_to_request)

    header = BrightsideMessageHeader(uuid4(), request.__class__.__name__, BrightsideMessageType.MT_COMMAND)
    body = BrightsideMessageBody(JsonRequestSerializer(request=request).serialize_to_json(),
                                 BrightsideMessageBodyType.application_json)
    message = BrightsideMessage(header, body)

    pipeline.put(message)

    started_event = Event()
    p = performer.run(started_event)
    started_event.wait()

    time.sleep(1)

    performer.stop()

    p.join()

    self.assertTrue(True)
def _sub_process_main(started_event: Event,
                      channel_name: str,
                      connection: Connection,
                      consumer_configuration: BrightsideConsumerConfiguration,
                      consumer_factory: Callable[[Connection, BrightsideConsumerConfiguration, logging.Logger], BrightsideConsumer],
                      command_processor_factory: Callable[[str], CommandProcessor],
                      mapper_func: Callable[[BrightsideMessage], Request]) -> None:
    """
    This is the main method for the sub-process. Everything needed to create the message pump and
    channel must be passed in as parameters that can be pickled, because they are serialized into
    this process when it runs. The data should be value types, not reference types, as we receive
    a copy of the original. Inter-process communication is signalled by the event - to indicate
    startup - and by the pipeline, to carry a sentinel or stop message.
    :param started_event: Used by the sub-process to signal that it is ready
    :param channel_name: The name we want to give the channel to the broker for identification
    :param connection: The 'broker' connection
    :param consumer_configuration: How to configure our consumer of messages from the channel
    :param consumer_factory: Callback to create the consumer. User code, as we don't know what consumer library
        they want to use. Arame? Something else?
    :param command_processor_factory: Callback to register subscribers, policies, and task queues, then build the
        command processor. User code that provides us with their requests and handlers
    :param mapper_func: Maps between messages on the wire and our handlers
    :return:
    """
    logger = logging.getLogger(__name__)
    consumer = consumer_factory(connection, consumer_configuration, logger)
    channel = Channel(name=channel_name, consumer=consumer, pipeline=consumer_configuration.pipeline)
    # TODO: Fix defaults that need passed-in config values
    command_processor = command_processor_factory(channel_name)
    message_pump = MessagePump(command_processor=command_processor, channel=channel, mapper_func=mapper_func,
                               timeout=500, unacceptable_message_limit=None, requeue_count=None)
    logger.debug("Starting the message pump for %s", channel_name)
    message_pump.run(started_event)
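The test further up calls p = performer.run(started_event) and later joins p; a plausible way to realize that on top of _sub_process_main is to wrap it in a multiprocessing.Process, roughly as below (start_pump and its parameters are a hypothetical sketch of the pattern, not the library's actual Performer implementation):

from multiprocessing import Event, Process

def start_pump(channel_name, connection, configuration,
               consumer_factory, command_processor_factory, mapper_func):
    # Everything passed here must be picklable, as the docstring above notes.
    started = Event()
    p = Process(target=_sub_process_main,
                args=(started, channel_name, connection, configuration,
                      consumer_factory, command_processor_factory, mapper_func))
    p.start()
    started.wait()    # block until the pump signals that it is consuming
    return p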
def __init__(self, addr=None, base=NameServer):
    super().__init__()
    self._daemon = None
    self.base = base
    if isinstance(addr, int):
        addr = '127.0.0.1:%s' % addr
    self.addr = addr
    self.host, self.port = address_to_host_port(addr)
    self.shutdown_event = multiprocessing.Event()
    self.uri = None
    self.queue = multiprocessing.Queue()
def setUp(self):
    self.app = FakeApp()
    self.port = self.app.start_server()
    self.params = dict(simulation.Pipe.PARAMS)
    self.child = None  # multiprocessing.Process
    self.ready = multiprocessing.Event()
    self.shared = multiprocessing.Manager().Namespace()
def __init__(self, *args, **kwargs):
    super(BaseWorker, self).__init__(*args, **kwargs)
    self.should_exit = Event()
def __init__(self,  # type: ignore  # (mypy doesn't like the multiprocessing lib)
             options: seproxer.mitmproxy_extensions.options,
             server: mitmproxy.proxy.server,
             results_queue: multiprocessing.Queue,
             push_event: multiprocessing.Event,
             active_flows_state: multiprocessing.Value,
             ) -> None:
    """
    :param options: The extended mitmproxy options, used to configure our addons
    :param server: The mitmproxy server that the proxy will be interfacing with
    :param results_queue: The mitmproxy flows will be pushed into this queue
    :param push_event: When this event is set, the stored flows will
        be pushed into the `results_queue`
    :param active_flows_state: A shared state that determines if there are any active flows,
        that is, if any requests have pending responses
    """
    super().__init__(options, server)
    # This addon allows us to modify headers; this is particularly useful for appending
    # authentication cookies, since selenium_extensions cannot modify HTTP-only cookies
    self.addons.add(mitmproxy.addons.setheaders.SetHeaders())
    # This addon hooks into javascript window.onerror and all the console logging
    # methods to log messages into our defined "window.__seproxer_logs" object
    self.addons.add(mitmproxy_extensions.addons.JSConsoleErrorInjection())
    # This addon is responsible for storing our requests / responses in memory
    # and allows us to push the results through our results_queue
    self._memory_stream_addon = mitmproxy_extensions.addons.MemoryStream()
    self.addons.add(self._memory_stream_addon)

    self.results_queue = results_queue
    self.push_event = push_event
    self.active_flows_state = active_flows_state
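The push_event handshake described in the docstring (the controller sets the event, the proxy process drains whatever flows it has stored into results_queue) reduces to a standard producer/consumer pattern; a generic, hedged sketch with hypothetical names, independent of the mitmproxy classes:

import multiprocessing
from time import sleep

def producer(push_event, results_queue, stop_event):
    stored = []
    n = 0
    while not stop_event.is_set():
        stored.append('flow-%d' % n)    # stand-in for an intercepted flow
        n += 1
        if push_event.is_set():         # the controller asked for the results
            results_queue.put(list(stored))
            stored.clear()
            push_event.clear()
        sleep(0.01)

if __name__ == '__main__':
    push, stop = multiprocessing.Event(), multiprocessing.Event()
    results = multiprocessing.Queue()
    proc = multiprocessing.Process(target=producer, args=(push, results, stop))
    proc.start()
    sleep(0.1)                          # let a few "flows" accumulate
    push.set()                          # request whatever has been stored so far
    print(len(results.get(timeout=5)))  # number of flows collected up to the push
    stop.set()
    proc.join()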
def __init__(self,
             mitmproxy_options: mitmproxy_extensions.options.MitmproxyExtendedOptions) -> None:
    self.mitmproxy_options = mitmproxy_options
    # setup proxy server from options
    proxy_config = mitmproxy.proxy.config.ProxyConfig(mitmproxy_options)
    self._proxy_server = mitmproxy.proxy.server.ProxyServer(proxy_config)

    self._results_queue = multiprocessing.Queue()
    self._producer_push_event = multiprocessing.Event()  # type: ignore
    self._has_active_flows_state = multiprocessing.Value(ctypes.c_bool, False)

    self._proxy_proc = None  # type: t.Optional[ProxyProc]
def __init__(self, product_manager, logging_queue: Queue, exit_event: Event, ready_event: Event) -> None:
    Process.__init__(self)
    self.products = product_manager
    self.exit = exit_event
    self.ready_event = ready_event
    self.logging_queue = logging_queue
    self.order_book_manager = OrderBookManager(product_manager)
def __init__(self, product_manager: ProductManager, websocket_feed_queue: Queue, logging_queue: Queue,
             exit_event: Event, ready_event: Event) -> None:
    Process.__init__(self)
    self.websocket_feed_queue = websocket_feed_queue
    self.product_manager = product_manager
    self.exit = exit_event
    self.logging_queue = logging_queue
    self.ready_event = ready_event
    self.order_book_manager = OrderBookManager(self.product_manager)
def __init__(self, product_manager: ProductManager, websocket_feed_queue: Queue, logging_queue: Queue,
             exit_event: Event, ready_events: List[Event]) -> None:
    Process.__init__(self)
    self.websocket_feed_queue = websocket_feed_queue
    self.logging_queue = logging_queue
    self.exit = exit_event
    self.product_manager = product_manager
    self.order_book = PortfolioOrderBook(self.product_manager)
    self.portfolio = BasePortfolioGroup(self.order_book)
    self.ready_events = ready_events
    self.registered_orders = []
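The run() bodies for these workers are not shown; the usual shape with this kind of exit/ready wiring is to signal readiness after setup and then loop until the exit event is set. A hedged sketch of that shape, reusing the attribute names from the last __init__ above (the queue handling inside the loop is a placeholder):

def run(self) -> None:
    # Hypothetical body; Empty is queue.Empty.
    for ready in self.ready_events:
        ready.set()                      # signal the parent that setup is done
    while not self.exit.is_set():
        try:
            message = self.websocket_feed_queue.get(timeout=1)
        except Empty:
            continue                     # nothing arrived; re-check the exit flag
        self.logging_queue.put(message)  # placeholder for real message handling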