def __init__(self, id, prediction_q, training_q, episode_log_q):
    super(Agent, self).__init__(name="Agent_{}".format(id))
    self.id = id
    self.prediction_q = prediction_q
    self.training_q = training_q
    self.episode_log_q = episode_log_q
    gym_env = gym.make(FLAGS.game)
    gym_env.seed(FLAGS.seed)
    self.env = AtariEnvironment(gym_env=gym_env, resized_width=FLAGS.resized_width,
                                resized_height=FLAGS.resized_height,
                                agent_history_length=FLAGS.agent_history_length)
    self.nb_actions = len(self.env.gym_actions)
    # bounded queue: the agent blocks here waiting for the predictor's answer
    self.wait_q = Queue(maxsize=1)
    # shared stop flag ('i' = C signed int); nonzero asks the agent loop to exit
    self.stop = Value('i', 0)
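The `stop` flag above is the standard shared-flag pattern: the parent flips a `Value` and the worker's `run()` loop polls it. A minimal, self-contained sketch of that pattern (the `Worker` class and its sleep body are illustrative, not taken from the snippet above):

from multiprocessing import Process, Value
import time

class Worker(Process):
    def __init__(self):
        super(Worker, self).__init__()
        self.stop = Value('i', 0)  # 0 means "keep running"

    def run(self):
        while not self.stop.value:  # poll the shared flag each iteration
            time.sleep(0.1)         # stand-in for one unit of real work

if __name__ == '__main__':
    w = Worker()
    w.start()
    time.sleep(0.5)
    w.stop.value = 1  # the parent flips the flag; the child sees it via shared memory
    w.join()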
def __init__(self, account, password, notifier, ocr_service, debug_single_step=False):
    self.__account = account
    self.__password = password
    self.__notifier = notifier
    self.__ocr_service = ocr_service
    # Manager-backed containers so the worker process sees updates
    self.__manager = Manager()
    self.__job_list = self.__manager.list()
    self.__job_list_lock = Lock()
    self.__map = self.__manager.dict()
    self.__entrust_map = self.__manager.dict()
    self.__process = None
    self.__keep_working = Value('i', 1)
    # flags for optional single-step debugging of the worker
    self.__debug_single_step = Value('i', 1 if debug_single_step else 0)
    self.__debug_single_step_go = Value('i', 0)
    self.__debug_single_step_lock = Lock()
def __init__(self, id, prediction_q, training_q, episode_log_q):
    super(ProcessAgent, self).__init__()
    self.id = id
    self.prediction_q = prediction_q
    self.training_q = training_q
    self.episode_log_q = episode_log_q
    self.env = Environment()
    self.num_actions = self.env.get_num_actions()
    self.actions = np.arange(self.num_actions)
    self.discount_factor = Config.DISCOUNT
    # one frame at a time
    self.wait_q = Queue(maxsize=1)
    self.exit_flag = Value('i', 0)
def start(self, config: dict):
    """Start the arbiter worker process"""
    with self.lock:
        if self.arbiter and not self.arbiter_stop_flag.value:
            msg = 'Failed to start. Arbiter process already running.'
            raise ServiceException(msg)
        if self.arbiter_process and self.arbiter_process.is_alive():
            LOG.info('Arbiter process is still alive. Terminating...')
            self.arbiter_process.terminate()
        self.arbiter_stop_flag = Value('b', False)
        self.config = Configuration().update(config)
        self.arbiter = Arbiter(self.config)
        self.arbiter_process = Process(target=self._loop,
                                       args=(self.arbiter,
                                             self.arbiter_stop_flag))
        self.arbiter_process.daemon = True
        self.arbiter_process.start()
        self.start_time = time.time()
        LOG.info('Arbiter process was started')
        return True
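A reduced sketch of the shutdown arrangement this method sets up: the flag travels into the worker's target as an argument, the parent sets it to request a cooperative stop, and `terminate()` remains the fallback. The `_loop` body here is a placeholder, not the actual Arbiter loop:

from multiprocessing import Process, Value
import time

def _loop(stop_flag):
    # run until the parent sets the shared boolean
    while not stop_flag.value:
        time.sleep(0.1)  # placeholder for one pass of real work

if __name__ == '__main__':
    stop_flag = Value('b', False)  # 'b' = C signed char; False is stored as 0
    proc = Process(target=_loop, args=(stop_flag,))
    proc.daemon = True
    proc.start()
    stop_flag.value = True       # cooperative shutdown request
    proc.join(timeout=2)
    if proc.is_alive():          # mirrors the terminate() branch above
        proc.terminate()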
def __init__(self, num_processes=1, tasks_per_requests=1, bitmap_size=(64 << 10)):
    self.to_update_queue = multiprocessing.Queue()
    self.to_master_queue = multiprocessing.Queue()
    self.to_master_from_mapserver_queue = multiprocessing.Queue()
    self.to_master_from_slave_queue = multiprocessing.Queue()
    self.to_mapserver_queue = multiprocessing.Queue()
    self.to_slave_queues = []
    for i in range(num_processes):
        self.to_slave_queues.append(multiprocessing.Queue())
    self.slave_locks_A = []
    self.slave_locks_B = []
    for i in range(num_processes):
        self.slave_locks_A.append(multiprocessing.Lock())
        self.slave_locks_B.append(multiprocessing.Lock())
        self.slave_locks_B[i].acquire()
    # Semaphore() needs an int; on Python 3, cpu_count()/2 is a float, so floor-divide
    self.reload_semaphore = multiprocessing.Semaphore(multiprocessing.cpu_count() // 2)
    self.num_processes = num_processes
    self.tasks_per_requests = tasks_per_requests
    self.stage_abortion_notifier = multiprocessing.Value('b', False)
    # lock=False returns a raw, unsynchronized value (single writer assumed)
    self.slave_termination = multiprocessing.Value('b', False, lock=False)
    self.sampling_failed_notifier = multiprocessing.Value('b', False)
    self.effector_mode = multiprocessing.Value('b', False)
    self.files = ["/dev/shm/kafl_fuzzer_master_", "/dev/shm/kafl_fuzzer_mapserver_", "/dev/shm/kafl_fuzzer_bitmap_"]
    self.sizes = [(65 << 10), (65 << 10), bitmap_size]
    self.tmp_shm = [{}, {}, {}]
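Note the `lock=False` on `slave_termination`: `multiprocessing.Value(..., lock=False)` returns a raw shared ctypes object with no wrapping lock (and no `get_lock()` method), which is only safe when a single writer flips the flag. A small sketch of the difference, under that single-writer assumption:

import multiprocessing

locked = multiprocessing.Value('b', False)            # synchronized wrapper, has get_lock()
raw = multiprocessing.Value('b', False, lock=False)   # raw value, no lock at all

with locked.get_lock():           # serialized read-modify-write
    locked.value = True

raw.value = True                  # plain assignment; fine for one writer, many readers
print(hasattr(raw, 'get_lock'))   # False: raw values carry no lock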
def __init__(self):
    self._playlist = []
    self._counter = Value('i', 0)
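Incrementing a counter like `_counter` from several processes needs care: `counter.value += 1` is a read-modify-write, and the `Value`'s internal lock protects the read and the write separately, not the pair. The usual idiom is to take `get_lock()` around the increment; a minimal sketch:

from multiprocessing import Process, Value

def bump(counter, n):
    for _ in range(n):
        with counter.get_lock():   # makes the read-increment-write atomic
            counter.value += 1

if __name__ == '__main__':
    counter = Value('i', 0)
    procs = [Process(target=bump, args=(counter, 1000)) for _ in range(4)]
    for p in procs:
        p.start()
    for p in procs:
        p.join()
    print(counter.value)  # reliably 4000; without get_lock() it can come up short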
def __init__(self, poseNet, config, di, comrefNet=None):
    """
    Initialize data
    :param poseNet: network for pose estimation
    :param config: configuration
    :param di: depth importer
    :param comrefNet: refinement network from center of mass detection
    :return: None
    """
    # handpose CNN
    self.importer = di
    self.poseNet = poseNet
    self.comrefNet = comrefNet
    # configuration
    self.config = config
    self.initialconfig = config
    # synchronization between threads
    self.queue = Queue()
    self.stop = Value('b', False)
    # for calculating FPS
    self.lastshow = time.time()
    # hand left/right
    self.hand = self.HAND_LEFT
    # initial state
    self.state = self.STATE_IDLE
    # hand size estimation
    self.handsizes = []
    self.numinitframes = 50
    # hand tracking or detection
    self.tracking = False
    self.lastcom = (0, 0, 0)
    # force the networks to compile their output functions up front
    self.poseNet.computeOutput(numpy.zeros(self.poseNet.cfgParams.inputDim, dtype='float32'))
    if self.comrefNet is not None:
        self.comrefNet.computeOutput([numpy.zeros(sz, dtype='float32') for sz in self.comrefNet.cfgParams.inputDim])
def start(self):
    import StaticUPnP_Settings
    permissions = Namespace(**StaticUPnP_Settings.permissions)
    print(permissions)
    if permissions.drop_permissions:
        self.drop_privileges(permissions.user, permissions.group)
    self.setup_sockets()
    self.running = Value(ctypes.c_int, 1)
    self.queue = Queue()
    self.reciever_thread = Process(target=self.socket_handler, args=(self.queue, self.running))
    self.reciever_thread.start()
    self.runner_thread = Process(target=self.run, args=(self.queue, self.running))
    self.runner_thread.start()
def init_marker_cacher(self):
    from marker_detector_cacher import fill_cache
    visited_list = [False if x == False else True for x in self.cache]
    video_file_path = self.g_pool.capture.source_path
    timestamps = self.g_pool.capture.timestamps
    self.cache_queue = mp.Queue()
    self.cacher_seek_idx = mp.Value('i', 0)
    self.cacher_run = mp.Value(c_bool, True)
    self.cacher = mp.Process(target=fill_cache,
                             args=(visited_list, video_file_path, timestamps,
                                   self.cache_queue, self.cacher_seek_idx,
                                   self.cacher_run, self.min_marker_perimeter_cacher))
    self.cacher.start()
def __init__(self, opt, world):
    super().__init__(opt)
    self.inner_world = world
    self.numthreads = opt['numthreads']
    self.sync = {  # synchronization primitives
        # semaphores for counting queued examples
        'queued_sem': Semaphore(0),   # counts num exs to be processed
        'threads_sem': Semaphore(0),  # counts threads
        'reset_sem': Semaphore(0),    # allows threads to reset
        # flags for communicating with threads
        'reset_flag': Value('b', False),  # threads should reset
        'term_flag': Value('b', False),   # threads should terminate
        # counters
        'epoch_done_ctr': Value('i', 0),  # number of done threads
        'total_parleys': Value('l', 0),   # number of parleys in threads
    }
    # don't let threads create more threads!
    self.threads = []
    for i in range(self.numthreads):
        self.threads.append(HogwildProcess(i, opt, world, self.sync))
    for t in self.threads:
        t.start()
    # wait until every thread signals readiness on the semaphore
    for _ in self.threads:
        self.sync['threads_sem'].acquire()
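The final loop is a startup handshake: each `HogwildProcess` is expected to release `threads_sem` once it is ready, so the constructor returns only after every worker has come up. The handshake in isolation (a sketch; the worker body is assumed, not taken from `HogwildProcess`):

from multiprocessing import Process, Semaphore
import time

def worker(threads_sem):
    time.sleep(0.2)        # stand-in for setup work
    threads_sem.release()  # signal "ready" to the parent

if __name__ == '__main__':
    threads_sem = Semaphore(0)  # starts at 0, so acquire() blocks until a release()
    procs = [Process(target=worker, args=(threads_sem,)) for _ in range(4)]
    for p in procs:
        p.start()
    for _ in procs:
        threads_sem.acquire()   # returns once per ready worker
    print('all workers ready')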
def main():
    env = Env()
    play_pipe, predict_pipe = Pipe()
    train_pipe1, train_pipe2 = Pipe()
    is_training = Value("b", True)
    manager = DQNManager(env.state_n, env.action_n, train_pipe1, predict_pipe, is_training)
    controller = AIControl(env, train_pipe2, play_pipe, is_training)
    manager.start()
    controller.control_start()
    manager.join()
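`main()` combines `Pipe()`s for data with a shared boolean for mode control. A stripped-down sketch of that shape, with placeholder workers standing in for `DQNManager` and `AIControl`:

from multiprocessing import Pipe, Process, Value

def producer(conn, is_training):
    for step in range(5):
        conn.send(step)
    is_training.value = False  # flip the shared mode flag
    conn.send(None)            # sentinel: no more data

def consumer(conn, is_training):
    while True:
        item = conn.recv()
        if item is None:
            break
        print('got', item, 'training =', bool(is_training.value))

if __name__ == '__main__':
    parent_conn, child_conn = Pipe()
    is_training = Value('b', True)
    p1 = Process(target=producer, args=(parent_conn, is_training))
    p2 = Process(target=consumer, args=(child_conn, is_training))
    p1.start(); p2.start()
    p1.join(); p2.join()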
def __init__(self, hosts):
    self._task_ctx = None
    self._item_ctx = None
    # ansible has no callback to tell us that no host matched,
    # so we do that check ourselves using this list.
    self._hosts = copy.deepcopy(hosts)
    # ansible runs jobs in fork()ed workers; shared memory keeps
    # these control variables in sync across them.
    self._reaper_queue = multiprocessing.Queue()
    self._playbook_mode = multiprocessing.Value('I', 0)
def __init__(self,  # type: ignore  # (mypy doesn't like multiprocessing lib)
             options: seproxer.mitmproxy_extensions.options,
             server: mitmproxy.proxy.server,
             results_queue: multiprocessing.Queue,
             push_event: multiprocessing.Event,
             active_flows_state: multiprocessing.Value,
             ) -> None:
    """
    :param options: The extended mitmproxy options, used to configure our addons
    :param server: The mitmproxy server that the proxy will be interfacing with
    :param results_queue: The mitmproxy flows will be pushed into this queue
    :param push_event: When this event is set, the stored flows will
                       be pushed into the `results_queue`
    :param active_flows_state: A shared state that determines if there are any active flows,
                               that is, if any requests have pending responses
    """
    super().__init__(options, server)
    # This addon allows us to modify headers; this is particularly useful for appending
    # authentication cookies, since selenium_extensions cannot modify HTTP-only cookies
    self.addons.add(mitmproxy.addons.setheaders.SetHeaders())
    # This add-on hooks into javascript window.onerror and all the console logging
    # methods to log messages into our defined "window.__seproxer_logs" object
    self.addons.add(mitmproxy_extensions.addons.JSConsoleErrorInjection())
    # This addon is responsible for storing our requests / responses in memory
    # and lets us push the results through our results_queue
    self._memory_stream_addon = mitmproxy_extensions.addons.MemoryStream()
    self.addons.add(self._memory_stream_addon)
    self.results_queue = results_queue
    self.push_event = push_event
    self.active_flows_state = active_flows_state
def __init__(self,
             mitmproxy_options: mitmproxy_extensions.options.MitmproxyExtendedOptions) -> None:
    self.mitmproxy_options = mitmproxy_options
    # set up the proxy server from the options
    proxy_config = mitmproxy.proxy.config.ProxyConfig(mitmproxy_options)
    self._proxy_server = mitmproxy.proxy.server.ProxyServer(proxy_config)
    self._results_queue = multiprocessing.Queue()
    self._producer_push_event = multiprocessing.Event()  # type: ignore
    self._has_active_flows_state = multiprocessing.Value(ctypes.c_bool, False)
    self._proxy_proc = None  # type: t.Optional[ProxyProc]
def __init__(self, q_function, optimizer,
             t_max, gamma, i_target, explorer, phi=lambda x: x,
             average_q_decay=0.999, logger=getLogger(__name__),
             batch_states=batch_states):
    self.shared_q_function = q_function
    self.target_q_function = copy.deepcopy(q_function)
    self.q_function = copy.deepcopy(self.shared_q_function)
    # `async` is chainerrl's misc.async helper module (renamed async_ in later
    # releases, after async became a reserved word in Python 3.7)
    async.assert_params_not_shared(self.shared_q_function, self.q_function)
    self.optimizer = optimizer
    self.t_max = t_max
    self.gamma = gamma
    self.explorer = explorer
    self.i_target = i_target
    self.phi = phi
    self.logger = logger
    self.average_q_decay = average_q_decay
    self.batch_states = batch_states
    # global step counter shared across actor processes ('l' = C signed long)
    self.t_global = mp.Value('l', 0)
    self.t = 0
    self.t_start = 0
    self.past_action_values = {}
    self.past_states = {}
    self.past_rewards = {}
    self.average_q = 0
def test_run_async(self):
    counter = mp.Value('l', 0)

    def run_func(process_idx):
        for _ in range(1000):
            # get_lock() guards the shared value, making the
            # read-increment-write atomic across processes
            with counter.get_lock():
                counter.value += 1

    async.run_async(4, run_func)
    self.assertEqual(counter.value, 4000)
def initSharedMemoryState(self):
    self._builtBox = multiprocessing.Value('b', 1 if self.built else 0)
def pquery(self, x_list, k=1, eps=0, p=2,
           distance_upper_bound=np.inf):
    x = np.array(x_list)
    nx, mx = x.shape
    shmem_x = mp.Array(ctypes.c_double, nx * mx)
    shmem_d = mp.Array(ctypes.c_double, nx * k)
    shmem_i = mp.Array(ctypes.c_double, nx * k)
    _x = shmem_as_nparray(shmem_x).reshape((nx, mx))
    _d = shmem_as_nparray(shmem_d).reshape((nx, k))
    _i = shmem_as_nparray(shmem_i)
    if k != 1:
        _i = _i.reshape((nx, k))
    _x[:, :] = x
    nprocs = num_cpus()
    scheduler = Scheduler(nx, nprocs)
    # shared error counter, set by workers on failure
    ierr = mp.Value(ctypes.c_int, 0)
    query_args = (scheduler,
                  self.shmem_data, self.n, self.m, self.leafsize,
                  shmem_x, nx, shmem_d, shmem_i,
                  k, eps, p, distance_upper_bound,
                  ierr)
    # `proc` avoids shadowing the Minkowski parameter `p` above
    pool = [mp.Process(target=_pquery, args=query_args) for _ in range(nprocs)]
    for proc in pool:
        proc.start()
    for proc in pool:
        proc.join()
    if ierr.value != 0:
        raise RuntimeError('%d errors in worker processes' % ierr.value)
    return _d.copy(), _i.astype(int).copy()
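`shmem_as_nparray` is not shown here; the usual way to view an `mp.Array` as a NumPy array without copying is `np.frombuffer` over the underlying ctypes buffer. A plausible definition, assuming the helper does exactly that:

import ctypes
import multiprocessing as mp
import numpy as np

def shmem_as_nparray(shmem_array):
    # zero-copy view: the ndarray shares memory with the mp.Array,
    # so writes by worker processes are visible through it
    return np.frombuffer(shmem_array.get_obj())

if __name__ == '__main__':
    shmem = mp.Array(ctypes.c_double, 6)           # locked shared array of 6 doubles
    view = shmem_as_nparray(shmem).reshape((2, 3))
    view[:, :] = 1.0
    print(shmem[:])  # [1.0, 1.0, 1.0, 1.0, 1.0, 1.0]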
def test_server_singleproc(restore_signal):
    started = mp.Value('i', 0)
    terminated = mp.Value('i', 0)

    def interrupt():
        os.kill(0, signal.SIGINT)

    @aiotools.actxmgr
    async def myserver(loop, proc_idx, args):
        nonlocal started, terminated
        assert proc_idx == 0
        assert len(args) == 0
        await asyncio.sleep(0)
        with started.get_lock():
            started.value += 1
        # deliver SIGINT shortly after startup to trigger shutdown
        loop.call_later(0.2, interrupt)
        yield
        await asyncio.sleep(0)
        with terminated.get_lock():
            terminated.value += 1

    aiotools.start_server(myserver)
    assert started.value == 1
    assert terminated.value == 1