Example source code for the Python JoinableQueue() class

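Before the per-project listings, here is a minimal, self-contained sketch (not taken from any of the projects below) of the pattern they all share: producers put() work items onto a JoinableQueue, each consumer calls task_done() once per item it finishes, and join() blocks until the two counts match. Daemon consumers, as in the first snippet, need no explicit shutdown; several snippets further down use a None "poison pill" instead.

import multiprocessing

def worker(tasks, results):
    # Acknowledge every item with task_done() so that tasks.join() can return.
    while True:
        item = tasks.get()
        results.put(item * item)
        tasks.task_done()

if __name__ == '__main__':
    tasks = multiprocessing.JoinableQueue()
    results = multiprocessing.Queue()
    for _ in range(4):
        p = multiprocessing.Process(target=worker, args=(tasks, results))
        p.daemon = True          # daemon workers are reaped when the main process exits
        p.start()
    for n in range(10):
        tasks.put(n)
    tasks.join()                 # blocks until every put() has a matching task_done()
    print(sorted(results.get() for _ in range(10)))
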
util.py (project: RTCR, author: uubram)
def __init__(self, n_consumers, initializer = None, initargs = ()):
        self._tasks = mp.JoinableQueue()
        self._ntasks = 0
        self._results = mp.Queue()
        self._state = "OPEN"
        self._done = False

        self._consumers = [ ConnectedConsumer(self._tasks, self._results,
            initializer = initializer, initargs = initargs) \
                    for i in xrange(n_consumers) ]
        for consumer in self._consumers:
            consumer.daemon = True
storage_continous_parallel_image.py (project: trpo, author: jjkke88)
def __init__(self):
        self.args = pms
        self.tasks = multiprocessing.JoinableQueue()
        self.results = multiprocessing.Queue()
        self.actors = []
        self.actors.append(Actor(self.args, self.tasks, self.results, 9999, self.args.record_movie))
        for i in xrange(self.args.jobs-1):
            self.actors.append(Actor(self.args, self.tasks, self.results, 37*(i+3), False))
        for a in self.actors:
            a.start()
        # we will start by running 20,000 / 1000 = 20 episodes for the first iteration
        self.average_timesteps_in_episode = 1000
storage_continous_parallel.py (project: trpo, author: jjkke88)
def __init__(self):
        self.args = pms
        self.tasks = multiprocessing.JoinableQueue()
        self.results = multiprocessing.Queue()
        self.actors = []
        self.actors.append(Actor(self.args, self.tasks, self.results, 9999, self.args.record_movie))
        for i in xrange(self.args.jobs-1):
            self.actors.append(Actor(self.args, self.tasks, self.results, 37*(i+3), False))
        for a in self.actors:
            a.start()
        # we will start by running 20,000 / 1000 = 20 episodes for the first iteration
        self.average_timesteps_in_episode = 1000
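
The two trpo snippets above only build the actor pool; the dispatch side is not shown in this excerpt. A purely illustrative driver (names and task payload are assumptions, not taken from the project) would put one request per rollout, block on the JoinableQueue, then drain the plain results Queue:

def get_paths(self, num_rollouts):
    # Illustrative only: each Actor is assumed to get() a task, run one rollout,
    # put() the resulting path on self.results, and call task_done().
    for _ in range(num_rollouts):
        self.tasks.put(1)        # the task payload format here is an assumption
    self.tasks.join()
    return [self.results.get() for _ in range(num_rollouts)]
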
refactor.py (project: oil, author: oilshell)
def refactor(self, items, write=False, doctests_only=False,
                 num_processes=1):
        if num_processes == 1:
            return super(MultiprocessRefactoringTool, self).refactor(
                items, write, doctests_only)
        try:
            import multiprocessing
        except ImportError:
            raise MultiprocessingUnsupported
        if self.queue is not None:
            raise RuntimeError("already doing multiple processes")
        self.queue = multiprocessing.JoinableQueue()
        self.output_lock = multiprocessing.Lock()
        processes = [multiprocessing.Process(target=self._child)
                     for i in xrange(num_processes)]
        try:
            for p in processes:
                p.start()
            super(MultiprocessRefactoringTool, self).refactor(items, write,
                                                              doctests_only)
        finally:
            self.queue.join()
            for i in xrange(num_processes):
                self.queue.put(None)
            for p in processes:
                if p.is_alive():
                    p.join()
            self.queue = None
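
The queue discipline above depends on the worker loop in _child, which is not shown here: the super().refactor(...) call enqueues (args, kwargs) work items via an overridden refactor_file, each worker acknowledges every item with task_done(), and the None sentinels enqueued after queue.join() merely unblock the workers' final get(). A sketch of that loop, following the lib2to3 implementation this class derives from:

def _child(self):
    # Worker process body: pull (args, kwargs) tuples until the None sentinel arrives.
    task = self.queue.get(block=True)
    while task is not None:
        args, kwargs = task
        try:
            super(MultiprocessRefactoringTool, self).refactor_file(*args, **kwargs)
        finally:
            self.queue.task_done()   # lets queue.join() in refactor() return
        task = self.queue.get(block=True)
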
refactor.py (project: python2-tracer, author: extremecoders-re)
def refactor(self, items, write=False, doctests_only=False,
                 num_processes=1):
        if num_processes == 1:
            return super(MultiprocessRefactoringTool, self).refactor(
                items, write, doctests_only)
        try:
            import multiprocessing
        except ImportError:
            raise MultiprocessingUnsupported
        if self.queue is not None:
            raise RuntimeError("already doing multiple processes")
        self.queue = multiprocessing.JoinableQueue()
        self.output_lock = multiprocessing.Lock()
        processes = [multiprocessing.Process(target=self._child)
                     for i in xrange(num_processes)]
        try:
            for p in processes:
                p.start()
            super(MultiprocessRefactoringTool, self).refactor(items, write,
                                                              doctests_only)
        finally:
            self.queue.join()
            for i in xrange(num_processes):
                self.queue.put(None)
            for p in processes:
                if p.is_alive():
                    p.join()
            self.queue = None
multiprocessor.py (project: sparks, author: ImpactHorizon)
def __init__(self, 
                    functions, 
                    output_names=['val'], 
                    initializator=None, 
                    initializator_args={}, 
                    max_size=1024, 
                    threads_num=8, 
                    update=False,
                    resources_demanded=1,
                    mode="consumer",
                    counter=None):

        self.functions = functions
        self.output_names = output_names
        self.initializator = initializator
        self.initializator_args = initializator_args
        self.threads_num = threads_num
        self.update = update
        self.resources_demanded = resources_demanded
        self.mode = mode
        self.counter = counter

        self.out_queue = JoinableQueue(max_size)

        self.in_queue = None
        self.runners = None
        self.runners_events = None
multiprocessor.py (project: sparks, author: ImpactHorizon)
def init_input_queue(self, func, max_size=0):
        self.in_queue = JoinableQueue(max_size)
        func(self.in_queue)
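
A short usage sketch for init_input_queue: the func argument receives the freshly created JoinableQueue and is expected to fill it. The filler function and file names below are hypothetical:

def fill_with_paths(queue):
    # Hypothetical filler: enqueue the items the runner processes will consume.
    for path in ['a.txt', 'b.txt', 'c.txt']:
        queue.put(path)

processor.init_input_queue(fill_with_paths)   # 'processor' is assumed to be an instance of the class above
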
processpool.py (project: finance_news_analysis, author: pskun)
def __init__(self, num_processes, handle_queue_size, output_queue_size=0):
        self.__workers = []
        self.__num_processes = num_processes
        self.__queue = multiprocessing.JoinableQueue(handle_queue_size)
        if output_queue_size != 0:
            self.__output = multiprocessing.JoinableQueue(output_queue_size)
        else:
            self.__output = None
        pass
StatisticsManagerThread.py (project: chainer-deconv, author: germanRos)
def __init__(self, opts):
        self.opts = opts
        self.mode = None
        self.dic_stats = {}
        self.debug = False
        self.labels = self.opts['labels']
        self.colorsM = self.opts['colormap']
        self.LUT = fromHEX2RGB(self.colorsM)
        self.L = len(self.labels)
        self.last_net = []

        self.listManagers = self.opts['bmanagers']
        self.num_random_imgs = self.opts['samples.vis']
        self.policy = self.opts['policy']

        self.listSampleImages = []
        if(self.policy == 'fixed'):
            for i in range(len(self.listManagers)):
                self.listSampleImages.append(self.listManagers[i].getRandomSamples(self.num_random_imgs[i]))

        self.messages_queue = JoinableQueue()
        self.results_queue = JoinableQueue()
        self.inner_process = Process(target=self.dummy, args=(self.messages_queue, self.results_queue))
        self.inner_process.start()

    # --------------------------------:
replay_mineral.py (project: pysc2-examples, author: chris-chris)
def main(unused_argv):
  """Dump stats about all the actions that are in use in a set of replays."""
  run_config = run_configs.get()

  if not gfile.Exists(FLAGS.replays):
    sys.exit("{} doesn't exist.".format(FLAGS.replays))

  stats_queue = multiprocessing.Queue()
  stats_thread = threading.Thread(target=stats_printer, args=(stats_queue,))
  stats_thread.start()
  try:
    # For some reason buffering everything into a JoinableQueue makes the
    # program not exit, so save it into a list then slowly fill it into the
    # queue in a separate thread. Grab the list synchronously so we know there
    # is work in the queue before the SC2 processes actually run, otherwise
    # the replay_queue.join below succeeds without doing any work, and exits.
    print("Getting replay list:", FLAGS.replays)
    replay_list = sorted(run_config.replay_paths(FLAGS.replays))
    print(len(replay_list), "replays found.\n")
    replay_queue = multiprocessing.JoinableQueue(FLAGS.parallel * 10)
    replay_queue_thread = threading.Thread(target=replay_queue_filler,
                                           args=(replay_queue, replay_list))
    replay_queue_thread.daemon = True
    replay_queue_thread.start()

    for i in range(FLAGS.parallel):
      p = ReplayProcessor(i, run_config, replay_queue, stats_queue)
      p.daemon = True
      p.start()
      time.sleep(1)  # Stagger startups, otherwise they seem to conflict somehow

    replay_queue.join()  # Wait for the queue to empty.
  except KeyboardInterrupt:
    print("Caught KeyboardInterrupt, exiting.")
  finally:
    stats_queue.put(None)  # Tell the stats_thread to print and exit.
    stats_thread.join()
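
replay_queue_filler, referenced above, is the daemon-thread target that slowly feeds the pre-collected replay list into the bounded JoinableQueue; put() blocks once FLAGS.parallel * 10 items are pending, which is what throttles the ReplayProcessor workers. A sketch consistent with how it is called here:

def replay_queue_filler(replay_queue, replay_list):
  # Feed replay paths into the bounded queue; put() blocks while the queue is full.
  for replay_path in replay_list:
    replay_queue.put(replay_path)
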
parallel.py (project: baiji, author: bodylabs)
def parallel_for(a, cls, args=[], kwargs={}, num_processes=None):
    from multiprocessing import Process, JoinableQueue, cpu_count, Pipe
    if num_processes is None:
        num_processes = cpu_count()
    # Note that JoinableQueue uses an integer for tracking locations in the queue.
    # Because it's using shared memory it's not terribly flexible and gives annoyingly
    # unclear errors if you go over the limit. We'd like the queue to be as large as
    # possible so that we can avoid contention, but without allocating a max possible
    # size queue unless we need it, thus the calculation below. 32767 is a hard limit.
    q = JoinableQueue(maxsize=min(len(a)+num_processes, 2**15 - 1))

    output_pipes = [Pipe(duplex=False) for _ in range(num_processes)]
    send_pipes = [p for _, p in output_pipes]
    recv_pipes = [p for p, _ in output_pipes]
    pool = [Process(target=_parallel_for, args=(q, cls, pipe) + tuple(args), kwargs=kwargs)
            for pipe in send_pipes]
    output_watcher = MultiPipeWatcher(recv_pipes)
    try:
        for p in pool:
            p.start()
        output_watcher.start()
        for x in a:
            q.put(x)
        for _ in range(num_processes):
            q.put(None) # End markers
        q.close()
        q.join_thread()
        q.join()
        for p in pool:
            p.join()
        output_watcher.flush()
        output_watcher.join()
        combined_output = output_watcher.merged
        return combined_output
    except KeyboardInterrupt:
        print "Interrupted -- terminating worker processes"
        for p in pool:
            p.terminate()
        for p in pool:
            p.join()
        raise
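
_parallel_for, the worker target above, is not part of this excerpt. A hypothetical sketch of a worker of that shape, assuming cls wraps the per-item work as a callable and the send end of the Pipe carries results back to MultiPipeWatcher (these assumptions are illustrative rather than the project's actual code):

def _parallel_for(q, cls, pipe, *args, **kwargs):
    # Hypothetical worker: build one operator per process, consume items until
    # the None end marker, acknowledge each with task_done(), ship results back.
    operator = cls(*args, **kwargs)
    while True:
        item = q.get()
        try:
            if item is None:
                break
            pipe.send(operator(item))   # assumes instances of cls are callable
        finally:
            q.task_done()
    pipe.close()
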
refactor.py (project: specto, author: mrknow)
def refactor(self, items, write=False, doctests_only=False,
                 num_processes=1):
        if num_processes == 1:
            return super(MultiprocessRefactoringTool, self).refactor(
                items, write, doctests_only)
        try:
            import multiprocessing
        except ImportError:
            raise MultiprocessingUnsupported
        if self.queue is not None:
            raise RuntimeError("already doing multiple processes")
        self.queue = multiprocessing.JoinableQueue()
        self.output_lock = multiprocessing.Lock()
        processes = [multiprocessing.Process(target=self._child)
                     for i in xrange(num_processes)]
        try:
            for p in processes:
                p.start()
            super(MultiprocessRefactoringTool, self).refactor(items, write,
                                                              doctests_only)
        finally:
            self.queue.join()
            for i in xrange(num_processes):
                self.queue.put(None)
            for p in processes:
                if p.is_alive():
                    p.join()
            self.queue = None
core.py (project: netcrawl, author: Wyko)
def _kill_workers(task_queue, num_workers):
    '''
    Sends a NoneType poison pill to all active workers.

    Args:
        task_queue (JoinableQueue): The task queue upon which
            to put the poison pills
        num_workers (int): The number of workers, which translates
            to the number of poison pills to put in the queue
    '''

    for w in range(num_workers): task_queue.put(None)
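
A minimal usage sketch for _kill_workers (the worker function and job data are illustrative, not from netcrawl): the pills go in only after the real work, and each worker must acknowledge its pill with task_done() or the final join() never returns.

import multiprocessing

def _worker(task_queue):
    # Illustrative worker: stop on the None poison pill, acknowledging every item.
    while True:
        task = task_queue.get()
        try:
            if task is None:
                break
            print('processing %s' % task)   # placeholder for the real per-task work
        finally:
            task_queue.task_done()

if __name__ == '__main__':
    task_queue = multiprocessing.JoinableQueue()
    num_workers = 3
    for _ in range(num_workers):
        multiprocessing.Process(target=_worker, args=(task_queue,)).start()
    for job in ['10.0.0.1', '10.0.0.2']:    # illustrative job data
        task_queue.put(job)
    _kill_workers(task_queue, num_workers)  # one poison pill per worker
    task_queue.join()   # returns once every job and every pill is acknowledged
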
__init__.py (project: AIFun, author: Plottel)
def __init__(self, num_workers = 20):
        self.queue = Queue()
        self.pool = []
        self._setup_workers(num_workers)
refactor.py (project: sublimeTextConfig, author: luoye-fe)
def refactor(self, items, write=False, doctests_only=False,
                 num_processes=1):
        if num_processes == 1:
            return super(MultiprocessRefactoringTool, self).refactor(
                items, write, doctests_only)
        try:
            import multiprocessing
        except ImportError:
            raise MultiprocessingUnsupported
        if self.queue is not None:
            raise RuntimeError("already doing multiple processes")
        self.queue = multiprocessing.JoinableQueue()
        self.output_lock = multiprocessing.Lock()
        processes = [multiprocessing.Process(target=self._child)
                     for i in range(num_processes)]
        try:
            for p in processes:
                p.start()
            super(MultiprocessRefactoringTool, self).refactor(items, write,
                                                              doctests_only)
        finally:
            self.queue.join()
            for i in range(num_processes):
                self.queue.put(None)
            for p in processes:
                if p.is_alive():
                    p.join()
            self.queue = None
save.py (project: taktyk, author: kosior)
def __init__(self, count, func, end_clause='end', **func_kwargs):
        self.count = count
        self.func = func
        self.queue = multiprocessing.JoinableQueue()
        self.end_clause = end_clause
        self.func_kwargs = func_kwargs
postgres.py (project: fabricio, author: renskiy)
def __init__(self, *args, **kwargs):
        super(StreamingReplicatedPostgresqlContainer, self).__init__(
            *args, **kwargs)
        self.master_obtained = multiprocessing.Event()
        self.master_lock = multiprocessing.Lock()
        self.multiprocessing_data = data = multiprocessing.Manager().Namespace()
        data.db_exists = False
        data.exception = None
        self.instances = multiprocessing.JoinableQueue()
parallel.py (project: kaggle_dsb, author: syagev)
def __iter__(self):
        queue = JoinableQueue(maxsize=self.max_queue_size)

        n_batches, job_queue = self._start_producers(queue)

        # Run as consumer (read items from queue, in current thread)
        for x in xrange(n_batches):
            item = queue.get()
            #print queue.qsize(), "GET"
            yield item # Yield the item to the consumer (user)
            queue.task_done()

        queue.close()
        job_queue.close()
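
_start_producers is not included in this excerpt. A heavily hedged sketch of the shape it has to take for the loop above to terminate (the attribute names, job format, and _produce worker are all assumptions): it must arrange for exactly n_batches items to land on the bounded result queue and return that count together with the job queue that __iter__ closes at the end.

def _start_producers(self, result_queue):
    # Hypothetical: split the batch jobs over a few producer processes, each of
    # which puts its finished batches onto the bounded JoinableQueue.
    jobs = list(range(self.n_total_batches))      # attribute name is an assumption
    job_queue = JoinableQueue()
    for job in jobs:
        job_queue.put(job)
    for _ in range(self.n_producers):             # attribute name is an assumption
        Process(target=self._produce, args=(job_queue, result_queue)).start()
    return len(jobs), job_queue
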
refactor.py (project: pefile.pypy, author: cloudtracer)
def refactor(self, items, write=False, doctests_only=False,
                 num_processes=1):
        if num_processes == 1:
            return super(MultiprocessRefactoringTool, self).refactor(
                items, write, doctests_only)
        try:
            import multiprocessing
        except ImportError:
            raise MultiprocessingUnsupported
        if self.queue is not None:
            raise RuntimeError("already doing multiple processes")
        self.queue = multiprocessing.JoinableQueue()
        self.output_lock = multiprocessing.Lock()
        processes = [multiprocessing.Process(target=self._child)
                     for i in xrange(num_processes)]
        try:
            for p in processes:
                p.start()
            super(MultiprocessRefactoringTool, self).refactor(items, write,
                                                              doctests_only)
        finally:
            self.queue.join()
            for i in xrange(num_processes):
                self.queue.put(None)
            for p in processes:
                if p.is_alive():
                    p.join()
            self.queue = None
refactor.py (project: ouroboros, author: pybee)
def refactor(self, items, write=False, doctests_only=False,
                 num_processes=1):
        if num_processes == 1:
            return super(MultiprocessRefactoringTool, self).refactor(
                items, write, doctests_only)
        try:
            import multiprocessing
        except ImportError:
            raise MultiprocessingUnsupported
        if self.queue is not None:
            raise RuntimeError("already doing multiple processes")
        self.queue = multiprocessing.JoinableQueue()
        self.output_lock = multiprocessing.Lock()
        processes = [multiprocessing.Process(target=self._child)
                     for i in range(num_processes)]
        try:
            for p in processes:
                p.start()
            super(MultiprocessRefactoringTool, self).refactor(items, write,
                                                              doctests_only)
        finally:
            self.queue.join()
            for i in range(num_processes):
                self.queue.put(None)
            for p in processes:
                if p.is_alive():
                    p.join()
            self.queue = None

