Example source code for the Python Queue() class

livemon.py (project: live-serial, author: rosenbrockc)
def _get_com(args):
    """Gets a list of configured COM ports for serial communication.
    """
    from liveserial.monitor import ComMonitorThread as CMT
    from multiprocessing import Queue
    dataq, errorq = Queue(), Queue()
    result = []
    msg.info("Starting setup of ports {}.".format(args["port"]), 2)
    if args["config"]:
        for port in args["port"]:
            if port.lower() != "aggregate":
                #The aggregate port name is just a shortcut so that we can plot
                #transforms between multiple sensor streams. It doesn't actually
                #represent a physical port that will be monitored.
                com = CMT.from_config(args["config"], port, dataq, errorq,
                                      args["listen"], args["sensors"])
                result.append(com)
    else:
        for port in args["port"]:
            com = CMT(dataq, errorq, port, args["baudrate"],
                      args["stopbits"], args["parity"], args["timeout"],
                      args["listen"], args["virtual"])
            result.append(com)
    return result
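
A single dataq/errorq pair is shared by every ComMonitorThread, so one consumer can drain readings from all ports while errors arrive on their own channel. A minimal, generic sketch of that fan-in shape (the producer below is a stand-in, not liveserial's API):

from multiprocessing import Process, Queue

def monitor(port, dataq, errorq):
    # Stand-in producer: each port pushes (port, value) onto the shared queue.
    try:
        for value in range(3):
            dataq.put((port, value))
    except Exception as exc:
        errorq.put((port, repr(exc)))

if __name__ == "__main__":
    dataq, errorq = Queue(), Queue()
    procs = [Process(target=monitor, args=(p, dataq, errorq))
             for p in ("COM3", "COM4")]
    for p in procs:
        p.start()
    for _ in range(6):            # 2 ports x 3 readings each
        print(dataq.get())
    for p in procs:
        p.join()
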
test_dummy_observer.py (project: pathspider, author: mami-project)
def test_dummy_observer():
    flowqueue = mp.Queue(QUEUE_SIZE)
    observer_shutdown_queue = mp.Queue(QUEUE_SIZE)

    observer = DummyObserver()
    observer_process = mp.Process(
        args=(flowqueue,
              observer_shutdown_queue),
        target=observer.run_flow_enqueuer,
        name='observer',
        daemon=True)
    observer_process.start()

    observer_shutdown_queue.put(True)

    assert flowqueue.get(True, timeout=3) == SHUTDOWN_SENTINEL

    observer_process.join(3)

    assert not observer_process.is_alive()
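
The test relies on a sentinel: once a value appears on the shutdown queue, the observer pushes SHUTDOWN_SENTINEL onto the flow queue so the consumer can unblock deterministically. A self-contained sketch of the same handshake (the enqueuer below stands in for pathspider's DummyObserver):

import multiprocessing as mp

SHUTDOWN_SENTINEL = None  # any unique, picklable marker works

def run_flow_enqueuer(flowqueue, shutdown_queue):
    # Emit flows until a shutdown request arrives, then emit the sentinel.
    while shutdown_queue.empty():
        flowqueue.put("flow")
    flowqueue.put(SHUTDOWN_SENTINEL)

if __name__ == "__main__":
    flowqueue = mp.Queue(16)
    shutdown_queue = mp.Queue(16)
    proc = mp.Process(target=run_flow_enqueuer,
                      args=(flowqueue, shutdown_queue), daemon=True)
    proc.start()
    shutdown_queue.put(True)
    while flowqueue.get(True, timeout=3) is not SHUTDOWN_SENTINEL:
        pass                      # drain flows until the sentinel arrives
    proc.join(3)
    assert not proc.is_alive()
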
mpqueue.py (project: easy-job, author: inb-co)
def start(self, no_runner=False):
        from multiprocessing import Process, Queue
        queue = Queue()
        logging.getLogger(self.logger).log(logging.DEBUG, "Starting {} MPQueue workers...".format(self.count))
        # Pop the template once, so a custom name applies to every process
        # and never leaks into the worker options spread below.
        process_name_template = self.options.pop('process_name_template',
                                                 "MPQueueProcess_{index}")
        if not no_runner:
            for process_index in range(self.count):
                process_name = process_name_template.format(index=process_index)
                worker_instance = MPQueueWorker(
                    result_backend=self.result_backend,
                    queue=queue,
                    logger=self.logger,
                    **self.options
                )
                p = Process(
                    name=process_name,
                    target=worker,
                    args=(worker_instance,)
                )
                p.daemon = True
                p.start()
        return MPQueueRunner(queue=queue, logger=self.logger)
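
start() fans a single Queue out to count daemon workers, and the returned runner presumably feeds jobs into that same queue. A stripped-down sketch of the shape (easy-job's worker and MPQueueRunner classes are not shown above, so plain stand-ins are used):

from multiprocessing import Process, Queue

def worker(queue):
    # Each worker blocks on the shared queue; a None job shuts it down.
    for job in iter(queue.get, None):
        print("processing", job)

if __name__ == "__main__":
    queue = Queue()
    procs = []
    for index in range(4):
        p = Process(name="MPQueueProcess_{index}".format(index=index),
                    target=worker, args=(queue,))
        p.daemon = True
        p.start()
        procs.append(p)
    for job in ("a", "b", "c"):
        queue.put(job)
    for _ in procs:
        queue.put(None)           # one sentinel per worker
    for p in procs:
        p.join()
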
rollouts.py (project: -NIPS-2017-Learning-to-Run, author: kyleliang919)
def __init__(self, args):
        self.args = args

        self.tasks = multiprocessing.JoinableQueue()
        self.results = multiprocessing.Queue()

        self.actors = []
        self.actors.append(Actor(self.args, self.tasks, self.results, 9999, args.monitor))

        for i in range(self.args.num_threads - 1):
            self.actors.append(Actor(self.args, self.tasks, self.results, 37*(i+3), False))

        for a in self.actors:
            a.start()

        # we will start by running 20,000 / 1000 = 20 episodes for the first iteration

        self.average_timesteps_in_episode = 1000
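
Pairing a JoinableQueue for tasks with a plain Queue for results lets the coordinator block on tasks.join() until every rollout has been consumed. A minimal sketch of that pattern (the actor function below stands in for the project's Actor class):

import multiprocessing

def actor(tasks, results):
    for task in iter(tasks.get, None):
        results.put(task * 2)
        tasks.task_done()
    tasks.task_done()             # account for the None sentinel too

if __name__ == "__main__":
    tasks = multiprocessing.JoinableQueue()
    results = multiprocessing.Queue()
    actors = [multiprocessing.Process(target=actor, args=(tasks, results))
              for _ in range(2)]
    for a in actors:
        a.start()
    for t in range(6):
        tasks.put(t)
    tasks.join()                  # returns once all 6 tasks are done
    for _ in actors:
        tasks.put(None)
    print(sorted(results.get() for _ in range(6)))
    for a in actors:
        a.join()
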
layer.py (project: py-faster-rcnn-resnet-imagenet, author: tianzhi0549)
def set_roidb(self, roidb):
        """Set the roidb to be used by this layer during training."""
        self._roidb = roidb
        self._shuffle_roidb_inds()
        if cfg.TRAIN.USE_PREFETCH:
            self._blob_queue = Queue(10)
            self._prefetch_process = BlobFetcher(self._blob_queue,
                                                 self._roidb,
                                                 self._num_classes)
            self._prefetch_process.start()
            # Terminate the child process when the parent exits
            def cleanup():
                print('Terminating BlobFetcher')
                self._prefetch_process.terminate()
                self._prefetch_process.join()
            import atexit
            atexit.register(cleanup)
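
Queue(10) bounds the prefetch depth: once ten blobs are waiting, BlobFetcher's put() blocks, so the producer can never run far ahead of the training loop. A minimal sketch of a bounded prefetcher with the same atexit cleanup (the fetcher below is a stand-in for BlobFetcher):

import atexit
from multiprocessing import Process, Queue

def fetcher(queue):
    batch = 0
    while True:
        queue.put("batch-%d" % batch)  # blocks once 10 batches are queued
        batch += 1

if __name__ == "__main__":
    blob_queue = Queue(10)
    prefetcher = Process(target=fetcher, args=(blob_queue,))
    prefetcher.start()

    def cleanup():
        print('Terminating prefetcher')
        prefetcher.terminate()
        prefetcher.join()
    atexit.register(cleanup)

    for _ in range(3):
        print(blob_queue.get())
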
pump.py (project: mcp-watch, author: bernard357)
def work_every_day(self, queue, region):
        """
        Handles data for one day and for one region

        :param queue: the list of days to consider
        :type queue: `Queue`

        :param region: the region to consider
        :type region: `str`

        This is run as an independent process, so it works asynchronously
        from the rest.
        """

        try:

            for cursor in iter(queue.get, 'STOP'):
                self.pull(cursor, region)
                time.sleep(0.5)

        except KeyboardInterrupt:
            pass
        except:
            raise
pump.py (project: mcp-watch, author: bernard357)
def work_every_minute(self, queue, region):
        """
        Handles data for one minute and for one region

        :param queue: the minute ticks for a given day
        :type queue: `Queue`

        :param region: the region to consider
        :type region: `str`

        This is run as an independent process, so it works asynchronously
        from the rest.
        """

        try:

            for cursor in iter(queue.get, 'STOP'):
                self.tick(cursor, region)
                time.sleep(0.5)

        except KeyboardInterrupt:
            pass
        except:
            raise
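
Both pump methods lean on the two-argument form of iter(): iter(queue.get, 'STOP') calls queue.get() repeatedly and ends the loop as soon as it returns the sentinel string 'STOP'. A minimal demonstration of the idiom:

from multiprocessing import Queue

queue = Queue()
for cursor in ('2017-01-01', '2017-01-02', 'STOP'):
    queue.put(cursor)

# iter(callable, sentinel): keep calling queue.get() until it returns 'STOP'
for cursor in iter(queue.get, 'STOP'):
    print('pulling', cursor)
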
runner.py (project: Stereo-Pose-Machines, author: ppwwyyxx)
def get_parallel_runner_1(path):
    param_dict = np.load(path, encoding='latin1').item()
    cfg = PredictConfig(
        model=Model(),
        session_init=ParamRestore(param_dict),
        session_config=get_default_sess_config(0.99),
        input_names=['input'],
        output_names=['resized_map']
    )
    inque = mp.Queue()
    outque = mp.Queue()
    with change_gpu(0):
        proc = MultiProcessQueuePredictWorker(1, inque, outque, cfg)
        proc.start()
    with change_gpu(1):
        pred1 = OfflinePredictor(cfg)
    def func1(img):
        inque.put((0,[[img]]))
    func1.outque = outque
    def func2(img):
        return pred1([[img]])[0][0]
    return func1, func2
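
func1 is fire-and-forget: it enqueues a (task id, input) pair for the worker on GPU 0, and callers later collect (task id, result) pairs from func1.outque, while func2 predicts synchronously on GPU 1. A generic, self-contained sketch of that request/response queue shape (the echo worker stands in for tensorpack's MultiProcessQueuePredictWorker):

import multiprocessing as mp

def predict_worker(inque, outque):
    # Echo worker: read (task_id, payload), write back (task_id, result).
    for task_id, payload in iter(inque.get, None):
        outque.put((task_id, payload * 2))

if __name__ == "__main__":
    inque, outque = mp.Queue(), mp.Queue()
    proc = mp.Process(target=predict_worker, args=(inque, outque))
    proc.start()
    inque.put((0, 21))
    print(outque.get())           # (0, 42)
    inque.put(None)               # shut the worker down
    proc.join()
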
layer.py (project: RON, author: taokong)
def set_roidb(self, roidb):
        """Set the roidb to be used by this layer during training."""
        self._roidb = roidb
        self._shuffle_roidb_inds()
        if cfg.TRAIN.USE_PREFETCH:
            self._blob_queue = Queue(10)
            self._prefetch_process = BlobFetcher(self._blob_queue,
                                                 self._roidb,
                                                 self._num_classes)
            self._prefetch_process.start()
            # Terminate the child process when the parent exits
            def cleanup():
                print('Terminating BlobFetcher')
                self._prefetch_process.terminate()
                self._prefetch_process.join()
            import atexit
            atexit.register(cleanup)
core.py (project: clopure, author: vbkaisetsu)
def iter_split_evaluate_wrapper(self, fn, local_vars, in_size, q_in, q_out):
        l = Lock()
        idx_q = Queue()
        def split_iter():
            try:
                while True:
                    l.acquire()
                    i, data_in = q_in.get()
                    idx_q.put(i)
                    if data_in is EOFMessage:
                        return
                    yield data_in
            except BaseException:
                traceback.print_exc(file=sys.stdout)
        gs = itertools.tee(split_iter(), in_size)
        for data_out in self.evaluate((fn,) + tuple((lambda i: (x[i] for x in gs[i]))(i) for i in range(in_size)), local_vars=local_vars):
            q_out.put((idx_q.get(), data_out))
            l.release()
        q_out.put((0, EOFMessage))
data_process.py (project: 3D-R2N2, author: chrischoy)
def test_process():
    from multiprocessing import Queue
    from lib.config import cfg
    from lib.data_io import category_model_id_pair

    cfg.TRAIN.PAD_X = 10
    cfg.TRAIN.PAD_Y = 10

    data_queue = Queue(2)
    category_model_pair = category_model_id_pair(dataset_portion=[0, 0.1])

    data_process = ReconstructionDataProcess(data_queue, category_model_pair)
    data_process.start()
    batch_img, batch_voxel = data_queue.get()

    kill_processes(data_queue, [data_process])
gpu.py (project: wepy, author: ADicksonLab)
def __init__(self, n_walkers, n_workers=None, gpu_indices=None):

        if gpu_indices is not None:
            self.gpu_indices = gpu_indices
            self.n_workers = len(gpu_indices)
        else:
            assert n_workers, "If gpu_indices are not given the n_workers must be given"
            self.n_workers = n_workers
            self.gpu_indices = range(n_workers)

        # make a Queue for free workers, when one is being used it is
        # popped off and locked
        self.free_workers = mulproc.Queue()
        # the semaphore provides the locks on the workers
        self.lock = mulproc.Semaphore(self.n_workers)
        # initialize a list to put results in
        self.results_list = mulproc.Manager().list()
        for i in range(n_walkers):
            self.results_list.append(None)

        # add the free worker indices (not device/gpu indices) to the
        # free workers queue
        for i in range(self.n_workers):
            self.free_workers.put(i)
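
The Queue doubles as a pool here: worker indices are checked out with get() and returned with put(), while the Semaphore caps how many tasks hold a worker at once. A minimal sketch of one checkout/check-in cycle under the same scheme:

import multiprocessing as mulproc

n_workers = 2
gpu_indices = [0, 1]

free_workers = mulproc.Queue()
lock = mulproc.Semaphore(n_workers)
for i in range(n_workers):
    free_workers.put(i)

with lock:                        # blocks when all workers are checked out
    worker = free_workers.get()   # take a free worker index from the pool
    gpu = gpu_indices[worker]
    print("running on GPU", gpu)
    free_workers.put(worker)      # check the worker back in
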
s3_service.py (project: orca, author: bdastur)
def list_buckets_fast(self):
        buckets = []
        jobs = []
        for profile in self.clients.keys():
            queue = mp.Queue()
            kwargs = {'profile_names': profile,
                      'queue': queue}

            process = mp.Process(target=self.list_buckets,
                                 kwargs=kwargs)
            process.start()
            jobs.append((process, queue))

        count = 0
        for job in jobs:
            process = job[0]
            queue = job[1]
            process.join()
            profile_buckets = queue.get()
            buckets.extend(profile_buckets)
            count += 1

        for job in jobs:
            process = job[0]
            if process.is_alive():
                process.terminate()

        return buckets
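
One process and one Queue per profile, then a fan-in loop. A caveat worth knowing: calling join() before get() can deadlock when a child puts more data than the underlying pipe buffers, because the child cannot exit until its queue is drained. A minimal sketch with the safer ordering (the lister below is a stand-in for the real S3 call):

import multiprocessing as mp

def list_buckets(profile_name, queue):
    # Stand-in for the real S3 call: emit a list of bucket names.
    queue.put(["%s-bucket-%d" % (profile_name, i) for i in range(3)])

if __name__ == "__main__":
    jobs = []
    for profile in ("dev", "prod"):
        queue = mp.Queue()
        process = mp.Process(target=list_buckets, args=(profile, queue))
        process.start()
        jobs.append((process, queue))

    buckets = []
    for process, queue in jobs:
        buckets.extend(queue.get())   # drain the result first ...
        process.join()                # ... then join, avoiding the deadlock
    print(buckets)
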
gfx.py (project: pyfds, author: emtpb)
def __init__(self, field, observed_component=None, steps_per_frame=10, scale=1,
                 frame_delay=1e-2):
        """Class constructor.

        Args:
            field: Field to be observed.
            observed_component: Component to be observed (as string).
            steps_per_frame: Simulation steps between updates of the animation.
            scale: Scale of the animation.
            frame_delay: Delay between animation updates.
        """

        self.field = field
        self.field_components = {name: getattr(self.field, name) for name in dir(self.field)
                                 if type(getattr(self.field, name)) == fld.FieldComponent}
        if observed_component:
            if observed_component in self.field_components.keys():
                self.observed_component = observed_component
            else:
                raise KeyError('Field component {} not found in given field.'
                               .format(observed_component))
        else:
            self.observed_component = list(self.field_components.keys())[0]

        self.steps_per_frame = int(steps_per_frame)
        self.scale = scale
        self.frame_delay = frame_delay

        self.show_boundaries = True
        self.show_materials = True
        self.show_output = True

        self._plot_queue = mp.Queue()
        self._x_axis_prefix, self._x_axis_factor = get_prefix(max(self.field.x.vector))
        self._t_prefix, self._t_factor = get_prefix(max(self.field.t.vector))

        self.axes = None
        self.plot_title = ''
        self.x_label = '$x$'
        self.time_precision = 2
gfx.py (project: pyfds, author: emtpb)
def _sim_function(self, queue):
        """Simulation function to be started as a separate process.

        Args:
            queue: Instance of multiprocessing.Queue that is used to transfer data between
                simulation and visualization process.
        """

        for ii in range(int(self.field.t.samples / self.steps_per_frame)):
            self.field.simulate(self.steps_per_frame)
            queue.put((self.field.t.vector[self.field.step],
                       getattr(self.field, self.observed_component).values))

        # return field when simulation finishes to get output signals
        queue.put(self.field)
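
The simulation process streams (time, values) tuples and finally puts the field object itself, so the consumer can tell frames from the end-of-run marker by type. A minimal sketch of the consuming side of that protocol (drawing logic omitted):

import multiprocessing as mp

def sim(queue):
    for step in range(3):
        queue.put((step * 1e-2, [0.0, 1.0]))   # one frame: (time, values)
    queue.put("field")                         # final object ends the stream

if __name__ == "__main__":
    queue = mp.Queue()
    mp.Process(target=sim, args=(queue,)).start()
    while True:
        msg = queue.get()
        if not isinstance(msg, tuple):         # the field object, not a frame
            break
        t, values = msg
        print("t=%.2f" % t, values)
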
process.py (project: PyPlanet, author: PyPlanet)
def __init__(self, queue, environment_name='default', pool=None, options=None):
        """
        Create an environment process of the controller itself.

        :param queue: Queue to hook on.
        :param environment_name: Name of environment.
        :param pool: Pool.
        :param options: Custom options.
        :type queue: multiprocessing.Queue
        :type environment_name: str
        :type pool: multiprocessing.Pool
        :type options: dict
        """
        self.queue = queue
        self.name = environment_name
        self.options = options or dict()

        self.max_restarts = 1
        self.restarts = 0

        self.process = multiprocessing.Process(target=_run, kwargs=dict(
            name=self.name,
            queue=self.queue,
            options=self.options,
        ))

        self.__last_state = True
molecule_counter.py (project: cellranger, author: 10XGenomics)
def get_total_conf_mapped_reads_in_cells(filename, filtered_barcodes, mem_gb):
        """ Number of confidently mapped reads w/ valid, filtered barcodes.
            Because this is called from a 'split' function, we must stay within the given mem limit.
            NOTE: We re-open the file for each chunk IN ISOLATED PROCESSES
                  due to a possible memory leak in h5py. Tests show the mem usage is nondeterministic, too.
                  https://github.com/h5py/h5py/issues/763 (among many others)
        Args: filtered_barcodes (set) - set of barcode strings (e.g., ACGT-1)
              filename (str) - path to molecule info HDF5 file
              mem_gb (int) - limit memory usage to this value """

        filtered_bcs_set = set(MoleculeCounter.get_compressed_bc_iter(filtered_barcodes))

        entries_per_chunk = int(np.floor(float(mem_gb*1e9)) / MoleculeCounter.get_record_bytes())
        print('Entries per chunk: %d' % entries_per_chunk)

        with MoleculeCounter.open(filename, 'r') as mc:
            num_entries = mc.nrows()

        total_mapped_reads = 0
        for start in range(0, num_entries, entries_per_chunk):
            queue = multiprocessing.Queue()
            p = multiprocessing.Process(target=MoleculeCounter.get_total_conf_mapped_reads_in_cells_chunk,
                                        args=(filename, filtered_bcs_set, start, entries_per_chunk, queue))
            p.start()
            p.join()
            total_mapped_reads += queue.get()

        return total_mapped_reads
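
Each chunk deliberately runs in a fresh process so that any memory leaked by h5py dies with the child; only a single number travels back per chunk, so get() after join() is safe in this case. A minimal sketch of the per-chunk isolation pattern (the chunk worker is a stand-in for the real counting function):

import multiprocessing

def count_chunk(start, chunk_len, queue):
    # Stand-in for get_total_conf_mapped_reads_in_cells_chunk.
    queue.put(sum(range(start, start + chunk_len)))

if __name__ == "__main__":
    total = 0
    for start in range(0, 10, 5):
        queue = multiprocessing.Queue()
        p = multiprocessing.Process(target=count_chunk,
                                    args=(start, 5, queue))
        p.start()
        p.join()                  # child exits, taking leaked memory with it
        total += queue.get()
    print(total)                  # sum(range(10)) == 45
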
sentiment.py (project: fccforensics, author: RagtagOpen)
def run(self):
        '''
            get documents without a sentiment tag that match significant terms:
            - significant terms from postive regex tagged vs others
            - extra multi match clause for stronger terms (in multiple term sets:
                positive vs negative, untagged, and all
            - phrase match net neutrality since both terms score high
        '''

        index_queue = multiprocessing.Queue()

        bulk_index_process = multiprocessing.Process(
            target=self.bulk_index, args=(index_queue,),
        )
        bulk_index_process.start()
        fetched = 0
        try:
            while fetched < self.limit:
                '''
                    use search instead of scan because keeping an ordered scan cursor
                    open negates the performance benefits
                '''
                resp = self.es.search(index='fcc-comments', body=self.query, size=self.limit)
                for doc in resp['hits']['hits']:
                    index_queue.put(doc['_id'])
                    fetched += 1
                    if not fetched % 100:
                        print('%s\t%s\t%s' % (fetched, doc['_score'],
                            doc['_source']['text_data']))
        except ConnectionTimeout:
            print('error fetching: connection timeout')

        index_queue.put(None)
        bulk_index_process.join()
installApp.py (project: Auto_Analysis, author: ztwo)
def __init__(self, all_result_path, device):
        """
        Initialise with a Queue that buffers work for this device.

        :param all_result_path: path where all results are stored
        :param device: device id
        """
        self.all_result_path = all_result_path
        self.device = device
        self.adb = lib.adbUtils.ADB(self.device)
        self.queue = Queue(10)

