Example source code for Python's Lock() class
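
All of the excerpts below share one pattern: a multiprocessing.Lock() is created in the parent process and handed to child processes, which take it around a critical section. As a baseline before the collected examples, here is a minimal runnable sketch of that pattern (all names are illustrative, not taken from any project below):

import multiprocessing

def bump(lock, counter):
    # "with lock:" acquires on entry and releases on exit,
    # even if the body raises.
    for _ in range(1000):
        with lock:
            counter.value += 1  # read-modify-write, made atomic by the lock

if __name__ == '__main__':
    lock = multiprocessing.Lock()
    # lock=False: raw shared memory, serialized by our explicit Lock instead
    counter = multiprocessing.Value('i', 0, lock=False)
    procs = [multiprocessing.Process(target=bump, args=(lock, counter))
             for _ in range(4)]
    for p in procs:
        p.start()
    for p in procs:
        p.join()
    print(counter.value)  # 4000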

core.py (project: clopure, author: vbkaisetsu)
def iter_split_evaluate_wrapper(self, fn, local_vars, in_size, q_in, q_out):
        l = Lock()
        idx_q = Queue()
        def split_iter():
            try:
                while True:
                    l.acquire()
                    i, data_in = q_in.get()
                    idx_q.put(i)
                    if data_in is EOFMessage:
                        return
                    yield data_in
            except BaseException:
                traceback.print_exc(file=sys.stdout)
        gs = itertools.tee(split_iter(), in_size)
        for data_out in self.evaluate((fn,) + tuple((lambda i: (x[i] for x in gs[i]))(i) for i in range(in_size)), local_vars=local_vars):
            q_out.put((idx_q.get(), data_out))
            l.release()
        q_out.put((0, EOFMessage))
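
The lock in this wrapper throttles the pipeline: split_iter() must acquire l before pulling the next item from q_in, and l is only released after the corresponding result has been put on q_out, so at most one item is in flight at a time while idx_q preserves the original input order. A stripped-down sketch of that handshake (names invented for illustration):

from multiprocessing import Lock

def throttled_pump(q_in, q_out, transform):
    # Hypothetical reduction of the acquire/release handshake above:
    # the lock is taken before each read and released after each write,
    # so reading never runs ahead of writing.
    gate = Lock()
    def pull():
        while True:
            gate.acquire()
            item = q_in.get()
            if item is None:  # sentinel ends the stream
                return
            yield item
    for item in pull():
        q_out.put(transform(item))
        gate.release()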
mpEngineProdCons.py (project: appcompatprocessor, author: mbevilacqua)
def removeProducer(self, noLock = False):
        if self.num_producers > 0:
            # Lock internal
            if not noLock: self.__internalLock__.acquire()

            # Remove last worker from worker pool
            (worker_num, producer, extra_arg_list) = self.producer_pool.pop()
            logger.debug("Removing Producer-%d" % worker_num)
            # Remove last worker's exitFlag
            producer_exitEvent = self.producer_pool_exitEvent.pop()

            # Set the worker's exit event
            if not producer_exitEvent.is_set():
                logger.debug("Producer-%d exitEvent SET" % worker_num)
                producer_exitEvent.set()

            # Update producer count
            self.num_producers -= 1

            # Release internal
            if not noLock: self.__internalLock__.release()
        else:
            logger.error("Attempted to remove producer from empty pool.")
sequence.py (project: shellbot, author: bernard357)
def __init__(self, bot=None, machines=None, **kwargs):
        """
        Implements a sequence of multiple machines

        :param machines: the sequence of machines to be run
        :type machines: list of Machine

        """
        self.bot = bot
        self.machines = machines

        self.lock = Lock()

        # prevent the Manager() process from being interrupted
        handler = signal.signal(signal.SIGINT, signal.SIG_IGN)

        self.mutables = Manager().dict()

        # restore the previous handler for the rest of the program
        signal.signal(signal.SIGINT, handler)

        self.on_init(**kwargs)
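
Both here and in context.py below, SIGINT is switched to SIG_IGN just before Manager() is called, so the manager's server process inherits the ignore disposition and survives Ctrl-C; the saved handler is then restored for the parent. The same trick as a standalone sketch:

import signal
from multiprocessing import Manager

def interrupt_safe_manager():
    # Ignore SIGINT just long enough for Manager() to fork its server
    # process, which inherits the SIG_IGN disposition.
    previous = signal.signal(signal.SIGINT, signal.SIG_IGN)
    try:
        return Manager()
    finally:
        # The parent gets its original Ctrl-C behaviour back.
        signal.signal(signal.SIGINT, previous)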
context.py (project: shellbot, author: bernard357)
def __init__(self, settings=None, filter=None):
        """
        Stores settings across multiple independent processing units

        :param settings: the set of variables managed in this context
        :type settings: dict

        :param filter: a function to interpret values on check()
        :type filter: callable

        """

        # prevent the Manager() process from being interrupted
        handler = signal.signal(signal.SIGINT, signal.SIG_IGN)

        self.lock = Lock()
        self.values = Manager().dict()

        # restore the previous handler for the rest of the program
        signal.signal(signal.SIGINT, handler)

        self.filter = filter if filter else self._filter

        if settings:
            self.apply(settings)
photo2map.py (project: photo2map, author: eatfears)
def main():
    threads = []

    output_image = Image.new('RGB', (output_width, output_height))
    output_image.save(output_filename)

    output_image_mutex = multiprocessing.Lock()

    for i in range(threads_num):
        t = multiprocessing.Process(target=worker, args=(i, output_image_mutex,))
        threads.append(t)
        t.start()

    for thread in threads:
        thread.join()

    print("Image saved")
_utils.py (project: idascripts, author: ctfhacker)
def __init__(self):
        '''Execute a function asynchronously in another thread.'''

        # management of execution queue
        res = multiprocessing.Lock()
        self.queue = multiprocessing.Condition(res)
        self.state = []

        # results
        self.result = Queue.Queue()

        # thread management
        self.ev_unpaused = multiprocessing.Event()
        self.ev_terminating = multiprocessing.Event()
        self.thread = threading.Thread(target=self.__run__, name="Thread-{:s}-{:x}".format(self.__class__.__name__, id(self)))

        # FIXME: we can support multiple threads, but since this is
        #        being bound by a single lock due to my distrust for IDA
        #        and race-conditions...we only use one.
        self.lock = multiprocessing.Lock()

        return self.__start()
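
Note that the Condition here is built on top of the explicit Lock res: multiprocessing.Condition(lock) makes the condition use that lock, so acquiring self.queue and acquiring res are the same operation. A small sketch of the wait/notify protocol this enables (names illustrative):

import multiprocessing

def waiter(cond, flag):
    with cond:                  # same as acquiring the underlying lock
        while flag.value == 0:  # guard against spurious wakeups
            cond.wait()         # releases the lock while blocked

def setter(cond, flag):
    with cond:
        flag.value = 1
        cond.notify_all()

if __name__ == '__main__':
    lock = multiprocessing.Lock()
    cond = multiprocessing.Condition(lock)  # shares lock
    flag = multiprocessing.Value('i', 0, lock=False)
    w = multiprocessing.Process(target=waiter, args=(cond, flag))
    w.start()
    setter(cond, flag)
    w.join()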
replay.py (project: cebl, author: idfah)
def __init__(self, mgr, pollSize=2):
        """
        Construct a new source for replaying EEG saved to file.

            pollSize:   Number of data samples collected during each poll.
                        Lower values result in better timing and marker
                        resolution but more CPU usage, while higher values
                        typically use less CPU but give worse timing results.
        """
        self.pollSize = pollSize
        self.lock = mp.Lock()

        self.replayData = None
        self.startIndex = 0

        Source.__init__(self, mgr=mgr, sampRate=256, chans=[str(i) for i in range(8)],
            configPanelClass=ReplayConfigPanel)
data_loader.py (project: dbnet_tensorflow, author: yuanluya)
def __init__(self, num_processor, batch_size, phase,
                 batch_idx_init = 0, data_ids_init = train_ids, capacity = 10):
        self.num_processor = num_processor
        self.batch_size = batch_size
        self.data_load_capacity = capacity
        self.manager = Manager()
        self.batch_lock = Lock()
        self.mutex = Lock()
        self.cv_full = Condition(self.mutex)
        self.cv_empty = Condition(self.mutex)
        self.data_load_queue = self.manager.list()
        self.cur_batch = self.manager.list([batch_idx_init])
        self.processors = []
        if phase == 'train':
            self.data_ids = self.manager.list(data_ids_init)
        elif phase == 'test':
            self.data_ids = self.manager.list(test_ids)
        else:
            raise ValueError('Could not set phase to %s' % phase)
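
cv_full and cv_empty deliberately share the single mutex, which is the textbook bounded-buffer arrangement: producers wait on cv_full when the queue has reached capacity, consumers wait on cv_empty when it is empty, and each side notifies the other after changing the queue. A hedged sketch of the protocol this implies (class and method names invented):

from multiprocessing import Condition, Lock, Manager

class BoundedBuffer(object):
    def __init__(self, capacity):
        self.capacity = capacity
        self.queue = Manager().list()
        self.mutex = Lock()
        self.cv_full = Condition(self.mutex)   # producers wait here
        self.cv_empty = Condition(self.mutex)  # consumers wait here

    def put(self, item):
        with self.mutex:
            while len(self.queue) >= self.capacity:
                self.cv_full.wait()      # buffer full: producer blocks
            self.queue.append(item)
            self.cv_empty.notify()       # wake one waiting consumer

    def get(self):
        with self.mutex:
            while len(self.queue) == 0:
                self.cv_empty.wait()     # buffer empty: consumer blocks
            item = self.queue.pop(0)
            self.cv_full.notify()        # wake one waiting producer
            return item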
_test_multiprocessing.py (project: ouroboros, author: pybee)
def test_thousand(self):
        if self.TYPE == 'manager':
            self.skipTest('test not appropriate for {}'.format(self.TYPE))
        passes = 1000
        lock = self.Lock()
        conn, child_conn = self.Pipe(False)
        for j in range(self.N):
            p = self.Process(target=self._test_thousand_f,
                           args=(self.barrier, passes, child_conn, lock))
            p.start()

        for i in range(passes):
            for j in range(self.N):
                self.assertEqual(conn.recv(), i)

#
#
#
_test_multiprocessing.py (project: ouroboros, author: pybee)
def test_getobj_getlock(self):
        val1 = self.Value('i', 5)
        lock1 = val1.get_lock()
        obj1 = val1.get_obj()

        val2 = self.Value('i', 5, lock=None)
        lock2 = val2.get_lock()
        obj2 = val2.get_obj()

        lock = self.Lock()
        val3 = self.Value('i', 5, lock=lock)
        lock3 = val3.get_lock()
        obj3 = val3.get_obj()
        self.assertEqual(lock, lock3)

        arr4 = self.Value('i', 5, lock=False)
        self.assertFalse(hasattr(arr4, 'get_lock'))
        self.assertFalse(hasattr(arr4, 'get_obj'))

        self.assertRaises(AttributeError, self.Value, 'i', 5, lock='navalue')

        arr5 = self.RawValue('i', 5)
        self.assertFalse(hasattr(arr5, 'get_lock'))
        self.assertFalse(hasattr(arr5, 'get_obj'))
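
The test walks through every behaviour of Value's lock argument: True or None allocates a fresh recursive lock, an explicit Lock is used as-is (lock3 equals lock), False returns the raw shared object with no get_lock()/get_obj() wrapper, and a non-lock value raises AttributeError. Passing your own lock is useful when several Values must be updated together atomically; a brief sketch:

import multiprocessing

# One lock guards two related shared values, so updates to the pair
# are atomic with respect to each other. Do not nest count.get_lock()
# inside: it is the same non-reentrant lock.
shared_lock = multiprocessing.Lock()
total = multiprocessing.Value('i', 0, lock=shared_lock)
count = multiprocessing.Value('i', 0, lock=shared_lock)

def record(sample):
    with total.get_lock():  # the very lock passed in above
        total.value += sample
        count.value += 1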
data_layer_siamese.py (project: Triplet_Loss_SBIR, author: TuBui)
def load_next_batch(self):
    res = {}
    #7
    lock = Lock()
    threads = [self.pool.apply_async(self.load_next_pair,(lock,)) for \
                i in range (self.batch_size)]
    thread_res = [thread.get() for thread in threads]
    res['data_s'] = np.asarray([tri['sketch'] for tri in thread_res])[:,None,:,:]
    res['data_i'] = np.asarray([tri['image'] for tri in thread_res])[:,None,:,:]
    res['label_s'] = np.asarray([tri['label_s'] for tri in thread_res],dtype=np.float32)[:,None]
    res['label_i'] = np.asarray([tri['label_i'] for tri in thread_res],dtype=np.float32)[:,None]
    return res
#==============================================================================
#     res['data_s'] = np.zeros((self.batch_size,1,self.outshape[0],\
#                             self.outshape[1]),dtype = np.float32)
#     res['data_i'] = np.zeros_like(res['data_a'],dtype=np.float32)
#     res['label'] = np.zeros((self.batch_size,1),dtype = np.float32)
#     for itt in range(self.batch_size):
#       trp = self.load_next_pair(1)
#       res['data_s'][itt,...] = trp['sketch']
#       res['data_i'][itt,...] = trp['image']
#       res['label'][itt,...] = trp['label']
#     return res
#==============================================================================
data_layer.py (project: Triplet_Loss_SBIR, author: TuBui)
def load_next_batch(self):
    res = {}
    #7
    lock = Lock()
    threads = [self.pool.apply_async(self.load_next_image,(lock,)) for \
                i in range (self.batch_size)]
    thread_res = [thread.get() for thread in threads]
    res['data'] = np.asarray([datum[0] for datum in thread_res])[:,None,:,:]
    res['label'] = np.asarray([datum[1] for datum in thread_res],dtype=np.float32)
    return res

#==============================================================================
#     res['data'] = np.zeros((self.batch_size,1,self.outshape[0],self.outshape[1]),dtype = np.float32)
#     res['label'] = np.zeros(self.batch_size,dtype = np.float32)
#     for itt in range(self.batch_size):
#       img, label = self.load_next_image(1)
#       res['data'][itt,...] = img
#       res['label'][itt] = label
#     return res
#==============================================================================
data_layer_2branchs.py (project: Triplet_Loss_SBIR, author: TuBui)
def load_next_batch(self):
    res = {}
    #7
    lock = Lock()
    threads = [self.pool.apply_async(self.load_next_pair,(lock,)) for \
                i in range (self.batch_size)]
    thread_res = [thread.get() for thread in threads]
    res['data_s'] = np.asarray([tri['sketch'] for tri in thread_res])[:,None,:,:]
    res['data_i'] = np.asarray([tri['image'] for tri in thread_res])[:,None,:,:]
    res['label_s'] = np.asarray([tri['label_s'] for tri in thread_res],dtype=np.float32)[:,None]
    res['label_i'] = np.asarray([tri['label_i'] for tri in thread_res],dtype=np.float32)[:,None]
    return res
#==============================================================================
#     res['data_s'] = np.zeros((self.batch_size,1,self.outshape[0],\
#                             self.outshape[1]),dtype = np.float32)
#     res['data_i'] = np.zeros_like(res['data_a'],dtype=np.float32)
#     res['label'] = np.zeros((self.batch_size,1),dtype = np.float32)
#     for itt in range(self.batch_size):
#       trp = self.load_next_pair(1)
#       res['data_s'][itt,...] = trp['sketch']
#       res['data_i'][itt,...] = trp['image']
#       res['label'][itt,...] = trp['label']
#     return res
#==============================================================================
main.py (project: ccdetection, author: tommiu)
def printResults(flow):
    try:
        stats_path = getArg(flow, 's', 'statsfile')
    except:
        stats_path = DEFAULT_STATS_PATH

    if os.path.isfile(stats_path):
        clones = SharedData(stats_path, multiprocessing.Lock()).getClones()
        print "Found %d code clones saved in file '%s':" % (
                                                        len(clones), stats_path
                                                        )
        for i, clone in enumerate(clones):
            print str(i) + ".", clone

    else:
        print "Given path is not a valid file: '%s'" % (stats_path)
run_daily.py (project: memex_ad_features, author: giantoak)
def create_phone_files(dataframe):
    # Drop all rows without a phone number
    dataframe = dataframe.dropna(subset=['phone'])
    dataframe['phone'] = dataframe['phone'].map(lambda x: re.sub('[^0-9]', '', str(x)))

    # Break file up by phone
    phone_numbers = dataframe.phone.unique()
    phone_dataframe = {phone_number: pandas.DataFrame() for phone_number in phone_numbers}
    for key in phone_dataframe.keys():
        phone_dataframe[key] = dataframe[:][dataframe.phone == key]

    # Check if a file already exists for each phone number: append if so, create a new file if not
    print 'Appending phone data to existing files'

    # `lock` is expected to be a module-level multiprocessing.Lock shared
    # with the worker processes; hold it only while appending to a file.
    for key, value in phone_dataframe.iteritems():
        if os.path.isfile('{0}phone_{1}.csv'.format(config['phone_data'], str(key))):
            lock.acquire()
            print 'lock has been set for file {0}'.format(key)
            value.to_csv('{0}phone_{1}.csv'.format(config['phone_data'], str(key)), mode='a', header=False, encoding='utf-8')
            lock.release()
        else:
            value.to_csv('{0}phone_{1}.csv'.format(config['phone_data'], str(key)), header=True, encoding='utf-8')
    print 'finished creating phone files'
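
If to_csv() raised between acquire() and release() above, the lock would stay held forever. A hedged rewrite of the append branch using the context-manager form (same surrounding names assumed; the identical copy in create_location_files.py below would benefit equally):

# Sketch: exception-safe version of the append branch above.
path = '{0}phone_{1}.csv'.format(config['phone_data'], str(key))
if os.path.isfile(path):
    with lock:  # released even if to_csv raises
        value.to_csv(path, mode='a', header=False, encoding='utf-8')
else:
    value.to_csv(path, header=True, encoding='utf-8')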
create_location_files.py (project: memex_ad_features, author: giantoak)
def create_phone_files(dataframe):
    # Drop all rows without a phone number
    dataframe = dataframe.dropna(subset=['phone'])
    dataframe['phone'] = dataframe['phone'].map(lambda x: re.sub('[^0-9]', '', str(x)))

    # Break file up by phone
    phone_numbers = dataframe.phone.unique()
    phone_dataframe = {phone_number: pandas.DataFrame() for phone_number in phone_numbers}
    for key in phone_dataframe.keys():
        phone_dataframe[key] = dataframe[:][dataframe.phone == key]

    # Check if a file already exists for each phone number: append if so, create a new file if not
    print 'Appending phone data to existing files'

    # `lock` is expected to be a module-level multiprocessing.Lock shared
    # with the worker processes; hold it only while appending to a file.
    for key, value in phone_dataframe.iteritems():
        if os.path.isfile('{0}phone_{1}.csv'.format(config['phone_data'], str(key))):
            lock.acquire()
            print 'lock has been set for file {0}'.format(key)
            value.to_csv('{0}phone_{1}.csv'.format(config['phone_data'], str(key)), mode='a', header=False, encoding='utf-8')
            lock.release()
        else:
            value.to_csv('{0}phone_{1}.csv'.format(config['phone_data'], str(key)), header=True, encoding='utf-8')
    print 'finished creating phone files'
test_multiprocess.py (project: crawler_old, author: salmonx)
def manager():

    cpus = mp.cpu_count()
    que = mp.Queue()

    lock = mp.Lock()
    plist = []
    for i in xrange(cpus-1):
        p = mp.Process(target=worker, args=(que, lock, i+1))
        p.start()
        plist.append(p)

    for p in plist:
        p.join()     

    ret = set()
    while que.qsize() > 0:
        item = que.get()
        ret.add(item)
    print ret
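
Queue.qsize() is only approximate (and raises NotImplementedError on macOS), so draining with while que.qsize() > 0 can stop early. A common hedged alternative is one sentinel per worker: each process puts None when done, and the parent drains until it has seen them all (plist and que as above):

# Sketch: sentinel-based drain; assumes each worker ends with que.put(None).
ret = set()
finished = 0
while finished < len(plist):
    item = que.get()
    if item is None:
        finished += 1
    else:
        ret.add(item)
print(ret)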
extractdoneurls.py (project: crawler_old, author: salmonx)
def manager(dbs):
    # leave one cpu 
    tasks = mp.cpu_count() -1 
    tasks = 1
    que = mp.Queue()
    for db in dbs:
        que.put(db)

    lock = mp.Lock()
    plist = []
    for i in xrange(tasks):
        p = mp.Process(target=worker, args=(que, lock, i+1 ))
        p.start()
        plist.append(p)

    for p in plist:
        p.join()
sqlite_addvalidfiled_update_fix.py (project: crawler_old, author: salmonx)
def manager(dbs):

    cpus = mp.cpu_count()
    que = mp.Queue()
    for db in dbs:
        que.put(db)

    lock = mp.Lock()
    plist = []
    for i in xrange(cpus-1):
        p = mp.Process(target=worker, args=(que, lock, i+1))
        p.start()
        plist.append(p)

    for p in plist:
        p.join()
sqlite_addvalidfiled_update.py (project: crawler_old, author: salmonx)
def manager(dbs, urldict):

    cpus = mp.cpu_count()
    que = mp.Queue()
    for db in dbs:
        que.put(db)

    lock = mp.Lock()
    plist = []
    for i in xrange(cpus-1):
        p = mp.Process(target=worker, args=(que, lock, i+1, urldict))
        p.start()
        plist.append(p)

    for p in plist:
        p.join()
extractseeds.py (project: crawler_old, author: salmonx)
def manager(dbs):
    tasks = mp.cpu_count()-1

    # que holds the db tasks; outque collects each worker's results
    que = mp.Queue()
    outque = mp.Queue()
    for db in dbs:
        que.put(db)

    lock = mp.Lock()
    plist = []
    for i in xrange(tasks):
        p = mp.Process(target=worker, args=(que, lock, i+1, outque))
        p.start()
        plist.append(p)

    for p in plist:
        p.join()

    # all extraction tasks are done at this point;
    # next, merge the results and insert them into redis
finger_mainpage.py (project: crawler_old, author: salmonx)
def manager():

    tasks = mp.cpu_count() - 1
    que = mp.Queue()
    lock = mp.Lock()
    plist = []

    initque(que)    

    for i in xrange(tasks):
        p = mp.Process(target=worker, args=(que, lock, i+1))
        p.start()
        plist.append(p)

    for p in plist:
        p.join()
count.py (project: crawler_old, author: salmonx)
def manager(dbs):
    # leave one cpu 
    tasks = mp.cpu_count() -1 
    #tasks = 1
    que = mp.Queue()
    for db in dbs:
        que.put(db)

    lock = mp.Lock()
    plist = []
    for i in xrange(tasks):
        p = mp.Process(target=worker, args=(que, lock, i+1))
        p.start()
        plist.append(p)

    for p in plist:
        p.join()
bakH.py (project: PBSuite, author: dbrowneup)
def __init__(self, filename, version=None, columns=None, parameters=None, mode='r'):
        if mode not in ['r', 'w', 'a']:
            logging.error("Mode must be r(ead), w(rite), or (a)ppend!")
            logging.error("Forcing 'r'")
        self.filename = filename
        if mode == 'w' and os.path.exists(self.filename):
            logging.error("Ouput H5 %s already exists!" % self.filename)
            exit(1)
        if mode == 'r' and not os.path.exists(self.filename):
            logging.error("Output H5 %s doesn't exist!" % self.filename)
            exit(1)
        if mode != 'r':
            self.__reopen(mode)
            self.__results.attrs["version"] = version
            self.__results.attrs["columns"] = columns
            self.__results.attrs["parameters"] = parameters
            self.__close()   

        self.__lock = multiprocessing.Lock()
HSpots.py (project: PBSuite, author: dbrowneup)
def __init__(self, filename, version=None, columns=None, parameters=None, mode='r'):
        if mode not in ['r', 'w', 'a']:
            logging.error("Mode must be r(ead), w(rite), or (a)ppend!")
            logging.error("Forcing 'r'")
        self.filename = filename
        if mode == 'w' and os.path.exists(self.filename):
            logging.error("Ouput H5 %s already exists!" % self.filename)
            exit(1)
        if mode == 'r' and not os.path.exists(self.filename):
            logging.error("Output H5 %s doesn't exist!" % self.filename)
            exit(1)
        if mode != 'r':
            self.__reopen(mode)
            self.__results.attrs["version"] = version
            self.__results.attrs["columns"] = columns
            self.__results.attrs["parameters"] = parameters
            self.__close()   

        self.__lock = multiprocessing.Lock()
_test_multiprocessing.py (project: kbe_server, author: xiaohaoppy)
def test_thousand(self):
        if self.TYPE == 'manager':
            self.skipTest('test not appropriate for {}'.format(self.TYPE))
        passes = 1000
        lock = self.Lock()
        conn, child_conn = self.Pipe(False)
        for j in range(self.N):
            p = self.Process(target=self._test_thousand_f,
                           args=(self.barrier, passes, child_conn, lock))
            p.start()

        for i in range(passes):
            for j in range(self.N):
                self.assertEqual(conn.recv(), i)

#
#
#
_test_multiprocessing.py (project: kbe_server, author: xiaohaoppy)
def test_getobj_getlock(self):
        val1 = self.Value('i', 5)
        lock1 = val1.get_lock()
        obj1 = val1.get_obj()

        val2 = self.Value('i', 5, lock=None)
        lock2 = val2.get_lock()
        obj2 = val2.get_obj()

        lock = self.Lock()
        val3 = self.Value('i', 5, lock=lock)
        lock3 = val3.get_lock()
        obj3 = val3.get_obj()
        self.assertEqual(lock, lock3)

        arr4 = self.Value('i', 5, lock=False)
        self.assertFalse(hasattr(arr4, 'get_lock'))
        self.assertFalse(hasattr(arr4, 'get_obj'))

        self.assertRaises(AttributeError, self.Value, 'i', 5, lock='navalue')

        arr5 = self.RawValue('i', 5)
        self.assertFalse(hasattr(arr5, 'get_lock'))
        self.assertFalse(hasattr(arr5, 'get_obj'))
Trader.py (project: AutoTrade, author: changye)
def __init__(self, account, password, notifier, ocr_service, debug_single_step=False):
        self.__account = account
        self.__password = password
        self.__notifier = notifier
        self.__ocr_service = ocr_service

        self.__manager = Manager()

        self.__job_list = self.__manager.list()
        self.__job_list_lock = Lock()

        self.__map = self.__manager.dict()
        self.__entrust_map = self.__manager.dict()

        self.__process = None
        self.__keep_working = Value('i', 1)

        if debug_single_step:
            self.__debug_single_step = Value('i', 1)
        else:
            self.__debug_single_step = Value('i', 0)

        self.__debug_single_step_go = Value('i', 0)
        self.__debug_single_step_lock = Lock()

