def __init__(self, account, password, notifier, ocr_service, debug_single_step=False):
    self.__account = account
    self.__password = password
    self.__notifier = notifier
    self.__ocr_service = ocr_service
    self.__manager = Manager()
    self.__job_list = self.__manager.list()
    self.__job_list_lock = Lock()
    self.__map = self.__manager.dict()
    self.__entrust_map = self.__manager.dict()
    self.__process = None
    self.__keep_working = Value('i', 1)
    if debug_single_step:
        self.__debug_single_step = Value('i', 1)
    else:
        self.__debug_single_step = Value('i', 0)
    self.__debug_single_step_go = Value('i', 0)
    self.__debug_single_step_lock = Lock()
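The snippet above only sets up the shared state; the worker loop that consumes the job list is not part of it. Below is a minimal, hypothetical sketch (not the original project's code) of how such a loop could use the Manager proxies, the lock, and the keep-working flag created in __init__.

# Hypothetical sketch (not from the original project): how a worker process
# might consume the shared job list set up in __init__ above.
import time

def _worker_loop(job_list, job_list_lock, keep_working):
    while keep_working.value:
        job = None
        with job_list_lock:            # guard access to the Manager().list() proxy
            if len(job_list) > 0:
                job = job_list.pop(0)  # take the oldest pending job
        if job is None:
            time.sleep(0.1)            # nothing to do; avoid busy-waiting
            continue
        # ... process the job here ...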
def test_node_creation_args():
    """Checks that a node can be passed an argument using inheritance"""
    ns = multiprocessing.Manager().Namespace()
    ns.arg = 42

    class TestArgNode(pyzmp.Node):
        def update(self, *args, **kwargs):
            ns.arg -= args[0]
            return ns.arg

    n1 = TestArgNode(args=(ns.arg,))
    assert not n1.is_alive()
    svc_url = n1.start()
    assert n1.is_alive()
    assert svc_url
    # starting and shutting down should guarantee at least ONE call of the update function.
    exitcode = n1.shutdown()
    assert exitcode == 0
    assert not n1.is_alive()
    assert ns.arg == 0
def test_node_creation_args_delegate():
    """Checks that a node can be passed an argument using delegation"""
    ns = multiprocessing.Manager().Namespace()
    ns.arg = 42

    def arguser(fortytwo, **kwargs):  # kwargs is there to accept extra arguments nicely (timedelta)
        ns.arg -= fortytwo
        return ns.arg

    n1 = pyzmp.Node(args=(ns.arg,), target=arguser)
    assert not n1.is_alive()
    svc_url = n1.start()
    assert n1.is_alive()
    assert svc_url
    exitcode = n1.shutdown()
    assert exitcode == 0
    assert not n1.is_alive()
    assert ns.arg == 0
def test_node_creation_kwargs():
    """Checks that a node can be passed a keyword argument using inheritance"""
    ns = multiprocessing.Manager().Namespace()
    ns.kwarg = 42

    class TestKWArgNode(pyzmp.Node):
        def update(self, *args, **kwargs):
            ns.kwarg -= kwargs.get('intval')
            return ns.kwarg

    n1 = TestKWArgNode(kwargs={'intval': ns.kwarg})
    assert not n1.is_alive()
    svc_url = n1.start()
    assert n1.is_alive()
    assert svc_url
    exitcode = n1.shutdown()
    assert exitcode == 0
    assert not n1.is_alive()
    assert ns.kwarg == 0
def test_node_creation_kwargs_delegate():
    """Checks that a node can be passed a keyword argument using delegation"""
    ns = multiprocessing.Manager().Namespace()
    ns.kwarg = 42

    def kwarguser(intval, **kwargs):  # kwargs is there to accept extra arguments nicely (timedelta)
        ns.kwarg -= intval
        return ns.kwarg

    n1 = pyzmp.Node(kwargs={'intval': ns.kwarg}, target=kwarguser)
    assert not n1.is_alive()
    svc_url = n1.start()
    assert n1.is_alive()
    assert svc_url
    exitcode = n1.shutdown()
    assert exitcode == 0
    assert not n1.is_alive()
    assert ns.kwarg == 0
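All four tests route the mutated value through a Manager().Namespace() rather than a plain variable: only a manager proxy propagates changes made in the child process back to the parent. A minimal stdlib-only sketch of that behaviour (plain multiprocessing, not pyzmp; the names are illustrative assumptions):

# Minimal sketch of why the tests use a Manager().Namespace(): writes made in
# a child process are visible to the parent only through a manager proxy.
import multiprocessing

def _decrement(ns, amount):
    ns.arg -= amount           # the write goes through the manager process

if __name__ == '__main__':
    ns = multiprocessing.Manager().Namespace()
    ns.arg = 42
    p = multiprocessing.Process(target=_decrement, args=(ns, 42))
    p.start()
    p.join()
    print(ns.arg)              # 0 -- the parent sees the child's update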
# @nose.SkipTest  # to help debugging (FIXME: how to programmatically start only one test - maybe in a fixture?)
def Main():
    oldPath = input('Please input the folder path: ')
    newPath = oldPath + '-backups'
    os.makedirs(newPath)
    fileNames = os.listdir(oldPath)
    pool = Pool(5)
    queue = Manager().Queue()
    for name in fileNames:
        pool.apply_async(CopyFile, args=(oldPath, newPath, name, queue))
    num = 0
    allNum = len(fileNames)
    while num < allNum:
        queue.get()
        num += 1
        copyRate = num / allNum
        print('\rcopy progress: %.2f%%' % (copyRate * 100), end='')
    print('\ncopy finished')
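CopyFile is not part of this snippet; the sketch below is a hypothetical helper consistent with how Main() calls it (copy one file, then signal completion through the shared queue). The real project's version may differ.

# Hypothetical sketch of the CopyFile worker assumed by Main() above.
import os

def CopyFile(oldPath, newPath, name, queue):
    src = os.path.join(oldPath, name)
    dst = os.path.join(newPath, name)
    with open(src, 'rb') as fr, open(dst, 'wb') as fw:
        fw.write(fr.read())
    queue.put(name)  # one item per finished file drives the progress loop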
def main(haplotypeMatrix):
    # dictAccuracy = {}  # {0: [no. of SNPs, correctly identified]}
    manager = Manager()
    dictAccuracy = manager.dict()  # synchronized dictionary for multiprocessing
    nprocs = []  # keeps track of the spawned processes
    for item in list_split(haplotypeMatrix.shape[0], arg2):
        print('range of haplotypes given to each process:', item)
    for item in list_split(haplotypeMatrix.shape[0], arg2):  # arg2 specifies the number of processes
        n = multiprocessing.Process(target=Computation, args=(item, haplotypeMatrix, dictAccuracy))
        nprocs.append(n)
        n.start()
    for i in nprocs:
        i.join()  # wait for all the processes to finish
    # print(dictAccuracy)
    print('Switch Accuracy is:', SwitchAccuracy(dictAccuracy))
    print('Accuracy is:', NewAccuracy(dictAccuracy))
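list_split and Computation are defined elsewhere in that script. The sketch below is an assumed implementation of list_split consistent with how it is used here (splitting the matrix's row indices into contiguous chunks, one per worker process); it is not the original code.

# Hypothetical sketch of list_split as used above: divide row indices
# 0..n_rows-1 into n_chunks contiguous ranges, one per worker process.
def list_split(n_rows, n_chunks):
    chunk = max(1, n_rows // n_chunks)
    ranges = []
    for i in range(n_chunks):
        start = i * chunk
        end = n_rows if i == n_chunks - 1 else min((i + 1) * chunk, n_rows)
        if start < end:
            ranges.append(range(start, end))
    return ranges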
def main(n_pairs=7):
    n_pairs = int(n_pairs)
    barrier = mp.Barrier(n_pairs + 1)
    mgr = mp.Manager()
    sync_dict = mgr.dict()
    workers = [mp.Process(target=worker, args=(rank + 1, barrier, sync_dict))
               for rank in range(n_pairs)]
    for w in workers:
        w.start()
    master(n_pairs, barrier, sync_dict)
    for w in workers:
        w.join()
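worker and master are not shown in this snippet. Below is a minimal sketch of the rendezvous pattern the snippet implies (each worker publishes into the manager dict, everyone meets at the barrier, then the master reads the dict); the names and bodies are assumptions, not the project's code.

# Hypothetical sketch of the worker/master pair assumed by main() above.
def worker(rank, barrier, sync_dict):
    sync_dict[rank] = rank * rank   # publish this worker's result
    barrier.wait()                  # rendezvous with the master and the other workers

def master(n_pairs, barrier, sync_dict):
    barrier.wait()                  # returns only after every worker has published
    for rank in range(1, n_pairs + 1):
        print('worker %d reported %d' % (rank, sync_dict[rank]))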
def _multi_cpu(self, _func, job_queue: list, timeout: int) -> list:
    if _getLen(job_queue) == 0:
        return []
    index = _get_index(job_queue, self.cpu_num)
    cpu_pool = multiprocessing.Pool(processes=self.cpu_num)
    mgr = multiprocessing.Manager()
    process_bar = mgr.list()
    for i in range(self.cpu_num):
        process_bar.append(0)
    result_queue = cpu_pool.map(
        _multi_thread,
        [[_func, self.cpu_num, self.thread_num,
          job_queue[int(index[i][0]): int(index[i][1] + 1)],
          timeout, process_bar, i]
         for i in range(len(index))])
    result = []
    for rl in result_queue:
        for r in rl:
            result.append(r)
    return result
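_getLen, _get_index and _multi_thread come from elsewhere in that module. The sketch below is an assumed _get_index matching the slicing done above (inclusive (start, end) index pairs, one per CPU slice); it is not the library's actual helper.

# Hypothetical sketch of _get_index as used above: split the job queue into
# at most cpu_num slices and return inclusive (start, end) index pairs.
def _get_index(job_queue, cpu_num):
    total = len(job_queue)
    per_cpu = max(1, total // cpu_num)
    index = []
    start = 0
    while start < total:
        end = min(start + per_cpu, total) - 1
        if len(index) == cpu_num - 1:   # the last slice absorbs any remainder
            end = total - 1
        index.append((start, end))
        start = end + 1
    return index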
def add(self, params):
    vif = self._do_work(params, b_base.connect)
    # NOTE(dulek): Saving containerid to be able to distinguish old DEL
    #              requests that we should ignore. We need to replace the
    #              whole object in the dict for multiprocessing.Manager to work.
    pod_name = params.args.K8S_POD_NAME
    d = self.registry[pod_name]
    d['containerid'] = params.CNI_CONTAINERID
    self.registry[pod_name] = d
    LOG.debug('Saved containerid = %s for pod %s', params.CNI_CONTAINERID,
              pod_name)
    return vif
def __init__(self):
    manager = Manager()
    self.non_finding = manager.dict()
    self.crash = manager.dict()
    self.timeout = manager.dict()
    self.kasan = manager.dict()
def __init__(self, blocks=None):
    self.blocks_lock = Lock()
    self.unconfirmed_transactions_lock = Lock()
    # Note the call: `Manager().list` without parentheses would store the bound
    # method itself, not a shared list proxy.
    self.unconfirmed_transactions = Manager().list()
    if blocks is None:
        genesis_block = self.get_genesis_block()
        self.add_block(genesis_block)
    else:
        for block in blocks:
            self.add_block(block)
def __init__(self):
    # per advice at:
    # http://docs.python.org/library/multiprocessing.html#all-platforms
    self.__master = getpid()
    self.__queue = Manager().Queue()
    self.__buffer = StringIO()
    self.softspace = 0
def upload_documents(self, collection_id, corpus, max_concurrent_child_processes=20):
    """
    :param str collection_id: collection to upload to
    :param Iterable corpus: an iterable which yields (doc_id, doc_as_json)
    :param int max_concurrent_child_processes: the maximum number of concurrent processes that are spawned
        to help parallelize the document upload requests
    """
    stats = defaultdict(int)

    # Set up a manager so we can use multiprocessing to speed things up
    file_processors = list()
    manager = Manager()
    response_from_processors = manager.dict()

    for doc_id, body in corpus:
        stats['num_docs'] += 1
        self._wait_for_processors_to_free_up(max_concurrent_child_processes)
        file_processors.append(Process(target=upload_file_to_discovery_collection,
                                       args=(self.config, self.environment_id, collection_id, doc_id, body,
                                             response_from_processors)))
        file_processors[-1].start()
        if self.logger.isEnabledFor(logging.DEBUG) or stats['num_docs'] % 1000 == 0:
            self.logger.info('Submitted %d upload requests' % stats['num_docs'])
        stats['num_requests_submitted'] += 1

    self.logger.info('Done submitting requests; checking on the status of the requests')
    # check for failures
    stats['counts_by_status'] = self._check_file_processes(file_processors, response_from_processors)
    self.logger.info('Processed %d docs' % stats['num_docs'])
    json.dump(stats, sys.stdout, sort_keys=True, indent=4)
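_wait_for_processors_to_free_up and _check_file_processes are not shown. The sketch below is a plausible, hypothetical version of the first helper, consistent with how it is used (block until the number of live child processes drops below the cap); it assumes the instance tracks its spawned processes, which this snippet does not show.

# Hypothetical sketch of _wait_for_processors_to_free_up as used above. It
# assumes the spawned processes are tracked on self.file_processors (an
# assumption, not shown in the snippet).
import time

def _wait_for_processors_to_free_up(self, max_concurrent_child_processes):
    while sum(1 for p in self.file_processors if p.is_alive()) >= max_concurrent_child_processes:
        time.sleep(0.1)   # wait for some running upload processes to finish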
def initializeCSVFileQueueHandler():
    import multiprocessing
    mpQueue = multiprocessing.Manager().Queue()
    mpQueueHandler = multiprocessing.Process(target=CSVFileQueueHandler, args=(mpQueue,))
    mpQueueHandler.start()
    return (mpQueue, mpQueueHandler)
def initializeStdQueueHandler(stdtype, gmGlobals, gcValues):
    import multiprocessing
    mpQueue = multiprocessing.Manager().Queue()
    mpQueueHandler = multiprocessing.Process(target=StdQueueHandler, args=(mpQueue, stdtype, gmGlobals, gcValues))
    mpQueueHandler.start()
    return (mpQueue, mpQueueHandler)
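CSVFileQueueHandler and StdQueueHandler run inside the spawned processes and drain the shared queues. Below is a minimal sketch of that consumer pattern, using a None sentinel to stop; it is a hypothetical stand-in, not the original handlers.

# Hypothetical sketch of a queue-handler process like CSVFileQueueHandler:
# drain the shared queue until a None sentinel arrives.
def CSVFileQueueHandler(mpQueue):
    while True:
        item = mpQueue.get()       # blocks until the parent puts something
        if item is None:           # sentinel: the parent is done, shut down
            break
        # ... write `item` to the CSV file here ...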
def __init__(self):
    super(ActivePool, self).__init__()
    self.mgr = multiprocessing.Manager()
    self.active = self.mgr.list()
    self.lock = multiprocessing.Lock()
def __init__(self, valmax=100, barsize=None, title=None, bar=True, up_every=2):
    self._q = _Manager().Queue(maxsize=0)
    self.reset(valmax=valmax, barsize=barsize, title=title, bar=bar, up_every=up_every)
# manager.py, from the Learning-Concurrency-in-Python project (PacktPublishing)
def main():
    manager = mp.Manager()
    ns = manager.Namespace()
    ns.x = 1
    print(ns)
    process = mp.Process(target=myProcess, args=(ns,))
    process.start()
    process.join()
    print(ns)
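myProcess is not shown in this snippet. A minimal, assumed version consistent with the example (mutating an attribute on the shared Namespace so the second print(ns) shows the change):

# Hypothetical sketch of myProcess as used above.
def myProcess(ns):
    ns.x += 1   # visible to the parent via the manager proxy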
# mpQueue.py, from the Learning-Concurrency-in-Python project (PacktPublishing)
def main():
    m = multiprocessing.Manager()
    sharedQueue = m.Queue()
    sharedQueue.put(2)
    sharedQueue.put(3)
    sharedQueue.put(4)

    process1 = multiprocessing.Process(target=myTask, args=(sharedQueue,))
    process1.start()

    process2 = multiprocessing.Process(target=myTask, args=(sharedQueue,))
    process2.start()

    process3 = multiprocessing.Process(target=myTask, args=(sharedQueue,))
    process3.start()

    process2.join()
    process1.join()
    process3.join()
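myTask is not shown either. A minimal, assumed version consistent with the example (each process takes one value off the shared queue and reports it):

# Hypothetical sketch of myTask as used above.
def myTask(queue):
    value = queue.get()
    print('Process got value:', value)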