def driller_callback(self, fuzz):
    l.warning("Driller stuck callback triggered!")
    # remove any workers that aren't running
    self._running_workers = [x for x in self._running_workers if x.is_alive()]

    # get the files in queue
    queue = self._queue_files(fuzz)
    # for i in range(1, fuzz.fuzz_id):
    #     fname = "fuzzer-%d" % i
    #     queue.extend(self.queue_files(fname))

    # start drilling
    not_drilled = set(queue) - self._already_drilled_inputs
    if len(not_drilled) == 0:
        l.warning("no inputs left to drill")

    while len(self._running_workers) < self._num_workers and len(not_drilled) > 0:
        to_drill_path = list(not_drilled)[0]
        not_drilled.remove(to_drill_path)
        self._already_drilled_inputs.add(to_drill_path)

        proc = multiprocessing.Process(target=_run_drill, args=(self, fuzz, to_drill_path))
        proc.start()
        self._running_workers.append(proc)
Python Process() class usage examples (source code)
def run(self, started_event: Event) -> Process:
    p = Process(target=_sub_process_main, args=(
        started_event,
        self._channel_name,
        self._connection,
        self._consumer_configuration,
        self._consumer_factory,
        self._command_processor_factory,
        self._mapper_func))
    self._logger.debug("Starting worker process for channel: %s on exchange %s on server %s",
                       self._channel_name, self._connection.exchange, self._connection.amqp_uri)
    p.start()
    started_event.wait(timeout=1)
    return p
def multiWorker(processNum=4, serverHost='127.0.0.1', serverAuthkey='', serverPort=5000, logFile=None, color=True, debug=4):
    '''
    :param processNum: default=4 <class int>
    :param serverHost: default='127.0.0.1' <class str>
    :param serverAuthkey: default='' <class bytes>
    :param serverPort: default=5000 <class int>
    :param logFile: default=None <class str>
    :param color: default=True <class bool>
    :param debug: default=4 <class int | 0 NONE, 1 [Error], 2 [Error][WARNING], 3 [Error][WARNING][INFO], 4 ALL>
    :function: multiprocessing download
    '''
    while True:
        pools = []
        for num in range(processNum):
            pools.append(Process(target=DownloadWorker(serverHost, serverAuthkey, serverPort, logFile, color, debug).run,
                                 args=('Worker%d' % num,)))
        for p in pools:
            p.start()
        for p in pools:
            p.join()
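A minimal call sketch, assuming a DownloadWorker dispatch server is already listening; the host, port, authkey, and log file below are placeholders, and the call blocks forever because multiWorker keeps respawning worker pools once they finish:

if __name__ == '__main__':
    # Spin up 8 download workers talking to a (hypothetical) dispatch
    # server at 192.168.1.10:5000; all values below are placeholders.
    multiWorker(processNum=8,
                serverHost='192.168.1.10',
                serverAuthkey=b'secret',
                serverPort=5000,
                logFile='download.log',
                color=True,
                debug=2)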
def __init__(self, *args, **kwargs):
    test_notes = global_vars['test_notes']
    pause_reporting = global_vars['pause_reporting']

    def wrapper(func, test_notes, pause_reporting, **kwargs):
        """
        :param func: function to pass to multiprocessing.Process.
        :param test_notes: multiprocessing Queue() instance. Allows us to add notes to the test output.
        :param pause_reporting: multiprocessing Event() instance. Turns off reporting to terminal when input is needed.
        :param kwargs: dictionary that contains all args and kwargs being sent to the wrapped function.
        :return:
        """
        global_vars['test_notes'] = test_notes
        global_vars['pause_reporting'] = pause_reporting
        args_ = kwargs['args'] if 'args' in kwargs else ()
        kwargs_ = kwargs['kwargs'] if 'kwargs' in kwargs else {}
        return func(*args_, **kwargs_)

    wrapper_args = [kwargs['target'], test_notes, pause_reporting]
    wrapper_kwargs = kwargs
    multiprocessing.Process.__init__(self, target=wrapper, args=wrapper_args, kwargs=wrapper_kwargs)
def _process_single_event(self, socket):
    """
    Process a socket's event.

    Parameters
    ----------
    socket : zmq.Socket
        Socket that generated the event.
    """
    data = socket.recv()
    address = self.address[socket]
    if address.kind == 'SUB':
        self._process_sub_event(socket, address, data)
    elif address.kind == 'PULL':
        self._process_pull_event(socket, address, data)
    elif address.kind == 'REP':
        self._process_rep_event(socket, address, data)
    else:
        self._process_single_event_complex(address, socket, data)
def _process_single_event_complex(self, address, socket, data):
    """
    Process a socket's event for complex sockets (channels).

    Parameters
    ----------
    address : AgentAddress or AgentChannel
        Agent address or channel associated to the socket.
    socket : zmq.Socket
        Socket that generated the event.
    data
        Received in the socket.
    """
    if address.kind == 'ASYNC_REP':
        self._process_async_rep_event(socket, address, data)
    elif address.kind == 'PULL_SYNC_PUB':
        self._process_sync_pub_event(socket, address.channel, data)
    else:
        raise NotImplementedError('Unsupported kind %s!' % address.kind)
def _process_rep_event(self, socket, addr, data):
    """
    Process a REP socket's event.

    Parameters
    ----------
    socket : zmq.Socket
        Socket that generated the event.
    addr : AgentAddress
        AgentAddress associated with the socket that generated the event.
    data : bytes
        Data received on the socket.
    """
    message = deserialize_message(message=data, serializer=addr.serializer)
    handler = self.handler[socket]
    if inspect.isgeneratorfunction(handler):
        generator = handler(self, message)
        socket.send(serialize_message(next(generator), addr.serializer))
        execute_code_after_yield(generator)
    else:
        reply = handler(self, message)
        socket.send(serialize_message(reply, addr.serializer))
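For reference, a sketch of what a generator-based REP handler could look like (the handler name and the logging call are illustrative, not from the original code): the value of the first yield is serialized and sent as the reply, and the code after the yield runs only after the reply has been sent.

def reply_then_cleanup(agent, message):
    # The first yielded value becomes the REP reply.
    yield 'ack: %s' % message
    # Everything after the yield is executed by
    # execute_code_after_yield(generator), i.e. after the reply went out.
    agent.log_info('finished handling %s' % message)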
def _process_pull_event(self, socket, addr, data):
    """
    Process a PULL socket's event.

    Parameters
    ----------
    socket : zmq.Socket
        Socket that generated the event.
    addr : AgentAddress
        AgentAddress associated with the socket that generated the event.
    data : bytes
        Data received on the socket.
    """
    message = deserialize_message(message=data, serializer=addr.serializer)
    handler = self.handler[socket]
    if not isinstance(handler, (list, dict, tuple)):
        handler = [handler]
    for h in handler:
        h(self, message)
def test_nameserver_spawn_process(nsproxy):
    """
    A name server should be able to spawn child processes.

    This is a way to make sure name servers are run as non-daemonic
    processes (daemonic processes are not allowed to have children).
    """
    class Spawner(NameServer):
        def spawn_process(self):
            p = multiprocessing.Process()
            p.start()
            return True

    ns = run_nameserver(base=Spawner)
    assert ns.spawn_process()
    ns.shutdown()
def setUpClass(self):
    self.smr_proc = Process(target=SpecificManagerRegistry)
    self.smr_proc.daemon = True

    self.manoconn = ManoBrokerRequestResponseConnection('son-plugin.SpecificManagerRegistry')

    self.wait_for_ssm_event = threading.Event()
    self.wait_for_ssm_event.clear()

    self.wait_for_fsm_event = threading.Event()
    self.wait_for_fsm_event.clear()

    self.event1 = False
    self.event2 = False

    self.smr_proc.start()
    time.sleep(4)
def _main_process(logger, downwards, upwards, process_count, configs):
    """Main process."""
    try:
        while True:
            message = _receive_message(upwards, block=True)
            if message.type == _MESSAGE_ERROR:
                logger.error("Process '%s' [%d]:\n%s" % (
                    message.process_name, message.process_id, message.message))
                return Status.FAILURE
            elif message.type == _MESSAGE_QUIT:
                break
    finally:
        for _ in _range(process_count):
            _send_message(downwards, _MESSAGE_STOP)
        downwards.close()

    return Status.SUCCESS
def test_concurrent_access(self):
    st = SharedTable({'cnt': 0})

    def inc():
        for _ in range(50):
            with st.get_lock():
                st['cnt'] += 1
            time.sleep(random.randint(1, 5) / 10000)

    threads = []
    for _ in range(5):  # numthreads
        threads.append(Process(target=inc))
    for t in threads:
        t.start()
    for t in threads:
        t.join()

    assert st['cnt'] == 250
Source file: concurrentCalculation.py (project: Learning-Concurrency-in-Python, author: PacktPublishing)
def main():
    print("Starting number crunching")
    t0 = time.time()
    procs = []

    # Here we create our processes and kick them off
    for i in range(10):
        proc = Process(target=executeProc, args=())
        procs.append(proc)
        proc.start()

    # Again we use the .join() method in order to wait for
    # execution to finish for all of our processes
    for proc in procs:
        proc.join()

    t1 = time.time()
    totalTime = t1 - t0
    # We print out the total execution time for our 10 procs.
    print("Execution Time: {}".format(totalTime))
def main():
    ring = ringbuffer.RingBuffer(slot_bytes=50000, slot_count=10)
    ring.new_writer()

    processes = [
        multiprocessing.Process(target=reader, args=(ring, ring.new_reader())),
        multiprocessing.Process(target=reader, args=(ring, ring.new_reader())),
        multiprocessing.Process(target=writer, args=(ring, 1, 1000)),
    ]

    for p in processes:
        p.daemon = True
        p.start()

    for p in processes:
        p.join(timeout=20)
        assert not p.is_alive()
        assert p.exitcode == 0
def new_reader(self):
    """Returns a new unique reader into the buffer.

    This must only be called in the parent process. It must not be
    called in a child multiprocessing.Process. See class docstring. To
    enforce this policy, no readers may be allocated after the first
    write has occurred.
    """
    with self.lock.for_write():
        writer_position = self.writer.get()
        if writer_position.counter > 0:
            raise MustCreatedReadersBeforeWritingError

        reader = Pointer(self.slot_count, start=writer_position.counter)
        self.readers.append(reader)
        return reader
def main():
    ring = ringbuffer.RingBuffer(slot_bytes=50000, slot_count=100)
    ring.new_writer()

    processes = [
        multiprocessing.Process(target=writer, args=(ring,)),
    ]
    for i in range(10):
        processes.append(multiprocessing.Process(
            target=reader, args=(ring, ring.new_reader())))

    for p in processes:
        p.start()

    for p in processes:
        p.join(timeout=20)
        assert not p.is_alive()
        assert p.exitcode == 0
def main():
    while True:
        indata = raw_input("Enter domain name and port, example: google.com:443 : ")
        print indata
        domain = indata.split(':')[0]
        try:
            port = int(indata.split(':')[1])
        except:
            port = 443
        if len(domain) < 5:
            print 'wrong domain'
            continue
        print 'fucking ' + domain + ' @ port ' + str(port) + '...'
        fn = open(domain + '.bin', 'ab')
        for j in xrange(THREADS):
            t = Process(target=fuckit, args=(domain, port, fn))
            t.daemon = True
            t.start()
Source file: train_faster_rcnn_alt_opt.py (project: py-faster-rcnn-tk1, author: joeking11829)
def get_solvers(net_name):
    # Faster R-CNN Alternating Optimization
    n = 'faster_rcnn_alt_opt'
    # Solver for each training stage
    solvers = [[net_name, n, 'stage1_rpn_solver60k80k.pt'],
               [net_name, n, 'stage1_fast_rcnn_solver30k40k.pt'],
               [net_name, n, 'stage2_rpn_solver60k80k.pt'],
               [net_name, n, 'stage2_fast_rcnn_solver30k40k.pt']]
    solvers = [os.path.join(cfg.ROOT_DIR, 'models', *s) for s in solvers]
    # Iterations for each training stage
    max_iters = [80000, 40000, 80000, 40000]
    # max_iters = [100, 100, 100, 100]
    # Test prototxt for the RPN
    rpn_test_prototxt = os.path.join(
        cfg.ROOT_DIR, 'models', net_name, n, 'rpn_test.pt')
    return solvers, max_iters, rpn_test_prototxt

# ------------------------------------------------------------------------------
# Pycaffe doesn't reliably free GPU memory when instantiated nets are discarded
# (e.g. "del net" in Python code). To work around this issue, each training
# stage is executed in a separate process using multiprocessing.Process.
# ------------------------------------------------------------------------------
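A minimal sketch of that workaround, under the assumption that each training stage is exposed as a plain function returning its result; the helper names and the Queue-based result passing are illustrative, not the exact py-faster-rcnn code:

import multiprocessing

def _run_stage(stage_fn, kwargs, queue):
    # Run one training stage; all pycaffe/GPU state dies with this process,
    # so GPU memory is reliably released when the stage finishes.
    queue.put(stage_fn(**kwargs))

def run_stage_in_subprocess(stage_fn, **kwargs):
    queue = multiprocessing.Queue()
    p = multiprocessing.Process(target=_run_stage, args=(stage_fn, kwargs, queue))
    p.start()
    result = queue.get()   # fetch before join() to avoid blocking on a full pipe
    p.join()
    return result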
def addProcess(self, method, arguments):
    p = multiprocessing.Process(target=method, args=(arguments,))
    p.start()
def _validate(self):
    """Raise exception if this handle is closed or not registered to be
    used in the current process.

    Intended to be called before every operation on `self._fd`.
    Reveals wrong usage of this module in the context of multiple
    processes. Might prevent tedious debugging sessions. Has little
    performance impact.
    """
    if self._closed:
        raise GIPCClosed(
            "GIPCHandle has been closed before.")
    if os.getpid() != self._legit_pid:
        raise GIPCError(
            "GIPCHandle %s not registered for current process %s." % (
                self, os.getpid()))
def _winapi_childhandle_after_createprocess_child(self):
    """Called on Windows in the child process, after the CreateProcess()
    system call. This is required for making the handle usable in the child.
    """
    if WINAPI_HANDLE_TRANSFER_STEAL:
        # In this case the handle has not been inherited by the child
        # process during CreateProcess(). Steal it from the parent.
        new_winapihandle = multiprocessing.reduction.steal_handle(
            self._parent_pid, self._parent_winapihandle)
        del self._parent_winapihandle
        del self._parent_pid
        # Restore C file descriptor with (read/write)only flag.
        self._fd = msvcrt.open_osfhandle(new_winapihandle, self._fd_flag)
        return
    # In this case the handle has been inherited by the child process during
    # the CreateProcess() system call. Get C file descriptor from Windows
    # file handle.
    self._fd = msvcrt.open_osfhandle(
        self._inheritable_winapihandle, self._fd_flag)
    del self._inheritable_winapihandle
def run_trial(self, trial_num, param):
    '''
    algo step 2: construct and run a Trial with the next param.
    args trial_num, param must be provided externally,
    otherwise they will not progress within mp.Process
    '''
    experiment_spec = self.compose_experiment_spec(param)
    trial = self.Trial(
        experiment_spec, trial_num=trial_num,
        times=self.times,
        num_of_trials=self.num_of_trials,
        run_timestamp=self.run_timestamp,
        experiment_id_override=self.experiment_id_override)
    trial_data = trial.run()
    del trial
    import gc
    gc.collect()
    debug_mem_usage()
    return trial_data

# retrieve the trial_num, param, fitness_score from trial_data
def start(self):
    # Start grabbing SIGCHLD within the libev event loop.
    gevent.get_hub().loop.install_sigchld()
    # Run new process (based on `fork()` on POSIX-compliant systems).
    super(_GProcess, self).start()
    # The occurrence of SIGCHLD is recorded asynchronously in libev.
    # This guarantees proper behavior even if the child watcher is
    # started after the child exits. Start child watcher now.
    self._sigchld_watcher = gevent.get_hub().loop.child(self.pid)
    self._returnevent = gevent.event.Event()
    self._sigchld_watcher.start(
        self._on_sigchld, self._sigchld_watcher)
    log.debug("SIGCHLD watcher for %s started.", self.pid)
def __repr__(self):
    """Based on the original __repr__ from CPython 3.4's mp package.

    Reasons for re-implementing:

    * The original code would invoke os.waitpid() through
      _popen.poll(). This is forbidden in the context of gipc.
      This method instead reads the exitcode property, which is set
      asynchronously by a libev child watcher callback.
    * The original code distinguishes 'initial' state from 'started'
      state. This is not necessary, as gipc starts processes right
      away.
    * This method removes the `if self is _current_process` check
      without changing output behavior (that's still 'started' status).
    """
    exitcodedict = multiprocessing.process._exitcode_to_name
    status = 'started'
    if self._parent_pid != os.getpid():
        status = 'unknown'
    elif self.exitcode is not None:
        status = self.exitcode
    if status == 0:
        status = 'stopped'
    elif isinstance(status, int):
        status = 'stopped[%s]' % exitcodedict.get(status, status)
    return '<%s(%s, %s%s)>' % (
        type(self).__name__,
        self._name,
        status,
        self.daemon and ' daemon' or ''
    )
def join(self, timeout=None):
    """
    Wait cooperatively until the child process terminates or the timeout occurs.

    :arg timeout: ``None`` (default) or a time in seconds. The method
        simply returns upon timeout expiration. The state of the process
        has to be identified via ``is_alive()``.
    """
    assert self._parent_pid == os.getpid(), "I'm not parent of this child."
    assert self._popen is not None, 'Can only join a started process.'
    if not WINDOWS:
        # Resemble multiprocessing's join() method while replacing
        # `self._popen.wait(timeout)` with
        # `self._returnevent.wait(timeout)`.
        self._returnevent.wait(timeout)
        if self._popen.returncode is not None:
            if hasattr(multiprocessing.process, '_children'):
                # This is for Python 3.4.
                kids = multiprocessing.process._children
            else:
                # For Python 2.6, 2.7, 3.3.
                kids = multiprocessing.process._current_process._children
            kids.discard(self)
        return
    with gevent.Timeout(timeout, False):
        while self.is_alive():
            # This frequency seems reasonable, but that's not 100 % certain.
            gevent.sleep(0.01)
    # Clean up after child as designed by Process class (non-blocking).
    super(_GProcess, self).join(timeout=0)
def _set_legit_process(self):
    log.debug("Legitimate %s for current process.", self)
    self._legit_pid = os.getpid()
def _filter_handles(l):
    """Iterate through `l`, filter and yield `_GIPCHandle` instances.
    """
    for o in l:
        if isinstance(o, _GIPCHandle):
            yield o
        elif isinstance(o, _GIPCDuplexHandle):
            yield o._writer
            yield o._reader

# Container for keeping track of valid `_GIPCHandle`s in current process.
def bind_port(sock, host=HOST):
    """Bind the socket to a free port and return the port number. Relies on
    ephemeral ports in order to ensure we are using an unbound port. This is
    important as many tests may be running simultaneously, especially in a
    buildbot environment. This method raises an exception if the sock.family
    is AF_INET and sock.type is SOCK_STREAM, *and* the socket has SO_REUSEADDR
    or SO_REUSEPORT set on it. Tests should *never* set these socket options
    for TCP/IP sockets. The only case for setting these options is testing
    multicasting via multiple UDP sockets.

    Additionally, if the SO_EXCLUSIVEADDRUSE socket option is available (i.e.
    on Windows), it will be set on the socket. This will prevent anyone else
    from bind()'ing to our host/port for the duration of the test.
    """
    if sock.family == socket.AF_INET and sock.type == socket.SOCK_STREAM:
        if hasattr(socket, 'SO_REUSEADDR'):
            if sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR) == 1:
                raise TestFailed("tests should never set the SO_REUSEADDR "
                                 "socket option on TCP/IP sockets!")
        if hasattr(socket, 'SO_REUSEPORT'):
            try:
                if sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT) == 1:
                    raise TestFailed("tests should never set the SO_REUSEPORT "
                                     "socket option on TCP/IP sockets!")
            except socket.error:
                # Python's socket module was compiled using modern headers
                # thus defining SO_REUSEPORT but this process is running
                # under an older kernel that does not support SO_REUSEPORT.
                pass
        if hasattr(socket, 'SO_EXCLUSIVEADDRUSE'):
            sock.setsockopt(socket.SOL_SOCKET, socket.SO_EXCLUSIVEADDRUSE, 1)

    sock.bind((host, 0))
    port = sock.getsockname()[1]
    return port
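A short usage sketch, assuming bind_port() and its module-level HOST constant are importable from this test support module:

import socket

# Grab a free ephemeral TCP port for a throwaway test server.
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
port = bind_port(sock)   # bound to (HOST, <free port>); no reuse options set
sock.listen(1)
print("test server listening on port %d" % port)
sock.close()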
@contextlib.contextmanager  # needed so the generator below works as a context manager
def temp_umask(umask):
    """Context manager that temporarily sets the process umask."""
    oldmask = os.umask(umask)
    try:
        yield
    finally:
        os.umask(oldmask)
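A short usage sketch (the file name is illustrative): files created inside the block are masked with 0o077, so they end up accessible by the owner only.

import os

with temp_umask(0o077):
    # 0o666 requested, 0o077 masked away, so the file is created as 0o600.
    fd = os.open('scratch.txt', os.O_CREAT | os.O_WRONLY, 0o666)
    os.close(fd)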