def get(self, timeout=None):
"""
Return the result when it arrives. If timeout is not None and the
result does not arrive within timeout seconds then
multiprocessing.TimeoutError is raised. If the remote call raised an
exception then that exception will be reraised by get().
"""
try:
res = self._q.get(timeout=timeout)
    except Queue.Empty:  # Python 2 spelling (import Queue); on Python 3 this is queue.Empty
        raise multiprocessing.TimeoutError("Timed out")
if isinstance(res, Exception):
raise res
return res
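For reference, a sketch of how this contract behaves, assuming `r` is an instance of the surrounding class (feeding `r._q` directly is for illustration only):

import multiprocessing

r._q.put(42)
print(r.get(timeout=1))           # -> 42

r._q.put(ValueError("remote call failed"))
try:
    r.get(timeout=1)              # the remote exception is re-raised locally
except ValueError as e:
    print(e)

try:
    r.get(timeout=0.1)            # nothing queued -> TimeoutError
except multiprocessing.TimeoutError:
    print("timed out")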
def wait(self, max_wait_secs=6 * 3600, poll_secs=2):
if len(self._pooled) > 0:
waited_secs = 0
self._pool.close()
while len(self._pooled):
logging.debug("Waiting for %i oplog resolver thread(s) to stop" % len(self._pooled))
try:
for thread_name in self._pooled:
thread = self._results[thread_name]
                    thread.get(poll_secs)
                # all results arrived; clear the list so the wait loop can exit
                self._pooled = []
except TimeoutError:
if waited_secs < max_wait_secs:
waited_secs += poll_secs
else:
                        raise OperationError("Waited more than %i seconds for Oplog resolver! I will assume there is a problem and exit" % max_wait_secs)
self._pool.terminate()
logging.debug("Stopped all oplog resolver threads")
self.stopped = True
self.running = False
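A generic sketch of the same bounded-polling pattern, decoupled from the oplog-resolver class (all names here are illustrative):

import multiprocessing

def wait_all(async_results, max_wait_secs=60, poll_secs=2):
    # Poll every AsyncResult with a short timeout, giving up once the
    # accumulated wait exceeds max_wait_secs.
    waited_secs = 0
    pending = list(async_results)
    while pending:
        still_pending = []
        for r in pending:
            try:
                r.get(poll_secs)
            except multiprocessing.TimeoutError:
                still_pending.append(r)
                waited_secs += poll_secs
                if waited_secs >= max_wait_secs:
                    raise RuntimeError("waited more than %i seconds" % max_wait_secs)
        pending = still_pending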
def _multitest_binary_pov(self, pov_path, cb_path, enable_randomness, debug, bitflip, timeout, times):
pool = Pool(processes=4)
res = [pool.apply_async(self._test_binary_pov,
(pov_path, cb_path, enable_randomness, debug, bitflip, timeout))
for _ in range(times)]
    results = []
for r in res:
try:
results.append(r.get(timeout=timeout + 5))
except TimeoutError:
results.append(False)
return results
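A hypothetical call site for the fan-out above; each worker's get() is given `timeout + 5` seconds so the pool deadline does not race the PoV's own timeout (`tester` and the argument values are illustrative):

# Run the PoV ten times and measure how reliably it lands.
results = tester._multitest_binary_pov(pov_path, cb_path, enable_randomness=True,
                                       debug=False, bitflip=False, timeout=30, times=10)
success_rate = sum(1 for ok in results if ok) / len(results)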
def map(self, func, iterable, chunksize=None):
"""
Equivalent of `map()` built-in, without swallowing
`KeyboardInterrupt`.
:param func:
The function to apply to the items.
:param iterable:
An iterable of items that will have `func` applied to them.
"""
# The key magic is that we must call r.get() with a timeout, because
# a Condition.wait() without a timeout swallows KeyboardInterrupts.
r = self.map_async(func, iterable, chunksize)
while True:
try:
return r.get(self.wait_timeout)
except multiprocessing.TimeoutError:
pass
except KeyboardInterrupt:
self.terminate()
self.join()
raise
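The same trick as a standalone helper for a plain multiprocessing.Pool, a minimal sketch (the function name is illustrative):

import multiprocessing

def interruptible_map(pool, func, iterable, poll_secs=1.0):
    # Poll get() with a finite timeout so the underlying Condition.wait()
    # never blocks forever and KeyboardInterrupt can reach the main thread.
    r = pool.map_async(func, iterable)
    while True:
        try:
            return r.get(poll_secs)
        except multiprocessing.TimeoutError:
            continue  # result not ready yet; keep polling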
def kill(self, retries=3, pause=.1):
try:
try:
self.task.get(1)
self.__set_info('KILLED', "None")
except (AttributeError, TimeoutError):
self.__set_info('CANCELLED', "None")
except UnicodeEncodeError:
self.__set_info('CRASHED', "None")
for pid in self.pids:
try:
with open(pid) as f:
os.kill(int(f.read().strip()), signal.SIGTERM)
os.remove(pid)
except (IOError, OSError):
pass # simply fail silently when no PID or OS cannot kill it as it is already terminated
if self.command.__name__.lstrip('_') == 'run' and retries > 0:
            time.sleep(pause)  # wait so that the next call issued by the command has started
            # (necessary e.g. with the cooja command, when Cooja starts a first time for
            #  a simulation without a malicious mote, then a second time with one)
            self.kill(retries - 1, 2 * pause)  # then kill it
except KeyboardInterrupt:
self.kill(0, 0)
def exploit_challenges():
challenges = get_challenges_paths()
status = {n: False for n,_ in challenges}
start = time.time()
results = []
with Pool(processes=len(challenges)) as pool:
multiple_results = [pool.apply_async(exploit, (name,path,)) for name, path in challenges]
for res in multiple_results:
try:
results.append(res.get(timeout=timeout+1))
except TimeoutError:
print("Got a timeout.")
duration = time.time() - start
print("All challenges exploited in " + str(duration) + " sec.")
for chall_name, exploitable in results:
status[chall_name] = exploitable
return status
def _ParallelSymbolizeBacktrace(backtrace):
# Disable handling of SIGINT during sub-process creation, to prevent
# sub-processes from consuming Ctrl-C signals, rather than the parent
# process doing so.
saved_sigint_handler = signal.signal(signal.SIGINT, signal.SIG_IGN)
p = multiprocessing.Pool(multiprocessing.cpu_count())
# Restore the signal handler for the parent process.
signal.signal(signal.SIGINT, saved_sigint_handler)
symbolized = []
try:
result = p.map_async(_SymbolizeEntry, backtrace)
symbolized = result.get(SYMBOLIZATION_TIMEOUT_SECS)
if not symbolized:
return []
except multiprocessing.TimeoutError:
return ['(timeout error occurred during symbolization)']
except KeyboardInterrupt: # SIGINT
p.terminate()
return symbolized
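The SIGINT dance around Pool creation generalizes; a minimal sketch of the same trick as a helper (the name is illustrative):

import multiprocessing
import signal

def pool_with_sigint_ignored(processes=None):
    # Workers inherit SIG_IGN for SIGINT at creation time, so Ctrl-C is
    # delivered only to the parent, which can terminate the pool cleanly.
    saved = signal.signal(signal.SIGINT, signal.SIG_IGN)
    try:
        return multiprocessing.Pool(processes)
    finally:
        signal.signal(signal.SIGINT, saved)  # restore the parent's handler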
def _get_result(self, resultl):
    result = ''
    # collect the output of each async result, skipping any that time out
    for cur in resultl:
        try:
            result += cur.get(timeout=self._timeout)
        except TimeoutError:
            continue
    if result == '':
        result = 'no dir or file'
    else:
        result = result[:-1]  # strip the trailing separator
    return result
def _get_result(self, resultl):
    result = ''
    # collect the output of each async result, skipping any that time out
    for cur in resultl:
        try:
            result += cur.get(timeout=self._timeout)
        except TimeoutError:
            continue
    if result == '':
        result = 'no subdomain'
    else:
        result = result[:-1]  # strip the trailing separator
    return result
def _get_result(self, resultl):
    result = ''
    # collect the output of each async result, skipping any that time out
    for cur in resultl:
        try:
            result += cur.get(timeout=self._timeout)
        except TimeoutError:
            continue
    if result == '':
        result = 'nothing here'
    else:
        result = result[:-1]  # strip the trailing separator
    return result
def _get_resultl(self, resultl):
    retl = []
    # collect the list output of each async result, skipping any that time out
    for cur in resultl:
        try:
            retl.extend(cur.get(timeout=self._timeout))
        except TimeoutError:
            continue
    return retl
def parse_structure(self, structure_file, timeout, model=None,
parser=RegularStructureParser):
"""
Call StructureParser.parse_structure() in a separate process and return
the output. Raise TimeoutError if the parser does not respond within
C{timeout} seconds.
@param structure_file: structure file to parse
@type structure_file: str
@param timeout: raise multiprocessing.TimeoutError if C{timeout} seconds
elapse before the parser completes its job
@type timeout: int
@param parser: any implementing L{AbstractStructureParser} class
@type parser: type
@return: parsed structure
@rtype: L{csb.structure.Structure}
"""
r = self.parse_async([structure_file], timeout, model, parser)
if len(r) > 0:
if r[0].exception is not None:
raise r[0].exception
else:
return r[0].result
return None
def abortable_worker(func, *args, **kwargs):
    # Returns ("null",) if func does not finish within `timeout` seconds.
    # Note: only the 'timeout' kwarg is used; other kwargs are not forwarded.
    timeout = kwargs.get('timeout', None)
    p = multiprocessing.dummy.Pool(1)  # requires `import multiprocessing.dummy`
    res = p.apply_async(func, args=args)
try:
out = res.get(timeout) # Wait timeout seconds for func to complete.
return out
except multiprocessing.TimeoutError:
return ("null",)
def test_map_chunksize(self):
try:
self.pool.map_async(sqr, [], chunksize=1).get(timeout=TIMEOUT1)
except multiprocessing.TimeoutError:
self.fail("pool.map_async with chunksize stalled on null list")
def test_async_timeout(self):
res = self.pool.apply_async(sqr, (6, TIMEOUT2 + 1.0))
get = TimingWrapper(res.get)
self.assertRaises(multiprocessing.TimeoutError, get, timeout=TIMEOUT2)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT2)
def pnk_request(self, url):
    pool = ThreadPool(processes=1)
    async_result = pool.apply_async(self.pnk_request_raw, (url,))
    try:
        ret_val = async_result.get(timeout=self.hard_timeout)
    except TimeoutError:
        traceback.print_exc()
        # raise requests' ConnectionError for easier handling if there's a hard timeout
        raise ConnectionError("Request received a hard timeout")
    return ret_val
def pnk_request(url):
    pool = ThreadPool(processes=1)
    async_result = pool.apply_async(pnk_request_raw, (url,))
    try:
        ret_val = async_result.get(timeout=int(conf.get("punkcrawler", "hard_timeout")))
    except TimeoutError:
        traceback.print_exc()
        pnk_log(mod, "Received hard timeout, raising timeout exception")
        # raise requests' ConnectionError for easier handling if there's a hard timeout
        raise ConnectionError("Request received a hard timeout")
    return ret_val
def check_requirements(self):
if 'TRAVIS' in os.environ:
raise CheckFailed("Can't build with Travis")
# This check needs to be performed out-of-process, because
# importing gi and then importing regular old pygtk afterward
# segfaults the interpreter.
try:
p = multiprocessing.Pool()
except:
return "unknown (can not use multiprocessing to determine)"
try:
res = p.map_async(backend_gtk3agg_internal_check, [0])
success, msg = res.get(timeout=10)[0]
except multiprocessing.TimeoutError:
p.terminate()
        # No result returned. Probably hanging, terminate the process.
success = False
raise CheckFailed("Check timed out")
except:
p.close()
# Some other error.
success = False
msg = "Could not determine"
raise
else:
p.close()
finally:
p.join()
if success:
return msg
else:
raise CheckFailed(msg)
def check_requirements(self):
if 'TRAVIS' in os.environ:
raise CheckFailed("Can't build with Travis")
# This check needs to be performed out-of-process, because
# importing gi and then importing regular old pygtk afterward
# segfaults the interpreter.
try:
p = multiprocessing.Pool()
except:
return "unknown (can not use multiprocessing to determine)"
try:
res = p.map_async(backend_gtk3cairo_internal_check, [0])
success, msg = res.get(timeout=10)[0]
except multiprocessing.TimeoutError:
p.terminate()
        # No result returned. Probably hanging, terminate the process.
success = False
raise CheckFailed("Check timed out")
except:
p.close()
success = False
raise
else:
p.close()
finally:
p.join()
if success:
return msg
else:
raise CheckFailed(msg)
@contextmanager  # assumes `from contextlib import contextmanager` at module top
def timeout(seconds):
    """Raise TimeoutError once the block has consumed `seconds` of CPU time.

    ITIMER_PROF counts process CPU time, not wall-clock time.
    """
    def handler(*args, **kwargs):
        logger.debug("TimeoutError in timeout context manager handler.")
        raise TimeoutError("Timeout after {} seconds".format(seconds))
signal.signal(signal.SIGPROF, handler)
signal.setitimer(signal.ITIMER_PROF, seconds)
try:
yield
finally:
signal.setitimer(signal.ITIMER_PROF, 0)
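Illustrative use; because the timer is ITIMER_PROF, only CPU time counts, so a block that merely sleeps will not be interrupted:

try:
    with timeout(2):
        while True:
            pass  # busy loop: burns CPU time; SIGPROF fires after ~2 s
except TimeoutError:
    print("timed out")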
From test_deepanimebot_classifiers.py, project DeepClassificationBot, author AntreasAntoniou.
def test_fetch_cvimage_from_url_timeout(monkeypatch):
def long_func(*args, **kwargs):
time.sleep(3)
monkeypatch.setattr(requests, 'get', long_func)
with pytest.raises(TimeoutError):
classifiers.fetch_cvimage_from_url('this url is ignored', timeout_max_timeout=1)
def timeout(max_timeout):
"""Timeout decorator, parameter in seconds."""
def timeout_decorator(f):
"""Wrap the original function."""
@functools.wraps(f)
def func_wrapper(self, *args, **kwargs):
"""Closure for function."""
            pool = multiprocessing.pool.ThreadPool(processes=1)
            # pop the control kwarg before dispatch so it is not forwarded to f
            timeout = kwargs.pop('timeout_max_timeout', max_timeout) or max_timeout
            async_result = pool.apply_async(f, (self,) + args, kwargs)
            # raises a TimeoutError if execution exceeds timeout
            return async_result.get(timeout)
return func_wrapper
return timeout_decorator
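A sketch of how a method decorated this way behaves; the `Slow` class is hypothetical, and `timeout_max_timeout` overrides the decorator's default per call:

import time

class Slow:
    @timeout(2)
    def work(self, seconds):
        time.sleep(seconds)
        return seconds

s = Slow()
s.work(1)                           # finishes inside the 2 s budget -> 1
s.work(1, timeout_max_timeout=0.2)  # raises multiprocessing.TimeoutError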