def _setup_env(self):
    """Generator body of a context manager: run inside a fresh scratch dir.

    Copies every binary in ``self._binaries`` into a new ``/tmp/tracer_*``
    directory, enables unlimited core dumps, chdirs into the scratch dir and
    yields ``(tmpdir, binary_replacements)``.  On exit the scratch dir is
    deleted and the cwd, RLIMIT_CORE and ``self._binaries`` are restored.
    """
    prefix = "/tmp/tracer_"
    curdir = os.getcwd()
    tmpdir = tempfile.mkdtemp(prefix=prefix)
    # allow cores to be dumped
    saved_limit = resource.getrlimit(resource.RLIMIT_CORE)
    resource.setrlimit(resource.RLIMIT_CORE, (resource.RLIM_INFINITY, resource.RLIM_INFINITY))
    # Remember the original binary paths so the finally block can restore them.
    binaries_old = [ ]
    for binary in self._binaries:
        binaries_old.append(binary)
    # One replacement path per binary, all inside the scratch directory.
    binary_replacements = [ ]
    for i, binary in enumerate(self._binaries):
        binary_replacements.append(os.path.join(tmpdir,"binary_replacement_%d" % i))
    for binary_o, binary_r in zip(binaries_old, binary_replacements):
        shutil.copy(binary_o, binary_r)
    self._binaries = binary_replacements
    # Repoint argv at the copies; skipped for multi-CB runs (presumably argv
    # is assembled differently there -- TODO confirm against callers).
    if self.argv is not None and not self.is_multicb:
        self.argv = self._binaries + self.argv[1:]
    os.chdir(tmpdir)
    try:
        yield (tmpdir,binary_replacements)
    finally:
        # Safety check: never rmtree anything but our own scratch directory.
        assert tmpdir.startswith(prefix)
        shutil.rmtree(tmpdir)
        os.chdir(curdir)
        resource.setrlimit(resource.RLIMIT_CORE, saved_limit)
        self._binaries = binaries_old
# Example source code for Python's getrlimit() (collected snippets)
def setUp(self):
    """Shrink the soft RLIMIT_NOFILE so the FD-exhaustion path is reachable."""
    self.openSockets = []
    if resource is not None:
        # Remember the original (soft, hard) pair so tearDown can restore it.
        original = resource.getrlimit(resource.RLIMIT_NOFILE)
        self.originalFileLimit = original
        resource.setrlimit(resource.RLIMIT_NOFILE, (128, original[1]))
        self.socketLimit = 256
def tearDown(self):
    """Close every socket the test opened, then restore RLIMIT_NOFILE."""
    while self.openSockets:
        sock = self.openSockets.pop()
        sock.close()
    if resource is not None:
        # OS X implicitly lowers the hard limit in the setrlimit call made
        # during setUp, so re-read the current hard limit and stay at or
        # below it -- otherwise this setrlimit would fail with EPERM.
        hardLimit = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
        softLimit = min(self.originalFileLimit[0], hardLimit)
        resource.setrlimit(resource.RLIMIT_NOFILE, (softLimit, hardLimit))
def test_above_fd_setsize(self):
    # A scalable implementation should have no problem with more than
    # FD_SETSIZE file descriptors. Since we don't know the value, we just
    # try to set the soft RLIMIT_NOFILE to the hard RLIMIT_NOFILE ceiling.
    soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
    try:
        resource.setrlimit(resource.RLIMIT_NOFILE, (hard, hard))
        # Undo the limit change when the test finishes.
        self.addCleanup(resource.setrlimit, resource.RLIMIT_NOFILE,
                        (soft, hard))
        NUM_FDS = min(hard, 2**16)
    except (OSError, ValueError):
        # Raising the limit failed (e.g. not permitted); fall back to the
        # current soft limit.
        NUM_FDS = soft
    # guard for already allocated FDs (stdin, stdout...)
    NUM_FDS -= 32
    s = self.SELECTOR()
    self.addCleanup(s.close)
    # Each iteration consumes two FDs (one socket pair).
    for i in range(NUM_FDS // 2):
        try:
            rd, wr = self.make_socketpair()
        except OSError:
            # too many FDs, skip - note that we should only catch EMFILE
            # here, but apparently *BSD and Solaris can fail upon connect()
            # or bind() with EADDRNOTAVAIL, so let's be safe
            self.skipTest("FD limit reached")
        try:
            s.register(rd, selectors.EVENT_READ)
            s.register(wr, selectors.EVENT_WRITE)
        except OSError as e:
            if e.errno == errno.ENOSPC:
                # this can be raised by epoll if we go over
                # fs.epoll.max_user_watches sysctl
                self.skipTest("FD limit reached")
            raise
    # Only the write halves are ready, so select() must report exactly one
    # event per registered pair.
    self.assertEqual(NUM_FDS // 2, len(s.select()))
def get_maxfd():
    """Return the hard limit on open file descriptors, or MAXFD if unlimited."""
    hard_limit = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
    if hard_limit == resource.RLIM_INFINITY:
        return MAXFD
    return hard_limit
def test_above_fd_setsize(self):
    # A scalable implementation should have no problem with more than
    # FD_SETSIZE file descriptors. Since we don't know the value, we just
    # try to set the soft RLIMIT_NOFILE to the hard RLIMIT_NOFILE ceiling.
    soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
    if hard == resource.RLIM_INFINITY:
        self.skipTest("RLIMIT_NOFILE is infinite")
    try:  # If we're on a *BSD system, the limit tag is different.
        _, bsd_hard = resource.getrlimit(resource.RLIMIT_OFILE)
        if bsd_hard == resource.RLIM_INFINITY:
            self.skipTest("RLIMIT_OFILE is infinite")
        if bsd_hard < hard:
            hard = bsd_hard
    # NOTE: AttributeError resource.RLIMIT_OFILE is not defined on Mac OS.
    except (OSError, resource.error, AttributeError):
        pass
    try:
        resource.setrlimit(resource.RLIMIT_NOFILE, (hard, hard))
        # Undo the limit change when the test finishes.
        self.addCleanup(resource.setrlimit, resource.RLIMIT_NOFILE,
                        (soft, hard))
        limit_nofile = min(hard, 2 ** 16)
    except (OSError, ValueError):
        # Could not raise the limit; work within the current soft limit.
        limit_nofile = soft
    # Guard against already allocated FDs
    limit_nofile -= 256
    limit_nofile = max(0, limit_nofile)
    s = self.make_selector()
    # Register limit_nofile // 2 socket pairs (two FDs each); only the write
    # sides are ready, so select() must report exactly that many events.
    for i in range(limit_nofile // 2):
        rd, wr = self.make_socketpair()
        s.register(rd, selectors2.EVENT_READ)
        s.register(wr, selectors2.EVENT_WRITE)
    self.assertEqual(limit_nofile // 2, len(s.select()))
def _setup_env(self):
    """Generator body of a context manager: trace inside a scratch directory.

    Creates a fresh ``/dev/shm/tracer_*`` directory, widens the core-dump
    filter for multi-binary runs, enables unlimited core dumps, chdirs into
    the scratch dir and yields ``(tmpdir, primary_binary_path)``.  On exit
    the scratch dir is removed and the cwd, RLIMIT_CORE and
    ``self.binaries`` are restored.
    """
    prefix = "/dev/shm/tracer_"
    curdir = os.getcwd()
    tmpdir = tempfile.mkdtemp(prefix=prefix)
    # dont prefilter the core
    if len(self.binaries) > 1:
        with open("/proc/self/coredump_filter", "wb") as f:
            # Bug fix: the file is opened in binary mode, so a bytes literal
            # is required -- writing the str "00000077" raised TypeError on
            # Python 3 and the filter was never widened.
            f.write(b"00000077")
    # allow cores to be dumped
    saved_limit = resource.getrlimit(resource.RLIMIT_CORE)
    resource.setrlimit(resource.RLIMIT_CORE, (resource.RLIM_INFINITY, resource.RLIM_INFINITY))
    # Keep the absolutized originals so the finally block can restore them.
    binaries_old = [os.path.abspath(binary) for binary in self.binaries]
    self.binaries = list(binaries_old)
    os.chdir(tmpdir)
    try:
        yield (tmpdir, self.binaries[0])
    finally:
        # Safety check: never rmtree anything but our own scratch directory.
        assert tmpdir.startswith(prefix)
        shutil.rmtree(tmpdir)
        os.chdir(curdir)
        resource.setrlimit(resource.RLIMIT_CORE, saved_limit)
        self.binaries = binaries_old
def set_max_runtime(seconds):
    """Deliver SIGXCPU (handled by time_exceeded) after *seconds* of CPU time."""
    # Keep the current hard ceiling; only the soft limit is tightened.
    hard_limit = resource.getrlimit(resource.RLIMIT_CPU)[1]
    resource.setrlimit(resource.RLIMIT_CPU, (seconds, hard_limit))
    signal.signal(signal.SIGXCPU, time_exceeded)
def clean_fds():
    """Close all non-stdio file descriptors.
    This should be called at the beginning of a program to avoid inheriting any
    unwanted file descriptors from the invoking process. Unfortunately, this
    is really common in unix!
    """
    rlimit_nofile = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
    # Cap the sweep at 4096 descriptors so an unbounded hard limit does not
    # turn this into millions of close() calls.
    total_descriptors = min(4096, rlimit_nofile)
    # os.closerange() is the stdlib form of the original close-and-ignore-
    # EBADF loop: it closes fds 3..total_descriptors-1, ignoring errors.
    os.closerange(3, total_descriptors)
def collect_snapshot(self):
    """Build a Snapshot of this process: app name, Python version, all
    resource limits, and the loaded-module inventory.

    Returns the Snapshot, or None if collection fails for any reason.
    """
    try:
        # Prefer an explicitly configured service name, then the web
        # framework's app identifier, finally the script name.
        if self.sensor.service_name:
            appname = self.sensor.service_name
        elif "FLASK_APP" in os.environ:
            appname = os.environ["FLASK_APP"]
        elif "DJANGO_SETTINGS_MODULE" in os.environ:
            appname = os.environ["DJANGO_SETTINGS_MODULE"].split('.')[0]
        else:
            appname = os.path.basename(sys.argv[0])
        s = Snapshot(name=appname,
                     version=sys.version,
                     rlimit_core=resource.getrlimit(resource.RLIMIT_CORE),
                     rlimit_cpu=resource.getrlimit(resource.RLIMIT_CPU),
                     rlimit_fsize=resource.getrlimit(
                         resource.RLIMIT_FSIZE),
                     rlimit_data=resource.getrlimit(resource.RLIMIT_DATA),
                     rlimit_stack=resource.getrlimit(
                         resource.RLIMIT_STACK),
                     rlimit_rss=resource.getrlimit(resource.RLIMIT_RSS),
                     rlimit_nproc=resource.getrlimit(
                         resource.RLIMIT_NPROC),
                     rlimit_nofile=resource.getrlimit(
                         resource.RLIMIT_NOFILE),
                     rlimit_memlock=resource.getrlimit(
                         resource.RLIMIT_MEMLOCK),
                     rlimit_as=resource.getrlimit(resource.RLIMIT_AS),
                     versions=self.collect_modules())
        return s
    except Exception as e:
        # Bug fix: the original `log.debug("collect_snapshot: ", str(e))`
        # passed the error as a %-format argument with no placeholder, so
        # the error text was never rendered.
        log.debug("collect_snapshot: %s", e)
        return None
def get_maxfd():
    """Highest usable FD count; falls back to MAXFD when the limit is infinite."""
    _, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
    return MAXFD if hard == resource.RLIM_INFINITY else hard
def set_open_files_limit():
    """Detect maximum supported number of open file and set it"""
    max_files = getrlimit(RLIMIT_NOFILE)[0]
    # Probe upward one descriptor at a time until the kernel rejects the
    # value with ValueError.  Bug fix: the original incremented max_files
    # only after a successful setrlimit but before detecting the failure,
    # so it logged a value one past the limit actually in effect.
    while True:
        try:
            setrlimit(RLIMIT_NOFILE, (max_files, max_files))
        except ValueError:
            # max_files was rejected; the previous value is what stuck.
            max_files -= 1
            break
        max_files += 1
    LOG.debug('Setting max files limit to %d', max_files)
def main():
    """Run every command in the global ``cmd_list`` with bounded parallelism
    and dump the results as JSON to stdout.
    """
    # use number of open files soft limit and num cores to determinate Popen limit
    # use lesser of 4 * num cores or half max open files - 10
    default_parallel_limit = os.sysconf('SC_NPROCESSORS_ONLN') * 4
    parallel_limit = globals().get("parallel_limit", None)
    if parallel_limit is None:
        parallel_limit = default_parallel_limit
    # Bug fix: "/" produces a float on Python 3; the parallelism limit is a
    # count, so use floor division to keep it an int.
    nofile_soft = resource.getrlimit(resource.RLIMIT_NOFILE)[0]
    parallel_limit = min(parallel_limit, (nofile_soft - 20) // 2)
    cmd_list = globals()["cmd_list"]
    results = list(multicall(cmd_list, parallel_limit))
    json.dump(results, sys.stdout)
def get_maxfd():
    """Return the hard RLIMIT_NOFILE, substituting MAXFD for an infinite limit."""
    limit = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
    return limit if limit != resource.RLIM_INFINITY else MAXFD
def limit_resources():
    """Raise the FD soft limit to its ceiling and cap the address space at 16 GiB."""
    nofile_hard = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
    resource.setrlimit(resource.RLIMIT_NOFILE, (nofile_hard, nofile_hard))  # XXX compare to max threads etc.
    as_hard = resource.getrlimit(resource.RLIMIT_AS)[1]  # RLIMIT_VMEM does not exist?!
    sixteen_gib = 16 * 1024 * 1024 * 1024
    resource.setrlimit(resource.RLIMIT_AS, (sixteen_gib, as_hard))  # XXX config
def get_maxfd():
    """Maximum file-descriptor count for this process (MAXFD when unbounded)."""
    nofile_limits = resource.getrlimit(resource.RLIMIT_NOFILE)
    if nofile_limits[1] != resource.RLIM_INFINITY:
        return nofile_limits[1]
    return MAXFD
def setUp(self):
    """Lower the soft file-descriptor limit so FD exhaustion is testable."""
    self.openSockets = []
    if resource is None:
        return
    # Stash the original (soft, hard) pair for tearDown to restore.
    self.originalFileLimit = resource.getrlimit(resource.RLIMIT_NOFILE)
    resource.setrlimit(resource.RLIMIT_NOFILE, (128, self.originalFileLimit[1]))
    self.socketLimit = 256
def tearDown(self):
    """Release every socket the test opened, then put RLIMIT_NOFILE back."""
    while self.openSockets:
        self.openSockets.pop().close()
    if resource is None:
        return
    # OS X quietly lowers the hard limit during setUp's setrlimit call, so
    # fetch the current hard limit and stay at or below it; passing the old
    # hard value back would fail with a permission error.
    hard = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
    soft = min(self.originalFileLimit[0], hard)
    resource.setrlimit(resource.RLIMIT_NOFILE, (soft, hard))
def setMemoryLimit(rsrc, megs = 200):
    """Cap the soft limit of resource *rsrc* at *megs* megabytes (default 200).

    The hard limit is left unchanged; the soft limit actually in effect is
    re-read afterwards and logged.
    """
    size = megs * 1048576
    soft, hard = getrlimit(rsrc)
    # Fix: the old inline comment claimed "limit to one kilobyte"; the real
    # cap is megs * 2**20 bytes.
    setrlimit(rsrc, (size, hard))
    soft, hard = getrlimit(rsrc)
    info ('Limit changed to :'+ str( soft))
def SetMemoryLimits():
    """Raise the address-space soft limit to the hard limit, best-effort.

    Platforms without the resource module (e.g. Windows) and processes that
    are not permitted to change the limit are silently ignored.
    """
    try:
        from resource import getrlimit, setrlimit, RLIMIT_AS
        soft, hard = getrlimit(RLIMIT_AS)
        setrlimit(RLIMIT_AS, (hard, hard))
    except (ImportError, ValueError, OSError):
        # Narrowed from a bare "except:": keep the best-effort semantics
        # without hiding unrelated programming errors.  The original's
        # trailing getrlimit() re-read was dead code and is dropped.
        pass