def acquire(self, blocking=False):
    """Acquire the file lock, returning True on success.

    Uses msvcrt.locking(), so this implementation is Windows-only.
    Expects os, sys and errno to be imported at module level.
    """
    import msvcrt  # @UnresolvedImport
    flags = os.O_CREAT | os.O_WRONLY
    mode = msvcrt.LK_NBLCK
    if blocking:
        mode = msvcrt.LK_LOCK
    self.fd = os.open(self.filename, flags)
    try:
        # Lock the first byte of the file; the descriptor stays open
        # for as long as the lock is held.
        msvcrt.locking(self.fd, mode, 1)
        return True
    except IOError:
        e = sys.exc_info()[1]
        if e.errno not in (errno.EAGAIN, errno.EACCES, errno.EDEADLK):
            raise
        os.close(self.fd)
        self.fd = None
        return False

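Since acquire() leaves self.fd open while the lock is held, a release counterpart is needed to drop the lock. The method below is only a sketch of what that counterpart might look like; it is not part of the original class, and it assumes the same self.fd attribute and the Windows-only msvcrt module used above.

def release(self):
    # Hypothetical counterpart to acquire(): unlock the first byte and
    # close the descriptor that acquire() left open.
    import msvcrt  # Windows-only, same as in acquire()
    if self.fd is not None:
        msvcrt.locking(self.fd, msvcrt.LK_UNLCK, 1)
        os.close(self.fd)
        self.fd = None
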
def cpu_affinity_set(self, cpus):
    # Pre-emptively check if CPUs are valid because the C
    # function has a weird behavior in case of invalid CPUs,
    # see: https://github.com/giampaolo/psutil/issues/586
    allcpus = tuple(range(len(per_cpu_times())))
    for cpu in cpus:
        if cpu not in allcpus:
            raise ValueError("invalid CPU #%i (choose between %s)"
                             % (cpu, allcpus))
    try:
        cext.proc_cpu_affinity_set(self.pid, cpus)
    except OSError as err:
        # 'man cpuset_setaffinity' about EDEADLK:
        # <<the call would leave a thread without a valid CPU to run
        # on because the set does not overlap with the thread's
        # anonymous mask>>
        if err.errno in (errno.EINVAL, errno.EDEADLK):
            for cpu in cpus:
                if cpu not in allcpus:
                    raise ValueError(
                        "invalid CPU #%i (choose between %s)" % (
                            cpu, allcpus))
        raise

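This method is an internal psutil backend (the cpuset_setaffinity comment points at the FreeBSD implementation); the public entry point is Process.cpu_affinity(), which raises ValueError for out-of-range CPUs as shown above. A minimal usage sketch, assuming a platform where psutil supports CPU affinity (Linux, Windows, FreeBSD) and with illustrative CPU numbers:

import psutil

p = psutil.Process()          # current process
print(p.cpu_affinity())       # current affinity, e.g. [0, 1, 2, 3]
try:
    p.cpu_affinity([0])       # pin the process to CPU 0
except ValueError as exc:
    # raised when a requested CPU number is not a valid CPU on this machine
    print("invalid CPU requested:", exc)
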
def read(socket, n=4096):
    """
    Reads at most n bytes from socket
    """
    recoverable_errors = (errno.EINTR, errno.EDEADLK, errno.EWOULDBLOCK)
    # wait for data to become available
    if not isinstance(socket, NpipeSocket):
        select.select([socket], [], [])
    try:
        if hasattr(socket, 'recv'):
            return socket.recv(n)
        return os.read(socket.fileno(), n)
    except EnvironmentError as e:
        if e.errno not in recoverable_errors:
            raise
        # Recoverable error (EINTR/EDEADLK/EWOULDBLOCK): fall through and
        # return None so the caller can retry.

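Because read() returns None when it swallows one of the recoverable errors, callers are expected to retry. A hedged sketch of such a retry loop; read_all is a hypothetical helper, not part of the original module:

def read_all(socket):
    # Hypothetical helper: keep calling read() until the peer closes.
    chunks = []
    while True:
        data = read(socket)
        if data is None:
            # recoverable error (EINTR/EDEADLK/EWOULDBLOCK); try again
            continue
        if not data:
            break  # empty read: connection closed
        chunks.append(data)
    return b''.join(chunks)
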
def _lock(self):
    """Lock the entire multistore."""
    self._thread_lock.acquire()
    try:
        self._file.open_and_lock()
    except IOError as e:
        if e.errno == errno.ENOSYS:
            logger.warn('File system does not support locking the '
                        'credentials file.')
        elif e.errno == errno.ENOLCK:
            logger.warn('File system is out of resources for writing the '
                        'credentials file (is your disk full?).')
        elif e.errno == errno.EDEADLK:
            logger.warn('Lock contention on multistore file, opening '
                        'in read-only mode.')
        elif e.errno == errno.EACCES:
            logger.warn('Cannot access credentials file.')
        else:
            raise
    if not self._file.is_locked():
        self._read_only = True
        if self._warn_on_readonly:
            logger.warn('The credentials file (%s) is not writable. '
                        'Opening in read-only mode. Any refreshed '
                        'credentials will only be '
                        'valid for this run.', self._file.filename())
    if os.path.getsize(self._file.filename()) == 0:
        logger.debug('Initializing empty multistore file')
        # The multistore is empty so write out an empty file.
        self._data = {}
        self._write()
    elif not self._read_only or self._data is None:
        # Only refresh the data if we are read/write or we haven't
        # cached the data yet. If we are readonly, we assume it isn't
        # changing out from under us and that we only have to read it
        # once. This prevents us from whacking any new access keys that
        # we have cached in memory but were unable to write out.
        self._refresh_data_cache()

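These _lock() methods (apparently variants of oauth2client's multistore_file module) treat EDEADLK as ordinary lock contention and fall back to read-only mode. On POSIX systems the error typically comes from fcntl()-style record locking, where the kernel refuses a blocking lock that would deadlock two processes. A minimal sketch of that behaviour, not part of the original code and assuming Linux semantics for deadlock detection:

import errno
import fcntl

def lock_or_report_deadlock(fileobj):
    # Take a blocking exclusive record lock on an already-open file.
    try:
        fcntl.lockf(fileobj, fcntl.LOCK_EX)
        return True
    except OSError as e:
        if e.errno == errno.EDEADLK:
            # The kernel detected that waiting here would deadlock with
            # another process that is itself waiting on a lock we hold.
            return False
        raise
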
def _lock(self):
    """Lock the entire multistore."""
    self._thread_lock.acquire()
    try:
        self._file.open_and_lock()
    except (IOError, OSError) as e:
        if e.errno == errno.ENOSYS:
            logger.warn('File system does not support locking the '
                        'credentials file.')
        elif e.errno == errno.ENOLCK:
            logger.warn('File system is out of resources for writing the '
                        'credentials file (is your disk full?).')
        elif e.errno == errno.EDEADLK:
            logger.warn('Lock contention on multistore file, opening '
                        'in read-only mode.')
        elif e.errno == errno.EACCES:
            logger.warn('Cannot access credentials file.')
        else:
            raise
    if not self._file.is_locked():
        self._read_only = True
        if self._warn_on_readonly:
            logger.warn('The credentials file (%s) is not writable. '
                        'Opening in read-only mode. Any refreshed '
                        'credentials will only be '
                        'valid for this run.', self._file.filename())
    if os.path.getsize(self._file.filename()) == 0:
        logger.debug('Initializing empty multistore file')
        # The multistore is empty so write out an empty file.
        self._data = {}
        self._write()
    elif not self._read_only or self._data is None:
        # Only refresh the data if we are read/write or we haven't
        # cached the data yet. If we are readonly, we assume it isn't
        # changing out from under us and that we only have to read it
        # once. This prevents us from whacking any new access keys that
        # we have cached in memory but were unable to write out.
        self._refresh_data_cache()

def _lock(self):
    """Lock the entire multistore."""
    self._thread_lock.acquire()
    try:
        self._file.open_and_lock()
    except IOError as e:
        if e.errno == errno.ENOSYS:
            logger.warn('File system does not support locking the '
                        'credentials file.')
        elif e.errno == errno.ENOLCK:
            logger.warn('File system is out of resources for writing the '
                        'credentials file (is your disk full?).')
        elif e.errno == errno.EDEADLK:
            logger.warn('Lock contention on multistore file, opening '
                        'in read-only mode.')
        else:
            raise
    if not self._file.is_locked():
        self._read_only = True
        if self._warn_on_readonly:
            logger.warn('The credentials file (%s) is not writable. '
                        'Opening in read-only mode. Any refreshed '
                        'credentials will only be '
                        'valid for this run.', self._file.filename())
    if os.path.getsize(self._file.filename()) == 0:
        logger.debug('Initializing empty multistore file')
        # The multistore is empty so write out an empty file.
        self._data = {}
        self._write()
    elif not self._read_only or self._data is None:
        # Only refresh the data if we are read/write or we haven't
        # cached the data yet. If we are readonly, we assume it isn't
        # changing out from under us and that we only have to read it
        # once. This prevents us from whacking any new access keys that
        # we have cached in memory but were unable to write out.
        self._refresh_data_cache()

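Each _lock() acquires a thread lock first and a file lock second, so the matching unlock path has to release them in the reverse order. A sketch of what that counterpart typically looks like, assuming the same self._file (a locked-file wrapper with unlock_and_close()) and self._thread_lock attributes; it is shown here for illustration rather than copied from the original source:

def _unlock(self):
    """Release the file lock, then the in-process thread lock."""
    self._file.unlock_and_close()
    self._thread_lock.release()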