def __enter__(self):
    """Acquire an exclusive lock on the PID file and record our PID.

    Returns the open pidfile positioned at the start, or ``self.pidfile``
    unchanged when no path is configured (locking disabled).

    Raises SystemExit when another process already holds the lock.
    """
    if self.path is None:
        # No pidfile configured -- locking is a no-op.
        return self.pidfile
    self.pidfile = open(self.path, "a+")
    try:
        # Non-blocking exclusive lock: fail fast instead of waiting.
        fcntl.flock(self.pidfile.fileno(), fcntl.LOCK_EX | fcntl.LOCK_NB)
    except IOError:
        # Fix: close the handle before bailing out so the file
        # descriptor is not leaked when another instance holds the lock.
        self.pidfile.close()
        self.pidfile = None
        raise SystemExit("Already running according to " + self.path)
    # Replace any stale contents with our own PID.
    self.pidfile.seek(0)
    self.pidfile.truncate()
    self.pidfile.write(str(os.getpid()))
    self.pidfile.flush()
    self.pidfile.seek(0)
    return self.pidfile
# Collected example snippets of fcntl.LOCK_NB usage
def do_magic(self):
    """Single-instance guard: try to grab an exclusive lock file.

    Sets ``self.is_running`` to True when another instance already holds
    the lock; otherwise leaves ``self.fh`` open so the lock is retained
    for the lifetime of the process.
    """
    import errno
    if OS_WIN:
        try:
            # On Windows an existing lock file can be removed unless
            # another process still has it open.
            if os.path.exists(LOCK_PATH):
                os.unlink(LOCK_PATH)
            self.fh = os.open(LOCK_PATH, os.O_CREAT | os.O_EXCL | os.O_RDWR)
        except EnvironmentError as err:
            # Fix: use the symbolic errno instead of the magic number 13.
            if err.errno == errno.EACCES:
                self.is_running = True
            else:
                raise
    else:
        # Fix: pre-set the attribute so the except clause below cannot
        # hit an AttributeError when open() itself raises.
        self.fh = None
        try:
            self.fh = open(LOCK_PATH, 'w')
            fcntl.lockf(self.fh, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except EnvironmentError:
            if self.fh is not None:
                # File opened but the lock is taken -> already running.
                self.is_running = True
            else:
                raise
def open(self):
    """Open the shelve-backed cache, degrading to read-only when locked.

    Takes a non-blocking exclusive lock on ``<CACHE_PATH>.lock``; when
    another process holds it, the cache is opened read-only instead.
    """
    if self.opened:
        return
    self.lock = open(SETTINGS.CACHE_PATH + '.lock', 'ab')
    try:
        fcntl.flock(self.lock, fcntl.LOCK_EX | fcntl.LOCK_NB)
        # 'c' -> read-write, creating the shelve if needed.
        mode = 'c'
    except IOError:
        logger.warn("Cache locked, using read-only")
        mode = 'r'
        self.lock.close()
        self.lock = None
    try:
        self.storage = shelve.open(SETTINGS.CACHE_PATH, mode)
    except Exception as e:
        if mode != 'c':
            # Read-only open failed and we hold no lock -- nothing to do.
            raise
        logger.warn("Dropping corrupted cache on %s", e)
        # NOTE(review): this truncates the *.lock* file, not the cache
        # itself -- presumably shelve.open('c') below recreates the
        # cache; confirm the truncate target is intentional.
        self.lock.truncate(0)
        self.storage = shelve.open(SETTINGS.CACHE_PATH, mode)
    self.opened = True
def lock_method(lock_filename):
    """Decorator: serialize calls via an exclusive OS file lock.

    While the wrapped function runs, ``lock_filename`` is held under a
    non-blocking exclusive lock; a concurrent caller exits immediately.
    Snippet based on
    http://linux.byexamples.com/archives/494/how-can-i-avoid-running-a-python-script-multiple-times-implement-file-locking/
    """
    def decorator(func):
        @functools.wraps(func)
        def lock_and_run_method(*args, **kwargs):
            lock_handle = open(lock_filename, 'w')
            try:
                fcntl.lockf(lock_handle, fcntl.LOCK_EX | fcntl.LOCK_NB)
            except IOError:
                message = (
                    "This program is already running. Please stop the current process or "
                    + "remove " + lock_filename + " to run this script."
                )
                raise SystemExit(message)
            else:
                return func(*args, **kwargs)
        return lock_and_run_method
    return decorator
def store(proxys):
    """Append proxies to the shared proxy file under an exclusive lock.

    *proxys* may be a list of strings or a single string; each entry is
    written on its own line.  The non-blocking lock is retried up to 10
    times, sleeping 3 seconds between attempts.
    """
    pidfile = open(proxyFilePath, "a")
    try:
        for _attempt in range(10):
            try:
                # LOCK_EX: exclusive lock; LOCK_NB: raise instead of
                # blocking when another process holds the lock.
                fcntl.flock(pidfile, fcntl.LOCK_EX | fcntl.LOCK_NB)
            except IOError:
                # Fix: catch only the lock failure (the original bare
                # except also swallowed write errors) and retry.
                time.sleep(3)
                continue
            # Fix: isinstance instead of type(x) == type([]).
            if isinstance(proxys, list):
                for proxy in proxys:
                    pidfile.write(proxy + '\n')
            else:
                pidfile.write(proxys + '\n')
            break
    finally:
        # Fix: always release the handle, even when every attempt failed
        # (the original leaked it in that case).
        pidfile.close()
def lock_path(path, timeout=0):
    """Acquire an exclusive lock on *path*, polling until *timeout*.

    Returns a zero-argument callable that releases the lock and closes
    the descriptor.  Raises LockError when the lock cannot be obtained
    within *timeout* seconds.
    """
    fd = os.open(path, os.O_CREAT)
    # Mark the descriptor close-on-exec so children don't inherit it.
    fcntl.fcntl(fd, fcntl.F_SETFD,
                fcntl.fcntl(fd, fcntl.F_GETFD, 0) | fcntl.FD_CLOEXEC)
    start_time = time.time()
    while True:
        try:
            fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
            break
        except IOError:
            if start_time < time.time() - timeout:
                raise LockError("Couldn't obtain lock")
        time.sleep(0.1)

    def unlock_path():
        # Release the lock and dispose of the descriptor.
        fcntl.flock(fd, fcntl.LOCK_UN)
        os.close(fd)
    return unlock_path
def __init__(self, addr, conf, log, fd=None):
    """Bind a unix-domain listener at *addr*.

    When no pre-opened *fd* is supplied, a stale socket file left at
    *addr* by a previous run is removed first; a non-socket file at that
    path is treated as a configuration error.
    """
    if fd is None:
        try:
            st = os.stat(addr)
        except OSError as e:
            # ENOENT just means no leftover file -- anything else is real.
            if e.args[0] != errno.ENOENT:
                raise
        else:
            if stat.S_ISSOCK(st.st_mode):
                # Stale socket from a previous run: safe to remove.
                os.remove(addr)
            else:
                raise ValueError("%r is not a socket" % addr)
    self.parent = os.getpid()
    super(UnixSocket, self).__init__(addr, conf, log, fd=fd)
    # each arbiter grabs a shared lock on the unix socket.
    fcntl.lockf(self.sock, fcntl.LOCK_SH | fcntl.LOCK_NB)
def write_pid(path):
    """Writes our PID to *path*."""
    try:
        pid = os.getpid()
        with io.open(path, mode='w', encoding='utf-8') as pidfile:
            # Get a non-blocking exclusive lock
            fcntl.flock(pidfile.fileno(), fcntl.LOCK_EX | fcntl.LOCK_NB)
            # Replace any stale contents with just our PID.
            pidfile.seek(0)
            pidfile.truncate(0)
            # Fix: a u'' literal works on both Python 2 (io.open text
            # mode requires unicode) and Python 3 (where the unicode()
            # builtin used before no longer exists).
            pidfile.write(u"%d" % pid)
    except:
        logging.error(_("Could not write PID file: %s") % path)
        raise  # This raises the original exception
    finally:
        try:
            # The with-block already closed the file; this extra close is
            # a harmless belt-and-braces step kept from the original.
            pidfile.close()
        except:
            pass
def isSingleInstance(flavor_id=""):
    """Return True when this is the only running instance for *flavor_id*.

    A lock file derived from the script path is created in the temp
    directory; holding it (fcntl lock on POSIX, exclusive create on
    Windows) marks the instance as active.  The handle is stored in the
    module-level ``fp`` so the lock survives for the process lifetime.
    """
    global fp
    script = os.path.splitext(os.path.abspath(sys.argv[0]))[0]
    basename = script.replace("/", "-").replace(":", "").replace("\\", "-") \
        + '-%s' % flavor_id + '.lock'
    lockfile = os.path.normpath(tempfile.gettempdir() + '/' + basename)
    if sys.platform == 'win32':
        # Windows: rely on O_EXCL create; unlinking first clears stale files.
        try:
            if os.path.exists(lockfile):
                os.unlink(lockfile)
            fp = os.open(lockfile, os.O_CREAT | os.O_EXCL | os.O_RDWR)
        except OSError:
            return False
        return True
    # non Windows: a non-blocking exclusive lock on the file.
    fp = open(lockfile, 'w')
    try:
        fcntl.lockf(fp, fcntl.LOCK_EX | fcntl.LOCK_NB)
    except IOError:
        return False
    return True
def acquire(self):
    """Block until the system-wide I2C mutex file is locked by us.

    Polls every 1 ms: each attempt re-opens /run/lock/DexterLockI2C and
    tries a non-blocking exclusive lock on it.
    """
    if self.mutex_debug:
        print("I2C mutex acquire")
    while True:
        try:
            self.DexterLockI2C_handle = open('/run/lock/DexterLockI2C', 'w')
            fcntl.lockf(self.DexterLockI2C_handle, fcntl.LOCK_EX | fcntl.LOCK_NB)
            break
        except IOError:
            # already locked by a different process -- retry shortly
            time.sleep(0.001)
        except Exception as e:
            # Unexpected failure: report it and keep trying, as before.
            print(e)
    if self.mutex_debug:
        print("I2C mutex acquired {}".format(time.time()))
def lock(self):
    """
    Locks the package to avoid concurrent operations on its shared
    resources.
    Currently, the only resource shared among scripts executed from
    different directories is the repository.
    """
    if not self.locking_enabled:
        LOG.debug("This package has no shared resources to lock")
        return
    LOG.debug("Checking for lock on file {}.".format(self.lock_file_path))
    self.lock_file = open(self.lock_file_path, "w")
    try:
        # First probe without blocking so we can tell the user why we
        # are about to wait.
        fcntl.lockf(self.lock_file, fcntl.LOCK_EX | fcntl.LOCK_NB)
    except IOError as exc:
        # errno 11 == EAGAIN: somebody else currently holds the lock.
        RESOURCE_UNAVAILABLE_ERROR = 11
        if exc.errno == RESOURCE_UNAVAILABLE_ERROR:
            LOG.info("Waiting for other process to finish operations "
                     "on {}.".format(self.name))
        else:
            raise
    # Blocking acquire: a no-op re-lock when the probe above succeeded,
    # otherwise waits for the other process to release the file.
    fcntl.lockf(self.lock_file, fcntl.LOCK_EX)
def write_data(self):
    """Persist in-memory QUADS state to the YAML config under an
    exclusive lock.  Returns True on success, False otherwise.
    (Python 2 module: note the comma except syntax below.)"""
    if self.config_newer_than_data():
        # On-disk config is newer -- reload instead of clobbering it.
        self.read_data()
        return False
    else:
        try:
            self.data = {"clouds":self.quads.clouds.data, "hosts":self.quads.hosts.data, "history":self.quads.history.data, "cloud_history":self.quads.cloud_history.data}
            with open(self.config, 'w') as yaml_file:
                # Non-blocking exclusive lock; an IOError here lands in
                # the generic handler below.
                fcntl.flock(yaml_file, fcntl.LOCK_EX | fcntl.LOCK_NB)
                yaml_file.write(yaml.dump(self.data, default_flow_style=False))
                fcntl.flock(yaml_file, fcntl.LOCK_UN)
            # Re-read so in-memory state reflects what was just written.
            self.read_data()
            return True
        except Exception, ex:
            self.logger.error("There was a problem with your file %s" % ex)
            return False
def _try_lock():
    """Check and create lock file - prevent running application twice.
    Return lock file handler.
    """
    lock_file_path = _find_config_file("app.lock", False)
    _check_dir_for_file(lock_file_path)
    try:
        if fcntl is not None:
            # POSIX: hold a non-blocking exclusive lock; the returned
            # handle must stay referenced by the caller to keep the lock.
            lock_file = open(lock_file_path, "w")
            fcntl.lockf(lock_file, fcntl.LOCK_EX | fcntl.LOCK_NB)
        else:
            # No fcntl (e.g. Windows): fall back to treating the mere
            # existence of the file as the lock.
            if os.path.isfile(lock_file_path):
                _LOG.error("another instance detected (lock file exists) "
                           "- exiting")
                return None
            lock_file = open(lock_file_path, "w")
        return lock_file
    except IOError as err:
        import errno
        # EAGAIN means the lock is held by another running instance.
        if err.errno == errno.EAGAIN:
            _LOG.error("another instance detected - exiting")
        else:
            _LOG.exception("locking failed: %s", err)
        return None
def _lock(fileno):
    """Try to lock a file. Return True on success."""
    # closing the file unlocks it, so we don't need to unlock here
    if platform.system() != 'Windows':
        try:
            fcntl.lockf(fileno, fcntl.LOCK_EX | fcntl.LOCK_NB)
        # the docs recommend catching both of these
        except (BlockingIOError, PermissionError):
            return False
        return True
    try:
        msvcrt.locking(fileno, msvcrt.LK_NBLCK, 10)
    except PermissionError:
        return False
    return True
def is_tor_data_dir_usable(tor_data_dir):
    """
    Checks if the Tor data dir specified is usable. This means that
    it is not being locked and we have permissions to write to it.

    Returns True when usable, False when locked or unwritable.
    """
    if not os.path.exists(tor_data_dir):
        # A directory we would create ourselves is considered usable.
        return True
    lock_path = os.path.join(tor_data_dir, 'lock')
    try:
        lock_file = open(lock_path, 'w')
    except (IOError, OSError) as err:
        if err.errno == errno.EACCES:
            # Permission error
            return False
        raise
    try:
        fcntl.flock(lock_file, fcntl.LOCK_EX | fcntl.LOCK_NB)
        return True
    except (IOError, OSError) as err:
        if err.errno == errno.EACCES:
            # Permission error
            return False
        elif err.errno == errno.EAGAIN:
            # File locked
            return False
        # Fix: the original fell off the end of the handler here and
        # silently returned None; re-raise so callers see real failures.
        raise
    finally:
        # Fix: close the handle we opened (the original leaked it);
        # closing also drops our probe lock.
        lock_file.close()
def flock(path):
    """Attempt to acquire a POSIX file lock.

    Generator meant to drive a context manager: yields True when the
    non-blocking exclusive lock on *path* was obtained, False when it
    was not; on resumption the lock (if held) is released and the file
    closed by the with-block.
    """
    with open(path, "w+") as lf:
        try:
            fcntl.flock(lf, fcntl.LOCK_EX | fcntl.LOCK_NB)
            acquired = True
            yield acquired
        except OSError:
            # Lock already held elsewhere (or an OSError thrown into the
            # generator at the yield above): report failure to consumer.
            acquired = False
            yield acquired
        finally:
            if acquired:
                fcntl.flock(lf, fcntl.LOCK_UN)
def open_with_lock(filename, mode):
    """Open *filename* and yield it once an exclusive flock is held.

    Generator meant to drive a context manager: polls every 0.1 s until
    the non-blocking lock succeeds, yields the open file, then releases
    the lock best-effort when resumed.
    """
    with open(filename, mode) as f:
        while True:
            try:
                fcntl.flock(f.fileno(), fcntl.LOCK_EX | fcntl.LOCK_NB)
                yield f
                break
            except IOError as e:
                # raise on unrelated IOErrors
                if e.errno != errno.EAGAIN:
                    raise
                else:
                    time.sleep(0.1)
        try:
            # Best-effort unlock; closing the file in the with-block
            # would drop the lock anyway.
            fcntl.flock(f.fileno(), fcntl.LOCK_UN)
        except Exception:
            pass
def acquire_lock(path):
    """
    little tool to do EAGAIN until lockfile released
    :param path:
    :return: path
    """
    handle = open(path, 'w')
    while True:
        send_to_syslog("attempting to acquire lock %s" % path)
        try:
            fcntl.lockf(handle, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except IOError as e:
            send_to_syslog("failed to acquire lock %s because '%s' - waiting 1 second" % (path, e))
            time.sleep(1)
        else:
            send_to_syslog("acquired lock %s" % path)
            return handle
def uidNumber_getnext(self):
    """Get the next available uidNumber for adding a new user.
    Locks uidNumber file, reads number. Returns (file descriptor,
    uidNumber). uidNumber_savenext() must be called once the
    uidNumber is used successfully."""
    uid_num_file = os.open(rbconfig.file_uidNumber, os.O_RDWR)
    retries = 0
    while 1:
        try:
            # Non-blocking exclusive lock; retried below because another
            # admin session may hold the file briefly.
            fcntl.lockf(uid_num_file, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except IOError:
            retries += 1
            if retries == 20:
                raise RBFatalError(
                    ('Could not lock uidNumber.txt file after 20 attempts.'
                     'Please try again!'))
            # Back off half a second between attempts.
            time.sleep(0.5)
        else:
            break
    # The file holds the next free uid as a short decimal string.
    num_uid = int(os.read(uid_num_file, 32))
    return uid_num_file, num_uid
def main():
    """Entry point: export Ceph RGW metrics once, guarded by a lock file
    so that only one exporter instance runs at a time."""
    exit_status = 1
    try:
        args = parse_args()
        # Make sure the exporter is only running once.
        lock_file = '/var/lock/{}.lock'.format(os.path.basename(sys.argv[0]))
        lock_fd = os.open(lock_file, os.O_CREAT)
        lock_success = False
        try:
            fcntl.flock(lock_fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
            lock_success = True
        except IOError:
            msg = 'Failed to export metrics, another instance is running.'
            syslog.syslog(syslog.LOG_INFO, msg)
            sys.stderr.write(msg + '\n')
        if lock_success:
            # Create a new registry, otherwise unwanted default collectors are
            # added automatically.
            registry = prometheus_client.CollectorRegistry()
            # Register our own collector and write metrics to STDOUT.
            registry.register(CephRgwCollector(**vars(args)))
            sys.stdout.write(prometheus_client.generate_latest(registry))
            sys.stdout.flush()
            # Unlock the lock file.
            fcntl.flock(lock_fd, fcntl.LOCK_UN)
            exit_status = 0
    except Exception as e:
        syslog.syslog(syslog.LOG_ERR, str(e))
    # Cleanup
    # NOTE(review): lock_fd/lock_success are unbound if parse_args() or
    # os.open() raised above -- this would NameError here; confirm upstream.
    os.close(lock_fd)
    if lock_success:
        try:
            os.unlink(lock_file)
        except:
            pass
    sys.exit(exit_status)
def _lockonly(file):
    """Try a non-blocking exclusive flock on *file*; True on success."""
    _msg('got file #', file.fileno())
    try:
        flock(file, LOCK_EX | LOCK_NB)
    except IOError:
        _msg('failed to lock')
        return False
    _msg('locked successfully')
    return True
def lock (self):
    '''
    Creates and holds on to the lock file with exclusive access.
    Returns True if lock successful, False if it is not, and raises
    an exception upon operating system errors encountered creating the
    lock file.
    (Python 2 module: note the comma except syntax below.)
    '''
    try:
        #
        # Create or else open and truncate lock file, in read-write mode.
        #
        # A crashed app might not delete the lock file, so the
        # os.O_CREAT | os.O_EXCL combination that guarantees
        # atomic create isn't useful here. That is, we don't want to
        # fail locking just because the file exists.
        #
        # Could use os.O_EXLOCK, but that doesn't exist yet in my Python
        #
        self.lockfd = os.open (self.lockfile,
                               os.O_TRUNC | os.O_CREAT | os.O_RDWR)
        # Acquire exclusive lock on the file, but don't block waiting for it
        fcntl.flock (self.lockfd, fcntl.LOCK_EX | fcntl.LOCK_NB)
        # Writing to file is pointless, nobody can see it
        os.write (self.lockfd, "My Lockfile")
        return True
    except (OSError, IOError), e:
        # Lock cannot be acquired is okay, everything else reraise exception
        if e.errno in (errno.EACCES, errno.EAGAIN):
            return False
        else:
            raise
def __enter__(self):
    """Lock the PID file and write the current PID into it.

    Returns the open pidfile positioned at the start; raises SystemExit
    when another process already holds the lock.
    """
    self.pidfile = open(self.path, "a+")
    try:
        # Non-blocking exclusive lock: fail fast instead of waiting.
        fcntl.flock(self.pidfile.fileno(), fcntl.LOCK_EX | fcntl.LOCK_NB)
    except IOError:
        # Fix: don't leak the file handle when the lock is already taken.
        self.pidfile.close()
        raise SystemExit("Already running according to " + self.path)
    # Replace any stale contents with our own PID.
    self.pidfile.seek(0)
    self.pidfile.truncate()
    self.pidfile.write(str(os.getpid()))
    self.pidfile.flush()
    self.pidfile.seek(0)
    return self.pidfile
def __init__(self, mutex_name):
    """Acquire a system-wide mutex backed by a locked temp file.

    Sets ``self._acquired`` to reflect success.  Release goes through
    ``self._release_mutex``, which is also registered as a weakref
    finalizer so the lock is dropped when this object is collected.
    """
    check_valid_mutex_name(mutex_name)
    filename = os.path.join(tempfile.gettempdir(), mutex_name)
    try:
        handle = open(filename, 'w')
        fcntl.flock(handle, fcntl.LOCK_EX | fcntl.LOCK_NB)
    except:
        # Couldn't take the lock: another process owns the mutex.
        self._release_mutex = NULL
        self._acquired = False
        try:
            handle.close()
        except:
            pass
    else:
        def release_mutex(*args, **kwargs):
            # Note: can't use self here!
            # Idempotent: a flag on the function itself guards re-entry.
            if not getattr(release_mutex, 'called', False):
                release_mutex.called = True
                try:
                    fcntl.flock(handle, fcntl.LOCK_UN)
                except:
                    traceback.print_exc()
                try:
                    handle.close()
                except:
                    traceback.print_exc()
                try:
                    # Removing is pretty much optional (but let's do it to keep the
                    # filesystem cleaner).
                    os.unlink(filename)
                except:
                    pass
        # Don't use __del__: this approach doesn't have as many pitfalls.
        self._ref = weakref.ref(self, release_mutex)
        self._release_mutex = release_mutex
        self._acquired = True
def write_pid_file(pid_file, pid):
    """Write *pid* into *pid_file* under an exclusive POSIX record lock.

    Returns 0 on success, -1 when the file cannot be opened or another
    instance already holds the lock.  The descriptor is deliberately
    left open so the lock persists for the process lifetime.
    """
    import fcntl
    import stat
    try:
        fd = os.open(pid_file, os.O_RDWR | os.O_CREAT,
                     stat.S_IRUSR | stat.S_IWUSR)
    except OSError as e:
        shell.print_exception(e)
        return -1
    # Keep the lock fd from leaking into child processes.
    # NOTE(review): asserts disappear under -O; fcntl.fcntl raises on
    # error anyway, so these checks are belt-and-braces only.
    flags = fcntl.fcntl(fd, fcntl.F_GETFD)
    assert flags != -1
    flags |= fcntl.FD_CLOEXEC
    r = fcntl.fcntl(fd, fcntl.F_SETFD, flags)
    assert r != -1
    # There is no platform independent way to implement fcntl(fd, F_SETLK, &fl)
    # via fcntl.fcntl. So use lockf instead
    try:
        fcntl.lockf(fd, fcntl.LOCK_EX | fcntl.LOCK_NB, 0, 0, os.SEEK_SET)
    except IOError:
        # Lock held: the file should contain the owner's PID.
        r = os.read(fd, 32)
        if r:
            logging.error('already started at pid %s' % common.to_str(r))
        else:
            logging.error('already started')
        os.close(fd)
        return -1
    os.ftruncate(fd, 0)
    os.write(fd, common.to_bytes(str(pid)))
    return 0
def trylock_or_exit(self, timeout=10):
    """Try to lock ``self.lockfile`` for up to *timeout* seconds.

    Polls every 0.1 s; on success keeps the descriptor open in
    ``self.lockfp`` (so the lock stays held), otherwise logs the
    failure and exits with status 1.
    """
    interval = 0.1
    n = int(timeout / interval) + 1
    flag = fcntl.LOCK_EX | fcntl.LOCK_NB
    for ii in range(n):
        fd = os.open(self.lockfile, os.O_RDWR | os.O_CREAT)
        # Don't leak the lock fd into children spawned later.
        fcntl.fcntl(fd, fcntl.F_SETFD,
                    fcntl.fcntl(fd, fcntl.F_GETFD, 0)
                    | fcntl.FD_CLOEXEC)
        try:
            fcntl.lockf(fd, flag)
            # Fix: 'w+' instead of the bogus 'w+r' mode string, which
            # Python 3 rejects outright (the extra char was ignored by C
            # fopen on Python 2, so behaviour there is unchanged).
            self.lockfp = os.fdopen(fd, 'w+')
            break
        except IOError as e:
            os.close(fd)
            # Fix: e.errno works on both Python 2 and 3; indexing the
            # exception (e[0]) is Python-2-only and TypeErrors on 3.
            if e.errno == errno.EAGAIN:
                time.sleep(interval)
            else:
                raise
    else:
        logger.info("Failure acquiring lock %s" % (self.lockfile, ))
        sys.exit(1)
    logger.info("OK acquired lock %s" % (self.lockfile))
def lock_file_nonblocking(fileobj):
    """Try to take an exclusive non-blocking flock on *fileobj*.

    Returns True on success, False when another process holds the lock;
    unrelated I/O errors propagate to the caller.
    """
    # Use fcntl.flock instead of fcntl.lockf. lockf on pypy 1.7 seems
    # to ignore existing locks.
    try:
        fcntl.flock(fileobj, fcntl.LOCK_EX | fcntl.LOCK_NB)
    # Fix: 'except E as e' replaces the Python-2-only 'except E, e'
    # comma syntax, which is a SyntaxError on Python 3.
    except IOError as ioe:
        if ioe.errno not in (errno.EACCES, errno.EAGAIN):
            raise
        return False
    return True
def lockfile(filename):
    """Yield True while holding an exclusive flock on *filename*, or
    False when another process has it locked.

    Generator meant to drive a context manager; when True was yielded,
    the lock is released as soon as the generator resumes.
    """
    with open(filename, "wb") as handle:
        descriptor = handle.fileno()
        try:
            fcntl.flock(descriptor, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except IOError as exc:
            # Propagate anything that isn't "somebody else holds it".
            if exc.errno not in (errno.EACCES, errno.EAGAIN):
                raise
            yield False
        else:
            try:
                yield True
            finally:
                fcntl.flock(descriptor, fcntl.LOCK_UN)
def acquire(self):
    """Take an exclusive non-blocking lock on ``self.lock_file``.

    On success returns (and stores in ``self._lock``) the open handle;
    on any failure closes the handle and raises OperationError.
    """
    try:
        self._lock = open(self.lock_file, "w")
        flock(self._lock, LOCK_EX | LOCK_NB)
    except Exception:
        logging.debug("Error acquiring lock on file: %s" % self.lock_file)
        if self._lock:
            self._lock.close()
        raise OperationError("Could not acquire lock on file: %s!" % self.lock_file)
    logging.debug("Acquired exclusive lock on file: %s" % self.lock_file)
    return self._lock
def trylock(self):
    """Take (or fail with IOError) a non-blocking exclusive lock on the
    already-open ``self.lockfile`` file object."""
    lock_flags = fcntl.LOCK_EX | fcntl.LOCK_NB
    fcntl.lockf(self.lockfile, lock_flags)