def start():
    """Start the zmapd scheduler daemon.

    Refuses to start if another instance already holds the pid-file lock,
    then daemonizes and loops forever dispatching pending jobs as worker
    processes.
    """
    if os.path.exists(settings.pid_file):
        with open(settings.pid_file) as f:
            try:
                # Probe the lock without blocking: if we can take it, no
                # daemon currently holds it, so release and carry on.
                fcntl.flock(f, fcntl.LOCK_EX | fcntl.LOCK_NB)
                fcntl.flock(f, fcntl.LOCK_UN)
            except IOError:
                # flock failed -> a running daemon holds the pid-file lock.
                sys.stdout.write("zmapd is already started\n")
                return
    # NOTE(review): the "%s" in start_msg is presumably filled in by
    # run_daemon_process (e.g. with the pid) — confirm against its source.
    run_daemon_process(pid_file=settings.pid_file, start_msg="Start zmapd(%s)\n")
    # Hold a shared lock on the pid file for the daemon's lifetime so later
    # invocations detect us via the probe above; intentionally never closed.
    pid_file = open(settings.pid_file)
    fcntl.flock(pid_file, fcntl.LOCK_SH)
    while True:
        time.sleep(1)
        # Sum bandwidth of currently running jobs; skip this tick when the
        # configured cap is already reached.
        running_jobs = Job.objects.filter(status=Job.STATUS_RUNNING)
        total_bandwidth = 0
        for job in running_jobs:
            total_bandwidth += job.bandwidth
        if total_bandwidth >= settings.max_bandwidth:
            logger.debug(u"Achieve maximum bandwidth:%sM", settings.max_bandwidth)
            continue
        jobs = [x for x in Job.objects.filter(status=Job.STATUS_PENDING).order_by('-priority')]
        # Drop stale DB connections before forking so the children don't
        # share the parent's connection.
        db.close_old_connections()
        # NOTE(review): all pending jobs are launched without re-checking
        # the bandwidth cap per job — confirm this is intended.
        for j in jobs:
            p = multiprocessing.Process(target=execute_job, args=(j.id,))
            p.start()
# Python examples of fcntl.LOCK_SH usage (collected snippets)
def __init__(self, filename, mode='rb'):
    """Open *filename* and take an advisory lock matching *mode*.

    Read modes take a shared lock; write/append modes take an exclusive
    lock. A 'w' mode is opened as 'a' and truncated manually so the file
    is never clobbered before the exclusive lock is actually held.
    """
    self.filename = filename
    self.mode = mode
    self.file = None
    if 'r' in mode:
        # Readers share the lock among themselves.
        self.file = open(filename, mode)
        lock(self.file, LOCK_SH)
        return
    if ('w' not in mode) and ('a' not in mode):
        raise RuntimeError("invalid LockedFile(...,mode)")
    # Open in append mode so existing content survives until we own the lock.
    self.file = open(filename, mode.replace('w', 'a'))
    lock(self.file, LOCK_EX)
    if 'a' not in mode:
        # 'w' semantics: truncate only after the exclusive lock is held.
        self.file.seek(0)
        self.file.truncate(0)
def _lock_file_posix(self, path, exclusive=True):
lock_path = path + '.lock'
if exclusive is True:
f_lock = open(lock_path, 'w')
fcntl.lockf(f_lock, fcntl.LOCK_EX)
else:
f_lock = open(lock_path, 'r')
fcntl.lockf(f_lock, fcntl.LOCK_SH)
if os.path.exists(lock_path) is False:
f_lock.close()
return None
return f_lock
def _lock_file(f, exclusive):
fcntl.flock(f, fcntl.LOCK_EX if exclusive else fcntl.LOCK_SH)
def _lock_file_posix(self, path, exclusive=True):
lock_path = path + '.lock'
if exclusive is True:
f_lock = open(lock_path, 'w')
fcntl.lockf(f_lock, fcntl.LOCK_EX)
else:
f_lock = open(lock_path, 'r')
fcntl.lockf(f_lock, fcntl.LOCK_SH)
if os.path.exists(lock_path) is False:
f_lock.close()
return None
return f_lock
def __init__(self, filename, mode='rb'):
    """Open *filename* under an advisory lock chosen from *mode*.

    'r' modes take a shared lock; 'w'/'a' modes take an exclusive lock,
    with 'w' emulated via append-then-truncate so no data is lost before
    the lock is held. Any other mode is rejected.
    """
    self.filename = filename
    self.mode = mode
    self.file = None
    reading = 'r' in mode
    writing = ('w' in mode) or ('a' in mode)
    if reading:
        self.file = open(filename, mode)
        lock(self.file, LOCK_SH)
    elif writing:
        # Never open with 'w' directly: truncation must wait for the lock.
        self.file = open(filename, mode.replace('w', 'a'))
        lock(self.file, LOCK_EX)
        if 'a' not in mode:
            self.file.seek(0)
            self.file.truncate(0)
    else:
        raise RuntimeError("invalid LockedFile(...,mode)")
def lock(self, cmd, owner, **kw):
    """FUSE file-locking hook.

    Translates an fcntl-style request (cmd is F_SETLK / F_SETLKW /
    F_GETLK, kw carries the struct-flock fields l_type/l_start/l_len/
    l_pid) into Python's lockf() call on the backing fd. F_GETLK is
    unsupported and reported as -EOPNOTSUPP; *owner* is unused here.
    """
    #return -EROFS
    # The code here is much rather just a demonstration of the locking
    # API than something which actually was seen to be useful.
    # Advisory file locking is pretty messy in Unix, and the Python
    # interface to this doesn't make it better.
    # We can't do fcntl(2)/F_GETLK from Python in a platfrom independent
    # way. The following implementation *might* work under Linux.
    #
    # if cmd == fcntl.F_GETLK:
    #     import struct
    #
    #     lockdata = struct.pack('hhQQi', kw['l_type'], os.SEEK_SET,
    #                            kw['l_start'], kw['l_len'], kw['l_pid'])
    #     ld2 = fcntl.fcntl(self.fd, fcntl.F_GETLK, lockdata)
    #     flockfields = ('l_type', 'l_whence', 'l_start', 'l_len', 'l_pid')
    #     uld2 = struct.unpack('hhQQi', ld2)
    #     res = {}
    #     for i in xrange(len(uld2)):
    #         res[flockfields[i]] = uld2[i]
    #
    #     return fuse.Flock(**res)

    # Convert fcntl-ish lock parameters to Python's weird
    # lockf(3)/flock(2) medley locking API...
    # Map the requested fcntl lock type onto the LOCK_* operation
    # that fcntl.lockf() expects.
    op = { fcntl.F_UNLCK : fcntl.LOCK_UN,
           fcntl.F_RDLCK : fcntl.LOCK_SH,
           fcntl.F_WRLCK : fcntl.LOCK_EX }[kw['l_type']]
    if cmd == fcntl.F_GETLK:
        return -EOPNOTSUPP
    elif cmd == fcntl.F_SETLK:
        # F_SETLK must not block: fail immediately if contended.
        if op != fcntl.LOCK_UN:
            op |= fcntl.LOCK_NB
    elif cmd == fcntl.F_SETLKW:
        # F_SETLKW is the blocking variant: use op as-is.
        pass
    else:
        return -EINVAL
    fcntl.lockf(self.fd, op, kw['l_start'], kw['l_len'])
def flopen(*args, **kwargs):
    '''
    Shortcut for fopen with lock and context manager

    Opens the file via fopen(), takes a shared flock on it (when fcntl
    locking is available on this platform), yields the handle, and always
    releases the lock on exit.
    '''
    with fopen(*args, **kwargs) as fhandle:
        try:
            if is_fcntl_available(check_sunos=True):
                # Shared lock: concurrent readers are allowed.
                fcntl.flock(fhandle.fileno(), fcntl.LOCK_SH)
            yield fhandle
        finally:
            # NOTE(review): availability is probed twice; presumably the
            # result is stable within one call — confirm. Unlocking an
            # unlocked file is harmless if flock() above failed.
            if is_fcntl_available(check_sunos=True):
                fcntl.flock(fhandle.fileno(), fcntl.LOCK_UN)
def _lock_file(f, exclusive):
fcntl.flock(f, fcntl.LOCK_EX if exclusive else fcntl.LOCK_SH)
def _lock_file(f, exclusive):
fcntl.flock(f, fcntl.LOCK_EX if exclusive else fcntl.LOCK_SH)
def _acquire(self, exclusive=True, blocking=True):
    """Take an advisory flock on self.handle.

    Args:
        exclusive: exclusive (write) lock if True, otherwise shared.
        blocking: when False, fail immediately instead of waiting.

    Returns:
        self, so the call can be chained.

    Raises:
        AlreadyLocked: a non-blocking attempt found the lock held.
        IOError: any other flock() failure is propagated. (Bug fix: the
            original swallowed non-EAGAIN errors and then reported the
            lock as held.)
    """
    flag = fcntl.LOCK_EX if exclusive else fcntl.LOCK_SH
    if not blocking:
        flag |= fcntl.LOCK_NB
    try:
        logger.debug("Acquiring lock on {}".format(self.filename))
        fcntl.flock(self.handle, flag)
    except IOError as e:
        # EWOULDBLOCK aliases EAGAIN on Linux; both mean "already held".
        if e.errno in (errno.EAGAIN, errno.EWOULDBLOCK):
            raise AlreadyLocked()
        # Bug fix: re-raise unrelated errors instead of silently
        # marking the lock as acquired.
        raise
    self._locked = True
    return self
def _lock_file_posix(self, path, exclusive=True):
lock_path = path + '.lock'
if exclusive is True:
f_lock = open(lock_path, 'w')
fcntl.lockf(f_lock, fcntl.LOCK_EX)
else:
f_lock = open(lock_path, 'r')
fcntl.lockf(f_lock, fcntl.LOCK_SH)
if os.path.exists(lock_path) is False:
f_lock.close()
return None
return f_lock
def _lock_file(f, exclusive):
fcntl.flock(f, fcntl.LOCK_EX if exclusive else fcntl.LOCK_SH)
def __lock(self, *, write: bool=True):
    """
    Context manager locking the storage for reading or writing.

    Many processes may hold the shared (read) lock at once; the write
    lock is exclusive. Blocks until the lock can be taken. When the
    backing file is not lockable, this degrades to a no-op.

    Args:
        write -- (bool) lock for writing when True, else for reading.
    """
    assert not self.__locked, 'Nested lock'
    if not self.__lockable:
        # Locking unsupported: hand control back unguarded.
        yield
        return
    mode = fcntl.LOCK_EX if write else fcntl.LOCK_SH
    # Lock a 1-byte region; every user locks the same region.
    fcntl.lockf(self.__file, mode, 1)
    self.__locked = True
    try:
        yield
    finally:
        fcntl.lockf(self.__file, fcntl.LOCK_UN, 1)
        self.__locked = False
def __init__(self, filename, mode='rb'):
    """Open *filename* with an advisory lock derived from *mode*.

    Readers take a shared lock. Writers/appenders take an exclusive
    lock; 'w' is opened as 'a' and truncated only once the lock is
    held, so concurrent readers never see a half-clobbered file.
    """
    self.filename = filename
    self.mode = mode
    self.file = None
    if 'r' in mode:
        # Shared lock for readers.
        self.file = open(filename, mode)
        lock(self.file, LOCK_SH)
    elif ('w' in mode) or ('a' in mode):
        # Exclusive lock for writers; truncation deferred until locked.
        self.file = open(filename, mode.replace('w', 'a'))
        lock(self.file, LOCK_EX)
        if 'a' not in mode:
            self.file.seek(0)
            self.file.truncate()
    else:
        raise RuntimeError("invalid LockedFile(...,mode)")
def _lock_file_posix(self, path, exclusive=True):
lock_path = path + '.lock'
if exclusive is True:
f_lock = open(lock_path, 'w')
fcntl.lockf(f_lock, fcntl.LOCK_EX)
else:
f_lock = open(lock_path, 'r')
fcntl.lockf(f_lock, fcntl.LOCK_SH)
if os.path.exists(lock_path) is False:
f_lock.close()
return None
return f_lock
def get_last_n_messages(self, n):
    """
    Generator intended to return the last n messages from the queue.

    As far as the last records are located at the end of the file, we read
    the file backwards until the number of desired lines is reached or the
    whole file has been read. -1 means no limit.

    Bug fixes vs the original:
      * ``buffer[-1] is not '\\n'`` compared string *identity*, which is
        undefined behaviour for arbitrary strings — replaced with ``!=``.
      * Hitting the n-limit used ``break``, after which the trailing
        ``segment`` was still yielded, emitting more than n messages —
        replaced with ``return`` and a guarded final yield.
    """
    buf_size = 8192
    try:
        with open(self.file_path, 'r') as fd:
            # Shared lock: allow concurrent readers, exclude writers.
            fcntl.flock(fd, fcntl.LOCK_SH)
            segment = None  # pending (possibly partial) oldest line seen so far
            offset = 0
            n_line = 0
            # Move to the EOF
            fd.seek(0, os.SEEK_END)
            # Get file size using tell()
            file_size = total_size = remaining_size = fd.tell()
            while remaining_size > 0:
                offset = min(total_size, offset + buf_size)
                # Move pointer to the next position.
                fd.seek(file_size - offset)
                # Read a chunk into the buffer.
                buffer = fd.read(min(remaining_size, buf_size))
                remaining_size -= buf_size
                # Split buffer content by EOL.
                lines = buffer.split('\n')
                if segment is not None:
                    if buffer[-1] != '\n':
                        # Chunk ends mid-line: glue the fragment kept from
                        # the previous (later-in-file) chunk onto it.
                        lines[-1] += segment
                    else:
                        n_line += 1
                        if n > -1 and n_line > n:
                            fcntl.flock(fd, fcntl.LOCK_UN)
                            return  # limit reached: stop entirely
                        yield json.loads(
                            self.parse_row_message(segment).content)
                segment = lines[0]
                # Emit each complete line of this chunk, newest first.
                for idx in range(len(lines) - 1, 0, -1):
                    if len(lines[idx]):
                        n_line += 1
                        if n > -1 and n_line > n:
                            fcntl.flock(fd, fcntl.LOCK_UN)
                            return  # limit reached: stop entirely
                        yield json.loads(
                            self.parse_row_message(lines[idx]).content)
            # Oldest line of the file, accumulated in `segment`; emit it
            # only if the message limit has not been reached.
            if segment is not None:
                n_line += 1
                if not (n > -1 and n_line > n):
                    yield json.loads(
                        self.parse_row_message(segment).content)
            fcntl.flock(fd, fcntl.LOCK_UN)
    except Exception:
        # Best effort: an unreadable or missing queue file yields nothing.
        return
def lockfile(name, shared=False, retry=True, block=False):
    """
    Use the specified file as a lock file, return when the lock has
    been acquired. Returns a variable to pass to unlockfile().
    Parameters:
    retry: True to re-try locking if it fails, False otherwise
    block: True to block until the lock succeeds, False otherwise
    The retry and block parameters are kind of equivalent unless you
    consider the possibility of sending a signal to the process to break
    out - at which point you want block=True rather than retry=True.
    """
    dirname = os.path.dirname(name)
    mkdirhier(dirname)
    # Bail out early if the lock file can never be created.
    if not os.access(dirname, os.W_OK):
        logger.error("Unable to acquire lock '%s', directory is not writable",
            name)
        sys.exit(1)
    op = fcntl.LOCK_EX
    if shared:
        op = fcntl.LOCK_SH
    if not retry and not block:
        # Fail fast inside flock() instead of waiting for the holder.
        op = op | fcntl.LOCK_NB
    while True:
        # If we leave the lockfiles lying around there is no problem
        # but we should clean up after ourselves. This gives potential
        # for races though. To work around this, when we acquire the lock
        # we check the file we locked was still the lock file on disk.
        # by comparing inode numbers. If they don't match or the lockfile
        # no longer exists, we start again.
        # This implementation is unfair since the last person to request the
        # lock is the most likely to win it.
        try:
            lf = open(name, 'a+')
            fileno = lf.fileno()
            fcntl.flock(fileno, op)
            statinfo = os.fstat(fileno)
            if os.path.exists(lf.name):
                # Re-stat by path: another process may have unlinked and
                # recreated the lock file while we were blocked.
                statinfo2 = os.stat(lf.name)
                if statinfo.st_ino == statinfo2.st_ino:
                    return lf
            lf.close()
        except Exception:
            # NOTE(review): if open() itself failed, 'lf' is unbound here,
            # so close() raises NameError — swallowed by the inner except.
            try:
                lf.close()
            except Exception:
                pass
            pass
        if not retry:
            return None
def lockfile(name, shared=False, retry=True):
    """
    Use the file fn as a lock file, return when the lock has been acquired.
    Returns a variable to pass to unlockfile().

    shared: take a shared (reader) lock instead of an exclusive one.
    retry: when False, a failed non-blocking attempt returns None
    instead of looping.
    """
    config.logger.debug("take lockfile %s", name)
    dirname = os.path.dirname(name)
    mkdirhier(dirname)
    # Bail out early if the lock file can never be created.
    if not os.access(dirname, os.W_OK):
        logger.error("Unable to acquire lock '%s', directory is not writable",
            name)
        sys.exit(1)
    operation = fcntl.LOCK_EX
    if shared:
        operation = fcntl.LOCK_SH
    if not retry:
        # Single-shot mode: fail fast inside flock() rather than block.
        operation = operation | fcntl.LOCK_NB
    while True:
        # If we leave the lockfiles lying around there is no problem
        # but we should clean up after ourselves. This gives potential
        # for races though. To work around this, when we acquire the lock
        # we check the file we locked was still the lock file on disk.
        # by comparing inode numbers. If they don't match or the lockfile
        # no longer exists, we start again.
        # This implementation is unfair since the last person to request the
        # lock is the most likely to win it.
        # pylint: disable=broad-except
        # we disable the broad-except because we want to actually catch all possible exceptions
        try:
            lock_file = open(name, 'a+')
            fileno = lock_file.fileno()
            fcntl.flock(fileno, operation)
            statinfo = os.fstat(fileno)
            if os.path.exists(lock_file.name):
                # Re-stat by path: the file may have been unlinked and
                # recreated while we were blocked in flock().
                statinfo2 = os.stat(lock_file.name)
                if statinfo.st_ino == statinfo2.st_ino:
                    return lock_file
            lock_file.close()
        except Exception as exc:
            # NOTE(review): if open() itself failed, 'lock_file' is unbound
            # here and close() raises NameError, logged by the inner except.
            try:
                lock_file.close()
            except Exception as exc2:
                config.logger.error("Failed to close the lockfile: %s", exc2)
            config.logger.error("Failed to acquire the lockfile: %s", exc)
        if not retry:
            return None