def get_content_all_messages(self):
    """
    Get all messages
    """
    try:
        buffers = list()
        with open(self.file_path, 'r') as fd:
            # shared lock: concurrent readers are fine, writers are kept out
            fcntl.flock(fd, fcntl.LOCK_SH)
            for row_msg in fd.readlines():
                try:
                    msg = self.parse_row_message(row_msg)
                    buffers.append(json.loads(msg.content))
                except Exception:
                    pass
            fcntl.flock(fd, fcntl.LOCK_UN)
        return buffers
    except Exception:
        return
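A minimal, self-contained sketch of the same pattern, with readers taking a shared flock so an exclusive writer cannot truncate the file mid-read. The file name and the JSON-lines layout are illustrative, not taken from the snippet above.

import fcntl
import json

def read_all_records(path="queue.jsonl"):
    """Read every JSON line from path while holding a shared lock."""
    records = []
    with open(path, "r") as fd:
        fcntl.flock(fd, fcntl.LOCK_SH)        # other readers may hold LOCK_SH at the same time
        try:
            for line in fd:
                try:
                    records.append(json.loads(line))
                except ValueError:
                    pass                       # skip malformed lines, as the snippet above does
        finally:
            fcntl.flock(fd, fcntl.LOCK_UN)     # closing the file would also release the lock
    return records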
def __init__(self, addr, conf, log, fd=None):
    if fd is None:
        try:
            st = os.stat(addr)
        except OSError as e:
            if e.args[0] != errno.ENOENT:
                raise
        else:
            if stat.S_ISSOCK(st.st_mode):
                os.remove(addr)
            else:
                raise ValueError("%r is not a socket" % addr)
    self.parent = os.getpid()
    super(UnixSocket, self).__init__(addr, conf, log, fd=fd)
    # each arbiter grabs a shared lock on the unix socket.
    fcntl.lockf(self.sock, fcntl.LOCK_SH | fcntl.LOCK_NB)
def _lock_file(f, exclusive):
    fcntl.flock(f, fcntl.LOCK_EX if exclusive else fcntl.LOCK_SH)
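The one-line helper above pairs naturally with a context manager so the unlock can never be forgotten. This is a sketch of such a wrapper, not part of the original code; the name locked is made up for the example.

import fcntl
from contextlib import contextmanager

@contextmanager
def locked(f, exclusive=False):
    """Hold a shared (or exclusive) flock on the open file f for the duration of the block."""
    fcntl.flock(f, fcntl.LOCK_EX if exclusive else fcntl.LOCK_SH)
    try:
        yield f
    finally:
        fcntl.flock(f, fcntl.LOCK_UN)

# Usage: any number of processes can read under LOCK_SH concurrently.
# with open("data.txt") as f, locked(f):
#     data = f.read()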
def latent_plan(init, goal, mode):
    bits = np.concatenate((init, goal))
    ###### preprocessing ################################################################
    ## old code for caching...
    lock = problem(network("lock"))
    import fcntl
    try:
        with open(lock) as f:
            print("lockfile found!")
            fcntl.flock(f, fcntl.LOCK_SH)
    except FileNotFoundError:
        with open(lock, 'wb') as f:
            fcntl.flock(f, fcntl.LOCK_EX)
            preprocess(bits)
    ###### do planning #############################################
    sasp = problem(network("{}.sasp".format(action_type)))
    plan_raw = problem(network("{}.sasp.plan".format(action_type)))
    plan = problem(network("{}.{}.plan".format(action_type, mode)))
    echodo(["planner-scripts/limit.sh", "-v", "-o", options[mode], "--", "fd-sas-clean", sasp])
    assert os.path.exists(plan_raw)
    echodo(["mv", plan_raw, plan])
    out = echo_out(["lisp/parse-plan.bin", plan, *list(init.astype('str'))])
    lines = out.splitlines()
    return np.array([[int(s) for s in l.split()] for l in lines])
def __init__(self, fd, keys, excl=True, lock=False, mod=1048573, _ctx_cleanup=None):
    self.fd = fd
    self.locked = False
    self.mode = fcntl.LOCK_EX if excl else fcntl.LOCK_SH
    self._ctx_cleanup = _ctx_cleanup
    # sort so locks are acquired in consistent order
    # guarantees no inter-process deadlocks
    locks = set(hash(key) % mod for key in keys)
    self.locks = tuple(sorted(locks))
    if lock:
        self.lock()
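The constructor above only computes a sorted tuple of hash buckets; how those buckets become locks is not shown. A plausible sketch of the idea using per-key byte-range locks (the byte-range detail and the helper name are assumptions made for illustration):

import fcntl

def acquire_key_locks(fd, offsets, exclusive=True):
    """Lock one byte per hashed key, always in sorted order so two processes
    can never each hold a lock the other is waiting for."""
    mode = fcntl.LOCK_EX if exclusive else fcntl.LOCK_SH
    for off in sorted(offsets):
        fcntl.lockf(fd, mode, 1, off)   # lock a 1-byte region starting at byte off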
def shared(self):
    self._update(fcntl.LOCK_SH)
def __init__(self, filename, mode='rb'):
    self.filename = filename
    self.mode = mode
    self.file = None
    if 'r' in mode:
        self.file = open_file(filename, mode)
        lock(self.file, LOCK_SH)
    elif 'w' in mode or 'a' in mode:
        self.file = open_file(filename, mode.replace('w', 'a'))
        lock(self.file, LOCK_EX)
        if 'a' not in mode:
            self.file.seek(0)
            self.file.truncate(0)
    else:
        raise RuntimeError("invalid LockedFile(...,mode)")
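The reason the constructor rewrites 'w' to 'a' is that opening with 'w' truncates the file before the exclusive lock is taken, so a concurrent reader holding LOCK_SH could observe an empty file. A reduced sketch of that ordering, using only the standard library (the helper name is illustrative):

import fcntl

def open_for_rewrite(path):
    """Open path for rewriting without truncating before the lock is held."""
    f = open(path, "a+b")            # append mode: does not truncate on open
    fcntl.flock(f, fcntl.LOCK_EX)    # blocks until all shared locks are released
    f.seek(0)
    f.truncate(0)                    # only now is it safe to discard the old contents
    return f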
def get_rpc_port_by_rank(self, rank, num_servers):
    if self.mpirun_proc is None:
        raise RuntimeError("Launch mpirun_proc before reading the RPC ports")
    if self._rpc_ports is not None:
        return self._rpc_ports[rank]
    server_info_pattern = re.compile("^" + LINE_TOKEN +
                                     r":([\d]+):([\d]+):([\d]+):" +
                                     LINE_TOKEN + "$")
    self._tmpfile.seek(0)
    while True:
        # hold a shared lock only while counting matching lines
        fcntl.lockf(self._tmpfile, fcntl.LOCK_SH)
        line_count = sum(1 for line in self._tmpfile if server_info_pattern.match(line))
        self._tmpfile.seek(0)
        fcntl.lockf(self._tmpfile, fcntl.LOCK_UN)
        if line_count == num_servers:
            break
        else:
            time.sleep(0.1)
    server_infos = [(int(server_info_pattern.match(line).group(1)),
                     int(server_info_pattern.match(line).group(3)))
                    for line in self._tmpfile]
    server_infos = sorted(server_infos, key=lambda x: x[0])
    self._rpc_ports = [row[1] for row in server_infos]
    logger.debug("get_rpc_ports: ports (in MPI rank order): %s", self._rpc_ports)
    self._tmpfile.close()
    return self._rpc_ports[rank]
def _lock_file(f, exclusive):
    fcntl.flock(f, fcntl.LOCK_EX if exclusive else fcntl.LOCK_SH)
def _lock_file_posix(self, path, exclusive=True):
    lock_path = path + '.lock'
    if exclusive is True:
        f_lock = open(lock_path, 'w')
        fcntl.lockf(f_lock, fcntl.LOCK_EX)
    else:
        f_lock = open(lock_path, 'r')
        fcntl.lockf(f_lock, fcntl.LOCK_SH)
    if os.path.exists(lock_path) is False:
        f_lock.close()
        return None
    return f_lock
def do_acquire_read_lock(self, wait):
    filedescriptor = self._open(os.O_CREAT | os.O_RDONLY)
    if not wait:
        try:
            fcntl.flock(filedescriptor, fcntl.LOCK_SH | fcntl.LOCK_NB)
            return True
        except IOError:
            os.close(filedescriptor)
            self._filedescriptor.remove()
            return False
    else:
        fcntl.flock(filedescriptor, fcntl.LOCK_SH)
        return True
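The non-blocking branch above relies on flock raising when the lock is already held exclusively. A self-contained sketch of that probe, independent of the class it belongs to (the function name and path handling are illustrative):

import fcntl
import os

def try_shared_lock(path):
    """Attempt a non-blocking shared lock; return the fd on success, None if a writer holds LOCK_EX."""
    fd = os.open(path, os.O_CREAT | os.O_RDONLY)
    try:
        fcntl.flock(fd, fcntl.LOCK_SH | fcntl.LOCK_NB)
        return fd
    except OSError:          # raised as BlockingIOError on Python 3 when the lock is contended
        os.close(fd)
        return None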
def __init__(self, filename, mode='rb'):
    self.filename = filename
    self.mode = mode
    self.file = None
    if 'r' in mode:
        self.file = open(filename, mode)
        lock(self.file, LOCK_SH)
    elif 'w' in mode or 'a' in mode:
        self.file = open(filename, mode.replace('w', 'a'))
        lock(self.file, LOCK_EX)
        if 'a' not in mode:
            self.file.seek(0)
            self.file.truncate(0)
    else:
        raise RuntimeError("invalid LockedFile(...,mode)")
def _lock_file(f, exclusive):
    fcntl.flock(f, fcntl.LOCK_EX if exclusive else fcntl.LOCK_SH)
def get_length(self):
    """ Read the whole queue file and return the number of lines. """
    try:
        with open(self.file_path, 'r') as fd:
            fcntl.flock(fd, fcntl.LOCK_SH)
            n = 0
            for _ in fd.readlines():
                n += 1
            fcntl.flock(fd, fcntl.LOCK_UN)
            return n
    except Exception:
        return 0
def get_size(self):
    """ Return queue file size. """
    try:
        with open(self.file_path, 'r') as fd:
            fcntl.flock(fd, fcntl.LOCK_SH)
            size = os.fstat(fd.fileno()).st_size
            fcntl.flock(fd, fcntl.LOCK_UN)
            return size
    except Exception:
        return 0
def __init__(self, filename, mode='rb'):
    self.filename = filename
    self.mode = mode
    self.file = None
    if 'r' in mode:
        self.file = open(filename, mode)
        lock(self.file, LOCK_SH)
    elif 'w' in mode or 'a' in mode:
        self.file = open(filename, mode.replace('w', 'a'))
        lock(self.file, LOCK_EX)
        if 'a' not in mode:
            self.file.seek(0)
            self.file.truncate()
    else:
        raise RuntimeError("invalid LockedFile(...,mode)")
def __init__(self, filename, mode='rb'):
    self.filename = filename
    self.mode = mode
    self.file = None
    if 'r' in mode:
        self.file = open(filename, mode)
        lock(self.file, LOCK_SH)
    elif 'w' in mode or 'a' in mode:
        self.file = open(filename, mode.replace('w', 'a'))
        lock(self.file, LOCK_EX)
        if 'a' not in mode:
            self.file.seek(0)
            self.file.truncate(0)
    else:
        raise RuntimeError("invalid LockedFile(...,mode)")
def __init__(self, filename, mode='rb'):
    self.filename = filename
    self.mode = mode
    self.file = None
    if 'r' in mode:
        self.file = open(filename, mode)
        lock(self.file, LOCK_SH)
    elif 'w' in mode or 'a' in mode:
        self.file = open(filename, mode.replace('w', 'a'))
        lock(self.file, LOCK_EX)
        if 'a' not in mode:
            self.file.seek(0)
            self.file.truncate(0)
    else:
        raise RuntimeError("invalid LockedFile(...,mode)")
def _lock_file(f, exclusive):
    fcntl.flock(f, fcntl.LOCK_EX if exclusive else fcntl.LOCK_SH)
def do_acquire_read_lock(self, wait):
    filedescriptor = self._open(os.O_CREAT | os.O_RDONLY)
    if not wait:
        try:
            fcntl.flock(filedescriptor, fcntl.LOCK_SH | fcntl.LOCK_NB)
            return True
        except IOError:
            os.close(filedescriptor)
            self._filedescriptor.remove()
            return False
    else:
        fcntl.flock(filedescriptor, fcntl.LOCK_SH)
        return True
def _lock_buildroot(self, exclusive):
    lock_type = fcntl.LOCK_EX if exclusive else fcntl.LOCK_SH
    if not self._lock_file:
        self._open_lock()
    try:
        fcntl.lockf(self._lock_file.fileno(), lock_type | fcntl.LOCK_NB)
    except IOError:
        raise BuildRootLocked("Build root is locked by another process.")
def lock(self, exclusive, block=False):
    lock_type = fcntl.LOCK_EX if exclusive else fcntl.LOCK_SH
    try:
        fcntl.lockf(self.lock_file.fileno(),
                    lock_type | (0 if block else fcntl.LOCK_NB))
    except IOError as e:
        if e.errno in (errno.EACCES, errno.EAGAIN):
            raise LvmLocked("LVM is locked")
        raise
def _rootCacheLock(self, shared=1):
    lockType = fcntl.LOCK_EX
    if shared:
        lockType = fcntl.LOCK_SH
    try:
        fcntl.lockf(self.rootCacheLock.fileno(), lockType | fcntl.LOCK_NB)
    except IOError:
        self.state.start("Waiting for rootcache lock")
        fcntl.lockf(self.rootCacheLock.fileno(), lockType)
        self.state.finish("Waiting for rootcache lock")
def _lock_file_posix(self, path, exclusive=True):
    lock_path = path + '.lock'
    if exclusive is True:
        f_lock = open(lock_path, 'w')
        fcntl.lockf(f_lock, fcntl.LOCK_EX)
    else:
        f_lock = open(lock_path, 'r')
        fcntl.lockf(f_lock, fcntl.LOCK_SH)
    if os.path.exists(lock_path) is False:
        f_lock.close()
        return None
    return f_lock
def do_acquire_read_lock(self, wait):
    filedescriptor = self._open(os.O_CREAT | os.O_RDONLY)
    if not wait:
        try:
            fcntl.flock(filedescriptor, fcntl.LOCK_SH | fcntl.LOCK_NB)
            return True
        except IOError:
            os.close(filedescriptor)
            self._filedescriptor.remove()
            return False
    else:
        fcntl.flock(filedescriptor, fcntl.LOCK_SH)
        return True
def _lock_file_posix(self, path, exclusive=True):
    lock_path = path + '.lock'
    if exclusive is True:
        f_lock = open(lock_path, 'w')
        fcntl.lockf(f_lock, fcntl.LOCK_EX)
    else:
        f_lock = open(lock_path, 'r')
        fcntl.lockf(f_lock, fcntl.LOCK_SH)
    if os.path.exists(lock_path) is False:
        f_lock.close()
        return None
    return f_lock
def _lock_file(f, exclusive):
    fcntl.flock(f, fcntl.LOCK_EX if exclusive else fcntl.LOCK_SH)
def _lock_file_posix(self, path, exclusive=True):
    lock_path = path + '.lock'
    if exclusive is True:
        f_lock = open(lock_path, 'w')
        fcntl.lockf(f_lock, fcntl.LOCK_EX)
    else:
        f_lock = open(lock_path, 'r')
        fcntl.lockf(f_lock, fcntl.LOCK_SH)
    if os.path.exists(lock_path) is False:
        f_lock.close()
        return None
    return f_lock
def __init__(self, filename, mode='rb'):
    self.filename = filename
    self.mode = mode
    self.file = None
    if 'r' in mode:
        self.file = open_file(filename, mode)
        lock(self.file, LOCK_SH)
    elif 'w' in mode or 'a' in mode:
        self.file = open_file(filename, mode.replace('w', 'a'))
        lock(self.file, LOCK_EX)
        if 'a' not in mode:
            self.file.seek(0)
            self.file.truncate(0)
    else:
        raise RuntimeError("invalid LockedFile(...,mode)")