Python fcntl.LOCK_EX example source code
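Every snippet below follows the same basic idea: open (or create) a lock file, then ask the kernel for an exclusive lock on it, usually non-blocking so the process can fail fast instead of waiting. A minimal sketch of that pattern (the path is a placeholder, not taken from any of the projects below):

import fcntl

# Minimal sketch: take an exclusive, non-blocking lock on a file.
# If another process already holds the lock, flock() raises immediately.
with open("/tmp/example.lock", "w") as lock_file:
    try:
        fcntl.flock(lock_file.fileno(), fcntl.LOCK_EX | fcntl.LOCK_NB)
    except (IOError, OSError):
        raise SystemExit("another instance already holds the lock")
    # ... do the work that must not run concurrently ...
    # closing the file at the end of the with-block releases the lock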

pidfile.py (project: pscheduler, author: perfsonar)
def __enter__(self):
        if self.path is None:
            return self.pidfile

        self.pidfile = open(self.path, "a+")
        try:
            fcntl.flock(self.pidfile.fileno(), fcntl.LOCK_EX | fcntl.LOCK_NB)
        except IOError:
            self.pidfile = None
            raise SystemExit("Already running according to " + self.path)
        self.pidfile.seek(0)
        self.pidfile.truncate()
        self.pidfile.write(str(os.getpid()))
        self.pidfile.flush()
        self.pidfile.seek(0)
        return self.pidfile
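The matching __exit__ is not part of this excerpt; a plausible counterpart (an assumption, not the actual pscheduler code) would release the lock and clean up the PID file:

    def __exit__(self, exc_type, exc_value, traceback):
        # Hypothetical counterpart to __enter__ above, not taken from pscheduler:
        # release the lock, close the handle and remove the PID file.
        if self.path is None or self.pidfile is None:
            return
        fcntl.flock(self.pidfile.fileno(), fcntl.LOCK_UN)
        self.pidfile.close()
        try:
            os.unlink(self.path)
        except OSError:
            pass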
recipe-578476.py (project: code, author: ActiveState)
def do_magic(self):
        if OS_WIN:
            try:
                if os.path.exists(LOCK_PATH):
                    os.unlink(LOCK_PATH)
                self.fh = os.open(LOCK_PATH, os.O_CREAT | os.O_EXCL | os.O_RDWR)
            except EnvironmentError as err:
                if err.errno == 13:
                    self.is_running = True
                else:
                    raise
        else:
            try:
                self.fh = open(LOCK_PATH, 'w')
                fcntl.lockf(self.fh, fcntl.LOCK_EX | fcntl.LOCK_NB)
            except EnvironmentError as err:
                if self.fh is not None:
                    self.is_running = True
                else:
                    raise
cache.py (project: jenkins-epo, author: peopledoc)
def open(self):
        if self.opened:
            return

        self.lock = open(SETTINGS.CACHE_PATH + '.lock', 'ab')
        try:
            fcntl.flock(self.lock, fcntl.LOCK_EX | fcntl.LOCK_NB)
            mode = 'c'
        except IOError:
            logger.warn("Cache locked, using read-only")
            mode = 'r'
            self.lock.close()
            self.lock = None

        try:
            self.storage = shelve.open(SETTINGS.CACHE_PATH, mode)
        except Exception as e:
            if mode != 'c':
                raise
            logger.warn("Dropping corrupted cache on %s", e)
            self.lock.truncate(0)
            self.storage = shelve.open(SETTINGS.CACHE_PATH, mode)
        self.opened = True
lock.py (project: Citation-Fetcher, author: andrewhead)
def lock_method(lock_filename):
    ''' Use an OS lock such that a method can only be called once at a time. '''

    def decorator(func):

        @functools.wraps(func)
        def lock_and_run_method(*args, **kwargs):

            # Only run this program if it's not already running
            # Snippet based on
            # http://linux.byexamples.com/archives/494/how-can-i-avoid-running-a-python-script-multiple-times-implement-file-locking/
            fp = open(lock_filename, 'w')
            try:
                fcntl.lockf(fp, fcntl.LOCK_EX | fcntl.LOCK_NB)
            except IOError:
                raise SystemExit(
                    "This program is already running.  Please stop the current process or " +
                    "remove " + lock_filename + " to run this script."
                )

            return func(*args, **kwargs)

        return lock_and_run_method

    return decorator
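A usage sketch for the decorator above (the lock path and function name are invented for illustration):

# Hypothetical usage of lock_method(); path and function name are examples only.
@lock_method('/tmp/fetch_citations.lock')
def fetch_citations():
    ...  # only one process can run this at a time; a second one exits immediately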
storeProxy.py (project: searchForAll, author: MemoryAndDream)
def store(proxys):
    pidfile = open(proxyFilePath, "a")
    for i in range(10):
        try:
            fcntl.flock(pidfile, fcntl.LOCK_EX | fcntl.LOCK_NB)  # LOCK_EX: exclusive lock, only one process may hold it at a time
            # LOCK_NB: non-blocking, raise IOError immediately instead of waiting if the lock is already held
            if type(proxys) == type([]):
                for proxy in proxys:
                    pidfile.write(proxy + '\n')
            else:
                pidfile.write(proxys + '\n')
            pidfile.close()
            break
        except:
            # print "another instance is running..."
            time.sleep(3)
queue.py (project: temboard-agent, author: dalibo)
def push(self, message):
        """ Push a new message. """
        if self.overflow_mode == 'drop':
            if self.max_length > -1 and self.get_length() >= self.max_length:
                return
            if self.max_size > -1 and self.get_size() >= self.max_size:
                return

        with open(self.file_path, 'a') as fd:
            # Let's hold an exclusive lock.
            fcntl.flock(fd, fcntl.LOCK_EX)
            fd.write(message.serialize())
            fcntl.flock(fd, fcntl.LOCK_UN)
            fd.close()

        if self.overflow_mode == 'slide':
            if self.max_size == -1 and self.max_length > -1:
                while self.get_length() > self.max_length:
                    self.shift()
            elif self.max_size > -1 and self.max_length == -1:
                while self.get_size() > self.max_size:
                    self.shift()
engine.py (project: cobra, author: wufeifei)
def init_list(self, data=None):
        """
        Initialize asid_list file.
        :param data: list or a string
        :return:
        """
        file_path = os.path.join(running_path, '{sid}_list'.format(sid=self.sid))
        if not os.path.exists(file_path):
            if isinstance(data, list):
                with open(file_path, 'w') as f:
                    fcntl.flock(f, fcntl.LOCK_EX)
                    f.write(json.dumps({
                        'sids': {},
                        'total_target_num': len(data),
                    }))
            else:
                with open(file_path, 'w') as f:
                    fcntl.flock(f, fcntl.LOCK_EX)
                    f.write(json.dumps({
                        'sids': {},
                        'total_target_num': 1,
                    }))
engine.py (project: cobra, author: wufeifei)
def list(self, data=None):
        """
        Update asid_list file.
        :param data: tuple (s_sid, target)
        :return:
        """
        file_path = os.path.join(running_path, '{sid}_list'.format(sid=self.sid))
        if data is None:
            with open(file_path, 'r') as f:
                fcntl.flock(f, fcntl.LOCK_EX)
                result = f.readline()
                return json.loads(result)
        else:
            with open(file_path, 'r+') as f:  # w+ causes a file reading bug
                fcntl.flock(f, fcntl.LOCK_EX)
                result = f.read()
                if result == '':
                    result = {'sids': {}}
                else:
                    result = json.loads(result)
                result['sids'][data[0]] = data[1]
                f.seek(0)
                f.truncate()
                f.write(json.dumps(result))
lock.py (project: landscape-client, author: CanonicalLtd)
def lock_path(path, timeout=0):
    fd = os.open(path, os.O_CREAT)
    flags = fcntl.fcntl(fd, fcntl.F_GETFD, 0)
    flags |= fcntl.FD_CLOEXEC
    fcntl.fcntl(fd, fcntl.F_SETFD, flags)

    started = time.time()

    while True:
        try:
            fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except IOError:
            if started < time.time() - timeout:
                raise LockError("Couldn't obtain lock")
        else:
            break
        time.sleep(0.1)

    def unlock_path():
        fcntl.flock(fd, fcntl.LOCK_UN)
        os.close(fd)

    return unlock_path
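Since lock_path returns an unlock_path callback rather than a context manager, a caller would typically pair it with try/finally (illustrative path):

# Illustrative caller; the path is a placeholder.
unlock = lock_path("/var/lock/my-task.lock", timeout=5)
try:
    pass  # critical section: only one process gets here at a time
finally:
    unlock()  # releases the flock and closes the descriptor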
minc_tools.py (project: nist_mni_pipelines, author: vfonov)
def cache(self,name,suffix=''):
        """Allocate a name in cache, if cache was setup
        also lock the file , so that another process have to wait before using the same file name

        Important: call unlock() on result
        """
        #TODO: something more clever here?
        fname=''
        if self.work_dir is not None:
            fname=self.cache_dir+os.sep+name+suffix
            lock_name=fname+'.lock'
            f=self._locks[lock_name]=open(lock_name, 'a')
            fcntl.lockf(f.fileno(), fcntl.LOCK_EX )
        else:
            fname=self.tmp(name+suffix)

        return fname
watch.py (project: OperatingSystemLab, author: wzc1995)
def edit_hosts():
    f = os.popen('/usr/local/bin/etcdctl ls --sort --recursive /hosts')
    hosts_str = f.read()


    hosts_arr = hosts_str.strip('\n').split('\n')
    hosts_fd = open('/tmp/hosts', 'w')

    fcntl.flock(hosts_fd.fileno(), fcntl.LOCK_EX)

    hosts_fd.write('127.0.0.1 localhost cluster' + '\n')
    i = 0
    for host_ip in hosts_arr:
        host_ip = host_ip[host_ip.rfind('/') + 1:]
        if host_ip[0] == '0':
            hosts_fd.write(host_ip[1:] + ' cluster-' + str(i) + '\n')
        else:
            hosts_fd.write(host_ip + ' cluster-' + str(i) + '\n')
        i += 1

    hosts_fd.flush()
    os.system('/bin/cp /tmp/hosts /etc/hosts')
    hosts_fd.close()
utils.py (project: django-gateone, author: jimmy201602)
def write_pid(path):
    """Writes our PID to *path*."""
    try:
        pid = os.getpid()
        with io.open(path, mode='w', encoding='utf-8') as pidfile:
            # Get a non-blocking exclusive lock
            fcntl.flock(pidfile.fileno(), fcntl.LOCK_EX | fcntl.LOCK_NB)
            pidfile.seek(0)
            pidfile.truncate(0)
            pidfile.write(unicode(pid))
    except:
        logging.error(_("Could not write PID file: %s") % path)
        raise # This raises the original exception
    finally:
        try:
            pidfile.close()
        except:
            pass
singleton.py (project: Linalfred, author: PeterHo)
def isSingleInstance(flavor_id=""):
    global fp
    basename = os.path.splitext(os.path.abspath(sys.argv[0]))[0].replace(
        "/", "-").replace(":", "").replace("\\", "-") + '-%s' % flavor_id + '.lock'
    lockfile = os.path.normpath(tempfile.gettempdir() + '/' + basename)

    if sys.platform == 'win32':
        try:
            if os.path.exists(lockfile):
                os.unlink(lockfile)
            fp = os.open(
                lockfile, os.O_CREAT | os.O_EXCL | os.O_RDWR)
        except OSError:
            return False

    else:  # non Windows
        fp = open(lockfile, 'w')
        try:
            fcntl.lockf(fp, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except IOError:
            return False

    return True
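Typical startup check using the function above (a sketch, not taken from Linalfred):

# Sketch: abort at startup if another instance already holds the lock file.
if not isSingleInstance():
    sys.exit("another instance is already running")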
I2C_mutex.py (project: GoPiGo3, author: DexterInd)
def acquire(self):
        if self.mutex_debug:
            print("I2C mutex acquire")

        acquired = False
        while not acquired:
            try:
                self.DexterLockI2C_handle = open('/run/lock/DexterLockI2C', 'w')
                # lock
                fcntl.lockf(self.DexterLockI2C_handle, fcntl.LOCK_EX | fcntl.LOCK_NB)
                acquired = True
            except IOError: # already locked by a different process
                time.sleep(0.001)
            except Exception as e:
                print(e)

        if self.mutex_debug:
            print("I2C mutex acquired {}".format(time.time()))
package.py (project: builds, author: open-power-host-os)
def lock(self):
        """
        Locks the package to avoid concurrent operations on its shared
        resources.
        Currently, the only resource shared among scripts executed from
        different directories is the repository.
        """
        if not self.locking_enabled:
            LOG.debug("This package has no shared resources to lock")
            return

        LOG.debug("Checking for lock on file {}.".format(self.lock_file_path))
        self.lock_file = open(self.lock_file_path, "w")
        try:
            fcntl.lockf(self.lock_file, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except IOError as exc:
            RESOURCE_UNAVAILABLE_ERROR = 11
            if exc.errno == RESOURCE_UNAVAILABLE_ERROR:
                LOG.info("Waiting for other process to finish operations "
                         "on {}.".format(self.name))
            else:
                raise
        fcntl.lockf(self.lock_file, fcntl.LOCK_EX)
Quads.py (project: quads, author: redhat-performance)
def write_data(self):
        if self.config_newer_than_data():
            self.read_data()
            return False
        else:
            try:
                self.data = {"clouds":self.quads.clouds.data, "hosts":self.quads.hosts.data, "history":self.quads.history.data, "cloud_history":self.quads.cloud_history.data}
                with open(self.config, 'w') as yaml_file:
                    fcntl.flock(yaml_file, fcntl.LOCK_EX | fcntl.LOCK_NB)
                    yaml_file.write(yaml.dump(self.data, default_flow_style=False))
                    fcntl.flock(yaml_file, fcntl.LOCK_UN)
                self.read_data()
                return True
            except Exception, ex:
                self.logger.error("There was a problem with your file %s" % ex)
                return False
config.py (project: webmon, author: KarolBedkowski)
def _try_lock():
    """Check and create lock file - prevent running application twice.

    Return lock file handler.
    """
    lock_file_path = _find_config_file("app.lock", False)
    _check_dir_for_file(lock_file_path)
    try:
        if fcntl is not None:
            lock_file = open(lock_file_path, "w")
            fcntl.lockf(lock_file, fcntl.LOCK_EX | fcntl.LOCK_NB)
        else:
            if os.path.isfile(lock_file_path):
                _LOG.error("another instance detected (lock file exists) "
                           "- exiting")
                return None
            lock_file = open(lock_file_path, "w")
        return lock_file
    except IOError as err:
        import errno
        if err.errno == errno.EAGAIN:
            _LOG.error("another instance detected - exiting")
        else:
            _LOG.exception("locking failed: %s", err)
    return None
common.py (project: ops_agent, author: sjqzhang)
def getLock(self,lock_path,force=False,timeout=30,filename="easyops.lock"):
        import fcntl
        lockFile = os.path.join(lock_path,filename)
        #fp = open(lockFile,'w')
        try:
            if os.path.isfile(lockFile):
                os.chmod(lockFile, 0o777)
        except:
            pass
        self.fp[lockFile] = open(lockFile,'w')
        count = 0
        while True:
            if count > timeout:
                return False
            count += 1
            try:
                fcntl.flock(self.fp[lockFile],fcntl.LOCK_EX|fcntl.LOCK_NB)
            except IOError:
                if force == True:
                    return True
                gevent.sleep(1)
            else:
                return True
_logs.py (project: porcupine, author: Akuli)
def _lock(fileno):
    """Try to lock a file. Return True on success."""
    # closing the file unlocks it, so we don't need to unlock here
    if platform.system() == 'Windows':
        try:
            msvcrt.locking(fileno, msvcrt.LK_NBLCK, 10)
            return True
        except PermissionError:
            return False
    else:
        try:
            fcntl.lockf(fileno, fcntl.LOCK_EX | fcntl.LOCK_NB)
            return True
        # the docs recommend catching both of these
        except (BlockingIOError, PermissionError):
            return False
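A caller passes the descriptor of a file it keeps open; for example (hypothetical path, not from porcupine):

# Hypothetical caller: keep the file object alive, since closing it releases the lock.
_lock_file = open('/tmp/porcupine.lock', 'w')
if not _lock(_lock_file.fileno()):
    raise SystemExit("already running")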
load.py (project: raptiformica, author: vdloo)
def config_cache_lock():
    """
    Obtain the config cache lock, perform the code
    in the context and then let the lock go.
    :yield None
    :return None:
    """
    with open(conf().CONFIG_CACHE_LOCK, 'w+') as lock:
        try:
            log.debug(
                "Getting config cache lock. "
                "If this blocks forever, try deleting file "
                "{} and restart the process.".format(conf().CONFIG_CACHE_LOCK)
            )
            flock(lock, LOCK_EX)  # Blocks until lock becomes available
            yield
        finally:
            log.debug("Releasing the config cache lock")
            flock(lock, LOCK_UN)
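Because the function yields, it is presumably wrapped with contextlib.contextmanager in the original module (the decorator is outside this excerpt); callers would then serialize access like this:

# Assuming config_cache_lock is decorated with @contextlib.contextmanager
# (not visible in the excerpt above), callers hold the lock for the block:
with config_cache_lock():
    pass  # read or write the config cache while holding the exclusive lock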
onion.py (project: ooniprobe-debian, author: TheTorProject)
def is_tor_data_dir_usable(tor_data_dir):
    """
    Checks if the Tor data dir specified is usable. This means that
     it is not being locked and we have permissions to write to it.
    """
    if not os.path.exists(tor_data_dir):
        return True

    try:
        fcntl.flock(open(os.path.join(tor_data_dir, 'lock'), 'w'),
                    fcntl.LOCK_EX | fcntl.LOCK_NB)
        return True
    except (IOError, OSError) as err:
        if err.errno == errno.EACCES:
            # Permission error
            return False
        elif err.errno == errno.EAGAIN:
            # File locked
            return False
metadata.py (project: rca-evaluation, author: sieve-microservices)
def update(path):
    """
    allow concurrent update of metadata
    """
    p = os.path.join(path, "metadata.json")
    # we have to open writeable to get a lock
    with open(p, "a") as f:
        fcntl.lockf(f, fcntl.LOCK_EX)
        data = load(path)
        yield(data)
        save(path, data)
        fcntl.lockf(f, fcntl.LOCK_UN)
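This is also a generator, presumably decorated with contextlib.contextmanager in the original module, so a caller mutates the yielded metadata under the lock:

# Assuming update() is wrapped with @contextlib.contextmanager; the key below
# is illustrative, not taken from the source.
with update("/path/to/run") as metadata:
    metadata["status"] = "done"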
ceph_rgw.py (project: DeepSea, author: SUSE)
def main():
    exit_status = 1
    try:
        args = parse_args()
        # Make sure the exporter is only running once.
        lock_file = '/var/lock/{}.lock'.format(os.path.basename(sys.argv[0]))
        lock_fd = os.open(lock_file, os.O_CREAT)
        lock_success = False
        try:
            fcntl.flock(lock_fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
            lock_success = True
        except IOError:
            msg = 'Failed to export metrics, another instance is running.'
            syslog.syslog(syslog.LOG_INFO, msg)
            sys.stderr.write(msg + '\n')
        if lock_success:
            # Create a new registry, otherwise unwanted default collectors are
            # added automatically.
            registry = prometheus_client.CollectorRegistry()
            # Register our own collector and write metrics to STDOUT.
            registry.register(CephRgwCollector(**vars(args)))
            sys.stdout.write(prometheus_client.generate_latest(registry))
            sys.stdout.flush()
            # Unlock the lock file.
            fcntl.flock(lock_fd, fcntl.LOCK_UN)
            exit_status = 0
    except Exception as e:
        syslog.syslog(syslog.LOG_ERR, str(e))
    # Cleanup
    os.close(lock_fd)
    if lock_success:
        try:
            os.unlink(lock_file)
        except:
            pass
    sys.exit(exit_status)
recipe-577404.py (project: code, author: ActiveState)
def _lockonly(file):
    _msg('got file #', file.fileno())
    try:
        flock(file, LOCK_EX | LOCK_NB)
    except IOError:
        _msg('failed to lock')
        return False
    else:
        _msg('locked successfully')
        return True
recipe-576891.py (project: code, author: ActiveState)
def lock (self):
        '''
        Creates and holds on to the lock file with exclusive access.
        Returns True if lock successful, False if it is not, and raises
        an exception upon operating system errors encountered creating the
        lock file.
        '''
        try:
            #
            # Create or else open and truncate the lock file, in read-write mode.
            #
            # A crashed app might not delete the lock file, so the
            # os.O_CREAT | os.O_EXCL combination that guarantees
            # atomic create isn't useful here.  That is, we don't want to
            # fail locking just because the file exists.
            #
            # Could use os.O_EXLOCK, but that doesn't exist yet in my Python
            #
            self.lockfd = os.open (self.lockfile,
                                   os.O_TRUNC | os.O_CREAT | os.O_RDWR)

            # Acquire exclusive lock on the file, but don't block waiting for it
            fcntl.flock (self.lockfd, fcntl.LOCK_EX | fcntl.LOCK_NB)

            # Writing to file is pointless, nobody can see it
            os.write (self.lockfd, "My Lockfile")

            return True
        except (OSError, IOError), e:
            # Lock cannot be acquired is okay, everything else reraise exception
            if e.errno in (errno.EACCES, errno.EAGAIN):
                return False
            else:
                raise
recipe-577911.py (project: code, author: ActiveState)
def __enter__(self):
        self.pidfile = open(self.path, "a+")
        try:
            fcntl.flock(self.pidfile.fileno(), fcntl.LOCK_EX | fcntl.LOCK_NB)
        except IOError:
            raise SystemExit("Already running according to " + self.path)
        self.pidfile.seek(0)
        self.pidfile.truncate()
        self.pidfile.write(str(os.getpid()))
        self.pidfile.flush()
        self.pidfile.seek(0)
        return self.pidfile
recipe-578998.py (project: code, author: ActiveState)
def __init__(self, mutex_name):
            check_valid_mutex_name(mutex_name)
            filename = os.path.join(tempfile.gettempdir(), mutex_name)
            try:
                handle = open(filename, 'w')
                fcntl.flock(handle, fcntl.LOCK_EX | fcntl.LOCK_NB)
            except:
                self._release_mutex = NULL
                self._acquired = False
                try:
                    handle.close()
                except:
                    pass
            else:
                def release_mutex(*args, **kwargs):
                    # Note: can't use self here!
                    if not getattr(release_mutex, 'called', False):
                        release_mutex.called = True
                        try:
                            fcntl.flock(handle, fcntl.LOCK_UN)
                        except:
                            traceback.print_exc()
                        try:
                            handle.close()
                        except:
                            traceback.print_exc()
                        try:
                            # Removing is pretty much optional (but let's do it to keep the
                            # filesystem cleaner).
                            os.unlink(filename)
                        except:
                            pass

                # Don't use __del__: this approach doesn't have as many pitfalls.
                self._ref = weakref.ref(self, release_mutex)

                self._release_mutex = release_mutex
                self._acquired = True
daemon.py (project: shadowsocksR-b, author: hao35954514)
def write_pid_file(pid_file, pid):
    import fcntl
    import stat

    try:
        fd = os.open(pid_file, os.O_RDWR | os.O_CREAT,
                     stat.S_IRUSR | stat.S_IWUSR)
    except OSError as e:
        shell.print_exception(e)
        return -1
    flags = fcntl.fcntl(fd, fcntl.F_GETFD)
    assert flags != -1
    flags |= fcntl.FD_CLOEXEC
    r = fcntl.fcntl(fd, fcntl.F_SETFD, flags)
    assert r != -1
    # There is no platform independent way to implement fcntl(fd, F_SETLK, &fl)
    # via fcntl.fcntl. So use lockf instead
    try:
        fcntl.lockf(fd, fcntl.LOCK_EX | fcntl.LOCK_NB, 0, 0, os.SEEK_SET)
    except IOError:
        r = os.read(fd, 32)
        if r:
            logging.error('already started at pid %s' % common.to_str(r))
        else:
            logging.error('already started')
        os.close(fd)
        return -1
    os.ftruncate(fd, 0)
    os.write(fd, common.to_bytes(str(pid)))
    return 0
daemonize.py (project: pykit, author: baishancloud)
def trylock_or_exit(self, timeout=10):

        interval = 0.1
        n = int(timeout / interval) + 1
        flag = fcntl.LOCK_EX | fcntl.LOCK_NB

        for ii in range(n):

            fd = os.open(self.lockfile, os.O_RDWR | os.O_CREAT)

            fcntl.fcntl(fd, fcntl.F_SETFD,
                        fcntl.fcntl(fd, fcntl.F_GETFD, 0)
                        | fcntl.FD_CLOEXEC)

            try:
                fcntl.lockf(fd, flag)

                self.lockfp = os.fdopen(fd, 'w+r')
                break

            except IOError as e:
                os.close(fd)
                if e[0] == errno.EAGAIN:
                    time.sleep(interval)
                else:
                    raise

        else:
            logger.info("Failure acquiring lock %s" % (self.lockfile, ))
            sys.exit(1)

        logger.info("OK acquired lock %s" % (self.lockfile))
services.py (project: abusehelper, author: Exploit-install)
def lock_file_nonblocking(fileobj):
    # Use fcntl.flock instead of fcntl.lockf. lockf on pypy 1.7 seems
    # to ignore existing locks.

    try:
        fcntl.flock(fileobj, fcntl.LOCK_EX | fcntl.LOCK_NB)
    except IOError, ioe:
        if ioe.errno not in (errno.EACCES, errno.EAGAIN):
            raise
        return False
    return True
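An illustrative caller; the lock lasts as long as the file object stays open:

# Illustrative use; the path is a placeholder.
lock_file = open("/tmp/abusehelper.lock", "w")
if not lock_file_nonblocking(lock_file):
    raise SystemExit("another instance is running")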

