Python code examples of fatal()
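In the standard library, logging.fatal() is an alias for logging.critical() and logs at the FATAL/CRITICAL level (numeric value 50). The project snippets below all follow the same pattern: log the error at fatal severity, then exit or raise. A minimal self-contained sketch of that pattern (load_config and the file path are made up for illustration, not taken from any of the projects below):

import logging
import sys

logging.basicConfig(level=logging.INFO, format="%(levelname)s %(message)s")

def load_config(path):
    # Hypothetical helper for illustration: read a config file or abort.
    try:
        with open(path) as f:
            return f.read()
    except (IOError, OSError) as e:
        # logging.fatal() is equivalent to logging.critical(); both log at level 50.
        logging.fatal("Could not read config file '%s'! Error: %s", path, e)
        sys.exit(1)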

Main.py (project: mongodb_consistent_backup, author: Percona-Lab)
def get_lock(self):
        # noinspection PyBroadException
        try:
            if not self.config.lock_file:
                self.config.lock_file = '/tmp/%s.lock' % self.program_name
            self.lock = Lock(self.config.lock_file)
        except Exception:
            logging.fatal("Could not acquire lock: '%s'! Is another %s process running? Exiting" % (self.config.lock_file, self.program_name))
            self.logger.compress(True)
            sys.exit(1)
Main.py (project: mongodb_consistent_backup, author: Percona-Lab)
def exception(self, error_message, error):
        self.last_error_msg = error_message
        if isinstance(error, NotifyError):
            logging.error(error_message)
        else:
            if isinstance(error, OperationError):
                logging.fatal(error_message)
            else:
                logging.exception(error_message)
            return self.cleanup_and_exit(None, None)
Replset.py (project: mongodb_consistent_backup, author: Percona-Lab)
def __init__(self, config, db):
        self.config         = config
        self.db             = db
        self.read_pref_tags = self.config.replication.read_pref_tags
        self.max_lag_secs   = self.config.replication.max_lag_secs
        self.min_priority   = self.config.replication.min_priority
        self.max_priority   = self.config.replication.max_priority
        self.hidden_only    = self.config.replication.hidden_only

        self.state_primary   = 1
        self.state_secondary = 2
        self.state_arbiter   = 7
        self.hidden_weight   = 0.20
        self.pri0_weight     = 0.10

        self.replset      = True
        self.rs_config    = None
        self.rs_status    = None
        self.primary      = None
        self.secondary    = None
        self.mongo_config = None

        self.replset_summary = {}

        # Get a DB connection
        try:
            if isinstance(self.db, DB):
                self.connection = self.db.connection()
            else:
                raise Error("'db' field is not an instance of class: 'DB'!")
        except Exception, e:
            logging.fatal("Could not get DB connection! Error: %s" % e)
            raise OperationError(e)
DB.py (project: mongodb_consistent_backup, author: Percona-Lab)
def auth_if_required(self):
        if self.username is not None and self.password is not None:
            try:
                logging.debug("Authenticating connection with username: %s" % self.username)
                self._conn[self.authdb].authenticate(self.username, self.password)
            except OperationFailure, e:
                logging.fatal("Unable to authenticate with host %s: %s" % (self.uri, e))
                raise DBAuthenticationError(e)
        else:
            pass
TarThread.py (project: mongodb_consistent_backup, author: Percona-Lab)
def run(self):
        if os.path.isdir(self.backup_dir):
            if not os.path.isfile(self.output_file):
                try:
                    backup_base_dir  = os.path.dirname(self.backup_dir)
                    backup_base_name = os.path.basename(self.backup_dir)

                    log_msg   = "Archiving directory: %s" % self.backup_dir
                    cmd_flags = ["-C", backup_base_dir, "-cf", self.output_file, "--remove-files", backup_base_name]

                    if self.do_gzip():
                        log_msg   = "Archiving and compressing directory: %s" % self.backup_dir
                        cmd_flags = ["-C", backup_base_dir, "-czf", self.output_file, "--remove-files", backup_base_name]

                    logging.info(log_msg)
                    self.running  = True
                    self._command = LocalCommand(self.binary, cmd_flags, self.verbose)
                    self.exit_code = self._command.run()
                except Exception, e:
                    logging.fatal("Failed archiving file: %s! Error: %s" % (self.output_file, e))
                finally:
                    self.running   = False
                    self.stopped   = True
                    self.completed = True
            else:
                logging.fatal("Output file: %s already exists!" % self.output_file)
            return self.backup_dir
Tar.py (project: mongodb_consistent_backup, author: Percona-Lab)
def run(self):
        try:
            thread_count = self.threads()
            self._pool   = Pool(processes=thread_count)
            logging.info("Archiving backup directories with pool of %i thread(s)" % thread_count)
        except Exception, e:
            logging.fatal("Could not start pool! Error: %s" % e)
            raise Error(e)

        if os.path.isdir(self.backup_dir):
            try:
                self.running = True
                for backup_dir in os.listdir(self.backup_dir):
                    subdir_name = os.path.join(self.backup_dir, backup_dir)
                    if not os.path.isdir(os.path.join(subdir_name, "dump")):
                        continue
                    output_file = "%s.tar" % subdir_name
                    if self.do_gzip():
                        output_file  = "%s.tgz" % subdir_name
                    self._pool.apply_async(TarThread(subdir_name, output_file, self.compression(), self.verbose, self.binary).run, callback=self.done)
                    self._pooled.append(subdir_name)
            except Exception, e:
                self._pool.terminate()
                logging.fatal("Could not create tar archiving thread! Error: %s" % e)
                raise Error(e)
            finally:
                self.wait()
                self.completed = True
LogWrapper.py (project: tagberry, author: csailer)
def fatal(self,message,exception):
        self._logWriter(logging.FATAL,message,exception)
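The fatal() wrapper above simply forwards to a private _logWriter helper that is not shown on this page. As a rough sketch only (the assumed signature and logger name are not taken from the tagberry source), such a helper could route the level, message, and exception text to the standard library logger:

import logging

class LogWrapper(object):
    def _logWriter(self, level, message, exception=None):
        # Hypothetical sketch: forward the level and message to the stdlib
        # logger, appending the exception text when one is supplied.
        logger = logging.getLogger("tagberry")
        if exception is not None:
            logger.log(level, "%s: %s" % (message, exception))
        else:
            logger.log(level, message)

    def fatal(self, message, exception=None):
        self._logWriter(logging.FATAL, message, exception)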
lang2loc_mdnshared.py (project: geomdn, author: afshinrahimi)
def get_symb_mus(self, mus, sigmas, corxy, pis, prediction_method="pi"):
        """
        Can be used to train an autoencoder that given location
        trains a mixture density layer and then outputs the same
        location
        symbolycally predict the mu that maximizes the mixture model
        either based on mixture probability of the component
        with highest pi, see pred_sharedparams
        """
        if prediction_method == "mixture":
            """
            sigmainvs = 1.0 / sigmas
            sigmainvprods = sigmainvs[:,:, 0] * sigmainvs[:,:, 1]
            sigmas2 = sigmas ** 2
            corxy2 = corxy **2
            diff2 = diff ** 2
            diffsigma = diff2 / sigmas2
            diffsigmanorm = np.sum(diffsigma, axis=-1)
            z = diffsigmanorm - 2 * corxy * diffprod * sigmainvprods
            oneminuscorxy2inv = 1.0 / (1.0 - corxy2)
            expterm = np.exp(-0.5 * z * oneminuscorxy2inv)
            expterm = 1.0
            probs = (0.5 / np.pi) * sigmainvprods * T.sqrt(oneminuscorxy2inv) * expterm
            probs = pis * probs
            """
            logging.fatal("not implemented!")
            sys.exit()
        elif prediction_method == "pi":    
            preds = T.argmax(pis, axis=1)
            selected_mus = mus[T.arange(mus.shape[0]), preds, :]
            return selected_mus
lang2loc.py (project: geomdn, author: afshinrahimi)
def get_symb_mus(self, mus, sigmas, corxy, pis, prediction_method="pi"):
        """
        Can be used to train an autoencoder that given location
        trains a mixture density layer and then outputs the same
        location
        symbolycally predict the mu that maximizes the mixture model
        either based on mixture probability of the component
        with highest pi, see pred_sharedparams
        """
        if prediction_method == "mixture":
            #sigmainvs = 1.0 / sigmas
            #sigmainvprods = sigmainvs[:,:, 0] * sigmainvs[:,:, 1]
            #sigmas2 = sigmas ** 2
            #corxy2 = corxy **2
            #diff2 = diff ** 2
            #diffsigma = diff2 / sigmas2
            #diffsigmanorm = np.sum(diffsigma, axis=-1)
            #z = diffsigmanorm - 2 * corxy * diffprod * sigmainvprods
            #oneminuscorxy2inv = 1.0 / (1.0 - corxy2)
            #expterm = np.exp(-0.5 * z * oneminuscorxy2inv)
            #expterm = 1.0
            #probs = (0.5 / np.pi) * sigmainvprods * T.sqrt(oneminuscorxy2inv) * expterm
            #probs = pis * probs
            logging.fatal("not implemented!")
            sys.exit()
        elif prediction_method == "pi":
            preds = T.argmax(pis, axis=1)
            selected_mus = mus[T.arange(mus.shape[0]),preds,:]
            return selected_mus
stratosphere.py (project: stratosphere, author: victortrac)
def wait_for_completion(project, result):
    print('Waiting for deployment {}...'.format(result['name']))
    last_event = result
    while not last_event['status'] in ['DONE', ]:
        time.sleep(1)
        last_event = dm.operations().get(project=project, operation=last_event['name']).execute()
        logger.info('Operation: {name}, TargetLink: {targetLink}, Progress: {progress}, Status: {status}'
                    .format(**last_event))
    if len(last_event.get('error', [])):
        logging.error('*** Stack apply failed! ***')
        logging.fatal(pprint.pformat(last_event))
        sys.exit(1)
    else:
        print('Stack action complete.')
main.py (project: m16c-interface, author: q3k)
def dump(args, s):
    # Run target clock at 6MHz.
    s.adapter.set_tclk(0)
    # Run target serial clock at 1.5MHz
    s.adapter.set_sclk(127)

    try:
        code = args.code.decode('hex')
    except TypeError:
        logging.fatal("Code must be in hexadecimal format.")
        return
    if len(code) != 7:
        logging.fatal("Code must be 7 bytes long.")
        return

    s.unlock(code)
    status = s.unlock_status()
    if status != serialio.UNLOCK_SUCCESSFUL:
        logging.fatal("Target did not unlock.")
        return
    logging.info("Target unlocked.")

    start = 0x0e00
    end = 0x0fff

    with open(args.output, 'w') as f:
        logging.info("Writing pages {:x}-{:x} to {}...".format(start, end,
                                                               args.output))
        for page in range(start, end+1):
            logging.debug("Dumping {:x}00-{:x}ff...".format(page, page))
            data = s.read_page(page)
            f.write(data)
hooks.py (project: idascripts, author: ctfhacker)
def noapi(*args):
    fr = sys._getframe().f_back
    if fr is None:
        logging.fatal("{:s}.noapi : Unexpected empty frame from caller. Continuing.. : {!r} : {!r}".format('.'.join(("internal",__name__)), sys._getframe(), sys._getframe().f_code))
        return hook.CONTINUE

    return internal.interface.priorityhook.CONTINUE if fr.f_back is None else internal.interface.priorityhook.STOP
hooks.py (project: idascripts, author: ctfhacker)
def changing(cls, ea, repeatable_cmt, newcmt):
        oldcmt = idaapi.get_cmt(ea, repeatable_cmt)
        try: cls.event.send((ea, bool(repeatable_cmt), newcmt))
        except StopIteration, e:
            logging.fatal("{:s}.changing : Unexpected termination of event handler. Re-instantiating it.".format('.'.join((__name__,cls.__name__))))
            cls.event = cls._event(); next(cls.event)
hooks.py (project: idascripts, author: ctfhacker)
def changed(cls, ea, repeatable_cmt):
        newcmt = idaapi.get_cmt(ea, repeatable_cmt)
        try: cls.event.send((ea, bool(repeatable_cmt), None))
        except StopIteration, e:
            logging.fatal("{:s}.changed : Unexpected termination of event handler. Re-instantiating it.".format('.'.join((__name__,cls.__name__))))
            cls.event = cls._event(); next(cls.event)
hooks.py (project: idascripts, author: ctfhacker)
def changed(cls, cb, a, cmt, repeatable):
        fn = idaapi.get_func(a.startEA)
        newcmt = idaapi.get_func_cmt(fn, repeatable)
        try: cls.event.send((fn.startEA, bool(repeatable), None))
        except StopIteration, e:
            logging.fatal("{:s}.changed : Unexpected termination of event handler. Re-instantiating it.".format('.'.join((__name__,cls.__name__))))
            cls.event = cls._event(); next(cls.event)

### database scope
hooks.py (project: idascripts, author: ctfhacker)
def __rebase_function(old, new, size, iterable):
    key = internal.comment.tagging.__address__
    failure, total = [], list(iterable)

    for i, fn in enumerate(total):
        # grab the contents dictionary
        try:
            state = internal.comment.contents._read(None, fn)
        except LookupError:
            logging.fatal("{:s}.rebase : Address {:x} -> {:x} is not a function : {:x} -> {:x}".format(__name__, fn - new + old, fn, old, new))
            state = None
        if state is None: continue

        # now we can erase the old one
        res = fn - new + old
        internal.comment.contents._write(res, None, None)

        # update the addresses
        res, state[key] = state[key], {ea - old + new : ref for ea,ref in state[key].iteritems()}

        # and put the new addresses back
        ok = internal.comment.contents._write(None, fn, state)
        if not ok:
            logging.fatal("{:s}.rebase : Failure trying to write refcount for {:x} : {!r} : {!r}".format(__name__, fn, res, state[key]))
            failure.append((fn, res, state[key]))

        yield i, fn
    return
database.py (project: idascripts, author: ctfhacker)
def prevreg(cls, ea, reg, *regs, **modifiers):
        regs = (reg,) + regs
        count = modifiers.get('count', 1)
        args = ', '.join(["{:x}".format(ea)] + __builtin__.map("\"{:s}\"".format, regs) + __builtin__.map(utils.unbox("{:s}={!r}".format), modifiers.items()))

        # generate each helper using the regmatch class
        iterops = interface.regmatch.modifier(**modifiers)
        uses_register = interface.regmatch.use(regs)

        # if within a function, then make sure we're within the chunk's bounds.
        if function.within(ea):
            (start, _) = function.chunk(ea)
            fwithin = functools.partial(operator.le, start)

        # otherwise ensure that we're not in the function and we're a code type.
        else:
            fwithin = utils.compose(utils.fap(utils.compose(function.within, operator.not_), type.is_code), all)

            start = cls.walk(ea, cls.prev, fwithin)
            start = top() if start == idaapi.BADADDR else start

        # define a function for cls.walk to continue looping while
        F = lambda ea: fwithin(ea) and not any(uses_register(ea, opnum) for opnum in iterops(ea))

        # skip the current address
        prevea = cls.prev(ea)
        if prevea is None:
            # FIXME: include registers in message
            logging.fatal("{:s}.prevreg({:s}, ...) : Unable to start walking from previous address. : {:x}".format('.'.join((__name__, cls.__name__)), args, ea))
            return ea

        # now walk while none of our registers match
        res = cls.walk(prevea, cls.prev, F)
        if res == idaapi.BADADDR or (cls == address and res < start):
            # FIXME: include registers in message
            raise ValueError("{:s}.prevreg({:s}, ...) : Unable to find register{:s} within chunk. {:x}:{:x} : {:x}".format('.'.join((__name__, cls.__name__)), args, ('s','')[len(regs)>1], start, ea, res))

        # recurse if the user specified it
        modifiers['count'] = count - 1
        return cls.prevreg( cls.prev(res), *regs, **modifiers) if count > 1 else res
database.py (project: idascripts, author: ctfhacker)
def nextreg(cls, ea, reg, *regs, **modifiers):
        regs = (reg,) + regs
        count = modifiers.get('count',1)
        args = ', '.join(["{:x}".format(ea)] + __builtin__.map("\"{:s}\"".format, regs) + __builtin__.map(utils.unbox("{:s}={!r}".format), modifiers.items()))

        # generate each helper using the regmatch class
        iterops = interface.regmatch.modifier(**modifiers)
        uses_register = interface.regmatch.use(regs)

        # if within a function, then make sure we're within the chunk's bounds.
        if function.within(ea):
            (_,end) = function.chunk(ea)
            fwithin = functools.partial(operator.gt, end)

        # otherwise ensure that we're not in a function and we're a code type.
        else:
            fwithin = utils.compose(utils.fap(utils.compose(function.within, operator.not_), type.is_code), all)

            end = cls.walk(ea, cls.next, fwithin)
            end = bottom() if end == idaapi.BADADDR else end

        # define a function for cls.walk to continue looping while
        F = lambda ea: fwithin(ea) and not any(uses_register(ea, opnum) for opnum in iterops(ea))

        # skip the current address
        nextea = cls.next(ea)
        if nextea is None:
            # FIXME: include registers in message
            logging.fatal("{:s}.nextreg({:s}) : Unable to start walking from next address. : {:x}".format('.'.join((__name__, cls.__name__)), args, ea))
            return ea

        # now walk while none of our registers match
        res = cls.walk(nextea, cls.next, F)
        if res == idaapi.BADADDR or (cls == address and res >= end):
            # FIXME: include registers in message
            raise ValueError("{:s}.nextreg({:s}, ...) : Unable to find register{:s} within chunk {:x}:{:x} : {:x}".format('.'.join((__name__, cls.__name__)), args, ('s','')[len(regs)>1], end, ea, res))

        # recurse if the user specified it
        modifiers['count'] = count - 1
        return cls.nextreg(cls.next(res), *regs, **modifiers) if count > 1 else res
database.py (project: idascripts, author: ctfhacker)
def next(cls, ea, count):
        ea = interface.address.within(ea)
        isStop = lambda ea: _instruction.feature(ea) & idaapi.CF_STOP == idaapi.CF_STOP
        invalidQ = utils.compose(utils.fap(utils.compose(type.is_code, operator.not_), isStop), any)
        refs = filter(type.is_code, xref.down(ea))
        if len(refs) > 1:
            logging.fatal("{:s}.next({:x}, count={:d}) : Unable to determine next address due to multiple xrefs being available : {:s}".format('.'.join((__name__, cls.__name__)), ea, count, ', '.join(__builtin__.map("{:x}".format,refs))))
            return None
        if invalidQ(ea) and not _instruction.is_jmp(ea):
#            logging.fatal("{:s}.next({:x}, count={:d}) : Unable to move to next address. Flow has stopped.".format('.'.join((__name__, cls.__name__)), ea, count))
            return None
        res = refs[0] if _instruction.is_jmp(ea) else address.next(ea)
        return cls.next(res, count-1) if count > 1 else res
_utils.py (project: idascripts, author: ctfhacker)
def start(self):
        '''Start to dispatch callables in the execution queue.'''
        cls = self.__class__
        if not self.thread.is_alive():
            logging.fatal("{:s}.start : Unable to resume an already terminated execution queue. : {!r}".format('.'.join(('internal',__name__,cls.__name__)), self))
            return False
        logging.info("{:s}.start : Resuming execution queue. :{!r}".format('.'.join(('internal',__name__,cls.__name__)), self.thread))
        res, _ = self.ev_unpaused.is_set(), self.ev_unpaused.set()
        self.queue.acquire()
        self.queue.notify_all()
        self.queue.release()
        return not res

