Python setproctitle() example source code
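Below is a collection of real-world setproctitle() call sites gathered from several open-source projects. They all do the same thing at heart: rename the running process so it shows a meaningful title in ps/top output. A minimal sketch of the API (assuming the third-party setproctitle package is installed, e.g. via pip install setproctitle):

import setproctitle

# Rename the current process; ps/top will now show this string.
setproctitle.setproctitle("my-worker: idle")

# The title can also be read back.
print(setproctitle.getproctitle())

The snippets that follow show the patterns that come up in practice: guarding the import, falling back to ctypes, and embedding task identifiers in the title.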

log.py (project: RPKI-toolkit, author: pavel-odintsov)
def init(ident = None, args = None):
  """
  Initialize logging system.

  Default logging destination is stderr if "args" is not specified.
  """

  # pylint: disable=E1103

  if ident is None:
    ident = os.path.basename(sys.argv[0])

  if args is None:
    args = argparse.Namespace(log_level   = logging.WARNING,
                              log_handler = logging.StreamHandler)

  handler = args.log_handler()
  handler.setFormatter(Formatter(ident, handler))

  root_logger = logging.getLogger()
  root_logger.addHandler(handler)
  root_logger.setLevel(args.log_level)

  if ident and have_setproctitle and use_setproctitle:
    if proctitle_extra:
      setproctitle.setproctitle("%s (%s)" % (ident, proctitle_extra))
    else:
      setproctitle.setproctitle(ident)
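The have_setproctitle, use_setproctitle, and proctitle_extra globals are defined elsewhere in log.py and are not part of this excerpt; a plausible reconstruction of the module-level guard (an assumption, shown only to make the snippet self-contained):

# Hypothetical module-level guard implied by the flags used in init().
try:
    import setproctitle
    have_setproctitle = True
except ImportError:
    have_setproctitle = False

use_setproctitle = True  # applications may clear this to opt out
proctitle_extra = ""     # optional suffix, shown as "ident (extra)"
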
__init__.py (project: battray, author: Carpetsmoker)
def set_proctitle(title):
    try:
        # This is probably the best way to do this, but I don't want to force an
        # external dependency on this C module...
        import setproctitle
        setproctitle.setproctitle(title)
    except ImportError:
        import sys, ctypes, ctypes.util

        libc = ctypes.cdll.LoadLibrary(ctypes.util.find_library('c'))
        title_bytes = title.encode(sys.getdefaultencoding(), 'replace')
        buf = ctypes.create_string_buffer(title_bytes)

        # BSD (and possibly macOS): libc provides setproctitle(3); a format
        # string starting with "-" suppresses the program-name prefix.
        try:
            libc.setproctitle(ctypes.create_string_buffer(b"-%s"), buf)
            return
        except AttributeError:
            pass

        # Linux: prctl(2) with PR_SET_NAME (15) renames the process,
        # truncated to 15 characters.
        try:
            libc.prctl(15, buf, 0, 0, 0)
            return
        except AttributeError:
            pass
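On Linux, the prctl(PR_SET_NAME) fallback can be verified by reading /proc/self/comm, which holds the process name truncated to 15 characters (a quick Linux-only check; the title string is illustrative):

set_proctitle("battray-demo")

# Linux-only: /proc/self/comm reflects the PR_SET_NAME value.
with open('/proc/self/comm') as f:
    print(f.read().strip())  # expected: "battray-demo"
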
collector.py (project: kafka_extract, author: tqlihuiqi)
def run(self):
        """ ????Topic Logsize"""

        setproctitle.setproctitle("KafkaExtractCollector")

        while True:
            self.handler()
            time.sleep(base.config["collector"]["interval_minute"] * 60)
main.py (project: eventor, author: Acrisel)
def task_wrapper(run_id=None, task=None, step=None, adminq=None, use_process=True, logger_info=None):
    '''
    Run a single task and report its PID and result on the admin queue.

    Args:
        run_id: identifier of the current run, used in the process title.
        task: task object carrying pid, sequence, recovery, and id_ fields.
        step: callable step object; invoked as step(seq_path=task.sequence).
        adminq: queue for TaskAdminMsg updates back to the administrator.
        use_process: True when this wrapper runs in a separate process.
        logger_info: logger configuration used to rebuild the child logger.
    '''
    global module_logger

    if use_process:
        module_logger=MpLogger.get_logger(logger_info=logger_info, name='') # name="%s.%s_%s" %(logger_info['name'], step.name, task.sequence))

    task.pid=os.getpid()
    os.environ['EVENTOR_STEP_SEQUENCE']=str(task.sequence)
    os.environ['EVENTOR_STEP_RECOVERY']=str(task.recovery)
    os.environ['EVENTOR_STEP_NAME']=str(step.name)

    if setproctitle is not None and use_process:
        run_id_s = "%s." % run_id if run_id else ''
        setproctitle("eventor: %s%s.%s(%s)" % (run_id_s, step.name, task.id_, task.sequence))

    # Update task with PID
    update=TaskAdminMsg(msg_type=TaskAdminMsgType.update, value=task) 
    adminq.put( update )
    module_logger.info('[ Step {}/{} ] Trying to run'.format(step.name, task.sequence))

    try:
        # todo: need to pass task resources.
        result=step(seq_path=task.sequence, )
    except Exception as e:
        trace=inspect.trace()
        trace=traces(trace) #[2:]
        task.result=(e, pickle.dumps(trace))
        task.status=TaskStatus.failure
    else:
        task.result=result
        task.status=TaskStatus.success

    result=TaskAdminMsg(msg_type=TaskAdminMsgType.result, value=task) 
    module_logger.info('[ Step {}/{} ] Completed, status: {}'.format(step.name, task.sequence, str(task.status), ))
    adminq.put( result )
    return True
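The wrapper above retitles each worker process so individual tasks are identifiable in ps output. The same pattern in isolation (a minimal, runnable sketch; the names are illustrative and not part of eventor):

import os
from multiprocessing import Process

import setproctitle

def worker(run_id, step_name, sequence):
    # One title per worker process, mirroring task_wrapper above.
    setproctitle.setproctitle("demo: %s.%s(%s)" % (run_id, step_name, sequence))
    print(os.getpid(), setproctitle.getproctitle())

if __name__ == '__main__':
    procs = [Process(target=worker, args=('run1', 'step', i)) for i in range(3)]
    for p in procs:
        p.start()
    for p in procs:
        p.join()
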
mattersend.py (project: mattersend, author: mtorromeo)
def main():
    try:
        import setproctitle
        setproctitle.setproctitle(name)
    except ImportError:
        pass

    dialects = csv.list_dialects()
    dialects.sort()
    dialects.insert(0, 'sniff')

    # CLI arguments
    parser = argparse.ArgumentParser(prog=name, description=description)

    parser.add_argument('-V', '--version',  action='version', version="%(prog)s " + version)
    parser.add_argument('-C', '--config',   help='Use a different configuration file')
    parser.add_argument('-s', '--section',  help='Configuration file section', default='DEFAULT')
    parser.add_argument('-c', '--channel',  help='Send to this channel or @username')
    parser.add_argument('-U', '--url',      help='Mattermost webhook URL')
    parser.add_argument('-u', '--username', help='Username')
    parser.add_argument('-i', '--icon',     help='Icon')

    group = parser.add_mutually_exclusive_group()
    group.add_argument('-t', '--tabular', metavar='DIALECT', const='sniff',
                       nargs='?', choices=dialects,
                       help='Parse input as CSV and format it as a table (DIALECT can be one of %(choices)s)')
    group.add_argument('-y', '--syntax', default='auto')

    parser.add_argument('-I', '--info', action='store_true',
                        help='Include file information in message')
    parser.add_argument('-n', '--dry-run', '--just-print', action='store_true',
                        help="Don't send, just print the payload")
    parser.add_argument('-f', '--file', default='-',
                        help="Read content from FILE. If - reads from standard input (DEFAULT: %(default)s)")

    args = parser.parse_args()

    if args.file == '-':
        message = sys.stdin.read()
        filename = None
    else:
        message = ''
        filename = args.file

    try:
        payload = send(args.channel, message, filename, args.url,
                       args.username, args.icon, args.syntax, args.tabular,
                       args.info, args.dry_run, args.section, name,
                       args.config)
    except (configparser.Error, TypeError, RuntimeError) as e:
        sys.exit(str(e))

    if args.dry_run:
        print(payload)
prof-sparse.py (project: optnet, author: locuslab)
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--no-cuda', action='store_true')
    parser.add_argument('--nTrials', type=int, default=5)
    # parser.add_argument('--boardSz', type=int, default=2)
    # parser.add_argument('--batchSz', type=int, default=150)
    parser.add_argument('--Qpenalty', type=float, default=0.1)
    args = parser.parse_args()

    args.cuda = not args.no_cuda and torch.cuda.is_available()
    setproctitle.setproctitle('bamos.sudoku.prof-sparse')

    print('=== nTrials: {}'.format(args.nTrials))
    print('| {:8s} | {:8s} | {:21s} | {:21s} |'.format(
        'boardSz', 'batchSz', 'dense forward (s)', 'sparse forward (s)'))
    for boardSz in [2,3]:
        with open('data/{}/features.pt'.format(boardSz), 'rb') as f:
            X = torch.load(f)
        with open('data/{}/labels.pt'.format(boardSz), 'rb') as f:
            Y = torch.load(f)
        N, nFeatures = X.size(0), int(np.prod(X.size()[1:]))

        for batchSz in [1, 64, 128]:
            dmodel = models.OptNetEq(boardSz, args.Qpenalty, trueInit=True)
            spmodel = models.SpOptNetEq(boardSz, args.Qpenalty, trueInit=True)
            if args.cuda:
                dmodel = dmodel.cuda()
                spmodel = spmodel.cuda()

            dtimes = []
            sptimes = []
            for i in range(args.nTrials):
                Xbatch = Variable(X[i*batchSz:(i+1)*batchSz])
                Ybatch = Variable(Y[i*batchSz:(i+1)*batchSz])
                if args.cuda:
                    Xbatch = Xbatch.cuda()
                    Ybatch = Ybatch.cuda()

                # Make sure buffers are initialized.
                # dmodel(Xbatch)
                # spmodel(Xbatch)

                start = time.time()
                # dmodel(Xbatch)  # dense pass left disabled in this snippet
                dtimes.append(time.time()-start)

                start = time.time()
                spmodel(Xbatch)
                sptimes.append(time.time()-start)

            print('| {:8d} | {:8d} | {:.2e} +/- {:.2e} | {:.2e} +/- {:.2e} |'.format(
                boardSz, batchSz, np.mean(dtimes), np.std(dtimes),
                np.mean(sptimes), np.std(sptimes)))
sender.py (project: iris, author: linkedin)
def init_sender(config):
    gevent.signal(signal.SIGINT, sender_shutdown)
    gevent.signal(signal.SIGTERM, sender_shutdown)
    gevent.signal(signal.SIGQUIT, sender_shutdown)

    process_title = config['sender'].get('process_title')

    if process_title and isinstance(process_title, basestring):
        setproctitle.setproctitle(process_title)
        logger.info('Changing process name to %s', process_title)

    api_host = config['sender'].get('api_host', 'http://localhost:16649')
    db.init(config)
    cache.init(api_host, config)
    metrics.init(config, 'iris-sender', default_sender_metrics)
    api_cache.cache_priorities()
    api_cache.cache_applications()
    api_cache.cache_modes()

    global should_mock_gwatch_renewer, send_message
    if config['sender'].get('debug'):
        logger.info('DEBUG MODE')
        should_mock_gwatch_renewer = True
        should_skip_send = True
    else:
        should_skip_send = False
    should_mock_gwatch_renewer = should_mock_gwatch_renewer or config.get('skipgmailwatch', False)
    should_skip_send = should_skip_send or config.get('skipsend', False)

    if should_skip_send:
        config['vendors'] = [{
            'type': 'iris_dummy',
            'name': 'iris dummy vendor'
        }]

    global quota
    quota = ApplicationQuota(db, cache.targets_for_role, message_send_enqueue, config['sender'].get('sender_app'))

    global coordinator
    zk_hosts = config['sender'].get('zookeeper_cluster', False)

    if zk_hosts:
        logger.info('Initializing coordinator with ZK: %s', zk_hosts)
        from iris.coordinator.kazoo import Coordinator
        coordinator = Coordinator(zk_hosts=zk_hosts,
                                  hostname=socket.gethostname(),
                                  port=config['sender'].get('port', 2321),
                                  join_cluster=True)
    else:
        logger.info('ZK cluster info not specified. Using master status from config')
        from iris.coordinator.noncluster import Coordinator
        coordinator = Coordinator(is_master=config['sender'].get('is_master', True),
                                  slaves=config['sender'].get('slaves', []))
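The process title here is driven entirely by configuration; a minimal fragment that would trigger the rename (illustrative, using only the keys read above):

import setproctitle

# Only the key consulted for the process title is shown.
config = {'sender': {'process_title': 'iris-sender'}}

process_title = config['sender'].get('process_title')
if process_title and isinstance(process_title, str):  # basestring on Python 2
    setproctitle.setproctitle(process_title)
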
easy_daemon.py (project: ops_agent, author: sjqzhang)
def daemonize(self):
        """
        do the UNIX double-fork magic, see Stevens' "Advanced
        Programming in the UNIX Environment" for details (ISBN 0201563177)
        http://www.erlenstar.demon.co.uk/unix/faq_2.html#SEC16
        """
        try:
            pid = os.fork()
            if pid > 0:
                # exit first parent
                sys.exit(0)
        except OSError, e:
            sys.stderr.write("fork #1 failed: %d (%s)\n" % (e.errno, e.strerror))
            sys.exit(1)

        # decouple from parent environment (Move to EasyApplication.load_config(), delete by leon 20151108)
        os.setsid()
        os.umask(0)

        # do second fork
        try:
            pid = os.fork()
            if pid > 0:
                # exit from second parent
                sys.exit(0)
        except OSError, e:
            sys.stderr.write("fork #2 failed: %d (%s)\n" % (e.errno, e.strerror))
            sys.exit(1)

        # redirect standard file descriptors
        sys.stdout.flush()
        sys.stderr.flush()
        si = file(self.stdin, 'r')
        so = file(self.stdout, 'a+')
        se = file(self.stderr, 'a+', 0)
        os.dup2(si.fileno(), sys.stdin.fileno())
        os.dup2(so.fileno(), sys.stdout.fileno())
        os.dup2(se.fileno(), sys.stderr.fileno())

        # write pidfile
        atexit.register(self.delpid)
        pid = str(os.getpid())
        file(self.pidfile, 'w+').write("%s\n" % pid)

        # update proc name
        import setproctitle
        setproctitle.setproctitle(self.daemon_conf['name'])
icnn.py (project: icnn, author: locuslab)
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--save', type=str, default='work')
    parser.add_argument('--nEpoch', type=int, default=100)
    # parser.add_argument('--testBatchSz', type=int, default=2048)
    parser.add_argument('--seed', type=int, default=42)
    parser.add_argument('--model', type=str, default="picnn",
                        choices=['picnn', 'ficnn'])
    parser.add_argument('--dataset', type=str, default="moons",
                        choices=['moons', 'circles', 'linear'])
    parser.add_argument('--noncvx', action='store_true')

    args = parser.parse_args()

    npr.seed(args.seed)
    tf.set_random_seed(args.seed)

    setproctitle.setproctitle('bamos.icnn.synthetic.{}.{}'.format(args.model, args.dataset))

    save = os.path.join(os.path.expanduser(args.save),
                        "{}.{}".format(args.model, args.dataset))
    if os.path.isdir(save):
        shutil.rmtree(save)
    os.makedirs(save, exist_ok=True)

    if args.dataset == "moons":
        (dataX, dataY) = make_moons(noise=0.3, random_state=0)
    elif args.dataset == "circles":
        (dataX, dataY) = make_circles(noise=0.2, factor=0.5, random_state=0)
        dataY = 1.-dataY
    elif args.dataset == "linear":
        (dataX, dataY) = make_classification(n_features=2, n_redundant=0, n_informative=2,
                                             random_state=1, n_clusters_per_class=1)
        rng = np.random.RandomState(2)
        dataX += 2 * rng.uniform(size=dataX.shape)
    else:
        assert(False)

    dataY = dataY.reshape((-1, 1)).astype(np.float32)

    nData = dataX.shape[0]
    nFeatures = dataX.shape[1]
    nLabels = 1
    nXy = nFeatures + nLabels

    config = tf.ConfigProto()  # log_device_placement=False
    config.gpu_options.allow_growth = True
    with tf.Session(config=config) as sess:
        model = Model(nFeatures, nLabels, sess, args.model, nGdIter=30)
        model.train(args, dataX, dataY)
icnn_ebundle.py (project: icnn, author: locuslab)
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--save', type=str, default='work/mse.ebundle')
    parser.add_argument('--nEpoch', type=float, default=50)
    parser.add_argument('--nBundleIter', type=int, default=30)
    # parser.add_argument('--trainBatchSz', type=int, default=25)
    parser.add_argument('--trainBatchSz', type=int, default=70)
    # parser.add_argument('--testBatchSz', type=int, default=2048)
    parser.add_argument('--noncvx', action='store_true')
    parser.add_argument('--seed', type=int, default=42)
    # parser.add_argument('--valSplit', type=float, default=0)

    args = parser.parse_args()

    assert(not args.noncvx)

    setproctitle.setproctitle('bamos.icnn.comp.mse.ebundle')

    npr.seed(args.seed)
    tf.set_random_seed(args.seed)

    save = os.path.expanduser(args.save)
    if os.path.isdir(save):
        shutil.rmtree(save)
    os.makedirs(save)
    ckptDir = os.path.join(save, 'ckpt')
    args.ckptDir = ckptDir
    if not os.path.exists(ckptDir):
        os.makedirs(ckptDir)

    data = olivetti.load("data/olivetti")
    # eps = 1e-8
    # data['trainX'] = data['trainX'].clip(eps, 1.-eps)
    # data['trainY'] = data['trainY'].clip(eps, 1.-eps)
    # data['testX'] = data['testX'].clip(eps, 1.-eps)
    # data['testY'] = data['testY'].clip(eps, 1.-eps)

    nTrain = data['trainX'].shape[0]
    nTest = data['testX'].shape[0]

    inputSz = list(data['trainX'][0].shape)
    outputSz = list(data['trainY'][1].shape)

    print("\n\n" + "="*40)
    print("+ nTrain: {}, nTest: {}".format(nTrain, nTest))
    print("+ inputSz: {}, outputSz: {}".format(inputSz, outputSz))
    print("="*40 + "\n\n")

    config = tf.ConfigProto()  # log_device_placement=False
    config.gpu_options.allow_growth = True
    with tf.Session(config=config) as sess:
        model = Model(inputSz, outputSz, sess)
        model.train(args, data['trainX'], data['trainY'], data['testX'], data['testY'])
icnn.back.py (project: icnn, author: locuslab)
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--save', type=str, default='work/mse')
    parser.add_argument('--nEpoch', type=float, default=50)
    # parser.add_argument('--trainBatchSz', type=int, default=25)
    parser.add_argument('--trainBatchSz', type=int, default=70)
    # parser.add_argument('--testBatchSz', type=int, default=2048)
    parser.add_argument('--nGdIter', type=int, default=30)
    parser.add_argument('--noncvx', action='store_true')
    parser.add_argument('--seed', type=int, default=42)
    # parser.add_argument('--valSplit', type=float, default=0)

    args = parser.parse_args()

    setproctitle.setproctitle('bamos.icnn.comp.mse')

    npr.seed(args.seed)
    tf.set_random_seed(args.seed)

    save = os.path.expanduser(args.save)
    if os.path.isdir(save):
        shutil.rmtree(save)
    os.makedirs(save)
    ckptDir = os.path.join(save, 'ckpt')
    args.ckptDir = ckptDir
    if not os.path.exists(ckptDir):
        os.makedirs(ckptDir)

    data = olivetti.load("data/olivetti")

    nTrain = data['trainX'].shape[0]
    nTest = data['testX'].shape[0]

    inputSz = list(data['trainX'][0].shape)
    outputSz = list(data['trainY'][1].shape)

    print("\n\n" + "="*40)
    print("+ nTrain: {}, nTest: {}".format(nTrain, nTest))
    print("+ inputSz: {}, outputSz: {}".format(inputSz, outputSz))
    print("="*40 + "\n\n")

    config = tf.ConfigProto()  # log_device_placement=False
    config.gpu_options.allow_growth = True
    with tf.Session(config=config) as sess:
        model = Model(inputSz, outputSz, sess, args.nGdIter)
        model.train(args, data['trainX'], data['trainY'], data['testX'], data['testY'])

