Python logging.info() example source code

kas.py — project: kas, author: siemens
def kas(argv):
    """
        The main entry point of kas.
    """
    create_logger()

    parser = kas_get_argparser()
    args = parser.parse_args(argv)

    if args.debug:
        logging.getLogger().setLevel(logging.DEBUG)

    logging.info('%s %s started', os.path.basename(sys.argv[0]), __version__)

    loop = asyncio.get_event_loop()

    for sig in (signal.SIGINT, signal.SIGTERM):
        loop.add_signal_handler(sig, interruption)
    atexit.register(_atexit_handler)

    for plugin in getattr(kasplugin, 'plugins', []):
        if plugin().run(args):
            return

    parser.print_help()
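
A minimal sketch (an assumption, not part of the kas source shown here) of how a console entry point could hand the command-line arguments to kas():

import sys

def main():
    # Hypothetical entry point: kas() expects argv without the program name.
    kas(sys.argv[1:])

if __name__ == '__main__':
    main()
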
releases.py — project: PyPlanet, author: PyPlanet
def check(self, first_check=False):
        from pyplanet import __version__ as current_version

        logging.debug('Checking for new versions...')

        async with aiohttp.ClientSession() as session:
            async with session.get(self.url) as resp:
                for release in await resp.json():
                    if not release['draft'] and not release['prerelease']:
                        self.latest = release['tag_name']
                        break

                self.current = current_version
                logging.debug('Version check, your version: {}, online version: {}'.format(self.current, self.latest))

                if first_check and self.update_available:
                    logging.info('New version of PyPlanet available, consider updating: {}'.format(self.latest))
                    await self.instance.chat(
                        '\uf1e6 $FD4$oPy$369Planet$z$s$fff \uf0e7 new version available: v{}. Consider updating!'.format(self.latest)
                    )
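
check() relies on an update_available property that is not shown in this snippet. A minimal sketch (an assumption, not PyPlanet's actual implementation) could compare the two version strings with the third-party packaging library:

from packaging import version

class VersionCheckerSketch:
    """Hypothetical holder for the attributes check() reads and writes."""

    def __init__(self):
        self.current = None
        self.latest = None

    @property
    def update_available(self):
        # No verdict until both versions are known.
        if not self.current or not self.latest:
            return False
        return version.parse(self.latest) > version.parse(self.current)
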
nanoQC.py — project: nanoQC, author: wdecoster
def main():
    args = get_args()
    logging.basicConfig(
        format='%(asctime)s %(message)s',
        filename=os.path.join(args.outdir, "NanoQC.log"),
        level=logging.INFO)
    logging.info("NanoQC started.")
    sizeRange = length_histogram(
        fqin=gzip.open(args.fastq, 'rt'),
        name=os.path.join(args.outdir, "SequenceLengthDistribution.png"))
    fq = get_bin(gzip.open(args.fastq, 'rt'), sizeRange)
    logging.info("Using {} reads for plotting".format(len(fq)))
    fqbin = [dat[0] for dat in fq]
    qualbin = [dat[1] for dat in fq]
    logging.info("Creating plots...")
    per_base_sequence_content_and_quality(fqbin, qualbin, args.outdir, args.format)
    logging.info("per base sequence content and quality completed.")
    logging.info("Finished!")
tsproxy.py — project: tsproxy, author: WPO-Foundation
def run(self):
    global lock, background_activity_count
    try:
      logging.debug('[{0:d}] AsyncDNS - calling getaddrinfo for {1}:{2:d}'.format(self.client_id, self.hostname, self.port))
      addresses = socket.getaddrinfo(self.hostname, self.port)
      logging.info('[{0:d}] Resolving {1}:{2:d} Completed'.format(self.client_id, self.hostname, self.port))
    except:
      addresses = ()
      logging.info('[{0:d}] Resolving {1}:{2:d} Failed'.format(self.client_id, self.hostname, self.port))
    message = {'message': 'resolved', 'connection': self.client_id, 'addresses': addresses, 'localhost': self.is_localhost}
    self.result_pipe.SendMessage(message, False)
    lock.acquire()
    if background_activity_count > 0:
      background_activity_count -= 1
    lock.release()
    # open and close a local socket which will interrupt the long polling loop to process the message
    s = socket.socket()
    s.connect((server.ipaddr, server.port))
    s.close()


########################################################################################################################
#   TCP Client
########################################################################################################################
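
The run() method at the top of this snippet belongs to a threading.Thread subclass whose constructor is not included. A minimal skeleton (an assumption about the fields run() reads, not tsproxy's full class) could be:

import threading

class AsyncDNSSketch(threading.Thread):
    def __init__(self, client_id, hostname, port, is_localhost, result_pipe):
        threading.Thread.__init__(self)
        self.client_id = client_id
        self.hostname = hostname
        self.port = port
        self.is_localhost = is_localhost
        self.result_pipe = result_pipe  # message pipe back to the main polling loop
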
tsproxy.py — project: tsproxy, author: WPO-Foundation
def HandleResolve(self, message):
    global in_pipe,  map_localhost, lock, background_activity_count
    self.did_resolve = True
    is_localhost = False
    if 'hostname' in message:
      self.hostname = message['hostname']
    self.port = 0
    if 'port' in message:
      self.port = message['port']
    logging.info('[{0:d}] Resolving {1}:{2:d}'.format(self.client_id, self.hostname, self.port))
    if self.hostname == 'localhost':
      self.hostname = '127.0.0.1'
    if self.hostname == '127.0.0.1':
      logging.info('[{0:d}] Connection to localhost detected'.format(self.client_id))
      is_localhost = True
    if (dest_addresses is not None) and (not is_localhost or map_localhost):
      logging.info('[{0:d}] Resolving {1}:{2:d} to mapped address {3}'.format(self.client_id, self.hostname, self.port, dest_addresses))
      self.SendMessage('resolved', {'addresses': dest_addresses, 'localhost': False})
    else:
      lock.acquire()
      background_activity_count += 1
      lock.release()
      self.state = self.STATE_RESOLVING
      self.dns_thread = AsyncDNS(self.client_id, self.hostname, self.port, is_localhost, in_pipe)
      self.dns_thread.start()
tsproxy.py — project: tsproxy, author: WPO-Foundation
def handle_message(self, message):
    if message['message'] == 'data' and 'data' in message and len(message['data']) > 0:
      self.buffer += message['data']
      if self.state == self.STATE_CONNECTED:
        self.handle_write()
    elif message['message'] == 'resolved':
      self.HandleResolved(message)
    elif message['message'] == 'connected':
      self.HandleConnected(message)
      self.handle_write()
    elif message['message'] == 'closed':
      if len(self.buffer) == 0:
        logging.info('[{0:d}] Server connection close being processed, closing Browser connection'.format(self.client_id))
        self.handle_close()
      else:
        logging.info('[{0:d}] Server connection close being processed, queuing browser connection close'.format(self.client_id))
        self.needs_close = True
emccbuild.py — project: emscripten-docker, author: apiaryio
def get_tags(versions):
    logging.info("Reading tags from repo %s" % (TAGS_URL))
    r = requests.get(TAGS_URL)
    if r.status_code != 200:
        raise RuntimeError(r.text)
    else:
        tags = [i['name'] for i in json.loads(
            r.text) if any(x in i['name'] for x in versions)]
        while 'next' in r.links:
            r = requests.get(r.links['next']['url'])
            if r.status_code != 200:
                raise RuntimeError(r.text)
            else:
                tags.extend([i['name'] for i in json.loads(r.text)
                             if any(x in i['name'] for x in versions)])
    return tags
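
get_tags() takes a list of version substrings and relies on a module-level TAGS_URL that returns a JSON array of objects with a 'name' key and paginates via the Link header (for example, a GitHub tags endpoint). A hypothetical invocation; the version strings are illustrative only:

if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    matching = get_tags(['1.37', '1.38'])
    print('\n'.join(matching))
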
OldMunkiPackages.py — project: OldMunkiPackages, author: aysiu
def trash_old_stuff(trashlist, trashpath, newpath):
    if isinstance(trashlist, list):
        for old_location in trashlist:
            # Get the subfolders needed to be created
            path_within_destination=os.path.relpath(old_location, trashpath)
            # Create what will be the destination path
            new_location=os.path.join(newpath, path_within_destination)
            # Make sure all the relevant subfolders exist in the destination
            if not os.path.exists(os.path.dirname(new_location)):
                os.makedirs(os.path.dirname(new_location))
            # Even though we've been double-checking paths all along, let's just make one last check
            if os.path.exists(old_location) and os.path.isdir(newpath):
                os.rename(old_location, new_location)
                logging.info("Moving %s to %s\n" % (old_location, new_location))
            else:
                logging.error("One of %s or %s does not exist\n" % (old_location, new_location))
    else:
        logging.error("%s is not a valid list\n" % trashlist)

# Function that checks paths are writable
database.py — project: PyPlanet, author: PyPlanet
def create_from_settings(cls, instance, conf):
        try:
            engine_path, _, cls_name = conf['ENGINE'].rpartition('.')
            db_name = conf['NAME']
            db_options = conf['OPTIONS'] if 'OPTIONS' in conf and conf['OPTIONS'] else dict()

            # FIX for #331. Replace utf8 by utf8mb4 in the mysql driver encoding.
            if conf['ENGINE'] == 'peewee_async.MySQLDatabase' and 'charset' in db_options and db_options['charset'] == 'utf8':
                logging.info('Forcing to use \'utf8mb4\' instead of \'utf8\' for the MySQL charset option! (Fix #331).')
                db_options['charset'] = 'utf8mb4'

            # We will try to load it so we have the validation inside this class.
            engine = getattr(importlib.import_module(engine_path), cls_name)
        except ImportError:
            raise ImproperlyConfigured('Database engine doesn\'t exist!')
        except Exception as e:
            raise ImproperlyConfigured('Database configuration isn\'t complete or engine couldn\'t be found!')

        return cls(engine, instance, db_name, **db_options)
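
For illustration, a hypothetical settings dict that create_from_settings() would accept; the values are assumptions, not PyPlanet defaults:

conf = {
    'ENGINE': 'peewee_async.MySQLDatabase',
    'NAME': 'pyplanet',
    'OPTIONS': {
        'host': 'localhost',
        'user': 'pyplanet',
        'password': 'secret',
        'charset': 'utf8',  # rewritten to 'utf8mb4' by the fix for #331 above
    },
}
# db = Database.create_from_settings(instance, conf)
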
button.py — project: aws-greengrass-mini-fulfillment, author: awslabs
def use_box(cli):
    log.info("[use_box] configuring magic buttons.")
    red_button.when_pressed = red_push
    red_button.when_released = red_release
    green_button.when_pressed = green_push
    green_button.when_released = green_release
    white_button.when_pressed = white_push
    white_button.when_released = white_release
    white_led.on()
    log.info("[use_box] configured buttons. White LED should now be on.")
    try:
        while 1:
            time.sleep(0.2)
    except KeyboardInterrupt:
        log.info(
            "[use_box] KeyboardInterrupt ... exiting box monitoring loop")
    red_led.off()
    green_led.off()
    white_led.off()
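
use_box() assumes module-level button/LED objects and press/release callbacks defined elsewhere, apparently via gpiozero. A minimal sketch of those globals; the pin numbers and callback bodies are assumptions, not the project's wiring:

import logging
from gpiozero import Button, LED

log = logging.getLogger(__name__)

# Pin assignments are illustrative only.
red_button, green_button, white_button = Button(17), Button(27), Button(22)
red_led, green_led, white_led = LED(5), LED(6), LED(13)

def red_push():
    log.info("[red_push] red button pressed")

def red_release():
    log.info("[red_release] red button released")

# green_*/white_* callbacks would follow the same pattern.
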
utils.py — project: aws-greengrass-mini-fulfillment, author: awslabs
def mqtt_connect(mqtt_client, core_info):
    connected = False

    # try connecting to all connectivity info objects in the list
    for connectivity_info in core_info.connectivityInfoList:
        core_host = connectivity_info.host
        core_port = connectivity_info.port
        logging.info("Connecting to Core at {0}:{1}".format(
            core_host, core_port))
        mqtt_client.configureEndpoint(core_host, core_port)
        try:
            mqtt_client.connect()
            connected = True
            break
        except socket.error as se:
            print("SE:{0}".format(se))
        except operationTimeoutException as te:
            print("operationTimeoutException:{0}".format(te.message))
            traceback.print_exc(limit=25)
        except Exception as e:
            print("Exception caught:{0}".format(e.message))

    return connected
utils.py — project: aws-greengrass-mini-fulfillment, author: awslabs
def get_conn_info(core_connectivity_info_list, match):
    """
    Get the core connectivity info objects from the list whose `id`, `host`,
    `port`, or `metadata` value matches the `match` argument.

    :param core_connectivity_info_list: the connectivity info object list
    :param match: the value to match against either the Core Connectivity Info
        `id`, `host`, `port`, or `metadata` values
    :return: the list of zero or more matching connectivity info objects
    """
    conn_info = list()

    if not match:
        return conn_info

    for cil in core_connectivity_info_list:
        for ci in cil.connectivityInfoList:
            if match == ci.id or match == ci.host or match == ci.port or \
                            match == ci.metadata:
                conn_info.append(ci)

    return conn_info
lambda_setup.py — project: aws-greengrass-mini-fulfillment, author: awslabs
def _create_function_alias(func_alias, func_name, func_version):
    lamb = boto3.client('lambda')

    try:
        resp = lamb.create_alias(
            Name=func_alias,
            FunctionName=func_name,
            FunctionVersion=func_version
        )
        logging.info("Create Lambda Alias resp:{0}".format(
            json.dumps(resp, indent=4, sort_keys=True))
        )
        return resp
    except ClientError as ce:
        if ce.response['Error']['Code'] == 'ValidationException':
            logging.warning("Validation Error {0} creating alias '{1}'.".format(
                ce, func_alias))
        else:
            logging.error("Unexpected Error: {0}".format(ce))
utils.py — project: aws-greengrass-mini-fulfillment, author: awslabs
def discover_configured_core(device_name, dip, config_file):
    cfg = GroupConfigFile(config_file)
    gg_core = None
    # Discover Greengrass Core

    discovered, discovery_info = ggc_discovery(
        device_name, dip, retry_count=10
    )
    logging.info("[discover_cores] Device: {0} discovery success".format(
        device_name)
    )

    # find the configured Group's core
    for group in discovery_info.getAllGroups():
        dump_core_info_list(group.coreConnectivityInfoList)
        gg_core = group.getCoreConnectivityInfo(cfg['core']['thing_arn'])

        if gg_core:
            logging.info('Found the configured core and Group CA.')
            break

    return gg_core, discovery_info
arm.py — project: aws-greengrass-mini-fulfillment, author: awslabs
def _activate_command(self, cmd):
        """Use the shared `threading.Event` instance to signal a mini
        fulfillment shadow command to the running Control thread.
        """
        self.last_state = self.active_state
        self.active_state = cmd
        log.info("[arm._activate_command] last_state='{0}' state='{1}'".format(
            self.last_state, cmd))

        if self.active_state == 'run':
            log.info("[arm._activate_command] START RUN")
            self.cmd_event.set()
        elif self.active_state == 'stop':
            log.info("[arm._activate_command] STOP")
            self.cmd_event.clear()
        return
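
The command is handed to the running Control thread through the shared threading.Event. A minimal sketch of the consuming side (an assumption about how that thread might use the event, not the project's actual loop):

import time

def control_loop(cmd_event, do_one_cycle):
    # Hypothetical consumer: do_one_cycle is a stand-in for one unit of arm work.
    while True:
        cmd_event.wait()          # blocks until _activate_command('run') sets the event
        while cmd_event.is_set():
            do_one_cycle()
        time.sleep(0.1)           # 'stop' cleared the event; idle until the next 'run'
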
arm.py — project: aws-greengrass-mini-fulfillment, author: awslabs
def pick(self):
        log.debug("[act.pick] [begin]")
        arm = ArmStages(self.sg)
        self.mqtt_client.publish(
            self.stage_topic, _stage_message("pick", "begin"), 0
        )
        pick_box = self.found_box
        self.found_box = NO_BOX_FOUND
        log.info("[act.pick] pick_box:{0}".format(pick_box))
        log.info("[act.pick] self.found_box:{0}".format(self.found_box))
        stage_result = arm.stage_pick(previous_results=pick_box,
                                      cartesian=False)
        self.mqtt_client.publish(
            self.stage_topic, _stage_message("pick", "end", stage_result), 0
        )
        log.debug("[act.pick] [end]")
        return stage_result
arm.py — project: aws-greengrass-mini-fulfillment, author: awslabs
def emergency_stop_arm(self):
        if self.active_state == 'stopped' or \
                self.active_state == 'initialized':
            return

        if 'present_position' in base_servo_cache:
            stop_positions = [
                base_servo_cache['present_position'],
                femur01_servo_cache['present_position'],
                femur02_servo_cache['present_position'],
                tibia_servo_cache['present_position'],
                eff_servo_cache['present_position']
            ]
            log.info("[emergency_stop_arm] stop_positions:{0}".format(
                stop_positions))
            self.sg.write_values(
                register='goal_position', values=stop_positions)
            self.active_state = 'stopped'
            log.info("[emergency_stop_arm] active_state:{0}".format(
                self.active_state))
        else:
            log.error("[emergency_stop_arm] no 'present_position' cache value")
stages.py — project: aws-greengrass-mini-fulfillment, author: awslabs
def stage_stop(self):
        log.info("[stage_stop] _begin_")

        self.sg.goal_position([
            512,  # first servo value
            500,  # second servo value
            500,  # third servo value
            135,  # fourth servo value
            OPEN_EFFECTOR  # fifth servo value
        ], block=True, margin=POSITION_MARGIN)

        # add little sleepy motion in end effector for fun
        self.sg['effector']['goal_position'] = GRAB_EFFECTOR
        time.sleep(0.4)
        self.sg['effector']['goal_position'] = GRAB_EFFECTOR + 100
        time.sleep(0.4)
        self.sg['effector']['goal_position'] = GRAB_EFFECTOR
        time.sleep(0.4)
        self.sg['effector']['goal_position'] = GRAB_EFFECTOR + 30
        time.sleep(0.4)
        self.sg['effector']['goal_position'] = GRAB_EFFECTOR

        log.info("[stage_stop] _end_")
conv2mp4-server.py — project: conv2mp4-py, author: Kameecoding
def good_output(oldFile,new_file):
    oldSize = get_length(oldFile)
    newSize = get_length(new_file)

    for i in range(0, 3):
        if i == 2:
            # Index 2 holds the duration; tolerate up to 5 seconds of drift.
            if abs(oldSize[i] - newSize[i]) > 5:
                Logger.info("ERROR: File Duration difference bigger than 5 seconds, convert failed")
                return False
        else:
            if oldSize[i] != newSize[i]:
                Logger.info("ERROR: Output file properties do not match input, convert failed")
                return False
    Logger.info("SUCCESS: File Duration difference less than 5 seconds, convert successful")
    return True

#Recursively build a map of folder paths to GDRIVE ids
conv2mp4-server.py — project: conv2mp4-py, author: Kameecoding
def execute(self):
        check_path(self.target_dir)

        if EXTRACT_SRT:
            self.extract_srt()
            self.move_external_subs()

        if self.input_video != self.output_video:
            try:
                self.handbrake_convert()
                if UPLOAD_TO_DRIVE:
                    self.upload_to_drive()
            except KeyboardInterrupt:
                Logger.info("KeyBoardInterrupt Detected, Cleaning up and Exiting")
                self.remove_media_file(self.output_video)
                sys.exit(0)
            if REMOVE_CONVERTED:
                Logger.info("Deleting old files")
                self.remove_media_file(self.input_video)
                self.remove_folder(os.path.dirname(self.input_video))
        else:
            Logger.info("{file} already exists, skipping.".format(file=self.input_video))
conv2mp4-server.py — project: conv2mp4-py, author: Kameecoding
def get_audio_streams(self):
        with open(os.devnull, 'w') as DEV_NULL:
            #Get file info and Parse it
            try:
                proc = subprocess.Popen([
                    FFPROBE,
                    '-i', self.input_video,
                    '-of', 'json',
                    '-show_streams'
                ], stdout=subprocess.PIPE, stderr=DEV_NULL)
            except OSError as e:
                if e.errno == os.errno.ENOENT:
                    Logger.error("FFPROBE not found, install on your system to use this script")
                    sys.exit(0)
            output = proc.stdout.read()

            return get_audio_streams(json.loads(output))
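
The method delegates to a module-level get_audio_streams() helper applied to ffprobe's parsed JSON. A minimal sketch of that helper (an assumption about its behaviour), filtering streams by codec_type:

def get_audio_streams(ffprobe_json):
    # `ffprobe -show_streams -of json` returns {"streams": [...]}; keep audio streams only.
    return [stream for stream in ffprobe_json.get('streams', [])
            if stream.get('codec_type') == 'audio']
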
experiment.py — project: bnn-analysis, author: myshkov
def configure_env_dropout(self, env, sampler_params=None, dropout=0.01, tau=0.15, length_scale=1e-2):
        def sampler_factory():
            params = env.get_default_sampler_params()
            params['n_epochs'] = 50

            wreg = length_scale ** 2 * (1 - dropout) / (2. * env.get_train_x().shape[0] * tau)
            model = DropoutSampler.model_from_description(env.layers_description, wreg, dropout)
            logging.info(f'Reg: {wreg}')

            if sampler_params is not None:
                params.update(sampler_params)

            sampler = DropoutSampler(model=model, **params)
            sampler.construct()
            return sampler

        env.sampler_factory = sampler_factory
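
A worked example of the weight-regularisation formula above (the MC-dropout relation between length scale, dropout rate, precision tau and training-set size), assuming a hypothetical 1000 training points:

length_scale, dropout, tau, n_train = 1e-2, 0.01, 0.15, 1000
wreg = length_scale ** 2 * (1 - dropout) / (2.0 * n_train * tau)
print(wreg)  # ~3.3e-07
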
WPWithinWrapperImpl.py — project: wpw-sdk-python, author: WPTechInnovation
def openRpcListener(self):
        try:
            # Make socket
            transport = TSocket.TSocket(self.ipAddress, self.portNumber)
            # Buffering is critical. Raw sockets are very slow
            transport = TTransport.TBufferedTransport(transport)
            # Wrap in a protocol
            protocol = TBinaryProtocol.TBinaryProtocol(transport)
            # Create a client to use the protocol encoder
            client = Client(protocol)
            # Connect!
            transport.open()
            logging.info("STARTED connection to SDK via RPC thrift")
            return client
        except Exception as e:
            logging.info("Error: Couldn't open the RpcListener: " + str(e))
            self.killRpcAgent()
            raise WWTypes.WPWithinGeneralException("Error: Couldn't open the RpcListener", e)
WPWithinWrapperImpl.py — project: wpw-sdk-python, author: WPTechInnovation
def stopRPCAgent(self):
        logging.info('SHOULD STOP RPC AGENT')
        try:
            self.getClient().CloseRPCAgent()
        except socket.error as er:
            time.sleep(2)
            if self.rpcProcess is None or self.rpcProcess.poll() is not None:
                logging.info("RPC agent closed.")
            else:
                self.killRpcAgent()
                raise WWTypes.WPWithinGeneralException("RPC process killed.", er)
        except Exception as e:
            if self.rpcProcess is None or self.rpcProcess.poll() is not None:
                logging.info("RPC agent is closed.")
            else:
                self.killRpcAgent()
                raise WWTypes.WPWithinGeneralException("RPC process killed.", e)
striptls.py — project: striptls, author: tintinweb
def on_recv_peek(session, s_in):
                if s_in.socket_ssl:
                    return

                ssl_version = session.protocol.detect_peek_tls(s_in)
                if ssl_version:
                    logger.info("SSL Handshake detected - performing ssl/tls conversion")
                    try:
                        context = Vectors.GENERIC.Intercept.create_ssl_context()
                        context.load_cert_chain(certfile=Vectors._TLS_CERTFILE,
                                                keyfile=Vectors._TLS_KEYFILE)
                        session.inbound.ssl_wrap_socket_with_context(context, server_side=True)
                        logger.debug("%s [client] <> [      ]          SSL handshake done: %s"%(session, session.inbound.socket_ssl.cipher()))
                        session.outbound.ssl_wrap_socket_with_context(context, server_side=False)
                        logger.debug("%s [      ] <> [server]          SSL handshake done: %s"%(session, session.outbound.socket_ssl.cipher()))
                    except Exception as e:
                        logger.warning("Exception - not ssl intercepting outbound: %s"%repr(e))
kubectl_util.py — project: benchmarks, author: tensorflow
def _GetPodNames(pod_name_prefix, job_name=None):
  """Get pod names based on the pod_name_prefix and job_name.

  Args:
    pod_name_prefix: value of 'name-prefix' selector.
    job_name: value of 'job' selector. If None, pod names will be
      selected only based on 'name-prefix' selector.

  Returns:
    List of pod names.
  """
  pod_list_command = [
      _KUBECTL, 'get', 'pods', '-o', 'name', '-a',
      '-l', _GetJobSelector(pod_name_prefix, job_name)]
  logging.info('Command to get pod names: %s', ' '.join(pod_list_command))
  output = subprocess.check_output(pod_list_command, universal_newlines=True)
  pod_names = [name for name in output.strip().split('\n') if name]
  logging.info('Pod names: "%s"', ','.join(pod_names))
  return pod_names
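
_GetPodNames() relies on a _GetJobSelector() helper that is not shown. A hypothetical sketch that builds a kubectl label selector from the two values, using the 'name-prefix' and 'job' label keys named in the docstring:

def _GetJobSelector(pod_name_prefix, job_name=None):
  """Hypothetical sketch: builds a selector such as 'name-prefix=foo,job=bar'."""
  selector = 'name-prefix=%s' % pod_name_prefix
  if job_name:
    selector += ',job=%s' % job_name
  return selector
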
kubectl_util.py — project: benchmarks, author: tensorflow
def CreatePods(pod_name, yaml_file):
  """Creates pods based on the given kubernetes config.

  Args:
    pod_name: 'name-prefix' selector for the pods.
    yaml_file: kubernetes yaml config.

  Raises:
    TimeoutError: if jobs didn't come up for a long time.
  """
  command = [_KUBECTL, 'create', '--filename=%s' % yaml_file]
  logging.info('Creating pods: %s', subprocess.list2cmdline(command))
  subprocess.check_call(command)

  if not _WaitUntil(100, _GetPodNames, pod_name):
    raise TimeoutError(
        'Timed out waiting for %s pod to come up.' % pod_name)
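
CreatePods() also leans on a _WaitUntil() helper that is not included. A hedged sketch that polls a predicate until it returns something truthy, treating the first argument as a number of seconds (whether the real module counts seconds or retries is an assumption):

import time

def _WaitUntil(timeout_secs, predicate, *args):
  """Hypothetical poller: True as soon as predicate(*args) is truthy, False on timeout."""
  deadline = time.time() + timeout_secs
  while time.time() < deadline:
    if predicate(*args):
      return True
    time.sleep(5)
  return False
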
kubectl_util.py — project: benchmarks, author: tensorflow
def DeletePods(pod_name, yaml_file):
  """Deletes pods based on the given kubernetes config.

  Args:
    pod_name: 'name-prefix' selector for the pods.
    yaml_file: kubernetes yaml config.

  Raises:
    TimeoutError: if jobs didn't terminate for a long time.
  """
  command = [_KUBECTL, 'delete', '--filename=%s' % yaml_file]
  logging.info('Deleting pods: %s', ' '.join(command))
  subprocess.call(command)

  def CheckPodsAreTerminated():
    return not _GetPodNames(pod_name)
  if not _WaitUntil(100, CheckPodsAreTerminated):
    raise TimeoutError(
        'Timed out waiting for %s pod to terminate.' % pod_name)

