Python: example source code using the Wrapper() class

misc_util.py (project: distributional_perspective_on_RL, author: Kiwoo)
def get_wrapper_by_name(env, classname):
    """Given an a gym environment possibly wrapped multiple times, returns a wrapper
    of class named classname or raises ValueError if no such wrapper was applied

    Parameters
    ----------
    env: gym.Env of gym.Wrapper
        gym environment
    classname: str
        name of the wrapper

    Returns
    -------
    wrapper: gym.Wrapper
        wrapper named classname
    """
    currentenv = env
    while True:
        if classname == currentenv.class_name():
            return currentenv
        elif isinstance(currentenv, gym.Wrapper):
            currentenv = currentenv.env
        else:
            raise ValueError("Couldn't find wrapper named %s" % classname)
observation_space.py (project: gym-doom, author: ppaquette)
def SetResolution(target_resolution):

    class SetResolutionWrapper(gym.Wrapper):
        """
            Doom wrapper to change screen resolution
        """
        def __init__(self, env):
            super(SetResolutionWrapper, self).__init__(env)
            if target_resolution not in resolutions:
                raise gym.error.Error('Error - The specified resolution "{}" is not supported by Vizdoom.'.format(target_resolution))
            parts = target_resolution.lower().split('x')
            width = int(parts[0])
            height = int(parts[1])
            screen_res = __import__('doom_py')
            screen_res = getattr(screen_res, 'ScreenResolution')
            screen_res = getattr(screen_res, 'RES_{}X{}'.format(width, height))
            self.screen_width, self.screen_height, self.unwrapped.screen_resolution = width, height, screen_res
            self.unwrapped.observation_space = gym.spaces.Box(low=0, high=255, shape=(self.screen_height, self.screen_width, 3))
            self.observation_space = self.unwrapped.observation_space

    return SetResolutionWrapper
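A hedged usage sketch for the factory above (the Doom environment id and the resolution string are illustrative; gym-doom and doom_py are assumed to be installed, which also provides the module-level `resolutions` list checked in __init__):

import gym
import ppaquette_gym_doom  # registers the Doom environments (assumed installed)

wrapper_cls = SetResolution('160x120')
env = wrapper_cls(gym.make('ppaquette/DoomBasic-v0'))
obs = env.reset()
print(env.observation_space.shape)  # expected: (120, 160, 3)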
atari_wrappers_deprecated.py (project: distributional_perspective_on_RL, author: Kiwoo)
def __init__(self, env, k):
        """Stack k last frames.

        Returns lazy array, which is much more memory efficient.

        See Also
        --------
        baselines.common.atari_wrappers.LazyFrames
        """
        gym.Wrapper.__init__(self, env)
        self.k = k
        self.frames = deque([], maxlen=k)
        shp = env.observation_space.shape
        self.observation_space = spaces.Box(low=0, high=255, shape=(shp[0], shp[1], shp[2] * k))
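The rest of this frame-stacking wrapper is not shown above; the following is a sketch of the reset/step logic that typically accompanies this constructor in the baselines-style implementation (LazyFrames comes from the same module; underscore-prefixed method names follow the older gym wrapper API):

    def _reset(self):
        ob = self.env.reset()
        for _ in range(self.k):
            self.frames.append(ob)
        return self._get_ob()

    def _step(self, action):
        ob, reward, done, info = self.env.step(action)
        self.frames.append(ob)
        return self._get_ob(), reward, done, info

    def _get_ob(self):
        # Wrap the deque contents lazily instead of copying k frames per step.
        assert len(self.frames) == self.k
        return LazyFrames(list(self.frames))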
atari_wrappers_deprecated.py (project: distributional_perspective_on_RL, author: Kiwoo)
def __init__(self, env):
        gym.Wrapper.__init__(self, env)
        self.observation_space = spaces.Box(low=0, high=255, shape=(84, 84, 1))
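The (84, 84, 1) observation space suggests this wrapper reduces Atari frames to 84x84 grayscale, as in the DeepMind DQN preprocessing. A simplified, hedged sketch of that conversion (OpenCV is an assumed dependency; the exact crop/resize details differ between implementations):

import cv2
import numpy as np

def _process_frame84(frame):
    # RGB Atari frame -> single-channel 84x84 uint8 image.
    gray = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
    resized = cv2.resize(gray, (84, 84), interpolation=cv2.INTER_AREA)
    return resized[:, :, None].astype(np.uint8)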
dqn_utils.py (project: deep-q-learning, author: alvinwan)
def get_wrapper_by_name(env, classname):
    currentenv = env
    while True:
        if classname in currentenv.__class__.__name__:
            return currentenv
        elif isinstance(currentenv, gym.Wrapper):
            currentenv = currentenv.env
        else:
            raise ValueError("Couldn't find wrapper named %s"%classname)
wrapper.py (project: pytorch.rl.learning, author: moskomule)
def __init__(self, env, noop_max=30):
        """Sample initial states by taking random number of no-ops on reset.
        No-op is assumed to be action 0.
        """
        gym.Wrapper.__init__(self, env)
        self.noop_max = noop_max
        self.override_num_noops = None
        self.noop_action = 0
        assert env.unwrapped.get_action_meanings()[0] == 'NOOP'
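A sketch of the reset logic that usually accompanies this constructor in baselines-style Atari wrappers (method name _reset vs. reset depends on the gym version; np is numpy):

    def _reset(self):
        # Reset, then take a random number of no-op steps in [1, noop_max].
        self.env.reset()
        noops = self.override_num_noops
        if noops is None:
            noops = np.random.randint(1, self.noop_max + 1)
        obs = None
        for _ in range(noops):
            obs, _, done, _ = self.env.step(self.noop_action)
            if done:
                obs = self.env.reset()
        return obs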
wrapper.py (project: pytorch.rl.learning, author: moskomule)
def __init__(self, env):
        """Take action on reset for environments that are fixed until firing."""
        gym.Wrapper.__init__(self, env)
        assert env.unwrapped.get_action_meanings()[1] == 'FIRE'
        assert len(env.unwrapped.get_action_meanings()) >= 3
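A sketch of the matching reset, which presses FIRE (and one extra action) so that games like Breakout actually start (baselines-style; method name depends on the gym version):

    def _reset(self):
        self.env.reset()
        obs, _, done, _ = self.env.step(1)  # FIRE
        if done:
            self.env.reset()
        obs, _, done, _ = self.env.step(2)
        if done:
            self.env.reset()
        return obs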
wrapper.py (project: pytorch.rl.learning, author: moskomule)
def __init__(self, env):
        """Make end-of-life == end-of-episode, but only reset on true game over.
        Done by DeepMind for the DQN and co. since it helps value estimation.
        """
        gym.Wrapper.__init__(self, env)
        self.lives = 0
        self.was_real_done = True
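A sketch of the step/reset pair this constructor is normally paired with (baselines-style; ale.lives() is ALE/Atari-specific):

    def _step(self, action):
        obs, reward, done, info = self.env.step(action)
        self.was_real_done = done
        # Report 'done' when a life is lost, but keep the underlying game running.
        lives = self.env.unwrapped.ale.lives()
        if lives < self.lives and lives > 0:
            done = True
        self.lives = lives
        return obs, reward, done, info

    def _reset(self):
        if self.was_real_done:
            obs = self.env.reset()
        else:
            # A no-op step advances past the lost-life state without a full reset.
            obs, _, _, _ = self.env.step(0)
        self.lives = self.env.unwrapped.ale.lives()
        return obs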
wrapper.py (project: pytorch.rl.learning, author: moskomule)
def __init__(self, env, skip=4):
        """Return only every `skip`-th frame"""
        gym.Wrapper.__init__(self, env)
        # most recent raw observations (for max pooling across time steps)
        self._obs_buffer = np.zeros((2,) + env.observation_space.shape, dtype='uint8')
        self._skip = skip
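A sketch of the skipping step that goes with this constructor (baselines-style): the action is repeated `skip` times, rewards are summed, and the last two raw frames are max-pooled to remove Atari sprite flicker:

    def _step(self, action):
        total_reward = 0.0
        done = None
        for i in range(self._skip):
            obs, reward, done, info = self.env.step(action)
            if i == self._skip - 2:
                self._obs_buffer[0] = obs
            if i == self._skip - 1:
                self._obs_buffer[1] = obs
            total_reward += reward
            if done:
                break
        # Pixel-wise max over the last two observations.
        max_frame = self._obs_buffer.max(axis=0)
        return max_frame, total_reward, done, info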
wrappers.py (project: chi, author: rmst)
def list_wrappers(env: Union[Env, gym.Wrapper]):
    while isinstance(env, gym.Wrapper):
        yield env
        env = env.env
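A small usage sketch (the env id and wrapper are illustrative; assumes an older gym API with gym.wrappers.TimeLimit):

import gym

env = gym.wrappers.TimeLimit(gym.make('CartPole-v0'), max_episode_steps=100)
for w in list_wrappers(env):
    # Prints each wrapper class from the outside in.
    print(type(w).__name__)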
wrappers.py (project: chi, author: rmst)
def set_meta(self, meta):
        self.meta = meta
        w = get_wrapper(self.env, Wrapper)
        if w:
            w.set_meta(meta)
core.py (project: universe, author: openai)
def __init__(self, env):
        super(Wrapper, self).__init__(env)
        if not env.metadata.get('runtime.vectorized'):
            if self.autovectorize:
                # Circular dependency :(
                from universe import wrappers
                env = wrappers.Vectorize(env)
            else:
                raise error.Error('This wrapper can only wrap vectorized envs (i.e. where env.metadata["runtime.vectorized"] = True), not {}. Set "self.autovectorize = True" to automatically add a Vectorize wrapper.'.format(env))

        self.env = env
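A hypothetical subclass illustrating the autovectorize flag read by the constructor above: setting it to True lets the wrapper transparently vectorize a plain (non-vectorized) env via wrappers.Vectorize instead of raising an error:

class MyVectorizedWrapper(Wrapper):
    # Opt in to automatic vectorization of plain envs (checked in __init__ above).
    autovectorize = True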
frame_skipping.py (project: gym, author: openai)
def SkipWrapper(repeat_count):
    class SkipWrapper(gym.Wrapper):
        """
            Generic frame-skipping wrapper.
            Repeats the chosen action for `repeat_count` additional steps.
        """
        def __init__(self, env):
            super(SkipWrapper, self).__init__(env)
            self.repeat_count = repeat_count
            self.stepcount = 0

        def _step(self, action):
            done = False
            total_reward = 0
            current_step = 0
            while current_step < (self.repeat_count + 1) and not done:
                self.stepcount += 1
                obs, reward, done, info = self.env.step(action)
                total_reward += reward
                current_step += 1
            if 'skip.stepcount' in info:
                raise gym.error.Error('Key "skip.stepcount" already in info. Make sure you are not stacking ' \
                                      'the SkipWrapper wrappers.')
            info['skip.stepcount'] = self.stepcount
            return obs, total_reward, done, info

        def _reset(self):
            self.stepcount = 0
            return self.env.reset()

    return SkipWrapper
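A minimal usage sketch (the env id is illustrative; the underscore-prefixed _step/_reset follow the older gym wrapper API):

import gym

skip4 = SkipWrapper(4)                 # the factory returns a wrapper class
env = skip4(gym.make('CartPole-v0'))
obs = env.reset()
obs, total_reward, done, info = env.step(env.action_space.sample())
print(info['skip.stepcount'])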

