diff --git a/examples/pybullet/examples/rendertest_sync.py b/examples/pybullet/examples/rendertest_sync.py index b7dfd2f3ed..ac738ddced 100644 --- a/examples/pybullet/examples/rendertest_sync.py +++ b/examples/pybullet/examples/rendertest_sync.py @@ -98,7 +98,7 @@ def step(self, action): renderer=p.ER_BULLET_HARDWARE_OPENGL) #renderer=pybullet.ER_TINY_RENDERER) self._observation = img_arr[2] - return np.array(self._observation), 0, 0, {} + return np.array(self._observation), 0, 0, False, {} def seed(self, seed=None): pass diff --git a/examples/pybullet/gym/pybullet_envs/ARS/ars.py b/examples/pybullet/gym/pybullet_envs/ARS/ars.py index 51bc776db7..97af460f08 100644 --- a/examples/pybullet/gym/pybullet_envs/ARS/ars.py +++ b/examples/pybullet/gym/pybullet_envs/ARS/ars.py @@ -73,7 +73,7 @@ def ExploreWorker(rank, childPipe, envname, args): normalizer.observe(state) state = normalizer.normalize(state) action = policy.evaluate(state, delta, direction, hp) - state, reward, done, _ = env.step(action) + state, reward, done, _, _ = env.step(action) reward = max(min(reward, 1), -1) sum_rewards += reward num_plays += 1 @@ -154,7 +154,7 @@ def explore(env, normalizer, policy, direction, delta, hp): normalizer.observe(state) state = normalizer.normalize(state) action = policy.evaluate(state, delta, direction, hp) - state, reward, done, _ = env.step(action) + state, reward, done, _, _ = env.step(action) reward = max(min(reward, 1), -1) sum_rewards += reward num_plays += 1 diff --git a/examples/pybullet/gym/pybullet_envs/__init__.py b/examples/pybullet/gym/pybullet_envs/__init__.py index 031efef74f..27fa32ff5b 100644 --- a/examples/pybullet/gym/pybullet_envs/__init__.py +++ b/examples/pybullet/gym/pybullet_envs/__init__.py @@ -3,7 +3,7 @@ def register(id, *args, **kvargs): - if id in registry.env_specs: + if id in registry.keys(): return else: return gym.envs.registration.register(id, *args, **kvargs) @@ -16,6 +16,7 @@ def register(id, *args, **kvargs): entry_point='pybullet_envs.deep_mimic.gym_env:HumanoidDeepMimicBackflipBulletEnv', max_episode_steps=2000, reward_threshold=2000.0, + order_enforce=False, ) register( @@ -23,6 +24,7 @@ def register(id, *args, **kvargs): entry_point='pybullet_envs.deep_mimic.gym_env:HumanoidDeepMimicWalkBulletEnv', max_episode_steps=2000, reward_threshold=2000.0, + order_enforce=False, ) register( @@ -30,6 +32,7 @@ def register(id, *args, **kvargs): entry_point='pybullet_envs.bullet:CartPoleBulletEnv', max_episode_steps=200, reward_threshold=190.0, + order_enforce=False, ) register( @@ -37,6 +40,7 @@ def register(id, *args, **kvargs): entry_point='pybullet_envs.bullet:CartPoleContinuousBulletEnv', max_episode_steps=200, reward_threshold=190.0, + order_enforce=False, ) @@ -45,6 +49,7 @@ def register(id, *args, **kvargs): entry_point='pybullet_envs.bullet:MinitaurBulletEnv', max_episode_steps=1000, reward_threshold=15.0, + order_enforce=False, ) register( @@ -52,6 +57,7 @@ def register(id, *args, **kvargs): entry_point='pybullet_envs.bullet:MinitaurBulletDuckEnv', max_episode_steps=1000, reward_threshold=5.0, + order_enforce=False, ) register( @@ -59,6 +65,7 @@ def register(id, *args, **kvargs): entry_point='pybullet_envs.minitaur.envs:MinitaurExtendedEnv', max_episode_steps=1000, reward_threshold=5.0, + order_enforce=False, ) @@ -67,6 +74,7 @@ def register(id, *args, **kvargs): entry_point='pybullet_envs.minitaur.envs:MinitaurReactiveEnv', max_episode_steps=1000, reward_threshold=5.0, + order_enforce=False, ) register( @@ -74,6 +82,7 @@ def register(id, *args, **kvargs): 
entry_point='pybullet_envs.minitaur.envs:MinitaurBallGymEnv', max_episode_steps=1000, reward_threshold=5.0, + order_enforce=False, ) register( @@ -81,6 +90,7 @@ def register(id, *args, **kvargs): entry_point='pybullet_envs.minitaur.envs:MinitaurTrottingEnv', max_episode_steps=1000, reward_threshold=5.0, + order_enforce=False, ) register( @@ -88,6 +98,7 @@ def register(id, *args, **kvargs): entry_point='pybullet_envs.minitaur.envs:MinitaurStandGymEnv', max_episode_steps=1000, reward_threshold=5.0, + order_enforce=False, ) register( @@ -95,6 +106,7 @@ def register(id, *args, **kvargs): entry_point='pybullet_envs.minitaur.envs:MinitaurAlternatingLegsEnv', max_episode_steps=1000, reward_threshold=5.0, + order_enforce=False, ) register( @@ -102,6 +114,7 @@ def register(id, *args, **kvargs): entry_point='pybullet_envs.minitaur.envs:MinitaurFourLegStandEnv', max_episode_steps=1000, reward_threshold=5.0, + order_enforce=False, ) register( @@ -109,6 +122,7 @@ def register(id, *args, **kvargs): entry_point='pybullet_envs.bullet:RacecarGymEnv', max_episode_steps=1000, reward_threshold=5.0, + order_enforce=False, ) register( @@ -116,6 +130,7 @@ def register(id, *args, **kvargs): entry_point='pybullet_envs.bullet:RacecarZEDGymEnv', max_episode_steps=1000, reward_threshold=5.0, + order_enforce=False, ) register( @@ -123,6 +138,7 @@ def register(id, *args, **kvargs): entry_point='pybullet_envs.bullet:KukaGymEnv', max_episode_steps=1000, reward_threshold=5.0, + order_enforce=False, ) register( @@ -130,6 +146,7 @@ def register(id, *args, **kvargs): entry_point='pybullet_envs.bullet:KukaCamGymEnv', max_episode_steps=1000, reward_threshold=5.0, + order_enforce=False, ) register( @@ -137,6 +154,7 @@ def register(id, *args, **kvargs): entry_point='pybullet_envs.bullet:KukaDiverseObjectEnv', max_episode_steps=1000, reward_threshold=5.0, + order_enforce=False, ) register( @@ -144,6 +162,7 @@ def register(id, *args, **kvargs): entry_point='pybullet_envs.gym_pendulum_envs:InvertedPendulumBulletEnv', max_episode_steps=1000, reward_threshold=950.0, + order_enforce=False, ) register( @@ -151,6 +170,7 @@ def register(id, *args, **kvargs): entry_point='pybullet_envs.gym_pendulum_envs:InvertedDoublePendulumBulletEnv', max_episode_steps=1000, reward_threshold=9100.0, + order_enforce=False, ) register( @@ -158,6 +178,7 @@ def register(id, *args, **kvargs): entry_point='pybullet_envs.gym_pendulum_envs:InvertedPendulumSwingupBulletEnv', max_episode_steps=1000, reward_threshold=800.0, + order_enforce=False, ) register( @@ -165,6 +186,7 @@ def register(id, *args, **kvargs): entry_point='pybullet_envs.gym_manipulator_envs:ReacherBulletEnv', max_episode_steps=150, reward_threshold=18.0, + order_enforce=False, ) register( @@ -172,6 +194,7 @@ def register(id, *args, **kvargs): entry_point='pybullet_envs.gym_manipulator_envs:PusherBulletEnv', max_episode_steps=150, reward_threshold=18.0, + order_enforce=False, ) register( @@ -179,6 +202,7 @@ def register(id, *args, **kvargs): entry_point='pybullet_envs.gym_manipulator_envs:ThrowerBulletEnv', max_episode_steps=100, reward_threshold=18.0, + order_enforce=False, ) #register( @@ -186,44 +210,60 @@ def register(id, *args, **kvargs): # entry_point='pybullet_envs.gym_manipulator_envs:StrikerBulletEnv', # max_episode_steps=100, # reward_threshold=18.0, +# order_enforce=False, #) register(id='Walker2DBulletEnv-v0', entry_point='pybullet_envs.gym_locomotion_envs:Walker2DBulletEnv', max_episode_steps=1000, - reward_threshold=2500.0) + reward_threshold=2500.0, + order_enforce=False, +) 
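Not part of the patch: a minimal sketch of the registration pattern used throughout this file. The guarded register() helper skips ids that already exist in the gym 0.26 dict-style registry, and order_enforce=False keeps gym.make() from adding its OrderEnforcing wrapper around these environments. The id and entry point below are hypothetical.

import gym
from gym.envs.registration import registry

def register(id, *args, **kwargs):
    # Skip re-registration; in gym >= 0.26 the registry is a plain dict of id -> EnvSpec.
    if id in registry.keys():
        return
    return gym.envs.registration.register(id, *args, **kwargs)

register(
    id='MyBulletEnv-v0',                        # hypothetical id
    entry_point='my_package.envs:MyBulletEnv',  # hypothetical entry point
    max_episode_steps=1000,
    reward_threshold=2500.0,
    order_enforce=False,  # do not wrap with gym's OrderEnforcing wrapper
)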
register(id='HalfCheetahBulletEnv-v0', entry_point='pybullet_envs.gym_locomotion_envs:HalfCheetahBulletEnv', max_episode_steps=1000, - reward_threshold=3000.0) + reward_threshold=3000.0, + order_enforce=False, +) register(id='AntBulletEnv-v0', entry_point='pybullet_envs.gym_locomotion_envs:AntBulletEnv', max_episode_steps=1000, - reward_threshold=2500.0) + reward_threshold=2500.0, + order_enforce=False, +) register(id='HopperBulletEnv-v0', entry_point='pybullet_envs.gym_locomotion_envs:HopperBulletEnv', max_episode_steps=1000, - reward_threshold=2500.0) + reward_threshold=2500.0, + order_enforce=False, +) register(id='HumanoidBulletEnv-v0', entry_point='pybullet_envs.gym_locomotion_envs:HumanoidBulletEnv', - max_episode_steps=1000) + max_episode_steps=1000, + order_enforce=False, +) register(id='HumanoidFlagrunBulletEnv-v0', entry_point='pybullet_envs.gym_locomotion_envs:HumanoidFlagrunBulletEnv', max_episode_steps=1000, - reward_threshold=2000.0) + reward_threshold=2000.0, + order_enforce=False, +) register(id='HumanoidFlagrunHarderBulletEnv-v0', entry_point='pybullet_envs.gym_locomotion_envs:HumanoidFlagrunHarderBulletEnv', - max_episode_steps=1000) + max_episode_steps=1000, + order_enforce=False, +) #register( # id='AtlasBulletEnv-v0', # entry_point='pybullet_envs.gym_locomotion_envs:AtlasBulletEnv', -# max_episode_steps=1000 +# max_episode_steps=1000, +# order_enforce=False, # ) diff --git a/examples/pybullet/gym/pybullet_envs/agents/configs.py b/examples/pybullet/gym/pybullet_envs/agents/configs.py index 7e78486d12..5ead78315c 100644 --- a/examples/pybullet/gym/pybullet_envs/agents/configs.py +++ b/examples/pybullet/gym/pybullet_envs/agents/configs.py @@ -118,7 +118,7 @@ def pybullet_racecar(): """Configuration for Bullet MIT Racecar task.""" locals().update(default()) # Environment - env = 'RacecarBulletEnv-v0' #functools.partial(racecarGymEnv.RacecarGymEnv, isDiscrete=False, renders=True) + env = 'RacecarBulletEnv-v0' #functools.partial(racecarGymEnv.RacecarGymEnv, isDiscrete=False, render_mode=True) max_length = 10 steps = 1e7 # 10M return locals() @@ -142,7 +142,7 @@ def pybullet_minitaur(): motor_overheat_protection=True, pd_control_enabled=True, env_randomizer=randomizer, - render=False) + render_mode=False) max_length = 1000 steps = 3e7 # 30M return locals() @@ -157,7 +157,7 @@ def pybullet_duck_minitaur(): motor_overheat_protection=True, pd_control_enabled=True, env_randomizer=randomizer, - render=False) + render_mode=False) max_length = 1000 steps = 3e7 # 30M return locals() diff --git a/examples/pybullet/gym/pybullet_envs/agents/tools/batch_env.py b/examples/pybullet/gym/pybullet_envs/agents/tools/batch_env.py index ef8b7430ec..a3c9f29f5f 100644 --- a/examples/pybullet/gym/pybullet_envs/agents/tools/batch_env.py +++ b/examples/pybullet/gym/pybullet_envs/agents/tools/batch_env.py @@ -86,12 +86,12 @@ def step(self, actions): else: transitions = [env.step(action, blocking=False) for env, action in zip(self._envs, actions)] transitions = [transition() for transition in transitions] - observs, rewards, dones, infos = zip(*transitions) + observs, rewards, dones, _, infos = zip(*transitions) observ = np.stack(observs) reward = np.stack(rewards) done = np.stack(dones) info = tuple(infos) - return observ, reward, done, info + return observ, reward, done, False, info def reset(self, indices=None): """Reset the environment and convert the resulting observation. 
@@ -110,7 +110,7 @@ def reset(self, indices=None): observs = [self._envs[index].reset(blocking=False) for index in indices] observs = [observ() for observ in observs] observ = np.stack(observs) - return observ + return observ, {} def close(self): """Send close messages to the external process and join them.""" diff --git a/examples/pybullet/gym/pybullet_envs/agents/tools/in_graph_batch_env.py b/examples/pybullet/gym/pybullet_envs/agents/tools/in_graph_batch_env.py index 8c7b73163d..b9cfb097ec 100644 --- a/examples/pybullet/gym/pybullet_envs/agents/tools/in_graph_batch_env.py +++ b/examples/pybullet/gym/pybullet_envs/agents/tools/in_graph_batch_env.py @@ -120,7 +120,7 @@ def reset(self, indices=None): tf.scatter_update(self._reward, indices, reward), tf.scatter_update(self._done, indices, done) ]): - return tf.identity(observ) + return tf.identity(observ), {} @property def observ(self): diff --git a/examples/pybullet/gym/pybullet_envs/agents/tools/in_graph_env.py b/examples/pybullet/gym/pybullet_envs/agents/tools/in_graph_env.py index e5083bf08d..fac61cfeeb 100644 --- a/examples/pybullet/gym/pybullet_envs/agents/tools/in_graph_env.py +++ b/examples/pybullet/gym/pybullet_envs/agents/tools/in_graph_env.py @@ -102,7 +102,7 @@ def reset(self): [self._observ.assign(observ), self._reward.assign(0), self._done.assign(False)]): - return tf.identity(observ) + return tf.identity(observ), {} @property def observ(self): diff --git a/examples/pybullet/gym/pybullet_envs/agents/tools/mock_environment.py b/examples/pybullet/gym/pybullet_envs/agents/tools/mock_environment.py index 9db14d56cd..9b931d23e7 100644 --- a/examples/pybullet/gym/pybullet_envs/agents/tools/mock_environment.py +++ b/examples/pybullet/gym/pybullet_envs/agents/tools/mock_environment.py @@ -70,7 +70,7 @@ def step(self, action): reward = self._current_reward() done = self.steps[-1] >= self.durations[-1] info = {} - return observ, reward, done, info + return observ, reward, done, False, info def reset(self): duration = self._random.randint(self._min_duration, self._max_duration + 1) diff --git a/examples/pybullet/gym/pybullet_envs/agents/tools/wrappers.py b/examples/pybullet/gym/pybullet_envs/agents/tools/wrappers.py index a8d184e61d..58026a3f7e 100644 --- a/examples/pybullet/gym/pybullet_envs/agents/tools/wrappers.py +++ b/examples/pybullet/gym/pybullet_envs/agents/tools/wrappers.py @@ -45,9 +45,9 @@ def step(self, action): if self._done: observ, reward, done, info = self._env.reset(), 0.0, False, {} else: - observ, reward, done, info = self._env.step(action) + observ, reward, done, _, info = self._env.step(action) self._done = done - return observ, reward, done, info + return observ, reward, done, False, info def reset(self): self._done = False @@ -69,10 +69,10 @@ def step(self, action): total_reward = 0 current_step = 0 while current_step < self._amount and not done: - observ, reward, done, info = self._env.step(action) + observ, reward, done, _, info = self._env.step(action) total_reward += reward current_step += 1 - return observ, total_reward, done, info + return observ, total_reward, done, False, info class RandomStart(object): @@ -90,11 +90,11 @@ def reset(self): random_steps = np.random.randint(0, self._max_steps) for _ in range(random_steps): action = self._env.action_space.sample() - observ, unused_reward, done, unused_info = self._env.step(action) + observ, unused_reward, done, _, unused_info = self._env.step(action) if done: tf.logging.warning('Episode ended during random start.') return self.reset() - return observ + return 
observ, {} class FrameHistory(object): @@ -138,17 +138,17 @@ def observation_space(self): return gym.spaces.Box(low, high) def step(self, action): - observ, reward, done, info = self._env.step(action) + observ, reward, done, _, info = self._env.step(action) self._step += 1 self._buffer[self._step % self._capacity] = observ observ = self._select_frames() - return observ, reward, done, info + return observ, reward, done, False, info def reset(self): observ = self._env.reset() self._buffer = np.repeat(observ[None, ...], self._capacity, 0) self._step = 0 - return self._select_frames() + return self._select_frames(), {} def _select_frames(self): indices = [(self._step - index) % self._capacity for index in self._past_indices] @@ -176,15 +176,15 @@ def observation_space(self): return gym.spaces.Box(low, high) def step(self, action): - observ, reward, done, info = self._env.step(action) + observ, reward, done, _, info = self._env.step(action) delta = observ - self._last self._last = observ - return delta, reward, done, info + return delta, reward, done, False, info def reset(self): observ = self._env.reset() self._last = observ - return observ + return observ, {} class RangeNormalize(object): @@ -225,16 +225,16 @@ def action_space(self): def step(self, action): if self._should_normalize_action: action = self._denormalize_action(action) - observ, reward, done, info = self._env.step(action) + observ, reward, done, _, info = self._env.step(action) if self._should_normalize_observ: observ = self._normalize_observ(observ) - return observ, reward, done, info + return observ, reward, done, False, info def reset(self): observ = self._env.reset() if self._should_normalize_observ: observ = self._normalize_observ(observ) - return observ + return observ, {} def _denormalize_action(self, action): min_ = self._env.action_space.low @@ -286,12 +286,12 @@ def __getattr__(self, name): def step(self, action): if self._step is None: raise RuntimeError('Must reset environment.') - observ, reward, done, info = self._env.step(action) + observ, reward, done, _, info = self._env.step(action) self._step += 1 if self._step >= self._duration: done = True self._step = None - return observ, reward, done, info + return observ, reward, done, False, info def reset(self): self._step = 0 @@ -506,10 +506,10 @@ def step(self, action): Returns: Converted observation, converted reward, done flag, and info object. """ - observ, reward, done, info = self._env.step(action) + observ, reward, done, _, info = self._env.step(action) observ = self._convert_observ(observ) reward = self._convert_reward(reward) - return observ, reward, done, info + return observ, reward, done, False, info def reset(self): """Reset the environment and convert the resulting observation. 
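Not part of the patch: the wrapper changes above all follow the gym >= 0.26 convention in which reset() returns (observation, info) and step() returns a five-tuple (observation, reward, terminated, truncated, info); these wrappers simply forward False for the new truncated flag. A minimal sketch of a caller driving one of the ids registered above under that API, assuming gym >= 0.26 and pybullet are installed:

import gym
import pybullet_envs  # noqa: F401  (importing registers the Bullet environments)

env = gym.make("AntBulletEnv-v0")
obs, info = env.reset()              # reset() now returns (observation, info)
done = False
total_reward = 0.0
while not done:
    action = env.action_space.sample()
    # step() now returns (obs, reward, terminated, truncated, info)
    obs, reward, terminated, truncated, info = env.step(action)
    total_reward += reward
    done = terminated or truncated
env.close()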
diff --git a/examples/pybullet/gym/pybullet_envs/baselines/enjoy_kuka_diverse_object_grasping.py b/examples/pybullet/gym/pybullet_envs/baselines/enjoy_kuka_diverse_object_grasping.py index 509e33d1f9..f77e515bc4 100644 --- a/examples/pybullet/gym/pybullet_envs/baselines/enjoy_kuka_diverse_object_grasping.py +++ b/examples/pybullet/gym/pybullet_envs/baselines/enjoy_kuka_diverse_object_grasping.py @@ -37,11 +37,11 @@ def sample_action(self, obs, explore_prob): def main(): - env = KukaDiverseObjectEnv(renders=True, isDiscrete=False) + env = KukaDiverseObjectEnv(render_mode=True, isDiscrete=False) policy = ContinuousDownwardBiasPolicy() while True: - obs, done = env.reset(), False + (obs, _), done = env.reset(), False print("===================================") print("obs") print(obs) @@ -51,7 +51,7 @@ def main(): act = policy.sample_action(obs, .1) print("Action") print(act) - obs, rew, done, _ = env.step([0, 0, 0, 0, 0]) + obs, rew, done, _, _ = env.step([0, 0, 0, 0, 0]) episode_rew += rew print("Episode reward", episode_rew) diff --git a/examples/pybullet/gym/pybullet_envs/baselines/enjoy_kuka_grasping.py b/examples/pybullet/gym/pybullet_envs/baselines/enjoy_kuka_grasping.py index 422a7a621a..2078c32f1f 100644 --- a/examples/pybullet/gym/pybullet_envs/baselines/enjoy_kuka_grasping.py +++ b/examples/pybullet/gym/pybullet_envs/baselines/enjoy_kuka_grasping.py @@ -12,18 +12,18 @@ def main(): - env = KukaGymEnv(renders=True, isDiscrete=True) + env = KukaGymEnv(render_mode=True, isDiscrete=True) act = deepq.load("kuka_model.pkl") print(act) while True: - obs, done = env.reset(), False + (obs, _), done = env.reset(), False print("===================================") print("obs") print(obs) episode_rew = 0 while not done: env.render() - obs, rew, done, _ = env.step(act(obs[None])[0]) + obs, rew, done, _, _ = env.step(act(obs[None])[0]) episode_rew += rew print("Episode reward", episode_rew) diff --git a/examples/pybullet/gym/pybullet_envs/baselines/enjoy_pybullet_cartpole.py b/examples/pybullet/gym/pybullet_envs/baselines/enjoy_pybullet_cartpole.py index 341029dd4e..9ba58ed6a4 100644 --- a/examples/pybullet/gym/pybullet_envs/baselines/enjoy_pybullet_cartpole.py +++ b/examples/pybullet/gym/pybullet_envs/baselines/enjoy_pybullet_cartpole.py @@ -16,7 +16,7 @@ def main(): act = deepq.load("cartpole_model.pkl") while True: - obs, done = env.reset(), False + (obs, _), done = env.reset(), False print("obs") print(obs) print("type(obs)") @@ -28,7 +28,7 @@ def main(): o = obs[None] aa = act(o) a = aa[0] - obs, rew, done, _ = env.step(a) + obs, rew, done, _, _ = env.step(a) episode_rew += rew time.sleep(1. / 240.)
print("Episode reward", episode_rew) diff --git a/examples/pybullet/gym/pybullet_envs/baselines/enjoy_pybullet_racecar.py b/examples/pybullet/gym/pybullet_envs/baselines/enjoy_pybullet_racecar.py index 64936dd9a7..feab0f54df 100644 --- a/examples/pybullet/gym/pybullet_envs/baselines/enjoy_pybullet_racecar.py +++ b/examples/pybullet/gym/pybullet_envs/baselines/enjoy_pybullet_racecar.py @@ -12,18 +12,18 @@ def main(): - env = RacecarGymEnv(renders=True, isDiscrete=True) + env = RacecarGymEnv(render_mode=True, isDiscrete=True) act = deepq.load("racecar_model.pkl") print(act) while True: - obs, done = env.reset(), False + obs, _, done = env.reset(), False print("===================================") print("obs") print(obs) episode_rew = 0 while not done: env.render() - obs, rew, done, _ = env.step(act(obs[None])[0]) + obs, rew, done, _, _ = env.step(act(obs[None])[0]) episode_rew += rew print("Episode reward", episode_rew) diff --git a/examples/pybullet/gym/pybullet_envs/baselines/enjoy_pybullet_zed_racecar.py b/examples/pybullet/gym/pybullet_envs/baselines/enjoy_pybullet_zed_racecar.py index a8bcaa9879..44cfc3e11a 100644 --- a/examples/pybullet/gym/pybullet_envs/baselines/enjoy_pybullet_zed_racecar.py +++ b/examples/pybullet/gym/pybullet_envs/baselines/enjoy_pybullet_zed_racecar.py @@ -12,18 +12,18 @@ def main(): - env = RacecarZEDGymEnv(renders=True) + env = RacecarZEDGymEnv(render_mode=True) act = deepq.load("racecar_zed_model.pkl") print(act) while True: - obs, done = env.reset(), False + obs, _, done = env.reset(), False print("===================================") print("obs") print(obs) episode_rew = 0 while not done: env.render() - obs, rew, done, _ = env.step(act(obs[None])[0]) + obs, rew, done, _, _ = env.step(act(obs[None])[0]) episode_rew += rew print("Episode reward", episode_rew) diff --git a/examples/pybullet/gym/pybullet_envs/baselines/train_kuka_cam_grasping.py b/examples/pybullet/gym/pybullet_envs/baselines/train_kuka_cam_grasping.py index 804be59f6a..437a4dd744 100644 --- a/examples/pybullet/gym/pybullet_envs/baselines/train_kuka_cam_grasping.py +++ b/examples/pybullet/gym/pybullet_envs/baselines/train_kuka_cam_grasping.py @@ -24,7 +24,7 @@ def callback(lcl, glb): def main(): - env = KukaCamGymEnv(renders=False, isDiscrete=True) + env = KukaCamGymEnv(render_mode=False, isDiscrete=True) model = deepq.models.cnn_to_mlp(convs=[(32, 8, 4), (64, 4, 2), (64, 3, 1)], hiddens=[256], dueling=False) diff --git a/examples/pybullet/gym/pybullet_envs/baselines/train_kuka_grasping.py b/examples/pybullet/gym/pybullet_envs/baselines/train_kuka_grasping.py index 10225a1942..7330edab9d 100644 --- a/examples/pybullet/gym/pybullet_envs/baselines/train_kuka_grasping.py +++ b/examples/pybullet/gym/pybullet_envs/baselines/train_kuka_grasping.py @@ -24,7 +24,7 @@ def callback(lcl, glb): def main(): - env = KukaGymEnv(renders=False, isDiscrete=True) + env = KukaGymEnv(render_mode=False, isDiscrete=True) model = deepq.models.mlp([64]) act = deepq.learn(env, q_func=model, diff --git a/examples/pybullet/gym/pybullet_envs/baselines/train_pybullet_cartpole.py b/examples/pybullet/gym/pybullet_envs/baselines/train_pybullet_cartpole.py index 9c369a3a2d..15537606f2 100644 --- a/examples/pybullet/gym/pybullet_envs/baselines/train_pybullet_cartpole.py +++ b/examples/pybullet/gym/pybullet_envs/baselines/train_pybullet_cartpole.py @@ -18,7 +18,7 @@ def callback(lcl, glb): def main(): - env = CartPoleBulletEnv(renders=False) + env = CartPoleBulletEnv(render_mode=False) model = deepq.models.mlp([64]) act = 
deepq.learn(env, q_func=model, diff --git a/examples/pybullet/gym/pybullet_envs/baselines/train_pybullet_racecar.py b/examples/pybullet/gym/pybullet_envs/baselines/train_pybullet_racecar.py index 9b06bcfb2f..5169988ace 100644 --- a/examples/pybullet/gym/pybullet_envs/baselines/train_pybullet_racecar.py +++ b/examples/pybullet/gym/pybullet_envs/baselines/train_pybullet_racecar.py @@ -22,7 +22,7 @@ def callback(lcl, glb): def main(): - env = RacecarGymEnv(renders=False, isDiscrete=True) + env = RacecarGymEnv(render_mode=False, isDiscrete=True) model = deepq.models.mlp([64]) act = deepq.learn(env, q_func=model, diff --git a/examples/pybullet/gym/pybullet_envs/baselines/train_pybullet_zed_racecar.py b/examples/pybullet/gym/pybullet_envs/baselines/train_pybullet_zed_racecar.py index 825197b79a..afff56b4de 100644 --- a/examples/pybullet/gym/pybullet_envs/baselines/train_pybullet_zed_racecar.py +++ b/examples/pybullet/gym/pybullet_envs/baselines/train_pybullet_zed_racecar.py @@ -22,7 +22,7 @@ def callback(lcl, glb): def main(): - env = RacecarZEDGymEnv(renders=False, isDiscrete=True) + env = RacecarZEDGymEnv(render_mode=False, isDiscrete=True) model = deepq.models.cnn_to_mlp(convs=[(32, 8, 4), (64, 4, 2), (64, 3, 1)], hiddens=[256], dueling=False) diff --git a/examples/pybullet/gym/pybullet_envs/bullet/cartpole_bullet.py b/examples/pybullet/gym/pybullet_envs/bullet/cartpole_bullet.py index 399b7f455a..764b932fb5 100644 --- a/examples/pybullet/gym/pybullet_envs/bullet/cartpole_bullet.py +++ b/examples/pybullet/gym/pybullet_envs/bullet/cartpole_bullet.py @@ -26,9 +26,9 @@ class CartPoleBulletEnv(gym.Env): metadata = {'render.modes': ['human', 'rgb_array'], 'video.frames_per_second': 50} - def __init__(self, renders=False, discrete_actions=True): + def __init__(self, render_mode=False, discrete_actions=True): # start the bullet physics server - self._renders = renders + self._renders = render_mode self._discrete_actions = discrete_actions self._render_height = 200 self._render_width = 320 @@ -84,7 +84,7 @@ def step(self, action): done = bool(done) reward = 1.0 #print("state=",self.state) - return np.array(self.state), reward, done, {} + return np.array(self.state), reward, done, False, {} def reset(self): # print("-----------reset simulation---------------") @@ -115,7 +115,7 @@ def reset(self): #print("randstate=",randstate) self.state = p.getJointState(self.cartpole, 1)[0:2] + p.getJointState(self.cartpole, 0)[0:2] #print("self.state=", self.state) - return np.array(self.state) + return np.array(self.state), {} def render(self, mode='human', close=False): if mode == "human": @@ -163,6 +163,6 @@ def close(self): class CartPoleContinuousBulletEnv(CartPoleBulletEnv): metadata = {'render.modes': ['human', 'rgb_array'], 'video.frames_per_second': 50} - def __init__(self, renders=False): + def __init__(self, render_mode=False): # start the bullet physics server - CartPoleBulletEnv.__init__(self, renders, discrete_actions=False) + CartPoleBulletEnv.__init__(self, render_mode, discrete_actions=False) diff --git a/examples/pybullet/gym/pybullet_envs/bullet/kukaCamGymEnv.py b/examples/pybullet/gym/pybullet_envs/bullet/kukaCamGymEnv.py index bb715ec41a..9ef06f314a 100644 --- a/examples/pybullet/gym/pybullet_envs/bullet/kukaCamGymEnv.py +++ b/examples/pybullet/gym/pybullet_envs/bullet/kukaCamGymEnv.py @@ -28,7 +28,7 @@ def __init__(self, urdfRoot=pybullet_data.getDataPath(), actionRepeat=1, isEnableSelfCollision=True, - renders=False, + render_mode=False, isDiscrete=False): self._timeStep = 1. / 240. 
self._urdfRoot = urdfRoot @@ -36,7 +36,7 @@ def __init__(self, self._isEnableSelfCollision = isEnableSelfCollision self._observation = [] self._envStepCounter = 0 - self._renders = renders + self._renders = render_mode self._width = 341 self._height = 256 self._isDiscrete = isDiscrete @@ -92,7 +92,7 @@ def reset(self): self._envStepCounter = 0 p.stepSimulation() self._observation = self.getExtendedObservation() - return np.array(self._observation) + return np.array(self._observation), {} def __del__(self): p.disconnect() @@ -177,7 +177,7 @@ def step2(self, action): reward = self._reward() #print("len=%r" % len(self._observation)) - return np.array(self._observation), reward, done, {} + return np.array(self._observation), reward, done, False, {} def render(self, mode='human', close=False): if mode != "rgb_array": diff --git a/examples/pybullet/gym/pybullet_envs/bullet/kukaGymEnv.py b/examples/pybullet/gym/pybullet_envs/bullet/kukaGymEnv.py index f47a0dd13d..272da0fcf9 100644 --- a/examples/pybullet/gym/pybullet_envs/bullet/kukaGymEnv.py +++ b/examples/pybullet/gym/pybullet_envs/bullet/kukaGymEnv.py @@ -28,7 +28,7 @@ def __init__(self, urdfRoot=pybullet_data.getDataPath(), actionRepeat=1, isEnableSelfCollision=True, - renders=False, + render_mode=False, isDiscrete=False, maxSteps=1000): #print("KukaGymEnv __init__") @@ -39,7 +39,7 @@ def __init__(self, self._isEnableSelfCollision = isEnableSelfCollision self._observation = [] self._envStepCounter = 0 - self._renders = renders + self._renders = render_mode self._maxSteps = maxSteps self.terminated = 0 self._cam_dist = 1.3 @@ -95,7 +95,7 @@ def reset(self): self._envStepCounter = 0 p.stepSimulation() self._observation = self.getExtendedObservation() - return np.array(self._observation) + return np.array(self._observation), {} def __del__(self): p.disconnect() @@ -184,7 +184,7 @@ def step2(self, action): #print("len=%r" % len(self._observation)) - return np.array(self._observation), reward, done, {} + return np.array(self._observation), reward, done, False, {} def render(self, mode="rgb_array", close=False): if mode != "rgb_array": diff --git a/examples/pybullet/gym/pybullet_envs/bullet/kuka_diverse_object_gym_env.py b/examples/pybullet/gym/pybullet_envs/bullet/kuka_diverse_object_gym_env.py index 294a5984a3..0b3fdf90d0 100644 --- a/examples/pybullet/gym/pybullet_envs/bullet/kuka_diverse_object_gym_env.py +++ b/examples/pybullet/gym/pybullet_envs/bullet/kuka_diverse_object_gym_env.py @@ -25,7 +25,7 @@ def __init__(self, urdfRoot=pybullet_data.getDataPath(), actionRepeat=80, isEnableSelfCollision=True, - renders=False, + render_mode=False, isDiscrete=False, maxSteps=8, dv=0.06, @@ -42,7 +42,7 @@ def __init__(self, urdfRoot: The diretory from which to load environment URDF's. actionRepeat: The number of simulation steps to apply for each action. isEnableSelfCollision: If true, enable self-collision. - renders: If true, render the bullet GUI. + render_mode: If true, render the bullet GUI. isDiscrete: If true, the action space is discrete. If False, the action space is continuous. maxSteps: The maximum number of actions per episode. 
@@ -68,7 +68,7 @@ def __init__(self, self._isEnableSelfCollision = isEnableSelfCollision self._observation = [] self._envStepCounter = 0 - self._renders = renders + self._renders = render_mode self._maxSteps = maxSteps self.terminated = 0 self._cam_dist = 1.3 diff --git a/examples/pybullet/gym/pybullet_envs/bullet/minitaur_duck_gym_env.py b/examples/pybullet/gym/pybullet_envs/bullet/minitaur_duck_gym_env.py index 6c2d9dabba..9d5a07ae3d 100644 --- a/examples/pybullet/gym/pybullet_envs/bullet/minitaur_duck_gym_env.py +++ b/examples/pybullet/gym/pybullet_envs/bullet/minitaur_duck_gym_env.py @@ -70,7 +70,7 @@ def __init__( motor_overheat_protection=True, hard_reset=True, on_rack=False, - render=False, + render_mode=False, kd_for_pd_controllers=0.3, env_randomizer=minitaur_env_randomizer.MinitaurEnvRandomizer()): """Initialize the minitaur gym environment. @@ -104,7 +104,7 @@ def __init__( on_rack: Whether to place the minitaur on rack. This is only used to debug the walking gait. In this mode, the minitaur's base is hanged midair so that its walking gait is clearer to visualize. - render: Whether to render the simulation. + render_mode: Whether to render the simulation. kd_for_pd_controllers: kd value for the pd controllers of the motors env_randomizer: An EnvRandomizer to randomize the physical properties during reset(). @@ -117,7 +117,7 @@ def __init__( self._motor_velocity_limit = motor_velocity_limit self._observation = [] self._env_step_counter = 0 - self._is_render = render + self._is_render = render_mode self._last_base_position = [0, 0, 0] self._distance_weight = distance_weight self._energy_weight = energy_weight @@ -213,7 +213,7 @@ def reset(self): if self._pd_control_enabled or self._accurate_motor_model_enabled: self.minitaur.ApplyAction([math.pi / 2] * 8) self._pybullet_client.stepSimulation() - return self._noisy_observation() + return self._noisy_observation(), {} def seed(self, seed=None): self.np_random, seed = seeding.np_random(seed) @@ -263,7 +263,7 @@ def step(self, action): self._env_step_counter += 1 reward = self._reward() done = self._termination() - return np.array(self._noisy_observation()), reward, done, {} + return np.array(self._noisy_observation()), reward, done, False, {} def render(self, mode="rgb_array", close=False): if mode != "rgb_array": diff --git a/examples/pybullet/gym/pybullet_envs/bullet/minitaur_gym_env.py b/examples/pybullet/gym/pybullet_envs/bullet/minitaur_gym_env.py index 1770fa5e3d..25c19294ef 100644 --- a/examples/pybullet/gym/pybullet_envs/bullet/minitaur_gym_env.py +++ b/examples/pybullet/gym/pybullet_envs/bullet/minitaur_gym_env.py @@ -66,7 +66,7 @@ def __init__( motor_overheat_protection=True, hard_reset=True, on_rack=False, - render=False, + render_mode=False, kd_for_pd_controllers=0.3, env_randomizer=minitaur_env_randomizer.MinitaurEnvRandomizer()): """Initialize the minitaur gym environment. @@ -100,7 +100,7 @@ def __init__( on_rack: Whether to place the minitaur on rack. This is only used to debug the walking gait. In this mode, the minitaur's base is hanged midair so that its walking gait is clearer to visualize. - render: Whether to render the simulation. + render_mode: Whether to render the simulation. kd_for_pd_controllers: kd value for the pd controllers of the motors env_randomizer: An EnvRandomizer to randomize the physical properties during reset(). 
@@ -113,7 +113,7 @@ def __init__( self._motor_velocity_limit = motor_velocity_limit self._observation = [] self._env_step_counter = 0 - self._is_render = render + self._is_render = render_mode self._last_base_position = [0, 0, 0] self._distance_weight = distance_weight self._energy_weight = energy_weight @@ -208,7 +208,7 @@ def reset(self): if self._pd_control_enabled or self._accurate_motor_model_enabled: self.minitaur.ApplyAction([math.pi / 2] * 8) self._pybullet_client.stepSimulation() - return self._noisy_observation() + return self._noisy_observation(), {} def seed(self, seed=None): self.np_random, seed = seeding.np_random(seed) @@ -267,7 +267,7 @@ def step(self, action): self._env_step_counter += 1 reward = self._reward() done = self._termination() - return np.array(self._noisy_observation()), reward, done, {} + return np.array(self._noisy_observation()), reward, done, False, {} def render(self, mode="rgb_array", close=False): if mode != "rgb_array": diff --git a/examples/pybullet/gym/pybullet_envs/bullet/racecarGymEnv.py b/examples/pybullet/gym/pybullet_envs/bullet/racecarGymEnv.py index 445ae7d652..fee8a36cc8 100644 --- a/examples/pybullet/gym/pybullet_envs/bullet/racecarGymEnv.py +++ b/examples/pybullet/gym/pybullet_envs/bullet/racecarGymEnv.py @@ -28,7 +28,7 @@ def __init__(self, actionRepeat=50, isEnableSelfCollision=True, isDiscrete=False, - renders=False): + render_mode=False): print("init") self._timeStep = 0.01 self._urdfRoot = urdfRoot @@ -37,7 +37,7 @@ def __init__(self, self._observation = [] self._ballUniqueId = -1 self._envStepCounter = 0 - self._renders = renders + self._renders = render_mode self._isDiscrete = isDiscrete if self._renders: self._p = bc.BulletClient(connection_mode=pybullet.GUI) @@ -88,7 +88,7 @@ def reset(self): for i in range(100): self._p.stepSimulation() self._observation = self.getExtendedObservation() - return np.array(self._observation) + return np.array(self._observation), {} def __del__(self): self._p = 0 @@ -135,7 +135,7 @@ def step(self, action): done = self._termination() #print("len=%r" % len(self._observation)) - return np.array(self._observation), reward, done, {} + return np.array(self._observation), reward, done, False, {} def render(self, mode='human', close=False): if mode != "rgb_array": diff --git a/examples/pybullet/gym/pybullet_envs/bullet/racecarZEDGymEnv.py b/examples/pybullet/gym/pybullet_envs/bullet/racecarZEDGymEnv.py index ebd67f156c..a9c753fe7c 100644 --- a/examples/pybullet/gym/pybullet_envs/bullet/racecarZEDGymEnv.py +++ b/examples/pybullet/gym/pybullet_envs/bullet/racecarZEDGymEnv.py @@ -28,7 +28,7 @@ def __init__(self, actionRepeat=10, isEnableSelfCollision=True, isDiscrete=False, - renders=True): + render_mode=True): print("init") self._timeStep = 0.01 self._urdfRoot = urdfRoot @@ -36,7 +36,7 @@ def __init__(self, self._isEnableSelfCollision = isEnableSelfCollision self._ballUniqueId = -1 self._envStepCounter = 0 - self._renders = renders + self._renders = render_mode self._width = 100 self._height = 10 @@ -94,7 +94,7 @@ def reset(self): for i in range(100): self._p.stepSimulation() self._observation = self.getExtendedObservation() - return np.array(self._observation) + return np.array(self._observation), {} def __del__(self): self._p = 0 @@ -165,7 +165,7 @@ def step(self, action): done = self._termination() #print("len=%r" % len(self._observation)) - return np.array(self._observation), reward, done, {} + return np.array(self._observation), reward, done, False, {} def render(self, mode='human', close=False): if mode != 
"rgb_array": diff --git a/examples/pybullet/gym/pybullet_envs/deep_mimic/gym_env/deep_mimic_env.py b/examples/pybullet/gym/pybullet_envs/deep_mimic/gym_env/deep_mimic_env.py index 26366a079b..54700bc5ce 100644 --- a/examples/pybullet/gym/pybullet_envs/deep_mimic/gym_env/deep_mimic_env.py +++ b/examples/pybullet/gym/pybullet_envs/deep_mimic/gym_env/deep_mimic_env.py @@ -31,7 +31,7 @@ class HumanoidDeepBulletEnv(gym.Env): """Base Gym environment for DeepMimic.""" metadata = {'render.modes': ['human', 'rgb_array'], 'video.frames_per_second': 50} - def __init__(self, renders=False, arg_file='', test_mode=False, + def __init__(self, render_mode=False, arg_file='', test_mode=False, time_step=1./240, rescale_actions=True, rescale_observations=True): @@ -53,7 +53,7 @@ def __init__(self, renders=False, arg_file='', test_mode=False, self._p = None self._time_step = time_step self._internal_env = None - self._renders = renders + self._renders = render_mode self._discrete_actions = False self._arg_file = arg_file self._render_height = 400 @@ -168,7 +168,7 @@ def step(self, action): done = self._internal_env.is_episode_end() info = {} - return state, reward, done, info + return state, reward, done, False, info def reset(self): # use the initialization strategy @@ -198,7 +198,7 @@ def reset(self): mean = -self._state_offset std = 1./self._state_scale state = (state - mean) / (std + 1e-8) - return state + return state, {} def render(self, mode='human', close=False): if mode == "human": @@ -257,21 +257,21 @@ def close(self): class HumanoidDeepMimicBackflipBulletEnv(HumanoidDeepBulletEnv): metadata = {'render.modes': ['human', 'rgb_array'], 'video.frames_per_second': 50} - def __init__(self, renders=False): + def __init__(self, render_mode=False): # start the bullet physics server - HumanoidDeepBulletEnv.__init__(self, renders, arg_file="run_humanoid3d_backflip_args.txt") + HumanoidDeepBulletEnv.__init__(self, render_mode, arg_file="run_humanoid3d_backflip_args.txt") class HumanoidDeepMimicWalkBulletEnv(HumanoidDeepBulletEnv): metadata = {'render.modes': ['human', 'rgb_array'], 'video.frames_per_second': 50} - def __init__(self, renders=False): + def __init__(self, render_mode=False): # start the bullet physics server - HumanoidDeepBulletEnv.__init__(self, renders, arg_file="run_humanoid3d_walk_args.txt") + HumanoidDeepBulletEnv.__init__(self, render_mode, arg_file="run_humanoid3d_walk_args.txt") class CartPoleContinuousBulletEnv5(HumanoidDeepBulletEnv): metadata = {'render.modes': ['human', 'rgb_array'], 'video.frames_per_second': 50} - def __init__(self, renders=False): + def __init__(self, render_mode=False): # start the bullet physics server - HumanoidDeepBulletEnv.__init__(self, renders, arg_file="") + HumanoidDeepBulletEnv.__init__(self, render_mode, arg_file="") diff --git a/examples/pybullet/gym/pybullet_envs/deep_mimic/gym_env/deepmimic_test.py b/examples/pybullet/gym/pybullet_envs/deep_mimic/gym_env/deepmimic_test.py index 46c875aba9..d92bacc35f 100644 --- a/examples/pybullet/gym/pybullet_envs/deep_mimic/gym_env/deepmimic_test.py +++ b/examples/pybullet/gym/pybullet_envs/deep_mimic/gym_env/deepmimic_test.py @@ -27,7 +27,7 @@ # env.reset() #action=[0]*36 action = env.env.action_space.sample() - state, reward, done, info = env.step(action) + state, reward, done, _, info = env.step(action) #env.render(mode='rgb_array') if done: env.reset() diff --git a/examples/pybullet/gym/pybullet_envs/env_bases.py b/examples/pybullet/gym/pybullet_envs/env_bases.py index ac7c2c067a..58ab809be2 100644 --- 
a/examples/pybullet/gym/pybullet_envs/env_bases.py +++ b/examples/pybullet/gym/pybullet_envs/env_bases.py @@ -22,12 +22,12 @@ class MJCFBaseBulletEnv(gym.Env): metadata = {'render.modes': ['human', 'rgb_array'], 'video.frames_per_second': 60} - def __init__(self, robot, render=False): + def __init__(self, robot, render_mode=False): self.scene = None self.physicsClientId = -1 self.ownsPhysicsClient = 0 self.camera = Camera(self) - self.isRender = render + self.isRender = render_mode self.robot = robot self.seed() self._cam_dist = 3 @@ -86,7 +86,7 @@ def reset(self): dump = 0 s = self.robot.reset(self._p) self.potential = self.robot.calc_potential() - return s + return s, {} def camera_adjust(self): pass diff --git a/examples/pybullet/gym/pybullet_envs/examples/enjoy_TF_AntBulletEnv_v0_2017may.py b/examples/pybullet/gym/pybullet_envs/examples/enjoy_TF_AntBulletEnv_v0_2017may.py index 671cf0e8e6..d9d9f2d7e2 100644 --- a/examples/pybullet/gym/pybullet_envs/examples/enjoy_TF_AntBulletEnv_v0_2017may.py +++ b/examples/pybullet/gym/pybullet_envs/examples/enjoy_TF_AntBulletEnv_v0_2017may.py @@ -42,12 +42,12 @@ def main(): frame = 0 score = 0 restart_delay = 0 - obs = env.reset() + obs, _ = env.reset() while 1: time.sleep(1. / 60.) a = pi.act(obs) - obs, r, done, _ = env.step(a) + obs, r, done, _, _ = env.step(a) #print("reward") #print(r) score += r diff --git a/examples/pybullet/gym/pybullet_envs/examples/enjoy_TF_HalfCheetahBulletEnv_v0_2017may.py b/examples/pybullet/gym/pybullet_envs/examples/enjoy_TF_HalfCheetahBulletEnv_v0_2017may.py index be19716779..949cc001a6 100644 --- a/examples/pybullet/gym/pybullet_envs/examples/enjoy_TF_HalfCheetahBulletEnv_v0_2017may.py +++ b/examples/pybullet/gym/pybullet_envs/examples/enjoy_TF_HalfCheetahBulletEnv_v0_2017may.py @@ -42,11 +42,11 @@ def main(): frame = 0 score = 0 restart_delay = 0 - obs = env.reset() + obs, _ = env.reset() while 1: time.sleep(1. / 60.) a = pi.act(obs) - obs, r, done, _ = env.step(a) + obs, r, done, _, _ = env.step(a) score += r frame += 1 still_open = env.render(mode="human") diff --git a/examples/pybullet/gym/pybullet_envs/examples/enjoy_TF_HopperBulletEnv_v0_2017may.py b/examples/pybullet/gym/pybullet_envs/examples/enjoy_TF_HopperBulletEnv_v0_2017may.py index f341aaf003..26cd8b49ee 100644 --- a/examples/pybullet/gym/pybullet_envs/examples/enjoy_TF_HopperBulletEnv_v0_2017may.py +++ b/examples/pybullet/gym/pybullet_envs/examples/enjoy_TF_HopperBulletEnv_v0_2017may.py @@ -43,12 +43,12 @@ def main(): score = 0 restart_delay = 0 #disable rendering during reset, makes loading much faster - obs = env.reset() + obs, _ = env.reset() while 1: time.sleep(1. / 60.) a = pi.act(obs) - obs, r, done, _ = env.step(a) + obs, r, done, _, _ = env.step(a) score += r frame += 1 still_open = env.render("human") diff --git a/examples/pybullet/gym/pybullet_envs/examples/enjoy_TF_HumanoidBulletEnv_v0_2017may.py b/examples/pybullet/gym/pybullet_envs/examples/enjoy_TF_HumanoidBulletEnv_v0_2017may.py index 9b04c44a28..b4cd7a8eac 100644 --- a/examples/pybullet/gym/pybullet_envs/examples/enjoy_TF_HumanoidBulletEnv_v0_2017may.py +++ b/examples/pybullet/gym/pybullet_envs/examples/enjoy_TF_HumanoidBulletEnv_v0_2017may.py @@ -40,12 +40,12 @@ def main(): frame = 0 score = 0 restart_delay = 0 - obs = env.reset() + obs, _ = env.reset() while 1: time.sleep(1. / 60.)
a = pi.act(obs) - obs, r, done, _ = env.step(a) + obs, r, done, _, _ = env.step(a) score += r frame += 1 still_open = env.render(mode="human") diff --git a/examples/pybullet/gym/pybullet_envs/examples/enjoy_TF_HumanoidFlagrunHarderBulletEnv_v1_2017jul.py b/examples/pybullet/gym/pybullet_envs/examples/enjoy_TF_HumanoidFlagrunHarderBulletEnv_v1_2017jul.py index cad0ca566a..81360b078b 100644 --- a/examples/pybullet/gym/pybullet_envs/examples/enjoy_TF_HumanoidFlagrunHarderBulletEnv_v1_2017jul.py +++ b/examples/pybullet/gym/pybullet_envs/examples/enjoy_TF_HumanoidFlagrunHarderBulletEnv_v1_2017jul.py @@ -42,14 +42,14 @@ def demo_run(): frame = 0 score = 0 restart_delay = 0 - obs = env.reset() + obs, _ = env.reset() while 1: if (gui): time.sleep(1. / 60) a = pi.act(obs) - obs, r, done, _ = env.step(a) + obs, r, done, _, _ = env.step(a) score += r frame += 1 diff --git a/examples/pybullet/gym/pybullet_envs/examples/enjoy_TF_InvertedDoublePendulumBulletEnv_v0_2017may.py b/examples/pybullet/gym/pybullet_envs/examples/enjoy_TF_InvertedDoublePendulumBulletEnv_v0_2017may.py index e0227fe1da..5323994894 100644 --- a/examples/pybullet/gym/pybullet_envs/examples/enjoy_TF_InvertedDoublePendulumBulletEnv_v0_2017may.py +++ b/examples/pybullet/gym/pybullet_envs/examples/enjoy_TF_InvertedDoublePendulumBulletEnv_v0_2017may.py @@ -40,12 +40,12 @@ def main(): frame = 0 score = 0 restart_delay = 0 - obs = env.reset() + obs, _ = env.reset() while 1: time.sleep(1. / 60.) a = pi.act(obs) - obs, r, done, _ = env.step(a) + obs, r, done, _, _ = env.step(a) score += r frame += 1 still_open = env.render(mode="human") diff --git a/examples/pybullet/gym/pybullet_envs/examples/enjoy_TF_InvertedPendulumBulletEnv_v0_2017may.py b/examples/pybullet/gym/pybullet_envs/examples/enjoy_TF_InvertedPendulumBulletEnv_v0_2017may.py index 82ed973298..2dc000882a 100644 --- a/examples/pybullet/gym/pybullet_envs/examples/enjoy_TF_InvertedPendulumBulletEnv_v0_2017may.py +++ b/examples/pybullet/gym/pybullet_envs/examples/enjoy_TF_InvertedPendulumBulletEnv_v0_2017may.py @@ -40,12 +40,12 @@ def main(): frame = 0 score = 0 restart_delay = 0 - obs = env.reset() + obs, _ = env.reset() print("frame") while 1: time.sleep(1. / 60.) a = pi.act(obs) - obs, r, done, _ = env.step(a) + obs, r, done, _, _ = env.step(a) score += r frame += 1 still_open = env.render(mode="human") diff --git a/examples/pybullet/gym/pybullet_envs/examples/enjoy_TF_InvertedPendulumSwingupBulletEnv_v0_2017may.py b/examples/pybullet/gym/pybullet_envs/examples/enjoy_TF_InvertedPendulumSwingupBulletEnv_v0_2017may.py index fc612f393c..ac2da0f8cd 100644 --- a/examples/pybullet/gym/pybullet_envs/examples/enjoy_TF_InvertedPendulumSwingupBulletEnv_v0_2017may.py +++ b/examples/pybullet/gym/pybullet_envs/examples/enjoy_TF_InvertedPendulumSwingupBulletEnv_v0_2017may.py @@ -45,7 +45,7 @@ def main(): while 1: time.sleep(1. / 60.) 
a = pi.act(obs) - obs, r, done, _ = env.step(a) + obs, r, done, _, _ = env.step(a) score += r frame += 1 still_open = env.render(mode="human") diff --git a/examples/pybullet/gym/pybullet_envs/examples/enjoy_TF_Walker2DBulletEnv_v0_2017may.py b/examples/pybullet/gym/pybullet_envs/examples/enjoy_TF_Walker2DBulletEnv_v0_2017may.py index de71b9770d..20171c7e43 100644 --- a/examples/pybullet/gym/pybullet_envs/examples/enjoy_TF_Walker2DBulletEnv_v0_2017may.py +++ b/examples/pybullet/gym/pybullet_envs/examples/enjoy_TF_Walker2DBulletEnv_v0_2017may.py @@ -41,12 +41,12 @@ def main(): frame = 0 score = 0 restart_delay = 0 - obs = env.reset() + obs, _ = env.reset() while 1: time.sleep(1. / 60.) a = pi.act(obs) - obs, r, done, _ = env.step(a) + obs, r, done, _, _ = env.step(a) score += r frame += 1 diff --git a/examples/pybullet/gym/pybullet_envs/examples/kukaCamGymEnvTest.py b/examples/pybullet/gym/pybullet_envs/examples/kukaCamGymEnvTest.py index 6cf9909cb2..e9a69edd4b 100644 --- a/examples/pybullet/gym/pybullet_envs/examples/kukaCamGymEnvTest.py +++ b/examples/pybullet/gym/pybullet_envs/examples/kukaCamGymEnvTest.py @@ -10,7 +10,7 @@ def main(): - environment = KukaCamGymEnv(renders=True, isDiscrete=False) + environment = KukaCamGymEnv(render_mode=True, isDiscrete=False) motorsIds = [] #motorsIds.append(environment._p.addUserDebugParameter("posX",0.4,0.75,0.537)) @@ -33,7 +33,7 @@ def main(): for motorId in motorsIds: action.append(environment._p.readUserDebugParameter(motorId)) - state, reward, done, info = environment.step(action) + state, reward, done, _, info = environment.step(action) obs = environment.getExtendedObservation() diff --git a/examples/pybullet/gym/pybullet_envs/examples/kukaGymEnvTest.py b/examples/pybullet/gym/pybullet_envs/examples/kukaGymEnvTest.py index 818bdd0ab8..3091c77bc2 100644 --- a/examples/pybullet/gym/pybullet_envs/examples/kukaGymEnvTest.py +++ b/examples/pybullet/gym/pybullet_envs/examples/kukaGymEnvTest.py @@ -10,7 +10,7 @@ def main(): - environment = KukaGymEnv(renders=True, isDiscrete=False, maxSteps=10000000) + environment = KukaGymEnv(render_mode=True, isDiscrete=False, maxSteps=10000000) motorsIds = [] #motorsIds.append(environment._p.addUserDebugParameter("posX",0.4,0.75,0.537)) diff --git a/examples/pybullet/gym/pybullet_envs/examples/kukaGymEnvTest2.py b/examples/pybullet/gym/pybullet_envs/examples/kukaGymEnvTest2.py index d94702cf00..4bcb831dc7 100644 --- a/examples/pybullet/gym/pybullet_envs/examples/kukaGymEnvTest2.py +++ b/examples/pybullet/gym/pybullet_envs/examples/kukaGymEnvTest2.py @@ -10,7 +10,7 @@ def main(): - environment = KukaGymEnv(renders=True, isDiscrete=False, maxSteps=10000000) + environment = KukaGymEnv(render_mode=True, isDiscrete=False, maxSteps=10000000) motorsIds = [] #motorsIds.append(environment._p.addUserDebugParameter("posX",-1,1,0)) @@ -31,7 +31,7 @@ def main(): for motorId in motorsIds: action.append(environment._p.readUserDebugParameter(motorId)) - state, reward, done, info = environment.step(action) + state, reward, done, _, info = environment.step(action) obs = environment.getExtendedObservation() diff --git a/examples/pybullet/gym/pybullet_envs/examples/minitaur_gym_env_example.py b/examples/pybullet/gym/pybullet_envs/examples/minitaur_gym_env_example.py index 06337da409..ac22150aa5 100644 --- a/examples/pybullet/gym/pybullet_envs/examples/minitaur_gym_env_example.py +++ b/examples/pybullet/gym/pybullet_envs/examples/minitaur_gym_env_example.py @@ -18,7 +18,7 @@ def ResetPoseExample(): """An example that the minitaur stands 
still using the reset pose.""" steps = 1000 randomizer = (minitaur_env_randomizer.MinitaurEnvRandomizer()) - environment = minitaur_gym_env.MinitaurBulletEnv(render=True, + environment = minitaur_gym_env.MinitaurBulletEnv(render_mode=True, leg_model_enabled=False, motor_velocity_limit=np.inf, pd_control_enabled=True, @@ -28,7 +28,7 @@ def ResetPoseExample(): hard_reset=False) action = [math.pi / 2] * 8 for _ in range(steps): - _, _, done, _ = environment.step(action) + _, _, done, _, _ = environment.step(action) if done: break environment.reset() @@ -41,7 +41,7 @@ def MotorOverheatExample(): torques. The overheat protection will be triggered in ~1 sec. """ - environment = minitaur_gym_env.MinitaurBulletEnv(render=True, + environment = minitaur_gym_env.MinitaurBulletEnv(render_mode=True, leg_model_enabled=False, motor_velocity_limit=np.inf, motor_overheat_protection=True, @@ -63,7 +63,7 @@ def MotorOverheatExample(): current_row = [t] current_row.extend(action) - observation, _, _, _ = environment.step(action) + observation, _, _, _, _ = environment.step(action) current_row.extend(observation.tolist()) actions_and_observations.append(current_row) environment.reset() @@ -76,7 +76,7 @@ def SineStandExample(): periodically in both simulation and experiment. We compare the measured motor trajectories, torques and gains. """ - environment = minitaur_gym_env.MinitaurBulletEnv(render=True, + environment = minitaur_gym_env.MinitaurBulletEnv(render_mode=True, leg_model_enabled=False, motor_velocity_limit=np.inf, motor_overheat_protection=True, @@ -99,7 +99,7 @@ def SineStandExample(): action = [math.sin(speed * t) * amplitude + math.pi / 2] * 8 current_row.extend(action) - observation, _, _, _ = environment.step(action) + observation, _, _, _, _ = environment.step(action) current_row.extend(observation.tolist()) actions_and_observations.append(current_row) @@ -109,7 +109,7 @@ def SineStandExample(): def SinePolicyExample(): """An example of minitaur walking with a sine gait.""" randomizer = (minitaur_env_randomizer.MinitaurEnvRandomizer()) - environment = minitaur_gym_env.MinitaurBulletEnv(render=True, + environment = minitaur_gym_env.MinitaurBulletEnv(render_mode=True, motor_velocity_limit=np.inf, pd_control_enabled=True, hard_reset=False, @@ -141,7 +141,7 @@ def SinePolicyExample(): a3 = math.sin(t * speed) * amplitude2 a4 = math.sin(t * speed + math.pi) * amplitude2 action = [a1, a2, a2, a1, a3, a4, a4, a3] - _, reward, done, _ = environment.step(action) + _, reward, done, _, _ = environment.step(action) sum_reward += reward if done: break diff --git a/examples/pybullet/gym/pybullet_envs/examples/racecarGymEnvTest.py b/examples/pybullet/gym/pybullet_envs/examples/racecarGymEnvTest.py index 3e4488e304..67e2fbee8e 100644 --- a/examples/pybullet/gym/pybullet_envs/examples/racecarGymEnvTest.py +++ b/examples/pybullet/gym/pybullet_envs/examples/racecarGymEnvTest.py @@ -10,7 +10,7 @@ def main(): - environment = RacecarGymEnv(renders=True, isDiscrete=isDiscrete) + environment = RacecarGymEnv(render_mode=True, isDiscrete=isDiscrete) environment.reset() targetVelocitySlider = environment._p.addUserDebugParameter("wheelVelocity", -1, 1, 0) @@ -36,7 +36,7 @@ def main(): action = discreteAction else: action = [targetVelocity, steeringAngle] - state, reward, done, info = environment.step(action) + state, reward, done, _, info = environment.step(action) obs = environment.getExtendedObservation() print("obs") print(obs) diff --git a/examples/pybullet/gym/pybullet_envs/examples/racecarZEDGymEnvTest.py 
b/examples/pybullet/gym/pybullet_envs/examples/racecarZEDGymEnvTest.py index 4d529c4765..91de4e4c19 100644 --- a/examples/pybullet/gym/pybullet_envs/examples/racecarZEDGymEnvTest.py +++ b/examples/pybullet/gym/pybullet_envs/examples/racecarZEDGymEnvTest.py @@ -10,7 +10,7 @@ def main(): - environment = RacecarZEDGymEnv(renders=True, isDiscrete=isDiscrete) + environment = RacecarZEDGymEnv(render_mode=True, isDiscrete=isDiscrete) targetVelocitySlider = environment._p.addUserDebugParameter("wheelVelocity", -1, 1, 0) steeringSlider = environment._p.addUserDebugParameter("steering", -1, 1, 0) @@ -36,7 +36,7 @@ def main(): else: action = [targetVelocity, steeringAngle] - state, reward, done, info = environment.step(action) + state, reward, done, _, info = environment.step(action) obs = environment.getExtendedObservation() #print("obs") #print(obs) diff --git a/examples/pybullet/gym/pybullet_envs/examples/testEnv.py b/examples/pybullet/gym/pybullet_envs/examples/testEnv.py index 6c550fe4fa..97e08655f5 100644 --- a/examples/pybullet/gym/pybullet_envs/examples/testEnv.py +++ b/examples/pybullet/gym/pybullet_envs/examples/testEnv.py @@ -49,7 +49,7 @@ def test(args): print("action=") print(action) for i in range(args.steps): - obs, rewards, done, _ = env.step(action) + obs, rewards, done, _, _ = env.step(action) if (args.rgb): print(env.render(mode="rgb_array")) print("obs=") diff --git a/examples/pybullet/gym/pybullet_envs/gym_locomotion_envs.py b/examples/pybullet/gym/pybullet_envs/gym_locomotion_envs.py index ba22fa61c9..3d58be2d2c 100644 --- a/examples/pybullet/gym/pybullet_envs/gym_locomotion_envs.py +++ b/examples/pybullet/gym/pybullet_envs/gym_locomotion_envs.py @@ -7,13 +7,13 @@ class WalkerBaseBulletEnv(MJCFBaseBulletEnv): - def __init__(self, robot, render=False): + def __init__(self, robot, render_mode=False): # print("WalkerBase::__init__ start") self.camera_x = 0 self.walk_target_x = 1e3 # kilometer away self.walk_target_y = 0 self.stateId = -1 - MJCFBaseBulletEnv.__init__(self, robot, render) + MJCFBaseBulletEnv.__init__(self, robot, render_mode) def create_single_player_scene(self, bullet_client): @@ -123,7 +123,7 @@ def step(self, a): self.HUD(state, a, done) self.reward += sum(self.rewards) - return state, sum(self.rewards), bool(done), {} + return state, sum(self.rewards), bool(done), False, {} def camera_adjust(self): x, y, z = self.robot.body_real_xyz @@ -134,23 +134,23 @@ def camera_adjust(self): class HopperBulletEnv(WalkerBaseBulletEnv): - def __init__(self, render=False): + def __init__(self, render_mode=False): self.robot = Hopper() - WalkerBaseBulletEnv.__init__(self, self.robot, render) + WalkerBaseBulletEnv.__init__(self, self.robot, render_mode) class Walker2DBulletEnv(WalkerBaseBulletEnv): - def __init__(self, render=False): + def __init__(self, render_mode=False): self.robot = Walker2D() - WalkerBaseBulletEnv.__init__(self, self.robot, render) + WalkerBaseBulletEnv.__init__(self, self.robot, render_mode) class HalfCheetahBulletEnv(WalkerBaseBulletEnv): - def __init__(self, render=False): + def __init__(self, render_mode=False): self.robot = HalfCheetah() - WalkerBaseBulletEnv.__init__(self, self.robot, render) + WalkerBaseBulletEnv.__init__(self, self.robot, render_mode) def _isDone(self): return False @@ -158,19 +158,19 @@ def _isDone(self): class AntBulletEnv(WalkerBaseBulletEnv): - def __init__(self, render=False): + def __init__(self, render_mode=False): self.robot = Ant() - WalkerBaseBulletEnv.__init__(self, self.robot, render) + WalkerBaseBulletEnv.__init__(self, 
self.robot, render_mode) class HumanoidBulletEnv(WalkerBaseBulletEnv): - def __init__(self, robot=None, render=False): + def __init__(self, robot=None, render_mode=False): if robot is None: self.robot = Humanoid() else: self.robot = robot - WalkerBaseBulletEnv.__init__(self, self.robot, render) + WalkerBaseBulletEnv.__init__(self, self.robot, render_mode) self.electricity_cost = 4.25 * WalkerBaseBulletEnv.electricity_cost self.stall_torque_cost = 4.25 * WalkerBaseBulletEnv.stall_torque_cost @@ -178,9 +178,9 @@ def __init__(self, robot=None, render=False): class HumanoidFlagrunBulletEnv(HumanoidBulletEnv): random_yaw = True - def __init__(self, render=False): + def __init__(self, render_mode=False): self.robot = HumanoidFlagrun() - HumanoidBulletEnv.__init__(self, self.robot, render) + HumanoidBulletEnv.__init__(self, self.robot, render_mode) def create_single_player_scene(self, bullet_client): s = HumanoidBulletEnv.create_single_player_scene(self, bullet_client) @@ -191,10 +191,10 @@ def create_single_player_scene(self, bullet_client): class HumanoidFlagrunHarderBulletEnv(HumanoidBulletEnv): random_lean = True # can fall on start - def __init__(self, render=False): + def __init__(self, render_mode=False): self.robot = HumanoidFlagrunHarder() self.electricity_cost /= 4 # don't care that much about electricity, just stand up! - HumanoidBulletEnv.__init__(self, self.robot, render) + HumanoidBulletEnv.__init__(self, self.robot, render_mode) def create_single_player_scene(self, bullet_client): s = HumanoidBulletEnv.create_single_player_scene(self, bullet_client) diff --git a/examples/pybullet/gym/pybullet_envs/gym_manipulator_envs.py b/examples/pybullet/gym/pybullet_envs/gym_manipulator_envs.py index 9fef7e615a..de17614563 100644 --- a/examples/pybullet/gym/pybullet_envs/gym_manipulator_envs.py +++ b/examples/pybullet/gym/pybullet_envs/gym_manipulator_envs.py @@ -6,9 +6,9 @@ class ReacherBulletEnv(MJCFBaseBulletEnv): - def __init__(self, render=False): + def __init__(self, render_mode=False): self.robot = Reacher() - MJCFBaseBulletEnv.__init__(self, self.robot, render) + MJCFBaseBulletEnv.__init__(self, self.robot, render_mode) def create_single_player_scene(self, bullet_client): return SingleRobotEmptyScene(bullet_client, gravity=0.0, timestep=0.0165, frame_skip=1) @@ -35,7 +35,7 @@ def step(self, a): float(stuck_joint_cost) ] self.HUD(state, a, False) - return state, sum(self.rewards), False, {} + return state, sum(self.rewards), False, False, {} def camera_adjust(self): x, y, z = self.robot.fingertip.pose().xyz() @@ -46,9 +46,9 @@ def camera_adjust(self): class PusherBulletEnv(MJCFBaseBulletEnv): - def __init__(self, render=False): + def __init__(self, render_mode=False): self.robot = Pusher() - MJCFBaseBulletEnv.__init__(self, self.robot, render) + MJCFBaseBulletEnv.__init__(self, self.robot, render_mode) def create_single_player_scene(self, bullet_client): return SingleRobotEmptyScene(bullet_client, gravity=9.81, timestep=0.0020, frame_skip=5) @@ -91,7 +91,7 @@ def step(self, a): float(stuck_joint_cost) ] self.HUD(state, a, False) - return state, sum(self.rewards), False, {} + return state, sum(self.rewards), False, False, {} def calc_potential(self): return -100 * np.linalg.norm(self.to_target_vec) @@ -105,9 +105,9 @@ def camera_adjust(self): class StrikerBulletEnv(MJCFBaseBulletEnv): - def __init__(self, render=False): + def __init__(self, render_mode=False): self.robot = Striker() - MJCFBaseBulletEnv.__init__(self, self.robot, render) + MJCFBaseBulletEnv.__init__(self, self.robot, 
render_mode) self._striked = False self._min_strike_dist = np.inf self.strike_threshold = 0.1 @@ -172,7 +172,7 @@ def step(self, a): float(stuck_joint_cost), 3 * reward_dist, 0.1 * reward_ctrl, 0.5 * reward_near ] self.HUD(state, a, False) - return state, sum(self.rewards), False, {} + return state, sum(self.rewards), False, False, {} def calc_potential(self): return -100 * np.linalg.norm(self.to_target_vec) @@ -186,9 +186,9 @@ def camera_adjust(self): class ThrowerBulletEnv(MJCFBaseBulletEnv): - def __init__(self, render=False): + def __init__(self, render_mode=False): self.robot = Thrower() - MJCFBaseBulletEnv.__init__(self, self.robot, render) + MJCFBaseBulletEnv.__init__(self, self.robot, render_mode) def create_single_player_scene(self, bullet_client): return SingleRobotEmptyScene(bullet_client, gravity=0.0, timestep=0.0020, frame_skip=5) @@ -245,7 +245,7 @@ def step(self, a): float(stuck_joint_cost), reward_dist, 0.002 * reward_ctrl ] self.HUD(state, a, False) - return state, sum(self.rewards), False, {} + return state, sum(self.rewards), False, False, {} def camera_adjust(self): x, y, z = self.robot.fingertip.pose().xyz() diff --git a/examples/pybullet/gym/pybullet_envs/gym_pendulum_envs.py b/examples/pybullet/gym/pybullet_envs/gym_pendulum_envs.py index 310874de82..0845ecbfae 100644 --- a/examples/pybullet/gym/pybullet_envs/gym_pendulum_envs.py +++ b/examples/pybullet/gym/pybullet_envs/gym_pendulum_envs.py @@ -40,7 +40,7 @@ def step(self, a): done = np.abs(self.robot.theta) > .2 self.rewards = [float(reward)] self.HUD(state, a, done) - return state, sum(self.rewards), done, {} + return state, sum(self.rewards), done, False, {} def camera_adjust(self): self.camera.move_and_look_at(0, 1.2, 1.0, 0, 0, 0.5) @@ -86,7 +86,7 @@ def step(self, a): done = self.robot.pos_y + 0.3 <= 1 self.rewards = [float(alive_bonus), float(-dist_penalty), float(-vel_penalty)] self.HUD(state, a, done) - return state, sum(self.rewards), done, {} + return state, sum(self.rewards), done, False, {} def camera_adjust(self): self.camera.move_and_look_at(0, 1.2, 1.2, 0, 0, 0.5) diff --git a/examples/pybullet/gym/pybullet_envs/minitaur/actuatornet/minitaur_raibert_controller_example.py b/examples/pybullet/gym/pybullet_envs/minitaur/actuatornet/minitaur_raibert_controller_example.py index 39108d003e..069754a515 100644 --- a/examples/pybullet/gym/pybullet_envs/minitaur/actuatornet/minitaur_raibert_controller_example.py +++ b/examples/pybullet/gym/pybullet_envs/minitaur/actuatornet/minitaur_raibert_controller_example.py @@ -40,7 +40,7 @@ def main(argv): motor_kd=FLAGS.motor_kd, remove_default_joint_damping=True, leg_model_enabled=False, - render=True, + render_mode=True, on_rack=False, accurate_motor_model_enabled=True, log_path=FLAGS.log_path) diff --git a/examples/pybullet/gym/pybullet_envs/minitaur/agents/baseline_controller/locomotion_controller_in_scenario_set_example.py b/examples/pybullet/gym/pybullet_envs/minitaur/agents/baseline_controller/locomotion_controller_in_scenario_set_example.py index 71fefa7b56..ef32a959d2 100644 --- a/examples/pybullet/gym/pybullet_envs/minitaur/agents/baseline_controller/locomotion_controller_in_scenario_set_example.py +++ b/examples/pybullet/gym/pybullet_envs/minitaur/agents/baseline_controller/locomotion_controller_in_scenario_set_example.py @@ -162,7 +162,7 @@ def _test_stability(max_time=5, render=False, test_generator=None): controller.update() hybrid_action = controller.get_action() - _, _, done, _ = env.step(hybrid_action) + _, _, done, _, _ = env.step(hybrid_action) if 
done: break print(f"Scene name: flat ground. Random push: {FLAGS.add_random_push}. " diff --git a/examples/pybullet/gym/pybullet_envs/minitaur/agents/tools/batch_env.py b/examples/pybullet/gym/pybullet_envs/minitaur/agents/tools/batch_env.py index a2971bc4d1..caab5e8a88 100644 --- a/examples/pybullet/gym/pybullet_envs/minitaur/agents/tools/batch_env.py +++ b/examples/pybullet/gym/pybullet_envs/minitaur/agents/tools/batch_env.py @@ -87,12 +87,12 @@ def step(self, action): else: transitions = [env.step(action, blocking=False) for env, action in zip(self._envs, actions)] transitions = [transition() for transition in transitions] - observs, rewards, dones, infos = zip(*transitions) + observs, rewards, dones, _, infos = zip(*transitions) observ = np.stack(observs) reward = np.stack(rewards) done = np.stack(dones) info = tuple(infos) - return observ, reward, done, info + return observ, reward, done, _, info def reset(self, indices=None): """Reset the environment and convert the resulting observation. @@ -111,7 +111,7 @@ def reset(self, indices=None): observs = [self._envs[index].reset(blocking=False) for index in indices] observs = [observ() for observ in observs] observ = np.stack(observs) - return observ + return observ, {} def close(self): """Send close messages to the external process and join them.""" diff --git a/examples/pybullet/gym/pybullet_envs/minitaur/agents/tools/mock_environment.py b/examples/pybullet/gym/pybullet_envs/minitaur/agents/tools/mock_environment.py index 9db14d56cd..9b931d23e7 100644 --- a/examples/pybullet/gym/pybullet_envs/minitaur/agents/tools/mock_environment.py +++ b/examples/pybullet/gym/pybullet_envs/minitaur/agents/tools/mock_environment.py @@ -70,7 +70,7 @@ def step(self, action): reward = self._current_reward() done = self.steps[-1] >= self.durations[-1] info = {} - return observ, reward, done, info + return observ, reward, done, False, info def reset(self): duration = self._random.randint(self._min_duration, self._max_duration + 1) diff --git a/examples/pybullet/gym/pybullet_envs/minitaur/agents/tools/wrappers.py b/examples/pybullet/gym/pybullet_envs/minitaur/agents/tools/wrappers.py index 829c48a97f..09c595d04d 100644 --- a/examples/pybullet/gym/pybullet_envs/minitaur/agents/tools/wrappers.py +++ b/examples/pybullet/gym/pybullet_envs/minitaur/agents/tools/wrappers.py @@ -41,11 +41,11 @@ def __getattr__(self, name): def step(self, action): if self._done: - observ, reward, done, info = self._env.reset(), 0.0, False, {} + observ, reward, done, _, info = self._env.reset(), 0.0, False, False, {} else: - observ, reward, done, info = self._env.step(action) + observ, reward, done, _, info = self._env.step(action) self._done = done - return observ, reward, done, info + return observ, reward, done, _, info def reset(self): self._done = False @@ -67,10 +67,10 @@ def step(self, action): total_reward = 0 current_step = 0 while current_step < self._amount and not done: - observ, reward, done, info = self._env.step(action) + observ, reward, done, _, info = self._env.step(action) total_reward += reward current_step += 1 - return observ, total_reward, done, info + return observ, total_reward, done, _, info class RandomStart(object): @@ -88,11 +88,11 @@ def reset(self): random_steps = np.random.randint(0, self._max_steps) for _ in range(random_steps): action = self._env.action_space.sample() - observ, unused_reward, done, unused_info = self._env.step(action) + observ, unused_reward, done, _, unused_info = self._env.step(action) if done: tf.logging.warning('Episode ended during
random start.') return self.reset() - return observ + return observ, {} class FrameHistory(object): @@ -136,17 +136,17 @@ def observation_space(self): return gym.spaces.Box(low, high) def step(self, action): - observ, reward, done, info = self._env.step(action) + observ, reward, done, _, info = self._env.step(action) self._step += 1 self._buffer[self._step % self._capacity] = observ observ = self._select_frames() - return observ, reward, done, info + return observ, reward, done, _, info def reset(self): observ = self._env.reset() self._buffer = np.repeat(observ[None, ...], self._capacity, 0) self._step = 0 - return self._select_frames() + return self._select_frames(), {} def _select_frames(self): indices = [(self._step - index) % self._capacity for index in self._past_indices] @@ -174,15 +174,15 @@ def observation_space(self): return gym.spaces.Box(low, high) def step(self, action): - observ, reward, done, info = self._env.step(action) + observ, reward, done, _, info = self._env.step(action) delta = observ - self._last self._last = observ - return delta, reward, done, info + return delta, reward, done, _, info def reset(self): observ = self._env.reset() self._last = observ - return observ + return observ, {} class RangeNormalize(object): @@ -223,16 +223,16 @@ def action_space(self): def step(self, action): if self._should_normalize_action: action = self._denormalize_action(action) - observ, reward, done, info = self._env.step(action) + observ, reward, done, _, info = self._env.step(action) if self._should_normalize_observ: observ = self._normalize_observ(observ) - return observ, reward, done, info + return observ, reward, done, _, info def reset(self): observ = self._env.reset() if self._should_normalize_observ: observ = self._normalize_observ(observ) - return observ + return observ, {} def _denormalize_action(self, action): min_ = self._env.action_space.low @@ -284,12 +284,12 @@ def __getattr__(self, name): def step(self, action): if self._step is None: raise RuntimeError('Must reset environment.') - observ, reward, done, info = self._env.step(action) + observ, reward, done, _, info = self._env.step(action) self._step += 1 if self._step >= self._duration: done = True self._step = None - return observ, reward, done, info + return observ, reward, done, _, info def reset(self): self._step = 0 @@ -497,10 +497,10 @@ def step(self, action): Returns: Converted observation, converted reward, done flag, and info object. """ - observ, reward, done, info = self._env.step(action) + observ, reward, done, _, info = self._env.step(action) observ = self._convert_observ(observ) reward = self._convert_reward(reward) - return observ, reward, done, info + return observ, reward, done, _, info def reset(self): """Reset the environment and convert the resulting observation. @@ -510,7 +510,7 @@ def reset(self): """ observ = self._env.reset() observ = self._convert_observ(observ) - return observ + return observ, {} def _convert_observ(self, observ): """Convert the observation to 32 bits. 
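Note: the agents/tools wrapper hunks above all converge on the same pass-through shape, where step forwards a five-element result and reset returns an (observation, info) pair. The sketch below is illustrative only and is not part of the patch; the class name is made up, and it assumes the wrapped env already follows the Gym >= 0.26 style API.

class PassThroughWrapper(object):
  """Minimal sketch of the five-tuple pass-through pattern (illustrative)."""

  def __init__(self, env):
    self._env = env

  def __getattr__(self, name):
    return getattr(self._env, name)

  def step(self, action):
    # New-style envs return (obs, reward, terminated, truncated, info).
    observ, reward, done, truncated, info = self._env.step(action)
    return observ, reward, done, truncated, info

  def reset(self, **kwargs):
    # New-style resets return (obs, info) instead of a bare observation.
    observ, info = self._env.reset(**kwargs)
    return observ, info

Most wrappers in this patch drop the inner truncated flag and return a hard-coded False; the sketch keeps it only to show where the extra element sits in the tuple.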
diff --git a/examples/pybullet/gym/pybullet_envs/minitaur/envs/minitaur_alternating_legs_env.py b/examples/pybullet/gym/pybullet_envs/minitaur/envs/minitaur_alternating_legs_env.py index fa177b7b55..102290146f 100644 --- a/examples/pybullet/gym/pybullet_envs/minitaur/envs/minitaur_alternating_legs_env.py +++ b/examples/pybullet/gym/pybullet_envs/minitaur/envs/minitaur_alternating_legs_env.py @@ -43,7 +43,7 @@ def __init__(self, motor_kp=1.0, motor_kd=0.02, remove_default_joint_damping=False, - render=False, + render_mode=False, num_steps_to_log=1000, env_randomizer=None, log_path=None): @@ -65,7 +65,7 @@ def __init__(self, motor_kp: The P gain of the motor. motor_kd: The D gain of the motor. remove_default_joint_damping: Whether to remove the default joint damping. - render: Whether to render the simulation. + render_mode: Whether to render the simulation. num_steps_to_log: The max number of control steps in one episode. If the number of steps is over num_steps_to_log, the environment will still be running, but only first num_steps_to_log will be recorded in logging. @@ -89,7 +89,7 @@ def __init__(self, control_latency=control_latency, pd_latency=pd_latency, on_rack=on_rack, - render=render, + render_mode=render_mode, num_steps_to_log=num_steps_to_log, env_randomizer=env_randomizer, log_path=log_path, diff --git a/examples/pybullet/gym/pybullet_envs/minitaur/envs/minitaur_alternating_legs_env_example.py b/examples/pybullet/gym/pybullet_envs/minitaur/envs/minitaur_alternating_legs_env_example.py index da308e2fe0..f14653890d 100644 --- a/examples/pybullet/gym/pybullet_envs/minitaur/envs/minitaur_alternating_legs_env_example.py +++ b/examples/pybullet/gym/pybullet_envs/minitaur/envs/minitaur_alternating_legs_env_example.py @@ -76,7 +76,7 @@ def hand_tuned_balance_example(log_path=None): randomizer = randomizer_lib.MinitaurAlternatingLegsEnvRandomizer() environment = minitaur_alternating_legs_env.MinitaurAlternatingLegsEnv( urdf_version=minitaur_gym_env.DERPY_V0_URDF_VERSION, - render=True, + render_mode=True, num_steps_to_log=steps, pd_latency=0.002, control_latency=0.02, @@ -93,7 +93,7 @@ def hand_tuned_balance_example(log_path=None): # Sleep to prevent serial buffer overflow on microcontroller. time.sleep(0.002) action = hand_tuned_agent(observation, environment.minitaur.GetTimeSinceReset()) - observation, reward, done, _ = environment.step(action) + observation, reward, done, _, _ = environment.step(action) sum_reward += reward if done: break diff --git a/examples/pybullet/gym/pybullet_envs/minitaur/envs/minitaur_ball_gym_env.py b/examples/pybullet/gym/pybullet_envs/minitaur/envs/minitaur_ball_gym_env.py index b40d6f0d43..3559c15d54 100644 --- a/examples/pybullet/gym/pybullet_envs/minitaur/envs/minitaur_ball_gym_env.py +++ b/examples/pybullet/gym/pybullet_envs/minitaur/envs/minitaur_ball_gym_env.py @@ -38,7 +38,7 @@ def __init__(self, pd_control_enabled=False, leg_model_enabled=True, on_rack=False, - render=False): + render_mode=False): """Initialize the minitaur and ball gym environment. Args: @@ -50,14 +50,14 @@ def __init__(self, on_rack: Whether to place the minitaur on rack. This is only used to debug the walking gait. In this mode, the minitaur's base is hanged midair so that its walking gait is clearer to visualize. - render: Whether to render the simulation. + render_mode: Whether to render the simulation. 
""" super(MinitaurBallGymEnv, self).__init__(urdf_root=urdf_root, self_collision_enabled=self_collision_enabled, pd_control_enabled=pd_control_enabled, leg_model_enabled=leg_model_enabled, on_rack=on_rack, - render=render) + render_mode=render_mode) self._cam_dist = 2.0 self._cam_yaw = -70 self._cam_pitch = -30 diff --git a/examples/pybullet/gym/pybullet_envs/minitaur/envs/minitaur_ball_gym_env_example.py b/examples/pybullet/gym/pybullet_envs/minitaur/envs/minitaur_ball_gym_env_example.py index b0d7d4807b..3379221162 100644 --- a/examples/pybullet/gym/pybullet_envs/minitaur/envs/minitaur_ball_gym_env_example.py +++ b/examples/pybullet/gym/pybullet_envs/minitaur/envs/minitaur_ball_gym_env_example.py @@ -10,19 +10,19 @@ def FollowBallManualPolicy(): """An example of a minitaur following a ball.""" - env = minitaur_ball_gym_env.MinitaurBallGymEnv(render=True, + env = minitaur_ball_gym_env.MinitaurBallGymEnv(render_mode=True, pd_control_enabled=True, on_rack=False) - observation = env.reset() + observation, _ = env.reset() sum_reward = 0 steps = 100000 for _ in range(steps): action = [math.tanh(observation[0] * 4)] - observation, reward, done, _ = env.step(action) + observation, reward, done, _, _ = env.step(action) sum_reward += reward if done: tf.logging.info("Return is {}".format(sum_reward)) - observation = env.reset() + observation, _ = env.reset() sum_reward = 0 diff --git a/examples/pybullet/gym/pybullet_envs/minitaur/envs/minitaur_extended_env.py b/examples/pybullet/gym/pybullet_envs/minitaur/envs/minitaur_extended_env.py index eda9b31951..f4dcf9269d 100644 --- a/examples/pybullet/gym/pybullet_envs/minitaur/envs/minitaur_extended_env.py +++ b/examples/pybullet/gym/pybullet_envs/minitaur/envs/minitaur_extended_env.py @@ -162,12 +162,12 @@ def step(self, action): self._past_actions[self._counter] = action self._counter += 1 - next_obs, _, done, info = super(MinitaurExtendedEnv, self).step(action) + next_obs, _, done, _, info = super(MinitaurExtendedEnv, self).step(action) reward = self.reward() info.update(base_reward=reward) - return next_obs, reward, done, info + return next_obs, reward, done, False, info def terminate(self): """The helper function to terminate the environment.""" diff --git a/examples/pybullet/gym/pybullet_envs/minitaur/envs/minitaur_four_leg_stand_env.py b/examples/pybullet/gym/pybullet_envs/minitaur/envs/minitaur_four_leg_stand_env.py index 1008f80620..0ec0953f5e 100644 --- a/examples/pybullet/gym/pybullet_envs/minitaur/envs/minitaur_four_leg_stand_env.py +++ b/examples/pybullet/gym/pybullet_envs/minitaur/envs/minitaur_four_leg_stand_env.py @@ -43,7 +43,7 @@ def __init__(self, on_rack=False, motor_kp=1.0, motor_kd=0.02, - render=False, + render_mode=False, env_randomizer=None, use_angular_velocity_in_observation=False, use_motor_angle_in_observation=False, @@ -79,7 +79,7 @@ class remains the same, some code changes (e.g. the constraint location that its walking gait is clearer to visualize. motor_kp: The P gain of the motor. motor_kd: The D gain of the motor. - render: Whether to render the simulation. + render_mode: Whether to render the simulation. env_randomizer: An instance (or a list) of EnvRanzomier(s) that can randomize the environment during when env.reset() is called and add perturbations when env.step() is called. @@ -111,7 +111,7 @@ class remains the same, some code changes (e.g. 
the constraint location control_latency=control_latency, pd_latency=pd_latency, on_rack=on_rack, - render=render, + render_mode=render_mode, env_randomizer=env_randomizer, reflection=False, log_path=log_path) @@ -146,7 +146,7 @@ def reset(self): self._pybullet_client.resetBasePositionAndOrientation(0, [0, 0, 0], [0, 0, 0, 1]) super(MinitaurFourLegStandEnv, self).reset(initial_motor_angles=initial_motor_angles, reset_duration=0.5) - return self._get_observation() + return self._get_observation(), {} def step(self, action): """Step forward the simulation, given the action. @@ -214,7 +214,7 @@ def step(self, action): self._env_step_counter) if done: self.minitaur.Terminate() - return np.array(self._get_observation()), reward, done, {} + return np.array(self._get_observation()), reward, done, False, {} def _convert_from_leg_model(self, leg_pose): motor_pose = np.zeros(NUM_MOTORS) diff --git a/examples/pybullet/gym/pybullet_envs/minitaur/envs/minitaur_four_leg_stand_env_example.py b/examples/pybullet/gym/pybullet_envs/minitaur/envs/minitaur_four_leg_stand_env_example.py index d4d92fd565..26a70b175f 100644 --- a/examples/pybullet/gym/pybullet_envs/minitaur/envs/minitaur_four_leg_stand_env_example.py +++ b/examples/pybullet/gym/pybullet_envs/minitaur/envs/minitaur_four_leg_stand_env_example.py @@ -35,7 +35,7 @@ def feed_forward_only_control_example(log_path=None): control_time_step=0.006, action_repeat=6, env_randomizer=None, - render=True) + render_mode=True) np.random.seed(100) avg_reward = 0 @@ -50,7 +50,7 @@ def feed_forward_only_control_example(log_path=None): action[1] = -upitch - uroll action[2] = upitch + uroll action[3] = -upitch + uroll - observation, reward, done, _ = environment.step(action) + observation, reward, done, _, _ = environment.step(action) sum_reward += reward if done: break diff --git a/examples/pybullet/gym/pybullet_envs/minitaur/envs/minitaur_gym_env.py b/examples/pybullet/gym/pybullet_envs/minitaur/envs/minitaur_gym_env.py index 608b8eab0d..9677f60473 100644 --- a/examples/pybullet/gym/pybullet_envs/minitaur/envs/minitaur_gym_env.py +++ b/examples/pybullet/gym/pybullet_envs/minitaur/envs/minitaur_gym_env.py @@ -89,7 +89,7 @@ def __init__(self, motor_overheat_protection=False, hard_reset=True, on_rack=False, - render=False, + render_mode=False, num_steps_to_log=1000, action_repeat=1, control_time_step=None, @@ -146,7 +146,7 @@ class remains the same, some code changes (e.g. the constraint location on_rack: Whether to place the minitaur on rack. This is only used to debug the walking gait. In this mode, the minitaur's base is hanged midair so that its walking gait is clearer to visualize. - render: Whether to render the simulation. + render_mode: Whether to render the simulation. num_steps_to_log: The max number of control steps in one episode that will be logged. If the number of steps is more than num_steps_to_log, the environment will still be running, but only first num_steps_to_log will @@ -191,7 +191,7 @@ class remains the same, some code changes (e.g. 
the constraint location self._objective_weights = [distance_weight, energy_weight, drift_weight, shake_weight] self._env_step_counter = 0 self._num_steps_to_log = num_steps_to_log - self._is_render = render + self._is_render = render_mode self._last_base_position = [0, 0, 0] self._distance_weight = distance_weight self._energy_weight = energy_weight @@ -303,7 +303,7 @@ def reset(self, initial_motor_angles=None, reset_duration=1.0): self._pybullet_client.resetDebugVisualizerCamera(self._cam_dist, self._cam_yaw, self._cam_pitch, [0, 0, 0]) self._pybullet_client.configureDebugVisualizer(self._pybullet_client.COV_ENABLE_RENDERING, 1) - return self._get_observation() + return self._get_observation(), {} def seed(self, seed=None): self.np_random, seed = seeding.np_random(seed) @@ -362,7 +362,7 @@ def step(self, action): self._env_step_counter += 1 if done: self.minitaur.Terminate() - return np.array(self._get_observation()), reward, done, {} + return np.array(self._get_observation()), reward, done, False, {} def render(self, mode="rgb_array", close=False): if mode != "rgb_array": diff --git a/examples/pybullet/gym/pybullet_envs/minitaur/envs/minitaur_gym_env_example.py b/examples/pybullet/gym/pybullet_envs/minitaur/envs/minitaur_gym_env_example.py index 9511b7280a..b31a4b9061 100644 --- a/examples/pybullet/gym/pybullet_envs/minitaur/envs/minitaur_gym_env_example.py +++ b/examples/pybullet/gym/pybullet_envs/minitaur/envs/minitaur_gym_env_example.py @@ -48,7 +48,7 @@ def ResetPoseExample(log_path=None): steps = 10000 environment = minitaur_gym_env.MinitaurGymEnv( urdf_version=minitaur_gym_env.DERPY_V0_URDF_VERSION, - render=True, + render_mode=True, leg_model_enabled=False, motor_velocity_limit=np.inf, pd_control_enabled=True, @@ -58,7 +58,7 @@ def ResetPoseExample(log_path=None): log_path=log_path) action = [math.pi / 2] * 8 for _ in range(steps): - _, _, done, _ = environment.step(action) + _, _, done, _, _ = environment.step(action) time.sleep(1. / 100.) if done: break @@ -77,7 +77,7 @@ def MotorOverheatExample(log_path=None): environment = minitaur_gym_env.MinitaurGymEnv( urdf_version=minitaur_gym_env.DERPY_V0_URDF_VERSION, - render=True, + render_mode=True, leg_model_enabled=False, motor_velocity_limit=np.inf, motor_overheat_protection=True, @@ -100,7 +100,7 @@ def MotorOverheatExample(log_path=None): current_row = [t] current_row.extend(action) - observation, _, _, _ = environment.step(action) + observation, _, _, _, _ = environment.step(action) current_row.extend(observation.tolist()) actions_and_observations.append(current_row) time.sleep(1. / 100.) @@ -123,7 +123,7 @@ def SineStandExample(log_path=None): """ environment = minitaur_gym_env.MinitaurGymEnv( urdf_version=minitaur_gym_env.RAINBOW_DASH_V0_URDF_VERSION, - render=True, + render_mode=True, leg_model_enabled=False, motor_velocity_limit=np.inf, motor_overheat_protection=True, @@ -147,7 +147,7 @@ def SineStandExample(log_path=None): action = [math.sin(speed * t) * amplitude + math.pi / 2] * 8 current_row.extend(action) - observation, _, _, _ = environment.step(action) + observation, _, _, _, _ = environment.step(action) current_row.extend(observation.tolist()) actions_and_observations.append(current_row) time.sleep(1. / 100.) 
@@ -165,7 +165,7 @@ def SinePolicyExample(log_path=None): """ environment = minitaur_gym_env.MinitaurGymEnv( urdf_version=minitaur_gym_env.DERPY_V0_URDF_VERSION, - render=True, + render_mode=True, motor_velocity_limit=np.inf, pd_control_enabled=True, hard_reset=False, @@ -197,7 +197,7 @@ def SinePolicyExample(log_path=None): a3 = math.sin(t * speed) * amplitude2 a4 = math.sin(t * speed + math.pi) * amplitude2 action = [a1, a2, a2, a1, a3, a4, a4, a3] - _, reward, done, _ = environment.step(action) + _, reward, done, _, _ = environment.step(action) time.sleep(1. / 100.) sum_reward += reward diff --git a/examples/pybullet/gym/pybullet_envs/minitaur/envs/minitaur_raibert_controller_example.py b/examples/pybullet/gym/pybullet_envs/minitaur/envs/minitaur_raibert_controller_example.py index 301f03569d..3a74ed828c 100644 --- a/examples/pybullet/gym/pybullet_envs/minitaur/envs/minitaur_raibert_controller_example.py +++ b/examples/pybullet/gym/pybullet_envs/minitaur/envs/minitaur_raibert_controller_example.py @@ -46,7 +46,7 @@ def main(argv): motor_kd=FLAGS.motor_kd, remove_default_joint_damping=True, leg_model_enabled=False, - render=True, + render_mode=True, on_rack=False, accurate_motor_model_enabled=True, log_path=FLAGS.log_path) diff --git a/examples/pybullet/gym/pybullet_envs/minitaur/envs/minitaur_randomize_terrain_gym_env_example.py b/examples/pybullet/gym/pybullet_envs/minitaur/envs/minitaur_randomize_terrain_gym_env_example.py index ad329136a2..0f19d5ef64 100644 --- a/examples/pybullet/gym/pybullet_envs/minitaur/envs/minitaur_randomize_terrain_gym_env_example.py +++ b/examples/pybullet/gym/pybullet_envs/minitaur/envs/minitaur_randomize_terrain_gym_env_example.py @@ -21,12 +21,12 @@ def ResetTerrainExample(): num_reset = 10 steps = 100 env = minitaur_randomize_terrain_gym_env.MinitaurRandomizeTerrainGymEnv( - render=True, leg_model_enabled=False, motor_velocity_limit=np.inf, pd_control_enabled=True) + render_mode=True, leg_model_enabled=False, motor_velocity_limit=np.inf, pd_control_enabled=True) action = [math.pi / 2] * 8 for _ in xrange(num_reset): env.reset() for _ in xrange(steps): - _, _, done, _ = env.step(action) + _, _, done, _, _ = env.step(action) if done: break @@ -34,7 +34,7 @@ def ResetTerrainExample(): def SinePolicyExample(): """An example of minitaur walking with a sine gait.""" env = minitaur_randomize_terrain_gym_env.MinitaurRandomizeTerrainGymEnv( - render=True, motor_velocity_limit=np.inf, pd_control_enabled=True, on_rack=False) + render_mode=True, motor_velocity_limit=np.inf, pd_control_enabled=True, on_rack=False) sum_reward = 0 steps = 200 amplitude_1_bound = 0.5 @@ -61,7 +61,7 @@ def SinePolicyExample(): a3 = math.sin(t * speed) * amplitude2 a4 = math.sin(t * speed + math.pi) * amplitude2 action = [a1, a2, a2, a1, a3, a4, a4, a3] - _, reward, _, _ = env.step(action) + _, reward, _, _, _ = env.step(action) sum_reward += reward diff --git a/examples/pybullet/gym/pybullet_envs/minitaur/envs/minitaur_reactive_env.py b/examples/pybullet/gym/pybullet_envs/minitaur/envs/minitaur_reactive_env.py index 7f899cb4a8..6e60f3dac6 100644 --- a/examples/pybullet/gym/pybullet_envs/minitaur/envs/minitaur_reactive_env.py +++ b/examples/pybullet/gym/pybullet_envs/minitaur/envs/minitaur_reactive_env.py @@ -47,7 +47,7 @@ def __init__(self, motor_kp=1.0, motor_kd=0.015, remove_default_joint_damping=True, - render=False, + render_mode=False, num_steps_to_log=1000, accurate_motor_model_enabled=True, use_angle_in_observation=True, @@ -74,7 +74,7 @@ def __init__(self, motor_kp: The P gain of 
the motor. motor_kd: The D gain of the motor. remove_default_joint_damping: Whether to remove the default joint damping. - render: Whether to render the simulation. + render_mode: Whether to render the simulation. num_steps_to_log: The max number of control steps in one episode. If the number of steps is over num_steps_to_log, the environment will still be running, but only first num_steps_to_log will be recorded in logging. @@ -103,7 +103,7 @@ def __init__(self, control_latency=control_latency, pd_latency=pd_latency, on_rack=on_rack, - render=render, + render_mode=render_mode, hard_reset=hard_reset, num_steps_to_log=num_steps_to_log, env_randomizer=env_randomizer, diff --git a/examples/pybullet/gym/pybullet_envs/minitaur/envs/minitaur_reactive_env_example.py b/examples/pybullet/gym/pybullet_envs/minitaur/envs/minitaur_reactive_env_example.py index d4f70a2eef..521818b7de 100644 --- a/examples/pybullet/gym/pybullet_envs/minitaur/envs/minitaur_reactive_env_example.py +++ b/examples/pybullet/gym/pybullet_envs/minitaur/envs/minitaur_reactive_env_example.py @@ -29,7 +29,7 @@ def main(argv): config = utility.load_config(LOG_DIR) policy_layers = config.policy_layers value_layers = config.value_layers - env = config.env(render=True) + env = config.env(render_mode=True) network = config.network with tf.Session() as sess: @@ -41,10 +41,10 @@ def main(argv): checkpoint=os.path.join(LOG_DIR, CHECKPOINT)) sum_reward = 0 - observation = env.reset() + observation, _ = env.reset() while True: action = agent.get_action([observation]) - observation, reward, done, _ = env.step(action[0]) + observation, reward, done, _, _ = env.step(action[0]) time.sleep(0.002) sum_reward += reward if done: diff --git a/examples/pybullet/gym/pybullet_envs/minitaur/envs/minitaur_stand_gym_env.py b/examples/pybullet/gym/pybullet_envs/minitaur/envs/minitaur_stand_gym_env.py index 446a3156a6..a0b22cbab2 100644 --- a/examples/pybullet/gym/pybullet_envs/minitaur/envs/minitaur_stand_gym_env.py +++ b/examples/pybullet/gym/pybullet_envs/minitaur/envs/minitaur_stand_gym_env.py @@ -38,7 +38,7 @@ def __init__(self, self_collision_enabled=True, motor_velocity_limit=np.inf, pd_control_enabled=False, - render=False): + render_mode=False): """Initialize the minitaur standing up gym environment. Args: @@ -48,7 +48,7 @@ def __init__(self, self_collision_enabled: Whether to enable self collision in the sim. motor_velocity_limit: The velocity limit of each motor. pd_control_enabled: Whether to use PD controller for each motor. - render: Whether to render the simulation. + render_mode: Whether to render the simulation. """ super(MinitaurStandGymEnv, self).__init__(urdf_root=urdf_root, action_repeat=action_repeat, @@ -58,7 +58,7 @@ def __init__(self, pd_control_enabled=pd_control_enabled, accurate_motor_model_enabled=True, motor_overheat_protection=True, - render=render) + render_mode=render_mode) # Set the action dimension to 1, and reset the action space. 
action_dim = 1 action_high = np.array([self._action_bound] * action_dim) diff --git a/examples/pybullet/gym/pybullet_envs/minitaur/envs/minitaur_stand_gym_env_example.py b/examples/pybullet/gym/pybullet_envs/minitaur/envs/minitaur_stand_gym_env_example.py index 16bae5f71b..9371885330 100644 --- a/examples/pybullet/gym/pybullet_envs/minitaur/envs/minitaur_stand_gym_env_example.py +++ b/examples/pybullet/gym/pybullet_envs/minitaur/envs/minitaur_stand_gym_env_example.py @@ -13,14 +13,14 @@ def StandUpExample(): """An example that the minitaur stands up.""" steps = 1000 - environment = minitaur_stand_gym_env.MinitaurStandGymEnv(render=True, + environment = minitaur_stand_gym_env.MinitaurStandGymEnv(render_mode=True, motor_velocity_limit=np.inf) action = [0.5] - _, _, done, _ = environment.step(action) + _, _, done, _, _ = environment.step(action) for t in range(steps): # A policy that oscillates between -1 and 1 action = [math.sin(t * math.pi * 0.01)] - _, _, done, _ = environment.step(action) + _, _, done, _, _ = environment.step(action) if done: break diff --git a/examples/pybullet/gym/pybullet_envs/minitaur/envs/minitaur_trotting_env.py b/examples/pybullet/gym/pybullet_envs/minitaur/envs/minitaur_trotting_env.py index e79e54926b..17135b9c08 100644 --- a/examples/pybullet/gym/pybullet_envs/minitaur/envs/minitaur_trotting_env.py +++ b/examples/pybullet/gym/pybullet_envs/minitaur/envs/minitaur_trotting_env.py @@ -36,7 +36,7 @@ def __init__(self, motor_kp=1.0, motor_kd=0.015, remove_default_joint_damping=True, - render=False, + render_mode=False, num_steps_to_log=1000, accurate_motor_model_enabled=True, use_signal_in_observation=False, @@ -68,7 +68,7 @@ def __init__(self, motor_kp: The P gain of the motor. motor_kd: The D gain of the motor. remove_default_joint_damping: Whether to remove the default joint damping. - render: Whether to render the simulation. + render_mode: Whether to render the simulation. num_steps_to_log: The max number of control steps in one episode. If the number of steps is over num_steps_to_log, the environment will still be running, but only first num_steps_to_log will be recorded in logging. @@ -119,7 +119,7 @@ def __init__(self, control_latency=control_latency, pd_latency=pd_latency, on_rack=on_rack, - render=render, + render_mode=render_mode, hard_reset=hard_reset, num_steps_to_log=num_steps_to_log, env_randomizer=env_randomizer, @@ -143,7 +143,7 @@ def reset(self): initial_motor_angles = self._convert_from_leg_model(self._init_pose) super(MinitaurTrottingEnv, self).reset(initial_motor_angles=initial_motor_angles, reset_duration=0.5) - return self._get_observation() + return self._get_observation(), {} def _convert_from_leg_model(self, leg_pose): """Converts leg space action into motor commands. 
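Note: the example scripts in this patch all migrate their rollout loops the same way. A minimal loop against the updated API looks roughly like the sketch below; it is illustrative only (the sine policy, eight-motor action and step budget are placeholders in the style of the examples above, not code from the patch).

import math

def run_episode(env, steps=1000):
  observation, _ = env.reset()  # reset now returns (observation, info)
  sum_reward = 0.0
  for t in range(steps):
    action = [math.sin(t * math.pi * 0.01)] * 8  # placeholder oscillating gait
    observation, reward, done, truncated, _ = env.step(action)
    sum_reward += reward
    if done or truncated:
      break
  return sum_reward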
diff --git a/examples/pybullet/gym/pybullet_envs/minitaur/envs/minitaur_trotting_env_example.py b/examples/pybullet/gym/pybullet_envs/minitaur/envs/minitaur_trotting_env_example.py index 604a902616..565da074ee 100644 --- a/examples/pybullet/gym/pybullet_envs/minitaur/envs/minitaur_trotting_env_example.py +++ b/examples/pybullet/gym/pybullet_envs/minitaur/envs/minitaur_trotting_env_example.py @@ -22,7 +22,7 @@ def main(argv): config = utility.load_config(LOG_DIR) policy_layers = config.policy_layers value_layers = config.value_layers - env = config.env(render=True) + env = config.env(render_mode=True) network = config.network with tf.Session() as sess: @@ -34,10 +34,10 @@ def main(argv): checkpoint=os.path.join(LOG_DIR, CHECKPOINT)) sum_reward = 0 - observation = env.reset() + observation, _ = env.reset() while True: action = agent.get_action([observation]) - observation, reward, done, _ = env.step(action[0]) + observation, reward, done, _, _ = env.step(action[0]) time.sleep(0.002) sum_reward += reward if done: diff --git a/examples/pybullet/gym/pybullet_envs/minitaur/envs/simple_ppo_agent_example.py b/examples/pybullet/gym/pybullet_envs/minitaur/envs/simple_ppo_agent_example.py index fbca0cca7e..e2b22d039a 100644 --- a/examples/pybullet/gym/pybullet_envs/minitaur/envs/simple_ppo_agent_example.py +++ b/examples/pybullet/gym/pybullet_envs/minitaur/envs/simple_ppo_agent_example.py @@ -39,7 +39,7 @@ def main(argv): config = utility.load_config(FLAGS.logdir) policy_layers = config.policy_layers value_layers = config.value_layers - env = config.env(render=True, log_path=FLAGS.log_path, env_randomizer=None) + env = config.env(render_mode=True, log_path=FLAGS.log_path, env_randomizer=None) network = config.network with tf.Session() as sess: @@ -55,7 +55,7 @@ def main(argv): observation = env.reset() while True: action = agent.get_action([observation]) - observation, reward, done, _ = env.step(action[0]) + observation, reward, done, _, _ = env.step(action[0]) # This sleep is to prevent serial communication error on the real robot. time.sleep(0.002) sum_reward += reward diff --git a/examples/pybullet/gym/pybullet_envs/minitaur/envs_v2/env_wrappers/depth_uv_to_footplacement_wrapper.py b/examples/pybullet/gym/pybullet_envs/minitaur/envs_v2/env_wrappers/depth_uv_to_footplacement_wrapper.py index 4b86b6df6c..0a5683f497 100644 --- a/examples/pybullet/gym/pybullet_envs/minitaur/envs_v2/env_wrappers/depth_uv_to_footplacement_wrapper.py +++ b/examples/pybullet/gym/pybullet_envs/minitaur/envs_v2/env_wrappers/depth_uv_to_footplacement_wrapper.py @@ -115,7 +115,7 @@ def reset(self, **kwargs): obs["LastAction"] = self.last_action self._observation = obs - return self._observation + return self._observation, {} def _move_kinematic(self): """Move robot kinematically. 
@@ -148,7 +148,7 @@ def _move_kinematic(self): wheeled_robot_base_sim.BODY_ACTION_NAME: joint_pose, } - obs, _, _, info = self._gym_env.step(action) + obs, _, _, _, info = self._gym_env.step(action) return obs, info def _move_com_dynamic(self, swing_foot_id, num_control_steps): @@ -183,7 +183,7 @@ def _move_com_dynamic(self, swing_foot_id, num_control_steps): joint_pose = np.array( self.robot.motor_angles_from_foot_positions( toe_positions_over_time, position_in_world_frame=False)[1]) - obs, _, _, info = self._gym_env.step(joint_pose) + obs, _, _, _, info = self._gym_env.step(joint_pose) self._update_com_visualization() return obs, info @@ -216,7 +216,7 @@ def _swing_leg_dynamic(self, swing_foot_id, destination_foothold_xyz_local, self.robot.motor_angles_from_foot_positions( toe_positions_over_time, position_in_world_frame=False)[1]) - obs, _, _, info = self._gym_env.step(joint_pose) + obs, _, _, _, info = self._gym_env.step(joint_pose) return obs, info def _move_dynamic(self): @@ -273,7 +273,7 @@ def step(self, action: Sequence[float]): obs["toe_position"] = current_end_effector_pos obs["vision"] = self.task.get_depth_image_for_foot() self._observation = obs - return obs, reward, done, info + return obs, reward, done, False, info def _construct_foot_trajectories(self, alpha, swing_foot_start_position, swing_foot_destination, diff --git a/examples/pybullet/gym/pybullet_envs/minitaur/envs_v2/env_wrappers/fixed_steptime_wrapper_env.py b/examples/pybullet/gym/pybullet_envs/minitaur/envs_v2/env_wrappers/fixed_steptime_wrapper_env.py index 03207c92d8..2fcc9c6a84 100644 --- a/examples/pybullet/gym/pybullet_envs/minitaur/envs_v2/env_wrappers/fixed_steptime_wrapper_env.py +++ b/examples/pybullet/gym/pybullet_envs/minitaur/envs_v2/env_wrappers/fixed_steptime_wrapper_env.py @@ -53,7 +53,7 @@ def reset(self, initial_motor_angles=None, reset_duration=1.0): self._last_reset_time = time.time() self._last_step_time = time.time() self._step_counter = 0 - return obs + return obs, {} def step(self, action): """Steps the wrapped environment. diff --git a/examples/pybullet/gym/pybullet_envs/minitaur/envs_v2/env_wrappers/ik_based_wrapper_env.py b/examples/pybullet/gym/pybullet_envs/minitaur/envs_v2/env_wrappers/ik_based_wrapper_env.py index da80bf159b..5fe4b50163 100644 --- a/examples/pybullet/gym/pybullet_envs/minitaur/envs_v2/env_wrappers/ik_based_wrapper_env.py +++ b/examples/pybullet/gym/pybullet_envs/minitaur/envs_v2/env_wrappers/ik_based_wrapper_env.py @@ -104,6 +104,6 @@ def step(self, action): desired_joint_angles = self._joint_angles_from_toe_positions_and_base_pose( ik_actions=action) - observation, reward, done, _ = self._gym_env.step(desired_joint_angles) + observation, reward, done, _, _ = self._gym_env.step(desired_joint_angles) - return observation, reward, done, _ + return observation, reward, done, False, {} diff --git a/examples/pybullet/gym/pybullet_envs/minitaur/envs_v2/env_wrappers/imitation_wrapper_env.py b/examples/pybullet/gym/pybullet_envs/minitaur/envs_v2/env_wrappers/imitation_wrapper_env.py index bbe14ea831..ab1f23cfd3 100644 --- a/examples/pybullet/gym/pybullet_envs/minitaur/envs_v2/env_wrappers/imitation_wrapper_env.py +++ b/examples/pybullet/gym/pybullet_envs/minitaur/envs_v2/env_wrappers/imitation_wrapper_env.py @@ -42,10 +42,10 @@ def step(self, action): ValueError if input action is None. 
""" - original_observation, reward, done, _ = self._gym_env.step(action) + original_observation, reward, done, _, _ = self._gym_env.step(action) observation = self._modify_observation(original_observation) - return observation, reward, done, _ + return observation, reward, done, False, {} @gin.configurable('imitation_wrapper_env.ImitationWrapperEnv.reset') def reset(self, initial_motor_angles=None, reset_duration=1.0): @@ -64,7 +64,7 @@ def reset(self, initial_motor_angles=None, reset_duration=1.0): """ original_observation = self._gym_env.reset(initial_motor_angles, reset_duration) observation = self._modify_observation(original_observation) - return observation + return observation, {} def _modify_observation(self, original_observation): """Appends target observations from the reference motion to the observations. diff --git a/examples/pybullet/gym/pybullet_envs/minitaur/envs_v2/env_wrappers/mpc_locomotion_wrapper.py b/examples/pybullet/gym/pybullet_envs/minitaur/envs_v2/env_wrappers/mpc_locomotion_wrapper.py index faebcdfead..6b10209db0 100644 --- a/examples/pybullet/gym/pybullet_envs/minitaur/envs_v2/env_wrappers/mpc_locomotion_wrapper.py +++ b/examples/pybullet/gym/pybullet_envs/minitaur/envs_v2/env_wrappers/mpc_locomotion_wrapper.py @@ -610,7 +610,7 @@ def step(self, action: Sequence[float]): self._fill_observations(obs) self._observation = obs - return obs, reward, done, info + return obs, reward, done, False, info def _extract_action(self, action, name): return action[self. @@ -775,7 +775,7 @@ def _step_motion_controller(self, action): actions = self._combine_swing_stance_action(swing_action, stance_action) - obs, rew, done, info = self._gym_env.step(actions) + obs, rew, done, _, info = self._gym_env.step(actions) if self._stance_controller.qp_solver_fail: done = True total_reward += rew diff --git a/examples/pybullet/gym/pybullet_envs/minitaur/envs_v2/env_wrappers/observation_dictionary_to_array_wrapper.py b/examples/pybullet/gym/pybullet_envs/minitaur/envs_v2/env_wrappers/observation_dictionary_to_array_wrapper.py index 34e6049ccd..cd7f351778 100644 --- a/examples/pybullet/gym/pybullet_envs/minitaur/envs_v2/env_wrappers/observation_dictionary_to_array_wrapper.py +++ b/examples/pybullet/gym/pybullet_envs/minitaur/envs_v2/env_wrappers/observation_dictionary_to_array_wrapper.py @@ -43,7 +43,7 @@ def reset(self, initial_motor_angles=None, reset_duration=1.0): observation = self._gym_env.reset( initial_motor_angles=initial_motor_angles, reset_duration=reset_duration) - return self._flatten_observation(observation) + return self._flatten_observation(observation), {} def step(self, action): """Steps the wrapped environment. @@ -55,8 +55,8 @@ def step(self, action): The tuple containing the flattened observation, the reward, the epsiode end indicator. 
""" - observation_dict, reward, done, _ = self._gym_env.step(action) - return self._flatten_observation(observation_dict), reward, done, _ + observation_dict, reward, done, _, _ = self._gym_env.step(action) + return self._flatten_observation(observation_dict), reward, done, False, {} def render(self, mode='human'): return self._gym_env.render(mode) diff --git a/examples/pybullet/gym/pybullet_envs/minitaur/envs_v2/env_wrappers/pmtg_inplace_wrapper_env.py b/examples/pybullet/gym/pybullet_envs/minitaur/envs_v2/env_wrappers/pmtg_inplace_wrapper_env.py index 0a274d3996..b205a3a4e5 100644 --- a/examples/pybullet/gym/pybullet_envs/minitaur/envs_v2/env_wrappers/pmtg_inplace_wrapper_env.py +++ b/examples/pybullet/gym/pybullet_envs/minitaur/envs_v2/env_wrappers/pmtg_inplace_wrapper_env.py @@ -171,6 +171,6 @@ def step(self, action): new_action = action_tg_motor_space + action_residual if self._num_actions == _LAIKAGO_NUM_ACTIONS: new_action += self._init_pose - original_observation, reward, done, _ = self._gym_env.step(new_action) + original_observation, reward, done, _, _ = self._gym_env.step(new_action) - return self._modify_observation(original_observation), reward, done, _ + return self._modify_observation(original_observation), reward, done, False, {} diff --git a/examples/pybullet/gym/pybullet_envs/minitaur/envs_v2/env_wrappers/pmtg_wrapper_env.py b/examples/pybullet/gym/pybullet_envs/minitaur/envs_v2/env_wrappers/pmtg_wrapper_env.py index 1df14a2965..39b46315f4 100644 --- a/examples/pybullet/gym/pybullet_envs/minitaur/envs_v2/env_wrappers/pmtg_wrapper_env.py +++ b/examples/pybullet/gym/pybullet_envs/minitaur/envs_v2/env_wrappers/pmtg_wrapper_env.py @@ -198,7 +198,7 @@ def reset(self, initial_motor_angles=None, reset_duration=1.0): initial_motor_angles = robot_pose_utils.convert_leg_pose_to_motor_angles( self._gym_env.robot_class, [0, 0, 0] * 4) observation = self._gym_env.reset(initial_motor_angles, reset_duration) - return self._modify_observation(observation) + return self._modify_observation(observation), {} def step(self, action): """Steps the wrapped environment. @@ -246,11 +246,11 @@ def step(self, action): else: action_motor_space = robot_pose_utils.convert_leg_pose_to_motor_angles( self._gym_env.robot_class, action_total) - original_observation, reward, done, _ = self._gym_env.step( + original_observation, reward, done, _, _ = self._gym_env.step( action_motor_space) return self._modify_observation(original_observation), np.float32( - reward), done, _ + reward), done, False, {} def _get_observation_bounds(self): """Get the bounds of the observation added from the trajectory generator. 
diff --git a/examples/pybullet/gym/pybullet_envs/minitaur/envs_v2/env_wrappers/state_machine_based_wrapper_env.py b/examples/pybullet/gym/pybullet_envs/minitaur/envs_v2/env_wrappers/state_machine_based_wrapper_env.py index 56a92dcd06..3d1923de7b 100644 --- a/examples/pybullet/gym/pybullet_envs/minitaur/envs_v2/env_wrappers/state_machine_based_wrapper_env.py +++ b/examples/pybullet/gym/pybullet_envs/minitaur/envs_v2/env_wrappers/state_machine_based_wrapper_env.py @@ -281,7 +281,7 @@ def step(self, action): while time_since_transition < state_duration / 2.0: ik_actions = self._move_com(action[1:3], time_since_transition, state_duration) - _, reward, done, _ = self._gym_env.step(ik_actions) + _, reward, done, _, _ = self._gym_env.step(ik_actions) sum_reward += reward step_num += 1 time_since_transition = self.robot.GetTimeSinceReset( @@ -293,7 +293,7 @@ def step(self, action): while time_since_transition < state_duration: ik_actions = self._move_leg(action[0], time_since_transition, state_duration) - _, reward, done, _ = self._gym_env.step(ik_actions) + _, reward, done, _, _ = self._gym_env.step(ik_actions) sum_reward += reward step_num += 1 time_since_transition = self.robot.GetTimeSinceReset( @@ -305,7 +305,7 @@ def step(self, action): state_machine_observation = self._state_machine_observation() - return state_machine_observation, sum_reward, done, _ + return state_machine_observation, sum_reward, done, False, {} def reset(self): """Reset the simulation and state machine states.""" @@ -318,4 +318,4 @@ def reset(self): state_machine_observation = self._state_machine_observation() - return state_machine_observation + return state_machine_observation, {} diff --git a/examples/pybullet/gym/pybullet_envs/minitaur/envs_v2/env_wrappers/step_based_curriculum_wrapper_env.py b/examples/pybullet/gym/pybullet_envs/minitaur/envs_v2/env_wrappers/step_based_curriculum_wrapper_env.py index e6296cd802..9fec140e75 100644 --- a/examples/pybullet/gym/pybullet_envs/minitaur/envs_v2/env_wrappers/step_based_curriculum_wrapper_env.py +++ b/examples/pybullet/gym/pybullet_envs/minitaur/envs_v2/env_wrappers/step_based_curriculum_wrapper_env.py @@ -227,5 +227,5 @@ def reset(self, *args, **kwargs): self.set_scene_params() self._gym_env.reset(*args, **kwargs) - return self._get_observation() + return self._get_observation(), {} diff --git a/examples/pybullet/gym/pybullet_envs/minitaur/envs_v2/env_wrappers/trajectory_generator_wrapper_env.py b/examples/pybullet/gym/pybullet_envs/minitaur/envs_v2/env_wrappers/trajectory_generator_wrapper_env.py index a2c6f0bf65..ebbe0049df 100644 --- a/examples/pybullet/gym/pybullet_envs/minitaur/envs_v2/env_wrappers/trajectory_generator_wrapper_env.py +++ b/examples/pybullet/gym/pybullet_envs/minitaur/envs_v2/env_wrappers/trajectory_generator_wrapper_env.py @@ -53,7 +53,7 @@ def reset(self, initial_motor_angles=None, reset_duration=1.0): if getattr(self._trajectory_generator, 'reset'): self._trajectory_generator.reset() observation = self._gym_env.reset(initial_motor_angles, reset_duration) - return self._modify_observation(observation) + return self._modify_observation(observation), {} def step(self, action): """Steps the wrapped environment. 
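Note: several envs_v2 wrapper hunks in this region return a hard-coded False for the truncated flag and an empty info dict, discarding whatever the wrapped env reported. If the inner env can truncate on its own (for example through a time limit), a more faithful variant would forward both values. The drop-in step body below sketches that alternative; it is illustrative only and is not what this patch does.

def step(self, action):
  # Forward the wrapped env's truncated flag and info dict instead of
  # replacing them with False and {}.
  observation, reward, done, truncated, info = self._gym_env.step(action)
  return self._modify_observation(observation), reward, done, truncated, info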
@@ -76,6 +76,6 @@ def step(self, action): new_action = self._trajectory_generator.get_action( self._gym_env.robot.GetTimeSinceReset(), action) - original_observation, reward, done, _ = self._gym_env.step(new_action) + original_observation, reward, done, _, _ = self._gym_env.step(new_action) - return self._modify_observation(original_observation), reward, done, _ + return self._modify_observation(original_observation), reward, done, False, {} diff --git a/examples/pybullet/gym/pybullet_envs/minitaur/envs_v2/env_wrappers/walking_wrapper.py b/examples/pybullet/gym/pybullet_envs/minitaur/envs_v2/env_wrappers/walking_wrapper.py index 6235a85363..c883926e25 100644 --- a/examples/pybullet/gym/pybullet_envs/minitaur/envs_v2/env_wrappers/walking_wrapper.py +++ b/examples/pybullet/gym/pybullet_envs/minitaur/envs_v2/env_wrappers/walking_wrapper.py @@ -106,7 +106,7 @@ def reset(self, *args, **kwargs) -> Any: # just recreate the controller. self._controller = _setup_controller(self._gym_env.robot) self._controller.reset() - return obs + return obs, {} def __getattr__(self, attr): return getattr(self._gym_env, attr) @@ -128,7 +128,7 @@ def step(self, action) -> Any: for _ in range(self._action_repeat): self._controller.update() hybrid_action = self._controller.get_action() - obs, reward, done, info = self._gym_env.step(hybrid_action) + obs, reward, done, _, info = self._gym_env.step(hybrid_action) if done: break - return obs, reward, done, info + return obs, reward, done, False, info diff --git a/examples/pybullet/gym/pybullet_envs/minitaur/envs_v2/examples/laikago_pmtg_example.py b/examples/pybullet/gym/pybullet_envs/minitaur/envs_v2/examples/laikago_pmtg_example.py index d791dd18a7..785703588c 100644 --- a/examples/pybullet/gym/pybullet_envs/minitaur/envs_v2/examples/laikago_pmtg_example.py +++ b/examples/pybullet/gym/pybullet_envs/minitaur/envs_v2/examples/laikago_pmtg_example.py @@ -77,7 +77,7 @@ def main(argv): _PMTG_SWING_VS_STANCE ] action = residual + tg_params - _, reward, done, _ = env.step(action) + _, reward, done, _, _ = env.step(action) sum_reward += reward diff --git a/examples/pybullet/gym/pybullet_envs/minitaur/envs_v2/examples/laikago_static_gait_example.py b/examples/pybullet/gym/pybullet_envs/minitaur/envs_v2/examples/laikago_static_gait_example.py index 13e2387cc5..5923611815 100644 --- a/examples/pybullet/gym/pybullet_envs/minitaur/envs_v2/examples/laikago_static_gait_example.py +++ b/examples/pybullet/gym/pybullet_envs/minitaur/envs_v2/examples/laikago_static_gait_example.py @@ -38,7 +38,7 @@ def run_example(num_max_steps=_NUM_STEPS): for _ in range(num_max_steps): action = policy.act(observation) - _, _, done, _ = env.step(action) + _, _, done, _, _ = env.step(action) if done: break diff --git a/examples/pybullet/gym/pybullet_envs/minitaur/envs_v2/laikago_ars_run_policy.py b/examples/pybullet/gym/pybullet_envs/minitaur/envs_v2/laikago_ars_run_policy.py index 38c33c02a7..67a91219bb 100644 --- a/examples/pybullet/gym/pybullet_envs/minitaur/envs_v2/laikago_ars_run_policy.py +++ b/examples/pybullet/gym/pybullet_envs/minitaur/envs_v2/laikago_ars_run_policy.py @@ -129,7 +129,7 @@ def main(argv): actions.append(action) #time.sleep(1) - obs, r, done, _ = env.step(action) + obs, r, done, _, _ = env.step(action) totalr += r steps += 1 diff --git a/examples/pybullet/gym/pybullet_envs/minitaur/envs_v2/laikago_ars_train.py b/examples/pybullet/gym/pybullet_envs/minitaur/envs_v2/laikago_ars_train.py index f04404f3e3..00fb4145d5 100644 --- 
a/examples/pybullet/gym/pybullet_envs/minitaur/envs_v2/laikago_ars_train.py +++ b/examples/pybullet/gym/pybullet_envs/minitaur/envs_v2/laikago_ars_train.py @@ -118,7 +118,7 @@ def rollout(self, shift = 0., rollout_length = None): ob = self.env.reset() for i in range(rollout_length): action = self.policy.act(ob) - ob, reward, done, _ = self.env.step(action) + ob, reward, done, _, _ = self.env.step(action) steps += 1 total_reward += (reward - shift) if done: diff --git a/examples/pybullet/gym/pybullet_envs/minitaur/envs_v2/locomotion_gym_env.py b/examples/pybullet/gym/pybullet_envs/minitaur/envs_v2/locomotion_gym_env.py index b98d3b0056..ae5fb3d30d 100644 --- a/examples/pybullet/gym/pybullet_envs/minitaur/envs_v2/locomotion_gym_env.py +++ b/examples/pybullet/gym/pybullet_envs/minitaur/envs_v2/locomotion_gym_env.py @@ -393,7 +393,7 @@ def reset( self._last_reset_time = self._clock() - return self._get_observation() + return self._get_observation(), {} def _wait_for_rendering(self): # Sleep, otherwise the computation takes less time than real time, @@ -498,7 +498,7 @@ def _step_new_robot_class(self, action): self._env_step_counter += 1 # TODO(b/161941829): terminate removed for now, change terminate to other # names. - return self._get_observation(), reward, done, {} + return self._get_observation(), reward, done, False, {} def step(self, action): """Step forward the simulation, given the action. diff --git a/examples/pybullet/gym/pybullet_envs/minitaur/envs_v2/locomotion_gym_env_test.py b/examples/pybullet/gym/pybullet_envs/minitaur/envs_v2/locomotion_gym_env_test.py index 1c9ea0e9b9..3a6b72ea5c 100644 --- a/examples/pybullet/gym/pybullet_envs/minitaur/envs_v2/locomotion_gym_env_test.py +++ b/examples/pybullet/gym/pybullet_envs/minitaur/envs_v2/locomotion_gym_env_test.py @@ -91,7 +91,7 @@ def test_step_gym(self): steps = 1000 action_dim = len(env.action_space.high) for _ in range(steps): - observations, reward, done, _ = env.step([desired_motor_angle] * + observations, reward, done, _, _ = env.step([desired_motor_angle] * action_dim) observations = env_utils.flatten_observations(observations) @@ -112,7 +112,7 @@ def test_scene(self): steps = 2 action_dim = len(env.action_space.high) for _ in range(steps): - _, reward, _, _ = env.step([desired_motor_angle] * action_dim) + _, reward, _, _, _ = env.step([desired_motor_angle] * action_dim) self.assertEqual(reward, steps) def test_except_on_invalid_config(self): diff --git a/examples/pybullet/gym/pybullet_envs/minitaur/envs_v2/multiagent_mobility_gym_env.py b/examples/pybullet/gym/pybullet_envs/minitaur/envs_v2/multiagent_mobility_gym_env.py index c716d367b4..e999482414 100644 --- a/examples/pybullet/gym/pybullet_envs/minitaur/envs_v2/multiagent_mobility_gym_env.py +++ b/examples/pybullet/gym/pybullet_envs/minitaur/envs_v2/multiagent_mobility_gym_env.py @@ -287,7 +287,7 @@ def reset(self, for s in self.all_sensors(): s.on_reset(self) - return self._get_observation() + return self._get_observation(), {} def get_robot(self, name): for robot in self.robots: @@ -396,7 +396,7 @@ def step(self, actions): if done: for robot in self._robots: robot.Terminate() - return self._get_observation(), reward, done, {} + return self._get_observation(), reward, done, False, {} def render(self, mode='rgb_array'): if mode != 'rgb_array': diff --git a/examples/pybullet/gym/pybullet_envs/prediction/pybullet_sim_gym_env.py b/examples/pybullet/gym/pybullet_envs/prediction/pybullet_sim_gym_env.py index 0d94520cc4..10979f644f 100644 --- 
a/examples/pybullet/gym/pybullet_envs/prediction/pybullet_sim_gym_env.py +++ b/examples/pybullet/gym/pybullet_envs/prediction/pybullet_sim_gym_env.py @@ -33,7 +33,7 @@ class PyBulletSimGymEnv(gym.Env): def __init__(self, pybullet_sim_factory=boxstack_pybullet_sim, - render=True, + render_mode=True, render_sleep=False, debug_visualization=True, hard_reset=False, @@ -55,7 +55,7 @@ def __init__(self, self._action_repeat = action_repeat self._num_bullet_solver_iterations = num_bullet_solver_iterations self._env_step_counter = 0 - self._is_render = render + self._is_render = render_mode self._debug_visualization = debug_visualization self._render_sleep = render_sleep self._render_width = render_width @@ -125,7 +125,7 @@ def reset(self): #self._pybullet_client.resetDebugVisualizerCamera( # self._cam_dist, self._cam_yaw, self._cam_pitch, [0, 0, 0]) - return self._get_observation() + return self._get_observation(), {} def seed(self, seed=None): self.np_random, seed = seeding.np_random(seed) @@ -167,7 +167,7 @@ def step(self, action): self._env_step_counter += 1 reward = self._reward() done = self._termination() - return np.array(self._get_observation()), reward, done, {} + return np.array(self._get_observation()), reward, done, False, {} def render(self, mode="rgb_array", close=False): if mode != "rgb_array": diff --git a/examples/pybullet/gym/pybullet_envs/prediction/test_pybullet_sim_gym_env.py b/examples/pybullet/gym/pybullet_envs/prediction/test_pybullet_sim_gym_env.py index ae24b3a9d5..7ecfbdbf82 100644 --- a/examples/pybullet/gym/pybullet_envs/prediction/test_pybullet_sim_gym_env.py +++ b/examples/pybullet/gym/pybullet_envs/prediction/test_pybullet_sim_gym_env.py @@ -22,7 +22,7 @@ def ResetPoseExample(steps): environment = pybullet_sim_gym_env.PyBulletSimGymEnv(pybullet_sim_factory=boxstack_pybullet_sim, debug_visualization=False, - render=True, + render_mode=True, action_repeat=30) action = [math.pi / 2] * 8 @@ -31,7 +31,7 @@ def ResetPoseExample(steps): for _ in range(steps): print(_) startsim = time.time() - _, _, done, _ = environment.step(action) + _, _, done, _, _ = environment.step(action) stopsim = time.time() startrender = time.time() #environment.render(mode='rgb_array') diff --git a/examples/pybullet/gym/pybullet_envs/stable_baselines/enjoy.py b/examples/pybullet/gym/pybullet_envs/stable_baselines/enjoy.py index d33daabe01..94d7a3a599 100644 --- a/examples/pybullet/gym/pybullet_envs/stable_baselines/enjoy.py +++ b/examples/pybullet/gym/pybullet_envs/stable_baselines/enjoy.py @@ -81,7 +81,7 @@ episode_length = 0 while not done: action, _ = model.predict(obs, deterministic=True) - obs, reward, done, _info = env.step(action) + obs, reward, done, _, _info = env.step(action) episode_reward += reward episode_length += 1
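Note: some callers of these envs (for example older stable-baselines code paths) may still produce or expect the legacy four-element step result. A small normalization helper such as the sketch below can bridge both shapes while the migration is in flight; it is illustrative only and is not part of the patch.

def normalize_step(result):
  """Accept either (obs, reward, done, info) or the new five-element form."""
  if len(result) == 5:
    obs, reward, terminated, truncated, info = result
  else:
    obs, reward, terminated, info = result
    truncated = False
  return obs, reward, terminated, truncated, info

Usage: obs, reward, terminated, truncated, info = normalize_step(env.step(action))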