13 changes: 11 additions & 2 deletions scripts/environments/random_agent.py
@@ -18,7 +18,12 @@
)
parser.add_argument("--num_envs", type=int, default=None, help="Number of environments to simulate.")
parser.add_argument("--task", type=str, default=None, help="Name of the task.")
parser.add_argument("--newton_visualizer", action="store_true", default=False, help="Enable Newton rendering.")
parser.add_argument(
"--visualize",
action="store_true",
default=False,
help="Launch visualizer(s). Uses visualizers defined in environment config, or defaults to Newton OpenGL if none configured.",
)
# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
# parse the arguments
@@ -52,8 +57,12 @@ def main():
device=args_cli.device,
num_envs=args_cli.num_envs,
use_fabric=not args_cli.disable_fabric,
newton_visualizer=args_cli.newton_visualizer,
)

# enable visualizers if requested
if args_cli.visualize:
    import isaaclab.sim as sim_utils
    sim_utils.enable_visualizers(env_cfg)
# create environment
env = gym.make(args_cli.task, cfg=env_cfg)

13 changes: 11 additions & 2 deletions scripts/environments/zero_agent.py
@@ -18,7 +18,12 @@
)
parser.add_argument("--num_envs", type=int, default=None, help="Number of environments to simulate.")
parser.add_argument("--task", type=str, default=None, help="Name of the task.")
parser.add_argument("--newton_visualizer", action="store_true", default=False, help="Enable Newton rendering.")
parser.add_argument(
"--visualize",
action="store_true",
default=False,
help="Launch visualizer(s). Uses visualizers defined in environment config, or defaults to Newton OpenGL if none configured.",
)
# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
# parse the arguments
@@ -52,8 +57,12 @@ def main():
device=args_cli.device,
num_envs=args_cli.num_envs,
use_fabric=not args_cli.disable_fabric,
newton_visualizer=args_cli.newton_visualizer,
)

# enable visualizers if requested
if args_cli.visualize:
    import isaaclab.sim as sim_utils
    sim_utils.enable_visualizers(env_cfg)
# create environment
env = gym.make(args_cli.task, cfg=env_cfg)

15 changes: 7 additions & 8 deletions scripts/reinforcement_learning/rl_games/play.py
@@ -32,7 +32,12 @@
help="When no checkpoint provided, use the last saved model. Otherwise use the best saved model.",
)
parser.add_argument("--real-time", action="store_true", default=False, help="Run in real-time, if possible.")
parser.add_argument("--newton_visualizer", action="store_true", default=False, help="Enable Newton rendering.")
parser.add_argument(
"--visualize",
action="store_true",
default=False,
help="Launch visualizer(s). Uses visualizers defined in environment config, or defaults to Newton OpenGL if none configured.",
)
# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
# parse the arguments
@@ -79,13 +84,7 @@ def main():
"""Play with RL-Games agent."""
task_name = args_cli.task.split(":")[-1]
# parse env configuration
env_cfg = parse_env_cfg(
args_cli.task,
device=args_cli.device,
num_envs=args_cli.num_envs,
use_fabric=not args_cli.disable_fabric,
newton_visualizer=args_cli.newton_visualizer,
)
env_cfg = parse_env_cfg(args_cli.task, device=args_cli.device, num_envs=args_cli.num_envs, use_fabric=not args_cli.disable_fabric, visualize=args_cli.visualize, train_mode=False)
agent_cfg = load_cfg_from_registry(args_cli.task, "rl_games_cfg_entry_point")

# specify directory for logging experiments
13 changes: 11 additions & 2 deletions scripts/reinforcement_learning/rl_games/train.py
@@ -38,7 +38,12 @@
const=True,
help="if toggled, this experiment will be tracked with Weights and Biases",
)
parser.add_argument("--newton_visualizer", action="store_true", default=False, help="Enable Newton rendering.")
parser.add_argument(
"--visualize",
action="store_true",
default=False,
help="Launch visualizer(s). Uses visualizers defined in environment config, or defaults to Newton OpenGL if none configured.",
)
# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
# parse the arguments
@@ -90,7 +95,11 @@ def main(env_cfg: ManagerBasedRLEnvCfg | DirectRLEnvCfg, agent_cfg: dict):
# override configurations with non-hydra CLI arguments
env_cfg.scene.num_envs = args_cli.num_envs if args_cli.num_envs is not None else env_cfg.scene.num_envs
env_cfg.sim.device = args_cli.device if args_cli.device is not None else env_cfg.sim.device
env_cfg.sim.enable_newton_rendering = args_cli.newton_visualizer

# enable visualizers if requested
if args_cli.visualize:
    import isaaclab.sim as sim_utils
    sim_utils.enable_visualizers(env_cfg)

# randomly sample a seed if seed = -1
if args_cli.seed == -1:
26 changes: 11 additions & 15 deletions scripts/reinforcement_learning/rsl_rl/play.py
@@ -34,7 +34,12 @@
help="Use the pre-trained checkpoint from Nucleus.",
)
parser.add_argument("--real-time", action="store_true", default=False, help="Run in real-time, if possible.")
parser.add_argument("--newton_visualizer", action="store_true", default=False, help="Enable Newton rendering.")
parser.add_argument(
"--visualize",
action="store_true",
default=False,
help="Launch visualizer(s). Uses visualizers defined in environment config, or defaults to Newton OpenGL if none configured.",
)
# append RSL-RL cli arguments
cli_args.add_rsl_rl_args(parser)
# append AppLauncher cli args
@@ -96,7 +101,11 @@ def main(env_cfg: ManagerBasedRLEnvCfg | DirectRLEnvCfg, agent_cfg: RslRlBaseRun
# note: certain randomizations occur in the environment initialization so we set the seed here
env_cfg.seed = agent_cfg.seed
env_cfg.sim.device = args_cli.device if args_cli.device is not None else env_cfg.sim.device
env_cfg.sim.enable_newton_rendering = args_cli.newton_visualizer

# enable visualizers if requested
if args_cli.visualize:
    import isaaclab.sim as sim_utils
    sim_utils.enable_visualizers(env_cfg, train_mode=False)

# specify directory for logging experiments
log_root_path = os.path.join("logs", "rsl_rl", agent_cfg.experiment_name)
@@ -117,19 +126,6 @@ def main(env_cfg: ManagerBasedRLEnvCfg | DirectRLEnvCfg, agent_cfg: RslRlBaseRun
# set the log directory for the environment (works for all environment types)
env_cfg.log_dir = log_dir

# Set play mode for Newton viewer if using Newton visualizer
if args_cli.newton_visualizer:
    # Set visualizer to play mode in Newton config
    if hasattr(env_cfg.sim, "newton_cfg"):
        env_cfg.sim.newton_cfg.visualizer_train_mode = False
    else:
        # Create newton_cfg if it doesn't exist
        from isaaclab.sim._impl.newton_manager_cfg import NewtonCfg

        newton_cfg = NewtonCfg()
        newton_cfg.visualizer_train_mode = False
        env_cfg.sim.newton_cfg = newton_cfg

# create isaac environment
env = gym.make(args_cli.task, cfg=env_cfg, render_mode="rgb_array" if args_cli.video else None)

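For context, a minimal sketch of what `enable_visualizers(env_cfg, train_mode=False)` might amount to, assuming it simply encapsulates the Newton visualizer setup that this script previously performed inline (the removed block above); the actual helper in `isaaclab.sim` may do more or differ in detail:

```python
# Hypothetical stand-in for isaaclab.sim.enable_visualizers (not the actual implementation).
# Assumption: the helper wraps the Newton configuration that rsl_rl/play.py used to do inline.
def enable_visualizers(env_cfg, train_mode: bool = True):
    """Enable visualizers on the environment config, creating a default Newton config if needed."""
    if hasattr(env_cfg.sim, "newton_cfg"):
        # Reuse the Newton config defined by the environment.
        env_cfg.sim.newton_cfg.visualizer_train_mode = train_mode
    else:
        # Create a Newton config if the environment did not define one
        # (import path taken from the removed code above).
        from isaaclab.sim._impl.newton_manager_cfg import NewtonCfg

        newton_cfg = NewtonCfg()
        newton_cfg.visualizer_train_mode = train_mode
        env_cfg.sim.newton_cfg = newton_cfg
```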
13 changes: 11 additions & 2 deletions scripts/reinforcement_learning/rsl_rl/train.py
@@ -32,7 +32,12 @@
"--distributed", action="store_true", default=False, help="Run training with multiple GPUs or nodes."
)
parser.add_argument("--export_io_descriptors", action="store_true", default=False, help="Export IO descriptors.")
parser.add_argument("--newton_visualizer", action="store_true", default=False, help="Enable Newton rendering.")
parser.add_argument(
"--visualize",
action="store_true",
default=False,
help="Launch visualizer(s). Uses visualizers defined in environment config, or defaults to Newton OpenGL if none configured.",
)
# append RSL-RL cli arguments
cli_args.add_rsl_rl_args(parser)
# append AppLauncher cli args
@@ -119,7 +124,11 @@ def main(env_cfg: ManagerBasedRLEnvCfg | DirectRLEnvCfg, agent_cfg: RslRlBaseRun
# note: certain randomizations occur in the environment initialization so we set the seed here
env_cfg.seed = agent_cfg.seed
env_cfg.sim.device = args_cli.device if args_cli.device is not None else env_cfg.sim.device
env_cfg.sim.enable_newton_rendering = args_cli.newton_visualizer

# enable visualizers if requested
if args_cli.visualize:
    import isaaclab.sim as sim_utils
    sim_utils.enable_visualizers(env_cfg)

# multi-gpu training configuration
if args_cli.distributed:
15 changes: 7 additions & 8 deletions scripts/reinforcement_learning/sb3/play.py
@@ -39,7 +39,12 @@
default=False,
help="Use a slower SB3 wrapper but keep all the extra training info.",
)
parser.add_argument("--newton_visualizer", action="store_true", default=False, help="Enable Newton rendering.")
parser.add_argument(
"--visualize",
action="store_true",
default=False,
help="Launch visualizer(s). Uses visualizers defined in environment config, or defaults to Newton OpenGL if none configured.",
)
# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
# parse the arguments
@@ -82,13 +87,7 @@
def main():
"""Play with stable-baselines agent."""
# parse configuration
env_cfg = parse_env_cfg(
args_cli.task,
device=args_cli.device,
num_envs=args_cli.num_envs,
use_fabric=not args_cli.disable_fabric,
newton_visualizer=args_cli.newton_visualizer,
)
env_cfg = parse_env_cfg(args_cli.task, device=args_cli.device, num_envs=args_cli.num_envs, use_fabric=not args_cli.disable_fabric, visualize=args_cli.visualize, train_mode=False)

task_name = args_cli.task.split(":")[-1]
train_task_name = task_name.replace("-Play", "")
13 changes: 11 additions & 2 deletions scripts/reinforcement_learning/sb3/train.py
@@ -32,7 +32,12 @@
default=False,
help="Use a slower SB3 wrapper but keep all the extra training info.",
)
parser.add_argument("--newton_visualizer", action="store_true", default=False, help="Enable Newton rendering.")
parser.add_argument(
"--visualize",
action="store_true",
default=False,
help="Launch visualizer(s). Uses visualizers defined in environment config, or defaults to Newton OpenGL if none configured.",
)
# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
# parse the arguments
@@ -113,7 +118,11 @@ def main(env_cfg: ManagerBasedRLEnvCfg | DirectRLEnvCfg, agent_cfg: dict):
# note: certain randomizations occur in the environment initialization so we set the seed here
env_cfg.seed = agent_cfg["seed"]
env_cfg.sim.device = args_cli.device if args_cli.device is not None else env_cfg.sim.device
env_cfg.sim.enable_newton_rendering = args_cli.newton_visualizer

# enable visualizers if requested
if args_cli.visualize:
    import isaaclab.sim as sim_utils
    sim_utils.enable_visualizers(env_cfg)

# directory for logging into
run_info = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
15 changes: 7 additions & 8 deletions scripts/reinforcement_learning/skrl/play.py
@@ -46,7 +46,12 @@
help="The RL algorithm used for training the skrl agent.",
)
parser.add_argument("--real-time", action="store_true", default=False, help="Run in real-time, if possible.")
parser.add_argument("--newton_visualizer", action="store_true", default=False, help="Enable Newton rendering.")
parser.add_argument(
"--visualize",
action="store_true",
default=False,
help="Launch visualizer(s). Uses visualizers defined in environment config, or defaults to Newton OpenGL if none configured.",
)

# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
@@ -111,13 +116,7 @@ def main():
task_name = args_cli.task.split(":")[-1]

# parse configuration
env_cfg = parse_env_cfg(
args_cli.task,
device=args_cli.device,
num_envs=args_cli.num_envs,
use_fabric=not args_cli.disable_fabric,
newton_visualizer=args_cli.newton_visualizer,
)
env_cfg = parse_env_cfg(args_cli.task, device=args_cli.device, num_envs=args_cli.num_envs, use_fabric=not args_cli.disable_fabric, visualize=args_cli.visualize, train_mode=False)
try:
experiment_cfg = load_cfg_from_registry(task_name, f"skrl_{algorithm}_cfg_entry_point")
except ValueError:
13 changes: 11 additions & 2 deletions scripts/reinforcement_learning/skrl/train.py
@@ -44,7 +44,12 @@
choices=["AMP", "PPO", "IPPO", "MAPPO"],
help="The RL algorithm used for training the skrl agent.",
)
parser.add_argument("--newton_visualizer", action="store_true", default=False, help="Enable Newton rendering.")
parser.add_argument(
"--visualize",
action="store_true",
default=False,
help="Launch visualizer(s). Uses visualizers defined in environment config, or defaults to Newton OpenGL if none configured.",
)

# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
@@ -113,7 +118,11 @@ def main(env_cfg: ManagerBasedRLEnvCfg | DirectRLEnvCfg, agent_cfg: dict):
# override configurations with non-hydra CLI arguments
env_cfg.scene.num_envs = args_cli.num_envs if args_cli.num_envs is not None else env_cfg.scene.num_envs
env_cfg.sim.device = args_cli.device if args_cli.device is not None else env_cfg.sim.device
env_cfg.sim.enable_newton_rendering = args_cli.newton_visualizer

# enable visualizers if requested
if args_cli.visualize:
    import isaaclab.sim as sim_utils
    sim_utils.enable_visualizers(env_cfg)

# multi-gpu training config
if args_cli.distributed:
13 changes: 11 additions & 2 deletions scripts/sim2sim_transfer/rsl_rl_transfer.py
@@ -29,7 +29,12 @@
help="Use the pre-trained checkpoint from Nucleus.",
)
parser.add_argument("--real-time", action="store_true", default=False, help="Run in real-time, if possible.")
parser.add_argument("--newton_visualizer", action="store_true", default=False, help="Enable Newton rendering.")
parser.add_argument(
"--visualize",
action="store_true",
default=False,
help="Launch visualizer(s). Uses visualizers defined in environment config, or defaults to Newton OpenGL if none configured.",
)
# Joint ordering arguments
parser.add_argument(
"--policy_transfer_file",
@@ -147,8 +152,12 @@ def main():
device=args_cli.device,
num_envs=args_cli.num_envs,
use_fabric=not args_cli.disable_fabric,
newton_visualizer=args_cli.newton_visualizer,
)

# enable visualizers if requested
if args_cli.visualize:
    import isaaclab.sim as sim_utils
    sim_utils.enable_visualizers(env_cfg)
agent_cfg: RslRlOnPolicyRunnerCfg = cli_args.parse_rsl_rl_cfg(task_name, args_cli)

# specify directory for logging experiments
3 changes: 2 additions & 1 deletion source/isaaclab/isaaclab/sim/__init__.py
@@ -27,8 +27,9 @@
"""

from .converters import * # noqa: F401, F403
from .scene_data_providers import NewtonSceneDataProvider, SceneDataProvider # noqa: F401, F403
from .schemas import * # noqa: F401, F403
from .simulation_cfg import RenderCfg, SimulationCfg # noqa: F401, F403
from .simulation_context import SimulationContext, build_simulation_context # noqa: F401, F403
from .simulation_context import SimulationContext, build_simulation_context, enable_visualizers # noqa: F401, F403
from .spawners import * # noqa: F401, F403
from .utils import * # noqa: F401, F403
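For reference, a minimal usage sketch of the newly re-exported helper, following the pattern the scripts above adopt (the task name is illustrative and the `parse_env_cfg` import path is an assumption):

```python
import isaaclab.sim as sim_utils

from isaaclab_tasks.utils import parse_env_cfg  # assumed import path

# Build an environment config; the task name here is only an example.
# (In the scripts above, the simulation app is launched via AppLauncher before these imports.)
env_cfg = parse_env_cfg("Isaac-Cartpole-v0", device="cuda:0", num_envs=16)

# Training scripts enable visualizers with the default (train-mode) presentation.
sim_utils.enable_visualizers(env_cfg)

# Play scripts request play-mode visualization instead.
sim_utils.enable_visualizers(env_cfg, train_mode=False)
```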