Frame Transformer#

One of the most common operations in a physics simulation is the frame transformation: rewriting a vector or quaternion in the basis of an arbitrary Euclidean coordinate system. There are many ways to accomplish this within Isaac and USD, but these methods can be cumbersome to implement within Isaac Lab's GPU-based simulation and cloned environments. To mitigate this problem, we have designed the Frame Transformer Sensor, which tracks and calculates the relative frame transformations for rigid bodies of interest in the scene.
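Under the hood, the computation for each target is simple: rotate the world-frame offset between the two bodies into the source frame's basis, and compose the orientations. The sketch below illustrates this math for a single source/target pair, assuming the quaternion helpers quat_inv, quat_apply, and quat_mul from isaaclab.utils.math (quaternions in (w, x, y, z) order); the sensor itself performs the equivalent computation in batch across all cloned environments.

from isaaclab.utils.math import quat_apply, quat_inv, quat_mul

def pose_in_source_frame(pos_s, quat_s, pos_t, quat_t):
    """Express a world-frame target pose in the source frame.

    pos_*: (N, 3) world-frame positions, quat_*: (N, 4) world-frame quaternions (w, x, y, z).
    """
    quat_s_inv = quat_inv(quat_s)
    # rotate the world-frame offset into the source frame's basis
    pos_ts = quat_apply(quat_s_inv, pos_t - pos_s)
    # relative orientation: q_ts = q_s^{-1} * q_t
    quat_ts = quat_mul(quat_s_inv, quat_t)
    return pos_ts, quat_ts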
The sensor is minimally defined by a source frame and a list of target frames. These definitions take the form of a prim path (for the source) and a list of regex-capable prim paths for the rigid bodies to be tracked (for the targets).
##
# Pre-defined configs
##
from isaaclab_assets.robots.anymal import ANYMAL_C_CFG  # isort: skip


@configclass
class FrameTransformerSensorSceneCfg(InteractiveSceneCfg):
    """Design the scene with sensors on the robot."""

    # ground plane
    ground = AssetBaseCfg(prim_path="/World/defaultGroundPlane", spawn=sim_utils.GroundPlaneCfg())

    # lights
    dome_light = AssetBaseCfg(
        prim_path="/World/Light", spawn=sim_utils.DomeLightCfg(intensity=3000.0, color=(0.75, 0.75, 0.75))
    )

    # robot
    robot = ANYMAL_C_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot")

    # Rigid Object
    cube = RigidObjectCfg(
        prim_path="{ENV_REGEX_NS}/Cube",
        spawn=sim_utils.CuboidCfg(
            size=(1, 1, 1),
            rigid_props=sim_utils.RigidBodyPropertiesCfg(),
            mass_props=sim_utils.MassPropertiesCfg(mass=100.0),
            collision_props=sim_utils.CollisionPropertiesCfg(),
            physics_material=sim_utils.RigidBodyMaterialCfg(static_friction=1.0),
            visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(0.0, 1.0, 0.0), metallic=0.2),
        ),
        init_state=RigidObjectCfg.InitialStateCfg(pos=(5, 0, 0.5)),
    )

    specific_transforms = FrameTransformerCfg(
        prim_path="{ENV_REGEX_NS}/Robot/base",
        target_frames=[
            FrameTransformerCfg.FrameCfg(prim_path="{ENV_REGEX_NS}/Robot/LF_FOOT"),
            FrameTransformerCfg.FrameCfg(prim_path="{ENV_REGEX_NS}/Robot/RF_FOOT"),
        ],
        debug_vis=True,
    )

    cube_transform = FrameTransformerCfg(
        prim_path="{ENV_REGEX_NS}/Robot/base",
        target_frames=[FrameTransformerCfg.FrameCfg(prim_path="{ENV_REGEX_NS}/Cube")],
        debug_vis=False,
    )

    robot_transforms = FrameTransformerCfg(
        prim_path="{ENV_REGEX_NS}/Robot/base",
        target_frames=[FrameTransformerCfg.FrameCfg(prim_path="{ENV_REGEX_NS}/Robot/.*")],
        debug_vis=False,
    )
We can now run the scene and query the sensors for data.
def run_simulator(sim: sim_utils.SimulationContext, scene: InteractiveScene):
    ...
    # Simulate physics
    while simulation_app.is_running():
        ...
        # print information from the sensors
        print("-------------------------------")
        print(scene["specific_transforms"])
        print("relative transforms:", scene["specific_transforms"].data.target_pos_source)
        print("relative orientations:", scene["specific_transforms"].data.target_quat_source)
        print("-------------------------------")
        print(scene["cube_transform"])
        print("relative transform:", scene["cube_transform"].data.target_pos_source)
        print("-------------------------------")
        print(scene["robot_transforms"])
        print("relative transforms:", scene["robot_transforms"].data.target_pos_source)
Let's examine the results for tracking specific objects. First, consider the data coming from the sensor on the feet:
-------------------------------
FrameTransformer @ '/World/envs/env_.*/Robot/base':
        tracked body frames: ['base', 'LF_FOOT', 'RF_FOOT']
        number of envs: 1
        source body frame: base
        target frames (count: ['LF_FOOT', 'RF_FOOT']): 2
relative transforms: tensor([[[ 0.4658,  0.3085, -0.4840],
         [ 0.4487, -0.2959, -0.4828]]], device='cuda:0')
relative orientations: tensor([[[ 0.9623,  0.0072, -0.2717, -0.0020],
         [ 0.9639,  0.0052, -0.2663, -0.0014]]], device='cuda:0')

By activating the visualizer, we can see that the frames of the feet are rotated slightly "upward". We can also see the explicit relative positions and rotations by querying the sensor for data, which returns these values in the same order as the tracked frames. This becomes even more apparent if we examine the transforms specified by regex.
-------------------------------
FrameTransformer @ '/World/envs/env_.*/Robot/base':
        tracked body frames: ['base', 'LF_FOOT', 'LF_HIP', 'LF_SHANK', 'LF_THIGH', 'LH_FOOT', 'LH_HIP', 'LH_SHANK', 'LH_THIGH', 'RF_FOOT', 'RF_HIP', 'RF_SHANK', 'RF_THIGH', 'RH_FOOT', 'RH_HIP', 'RH_SHANK', 'RH_THIGH', 'base']
        number of envs: 1
        source body frame: base
        target frames (count: ['LF_FOOT', 'LF_HIP', 'LF_SHANK', 'LF_THIGH', 'LH_FOOT', 'LH_HIP', 'LH_SHANK', 'LH_THIGH', 'RF_FOOT', 'RF_HIP', 'RF_SHANK', 'RF_THIGH', 'RH_FOOT', 'RH_HIP', 'RH_SHANK', 'RH_THIGH', 'base']): 17
relative transforms: tensor([[[ 4.6581e-01,  3.0846e-01, -4.8398e-01],
         [ 2.9990e-01,  1.0400e-01, -1.7062e-09],
         [ 2.1409e-01,  2.9177e-01, -2.4214e-01],
         [ 3.5980e-01,  1.8780e-01,  1.2608e-03],
         [-4.8813e-01,  3.0973e-01, -4.5927e-01],
         [-2.9990e-01,  1.0400e-01,  2.7044e-09],
         [-2.1495e-01,  2.9264e-01, -2.4198e-01],
         [-3.5980e-01,  1.8780e-01,  1.5582e-03],
         [ 4.4871e-01, -2.9593e-01, -4.8277e-01],
         [ 2.9990e-01, -1.0400e-01, -2.7057e-09],
         [ 1.9971e-01, -2.8554e-01, -2.3778e-01],
         [ 3.5980e-01, -1.8781e-01, -9.1049e-04],
         [-5.0090e-01, -2.9095e-01, -4.5746e-01],
         [-2.9990e-01, -1.0400e-01,  6.3592e-09],
         [-2.1860e-01, -2.8251e-01, -2.5163e-01],
         [-3.5980e-01, -1.8779e-01, -1.8792e-03],
         [ 0.0000e+00,  0.0000e+00,  0.0000e+00]]], device='cuda:0')
Here, the sensor is tracking all rigid body children of Robot/base, but this expression is inclusive, meaning that the source body itself is also a target. This can be seen both by examining the source and target list, where base appears twice, and also in the returned data, where the sensor returns the relative transform to itself, (0, 0, 0).
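When consuming the data, it is often more robust to look frames up by name than by hard-coded index, and to drop the source's self-transform when using an inclusive regex. A small sketch, assuming the data buffer's target_frame_names list is ordered consistently with the pose tensors (true in current Isaac Lab releases):

data = scene["robot_transforms"].data
# look up a single frame by name instead of hard-coding its index
lf_idx = data.target_frame_names.index("LF_FOOT")
lf_foot_pos = data.target_pos_source[:, lf_idx]  # (num_envs, 3)
# drop the source body's self-transform introduced by the inclusive regex
keep = [i for i, name in enumerate(data.target_frame_names) if name != "base"]
rel_pos = data.target_pos_source[:, keep]  # (num_envs, num_targets - 1, 3)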
Code for frame_transformer_sensor.py
# Copyright (c) 2022-2025, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md).
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

import argparse

from isaaclab.app import AppLauncher

# add argparse arguments
parser = argparse.ArgumentParser(description="Example on using the frame transformer sensor.")
parser.add_argument("--num_envs", type=int, default=1, help="Number of environments to spawn.")
# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
# parse the arguments
args_cli = parser.parse_args()

# launch omniverse app
app_launcher = AppLauncher(args_cli)
simulation_app = app_launcher.app

"""Rest everything follows."""

import torch

import isaaclab.sim as sim_utils
from isaaclab.assets import AssetBaseCfg, RigidObjectCfg
from isaaclab.scene import InteractiveScene, InteractiveSceneCfg
from isaaclab.sensors import FrameTransformerCfg
from isaaclab.utils import configclass

##
# Pre-defined configs
##
from isaaclab_assets.robots.anymal import ANYMAL_C_CFG  # isort: skip


@configclass
class FrameTransformerSensorSceneCfg(InteractiveSceneCfg):
    """Design the scene with sensors on the robot."""

    # ground plane
    ground = AssetBaseCfg(prim_path="/World/defaultGroundPlane", spawn=sim_utils.GroundPlaneCfg())

    # lights
    dome_light = AssetBaseCfg(
        prim_path="/World/Light", spawn=sim_utils.DomeLightCfg(intensity=3000.0, color=(0.75, 0.75, 0.75))
    )

    # robot
    robot = ANYMAL_C_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot")

    # Rigid Object
    cube = RigidObjectCfg(
        prim_path="{ENV_REGEX_NS}/Cube",
        spawn=sim_utils.CuboidCfg(
            size=(1, 1, 1),
            rigid_props=sim_utils.RigidBodyPropertiesCfg(),
            mass_props=sim_utils.MassPropertiesCfg(mass=100.0),
            collision_props=sim_utils.CollisionPropertiesCfg(),
            physics_material=sim_utils.RigidBodyMaterialCfg(static_friction=1.0),
            visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(0.0, 1.0, 0.0), metallic=0.2),
        ),
        init_state=RigidObjectCfg.InitialStateCfg(pos=(5, 0, 0.5)),
    )

    specific_transforms = FrameTransformerCfg(
        prim_path="{ENV_REGEX_NS}/Robot/base",
        target_frames=[
            FrameTransformerCfg.FrameCfg(prim_path="{ENV_REGEX_NS}/Robot/LF_FOOT"),
            FrameTransformerCfg.FrameCfg(prim_path="{ENV_REGEX_NS}/Robot/RF_FOOT"),
        ],
        debug_vis=True,
    )

    cube_transform = FrameTransformerCfg(
        prim_path="{ENV_REGEX_NS}/Robot/base",
        target_frames=[FrameTransformerCfg.FrameCfg(prim_path="{ENV_REGEX_NS}/Cube")],
        debug_vis=False,
    )

    robot_transforms = FrameTransformerCfg(
        prim_path="{ENV_REGEX_NS}/Robot/base",
        target_frames=[FrameTransformerCfg.FrameCfg(prim_path="{ENV_REGEX_NS}/Robot/.*")],
        debug_vis=False,
    )


def run_simulator(sim: sim_utils.SimulationContext, scene: InteractiveScene):
    """Run the simulator."""
    # Define simulation stepping
    sim_dt = sim.get_physics_dt()
    sim_time = 0.0
    count = 0

    # Simulate physics
    while simulation_app.is_running():

        if count % 500 == 0:
            # reset counter
            count = 0
            # reset the scene entities
            # root state
            # we offset the root state by the origin since the states are written in simulation world frame
            # if this is not done, then the robots will be spawned at the (0, 0, 0) of the simulation world
            root_state = scene["robot"].data.default_root_state.clone()
            root_state[:, :3] += scene.env_origins
            scene["robot"].write_root_pose_to_sim(root_state[:, :7])
            scene["robot"].write_root_velocity_to_sim(root_state[:, 7:])
            # set joint positions with some noise
            joint_pos, joint_vel = (
                scene["robot"].data.default_joint_pos.clone(),
                scene["robot"].data.default_joint_vel.clone(),
            )
            joint_pos += torch.rand_like(joint_pos) * 0.1
            scene["robot"].write_joint_state_to_sim(joint_pos, joint_vel)
            # clear internal buffers
            scene.reset()
            print("[INFO]: Resetting robot state...")
        # Apply default actions to the robot
        # -- generate actions/commands
        targets = scene["robot"].data.default_joint_pos
        # -- apply action to the robot
        scene["robot"].set_joint_position_target(targets)
        # -- write data to sim
        scene.write_data_to_sim()
        # perform step
        sim.step()
        # update sim-time
        sim_time += sim_dt
        count += 1
        # update buffers
        scene.update(sim_dt)

        # print information from the sensors
        print("-------------------------------")
        print(scene["specific_transforms"])
        print("relative transforms:", scene["specific_transforms"].data.target_pos_source)
        print("relative orientations:", scene["specific_transforms"].data.target_quat_source)
        print("-------------------------------")
        print(scene["cube_transform"])
        print("relative transform:", scene["cube_transform"].data.target_pos_source)
        print("-------------------------------")
        print(scene["robot_transforms"])
        print("relative transforms:", scene["robot_transforms"].data.target_pos_source)


def main():
    """Main function."""

    # Initialize the simulation context
    sim_cfg = sim_utils.SimulationCfg(dt=0.005, device=args_cli.device)
    sim = sim_utils.SimulationContext(sim_cfg)
    # Set main camera
    sim.set_camera_view(eye=[3.5, 3.5, 3.5], target=[0.0, 0.0, 0.0])
    # design scene
    scene_cfg = FrameTransformerSensorSceneCfg(num_envs=args_cli.num_envs, env_spacing=2.0)
    scene = InteractiveScene(scene_cfg)
    # Play the simulator
    sim.reset()
    # Now we are ready!
    print("[INFO]: Setup complete...")
    # Run the simulator
    run_simulator(sim, scene)


if __name__ == "__main__":
    # run the main function
    main()
    # close sim app
    simulation_app.close()