Saving rendered images and 3D re-projection#

This guide accompanies the run_usd_camera.py script in the IsaacLab/source/standalone/tutorials/04_sensors directory.

Code for run_usd_camera.py
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""
This script shows how to use the camera sensor from the Isaac Lab framework.

The camera sensor is created and interfaced through the Omniverse Replicator API. However, instead of using
the simulator or OpenGL convention for the camera, we use the robotics or ROS convention.

.. code-block:: bash

    # Usage with GUI
    ./isaaclab.sh -p source/standalone/tutorials/04_sensors/run_usd_camera.py

    # Usage with headless
    ./isaaclab.sh -p source/standalone/tutorials/04_sensors/run_usd_camera.py --headless --enable_cameras

"""

"""Launch Isaac Sim Simulator first."""

import argparse

from omni.isaac.lab.app import AppLauncher

# add argparse arguments
parser = argparse.ArgumentParser(description="This script demonstrates how to use the camera sensor.")
parser.add_argument(
    "--draw",
    action="store_true",
    default=False,
    help="Draw the pointcloud from camera at index specified by ``--camera_id``.",
)
parser.add_argument(
    "--save",
    action="store_true",
    default=False,
    help="Save the data from camera at index specified by ``--camera_id``.",
)
parser.add_argument(
    "--camera_id",
    type=int,
    choices={0, 1},
    default=0,
    help=(
        "The camera ID to use for displaying points or saving the camera data. Default is 0."
        " The viewport will always initialize with the perspective of camera 0."
    ),
)
# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
# parse the arguments
args_cli = parser.parse_args()
args_cli.enable_cameras = True
# launch omniverse app
app_launcher = AppLauncher(args_cli)
simulation_app = app_launcher.app

"""Rest everything follows."""

import numpy as np
import os
import random
import torch

import omni.isaac.core.utils.prims as prim_utils
import omni.replicator.core as rep

import omni.isaac.lab.sim as sim_utils
from omni.isaac.lab.assets import RigidObject, RigidObjectCfg
from omni.isaac.lab.markers import VisualizationMarkers
from omni.isaac.lab.markers.config import RAY_CASTER_MARKER_CFG
from omni.isaac.lab.sensors.camera import Camera, CameraCfg
from omni.isaac.lab.sensors.camera.utils import create_pointcloud_from_depth
from omni.isaac.lab.utils import convert_dict_to_backend


def define_sensor() -> Camera:
    """Defines the camera sensor to add to the scene."""
    # Setup camera sensor
    # In contrast to the ray-cast camera, we spawn the prim at these locations.
    # This means the camera sensor will be attached to these prims.
    prim_utils.create_prim("/World/Origin_00", "Xform")
    prim_utils.create_prim("/World/Origin_01", "Xform")
    camera_cfg = CameraCfg(
        prim_path="/World/Origin_.*/CameraSensor",
        update_period=0,
        height=480,
        width=640,
        data_types=[
            "rgb",
            "distance_to_image_plane",
            "normals",
            "semantic_segmentation",
            "instance_segmentation_fast",
            "instance_id_segmentation_fast",
        ],
        colorize_semantic_segmentation=True,
        colorize_instance_id_segmentation=True,
        colorize_instance_segmentation=True,
        spawn=sim_utils.PinholeCameraCfg(
            focal_length=24.0, focus_distance=400.0, horizontal_aperture=20.955, clipping_range=(0.1, 1.0e5)
        ),
    )
    # Create camera
    camera = Camera(cfg=camera_cfg)

    return camera


def design_scene() -> dict:
    """Design the scene."""
    # Populate scene
    # -- Ground-plane
    cfg = sim_utils.GroundPlaneCfg()
    cfg.func("/World/defaultGroundPlane", cfg)
    # -- Lights
    cfg = sim_utils.DistantLightCfg(intensity=3000.0, color=(0.75, 0.75, 0.75))
    cfg.func("/World/Light", cfg)

    # Create a dictionary for the scene entities
    scene_entities = {}

    # Xform to hold objects
    prim_utils.create_prim("/World/Objects", "Xform")
    # Random objects
    for i in range(8):
        # sample random position
        position = np.random.rand(3) - np.asarray([0.05, 0.05, -1.0])
        position *= np.asarray([1.5, 1.5, 0.5])
        # sample random color
        color = (random.random(), random.random(), random.random())
        # choose random prim type
        prim_type = random.choice(["Cube", "Cone", "Cylinder"])
        common_properties = {
            "rigid_props": sim_utils.RigidBodyPropertiesCfg(),
            "mass_props": sim_utils.MassPropertiesCfg(mass=5.0),
            "collision_props": sim_utils.CollisionPropertiesCfg(),
            "visual_material": sim_utils.PreviewSurfaceCfg(diffuse_color=color, metallic=0.5),
            "semantic_tags": [("class", prim_type)],
        }
        if prim_type == "Cube":
            shape_cfg = sim_utils.CuboidCfg(size=(0.25, 0.25, 0.25), **common_properties)
        elif prim_type == "Cone":
            shape_cfg = sim_utils.ConeCfg(radius=0.1, height=0.25, **common_properties)
        elif prim_type == "Cylinder":
            shape_cfg = sim_utils.CylinderCfg(radius=0.25, height=0.25, **common_properties)
        # Rigid Object
        obj_cfg = RigidObjectCfg(
            prim_path=f"/World/Objects/Obj_{i:02d}",
            spawn=shape_cfg,
            init_state=RigidObjectCfg.InitialStateCfg(pos=position),
        )
        scene_entities[f"rigid_object{i}"] = RigidObject(cfg=obj_cfg)

    # Sensors
    camera = define_sensor()

    # return the scene information
    scene_entities["camera"] = camera
    return scene_entities


def run_simulator(sim: sim_utils.SimulationContext, scene_entities: dict):
    """Run the simulator."""
    # extract entities for simplified notation
    camera: Camera = scene_entities["camera"]

    # Create replicator writer
    output_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "output", "camera")
    rep_writer = rep.BasicWriter(
        output_dir=output_dir,
        frame_padding=0,
        colorize_instance_id_segmentation=camera.cfg.colorize_instance_id_segmentation,
        colorize_instance_segmentation=camera.cfg.colorize_instance_segmentation,
        colorize_semantic_segmentation=camera.cfg.colorize_semantic_segmentation,
    )

    # Camera positions, targets, orientations
    camera_positions = torch.tensor([[2.5, 2.5, 2.5], [-2.5, -2.5, 2.5]], device=sim.device)
    camera_targets = torch.tensor([[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]], device=sim.device)
    # These orientations are in ROS-convention, and will position the cameras to view the origin
    camera_orientations = torch.tensor(  # noqa: F841
        [[-0.1759, 0.3399, 0.8205, -0.4247], [-0.4247, 0.8205, -0.3399, 0.1759]], device=sim.device
    )

    # Set pose: There are two ways to set the pose of the camera.
    # -- Option-1: Set pose using view
    camera.set_world_poses_from_view(camera_positions, camera_targets)
    # -- Option-2: Set pose using ROS
    # camera.set_world_poses(camera_positions, camera_orientations, convention="ros")

    # Index of the camera to use for visualization and saving
    camera_index = args_cli.camera_id

    # Create the markers for the --draw option outside of is_running() loop
    if sim.has_gui() and args_cli.draw:
        cfg = RAY_CASTER_MARKER_CFG.replace(prim_path="/Visuals/CameraPointCloud")
        cfg.markers["hit"].radius = 0.002
        pc_markers = VisualizationMarkers(cfg)

    # Simulate physics
    while simulation_app.is_running():
        # Step simulation
        sim.step()
        # Update camera data
        camera.update(dt=sim.get_physics_dt())

        # Print camera info
        print(camera)
        if "rgb" in camera.data.output.keys():
            print("Received shape of rgb image        : ", camera.data.output["rgb"].shape)
        if "distance_to_image_plane" in camera.data.output.keys():
            print("Received shape of depth image      : ", camera.data.output["distance_to_image_plane"].shape)
        if "normals" in camera.data.output.keys():
            print("Received shape of normals          : ", camera.data.output["normals"].shape)
        if "semantic_segmentation" in camera.data.output.keys():
            print("Received shape of semantic segm.   : ", camera.data.output["semantic_segmentation"].shape)
        if "instance_segmentation_fast" in camera.data.output.keys():
            print("Received shape of instance segm.   : ", camera.data.output["instance_segmentation_fast"].shape)
        if "instance_id_segmentation_fast" in camera.data.output.keys():
            print("Received shape of instance id segm.: ", camera.data.output["instance_id_segmentation_fast"].shape)
        print("-------------------------------")

        # Extract camera data
        if args_cli.save:
            # Save images from camera at camera_index
            # note: BasicWriter only supports saving data in numpy format, so we need to convert the data to numpy.
            # tensordict allows easy indexing of tensors in the dictionary
            single_cam_data = convert_dict_to_backend(camera.data.output[camera_index], backend="numpy")

            # Extract the other information
            single_cam_info = camera.data.info[camera_index]

            # Pack data back into replicator format to save them using its writer
            rep_output = {"annotators": {}}
            for key, data, info in zip(single_cam_data.keys(), single_cam_data.values(), single_cam_info.values()):
                if info is not None:
                    rep_output["annotators"][key] = {"render_product": {"data": data, **info}}
                else:
                    rep_output["annotators"][key] = {"render_product": {"data": data}}
            # Save images
            # Note: We need to provide On-time data for Replicator to save the images.
            rep_output["trigger_outputs"] = {"on_time": camera.frame[camera_index]}
            rep_writer.write(rep_output)

        # Draw pointcloud if there is a GUI and --draw has been passed
        if sim.has_gui() and args_cli.draw and "distance_to_image_plane" in camera.data.output.keys():
            # Derive pointcloud from camera at camera_index
            pointcloud = create_pointcloud_from_depth(
                intrinsic_matrix=camera.data.intrinsic_matrices[camera_index],
                depth=camera.data.output[camera_index]["distance_to_image_plane"],
                position=camera.data.pos_w[camera_index],
                orientation=camera.data.quat_w_ros[camera_index],
                device=sim.device,
            )

            # In the first few steps, things are still being instanced and Camera.data
            # can be empty. If we attempt to visualize an empty pointcloud it will crash
            # the sim, so we check that the pointcloud is not empty.
            if pointcloud.size()[0] > 0:
                pc_markers.visualize(translations=pointcloud)


def main():
    """Main function."""
    # Load simulation context
    sim_cfg = sim_utils.SimulationCfg(device=args_cli.device)
    sim = sim_utils.SimulationContext(sim_cfg)
    # Set main camera
    sim.set_camera_view([2.5, 2.5, 2.5], [0.0, 0.0, 0.0])
    # design the scene
    scene_entities = design_scene()
    # Play simulator
    sim.reset()
    # Now we are ready!
    print("[INFO]: Setup complete...")
    # Run simulator
    run_simulator(sim, scene_entities)


if __name__ == "__main__":
    # run the main function
    main()
    # close sim app
    simulation_app.close()

Saving using the Replicator Basic Writer#

To save the camera outputs, we use the basic writer class from Omniverse Replicator. This class allows us to save the images in numpy format. For more information on the basic writer, please check the documentation.

    rep_writer = rep.BasicWriter(
        output_dir=output_dir,
        frame_padding=0,
        colorize_instance_id_segmentation=camera.cfg.colorize_instance_id_segmentation,
        colorize_instance_segmentation=camera.cfg.colorize_instance_segmentation,
        colorize_semantic_segmentation=camera.cfg.colorize_semantic_segmentation,
    )

While the simulator is running, the images can be saved to the defined folder. Since the BasicWriter only supports saving data in numpy format, we first need to convert the sensor data from PyTorch tensors to numpy arrays and pack them into a dictionary.

            # Save images from camera at camera_index
            # note: BasicWriter only supports saving data in numpy format, so we need to convert the data to numpy.
            # tensordict allows easy indexing of tensors in the dictionary
            single_cam_data = convert_dict_to_backend(camera.data.output[camera_index], backend="numpy")

            # Extract the other information
            single_cam_info = camera.data.info[camera_index]
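
Conceptually, this conversion just moves each output tensor to the CPU and turns it into a NumPy array. The snippet below is only an illustrative sketch of that idea, using a made-up stand-in tensor rather than real camera data:

import torch

# Hypothetical per-camera output: each value is a torch.Tensor on the simulation device.
example_output = {"rgb": torch.zeros((480, 640, 4), dtype=torch.uint8)}

# Equivalent of the backend conversion: move each tensor to the CPU and convert it to NumPy.
example_numpy = {key: value.cpu().numpy() for key, value in example_output.items()}
print({key: (value.shape, value.dtype) for key, value in example_numpy.items()})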

After this step, we can save the images using the BasicWriter.

            # Pack data back into replicator format to save them using its writer
            rep_output = {"annotators": {}}
            for key, data, info in zip(single_cam_data.keys(), single_cam_data.values(), single_cam_info.values()):
                if info is not None:
                    rep_output["annotators"][key] = {"render_product": {"data": data, **info}}
                else:
                    rep_output["annotators"][key] = {"render_product": {"data": data}}
            # Save images
            # Note: We need to provide On-time data for Replicator to save the images.
            rep_output["trigger_outputs"] = {"on_time": camera.frame[camera_index]}
            rep_writer.write(rep_output)
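
Once the writer has run, the saved files can be inspected offline. The exact file names and formats depend on the Replicator version and the enabled annotators, so the following is only a rough sketch that loads whatever PNG and .npy files end up in the output folder:

import glob
import os

import numpy as np
from PIL import Image  # assumed to be available in the Python environment

output_dir = os.path.join("output", "camera")  # same folder the script writes to

# Image-like annotators (e.g. rgb, segmentation) are typically stored as PNG files.
for png_path in sorted(glob.glob(os.path.join(output_dir, "*.png"))):
    image = np.asarray(Image.open(png_path))
    print(png_path, image.shape, image.dtype)

# Array-like annotators (e.g. distance_to_image_plane) are typically stored as .npy files.
for npy_path in sorted(glob.glob(os.path.join(output_dir, "*.npy"))):
    array = np.load(npy_path)
    print(npy_path, array.shape, array.dtype)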

Projection into 3D Space#

We include utilities to project the depth image into 3D space. The re-projection is performed with PyTorch operations, which allows faster computation.

from omni.isaac.lab.utils.math import transform_points, unproject_depth

# Pointcloud in world frame
points_3d_cam = unproject_depth(
    camera.data.output["distance_to_image_plane"], camera.data.intrinsic_matrices
)

points_3d_world = transform_points(points_3d_cam, camera.data.pos_w, camera.data.quat_w_ros)
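
For intuition, unprojecting a single pixel follows the standard pinhole model: a pixel (u, v) with depth d maps to the camera-frame point d * K^-1 * [u, v, 1]^T, where K is the intrinsic matrix. A small NumPy sketch with made-up intrinsics and pixel values illustrates this:

import numpy as np

# Made-up intrinsics (focal lengths and principal point in pixels) -- not taken from the script.
K = np.array([[600.0, 0.0, 320.0],
              [0.0, 600.0, 240.0],
              [0.0, 0.0, 1.0]])

u, v, depth = 400.0, 260.0, 2.0  # assumed pixel coordinates and depth in meters

# Unproject the pixel: p_cam = depth * K^-1 @ [u, v, 1]
point_cam = depth * np.linalg.inv(K) @ np.array([u, v, 1.0])
print(point_cam)  # 3D point in the camera (optical) frame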

Alternatively, we can use the omni.isaac.lab.sensors.camera.utils.create_pointcloud_from_depth() function to create a pointcloud from the depth image and transform it to the world frame.

            # Derive pointcloud from camera at camera_index
            pointcloud = create_pointcloud_from_depth(
                intrinsic_matrix=camera.data.intrinsic_matrices[camera_index],
                depth=camera.data.output[camera_index]["distance_to_image_plane"],
                position=camera.data.pos_w[camera_index],
                orientation=camera.data.quat_w_ros[camera_index],
                device=sim.device,
            )

The resulting pointcloud can be visualized using the omni.isaac.debug_draw extension from Isaac Sim. This makes it easy to inspect the points in 3D space directly in the viewport.

            # In the first few steps, things are still being instanced and Camera.data
            # can be empty. If we attempt to visualize an empty pointcloud it will crash
            # the sim, so we check that the pointcloud is not empty.
            if pointcloud.size()[0] > 0:
                pc_markers.visualize(translations=pointcloud)
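
If you also want to keep the re-projected points, one simple option is to dump them to disk alongside the images. This is not part of the script; the sketch below assumes that pointcloud is the (N, 3) tensor computed above:

import numpy as np

# Move the points to the CPU and save them as a plain "x y z" text file that most
# point-cloud viewers can open. The file name is arbitrary.
points = pointcloud.cpu().numpy()
np.savetxt("pointcloud.xyz", points, fmt="%.4f")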

Executing the script#

To run the accompanying script, execute the following commands:

# Usage with saving and drawing
./isaaclab.sh -p source/standalone/tutorials/04_sensors/run_usd_camera.py --save --draw

# Usage with saving only in headless mode
./isaaclab.sh -p source/standalone/tutorials/04_sensors/run_usd_camera.py --save --headless --enable_cameras

The simulation should start, and you can observe the different objects falling down. An output folder will be created in the IsaacLab/source/standalone/tutorials/04_sensors directory, where the images will be saved. In addition, you should see the point cloud drawn in 3D space on the viewport.

To stop the simulation, close the window, or use Ctrl+C in the terminal.