API#

Python API#

BaseIsaacBenchmarkAsync

Benchmark class for async test cases.

BaseIsaacBenchmark

Benchmark class for standalone (synchronous) scripts.


class BaseIsaacBenchmarkAsync(*args: Any, **kwargs: Any)#

Bases: _BaseIsaacBenchmarkCore, AsyncTestCase

Benchmark class for async test cases.

Example:

class MyBenchmark(BaseIsaacBenchmarkAsync):
    async def setUp(self):
        await super().setUp()

    async def test_my_benchmark(self):
        self.set_phase("loading")
        await self.fully_load_stage("path/to/stage.usd")
        await self.store_measurements()

        self.set_phase("benchmark")
        # ... run benchmark ...
        await self.store_measurements()

    async def tearDown(self):
        await super().tearDown()
async fully_load_stage(usd_path: str)#

Open a stage and wait for it to fully load.

Parameters:

usd_path – Path to USD stage.

Example:

await benchmark.fully_load_stage("/path/to/scene.usd")
async setUp(
backend_type: str = 'JSONFileMetrics',
report_generation: bool = False,
workflow_metadata: dict | None = None,
recorders: list[str] | None = None,
)#

Must be awaited by derived benchmarks to properly set up the benchmark.

Parameters:
  • backend_type – Type of backend used to collect and print metrics.

  • report_generation – Whether to generate a formatted report.

  • workflow_metadata – Metadata describing the benchmark workflow.

  • recorders – List of recorder names to use, or None for defaults.

set_phase(
phase: str,
start_recording_frametime: bool = True,
start_recording_runtime: bool = True,
)#

Set the active benchmarking phase and start recorders.

Parameters:
  • phase – Name of the phase, used in output.

  • start_recording_frametime – False to skip frametime recorders.

  • start_recording_runtime – False to skip runtime recorder.

Raises:

RuntimeError – If the benchmark context or recorders are not initialized.

Example:

benchmark.set_phase("loading", start_recording_frametime=False)
async store_custom_measurement(
phase_name: str,
custom_measurement: isaacsim.benchmark.services.metrics.measurements.Measurement,
)#

Store a custom measurement for the current benchmark.

Parameters:
  • phase_name – The phase name to which the measurement belongs.

  • custom_measurement – The measurement object to store.

Example:

await benchmark.store_custom_measurement("warmup", custom_measurement)
async store_measurements()#

Store measurements and metadata collected during the previous phase.

Example:

await benchmark.store_measurements()
async tearDown()#

Tear down the benchmark and finalize metrics.

class BaseIsaacBenchmark(
benchmark_name: str = 'BaseIsaacBenchmark',
backend_type: str = 'OmniPerfKPIFile',
report_generation: bool = True,
workflow_metadata: dict | None = None,
recorders: list[str] | None = None,
)#

Bases: _BaseIsaacBenchmarkCore

Benchmark class for standalone (synchronous) scripts.

Parameters:
  • benchmark_name – Name of benchmark to use in outputs.

  • backend_type – Type of backend used to collect and print metrics.

  • report_generation – Whether to generate a formatted report.

  • workflow_metadata – Metadata describing the benchmark workflow.

  • recorders – List of recorder names to use, or None for defaults.

Example:

benchmark = BaseIsaacBenchmark(benchmark_name="MyBenchmark", workflow_metadata={"metadata": []})
benchmark.set_phase("loading")
# load stage, configure sim, etc.
benchmark.store_measurements()
benchmark.set_phase("benchmark")
# run benchmark
benchmark.store_measurements()
benchmark.stop()
fully_load_stage(usd_path: str)#

Load a USD stage and block until it is fully loaded.

Parameters:

usd_path – Path to USD stage.

Example:

benchmark.fully_load_stage("/path/to/scene.usd")
set_phase(
phase: str,
start_recording_frametime: bool = True,
start_recording_runtime: bool = True,
)#

Set the active benchmarking phase and start recorders.

Parameters:
  • phase – Name of the phase, used in output.

  • start_recording_frametime – False to skip frametime recorders.

  • start_recording_runtime – False to skip runtime recorder.

Raises:

RuntimeError – If the benchmark context or recorders are not initialized.

Example:

benchmark.set_phase("loading", start_recording_frametime=False)
stop()#

Stop benchmarking and write accumulated metrics to file.

Example:

benchmark.stop()
store_custom_measurement(
phase_name: str,
custom_measurement: isaacsim.benchmark.services.metrics.measurements.Measurement,
)#

Store a custom measurement for the current benchmark.

Parameters:
  • phase_name – The phase name to which the measurement belongs.

  • custom_measurement – The measurement object to store.

Example:

benchmark.store_custom_measurement("warmup", custom_measurement)
store_measurements()#

Store measurements and metadata collected during the previous phase.

Example:

benchmark.store_measurements()