From 0ccd58d1a7465e33615d4936ecf1bd6893ce96f9 Mon Sep 17 00:00:00 2001 From: yuecideng Date: Mon, 5 Jan 2026 10:36:44 +0800 Subject: [PATCH 1/9] wip --- docs/source/overview/sim/index.rst | 5 +- docs/source/overview/sim/sim_manager.md | 198 ++++++++++++++++++++++++ embodichain/lab/sim/sim_manager.py | 2 +- 3 files changed, 203 insertions(+), 2 deletions(-) create mode 100644 docs/source/overview/sim/sim_manager.md diff --git a/docs/source/overview/sim/index.rst b/docs/source/overview/sim/index.rst index fd6e56fd..2ee32098 100644 --- a/docs/source/overview/sim/index.rst +++ b/docs/source/overview/sim/index.rst @@ -14,10 +14,13 @@ Overview of the Simulation Framework: - Kinematics Solver - Motion Generation +Table of Contents +================= .. toctree:: :maxdepth: 1 :glob: - + + sim_manager.md solvers/index planners/index diff --git a/docs/source/overview/sim/sim_manager.md b/docs/source/overview/sim/sim_manager.md new file mode 100644 index 00000000..3e5dc7f1 --- /dev/null +++ b/docs/source/overview/sim/sim_manager.md @@ -0,0 +1,198 @@ +# Simulation Manager + +The `SimulationManager` is the central class in EmbodiChain's simulation framework for managing the simulation lifecycle. It handles: +- **Asset Management**: Loading and managing robots, rigid objects, soft objects, articulations, sensors, and lights. +- **Simulation Loop**: Controlling the physics stepping and rendering updates. +- **Rendering**: Managing the simulation window, camera rendering, material settings and ray-tracing configuration. +- **Interaction**: Providing gizmo controls for interactive manipulation of objects. + +## Configuration + +The simulation is configured using the `SimulationManagerCfg` class. + +```python +from embodichain.lab.sim import SimulationManagerCfg + +sim_config = SimulationManagerCfg( + width=1920, # Window width + height=1080, # Window height + num_envs=10, # Number of parallel environments + physics_dt=0.01, # Physics time step + sim_device="cpu", # Simulation device ("cpu" or "cuda:0", etc.) + arena_space=5.0 # Spacing between environments +) +``` + +### Configuration Parameters + +| Parameter | Type | Default | Description | +| :--- | :--- | :--- | :--- | +| `width` | `int` | `1920` | The width of the simulation window. | +| `height` | `int` | `1080` | The height of the simulation window. | +| `headless` | `bool` | `False` | Whether to run the simulation in headless mode (no Window). | +| `enable_rt` | `bool` | `False` | Whether to enable ray tracing rendering. | +| `enable_denoiser` | `bool` | `True` | Whether to enable denoising for ray tracing rendering. | +| `spp` | `int` | `64` | Samples per pixel for ray tracing rendering. Only valid when ray tracing is enabled and denoiser is False. | +| `gpu_id` | `int` | `0` | The gpu index that the simulation engine will be used. Affects gpu physics device. | +| `thread_mode` | `ThreadMode` | `RENDER_SHARE_ENGINE` | The threading mode for the simulation engine. | +| `cpu_num` | `int` | `1` | The number of CPU threads to use for the simulation engine. | +| `num_envs` | `int` | `1` | The number of parallel environments (arenas) to simulate. | +| `arena_space` | `float` | `5.0` | The distance between each arena when building multiple arenas. | +| `physics_dt` | `float` | `0.01` | The time step for the physics simulation. | +| `sim_device` | `str` \| `torch.device` | `"cpu"` | The device for the physics simulation. | +| `physics_config` | `PhysicsCfg` | `PhysicsCfg()` | The physics configuration parameters. 
| +| `gpu_memory_config` | `GPUMemoryCfg` | `GPUMemoryCfg()` | The GPU memory configuration parameters. | + +## Initialization + +Initialize the manager with the configuration object: + +```python +from embodichain.lab.sim import SimulationManager, SimulationManagerCfg + +# User can customize the config as needed. +sim_config = SimulationManagerCfg() +sim = SimulationManager(sim_config) +``` + +## Asset Management + +The manager provides methods to add and retrieve various simulation assets. + +### Robots + +Add robots using `RobotCfg`. + +```python +from embodichain.lab.sim.cfg import RobotCfg + +robot_cfg = RobotCfg(uid="my_robot", ...) +robot = sim.add_robot(robot_cfg) + +# Retrieve existing robot +robot = sim.get_robot("my_robot") +``` + +### Rigid Objects + +Add rigid bodies (e.g., cubes, meshes) using `RigidObjectCfg`. + +```python +from embodichain.lab.sim.cfg import RigidObjectCfg + +obj_cfg = RigidObjectCfg(uid="cube", ...) +obj = sim.add_rigid_object(obj_cfg) +``` + +### Sensors + +Add sensors (e.g., Cameras) using `SensorCfg`. + +```python +from embodichain.lab.sim.sensors import CameraCfg + +camera_cfg = CameraCfg(uid="cam1", ...) +camera = sim.add_sensor(camera_cfg) +``` + +### Lights + +Add lights to the scene using `LightCfg`. + +```python +from embodichain.lab.sim.cfg import LightCfg + +light_cfg = LightCfg(uid="sun", light_type="point", ...) +light = sim.add_light(light_cfg) +``` + +## Simulation Loop + +The simulation loop typically involves stepping the physics and rendering the scene. + +```python +while True: + # Step physics and render + sim.update() + + # Or step manually + # sim.step_physics() + # sim.render() +``` + +### Methods + +- **`update(physics_dt=None, step=1)`**: Steps the physics simulation and updates the rendering. +- **`enable_physics(enable: bool)`**: Enable or disable physics simulation. +- **`set_manual_update(enable: bool)`**: Set manual update mode for physics. + +## Rendering + +- **`render_camera_group()`**: Renders all cameras in the scene. +- **`open_window()`**: Opens the visualization window. +- **`close_window()`**: Closes the visualization window. + +## Gizmos + +Gizmos allow interactive control of objects in the simulation window. + +```python +# Enable gizmo for a robot +sim.enable_gizmo(uid="my_robot", control_part="arm") + +# Toggle visibility +sim.toggle_gizmo_visibility(uid="my_robot", control_part="arm") + +# Disable gizmo +sim.disable_gizmo(uid="my_robot", control_part="arm") +``` + +## Example Usage + +Below is a complete example of setting up a simulation with a robot and a sensor. + +```python +import argparse +from embodichain.lab.sim import SimulationManager, SimulationManagerCfg +from embodichain.lab.sim.sensors import CameraCfg +from embodichain.lab.sim.cfg import RobotCfg, RigidObjectCfg +from embodichain.lab.sim.shapes import CubeCfg + +# 1. Configure Simulation +config = SimulationManagerCfg( + headless=False, + sim_device="cuda", + enable_rt=True, + physics_dt=0.01 +) +sim = SimulationManager(config) + +# 2. Add a Robot +# (Assuming robot_cfg is defined) +# robot = sim.add_robot(robot_cfg) + +# 3. Add a Rigid Object +cube_cfg = RigidObjectCfg( + uid="cube", + shape=CubeCfg(size=[0.05, 0.05, 0.05]), + init_pos=[1.0, 0.0, 0.5] +) +sim.add_rigid_object(cube_cfg) + +# 4. Add a Sensor +camera_cfg = CameraCfg( + uid="camera", + width=640, + height=480, + # ... other params +) +camera = sim.add_sensor(camera_cfg) + +# 5. 
Run Simulation Loop +while True: + sim.update() + + # Access sensor data + # data = camera.get_data() +``` +``` \ No newline at end of file diff --git a/embodichain/lab/sim/sim_manager.py b/embodichain/lab/sim/sim_manager.py index 83c231ce..ddb2dabb 100644 --- a/embodichain/lab/sim/sim_manager.py +++ b/embodichain/lab/sim/sim_manager.py @@ -135,7 +135,7 @@ class SimulationManagerCfg: """The time step for the physics simulation.""" sim_device: Union[str, torch.device] = "cpu" - """The device for the simulation engine. Can be 'cpu', 'cuda', or a torch.device object.""" + """The device for the physics simulation. Can be 'cpu', 'cuda', or a torch.device object.""" physics_config: PhysicsCfg = field(default_factory=PhysicsCfg) """The physics configuration parameters.""" From 7918b7cb6faaeda470a7cda532e9509987d13ea9 Mon Sep 17 00:00:00 2001 From: yuecideng Date: Tue, 6 Jan 2026 00:29:14 +0800 Subject: [PATCH 2/9] wip --- docs/source/overview/sim/index.rst | 3 +- docs/source/overview/sim/sim_assets.md | 157 ++++++++++++++++++++++++ docs/source/overview/sim/sim_manager.md | 147 +++++----------------- docs/source/resources/roadmap.md | 6 +- embodichain/lab/sim/material.py | 28 +++-- examples/sim/demo/scoop_ice.py | 4 +- 6 files changed, 211 insertions(+), 134 deletions(-) create mode 100644 docs/source/overview/sim/sim_assets.md diff --git a/docs/source/overview/sim/index.rst b/docs/source/overview/sim/index.rst index 2ee32098..48e1a3ca 100644 --- a/docs/source/overview/sim/index.rst +++ b/docs/source/overview/sim/index.rst @@ -8,8 +8,7 @@ Overview of the Simulation Framework: - Components - Simulation Manager - - Simulation Object - - Material + - Simulation Assets - Virtual Sensor - Kinematics Solver - Motion Generation diff --git a/docs/source/overview/sim/sim_assets.md b/docs/source/overview/sim/sim_assets.md new file mode 100644 index 00000000..a00f9022 --- /dev/null +++ b/docs/source/overview/sim/sim_assets.md @@ -0,0 +1,157 @@ +# Simulation Assets + +Simulation assets in EmbodiChain are configured using Python dataclasses. This approach provides a structured and type-safe way to define properties for physics, materials, objects and sensors in the simulation environment. + +## Physics Configuration + +The `PhysicsCfg` class controls the global physics simulation parameters. + +| Parameter | Type | Default | Description | +| :--- | :--- | :--- | :--- | +| `gravity` | `np.ndarray` | `[0, 0, -9.81]` | Gravity vector for the simulation environment. | +| `bounce_threshold` | `float` | `2.0` | The speed threshold below which collisions will not produce bounce effects. | +| `enable_pcm` | `bool` | `True` | Enable persistent contact manifold (PCM) for improved collision handling. | +| `enable_tgs` | `bool` | `True` | Enable temporal gauss-seidel (TGS) solver for better stability. | +| `enable_ccd` | `bool` | `False` | Enable continuous collision detection (CCD) for fast-moving objects. | +| `enable_enhanced_determinism` | `bool` | `False` | Enable enhanced determinism for consistent simulation results. | +| `enable_friction_every_iteration` | `bool` | `True` | Enable friction calculations at every solver iteration. | +| `length_tolerance` | `float` | `0.05` | The length tolerance for the simulation. Larger values increase speed. | +| `speed_tolerance` | `float` | `0.25` | The speed tolerance for the simulation. Larger values increase speed. 
| + +## Materials + +### Visual Materials + +The `VisualMaterialCfg` class defines the visual appearance of objects using Physically Based Rendering (PBR) properties. + +| Parameter | Type | Default | Description | +| :--- | :--- | :--- | :--- | +| `uid` | `str` | `"default_mat"` | Unique identifier for the material. | +| `base_color` | `list` | `[0.5, 0.5, 0.5, 1.0]` | Base color/diffuse color (RGBA). | +| `metallic` | `float` | `0.0` | Metallic factor (0.0 = dielectric, 1.0 = metallic). | +| `roughness` | `float` | `0.5` | Surface roughness (0.0 = smooth, 1.0 = rough). | +| `emissive` | `list` | `[0.0, 0.0, 0.0]` | Emissive color (RGB). | +| `emissive_intensity` | `float` | `1.0` | Emissive intensity multiplier. | +| `base_color_texture` | `str` | `None` | Path to base color texture map. | +| `metallic_texture` | `str` | `None` | Path to metallic map. | +| `roughness_texture` | `str` | `None` | Path to roughness map. | +| `normal_texture` | `str` | `None` | Path to normal map. | +| `ao_texture` | `str` | `None` | Path to ambient occlusion map. | +| `ior` | `float` | `1.5` | Index of refraction for ray tracing materials. | +| `material_type` | `str` | `"BRDF"` | material type. | + +## Objects + +All objects inherit from `ObjectBaseCfg`, which provides common properties. + +**Base Properties (`ObjectBaseCfg`)** + +| Parameter | Type | Default | Description | +| :--- | :--- | :--- | :--- | +| `uid` | `str` | `None` | Unique identifier. | +| `init_pos` | `tuple` | `(0.0, 0.0, 0.0)` | Position of the root in simulation world frame. | +| `init_rot` | `tuple` | `(0.0, 0.0, 0.0)` | Euler angles (in degrees) of the root. | +| `init_local_pose` | `np.ndarray` | `None` | 4x4 transformation matrix (overrides `init_pos` and `init_rot`). | + +## Rigid Object + +Configured via `RigidObjectCfg`. + +| Parameter | Type | Default | Description | +| :--- | :--- | :--- | :--- | +| `shape` | `ShapeCfg` | `ShapeCfg()` | Shape configuration (e.g., Mesh, Box). | +| `attrs` | `RigidBodyAttributesCfg` | `RigidBodyAttributesCfg()` | Physical attributes. | +| `body_type` | `Literal` | `"dynamic"` | "dynamic", "kinematic", or "static". | +| `max_convex_hull_num` | `int` | `1` | Max convex hulls for decomposition (CoACD). | +| `body_scale` | `tuple` | `(1.0, 1.0, 1.0)` | Scale of the rigid body. | + +### Rigid Body Attributes + +The `RigidBodyAttributesCfg` class defines physical properties for rigid bodies. + +| Parameter | Type | Default | Description | +| :--- | :--- | :--- | :--- | +| `mass` | `float` | `1.0` | Mass in kg. Set to 0 to use density. | +| `density` | `float` | `1000.0` | Density in kg/m^3. | +| `angular_damping` | `float` | `0.7` | Angular damping coefficient. | +| `linear_damping` | `float` | `0.7` | Linear damping coefficient. | +| `max_depenetration_velocity` | `float` | `10.0` | Maximum depenetration velocity. | +| `sleep_threshold` | `float` | `0.001` | Threshold below which the body can go to sleep. | +| `enable_ccd` | `bool` | `False` | Enable continuous collision detection. | +| `contact_offset` | `float` | `0.002` | Contact offset for collision detection. | +| `rest_offset` | `float` | `0.001` | Rest offset for collision detection. | +| `enable_collision` | `bool` | `True` | Enable collision for the rigid body. | +| `restitution` | `float` | `0.0` | Restitution (bounciness) coefficient. | +| `dynamic_friction` | `float` | `0.5` | Dynamic friction coefficient. | +| `static_friction` | `float` | `0.5` | Static friction coefficient. | + +## Soft Object + +Configured via `SoftObjectCfg`. 
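
A minimal construction sketch is shown below, built only from the fields documented in this section. The import paths, the `MeshCfg` mesh-path argument, and the `add_soft_object` method name mirror the other examples in these docs and should be treated as assumptions rather than a definitive API reference; the tables below list the available fields.

```python
# Assumes `sim` is an existing SimulationManager (see the Simulation Manager page).
from embodichain.lab.sim.cfg import (  # import paths are assumptions
    MeshCfg,
    SoftObjectCfg,
    SoftbodyPhysicalAttributesCfg,
)

soft_cfg = SoftObjectCfg(
    uid="sponge",
    shape=MeshCfg(fpath="assets/objects/sponge.obj"),  # hypothetical mesh asset path
    physical_attr=SoftbodyPhysicalAttributesCfg(
        youngs=1e5,      # softer than the 1e6 default
        poissons=0.45,
        density=1000.0,  # density is used because mass defaults to -1.0
    ),
    init_pos=(0.0, 0.0, 0.2),
)

soft_object = sim.add_soft_object(cfg=soft_cfg)
```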
+ +| Parameter | Type | Default | Description | +| :--- | :--- | :--- | :--- | +| `voxel_attr` | `SoftbodyVoxelAttributesCfg` | `...` | Voxelization attributes. | +| `physical_attr` | `SoftbodyPhysicalAttributesCfg` | `...` | Physical attributes. | +| `shape` | `MeshCfg` | `MeshCfg()` | Mesh configuration. | + +### Soft Body Attributes + +Soft bodies require both voxelization and physical attributes. + +**Voxel Attributes (`SoftbodyVoxelAttributesCfg`)** + +| Parameter | Type | Default | Description | +| :--- | :--- | :--- | :--- | +| `triangle_remesh_resolution` | `int` | `8` | Resolution to remesh the softbody mesh before building physx collision mesh. | +| `triangle_simplify_target` | `int` | `0` | Simplify mesh faces to target value. | +| `simulation_mesh_resolution` | `int` | `8` | Resolution to build simulation voxelize textra mesh. | +| `simulation_mesh_output_obj` | `bool` | `False` | Whether to output the simulation mesh as an obj file for debugging. | + +**Physical Attributes (`SoftbodyPhysicalAttributesCfg`)** + +| Parameter | Type | Default | Description | +| :--- | :--- | :--- | :--- | +| `youngs` | `float` | `1e6` | Young's modulus (higher = stiffer). | +| `poissons` | `float` | `0.45` | Poisson's ratio (higher = closer to incompressible). | +| `dynamic_friction` | `float` | `0.0` | Dynamic friction coefficient. | +| `elasticity_damping` | `float` | `0.0` | Elasticity damping factor. | +| `material_model` | `SoftBodyMaterialModel` | `CO_ROTATIONAL` | Material constitutive model. | +| `enable_kinematic` | `bool` | `False` | If True, (partially) kinematic behavior is enabled. | +| `enable_ccd` | `bool` | `False` | Enable continuous collision detection. | +| `enable_self_collision` | `bool` | `False` | Enable self-collision handling. | +| `mass` | `float` | `-1.0` | Total mass. If negative, density is used. | +| `density` | `float` | `1000.0` | Material density in kg/m^3. | + + +### Articulations & Robots + +Configured via `ArticulationCfg` and `RobotCfg` (which inherits from `ArticulationCfg`). + +These configurations are typically loaded from URDF or MJCF files. + +### Lights + +Configured via `LightCfg`. + +| Parameter | Type | Default | Description | +| :--- | :--- | :--- | :--- | +| `light_type` | `Literal` | `"point"` | Type of light (currently only "point"). | +| `color` | `tuple` | `(1.0, 1.0, 1.0)` | RGB color. | +| `intensity` | `float` | `50.0` | Intensity in watts/m^2. | +| `radius` | `float` | `1e2` | Falloff radius. | + +### Markers + +Configured via `MarkerCfg` for debugging and visualization. + +| Parameter | Type | Default | Description | +| :--- | :--- | :--- | :--- | +| `marker_type` | `Literal` | `"axis"` | "axis", "line", or "point". | +| `axis_size` | `float` | `0.002` | Thickness of axis lines. | +| `axis_len` | `float` | `0.005` | Length of axis arms. | +| `line_color` | `list` | `[1, 1, 0, 1.0]` | RGBA color for lines. | + +### Rigid Object Groups + +`RigidObjectGroupCfg` allows initializing multiple rigid objects, potentially from a folder. diff --git a/docs/source/overview/sim/sim_manager.md b/docs/source/overview/sim/sim_manager.md index 3e5dc7f1..5f77b48c 100644 --- a/docs/source/overview/sim/sim_manager.md +++ b/docs/source/overview/sim/sim_manager.md @@ -55,144 +55,53 @@ sim_config = SimulationManagerCfg() sim = SimulationManager(sim_config) ``` -## Asset Management +## Assets Management -The manager provides methods to add and retrieve various simulation assets. 
+The manager provides methods to add, retrieve and remove various simulation assets including: +- Rigid Objects +- Soft Objects +- Articulations +- Robots +- Sensors +- Lights +- Materials -### Robots +For more details on simulation assets, please refer to their respective documentation pages. -Add robots using `RobotCfg`. - -```python -from embodichain.lab.sim.cfg import RobotCfg - -robot_cfg = RobotCfg(uid="my_robot", ...) -robot = sim.add_robot(robot_cfg) - -# Retrieve existing robot -robot = sim.get_robot("my_robot") -``` - -### Rigid Objects - -Add rigid bodies (e.g., cubes, meshes) using `RigidObjectCfg`. - -```python -from embodichain.lab.sim.cfg import RigidObjectCfg +## Simulation Loop -obj_cfg = RigidObjectCfg(uid="cube", ...) -obj = sim.add_rigid_object(obj_cfg) -``` +### Manual Update mode -### Sensors +In this mode, the physics simulation should be explicitly stepped by calling `update()` method, which provides precise control over the simulation timing. -Add sensors (e.g., Cameras) using `SensorCfg`. +The use case for manual update mode includes: +- Data generation with openai gym environments, in which the observation and action must be synchronized with the physics simulation. +- Applications that require precise dynamic control over the simulation timing. ```python -from embodichain.lab.sim.sensors import CameraCfg +while True: + # Step physics simulation. + sim.update(step=1) -camera_cfg = CameraCfg(uid="cam1", ...) -camera = sim.add_sensor(camera_cfg) + # Perform other tasks such as get data from the scene or apply sensor update. ``` -### Lights - -Add lights to the scene using `LightCfg`. - -```python -from embodichain.lab.sim.cfg import LightCfg +> The default mode is manual update mode. To switch to automatic update mode, call `set_manual_update(False)`. -light_cfg = LightCfg(uid="sun", light_type="point", ...) -light = sim.add_light(light_cfg) -``` +### Automatic Update mode -## Simulation Loop +In this mode, the physics simulation stepping is automatically handling by the physics thread running in dexsim engine, which makes it easier to use for visualization and interactive applications. -The simulation loop typically involves stepping the physics and rendering the scene. +> When in automatic update mode, user are recommanded to use CPU `sim_device` for simulation. -```python -while True: - # Step physics and render - sim.update() - - # Or step manually - # sim.step_physics() - # sim.render() -``` ### Methods -- **`update(physics_dt=None, step=1)`**: Steps the physics simulation and updates the rendering. +- **`update(physics_dt=None, step=1)`**: Steps the physics simulation with optional custom time step and number of steps. If `physics_dt` is None, uses the configured physics time step. - **`enable_physics(enable: bool)`**: Enable or disable physics simulation. - **`set_manual_update(enable: bool)`**: Set manual update mode for physics. -## Rendering +### Related Tutorials -- **`render_camera_group()`**: Renders all cameras in the scene. -- **`open_window()`**: Opens the visualization window. -- **`close_window()`**: Closes the visualization window. - -## Gizmos - -Gizmos allow interactive control of objects in the simulation window. 
- -```python -# Enable gizmo for a robot -sim.enable_gizmo(uid="my_robot", control_part="arm") - -# Toggle visibility -sim.toggle_gizmo_visibility(uid="my_robot", control_part="arm") - -# Disable gizmo -sim.disable_gizmo(uid="my_robot", control_part="arm") -``` - -## Example Usage - -Below is a complete example of setting up a simulation with a robot and a sensor. - -```python -import argparse -from embodichain.lab.sim import SimulationManager, SimulationManagerCfg -from embodichain.lab.sim.sensors import CameraCfg -from embodichain.lab.sim.cfg import RobotCfg, RigidObjectCfg -from embodichain.lab.sim.shapes import CubeCfg - -# 1. Configure Simulation -config = SimulationManagerCfg( - headless=False, - sim_device="cuda", - enable_rt=True, - physics_dt=0.01 -) -sim = SimulationManager(config) - -# 2. Add a Robot -# (Assuming robot_cfg is defined) -# robot = sim.add_robot(robot_cfg) - -# 3. Add a Rigid Object -cube_cfg = RigidObjectCfg( - uid="cube", - shape=CubeCfg(size=[0.05, 0.05, 0.05]), - init_pos=[1.0, 0.0, 0.5] -) -sim.add_rigid_object(cube_cfg) - -# 4. Add a Sensor -camera_cfg = CameraCfg( - uid="camera", - width=640, - height=480, - # ... other params -) -camera = sim.add_sensor(camera_cfg) - -# 5. Run Simulation Loop -while True: - sim.update() - - # Access sensor data - # data = camera.get_data() -``` -``` \ No newline at end of file +- [Basic scene creation](https://dexforce.github.io/EmbodiChain/tutorial/create_scene.html) +- [Interactive simulation with Gizmo](https://dexforce.github.io/EmbodiChain/tutorial/gizmo.html) \ No newline at end of file diff --git a/docs/source/resources/roadmap.md b/docs/source/resources/roadmap.md index 4999e8bf..ba160ea2 100644 --- a/docs/source/resources/roadmap.md +++ b/docs/source/resources/roadmap.md @@ -1,6 +1,6 @@ # Roadmap -Currently, EmbodiChain is under active development. Our plan for the feature roadmap is as follows: +Currently, EmbodiChain is under active development. Our roadmap is as follows: - Simulation: - Rendering: @@ -15,12 +15,10 @@ Currently, EmbodiChain is under active development. Our plan for the feature roa - We are also exploring how to integrate [newton physics](https://github.com/newton-physics/newton) into EmbodiChain as an alternative physics backend. - Sensors: - Add contact and force sensors with examples. - - Kinematics Solvers: - - Improve the existing IK solver performance and stability (especially SRSSolver and OPWSolver). - Motion Generation: - Add more advanced motion generation methods and examples. - Useful Tools: - - We are working on USD support for EmbodiChain to enable better scene creation and asset management. + - We are working on USD support for EmbodiChain to enable better asset management and interoperability. - We will release a simple Real2Sim pipeline, which enables automatic task generation from real-world data. - Robots Integration: - Add support for more robot models (eg: LeRobot, Unitree H1/G1, etc). diff --git a/embodichain/lab/sim/material.py b/embodichain/lab/sim/material.py index c66c2c38..9f2f50ca 100644 --- a/embodichain/lab/sim/material.py +++ b/embodichain/lab/sim/material.py @@ -67,10 +67,10 @@ class VisualMaterialCfg: # Ray tracing specific properties ior: float = 1.5 - """Index of refraction for ray tracing materials""" + """Index of refraction for PBR materials, only used in ray tracing.""" - rt_material_type: str = "BRDF_GGX_SMITH" - """Ray tracing material type. 
Options: 'BRDF_GGX_SMITH', 'BTDF_GGX_SMITH', 'BSDF_GGX_SMITH'""" + material_type: str = "BRDF" + """Ray tracing material type. Options: 'BRDF', 'BTDF', 'BSDF'""" # Currently disabled properties # subsurface: float = 0.0 # Subsurface scattering factor @@ -95,12 +95,24 @@ class VisualMaterial: """ RT_MATERIAL_TYPES = [ - "BRDF_GGX_SMITH", - "BTDF_GGX_SMITH", - "BSDF_GGX_SMITH", + "BRDF", + "BTDF", + "BSDF", ] + MAT_TYPE_MAPPING: Dict[str, str] = { + "BRDF": "BRDF_GGX_SMITH", + "BTDF": "BTDF_GGX_SMITH", + "BSDF": "BSDF_GGX_SMITH", + } + def __init__(self, cfg: VisualMaterialCfg, mat: Material): + if cfg.material_type not in self.RT_MATERIAL_TYPES: + logger.log_error( + f"Invalid material_type '{cfg.material_type}'. " + f"Supported types: {self.RT_MATERIAL_TYPES}" + ) + self.uid = cfg.uid self.cfg = copy.deepcopy(cfg) self._mat = mat @@ -132,7 +144,9 @@ def set_default_properties( if self.is_rt_enabled: mat_inst.set_ior(cfg.ior) - mat_inst.mat.update_pbr_material_type(cfg.rt_material_type) + mat_inst.mat.update_pbr_material_type( + self.MAT_TYPE_MAPPING[cfg.material_type] + ) def create_instance(self, uid: str) -> VisualMaterialInst: """Create a new material instance from this material template. diff --git a/examples/sim/demo/scoop_ice.py b/examples/sim/demo/scoop_ice.py index 1baee81c..f4f75119 100644 --- a/examples/sim/demo/scoop_ice.py +++ b/examples/sim/demo/scoop_ice.py @@ -303,13 +303,13 @@ def create_ice_cubes(sim: SimulationManager): # Set visual material for ice cubes. # The material below only works for ray tracing backend. - # Set ior to 1.31 and material type to "BSDF_GGX_SMITH" for better ice appearance. + # Set ior to 1.31 and material type to "BSDF" for better ice appearance. ice_mat = sim.create_visual_material( cfg=VisualMaterialCfg( base_color=[1.0, 1.0, 1.0, 1.0], ior=1.31, roughness=0.05, - rt_material_type="BSDF_GGX_SMITH", + material_type="BSDF", ) ) ice_cubes.set_visual_material(mat=ice_mat) From 0079f5994811eb9b60bc1f6e30461d97842bc631 Mon Sep 17 00:00:00 2001 From: yuecideng Date: Sun, 11 Jan 2026 22:23:12 +0800 Subject: [PATCH 3/9] wip --- docs/source/overview/sim/sim_assets.md | 67 ++++++++++++++++--------- docs/source/overview/sim/sim_manager.md | 15 ++++++ embodichain/lab/sim/material.py | 8 +++ 3 files changed, 67 insertions(+), 23 deletions(-) diff --git a/docs/source/overview/sim/sim_assets.md b/docs/source/overview/sim/sim_assets.md index a00f9022..dfa07edf 100644 --- a/docs/source/overview/sim/sim_assets.md +++ b/docs/source/overview/sim/sim_assets.md @@ -1,26 +1,10 @@ # Simulation Assets -Simulation assets in EmbodiChain are configured using Python dataclasses. This approach provides a structured and type-safe way to define properties for physics, materials, objects and sensors in the simulation environment. +Simulation assets in EmbodiChain are configured using Python dataclasses. This approach provides a structured and type-safe way to define properties for physics, materials, objects and sensors in the simulation environment. -## Physics Configuration +## Visual Materials -The `PhysicsCfg` class controls the global physics simulation parameters. - -| Parameter | Type | Default | Description | -| :--- | :--- | :--- | :--- | -| `gravity` | `np.ndarray` | `[0, 0, -9.81]` | Gravity vector for the simulation environment. | -| `bounce_threshold` | `float` | `2.0` | The speed threshold below which collisions will not produce bounce effects. | -| `enable_pcm` | `bool` | `True` | Enable persistent contact manifold (PCM) for improved collision handling. 
| -| `enable_tgs` | `bool` | `True` | Enable temporal gauss-seidel (TGS) solver for better stability. | -| `enable_ccd` | `bool` | `False` | Enable continuous collision detection (CCD) for fast-moving objects. | -| `enable_enhanced_determinism` | `bool` | `False` | Enable enhanced determinism for consistent simulation results. | -| `enable_friction_every_iteration` | `bool` | `True` | Enable friction calculations at every solver iteration. | -| `length_tolerance` | `float` | `0.05` | The length tolerance for the simulation. Larger values increase speed. | -| `speed_tolerance` | `float` | `0.25` | The speed tolerance for the simulation. Larger values increase speed. | - -## Materials - -### Visual Materials +### Configuration The `VisualMaterialCfg` class defines the visual appearance of objects using Physically Based Rendering (PBR) properties. @@ -38,13 +22,43 @@ The `VisualMaterialCfg` class defines the visual appearance of objects using Phy | `normal_texture` | `str` | `None` | Path to normal map. | | `ao_texture` | `str` | `None` | Path to ambient occlusion map. | | `ior` | `float` | `1.5` | Index of refraction for ray tracing materials. | -| `material_type` | `str` | `"BRDF"` | material type. | + +### Visual Material and Visual Material Instance + +A visual material is defined using the `VisualMaterialCfg` class. It is actually a material template that can be used to create multiple instances with different parameters. + +A visual material instance is created from a visual material using the method `create_instance()`. User can set different properties for each instance. For details API usage, please refer to the [VisualMaterialInst](https://dexforce.github.io/EmbodiChain/api_reference/embodichain/embodichain.lab.sim.html#embodichain.lab.sim.material.VisualMaterialInst) documentation. + +For batch simualtion scenarios, when user set a material to a object (eg, a rigid object), the material instance will be created for each simulation instance automatically. + +### Code + +```python +# Create a visual material with base color white and low roughness. +mat: VisualMaterial = sim.create_visual_material( + cfg=VisualMaterialCfg( + base_color=[1.0, 1.0, 1.0, 1.0], + roughness=0.05, + ) +) + +# Set the material to a rigid object. +object: RigidObject +object.set_visual_material(mat) + +# Get all material instances created for this object in the simulation. If `num_envs` is N, there will be N instances. +mat_inst: List[VisualMaterialInst] = object.get_visual_material_inst() + +# We can then modify the properties of each material instance separately. +mat_inst[0].set_base_color([1.0, 0.0, 0.0, 1.0]) +``` + ## Objects All objects inherit from `ObjectBaseCfg`, which provides common properties. -**Base Properties (`ObjectBaseCfg`)** +**Base Properties** | Parameter | Type | Default | Description | | :--- | :--- | :--- | :--- | @@ -85,6 +99,13 @@ The `RigidBodyAttributesCfg` class defines physical properties for rigid bodies. | `dynamic_friction` | `float` | `0.5` | Dynamic friction coefficient. | | `static_friction` | `float` | `0.5` | Static friction coefficient. | +For Rigid Object tutorial, please refer to the [Create Scene](https://dexforce.github.io/EmbodiChain/tutorial/create_scene.html). + +## Rigid Object Groups + +`RigidObjectGroupCfg` allows initializing multiple rigid objects, potentially from a folder. + + ## Soft Object Configured via `SoftObjectCfg`. @@ -123,6 +144,8 @@ Soft bodies require both voxelization and physical attributes. | `mass` | `float` | `-1.0` | Total mass. 
If negative, density is used. | | `density` | `float` | `1000.0` | Material density in kg/m^3. | +For Soft Object tutorial, please refer to the [Soft Body Simulation](https://dexforce.github.io/EmbodiChain/tutorial/create_softbody.html). + ### Articulations & Robots @@ -152,6 +175,4 @@ Configured via `MarkerCfg` for debugging and visualization. | `axis_len` | `float` | `0.005` | Length of axis arms. | | `line_color` | `list` | `[1, 1, 0, 1.0]` | RGBA color for lines. | -### Rigid Object Groups -`RigidObjectGroupCfg` allows initializing multiple rigid objects, potentially from a folder. diff --git a/docs/source/overview/sim/sim_manager.md b/docs/source/overview/sim/sim_manager.md index 5f77b48c..823786b8 100644 --- a/docs/source/overview/sim/sim_manager.md +++ b/docs/source/overview/sim/sim_manager.md @@ -43,6 +43,21 @@ sim_config = SimulationManagerCfg( | `physics_config` | `PhysicsCfg` | `PhysicsCfg()` | The physics configuration parameters. | | `gpu_memory_config` | `GPUMemoryCfg` | `GPUMemoryCfg()` | The GPU memory configuration parameters. | +### Physics Configuration + +The `PhysicsCfg` class controls the global physics simulation parameters. + +| Parameter | Type | Default | Description | +| :--- | :--- | :--- | :--- | +| `gravity` | `np.ndarray` | `[0, 0, -9.81]` | Gravity vector for the simulation environment. | +| `bounce_threshold` | `float` | `2.0` | The speed threshold below which collisions will not produce bounce effects. | +| `enable_ccd` | `bool` | `False` | Enable continuous collision detection (CCD) for fast-moving objects. | +| `length_tolerance` | `float` | `0.05` | The length tolerance for the simulation. Larger values increase speed. | +| `speed_tolerance` | `float` | `0.25` | The speed tolerance for the simulation. Larger values increase speed. | + +For more parameters and details, refer to the [PhysicsCfg](https://dexforce.github.io/EmbodiChain/api_reference/embodichain/embodichain.lab.sim.html#embodichain.lab.sim.cfg.PhysicsCfg) documentation. + + ## Initialization Initialize the manager with the configuration object: diff --git a/embodichain/lab/sim/material.py b/embodichain/lab/sim/material.py index 9f2f50ca..ac3d99ab 100644 --- a/embodichain/lab/sim/material.py +++ b/embodichain/lab/sim/material.py @@ -116,6 +116,7 @@ def __init__(self, cfg: VisualMaterialCfg, mat: Material): self.uid = cfg.uid self.cfg = copy.deepcopy(cfg) self._mat = mat + self._mat_inst_list: list[str] = [] self._default_mat_inst = self.create_instance(self.uid) @@ -127,6 +128,10 @@ def is_rt_enabled(self) -> bool: def mat(self) -> Material: return self._mat + @property + def inst(self) -> VisualMaterialInst: + return self._default_mat_inst + def set_default_properties( self, mat_inst: VisualMaterialInst, cfg: VisualMaterialCfg ) -> None: @@ -164,6 +169,7 @@ def create_instance(self, uid: str) -> VisualMaterialInst: # TODO: Support change default properties for material. # This will improve the instance creation efficiency. self.set_default_properties(inst, self.cfg) + self._mat_inst_list.append(uid) return inst def get_default_instance(self) -> VisualMaterialInst: @@ -183,6 +189,8 @@ def get_instance(self, uid: str) -> VisualMaterialInst: Returns: VisualMaterialInst: The material instance. 
""" + if uid not in self._mat_inst_list: + logger.log_error(f"Material instance with uid '{uid}' does not exist.") return VisualMaterialInst(uid, self._mat) From 4aa01b89bcc7be7388d69f27d7842c0726d0658d Mon Sep 17 00:00:00 2001 From: liguilong256 Date: Thu, 15 Jan 2026 09:11:20 +0800 Subject: [PATCH 4/9] add_sim_docs: add run_able documentation for robot, articulation and sensor --- docs/source/overview/sim/sim_articulation.md | 92 ++++++++++++++++ docs/source/overview/sim/sim_robot.md | 105 +++++++++++++++++++ docs/source/overview/sim/sim_sensor.md | 75 +++++++++++++ 3 files changed, 272 insertions(+) create mode 100644 docs/source/overview/sim/sim_articulation.md create mode 100644 docs/source/overview/sim/sim_robot.md create mode 100644 docs/source/overview/sim/sim_sensor.md diff --git a/docs/source/overview/sim/sim_articulation.md b/docs/source/overview/sim/sim_articulation.md new file mode 100644 index 00000000..972893e7 --- /dev/null +++ b/docs/source/overview/sim/sim_articulation.md @@ -0,0 +1,92 @@ +# Articulation + +The `Articulation` class represents the fundamental physics entity for articulated objects (e.g., robots, grippers, cabinets, doors) in EmbodiChain. + +## Configuration + +Articulations are configured using the `ArticulationCfg` dataclass. + +| Parameter | Type | Default | Description | +| :--- | :--- | :--- | :--- | +| `fpath` | `str` | `None` | Path to the asset file (URDF/MJCF). | +| `init_pos` | `tuple` | `(0,0,0)` | Initial root position `(x, y, z)`. | +| `init_rot` | `tuple` | `(0,0,0)` | Initial root rotation `(r, p, y)` in degrees. | +| `fix_base` | `bool` | `True` | Whether to fix the base of the articulation. | +| `drive_pros` | `JointDrivePropertiesCfg` | `...` | Default drive properties. | + +### Setup & Initialization + +```python +import torch +from embodichain.lab.sim import SimulationManager, SimulationManagerCfg +from embodichain.lab.sim.objects import Articulation, ArticulationCfg + +# 1. Initialize Simulation +device = "cuda" if torch.cuda.is_available() else "cpu" +sim_cfg = SimulationManagerCfg(sim_device=device) +sim = SimulationManager(sim_config=sim_cfg) + +# 2. Configure Articulation +art_cfg = ArticulationCfg( + fpath="assets/robots/franka/franka.urdf", + init_pos=(0, 0, 0.5), + fix_base=True +) + +# 3. Spawn Articulation +# Note: The method is 'add_articulation' +articulation: Articulation = sim.add_articulation(cfg=art_cfg) + +# 4. Initialize Physics +sim.reset_objects_state() +``` +## Articulation Class +State Data (Observation) +State data is accessed via properties that return batched tensors. + +| Property | Shape | Description | +| :--- | :--- | :--- | +| `root_pose` | `(N, 7)` | Root link pose `[x, y, z, qw, qx, qy, qz]`. | +| `qpos` | `(N, dof)` | Joint positions. | +| `qvel` | `(N, dof)` | Joint velocities. | + + + +```python +# Example: Accessing state +print(f"Current Joint Positions: {articulation.qpos}") +print(f"Root Pose: {articulation.root_pose}") +``` +### Control & Dynamics +You can control the articulation by setting joint targets. + +### Joint Control +```python +# Set joint position targets (PD Control) +target_qpos = torch.zeros_like(articulation.qpos) +articulation.set_qpos(target_qpos, target=True) + +# Important: Step simulation to apply control +sim.update() +``` +### Drive Configuration +Dynamically adjust drive properties. 
+ +```python +# Set stiffness for all joints +articulation.set_drive( + stiffness=torch.tensor([100.0], device=device), + damping=torch.tensor([10.0], device=device) +) +``` +### Kinematics +Supports differentiable Forward Kinematics (FK) and Jacobian computation. +```python +# Compute Forward Kinematics +# Note: Ensure 'build_pk_chain=True' in cfg +if art_cfg.build_pk_chain: + ee_pose = articulation.compute_fk( + qpos=articulation.qpos, + end_link_name="ee_link" # Replace with actual link name + ) +``` diff --git a/docs/source/overview/sim/sim_robot.md b/docs/source/overview/sim/sim_robot.md new file mode 100644 index 00000000..53db6d19 --- /dev/null +++ b/docs/source/overview/sim/sim_robot.md @@ -0,0 +1,105 @@ +# Robot + +The `Robot` class extends `Articulation` to support advanced robotics features such as kinematic solvers (IK/FK), motion planners, and part-based control (e.g., controlling "arm" and "gripper" separately). + +## Configuration + +Robots are configured using `RobotCfg`. + +| Parameter | Type | Default | Description | +| :--- | :--- | :--- | :--- | +| `control_parts` | `Dict[str, List[str]]` | `None` | Defines groups of joints (e.g., `{"arm": ["joint1", ...], "hand": ["finger1", ...]}`). | +| `solver_cfg` | `SolverCfg` | `None` | Configuration for kinematic solvers (IK/FK). | +| `urdf_cfg` | `URDFCfg` | `None` | Advanced configuration for assembling a robot from multiple URDF components. | + +### Setup & Initialization + +A `Robot` must be spawned within a `SimulationManager`. + +```python +import torch +from embodichain.lab.sim import SimulationManager, SimulationManagerCfg +from embodichain.lab.sim.objects import Robot, RobotCfg + +# 1. Initialize Simulation Environment +# Note: Use 'sim_device' to specify device (e.g., "cuda:0" or "cpu") +device = "cuda" if torch.cuda.is_available() else "cpu" +sim_cfg = SimulationManagerCfg(sim_device=device, physics_dt=0.01) +sim = SimulationManager(sim_config=sim_cfg) + +# 2. Configure Robot +robot_cfg = RobotCfg( + fpath="assets/robots/franka/franka.urdf", + control_parts={ + "arm": ["panda_joint[1-7]"], + "gripper": ["panda_finger_joint[1-2]"] + }, + solver_cfg={} # Enable default solver +) + +# 3. Spawn Robot +# Note: The method is 'add_robot', and it takes 'cfg' as argument +robot: Robot = sim.add_robot(cfg=robot_cfg) + +# 4. Reset Objects State (SimulationManager does not have a global reset() method) +sim.reset_objects_state() +``` + +## Robot Class + +### Control Parts + +A unique feature of the `Robot` class is **Control Parts**. Instead of controlling the entire DoF vector at once, you can target specific body parts by name. + +```python +# Get joint IDs for a specific part +arm_ids = robot.get_joint_ids(name="arm") + +# Control only the arm +# Note: Ensure 'sim.update()' is called in your loop to apply these actions +target_qpos = torch.zeros((robot.num_instances, len(arm_ids)), device=device) +robot.set_qpos(target_qpos, name="arm", target=True) +``` + +### Kinematics (Solvers) +The robot class integrates solvers to perform differentiable Forward Kinematics (FK) and Inverse Kinematics (IK). +#### Forward Kinematics (FK) +Compute the pose of a link (e.g., end-effector) given joint positions. +```python +# Compute FK for a specific part (uses the part's configured solver) +current_qpos = robot.get_qpos() +ee_pose = robot.compute_fk(qpos=current_qpos, name="arm") +print(f"EE Pose: {ee_pose}") +``` +#### Inverse Kinematics (IK) +Compute the required joint positions to reach a target pose. 
+```python +# Compute IK +# pose: Target pose (N, 7) or (N, 4, 4) +target_pose = ee_pose.clone() # Example target +target_pose[:, 2] += 0.1 # Move up 10cm + +success, solved_qpos = robot.compute_ik( + pose=target_pose, + name="arm", + joint_seed=current_qpos +) +``` +### Proprioception +Get standard proprioceptive observation data for learning agents. +```python +# Returns a dict containing 'qpos', 'qvel', and 'qf' +obs = robot.get_proprioception() +``` +### Advanced API +The Robot class overrides standard Articulation methods to support the name argument for part-specific operations. +| Method | Description | +| :--- | :--- | +| `set_qpos(..., name="part")` | Set joint positions for a specific part. | +| `set_qvel(..., name="part")` | Set joint velocities for a specific part. | +| `set_qf(..., name="part")` | Set joint efforts for a specific part. | +| `get_qpos(name="part")` | Get joint positions of a specific part. | +| `get_qvel(name="part")` | Get joint velocities of a specific part. | + + + diff --git a/docs/source/overview/sim/sim_sensor.md b/docs/source/overview/sim/sim_sensor.md new file mode 100644 index 00000000..ed240f5b --- /dev/null +++ b/docs/source/overview/sim/sim_sensor.md @@ -0,0 +1,75 @@ +# Sensors + +The Simulation framework provides sensor interfaces for agents to perceive the environment. Currently, the primary supported sensor type is the **Camera**. + +## Camera + +### Configuration + +The `CameraCfg` class defines the configuration for camera sensors. It inherits from `SensorCfg` and controls resolution, clipping planes, intrinsics, and active data modalities. + +| Parameter | Type | Default | Description | +| :--- | :--- | :--- | :--- | +| `width` | `int` | `640` | Width of the captured image. | +| `height` | `int` | `480` | Height of the captured image. | +| `intrinsics` | `tuple` | `(600, 600, 320.0, 240.0)` | Camera intrinsics `(fx, fy, cx, cy)`. | +| `extrinsics` | `ExtrinsicsCfg` | `ExtrinsicsCfg()` | Pose configuration (see below). | +| `near` | `float` | `0.005` | Near clipping plane distance. | +| `far` | `float` | `100.0` | Far clipping plane distance. | +| `enable_color` | `bool` | `True` | Enable RGBA image capture. | +| `enable_depth` | `bool` | `False` | Enable depth map capture. | +| `enable_mask` | `bool` | `False` | Enable segmentation mask capture. | +| `enable_normal` | `bool` | `False` | Enable surface normal capture. | +| `enable_position` | `bool` | `False` | Enable 3D position map capture. | + +### Camera Extrinsics + +The `ExtrinsicsCfg` class defines the position and orientation of the camera. + +| Parameter | Type | Default | Description | +| :--- | :--- | :--- | :--- | +| `parent` | `str` | `None` | Name of the link to attach to (e.g., `"ee_link"`). If `None`, camera is fixed in world. | +| `pos` | `list` | `[0.0, 0.0, 0.0]` | Position offset `[x, y, z]`. | +| `quat` | `list` | `[1.0, 0.0, 0.0, 0.0]` | Orientation quaternion `[w, x, y, z]`. | +| `eye` | `tuple` | `None` | (Optional) Camera eye position for look-at mode. | +| `target` | `tuple` | `None` | (Optional) Target position for look-at mode. | +| `up` | `tuple` | `None` | (Optional) Up vector for look-at mode. | + +### Usage + +You can create a camera sensor using `sim.add_sensor()` with a `CameraCfg` object. + +#### Code Example + +```python +from embodichain.lab.sim.sensors import Camera, CameraCfg + +# 1. 
Define Configuration +camera_cfg = CameraCfg( + width=640, + height=480, + intrinsics=(600, 600, 320.0, 240.0), + extrinsics=CameraCfg.ExtrinsicsCfg( + parent="ee_link", # Attach to robot end-effector + pos=[0.09, 0.05, 0.04], # Relative position + quat=[0, 1, 0, 0], # Relative rotation [w, x, y, z] + ), + enable_color=True, + enable_depth=True, +) + +# 2. Add Sensor to Simulation +camera: Camera = sim.add_sensor(sensor_cfg=camera_cfg) +``` +### Observation Data +Retrieve sensor data using camera.get_data(). The data is returned as a dictionary of tensors on the specified device. + +| Key | Data Type | Shape | Description | +| :--- | :--- | :--- | :--- | +| `color` | `torch.uint8` | `(B, H, W, 4)` | RGBA image data. | +| `depth` | `torch.float32` | `(B, H, W)` | Depth map in meters. | +| `mask` | `torch.int32` | `(B, H, W)` | Segmentation mask / Instance IDs. | +| `normal` | `torch.float32` | `(B, H, W, 3)` | Surface normal vectors. | +| `position` | `torch.float32` | `(B, H, W, 3)` | 3D Position map (OpenGL coords). | + +*Note: `B` represents the number of environments (batch size).* \ No newline at end of file From e45207a17b9e211b24b5434cd211da19e564393b Mon Sep 17 00:00:00 2001 From: liguilong256 Date: Thu, 15 Jan 2026 10:10:23 +0800 Subject: [PATCH 5/9] fix API usage in articulation doc --- docs/source/overview/sim/sim_articulation.md | 20 ++++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) diff --git a/docs/source/overview/sim/sim_articulation.md b/docs/source/overview/sim/sim_articulation.md index 972893e7..e7ff27f6 100644 --- a/docs/source/overview/sim/sim_articulation.md +++ b/docs/source/overview/sim/sim_articulation.md @@ -46,16 +46,17 @@ State data is accessed via properties that return batched tensors. | Property | Shape | Description | | :--- | :--- | :--- | -| `root_pose` | `(N, 7)` | Root link pose `[x, y, z, qw, qx, qy, qz]`. | -| `qpos` | `(N, dof)` | Joint positions. | -| `qvel` | `(N, dof)` | Joint velocities. | +| `get_root_pose` | `(N, 7)` | Root link pose `[x, y, z, qw, qx, qy, qz]`. | +| `get_qpos` | `(N, dof)` | Joint positions. | +| `get_qvel` | `(N, dof)` | Joint velocities. | ```python # Example: Accessing state -print(f"Current Joint Positions: {articulation.qpos}") -print(f"Root Pose: {articulation.root_pose}") +# Note: Use methods (with brackets) instead of properties +print(f"Current Joint Positions: {articulation.get_qpos()}") +print(f"Root Pose: {articulation.get_root_pose()}") ``` ### Control & Dynamics You can control the articulation by setting joint targets. @@ -63,7 +64,10 @@ You can control the articulation by setting joint targets. ### Joint Control ```python # Set joint position targets (PD Control) -target_qpos = torch.zeros_like(articulation.qpos) +# Get current qpos to create a target tensor of correct shape +current_qpos = articulation.get_qpos() +target_qpos = torch.zeros_like(current_qpos) + articulation.set_qpos(target_qpos, target=True) # Important: Step simulation to apply control @@ -84,9 +88,9 @@ Supports differentiable Forward Kinematics (FK) and Jacobian computation. 
```python # Compute Forward Kinematics # Note: Ensure 'build_pk_chain=True' in cfg -if art_cfg.build_pk_chain: +if getattr(art_cfg, 'build_pk_chain', False): ee_pose = articulation.compute_fk( - qpos=articulation.qpos, + qpos=articulation.get_qpos(), # Use method call end_link_name="ee_link" # Replace with actual link name ) ``` From 928e83127b9dc8159c5a9286fa0c3ff010b00ad9 Mon Sep 17 00:00:00 2001 From: liguilong256 Date: Thu, 15 Jan 2026 14:48:12 +0800 Subject: [PATCH 6/9] use SolverCfg() and correct reset method comment in robot doc --- docs/source/overview/sim/sim_robot.md | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/docs/source/overview/sim/sim_robot.md b/docs/source/overview/sim/sim_robot.md index 53db6d19..61134850 100644 --- a/docs/source/overview/sim/sim_robot.md +++ b/docs/source/overview/sim/sim_robot.md @@ -20,6 +20,7 @@ A `Robot` must be spawned within a `SimulationManager`. import torch from embodichain.lab.sim import SimulationManager, SimulationManagerCfg from embodichain.lab.sim.objects import Robot, RobotCfg +from embodichain.lab.sim.solvers import SolverCfg # 1. Initialize Simulation Environment # Note: Use 'sim_device' to specify device (e.g., "cuda:0" or "cpu") @@ -34,14 +35,15 @@ robot_cfg = RobotCfg( "arm": ["panda_joint[1-7]"], "gripper": ["panda_finger_joint[1-2]"] }, - solver_cfg={} # Enable default solver + solver_cfg=SolverCfg() ) # 3. Spawn Robot # Note: The method is 'add_robot', and it takes 'cfg' as argument robot: Robot = sim.add_robot(cfg=robot_cfg) -# 4. Reset Objects State (SimulationManager does not have a global reset() method) +# 4. Reset Simulation +# This performs a global reset of the simulation state sim.reset_objects_state() ``` From 3cbb2fe7bd1611bbc4c8781c2e1aaec843b93a4f Mon Sep 17 00:00:00 2001 From: liguilong256 Date: Thu, 15 Jan 2026 14:53:08 +0800 Subject: [PATCH 7/9] add target param explanation --- docs/source/overview/sim/sim_articulation.md | 24 ++++++++++++++++---- 1 file changed, 20 insertions(+), 4 deletions(-) diff --git a/docs/source/overview/sim/sim_articulation.md b/docs/source/overview/sim/sim_articulation.md index e7ff27f6..2e77459b 100644 --- a/docs/source/overview/sim/sim_articulation.md +++ b/docs/source/overview/sim/sim_articulation.md @@ -12,7 +12,20 @@ Articulations are configured using the `ArticulationCfg` dataclass. | `init_pos` | `tuple` | `(0,0,0)` | Initial root position `(x, y, z)`. | | `init_rot` | `tuple` | `(0,0,0)` | Initial root rotation `(r, p, y)` in degrees. | | `fix_base` | `bool` | `True` | Whether to fix the base of the articulation. | -| `drive_pros` | `JointDrivePropertiesCfg` | `...` | Default drive properties. | +| `drive_props` | `JointDrivePropertiesCfg` | `...` | Default drive properties. | + +### Drive Configuration + +The `drive_props` parameter controls the joint physics behavior. It is defined using the `JointDrivePropertiesCfg` class. + +| Parameter | Type | Default | Description | +| :--- | :--- | :--- | :--- | +| `stiffness` | `float` / `Dict` | `1.0e4` | Stiffness (P-gain) of the joint drive. Unit: $N/m$ or $Nm/rad$. | +| `damping` | `float` / `Dict` | `1.0e3` | Damping (D-gain) of the joint drive. Unit: $Ns/m$ or $Nms/rad$. | +| `max_effort` | `float` / `Dict` | `1.0e10` | Maximum effort (force/torque) the joint can exert. | +| `max_velocity` | `float` / `Dict` | `1.0e10` | Maximum velocity allowed for the joint ($m/s$ or $rad/s$). | +| `friction` | `float` / `Dict` | `0.0` | Joint friction coefficient. 
| +| `drive_type` | `str` | `"force"` | Drive mode: `"force"` or `"acceleration"`. | ### Setup & Initialization @@ -42,11 +55,11 @@ sim.reset_objects_state() ``` ## Articulation Class State Data (Observation) -State data is accessed via properties that return batched tensors. +State data is accessed via getter methods that return batched tensors. | Property | Shape | Description | | :--- | :--- | :--- | -| `get_root_pose` | `(N, 7)` | Root link pose `[x, y, z, qw, qx, qy, qz]`. | +| `get_local_pose` | `(N, 7)` | Root link pose `[x, y, z, qw, qx, qy, qz]`. | | `get_qpos` | `(N, dof)` | Joint positions. | | `get_qvel` | `(N, dof)` | Joint velocities. | @@ -56,7 +69,7 @@ State data is accessed via properties that return batched tensors. # Example: Accessing state # Note: Use methods (with brackets) instead of properties print(f"Current Joint Positions: {articulation.get_qpos()}") -print(f"Root Pose: {articulation.get_root_pose()}") +print(f"Root Pose: {articulation.get_local_pose()}") ``` ### Control & Dynamics You can control the articulation by setting joint targets. @@ -68,6 +81,9 @@ You can control the articulation by setting joint targets. current_qpos = articulation.get_qpos() target_qpos = torch.zeros_like(current_qpos) +# Set target position +# target=True: Sets the drive target. The physics engine applies forces to reach this position. +# target=False: Instantly resets/teleports joints to this position (ignoring physics). articulation.set_qpos(target_qpos, target=True) # Important: Step simulation to apply control From 274f1798a57c21ee3fd11bed250c6937063f08ed Mon Sep 17 00:00:00 2001 From: liguilong256 Date: Thu, 15 Jan 2026 15:35:48 +0800 Subject: [PATCH 8/9] add soft object documention --- docs/source/overview/sim/sim_soft_object.md | 79 +++++++++++++++++++++ 1 file changed, 79 insertions(+) create mode 100644 docs/source/overview/sim/sim_soft_object.md diff --git a/docs/source/overview/sim/sim_soft_object.md b/docs/source/overview/sim/sim_soft_object.md new file mode 100644 index 00000000..4e4130e8 --- /dev/null +++ b/docs/source/overview/sim/sim_soft_object.md @@ -0,0 +1,79 @@ +# Soft Object + +The `SoftObject` class represents deformable entities (e.g., cloth, sponges, soft robotics) in EmbodiChain. Unlike rigid bodies, soft objects are defined by vertices and meshes rather than a single rigid pose. + +## Configuration + +Soft objects are configured using the `SoftObjectCfg` dataclass. + +| Parameter | Type | Default | Description | +| :--- | :--- | :--- | :--- | +| `fpath` | `str` | `None` | Path to the soft body asset file (e.g., `.msh`, `.vtk`). | +| `init_pos` | `tuple` | `(0,0,0)` | Initial position `(x, y, z)`. | +| `init_rot` | `tuple` | `(0,0,0)` | Initial rotation `(r, p, y)` in degrees. | + +### Setup & Initialization + +```python +import torch +from embodichain.lab.sim import SimulationManager, SimulationManagerCfg +from embodichain.lab.sim.objects import SoftObject, SoftObjectCfg + +# 1. Initialize Simulation +device = "cuda" if torch.cuda.is_available() else "cpu" +sim_cfg = SimulationManagerCfg(sim_device=device) +sim = SimulationManager(sim_config=sim_cfg) + +# 2. Configure Soft Object +soft_cfg = SoftObjectCfg( + fpath="assets/objects/sponge.msh", # Example asset path + init_pos=(0, 0, 0.5), + init_rot=(0, 0, 0) +) + +# 3. Spawn Soft Object +# Note: Assuming the method in SimulationManager is 'add_soft_object' +soft_object: SoftObject = sim.add_soft_object(cfg=soft_cfg) + +# 4. 
Initialize Physics +sim.reset_objects_state() +``` +### Soft Object Class +#### Vertex Data (Observation) +For soft objects, the state is represented by the positions and velocities of its vertices, rather than a single root pose. + +| Method | Return Shape | Description | +| :--- | :--- | :--- | +| `get_current_collision_vertices()` | `(N, V_col, 3)` | Current positions of collision mesh vertices. | +| `get_current_sim_vertices()` | `(N, V_sim, 3)` | Current positions of simulation mesh vertices (nodes). | +| `get_current_sim_vertex_velocities()` | `(N, V_sim, 3)` | Current velocities of simulation vertices. | +| `get_rest_collision_vertices()` | `(N, V_col, 3)` | Rest (initial) positions of collision vertices. | +| `get_rest_sim_vertices()` | `(N, V_sim, 3)` | Rest (initial) positions of simulation vertices. | + +> Note: N is the number of environments/instances, V_col is the number of collision vertices, and V_sim is the number of simulation vertices. + +```python +# Example: Accessing vertex data +sim_verts = soft_object.get_current_sim_vertices() +print(f"Simulation Vertices Shape: {sim_verts.shape}") + +velocities = soft_object.get_current_sim_vertex_velocities() +print(f"Vertex Velocities: {velocities}") +``` +#### Pose Management +You can set the global pose of a soft object (which transforms all its vertices), but getting a single "pose" from a deformed object is not supported. + +| Method | Description | +| :--- | :--- | +| `set_local_pose(pose)` | Sets the pose of the object by transforming all vertices. | +| `get_local_pose()` | **Not Supported**. Raises `NotImplementedError` because a deformed object does not have a single rigid pose. | + + +```python +# Reset or Move the Soft Object +target_pose = torch.tensor([[0, 0, 1.0, 1, 0, 0, 0]], device=device) # (x, y, z, qw, qx, qy, qz) +soft_object.set_local_pose(target_pose) + +# Important: Step simulation to apply changes +sim.update() +``` \ No newline at end of file From 57e3b0072db001bfe6a5ae8dfd750a2b19fa9834 Mon Sep 17 00:00:00 2001 From: liguilong256 Date: Thu, 22 Jan 2026 16:31:15 +0800 Subject: [PATCH 9/9] Add PushT environment, gym config and train config, --- configs/agents/rl/push_t/gym_config.json | 193 ++++++++++++++++++++ configs/agents/rl/push_t/train_config.json | 67 +++++++ embodichain/lab/gym/envs/tasks/rl/push_t.py | 149 +++++++++++++++ 3 files changed, 409 insertions(+) create mode 100644 configs/agents/rl/push_t/gym_config.json create mode 100644 configs/agents/rl/push_t/train_config.json create mode 100644 embodichain/lab/gym/envs/tasks/rl/push_t.py diff --git a/configs/agents/rl/push_t/gym_config.json b/configs/agents/rl/push_t/gym_config.json new file mode 100644 index 00000000..7bb1216d --- /dev/null +++ b/configs/agents/rl/push_t/gym_config.json @@ -0,0 +1,193 @@ +{ + "id": "PushTRL", + "max_episodes": 5, + "env": { + "num_envs": 128, + "sim_steps_per_control": 4, + "events": { + "randomize_t": { + "func": "randomize_rigid_object_pose", + "mode": "reset", + "params": { + "entity_cfg": { + "uid": "t" + }, + "position_range": [ + [-0.2, -0.2, 0.0], + [0.2, 0.2, 0.0] + ], + "relative_position": true + } + }, + "randomize_goal": { + "func": "randomize_target_pose", + "mode": "reset", + "params": { + "position_range": [ + [-0.3, -0.3, 0.05], + [0.3, 0.3, 0.05] + ], + "relative_position": false, + "store_key": "goal_pose" + } + } + }, + "observations": { + "robot_qpos": { + "func": "normalize_robot_joint_data", + "mode": "modify", + "name": "robot/qpos", + "params": { + "joint_ids": [0, 1, 2, 3, 4, 5] + } + }, 
+ "robot_ee_pos": { + "func": "get_robot_eef_pose", + "mode": "add", + "name": "robot/ee_pos", + "params": { + "part_name": "arm" + } + }, + "t_pos": { + "func": "get_rigid_object_pose", + "mode": "add", + "name": "object/t_pos", + "params": { + "entity_cfg": { + "uid": "t" + } + } + }, + "goal_pos": { + "func": "target_position", + "mode": "add", + "name": "object/goal_pos", + "params": { + "target_pose_key": "goal_pose" + } + } + }, + "rewards": { + "reaching_reward": { + "func": "reaching_behind_object", + "mode": "add", + "weight": 0.1, + "params": { + "object_cfg": { + "uid": "t" + }, + "target_pose_key": "goal_pose", + "behind_offset": 0.015, + "height_offset": 0.015, + "distance_scale": 5.0, + "part_name": "arm" + } + }, + "place_reward": { + "func": "incremental_distance_to_target", + "mode": "add", + "weight": 1.0, + "params": { + "source_entity_cfg": { + "uid": "t" + }, + "target_pose_key": "goal_pose", + "tanh_scale": 10.0, + "positive_weight": 2.0, + "negative_weight": 0.5, + "use_xy_only": true + } + }, + "action_penalty": { + "func": "action_smoothness_penalty", + "mode": "add", + "weight": 0.01, + "params": {} + }, + "success_bonus": { + "func": "success_reward", + "mode": "add", + "weight": 10.0, + "params": {} + } + }, + "extensions": { + "obs_mode": "state", + "episode_length": 100, + "joint_limits": 0.5, + "action_scale": 0.1, + "success_threshold": 0.1 + } + }, + "robot": { + "uid": "Manipulator", + "urdf_cfg": { + "components": [ + { + "component_type": "arm", + "urdf_path": "UniversalRobots/UR10/UR10.urdf" + }, + { + "component_type": "hand", + "urdf_path": "DH_PGI_140_80/DH_PGI_140_80.urdf" + } + ] + }, + "init_pos": [0.0, 0.0, 0.0], + "init_rot": [0.0, 0.0, 0.0], + "init_qpos": [0.0, -1.57, 1.57, -1.57, -1.57, 0.0, 0.04, 0.04], + "drive_pros": { + "drive_type": "force", + "stiffness": 100000.0, + "damping": 1000.0, + "max_velocity": 2.0, + "max_effort": 500.0 + }, + "solver_cfg": { + "arm": { + "class_type": "PytorchSolver", + "end_link_name": "ee_link", + "root_link_name": "base_link", + "tcp": [ + [1.0, 0.0, 0.0, 0.0], + [0.0, 1.0, 0.0, 0.0], + [0.0, 0.0, 1.0, 0.16], + [0.0, 0.0, 0.0, 1.0] + ] + } + }, + "control_parts": { + "arm": ["JOINT[1-6]"] + } + }, + "sensor": [], + "light": {}, + "background": [], + "rigid_object": [ + { + "uid": "t", + "shape": { + "shape_type": "Cube", + "size": [0.1, 0.1, 0.1] + }, + "body_type": "dynamic", + "init_pos": [-0.6, -0.4, 0.05], + "attrs": { + "mass": 10.0, + "static_friction": 3.0, + "dynamic_friction": 2.0, + "linear_damping": 2.0, + "angular_damping": 2.0, + "contact_offset": 0.003, + "rest_offset": 0.001, + "restitution": 0.1, + "max_depenetration_velocity": 10.0, + "max_linear_velocity": 1.0, + "max_angular_velocity": 1.0 + } + } + ], + "rigid_object_group": [], + "articulation": [] +} diff --git a/configs/agents/rl/push_t/train_config.json b/configs/agents/rl/push_t/train_config.json new file mode 100644 index 00000000..b7a101b1 --- /dev/null +++ b/configs/agents/rl/push_t/train_config.json @@ -0,0 +1,67 @@ +{ + "trainer": { + "exp_name": "push_t_ppo", + "gym_config": "configs/agents/rl/push_t/gym_config.json", + "seed": 42, + "device": "cuda:0", + "headless": false, + "enable_rt": false, + "gpu_id": 0, + "num_envs": 8, + "iterations": 1000, + "rollout_steps": 1024, + "eval_freq": 200, + "save_freq": 200, + "use_wandb": true, + "wandb_project_name": "embodychain-push_t", + "events": { + "eval": { + "record_camera": { + "func": "record_camera_data_async", + "mode": "interval", + "interval_step": 1, + "params": { + 
"name": "main_cam", + "resolution": [640, 480], + "eye": [-1.4, 1.4, 2.0], + "target": [0, 0, 0], + "up": [0, 0, 1], + "intrinsics": [600, 600, 320, 240], + "save_path": "./outputs/videos/eval" + } + } + } + } + }, + "policy": { + "name": "actor_critic", + "actor": { + "type": "mlp", + "network_cfg": { + "hidden_sizes": [256, 256], + "activation": "relu" + } + }, + "critic": { + "type": "mlp", + "network_cfg": { + "hidden_sizes": [256, 256], + "activation": "relu" + } + } + }, + "algorithm": { + "name": "ppo", + "cfg": { + "learning_rate": 0.0001, + "n_epochs": 10, + "batch_size": 8192, + "gamma": 0.99, + "gae_lambda": 0.95, + "clip_coef": 0.2, + "ent_coef": 0.01, + "vf_coef": 0.5, + "max_grad_norm": 0.5 + } + } +} diff --git a/embodichain/lab/gym/envs/tasks/rl/push_t.py b/embodichain/lab/gym/envs/tasks/rl/push_t.py new file mode 100644 index 00000000..8b2ba311 --- /dev/null +++ b/embodichain/lab/gym/envs/tasks/rl/push_t.py @@ -0,0 +1,149 @@ +# ---------------------------------------------------------------------------- +# Copyright (c) 2021-2025 DexForce Technology Co., Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ---------------------------------------------------------------------------- + +from typing import Dict, Any, Sequence + +import torch + +from embodichain.lab.gym.utils.registration import register_env +from embodichain.lab.gym.envs import EmbodiedEnv, EmbodiedEnvCfg +from embodichain.lab.sim.cfg import MarkerCfg +from embodichain.lab.sim.types import EnvObs, EnvAction + + +@register_env("PushTRL", max_episode_steps=50, override=True) +class PushTEnv(EmbodiedEnv): + """Push-T task. + + The task requires pushing a T-shaped block to a goal position on the tabletop. + Success is defined by the object being within a planar distance threshold. 
+ """ + + def __init__(self, cfg=None, **kwargs): + if cfg is None: + cfg = EmbodiedEnvCfg() + + super().__init__(cfg, **kwargs) + + self.control_part_name = getattr(self, "control_part_name", "arm") + self.require_on_table = getattr(self, "require_on_table", True) + self.table_height = getattr(self, "table_height", 0.0) + + @property + def goal_pose(self) -> torch.Tensor | None: + """Get current goal poses (4x4 matrices) for all environments.""" + return getattr(self, "_goal_pose", None) + + def _draw_goal_marker(self) -> None: + """Draw axis marker at goal position for visualization.""" + if self.goal_pose is None: + return + + goal_poses = self.goal_pose.detach().cpu().numpy() + for arena_idx in range(self.cfg.num_envs): + marker_name = f"goal_marker_{arena_idx}" + self.sim.remove_marker(marker_name) + + marker_cfg = MarkerCfg( + name=marker_name, + marker_type="axis", + axis_xpos=[goal_poses[arena_idx]], + axis_size=0.003, + axis_len=0.02, + arena_index=arena_idx, + ) + self.sim.draw_marker(cfg=marker_cfg) + + def _initialize_episode( + self, env_ids: Sequence[int] | None = None, **kwargs + ) -> None: + super()._initialize_episode(env_ids=env_ids, **kwargs) + # self._draw_goal_marker() + + def _step_action(self, action: EnvAction) -> EnvAction: + scaled_action = action * self.action_scale + scaled_action = torch.clamp( + scaled_action, -self.joint_limits, self.joint_limits + ) + current_qpos = self.robot.body_data.qpos + target_qpos = current_qpos.clone() + target_qpos[:, :6] += scaled_action[:, :6] + self.robot.set_qpos(qpos=target_qpos) + return scaled_action + + def _get_eef_pos(self) -> torch.Tensor: + """Get end-effector position using FK.""" + if self.control_part_name: + try: + joint_ids = self.robot.get_joint_ids(self.control_part_name) + qpos = self.robot.get_qpos()[:, joint_ids] + ee_pose = self.robot.compute_fk( + name=self.control_part_name, qpos=qpos, to_matrix=True + ) + except (ValueError, KeyError, AttributeError): + qpos = self.robot.get_qpos() + ee_pose = self.robot.compute_fk(qpos=qpos, to_matrix=True) + else: + qpos = self.robot.get_qpos() + ee_pose = self.robot.compute_fk(qpos=qpos, to_matrix=True) + return ee_pose[:, :3, 3] + + def get_info(self, **kwargs) -> Dict[str, Any]: + cube = self.sim.get_rigid_object("cube") + cube_pos = cube.body_data.pose[:, :3] + ee_pos = self._get_eef_pos() + + if self.goal_pose is not None: + goal_pos = self.goal_pose[:, :3, 3] + xy_distance = torch.norm(cube_pos[:, :2] - goal_pos[:, :2], dim=1) + is_success = xy_distance < self.success_threshold + if self.require_on_table: + is_success = is_success & (cube_pos[:, 2] >= self.table_height - 1e-3) + else: + xy_distance = torch.zeros(self.cfg.num_envs, device=self.device) + is_success = torch.zeros( + self.cfg.num_envs, device=self.device, dtype=torch.bool + ) + + ee_to_cube = torch.norm(ee_pos - cube_pos, dim=1) + info = { + "success": is_success, + "fail": torch.zeros( + self.cfg.num_envs, device=self.device, dtype=torch.bool + ), + "elapsed_steps": self._elapsed_steps, + "goal_pose": self.goal_pose, + } + info["metrics"] = { + "distance_to_goal": xy_distance, + "eef_to_cube": ee_to_cube, + "cube_height": cube_pos[:, 2], + } + return info + + def check_truncated(self, obs: EnvObs, info: Dict[str, Any]) -> torch.Tensor: + is_timeout = self._elapsed_steps >= self.episode_length + cube = self.sim.get_rigid_object("cube") + cube_pos = cube.body_data.pose[:, :3] + is_fallen = cube_pos[:, 2] < -0.1 + return is_timeout | is_fallen + + def evaluate(self, **kwargs) -> Dict[str, Any]: + info = 
self.get_info(**kwargs)
        return {
            "success": info["success"][0].item(),
            "distance_to_goal": info["metrics"]["distance_to_goal"],
        }
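
For quick local verification of the new task, here is a minimal rollout sketch. It is not part of the patch: it assumes `PushTEnv` can be constructed directly with a default `EmbodiedEnvCfg` (in the real pipeline the trainer builds the environment from `configs/agents/rl/push_t/gym_config.json`), that `EmbodiedEnv` exposes gymnasium-style `reset`/`step`/`close` methods whose `info` dictionary mirrors `PushTEnv.get_info()`, and that the action vector has six entries matching the `arm` control part.

```python
import torch

from embodichain.lab.gym.envs import EmbodiedEnvCfg
from embodichain.lab.gym.envs.tasks.rl.push_t import PushTEnv

# Assumption: a default EmbodiedEnvCfg is enough for a smoke test; the trainer
# normally populates it from configs/agents/rl/push_t/gym_config.json.
env = PushTEnv(cfg=EmbodiedEnvCfg())

obs, info = env.reset()
for _ in range(10):
    # Zero joint-delta action for every parallel env; PushTEnv._step_action
    # scales it by action_scale and clamps it to joint_limits before applying.
    action = torch.zeros((env.cfg.num_envs, 6), device=env.device)
    obs, reward, terminated, truncated, info = env.step(action)
    # Assumed to surface the planar distance reported by get_info().
    print(info["metrics"]["distance_to_goal"])

env.close()
```

In actual training, `train_config.json` points the PPO trainer at this gym config through its `gym_config` field, so the direct construction above is only a stand-in for a quick check.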
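The `algorithm.cfg` block in `train_config.json` carries standard PPO hyperparameters. As a point of reference only (this is not the project's trainer code), a generic clipped-surrogate update using those field names might look like the sketch below, where `ratio`, `advantages`, `returns`, `values`, and `entropy` are assumed to come from the rollout buffer and the current policy:

```python
import torch

def ppo_loss(ratio, advantages, returns, values, entropy,
             clip_coef=0.2, vf_coef=0.5, ent_coef=0.01):
    """Clipped-surrogate PPO objective using the hyperparameter names from
    train_config.json. All inputs are 1-D tensors over one minibatch."""
    # Policy term: clip the probability ratio to [1 - clip_coef, 1 + clip_coef]
    # and take the pessimistic (minimum) surrogate.
    unclipped = ratio * advantages
    clipped = torch.clamp(ratio, 1.0 - clip_coef, 1.0 + clip_coef) * advantages
    policy_loss = -torch.min(unclipped, clipped).mean()

    # Value term: squared error between predicted values and bootstrapped returns.
    value_loss = 0.5 * (returns - values).pow(2).mean()

    # Entropy bonus encourages exploration, weighted by ent_coef.
    entropy_bonus = entropy.mean()

    return policy_loss + vf_coef * value_loss - ent_coef * entropy_bonus
```

`gamma` and `gae_lambda` enter earlier, when the rollout buffer computes advantages with GAE, and `max_grad_norm` would be applied with `torch.nn.utils.clip_grad_norm_` before each optimizer step.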