From f26590171b1894f6d64c2a000425682f9c211f23 Mon Sep 17 00:00:00 2001 From: Stephen Aylward Date: Sat, 21 Feb 2026 15:44:56 -0500 Subject: [PATCH 01/10] ENH: Split VTK by type or connectivity. Greedy Registration --- README.md | 1 + docs/developer/architecture.rst | 1 + docs/testing.rst | 2 + .../convert_chop_valve_to_usd.ipynb | 527 ++++++------------ .../0-download_and_convert_4d_to_3d.ipynb | 2 +- .../1-register_images.ipynb | 9 +- .../2-generate_segmentation.ipynb | 2 +- .../4-merge_dynamic_and_static_usd.ipynb | 2 +- .../test_compare_registration_speed.ipynb | 265 +++++++++ pyproject.toml | 2 + src/physiomotion4d/__init__.py | 4 + src/physiomotion4d/cli/__init__.py | 1 + src/physiomotion4d/cli/convert_vtk_to_usd.py | 236 ++++++++ src/physiomotion4d/register_images_greedy.py | 408 ++++++++++++++ src/physiomotion4d/register_models_pca.py | 8 +- src/physiomotion4d/usd_anatomy_tools.py | 42 ++ src/physiomotion4d/usd_tools.py | 114 +++- src/physiomotion4d/vtk_to_usd/__init__.py | 9 + src/physiomotion4d/vtk_to_usd/converter.py | 177 +++++- .../vtk_to_usd/data_structures.py | 10 + src/physiomotion4d/vtk_to_usd/mesh_utils.py | 313 +++++++++++ .../workflow_convert_vtk_to_usd.py | 280 ++++++++++ tests/conftest.py | 7 + tests/test_register_images_greedy.py | 211 +++++++ 24 files changed, 2249 insertions(+), 384 deletions(-) create mode 100644 experiments/Heart-GatedCT_To_USD/test_compare_registration_speed.ipynb create mode 100644 src/physiomotion4d/cli/convert_vtk_to_usd.py create mode 100644 src/physiomotion4d/register_images_greedy.py create mode 100644 src/physiomotion4d/vtk_to_usd/mesh_utils.py create mode 100644 src/physiomotion4d/workflow_convert_vtk_to_usd.py create mode 100644 tests/test_register_images_greedy.py diff --git a/README.md b/README.md index e7324ef..fec42fb 100644 --- a/README.md +++ b/README.md @@ -560,6 +560,7 @@ pytest tests/ -m "not slow and not requires_data" -v pytest tests/test_usd_merge.py -v # USD merge functionality pytest 
tests/test_usd_time_preservation.py -v # Time-varying data preservation pytest tests/test_register_images_ants.py -v # ANTs registration +pytest tests/test_register_images_greedy.py -v # Greedy registration pytest tests/test_register_images_icon.py -v # Icon registration pytest tests/test_register_time_series_images.py -v # Time series registration pytest tests/test_segment_chest_total_segmentator.py -v # TotalSegmentator diff --git a/docs/developer/architecture.rst b/docs/developer/architecture.rst index bf67f13..203b77d 100644 --- a/docs/developer/architecture.rst +++ b/docs/developer/architecture.rst @@ -59,6 +59,7 @@ The package is organized into functional modules: │ ├── Image Registration │ │ ├── register_images_base.py Base registration │ │ ├── register_images_ants.py ANTs registration + │ │ ├── register_images_greedy.py Greedy registration │ │ ├── register_images_icon.py ICON registration │ │ └── register_time_series_images.py Time series │ │ diff --git a/docs/testing.rst b/docs/testing.rst index 3651ee0..1e7ac4c 100644 --- a/docs/testing.rst +++ b/docs/testing.rst @@ -71,6 +71,7 @@ Specific Test Modules # Registration (slow, ~5-10 minutes each) pytest tests/test_register_images_ants.py -v + pytest tests/test_register_images_greedy.py -v pytest tests/test_register_images_icon.py -v pytest tests/test_register_time_series_images.py -v @@ -107,6 +108,7 @@ Tests are organized by functionality: │ ├── Registration Tests (Slow ~5-10 min) │ ├── test_register_images_ants.py # ANTs registration + │ ├── test_register_images_greedy.py # Greedy registration │ ├── test_register_images_icon.py # Icon registration │ └── test_register_time_series_images.py # Time series registration │ diff --git a/experiments/Convert_VTK_To_USD/convert_chop_valve_to_usd.ipynb b/experiments/Convert_VTK_To_USD/convert_chop_valve_to_usd.ipynb index 71219a0..86de65a 100644 --- a/experiments/Convert_VTK_To_USD/convert_chop_valve_to_usd.ipynb +++ 
b/experiments/Convert_VTK_To_USD/convert_chop_valve_to_usd.ipynb @@ -8,14 +8,13 @@ "\n", "This notebook demonstrates converting time-varying cardiac valve simulation data from VTK format to animated USD.\n", "\n", - "## Dataset: CHOP-Valve4D\n", + "## Dataset: CHOP-Valve4D (TPV25)\n", "\n", - "Two cardiac valve models with time-varying geometry:\n", + "One cardiac valve model with time-varying geometry:\n", "\n", - "- **Alterra**: 232 time steps (cardiac cycle simulation)\n", "- **TPV25**: 265 time steps (cardiac cycle simulation)\n", "\n", - "These datasets represent 4D (3D + time) simulations of prosthetic heart valves during a cardiac cycle.\n", + "This dataset represents 4D (3D + time) simulation of a prosthetic heart valve during a cardiac cycle.\n", "\n", "## Goals\n", "\n", @@ -55,7 +54,7 @@ "# Configuration: Control which conversions to run\n", "# Set to True to compute full time series (all frames) - takes longer\n", "# Set to False to only compute subsampled time series (faster, for preview)\n", - "COMPUTE_FULL_TIME_SERIES = False # Default: only subsampled\n", + "COMPUTE_FULL_TIME_SERIES = True # Default: only subsampled\n", "\n", "print(\"Time Series Configuration:\")\n", "print(f\" - Compute Full Time Series: {COMPUTE_FULL_TIME_SERIES}\")\n", @@ -83,6 +82,7 @@ " VTKToUSDConverter,\n", " ConversionSettings,\n", " MaterialData,\n", + " cell_type_name_for_vertex_count,\n", " read_vtk_file,\n", " validate_time_series_topology,\n", ")\n", @@ -107,9 +107,8 @@ "metadata": {}, "outputs": [], "source": [ - "# Define data directories\n", + "# Define data directories (TPV25 only)\n", "data_dir = Path.cwd().parent.parent / \"data\" / \"CHOP-Valve4D\"\n", - "alterra_dir = data_dir / \"Alterra\"\n", "tpv25_dir = data_dir / \"TPV25\"\n", "output_dir = Path.cwd() / \"output\" / \"valve4d\"\n", "output_dir.mkdir(parents=True, exist_ok=True)\n", @@ -117,7 +116,6 @@ "print(f\"Data directory: {data_dir}\")\n", "print(f\"Output directory: {output_dir}\")\n", 
"print(\"\\nDirectory status:\")\n", - "print(f\" Alterra: {'✓' if alterra_dir.exists() else '✗'} {alterra_dir}\")\n", "print(f\" TPV25: {'✓' if tpv25_dir.exists() else '✗'} {tpv25_dir}\")" ] }, @@ -153,20 +151,12 @@ " return time_series\n", "\n", "\n", - "# Discover both datasets\n", - "alterra_series = discover_time_series(alterra_dir)\n", + "# Discover TPV25 time series\n", "tpv25_series = discover_time_series(tpv25_dir)\n", "\n", "print(\"=\" * 60)\n", - "print(\"Time-Series Discovery\")\n", + "print(\"Time-Series Discovery (TPV25)\")\n", "print(\"=\" * 60)\n", - "print(\"\\nAlterra:\")\n", - "print(f\" Files found: {len(alterra_series)}\")\n", - "if alterra_series:\n", - " print(f\" Time range: t{alterra_series[0][0]} to t{alterra_series[-1][0]}\")\n", - " print(f\" First file: {alterra_series[0][1].name}\")\n", - " print(f\" Last file: {alterra_series[-1][1].name}\")\n", - "\n", "print(\"\\nTPV25:\")\n", "print(f\" Files found: {len(tpv25_series)}\")\n", "if tpv25_series:\n", @@ -184,48 +174,6 @@ "Examine the first time step to understand the data structure." 
] }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Read first frame of Alterra\n", - "if alterra_series:\n", - " print(\"=\" * 60)\n", - " print(\"Alterra - First Frame Analysis\")\n", - " print(\"=\" * 60)\n", - "\n", - " first_file = alterra_series[0][1]\n", - " mesh_data = read_vtk_file(first_file, extract_surface=True)\n", - "\n", - " print(f\"\\nFile: {first_file.name}\")\n", - " print(\"\\nGeometry:\")\n", - " print(f\" Points: {len(mesh_data.points):,}\")\n", - " print(f\" Faces: {len(mesh_data.face_vertex_counts):,}\")\n", - " print(f\" Normals: {'Yes' if mesh_data.normals is not None else 'No'}\")\n", - " print(f\" Colors: {'Yes' if mesh_data.colors is not None else 'No'}\")\n", - "\n", - " # Bounding box\n", - " bbox_min = np.min(mesh_data.points, axis=0)\n", - " bbox_max = np.max(mesh_data.points, axis=0)\n", - " bbox_size = bbox_max - bbox_min\n", - " print(\"\\nBounding Box:\")\n", - " print(f\" Min: [{bbox_min[0]:.3f}, {bbox_min[1]:.3f}, {bbox_min[2]:.3f}]\")\n", - " print(f\" Max: [{bbox_max[0]:.3f}, {bbox_max[1]:.3f}, {bbox_max[2]:.3f}]\")\n", - " print(f\" Size: [{bbox_size[0]:.3f}, {bbox_size[1]:.3f}, {bbox_size[2]:.3f}]\")\n", - "\n", - " print(f\"\\nData Arrays ({len(mesh_data.generic_arrays)}):\")\n", - " for i, array in enumerate(mesh_data.generic_arrays, 1):\n", - " print(f\" {i}. 
{array.name}:\")\n", - " print(f\" - Type: {array.data_type.value}\")\n", - " print(f\" - Components: {array.num_components}\")\n", - " print(f\" - Interpolation: {array.interpolation}\")\n", - " print(f\" - Elements: {len(array.data):,}\")\n", - " if array.data.size > 0:\n", - " print(f\" - Range: [{np.min(array.data):.6f}, {np.max(array.data):.6f}]\")" - ] - }, { "cell_type": "code", "execution_count": null, @@ -265,7 +213,16 @@ " print(f\" - Interpolation: {array.interpolation}\")\n", " print(f\" - Elements: {len(array.data):,}\")\n", " if array.data.size > 0:\n", - " print(f\" - Range: [{np.min(array.data):.6f}, {np.max(array.data):.6f}]\")" + " print(f\" - Range: [{np.min(array.data):.6f}, {np.max(array.data):.6f}]\")\n", + "\n", + " # Cell types (face vertex count) - TPV data has multiple cell types (triangle, quad, etc.)\n", + " unique_counts, num_each = np.unique(\n", + " mesh_data.face_vertex_counts, return_counts=True\n", + " )\n", + " print(\"\\nCell types (faces by vertex count):\")\n", + " for u, n in zip(unique_counts, num_each):\n", + " name = cell_type_name_for_vertex_count(int(u))\n", + " print(f\" {name} ({u} vertices): {n:,} faces\")" ] }, { @@ -278,7 +235,7 @@ "# The workflow has changed to: convert to USD first, then apply colormap post-processing\n", "\n", "# Configuration: choose colormap for visualization\n", - "DEFAULT_COLORMAP = \"plasma\" # matplotlib colormap name\n", + "DEFAULT_COLORMAP = \"viridis\" # matplotlib colormap name\n", "\n", "# Enable automatic colorization (will pick strain/stress primvars if available)\n", "ENABLE_AUTO_COLORIZATION = True\n", @@ -304,6 +261,8 @@ " compute_normals=False, # Use existing normals if available\n", " preserve_point_arrays=True,\n", " preserve_cell_arrays=True,\n", + " separate_objects_by_cell_type=False,\n", + " separate_objects_by_connectivity=True,\n", " up_axis=\"Y\",\n", " times_per_second=60.0, # 60 FPS for smooth animation\n", " use_time_samples=True,\n", @@ -311,6 +270,7 @@ "\n", 
"print(\"Conversion settings configured\")\n", "print(f\" - Triangulate: {settings.triangulate_meshes}\")\n", + "print(f\" - Separate objects by cell type: {settings.separate_objects_by_cell_type}\")\n", "print(f\" - FPS: {settings.times_per_second}\")\n", "print(f\" - Up axis: {settings.up_axis}\")" ] @@ -319,225 +279,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## 3. Convert Full Time Series - Alterra\n", - "\n", - "Convert the complete Alterra dataset to animated USD." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Create material for Alterra\n", - "# Note: Vertex colors will be applied post-conversion by USDTools\n", - "alterra_material = MaterialData(\n", - " name=\"alterra_valve\",\n", - " diffuse_color=(0.4, 0.5, 0.8),\n", - " roughness=0.3,\n", - " metallic=0.1,\n", - " use_vertex_colors=False, # USDTools will bind vertex color material during colorization\n", - ")\n", - "\n", - "print(\"=\" * 60)\n", - "print(\"Converting Alterra Time Series\")\n", - "print(\"=\" * 60)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Convert Alterra (full resolution)\n", - "if COMPUTE_FULL_TIME_SERIES and alterra_series:\n", - " converter = VTKToUSDConverter(settings)\n", - "\n", - " # Extract file paths and time codes\n", - " alterra_files = [file_path for _, file_path in alterra_series]\n", - " alterra_times = [float(time_step) for time_step, _ in alterra_series]\n", - "\n", - " output_usd = output_dir / \"alterra_full.usd\"\n", - "\n", - " print(f\"\\nConverting to: {output_usd}\")\n", - " print(f\"Time codes: {alterra_times[0]:.1f} to {alterra_times[-1]:.1f}\")\n", - " print(\"\\nThis may take several minutes...\\n\")\n", - "\n", - " start_time = time_module.time()\n", - "\n", - " # Read MeshData\n", - " mesh_data_sequence = [read_vtk_file(f, extract_surface=True) for f in alterra_files]\n", - "\n", - " # Validate 
topology consistency across time series\n", - " validation_report = validate_time_series_topology(\n", - " mesh_data_sequence, filenames=alterra_files\n", - " )\n", - " if not validation_report[\"is_consistent\"]:\n", - " print(\n", - " f\"Warning: Found {len(validation_report['warnings'])} topology/primvar issues\"\n", - " )\n", - " if validation_report[\"topology_changes\"]:\n", - " print(\n", - " f\" Topology changes in {len(validation_report['topology_changes'])} frames\"\n", - " )\n", - "\n", - " # Convert to USD (preserves all primvars from VTK)\n", - " stage = converter.convert_mesh_data_sequence(\n", - " mesh_data_sequence=mesh_data_sequence,\n", - " output_usd=output_usd,\n", - " mesh_name=\"AlterraValve\",\n", - " time_codes=alterra_times,\n", - " material=alterra_material,\n", - " )\n", - "\n", - " # Repair elementSize for multi-component primvars (e.g. 9-component stress tensor)\n", - " usd_tools = USDTools()\n", - " mesh_path = \"/World/Meshes/AlterraValve\"\n", - " repair_report = usd_tools.repair_mesh_primvar_element_sizes(\n", - " str(output_usd), mesh_path\n", - " )\n", - " if repair_report[\"updated\"]:\n", - " print(f\"Repaired elementSize for {len(repair_report['updated'])} primvar(s)\")\n", - "\n", - " # Post-process: apply colormap visualization using USDTools\n", - " if ENABLE_AUTO_COLORIZATION:\n", - " # Inspect and select primvar for coloring\n", - " primvars = usd_tools.list_mesh_primvars(str(output_usd), mesh_path)\n", - " color_primvar = usd_tools.pick_color_primvar(\n", - " primvars, keywords=(\"strain\", \"stress\")\n", - " )\n", - "\n", - " if color_primvar:\n", - " print(f\"\\nApplying colormap to '{color_primvar}' using {DEFAULT_COLORMAP}\")\n", - " usd_tools.apply_colormap_from_primvar(\n", - " str(output_usd),\n", - " mesh_path,\n", - " color_primvar,\n", - " cmap=DEFAULT_COLORMAP,\n", - " bind_vertex_color_material=True,\n", - " )\n", - " else:\n", - " print(\"\\nNo strain/stress primvar found for coloring\")\n", - "\n", - " 
elapsed = time_module.time() - start_time\n", - "\n", - " print(f\"\\n✓ Conversion completed in {elapsed:.1f} seconds\")\n", - " print(f\" Output: {output_usd}\")\n", - " print(f\" Size: {output_usd.stat().st_size / (1024 * 1024):.2f} MB\")\n", - " print(f\" Time range: {stage.GetStartTimeCode()} - {stage.GetEndTimeCode()}\")\n", - " print(\n", - " f\" Duration: {(stage.GetEndTimeCode() - stage.GetStartTimeCode()) / settings.times_per_second:.2f} seconds @ {settings.times_per_second} FPS\"\n", - " )\n", - "elif not COMPUTE_FULL_TIME_SERIES:\n", - " print(\"⏭️ Skipping Alterra full time series (COMPUTE_FULL_TIME_SERIES = False)\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## 4. Convert Subsampled Time Series - Alterra\n", - "\n", - "For faster previews, create a subsampled version (every Nth frame)." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Subsample Alterra (every 5th frame)\n", - "if alterra_series:\n", - " subsample_rate = 5\n", - " alterra_subsampled = alterra_series[::subsample_rate]\n", - "\n", - " print(\"=\" * 60)\n", - " print(f\"Converting Subsampled Alterra (every {subsample_rate}th frame)\")\n", - " print(\"=\" * 60)\n", - " print(f\"Frames: {len(alterra_series)} → {len(alterra_subsampled)}\")\n", - "\n", - " converter = VTKToUSDConverter(settings)\n", - "\n", - " alterra_files_sub = [file_path for _, file_path in alterra_subsampled]\n", - " alterra_times_sub = [float(time_step) for time_step, _ in alterra_subsampled]\n", - "\n", - " output_usd_sub = output_dir / f\"alterra_subsample_{subsample_rate}x.usd\"\n", - "\n", - " print(f\"\\nConverting to: {output_usd_sub}\")\n", - "\n", - " start_time = time_module.time()\n", - "\n", - " # Read MeshData\n", - " mesh_data_sequence = [\n", - " read_vtk_file(f, extract_surface=True) for f in alterra_files_sub\n", - " ]\n", - "\n", - " # Validate topology consistency across time series\n", - " 
validation_report = validate_time_series_topology(\n", - " mesh_data_sequence, filenames=alterra_files_sub\n", - " )\n", - " if not validation_report[\"is_consistent\"]:\n", - " print(\n", - " f\"Warning: Found {len(validation_report['warnings'])} topology/primvar issues\"\n", - " )\n", - " if validation_report[\"topology_changes\"]:\n", - " print(\n", - " f\" Topology changes in {len(validation_report['topology_changes'])} frames\"\n", - " )\n", - "\n", - " # Convert to USD (preserves all primvars from VTK)\n", - " stage_sub = converter.convert_mesh_data_sequence(\n", - " mesh_data_sequence=mesh_data_sequence,\n", - " output_usd=output_usd_sub,\n", - " mesh_name=\"AlterraValve\",\n", - " time_codes=alterra_times_sub,\n", - " material=alterra_material,\n", - " )\n", - "\n", - " # Repair elementSize for multi-component primvars (e.g. 9-component stress tensor)\n", - " usd_tools = USDTools()\n", - " mesh_path = \"/World/Meshes/AlterraValve\"\n", - " repair_report = usd_tools.repair_mesh_primvar_element_sizes(\n", - " str(output_usd_sub), mesh_path\n", - " )\n", - " if repair_report[\"updated\"]:\n", - " print(f\"Repaired elementSize for {len(repair_report['updated'])} primvar(s)\")\n", - "\n", - " # Post-process: apply colormap visualization using USDTools\n", - " if ENABLE_AUTO_COLORIZATION:\n", - " # Inspect and select primvar for coloring\n", - " primvars = usd_tools.list_mesh_primvars(str(output_usd_sub), mesh_path)\n", - " color_primvar = usd_tools.pick_color_primvar(\n", - " primvars, keywords=(\"strain\", \"stress\")\n", - " )\n", - "\n", - " if color_primvar:\n", - " print(f\"\\nApplying colormap to '{color_primvar}' using {DEFAULT_COLORMAP}\")\n", - " usd_tools.apply_colormap_from_primvar(\n", - " str(output_usd_sub),\n", - " mesh_path,\n", - " color_primvar,\n", - " cmap=DEFAULT_COLORMAP,\n", - " bind_vertex_color_material=True,\n", - " )\n", - " else:\n", - " print(\"\\nNo strain/stress primvar found for coloring\")\n", - "\n", - " elapsed = 
time_module.time() - start_time\n", - "\n", - " print(f\"\\n✓ Conversion completed in {elapsed:.1f} seconds\")\n", - " print(f\" Output: {output_usd_sub}\")\n", - " print(f\" Size: {output_usd_sub.stat().st_size / (1024 * 1024):.2f} MB\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## 5. Convert Full Time Series - TPV25" + "## 3. Convert Full Time Series - TPV25" ] }, { @@ -601,15 +343,31 @@ " mesh_name=\"TPV25Valve\",\n", " time_codes=tpv25_times,\n", " material=tpv25_material,\n", - " )\n", - "\n", + " )" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "if COMPUTE_FULL_TIME_SERIES and tpv25_series:\n", " # Post-process: apply colormap visualization using USDTools\n", " if ENABLE_AUTO_COLORIZATION:\n", " usd_tools = USDTools()\n", - " mesh_path = \"/World/Meshes/TPV25Valve\"\n", + " if settings.separate_objects_by_connectivity is True:\n", + " mesh_path1 = \"/World/Meshes/object4\"\n", + " mesh_path2 = \"/World/Meshes/object3\"\n", + " elif settings.separate_objects_by_cell_type is True:\n", + " mesh_path1 = \"/World/Meshes/triangle1\"\n", + " mesh_path2 = \"/World/Meshes/triangle1\"\n", + " else:\n", + " mesh_path1 = \"/World/Meshes/TPV25Valve\"\n", + " mesh_path2 = \"/World/Meshes/TPV25Valve\"\n", "\n", " # Inspect and select primvar for coloring\n", - " primvars = usd_tools.list_mesh_primvars(str(output_usd), mesh_path)\n", + " primvars = usd_tools.list_mesh_primvars(str(output_usd), mesh_path1)\n", " color_primvar = usd_tools.pick_color_primvar(\n", " primvars, keywords=(\"strain\", \"stress\")\n", " )\n", @@ -618,18 +376,24 @@ " print(f\"\\nApplying colormap to '{color_primvar}' using {DEFAULT_COLORMAP}\")\n", " usd_tools.apply_colormap_from_primvar(\n", " str(output_usd),\n", - " mesh_path,\n", + " mesh_path1,\n", " color_primvar,\n", - " cmap=DEFAULT_COLORMAP,\n", + " cmap=\"hot\",\n", + " bind_vertex_color_material=True,\n", + " )\n", + " 
usd_tools.apply_colormap_from_primvar(\n", + " str(output_usd),\n", + " mesh_path2,\n", + " color_primvar,\n", + " cmap=\"gray\",\n", " bind_vertex_color_material=True,\n", " )\n", " else:\n", " print(\"\\nNo strain/stress primvar found for coloring\")\n", "\n", - " elapsed = time_module.time() - start_time\n", + " # Read MeshData\n", + " mesh_data_sequence = [read_vtk_file(f, extract_surface=True) for f in tpv25_files]\n", "\n", - " print(f\"\\n✓ Conversion completed in {elapsed:.1f} seconds\")\n", - " print(f\" Output: {output_usd}\")\n", " print(f\" Size: {output_usd.stat().st_size / (1024 * 1024):.2f} MB\")\n", " print(f\" Time range: {stage.GetStartTimeCode()} - {stage.GetEndTimeCode()}\")\n", " print(\n", @@ -643,7 +407,9 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## 6. Convert Subsampled Time Series - TPV25" + "## 4. Convert Subsampled Time Series - TPV25 (single mesh)\n", + "\n", + "Convert TPV25 with every 5th frame to **tpv25_subsample_5x.usd**. Uses default settings (no split); one mesh prim `TPV25Valve`." ] }, { @@ -703,7 +469,12 @@ " # Post-process: apply colormap visualization using USDTools\n", " if ENABLE_AUTO_COLORIZATION:\n", " usd_tools = USDTools()\n", - " mesh_path = \"/World/Meshes/TPV25Valve\"\n", + " if settings.separate_objects_by_connectivity is True:\n", + " mesh_path = \"/World/Meshes/object3\"\n", + " elif settings.separate_objects_by_cell_type is True:\n", + " mesh_path = \"/World/Meshes/triangle1\"\n", + " else:\n", + " mesh_path = \"/World/Meshes/TPV25Valve\"\n", "\n", " # Inspect and select primvar for coloring\n", " primvars = usd_tools.list_mesh_primvars(str(output_usd_sub), mesh_path)\n", @@ -734,9 +505,11 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## 7. Create Combined Scene\n", + "## 5. TPV25 Subsampled — Split by Cell Type\n", + "\n", + "When `separate_objects_by_cell_type=True`, the converter splits the mesh into **separate USD prims** by cell type (triangle, quad, etc.). 
Output: **tpv25_subsample_5x_by_cell_type.usd** (distinct from the single-mesh subsample).\n", "\n", - "Create a single USD file with both valves side-by-side for comparison." + "TPV data contains multiple cell types (see first-frame analysis). Here we convert the same subsampled TPV25 sequence with triangulation off so quads remain quads; the stage has one mesh per cell type (e.g. `Triangle_0`, `Quad_0`)." ] }, { @@ -745,50 +518,120 @@ "metadata": {}, "outputs": [], "source": [ - "# Create combined scene with both valves using USDTools\n", - "if alterra_series and tpv25_series:\n", - " print(\"=\" * 60)\n", - " print(\"Creating Combined Scene\")\n", - " print(\"=\" * 60)\n", + "# Convert TPV25 subsampled with separate meshes per cell type (triangulate=False to preserve quads)\n", + "if tpv25_series:\n", + " settings_by_cell_type = ConversionSettings(\n", + " triangulate_meshes=False, # Keep quads so we get both Triangle_0 and Quad_0\n", + " compute_normals=False,\n", + " preserve_point_arrays=True,\n", + " preserve_cell_arrays=True,\n", + " separate_objects_by_cell_type=True,\n", + " separate_objects_by_connectivity=False,\n", + " up_axis=\"Y\",\n", + " times_per_second=60.0,\n", + " use_time_samples=True,\n", + " )\n", "\n", - " # Use the subsampled USD files created earlier\n", " subsample_rate = 5\n", - " alterra_usd = output_dir / f\"alterra_subsample_{subsample_rate}x.usd\"\n", - " tpv25_usd = output_dir / f\"tpv25_subsample_{subsample_rate}x.usd\"\n", + " tpv25_subsampled = tpv25_series[::subsample_rate]\n", + " tpv25_files_sub = [file_path for _, file_path in tpv25_subsampled]\n", + " tpv25_times_sub = [float(t) for t, _ in tpv25_subsampled]\n", + "\n", + " output_by_cell_type = output_dir / \"tpv25_subsample_5x_by_cell_type.usd\"\n", + " print(\"Converting TPV25 (subsampled) with separate objects by cell type...\")\n", + " print(\n", + " \" triangulate_meshes=False → triangles and quads preserved as separate meshes\"\n", + " )\n", + " print(f\" Output: 
{output_by_cell_type.name}\")\n", + "\n", + " converter_ct = VTKToUSDConverter(settings_by_cell_type)\n", + " mesh_data_sequence = [\n", + " read_vtk_file(f, extract_surface=True) for f in tpv25_files_sub\n", + " ]\n", + " stage_ct = converter_ct.convert_mesh_data_sequence(\n", + " mesh_data_sequence=mesh_data_sequence,\n", + " output_usd=output_by_cell_type,\n", + " mesh_name=\"TPV25Valve\", # base name when not splitting; ignored when splitting\n", + " time_codes=tpv25_times_sub,\n", + " material=tpv25_material,\n", + " )\n", "\n", - " # Check if the files exist\n", - " if alterra_usd.exists() and tpv25_usd.exists():\n", - " combined_usd = output_dir / \"valves_combined.usd\"\n", + " # List mesh prims under /World/Meshes (each cell type is a separate prim)\n", + " meshes_prim = stage_ct.GetPrimAtPath(\"/World/Meshes\")\n", + " if meshes_prim:\n", + " children = meshes_prim.GetChildren()\n", + " print(f\"\\nMesh prims created (by cell type): {len(children)}\")\n", + " for child in children:\n", + " print(f\" - {child.GetPath().pathString}\")\n", + " print(f\"\\n✓ Saved: {output_by_cell_type}\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 6. TPV25 Subsampled — Split by Connectivity\n", "\n", - " print(\"Input files:\")\n", - " print(f\" - {alterra_usd.name}\")\n", - " print(f\" - {tpv25_usd.name}\")\n", - " print(f\"Output: {combined_usd.name}\")\n", + "When `separate_objects_by_connectivity=True`, the converter splits the mesh into **separate USD prims** by connected component (object1, object2, ...). Output: **tpv25_subsample_5x_by_connectivity.usd** (distinct from single-mesh and by-cell-type).\n", "\n", - " # Use USDTools to arrange the valves side-by-side\n", - " from physiomotion4d.usd_tools import USDTools\n", + "Only one of `separate_objects_by_cell_type` and `separate_objects_by_connectivity` can be enabled at a time." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Convert TPV25 subsampled with separate meshes per connected component\n", + "if tpv25_series:\n", + " settings_by_connectivity = ConversionSettings(\n", + " triangulate_meshes=True,\n", + " compute_normals=False,\n", + " preserve_point_arrays=True,\n", + " preserve_cell_arrays=True,\n", + " separate_objects_by_cell_type=False,\n", + " separate_objects_by_connectivity=True,\n", + " up_axis=\"Y\",\n", + " times_per_second=60.0,\n", + " use_time_samples=True,\n", + " )\n", "\n", - " usd_tools = USDTools()\n", + " subsample_rate = 5\n", + " tpv25_subsampled = tpv25_series[::subsample_rate]\n", + " tpv25_files_sub = [file_path for _, file_path in tpv25_subsampled]\n", + " tpv25_times_sub = [float(t) for t, _ in tpv25_subsampled]\n", "\n", - " usd_tools.save_usd_file_arrangement(\n", - " str(combined_usd), [str(alterra_usd), str(tpv25_usd)]\n", - " )\n", + " output_by_connectivity = output_dir / \"tpv25_subsample_5x_by_connectivity.usd\"\n", + " print(\"Converting TPV25 (subsampled) with separate objects by connectivity...\")\n", + " print(f\" Output: {output_by_connectivity.name}\")\n", "\n", - " print(f\"\\n✓ Combined scene created: {combined_usd.name}\")\n", - " print(\" - Both valves arranged in a spatial grid\")\n", - " print(\" - Ready to view in Omniverse or USDView\")\n", - " else:\n", - " print(\"\\n⚠ Subsampled USD files not found.\")\n", - " print(\"Run the conversion cells above first to create:\")\n", - " print(f\" - {alterra_usd.name}\")\n", - " print(f\" - {tpv25_usd.name}\")" + " converter_conn = VTKToUSDConverter(settings_by_connectivity)\n", + " mesh_data_sequence = [\n", + " read_vtk_file(f, extract_surface=True) for f in tpv25_files_sub\n", + " ]\n", + " stage_conn = converter_conn.convert_mesh_data_sequence(\n", + " mesh_data_sequence=mesh_data_sequence,\n", + " output_usd=output_by_connectivity,\n", + " mesh_name=\"TPV25Valve\",\n", + 
" time_codes=tpv25_times_sub,\n", + " material=tpv25_material,\n", + " )\n", + "\n", + " meshes_prim = stage_conn.GetPrimAtPath(\"/World/Meshes\")\n", + " if meshes_prim:\n", + " children = meshes_prim.GetChildren()\n", + " print(f\"\\nMesh prims created (by connectivity): {len(children)}\")\n", + " for child in children:\n", + " print(f\" - {child.GetPath().pathString}\")\n", + " print(f\"\\n✓ Saved: {output_by_connectivity}\")" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "## 8. Summary and File Inspection" + "## 7. Summary and File Inspection" ] }, { @@ -852,7 +695,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## 9. Detailed USD Inspection\n", + "## 8. Detailed USD Inspection\n", "\n", "Examine the converted USD files to verify data preservation." ] @@ -864,7 +707,7 @@ "outputs": [], "source": [ "# Inspect one of the converted files in detail\n", - "inspect_file = output_dir / \"alterra_subsample_5x.usd\"\n", + "inspect_file = output_dir / \"tpv25_subsample_5x.usd\"\n", "\n", "if inspect_file.exists():\n", " print(\"=\" * 60)\n", @@ -936,7 +779,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## 9.5. Post-Process USD with USDTools\n", + "## 8.5. Post-Process USD with USDTools\n", "\n", "Demonstrate using the new `USDTools` methods to inspect primvars and apply colormap visualization to existing USD files." ] @@ -953,7 +796,7 @@ "usd_tools = USDTools()\n", "\n", "# Pick a USD file to post-process\n", - "postprocess_file = output_dir / \"alterra_subsample_5x.usd\"\n", + "postprocess_file = output_dir / \"tpv25_subsample_5x.usd\"\n", "\n", "if postprocess_file.exists():\n", " print(\"=\" * 60)\n", @@ -1017,7 +860,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## 10. Performance Analysis" + "## 9. 
Performance Analysis" ] }, { @@ -1032,11 +875,11 @@ "print(\"=\" * 60)\n", "\n", "# Read a few frames to estimate per-frame metrics\n", - "if alterra_series:\n", + "if tpv25_series:\n", " sample_files = [\n", - " alterra_series[0][1],\n", - " alterra_series[len(alterra_series) // 2][1],\n", - " alterra_series[-1][1],\n", + " tpv25_series[0][1],\n", + " tpv25_series[len(tpv25_series) // 2][1],\n", + " tpv25_series[-1][1],\n", " ]\n", "\n", " total_points = 0\n", @@ -1053,13 +896,13 @@ " avg_faces = total_faces / len(sample_files)\n", " avg_arrays = total_arrays / len(sample_files)\n", "\n", - " print(\"\\nAlterra Dataset:\")\n", + " print(\"\\nTPV25 Dataset:\")\n", " print(f\" Average points per frame: {avg_points:,.0f}\")\n", " print(f\" Average faces per frame: {avg_faces:,.0f}\")\n", " print(f\" Average data arrays per frame: {avg_arrays:.0f}\")\n", - " print(f\" Total frames: {len(alterra_series)}\")\n", - " print(f\" Estimated total points: {avg_points * len(alterra_series):,.0f}\")\n", - " print(f\" Estimated total faces: {avg_faces * len(alterra_series):,.0f}\")\n", + " print(f\" Total frames: {len(tpv25_series)}\")\n", + " print(f\" Estimated total points: {avg_points * len(tpv25_series):,.0f}\")\n", + " print(f\" Estimated total faces: {avg_faces * len(tpv25_series):,.0f}\")\n", "\n", "print(f\"\\n{'=' * 60}\")\n", "print(\"\\n✓ All conversions completed!\")\n", @@ -1089,10 +932,10 @@ "\n", "### File Outputs\n", "\n", - "- `alterra_full.usd` - Complete 232-frame animation\n", - "- `alterra_subsample_5x.usd` - Subsampled for preview\n", - "- `tpv25_full.usd` - Complete 265-frame animation\n", - "- `tpv25_subsample_5x.usd` - Subsampled for preview\n", + "- `tpv25_full.usd` - Complete 265-frame animation (single mesh)\n", + "- `tpv25_subsample_5x.usd` - Subsampled, single mesh\n", + "- `tpv25_subsample_5x_by_cell_type.usd` - Subsampled, split by cell type (Triangle_0, Quad_0, ...)\n", + "- `tpv25_subsample_5x_by_connectivity.usd` - Subsampled, split by 
connectivity (object1, object2, ...)\n", "\n", "### Performance Notes\n", "\n", @@ -1106,7 +949,7 @@ "1. **View animations** in USDView or Omniverse\n", "2. **Analyze primvars** to visualize simulation data\n", "3. **Create custom materials** based on data arrays\n", - "4. **Compose scenes** with multiple valves for comparison\n", + "4. **Compose scenes** or add multiple assets for comparison\n", "5. **Add cameras and lighting** for publication-quality renders" ] } diff --git a/experiments/Heart-GatedCT_To_USD/0-download_and_convert_4d_to_3d.ipynb b/experiments/Heart-GatedCT_To_USD/0-download_and_convert_4d_to_3d.ipynb index 62986a2..1f67df8 100644 --- a/experiments/Heart-GatedCT_To_USD/0-download_and_convert_4d_to_3d.ipynb +++ b/experiments/Heart-GatedCT_To_USD/0-download_and_convert_4d_to_3d.ipynb @@ -78,4 +78,4 @@ }, "nbformat": 4, "nbformat_minor": 2 -} \ No newline at end of file +} diff --git a/experiments/Heart-GatedCT_To_USD/1-register_images.ipynb b/experiments/Heart-GatedCT_To_USD/1-register_images.ipynb index 96dbb3b..bd005cf 100644 --- a/experiments/Heart-GatedCT_To_USD/1-register_images.ipynb +++ b/experiments/Heart-GatedCT_To_USD/1-register_images.ipynb @@ -11,7 +11,7 @@ "\n", "import itk\n", "\n", - "from physiomotion4d.register_images_icon import RegisterImagesICON\n", + "from physiomotion4d.register_images_ants import RegisterImagesANTs\n", "from physiomotion4d.segment_chest_total_segmentator import SegmentChestTotalSegmentator\n", "from physiomotion4d.transform_tools import TransformTools" ] @@ -91,8 +91,9 @@ "metadata": {}, "outputs": [], "source": [ - "reg = RegisterImagesICON()\n", - "reg.set_mask_dilation(5)" + "reg = RegisterImagesANTs()\n", + "reg.set_mask_dilation(5)\n", + "reg.set_number_of_iterations([10, 5, 2])" ] }, { @@ -102,7 +103,7 @@ "metadata": {}, "outputs": [], "source": [ - "for i in range(0, 21, 4): # Process every 4th slice to save time testing\n", + "for i in range(0, 21, 1): # Process every 4th slice to save time testing\n", 
" print(f\"Processing slice {i:03d}\")\n", " moving_image = itk.imread(os.path.join(data_dir, f\"slice_{i:03d}.mha\"))\n", " result = seg.segment(moving_image, contrast_enhanced_study=True)\n", diff --git a/experiments/Heart-GatedCT_To_USD/2-generate_segmentation.ipynb b/experiments/Heart-GatedCT_To_USD/2-generate_segmentation.ipynb index f5995fe..3570e57 100644 --- a/experiments/Heart-GatedCT_To_USD/2-generate_segmentation.ipynb +++ b/experiments/Heart-GatedCT_To_USD/2-generate_segmentation.ipynb @@ -339,4 +339,4 @@ }, "nbformat": 4, "nbformat_minor": 5 -} \ No newline at end of file +} diff --git a/experiments/Heart-GatedCT_To_USD/4-merge_dynamic_and_static_usd.ipynb b/experiments/Heart-GatedCT_To_USD/4-merge_dynamic_and_static_usd.ipynb index 557eb5d..4f1fd2b 100644 --- a/experiments/Heart-GatedCT_To_USD/4-merge_dynamic_and_static_usd.ipynb +++ b/experiments/Heart-GatedCT_To_USD/4-merge_dynamic_and_static_usd.ipynb @@ -66,4 +66,4 @@ }, "nbformat": 4, "nbformat_minor": 5 -} \ No newline at end of file +} diff --git a/experiments/Heart-GatedCT_To_USD/test_compare_registration_speed.ipynb b/experiments/Heart-GatedCT_To_USD/test_compare_registration_speed.ipynb new file mode 100644 index 0000000..9884865 --- /dev/null +++ b/experiments/Heart-GatedCT_To_USD/test_compare_registration_speed.ipynb @@ -0,0 +1,265 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "intro", + "metadata": {}, + "source": [ + "# Compare registration speed: Greedy vs ANTs vs ICON\n", + "\n", + "This notebook times **Greedy**, **ANTs**, and **ICON** when registering two time points of CT from the Slicer-Heart-CT data (TruncalValve 4D CT).\n", + "\n", + "**Prerequisites:** Run `0-download_and_convert_4d_to_3d.ipynb` first so that `data/Slicer-Heart-CT/` contains the 4D NRRD and the 3D slice series (`slice_000.mha`, `slice_001.mha`, ...), and `results/slice_fixed.mha` exists." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "imports", + "metadata": {}, + "outputs": [], + "source": [ + "import os\n", + "import time\n", + "\n", + "import itk\n", + "import matplotlib.pyplot as plt\n", + "import pandas as pd\n", + "from itk import TubeTK as ttk\n", + "\n", + "from physiomotion4d.register_images_ants import RegisterImagesANTs\n", + "from physiomotion4d.register_images_greedy import RegisterImagesGreedy\n", + "from physiomotion4d.register_images_icon import RegisterImagesICON" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "paths", + "metadata": {}, + "outputs": [], + "source": [ + "data_dir = \"../../data/Slicer-Heart-CT\"\n", + "output_dir = \"./results\"\n", + "os.makedirs(output_dir, exist_ok=True)\n", + "\n", + "# Fixed = reference time point; moving = time point to align to fixed\n", + "fixed_image_path = os.path.join(output_dir, \"slice_fixed.mha\")\n", + "moving_image_path = os.path.join(data_dir, \"slice_000.mha\")\n", + "\n", + "if not os.path.exists(fixed_image_path):\n", + " raise FileNotFoundError(\n", + " f\"Fixed image not found: {fixed_image_path}. \"\n", + " \"Run 0-download_and_convert_4d_to_3d.ipynb first.\"\n", + " )\n", + "if not os.path.exists(moving_image_path):\n", + " raise FileNotFoundError(\n", + " f\"Moving image not found: {moving_image_path}. \"\n", + " \"Run 0-download_and_convert_4d_to_3d.ipynb first.\"\n", + " )\n", + "\n", + "fixed_image = itk.imread(fixed_image_path)\n", + "moving_image = itk.imread(moving_image_path)\n", + "print(f\"Fixed image: {itk.size(fixed_image)}, spacing {itk.spacing(fixed_image)}\")\n", + "print(f\"Moving image: {itk.size(moving_image)}, spacing {itk.spacing(moving_image)}\")" + ] + }, + { + "cell_type": "markdown", + "id": "downsample", + "metadata": {}, + "source": [ + "## Optional: downsample for faster comparison\n", + "\n", + "Set `downsample_factor = 1.0` to use full resolution (slower). Use e.g. 
`0.5` to halve each dimension for a quicker run." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "resample", + "metadata": {}, + "outputs": [], + "source": [ + "downsample_factor = 0.5 # 1.0 = full resolution\n", + "\n", + "if downsample_factor != 1.0:\n", + " resampler_f = ttk.ResampleImage.New(Input=fixed_image)\n", + " resampler_f.SetResampleFactor([downsample_factor] * 3)\n", + " resampler_f.Update()\n", + " fixed_image = resampler_f.GetOutput()\n", + "\n", + " resampler_m = ttk.ResampleImage.New(Input=moving_image)\n", + " resampler_m.SetResampleFactor([downsample_factor] * 3)\n", + " resampler_m.Update()\n", + " moving_image = resampler_m.GetOutput()\n", + " print(f\"Downsampled to factor {downsample_factor}\")\n", + " print(f\" Fixed: {itk.size(fixed_image)}\")\n", + " print(f\" Moving: {itk.size(moving_image)}\")\n", + "else:\n", + " print(\"Using full resolution.\")" + ] + }, + { + "cell_type": "markdown", + "id": "run", + "metadata": {}, + "source": [ + "## Run each method and record time\n", + "\n", + "All three use **deformable** registration (Greedy: affine + deformable; ANTs: SyN; ICON: deep learning). Settings are chosen for a fair comparison with reduced iterations so the notebook runs in a few minutes." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "timing", + "metadata": {}, + "outputs": [], + "source": [ + "results_list = []\n", + "\n", + "# --- Greedy (deformable) ---\n", + "try:\n", + " reg_g = RegisterImagesGreedy()\n", + " reg_g.set_modality(\"ct\")\n", + " reg_g.set_transform_type(\"Deformable\")\n", + " reg_g.set_number_of_iterations([10, 5, 2])\n", + " reg_g.set_fixed_image(fixed_image)\n", + "\n", + " t0 = time.perf_counter()\n", + " out_g = reg_g.register(moving_image)\n", + " elapsed_g = time.perf_counter() - t0\n", + "\n", + " loss_g = out_g.get(\"loss\")\n", + " results_list.append(\n", + " {\n", + " \"method\": \"Greedy\",\n", + " \"time_sec\": round(elapsed_g, 2),\n", + " \"loss\": float(loss_g) if loss_g is not None else None,\n", + " }\n", + " )\n", + " print(f\"Greedy: {elapsed_g:.2f} s\")\n", + "except Exception as e:\n", + " results_list.append({\"method\": \"Greedy\", \"time_sec\": None, \"loss\": None})\n", + " print(f\"Greedy: failed - {e}\")\n", + "\n", + "# --- ANTs (deformable SyN) ---\n", + "try:\n", + " reg_a = RegisterImagesANTs()\n", + " reg_a.set_modality(\"ct\")\n", + " reg_a.set_transform_type(\"Deformable\")\n", + " reg_a.set_number_of_iterations([10, 5, 2]) # reduced for speed\n", + " reg_a.set_fixed_image(fixed_image)\n", + "\n", + " t0 = time.perf_counter()\n", + " out_a = reg_a.register(moving_image)\n", + " elapsed_a = time.perf_counter() - t0\n", + "\n", + " loss_a = out_a.get(\"loss\")\n", + " results_list.append(\n", + " {\n", + " \"method\": \"ANTs\",\n", + " \"time_sec\": round(elapsed_a, 2),\n", + " \"loss\": float(loss_a) if loss_a is not None else None,\n", + " }\n", + " )\n", + " print(f\"ANTs: {elapsed_a:.2f} s\")\n", + "except Exception as e:\n", + " results_list.append({\"method\": \"ANTs\", \"time_sec\": None, \"loss\": None})\n", + " print(f\"ANTs: failed - {e}\")\n", + "\n", + "# --- ICON (deformable, GPU) ---\n", + "try:\n", + " reg_i = RegisterImagesICON()\n", + " 
reg_i.set_modality(\"ct\")\n", + " reg_i.set_number_of_iterations(50)\n", + " reg_i.set_fixed_image(fixed_image)\n", + "\n", + " t0 = time.perf_counter()\n", + " out_i = reg_i.register(moving_image)\n", + " elapsed_i = time.perf_counter() - t0\n", + "\n", + " loss_i = out_i.get(\"loss\")\n", + " results_list.append(\n", + " {\n", + " \"method\": \"ICON\",\n", + " \"time_sec\": round(elapsed_i, 2),\n", + " \"loss\": float(loss_i) if loss_i is not None else None,\n", + " }\n", + " )\n", + " print(f\"ICON: {elapsed_i:.2f} s\")\n", + "except Exception as e:\n", + " results_list.append({\"method\": \"ICON\", \"time_sec\": None, \"loss\": None})\n", + " print(f\"ICON: failed - {e}\")\n", + "\n", + "df = pd.DataFrame(results_list)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "table", + "metadata": {}, + "outputs": [], + "source": [ + "display(df)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "plot", + "metadata": {}, + "outputs": [], + "source": [ + "fig, ax = plt.subplots(figsize=(6, 4))\n", + "valid = df[\"time_sec\"].notna()\n", + "if valid.any():\n", + " methods = df.loc[valid, \"method\"]\n", + " times = df.loc[valid, \"time_sec\"]\n", + " ax.bar(methods, times, color=[\"#2ecc71\", \"#3498db\", \"#9b59b6\"])\n", + " ax.set_ylabel(\"Time (seconds)\")\n", + " ax.set_title(\"Registration time: two time points (Slicer-Heart-CT)\")\n", + " plt.tight_layout()\n", + " plt.show()\n", + "else:\n", + " print(\"No successful runs to plot.\")" + ] + }, + { + "cell_type": "markdown", + "id": "notes", + "metadata": {}, + "source": [ + "## Notes\n", + "\n", + "- **Greedy**: CPU-based, often faster than ANTs for comparable quality; see [Greedy](https://greedy.readthedocs.io/) and [picsl-greedy](https://pypi.org/project/picsl-greedy/).\n", + "- **ANTs**: CPU-based, very widely used; typically slower than Greedy for similar settings.\n", + "- **ICON**: GPU-based (UniGradIcon); speed depends on GPU. 
Loss values are not directly comparable across methods.\n", + "- For a quicker comparison, use `downsample_factor = 0.5` or reduce `number_of_iterations` further." + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "venv", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.11" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/pyproject.toml b/pyproject.toml index ae220f5..76a6f37 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -71,6 +71,7 @@ dependencies = [ # Registration "antspyx>=0.4.0", "icon-registration>=1.0.0", + "picsl-greedy>=0.0.12", "unigradicon>=1.0.0", # Visualization and USD @@ -163,6 +164,7 @@ Changelog = "https://github.com/aylward/PhysioMotion4d/blob/main/CHANGELOG.md" # CLI commands installed via pip # Entry points reference the main() functions in the cli submodule physiomotion4d-heart-gated-ct = "physiomotion4d.cli.convert_heart_gated_ct_to_usd:main" +physiomotion4d-convert-vtk-to-usd = "physiomotion4d.cli.convert_vtk_to_usd:main" physiomotion4d-create-statistical-model = "physiomotion4d.cli.create_statistical_model:main" physiomotion4d-fit-statistical-model-to-patient = "physiomotion4d.cli.fit_statistical_model_to_patient:main" physiomotion4d-visualize-pca-modes = "physiomotion4d.cli.visualize_pca_modes:main" diff --git a/src/physiomotion4d/__init__.py b/src/physiomotion4d/__init__.py index 16e27cc..b1d7183 100644 --- a/src/physiomotion4d/__init__.py +++ b/src/physiomotion4d/__init__.py @@ -34,6 +34,7 @@ # Base classes from .physiomotion4d_base import PhysioMotion4DBase from .register_images_ants import RegisterImagesANTs +from .register_images_greedy import RegisterImagesGreedy # Registration classes from .register_images_base import RegisterImagesBase @@ -57,6 +58,7 @@ 
# Core workflow processor from .workflow_convert_heart_gated_ct_to_usd import WorkflowConvertHeartGatedCTToUSD +from .workflow_convert_vtk_to_usd import WorkflowConvertVTKToUSD from .workflow_reconstruct_highres_4d_ct import WorkflowReconstructHighres4DCT from .workflow_create_statistical_model import WorkflowCreateStatisticalModel from .workflow_fit_statistical_model_to_patient import ( @@ -66,6 +68,7 @@ __all__ = [ # Workflow classes "WorkflowConvertHeartGatedCTToUSD", + "WorkflowConvertVTKToUSD", "WorkflowCreateStatisticalModel", "WorkflowReconstructHighres4DCT", "WorkflowFitStatisticalModelToPatient", @@ -80,6 +83,7 @@ "RegisterImagesBase", "RegisterImagesICON", "RegisterImagesANTs", + "RegisterImagesGreedy", "RegisterTimeSeriesImages", "RegisterModelsPCA", "RegisterModelsICP", diff --git a/src/physiomotion4d/cli/__init__.py b/src/physiomotion4d/cli/__init__.py index 632cf1c..3889cf9 100644 --- a/src/physiomotion4d/cli/__init__.py +++ b/src/physiomotion4d/cli/__init__.py @@ -2,6 +2,7 @@ __all__ = [ "convert_heart_gated_ct_to_usd", + "convert_vtk_to_usd", "create_statistical_model", "fit_statistical_model_to_patient", "visualize_pca_modes", diff --git a/src/physiomotion4d/cli/convert_vtk_to_usd.py b/src/physiomotion4d/cli/convert_vtk_to_usd.py new file mode 100644 index 0000000..179c82e --- /dev/null +++ b/src/physiomotion4d/cli/convert_vtk_to_usd.py @@ -0,0 +1,236 @@ +#!/usr/bin/env python +""" +Command-line interface for VTK to USD conversion workflow. + +Converts one or more VTK files to USD with optional splitting by connectivity +or cell type, and applies a chosen appearance: solid color, anatomic material, +or colormap from a primvar (with auto or specified intensity range). 
+""" + +import argparse +import os +import sys + +from physiomotion4d import WorkflowConvertVTKToUSD + +ANATOMY_TYPES = [ + "heart", + "lung", + "bone", + "major_vessels", + "contrast", + "soft_tissue", + "other", + "liver", + "spleen", + "kidney", +] + + +def _parse_color(s: str) -> tuple[float, float, float]: + """Parse 'R G B' or 'R,G,B' into (r,g,b) in [0,1].""" + parts = s.replace(",", " ").split() + if len(parts) != 3: + raise ValueError("Color must be three numbers (R G B) in [0,1] or [0,255]") + vals = [float(x) for x in parts] + if all(0 <= v <= 1 for v in vals): + return (vals[0], vals[1], vals[2]) + if all(0 <= v <= 255 for v in vals): + return (vals[0] / 255.0, vals[1] / 255.0, vals[2] / 255.0) + raise ValueError("Color values must be in [0,1] or [0,255]") + + +def main() -> int: + """Command-line interface for VTK to USD conversion.""" + parser = argparse.ArgumentParser( + description="Convert VTK file(s) to USD with optional splitting and appearance", + formatter_class=argparse.RawDescriptionHelpFormatter, + epilog=""" +Examples: + # Single file, solid gray + %(prog)s mesh.vtk -o output.usd --appearance solid + + # Single file, red + %(prog)s mesh.vtk -o output.usd --appearance solid --color 1 0 0 + + # Time series, split by connectivity, colormap from stress (auto range) + %(prog)s frame_*.vtk -o out.usd --by-connectivity --appearance colormap --primvar vtk_point_stress_c0 + + # Time series, colormap with specified intensity range + %(prog)s frame_*.vtk -o out.usd --appearance colormap --primvar stress --intensity-range 0 500 + + # Single file, anatomic heart material + %(prog)s heart.vtp -o heart.usd --appearance anatomy --anatomy-type heart + + # Split by cell type (triangle vs quad), solid color + %(prog)s mesh.vtk -o out.usd --by-cell-type --appearance solid + """, + ) + + parser.add_argument( + "vtk_files", + nargs="+", + help="One or more VTK files (.vtk, .vtp, .vtu). 
Multiple files form a time series.", + ) + parser.add_argument( + "-o", + "--output", + required=True, + dest="output_usd", + help="Output USD file path", + ) + parser.add_argument( + "--by-connectivity", + action="store_true", + dest="separate_by_connectivity", + help="Split mesh into separate objects by connected components (default)", + ) + parser.add_argument( + "--by-cell-type", + action="store_true", + dest="separate_by_cell_type", + help="Split mesh by cell type (triangle, quad, etc.). Cannot use with --by-connectivity", + ) + parser.add_argument( + "--no-split", + action="store_true", + help="Do not split; output a single mesh (clears --by-connectivity and --by-cell-type)", + ) + parser.add_argument( + "--mesh-name", + default="Mesh", + help="Base mesh name (default: Mesh)", + ) + parser.add_argument( + "--fps", + type=float, + default=60.0, + dest="times_per_second", + help="Frames per second for time series (default: 60)", + ) + parser.add_argument( + "--up-axis", + choices=["Y", "Z"], + default="Y", + help="USD up axis (default: Y)", + ) + parser.add_argument( + "--no-extract-surface", + action="store_false", + dest="extract_surface", + help="Do not extract surface for .vtu files", + ) + + # Appearance + parser.add_argument( + "--appearance", + choices=["solid", "anatomy", "colormap"], + default="solid", + help="Appearance to apply to all meshes: solid color, anatomic material, or colormap from primvar (default: solid)", + ) + parser.add_argument( + "--color", + type=str, + metavar="R G B", + help="Solid color as R G B in [0,1] or [0,255] (default: 0.8 0.8 0.8). Used when --appearance solid.", + ) + parser.add_argument( + "--anatomy-type", + choices=ANATOMY_TYPES, + default="heart", + help="Anatomy material when --appearance anatomy (default: heart)", + ) + parser.add_argument( + "--primvar", + dest="colormap_primvar", + default=None, + help="Primvar name for colormap (e.g. vtk_point_stress_c0). 
If omitted, one is auto-picked when --appearance colormap.", + ) + parser.add_argument( + "--cmap", + dest="colormap_name", + default="viridis", + help="Matplotlib colormap name when --appearance colormap (default: viridis)", + ) + parser.add_argument( + "--intensity-range", + nargs=2, + type=float, + metavar=("VMIN", "VMAX"), + default=None, + dest="colormap_intensity_range", + help="Colormap range (vmin vmax). If omitted, range is computed from data. Use with --appearance colormap.", + ) + + args = parser.parse_args() + + # Resolve split mode + if args.no_split: + separate_by_connectivity = False + separate_by_cell_type = False + else: + separate_by_connectivity = args.separate_by_connectivity + separate_by_cell_type = args.separate_by_cell_type + if not separate_by_connectivity and not separate_by_cell_type: + separate_by_connectivity = True # default + + if separate_by_connectivity and separate_by_cell_type: + print("Error: --by-connectivity and --by-cell-type cannot both be set.") + return 1 + + # Solid color + solid_color = (0.8, 0.8, 0.8) + if args.color: + try: + solid_color = _parse_color(args.color) + except ValueError as e: + print(f"Error: {e}") + return 1 + + # Colormap intensity range + intensity_range = None + if args.colormap_intensity_range is not None: + intensity_range = tuple(args.colormap_intensity_range) + + # Validate input files + for p in args.vtk_files: + if not os.path.exists(p): + print(f"Error: Input file not found: {p}") + return 1 + + try: + workflow = WorkflowConvertVTKToUSD( + vtk_files=args.vtk_files, + output_usd=args.output_usd, + separate_by_connectivity=separate_by_connectivity, + separate_by_cell_type=separate_by_cell_type, + mesh_name=args.mesh_name, + times_per_second=args.times_per_second, + up_axis=args.up_axis, + extract_surface=args.extract_surface, + appearance=args.appearance, + solid_color=solid_color, + anatomy_type=args.anatomy_type, + colormap_primvar=args.colormap_primvar, + colormap_name=args.colormap_name, + 
colormap_intensity_range=intensity_range, + ) + except ValueError as e: + print(f"Error: {e}") + return 1 + + try: + out_path = workflow.run() + print("\nConversion completed successfully.") + print(f"Output: {out_path}") + return 0 + except Exception as e: + print(f"\nError during conversion: {e}") + import traceback + + traceback.print_exc() + return 1 + + +if __name__ == "__main__": + sys.exit(main()) diff --git a/src/physiomotion4d/register_images_greedy.py b/src/physiomotion4d/register_images_greedy.py new file mode 100644 index 0000000..bd12bb2 --- /dev/null +++ b/src/physiomotion4d/register_images_greedy.py @@ -0,0 +1,408 @@ +"""Greedy-based image registration implementation. + +This module provides the RegisterImagesGreedy class, a concrete implementation of +RegisterImagesBase that uses the PICSL Greedy algorithm for image registration. +Greedy is a fast CPU-based deformable registration tool from the Penn Image +Computing and Science Lab. It supports affine and deformable registration and +can be used as an alternative to ANTs for 4D cardiac/lung CT registration. + +See https://greedy.readthedocs.io/ and https://pypi.org/project/picsl-greedy/. +""" + +from __future__ import annotations + +import logging +from typing import Any, Optional, Union + +import itk +import numpy as np +from numpy.typing import NDArray + +from physiomotion4d.image_tools import ImageTools +from physiomotion4d.register_images_base import RegisterImagesBase +from physiomotion4d.transform_tools import TransformTools + + +def _try_import_greedy() -> Any: + """Import picsl_greedy; raise with helpful message if missing.""" + try: + from picsl_greedy import Greedy3D + + return Greedy3D + except ImportError as e: + raise ImportError( + "picsl-greedy is required for RegisterImagesGreedy. " + "Install with: pip install picsl-greedy" + ) from e + + +class RegisterImagesGreedy(RegisterImagesBase): + """Greedy-based deformable image registration implementation. 
+ + This class extends RegisterImagesBase to provide deformable image registration + using the PICSL Greedy algorithm. Greedy is a fast CPU-based tool for 2D/3D + medical image registration, supporting rigid, affine, and deformable (NCC/SSD) + registration. + + Greedy-specific features: + - Rigid and affine registration (-a -dof 6 or 12) + - Deformable registration with multi-resolution (-n, -s) + - Metrics: NMI, NCC, SSD (mapped from CC, Mattes, MeanSquares) + - Optional mask support (-gm) + - SimpleITK in-memory interface via ImageTools + + Inherits from RegisterImagesBase: + - Fixed and moving image management + - Binary mask processing with optional dilation + - Modality-specific parameter configuration + - Standardized registration interface + + Attributes: + number_of_iterations: List of iterations per level (e.g. [40, 20, 10]) + transform_type: 'Deformable', 'Affine', or 'Rigid' + metric: 'CC' (→NCC), 'Mattes' (→NMI), or 'MeanSquares' (→SSD) + deformable_smoothing: Smoothing sigmas for deformable (e.g. "2.0vox 0.5vox") + """ + + def __init__(self, log_level: int | str = logging.INFO) -> None: + """Initialize the Greedy image registration class. + + Args: + log_level: Logging level (default: logging.INFO) + """ + super().__init__(log_level=log_level) + + self.number_of_iterations: list[int] = [40, 20, 10] + self.transform_type = "Deformable" + self.metric = "CC" + self.deformable_smoothing = "2.0vox 0.5vox" + + def set_number_of_iterations(self, number_of_iterations: list[int]) -> None: + """Set the number of iterations per resolution level. + + Args: + number_of_iterations: List of iterations (e.g. [40, 20, 10]). + """ + self.number_of_iterations = number_of_iterations + + def set_transform_type(self, transform_type: str) -> None: + """Set the type of transform: Deformable, Affine, or Rigid. + + Args: + transform_type: 'Deformable', 'Affine', or 'Rigid'. 
+ """ + self.transform_type = transform_type + if transform_type not in ("Deformable", "Affine", "Rigid"): + self.log_error("Invalid transform type: %s", transform_type) + raise ValueError(f"Invalid transform type: {transform_type}") + + def set_metric(self, metric: str) -> None: + """Set the similarity metric (CC→NCC, Mattes→NMI, MeanSquares→SSD). + + Args: + metric: 'CC', 'Mattes', or 'MeanSquares'. + """ + self.metric = metric + if metric not in ("CC", "Mattes", "MeanSquares"): + self.log_error("Invalid metric: %s", metric) + raise ValueError(f"Invalid metric: {metric}") + + def _itk_to_sitk(self, itk_image: itk.Image) -> Any: + """Convert ITK image to SimpleITK (for Greedy).""" + image_tools = ImageTools() + return image_tools.convert_itk_image_to_sitk(itk_image) + + def _sitk_to_itk(self, sitk_image: Any) -> itk.Image: + """Convert SimpleITK image to ITK.""" + image_tools = ImageTools() + return image_tools.convert_sitk_image_to_itk(sitk_image) + + def _greedy_metric(self) -> str: + """Map base metric to Greedy metric string.""" + if self.metric == "CC": + return "NCC 2x2x2" + if self.metric == "Mattes": + return "NMI" + if self.metric == "MeanSquares": + return "SSD" + return "NCC 2x2x2" + + def _greedy_iterations_str(self) -> str: + """Format iterations as Greedy -n string (e.g. 
40x20x10).""" + return "x".join(str(i) for i in self.number_of_iterations) + + def _matrix_to_itk_affine(self, mat_4x4: NDArray[np.float64]) -> itk.Transform: + """Convert 4x4 affine matrix to ITK AffineTransform.""" + mat_4x4 = np.asarray(mat_4x4, dtype=np.float64) + if mat_4x4.shape != (4, 4): + raise ValueError(f"Expected 4x4 matrix, got shape {mat_4x4.shape}") + M = mat_4x4[:3, :3] + t = mat_4x4[:3, 3] + center = itk.Point[itk.D, 3]() + for i in range(3): + center[i] = 0.0 + affine_tfm = itk.AffineTransform[itk.D, 3].New() + affine_tfm.SetCenter(center) + affine_tfm.SetMatrix(itk.GetMatrixFromArray(M)) + translation = itk.Vector[itk.D, 3]() + for i in range(3): + translation[i] = float(t[i]) + affine_tfm.SetTranslation(translation) + return affine_tfm + + def _sitk_warp_to_itk_displacement_transform( + self, warp_sitk: Any, reference_image: itk.Image + ) -> itk.Transform: + """Convert SimpleITK displacement field to ITK DisplacementFieldTransform.""" + field_itk = self._sitk_to_itk(warp_sitk) + from physiomotion4d.image_tools import ImageTools + + image_tools = ImageTools() + arr = itk.array_from_image(field_itk) + disp_itk = image_tools.convert_array_to_image_of_vectors( + arr, reference_image, itk.D + ) + disp_tfm = itk.DisplacementFieldTransform[itk.D, 3].New() + disp_tfm.SetDisplacementField(disp_itk) + return disp_tfm + + def _registration_method_affine_or_rigid( + self, + fixed_sitk: Any, + moving_sitk: Any, + fixed_mask_sitk: Optional[Any], + moving_mask_sitk: Optional[Any], + iterations_str: str, + metric_str: str, + dof: int, + initial_affine: Optional[NDArray[np.float64]] = None, + ) -> tuple[NDArray[np.float64], float]: + """Run Greedy affine or rigid registration. 
Returns (4x4 matrix, loss).""" + Greedy3D = _try_import_greedy() + g = Greedy3D() + + cmd = f"-i fixed moving -a -dof {dof} -n {iterations_str} -m {metric_str} -o aff_out" + kwargs: dict[str, Any] = { + "fixed": fixed_sitk, + "moving": moving_sitk, + "aff_out": None, + } + if fixed_mask_sitk is not None and moving_mask_sitk is not None: + cmd += " -gm fixed_mask" + kwargs["fixed_mask"] = fixed_mask_sitk + if initial_affine is not None: + cmd += " -ia aff_initial" + kwargs["aff_initial"] = initial_affine + + g.execute(cmd, **kwargs) + mat = np.array(g["aff_out"], dtype=np.float64) + try: + ml = g.metric_log() + loss = float(ml[-1]["TotalPerPixelMetric"][-1]) if ml else 0.0 + except Exception: + loss = 0.0 + return mat, loss + + def _registration_method_deformable( + self, + fixed_sitk: Any, + moving_sitk: Any, + fixed_mask_sitk: Optional[Any], + moving_mask_sitk: Optional[Any], + iterations_str: str, + initial_affine: Optional[NDArray[np.float64]] = None, + ) -> tuple[Optional[NDArray[np.float64]], Any, float]: + """Run Greedy deformable registration. 
Returns (affine 4x4 or None, warp_sitk, loss).""" + Greedy3D = _try_import_greedy() + g = Greedy3D() + + # Optional affine init + if initial_affine is None: + cmd_aff = ( + f"-i fixed moving -a -dof 6 -n {iterations_str} -m NMI -o aff_init" + ) + kwargs_aff = {"fixed": fixed_sitk, "moving": moving_sitk, "aff_init": None} + if fixed_mask_sitk is not None and moving_mask_sitk is not None: + cmd_aff += " -gm fixed_mask" + kwargs_aff["fixed_mask"] = fixed_mask_sitk + g.execute(cmd_aff, **kwargs_aff) + initial_affine = np.array(g["aff_init"], dtype=np.float64) + + cmd_def = ( + f"-i fixed moving -it aff_init -n {iterations_str} " + f"-m NCC 2x2x2 -s {self.deformable_smoothing} -o warp_out" + ) + kwargs_def = { + "fixed": fixed_sitk, + "moving": moving_sitk, + "aff_init": initial_affine, + "warp_out": None, + } + if fixed_mask_sitk is not None and moving_mask_sitk is not None: + cmd_def += " -gm fixed_mask" + kwargs_def["fixed_mask"] = fixed_mask_sitk + + g.execute(cmd_def, **kwargs_def) + warp_out = g["warp_out"] + try: + ml = g.metric_log() + loss = float(ml[-1]["TotalPerPixelMetric"][-1]) if ml else 0.0 + except Exception: + loss = 0.0 + return initial_affine, warp_out, loss + + def registration_method( + self, + moving_image: itk.Image, + moving_mask: Optional[itk.Image] = None, + moving_image_pre: Optional[itk.Image] = None, + initial_forward_transform: Optional[itk.Transform] = None, + ) -> dict[str, Union[itk.Transform, float]]: + """Register moving image to fixed image using Greedy. + + Converts ITK images to SimpleITK, runs Greedy (affine and/or deformable), + then converts outputs back to ITK transforms. Composes with + initial_forward_transform when provided. 
+ """ + if self.fixed_image is None or self.fixed_image_pre is None: + raise ValueError("Fixed image must be set before registration.") + + moving_pre = moving_image_pre if moving_image_pre is not None else moving_image + fixed_sitk = self._itk_to_sitk(self.fixed_image_pre) + moving_sitk = self._itk_to_sitk(moving_pre) + + fixed_mask_sitk = None + moving_mask_sitk = None + if self.fixed_mask is not None: + fixed_mask_sitk = self._itk_to_sitk(self.fixed_mask) + if moving_mask is not None: + moving_mask_sitk = self._itk_to_sitk(moving_mask) + + iterations_str = self._greedy_iterations_str() + metric_str = self._greedy_metric() + + # Optional initial transform: convert ITK -> 4x4 for Greedy + initial_affine: Optional[NDArray[np.float64]] = None + if initial_forward_transform is not None: + transform_tools = TransformTools() + # If it's affine-like, extract 4x4; else convert to displacement and skip for Greedy init + if hasattr(initial_forward_transform, "GetMatrix"): + M = np.eye(4, dtype=np.float64) + M[:3, :3] = np.asarray(initial_forward_transform.GetMatrix()).reshape( + 3, 3 + ) + if hasattr(initial_forward_transform, "GetTranslation"): + M[:3, 3] = np.asarray(initial_forward_transform.GetTranslation()) + if hasattr(initial_forward_transform, "GetCenter"): + c = np.asarray(initial_forward_transform.GetCenter()) + M[:3, 3] += c - M[:3, :3] @ c + initial_affine = M + # Non-affine initial: we could convert to disp field and pass; for simplicity we skip Greedy init + # and compose at the end (same as ANTs). 
+ + forward_transform: itk.Transform + inverse_transform: itk.Transform + loss_val: float + + if self.transform_type == "Rigid": + mat, loss_val = self._registration_method_affine_or_rigid( + fixed_sitk, + moving_sitk, + fixed_mask_sitk, + moving_mask_sitk, + iterations_str, + metric_str, + dof=6, + initial_affine=initial_affine, + ) + forward_transform = self._matrix_to_itk_affine(mat) + inverse_affine = itk.AffineTransform[itk.D, 3].New() + forward_transform.GetInverse(inverse_affine) + inverse_transform = inverse_affine + elif self.transform_type == "Affine": + mat, loss_val = self._registration_method_affine_or_rigid( + fixed_sitk, + moving_sitk, + fixed_mask_sitk, + moving_mask_sitk, + iterations_str, + metric_str, + dof=12, + initial_affine=initial_affine, + ) + forward_transform = self._matrix_to_itk_affine(mat) + inverse_affine = itk.AffineTransform[itk.D, 3].New() + forward_transform.GetInverse(inverse_affine) + inverse_transform = inverse_affine + else: + # Deformable: affine + warp + aff_mat, warp_sitk, loss_val = self._registration_method_deformable( + fixed_sitk, + moving_sitk, + fixed_mask_sitk, + moving_mask_sitk, + iterations_str, + initial_affine=initial_affine, + ) + aff_tfm = ( + self._matrix_to_itk_affine(aff_mat) if aff_mat is not None else None + ) + # warp_sitk can be displacement field (SimpleITK image) or numpy + if hasattr(warp_sitk, "GetSize"): + disp_tfm = self._sitk_warp_to_itk_displacement_transform( + warp_sitk, self.fixed_image + ) + else: + # Assume numpy displacement field (z,y,x,3) + from physiomotion4d.image_tools import ImageTools + + image_tools = ImageTools() + warp_arr = np.asarray(warp_sitk, dtype=np.float64) + ref = self.fixed_image + disp_itk = image_tools.convert_array_to_image_of_vectors( + warp_arr, ref, itk.D + ) + disp_tfm = itk.DisplacementFieldTransform[itk.D, 3].New() + disp_tfm.SetDisplacementField(disp_itk) + # Forward = warp then affine (moving -> fixed: first affine then deformable in Greedy) + 
forward_composite = itk.CompositeTransform[itk.D, 3].New() + if aff_tfm is not None: + forward_composite.AddTransform(aff_tfm) + forward_composite.AddTransform(disp_tfm) + forward_transform = forward_composite + # Inverse: inverse warp then inverse affine + inv_disp = TransformTools().invert_displacement_field_transform(disp_tfm) + inv_aff = itk.AffineTransform[itk.D, 3].New() + if aff_tfm is not None: + aff_tfm.GetInverse(inv_aff) + inverse_composite = itk.CompositeTransform[itk.D, 3].New() + inverse_composite.AddTransform(inv_disp) + if aff_tfm is not None: + inverse_composite.AddTransform(inv_aff) + inverse_transform = inverse_composite + + # Compose with user-provided initial transform (same semantics as ANTs) + if initial_forward_transform is not None: + transform_tools = TransformTools() + forward_composite = itk.CompositeTransform[itk.D, 3].New() + forward_composite.AddTransform(initial_forward_transform) + forward_composite.AddTransform(forward_transform) + initial_disp = ( + transform_tools.convert_transform_to_displacement_field_transform( + initial_forward_transform, self.moving_image + ) + ) + inv_initial = transform_tools.invert_displacement_field_transform( + initial_disp + ) + inverse_composite = itk.CompositeTransform[itk.D, 3].New() + inverse_composite.AddTransform(inverse_transform) + inverse_composite.AddTransform(inv_initial) + forward_transform = forward_composite + inverse_transform = inverse_composite + + return { + "forward_transform": forward_transform, + "inverse_transform": inverse_transform, + "loss": loss_val, + } diff --git a/src/physiomotion4d/register_models_pca.py b/src/physiomotion4d/register_models_pca.py index 952e70e..c4b65c6 100644 --- a/src/physiomotion4d/register_models_pca.py +++ b/src/physiomotion4d/register_models_pca.py @@ -74,7 +74,7 @@ class RegisterModelsPCA(PhysioMotion4DBase): def __init__( self, - pca_template_model: pv.UnstructuredGrid, + pca_template_model: pv.UnstructuredGrid | pv.PolyData, pca_eigenvectors: 
np.ndarray, pca_std_deviations: np.ndarray, pca_number_of_modes: int = 0, @@ -180,7 +180,7 @@ def __init__( @classmethod def from_json( cls, - pca_template_model: pv.UnstructuredGrid, + pca_template_model: pv.UnstructuredGrid | pv.PolyData, pca_json_filename: str, pca_number_of_modes: int = 0, pca_template_model_point_subsample: int = 4, @@ -287,13 +287,13 @@ def from_json( @classmethod def from_pca_model( cls, - pca_template_model: pv.UnstructuredGrid, + pca_template_model: pv.UnstructuredGrid | pv.PolyData, pca_model: dict, pca_number_of_modes: int = 0, pca_template_model_point_subsample: int = 4, pre_pca_transform: Optional[itk.Transform] = None, fixed_distance_map: Optional[itk.Image] = None, - fixed_model: Optional[pv.UnstructuredGrid] = None, + fixed_model: Optional[pv.UnstructuredGrid | pv.PolyData] = None, reference_image: Optional[itk.Image] = None, log_level: int | str = logging.INFO, ) -> Self: diff --git a/src/physiomotion4d/usd_anatomy_tools.py b/src/physiomotion4d/usd_anatomy_tools.py index db91468..ea38729 100644 --- a/src/physiomotion4d/usd_anatomy_tools.py +++ b/src/physiomotion4d/usd_anatomy_tools.py @@ -178,6 +178,48 @@ def __init__(self, stage: Any, log_level: int | str = logging.INFO) -> None: "coat_weight": 0.1, } + # Map anatomy type name (CLI/workflow) to params for apply_anatomy_material_to_mesh + self._anatomy_params_by_type: Mapping[str, Mapping[str, Any]] = { + "heart": self.heart_params, + "lung": self.lung_params, + "bone": self.bone_params, + "major_vessels": self.major_vessels_params, + "contrast": self.contrast_params, + "soft_tissue": self.soft_tissue_params, + "other": self.other_params, + "liver": self.liver_params, + "spleen": self.spleen_params, + "kidney": self.kidney_params, + } + + def get_anatomy_types(self) -> list[str]: + """Return list of supported anatomy type names for apply_anatomy_material_to_mesh.""" + return list(self._anatomy_params_by_type.keys()) + + def apply_anatomy_material_to_mesh(self, mesh_path: str, 
anatomy_type: str) -> None: + """Apply an anatomic OmniSurface material to a single mesh prim by type. + + Args: + mesh_path: USD path to the mesh prim (e.g. "/World/Meshes/MyMesh"). + anatomy_type: One of: heart, lung, bone, major_vessels, contrast, + soft_tissue, other, liver, spleen, kidney. + + Raises: + ValueError: If mesh_path is invalid or anatomy_type is not supported. + """ + params = self._anatomy_params_by_type.get(anatomy_type.lower()) + if params is None: + raise ValueError( + f"Unknown anatomy_type '{anatomy_type}'. " + f"Supported: {', '.join(self.get_anatomy_types())}" + ) + prim = self.stage.GetPrimAtPath(mesh_path) + if not prim.IsValid(): + raise ValueError(f"Invalid prim at path: {mesh_path}") + if not prim.IsA(UsdGeom.Mesh): + raise ValueError(f"Prim at {mesh_path} is not a Mesh") + self._apply_surgical_materials(prim, params) + def _apply_surgical_materials( self, prim: Any, material_params: Mapping[str, Any] ) -> None: diff --git a/src/physiomotion4d/usd_tools.py b/src/physiomotion4d/usd_tools.py index 56add23..0244c9a 100644 --- a/src/physiomotion4d/usd_tools.py +++ b/src/physiomotion4d/usd_tools.py @@ -720,6 +720,7 @@ def apply_colormap_from_primvar( *, cmap: str = "viridis", time_codes: list[float] | None = None, + intensity_range: tuple[float, float] | None = None, write_default_at_t0: bool = True, bind_vertex_color_material: bool = True, ) -> None: @@ -738,6 +739,7 @@ def apply_colormap_from_primvar( - Handles multi-component data (vectors/tensors) by computing magnitude - Converts uniform (per-face) data to vertex data by averaging - Computes global value range across all time samples for consistent coloring + (or uses intensity_range when provided) - Writes both default and time-sampled displayColor for Omniverse compatibility Args: @@ -746,6 +748,7 @@ def apply_colormap_from_primvar( source_primvar: Name of primvar to visualize (e.g., "vtk_cell_stress") cmap: Matplotlib colormap name (default: "viridis") time_codes: List of time codes 
to process. If None, uses stage time range. + intensity_range: Optional (vmin, vmax) for colormap. If None, computed from data. write_default_at_t0: If True, also write default value at t=0 bind_vertex_color_material: If True, create/bind material using displayColor @@ -903,11 +906,15 @@ def apply_colormap_from_primvar( if not scalar_samples: raise ValueError(f"No valid data found for primvar '{source_primvar}'") - # Compute global value range - all_values = np.concatenate([s for _, s in scalar_samples]) - vmin = float(np.min(all_values)) - vmax = float(np.max(all_values)) - self.log_info(f"Value range: {vmin:.6g} to {vmax:.6g}") + # Value range: use provided intensity_range or compute from data + if intensity_range is not None: + vmin, vmax = intensity_range + self.log_info(f"Using specified intensity range: {vmin:.6g} to {vmax:.6g}") + else: + all_values = np.concatenate([s for _, s in scalar_samples]) + vmin = float(np.min(all_values)) + vmax = float(np.max(all_values)) + self.log_info(f"Value range: {vmin:.6g} to {vmax:.6g}") # Apply colormap to each time sample try: @@ -978,6 +985,103 @@ def apply_colormap_from_primvar( stage.Save() self.log_info(f"Saved USD file: {stage_path}") + def set_solid_display_color( + self, + stage_or_path: Usd.Stage | str, + mesh_path: str, + color: tuple[float, float, float], + *, + time_codes: list[float] | None = None, + bind_vertex_color_material: bool = True, + ) -> None: + """ + Set a constant (solid) displayColor for a mesh. + + Fills the mesh's displayColor primvar with the same RGB for every vertex, + optionally at each time code for animated meshes, and binds the vertex + color material so the color is visible in Omniverse. + + Args: + stage_or_path: USD Stage or path to USD file + mesh_path: Path to mesh prim (e.g., "/World/Meshes/MyMesh") + color: RGB tuple in [0, 1] (e.g., (1, 0, 0) for red) + time_codes: If provided, set displayColor at each time. If None, set default only. 
+ bind_vertex_color_material: If True, bind material that uses displayColor + """ + from pxr import Gf, Sdf, Vt + + if isinstance(stage_or_path, str): + stage = Usd.Stage.Open(stage_or_path) + stage_path = stage_or_path + else: + stage = stage_or_path + stage_path = None + + mesh_prim = stage.GetPrimAtPath(mesh_path) + if not mesh_prim.IsValid() or not mesh_prim.IsA(UsdGeom.Mesh): + raise ValueError(f"Invalid mesh prim at path: {mesh_path}") + + mesh = UsdGeom.Mesh(mesh_prim) + primvars_api = UsdGeom.PrimvarsAPI(mesh) + points_attr = mesh.GetPointsAttr() + + # Resolve time codes: default only or at each sample + if time_codes is None: + time_codes = [Usd.TimeCode.Default().GetValue()] + vec = Gf.Vec3f(float(color[0]), float(color[1]), float(color[2])) + + display_color_pv = primvars_api.CreatePrimvar( + "displayColor", Sdf.ValueTypeNames.Color3fArray, UsdGeom.Tokens.vertex + ) + + for tc in time_codes: + # Get point count at this time + pts = points_attr.Get(Usd.TimeCode(tc)) + n_points = len(pts) if pts is not None else 0 + if n_points == 0 and tc == Usd.TimeCode.Default().GetValue(): + pts = points_attr.Get() + n_points = len(pts) if pts is not None else 0 + if n_points == 0: + continue + color_array = Vt.Vec3fArray([vec] * n_points) + if tc == Usd.TimeCode.Default().GetValue(): + display_color_pv.Set(color_array) + else: + display_color_pv.Set(color_array, Usd.TimeCode(tc)) + + if bind_vertex_color_material: + self._ensure_vertex_color_material(stage, mesh_prim) + if stage_path: + stage.Save() + self.log_info(f"Set solid displayColor on {mesh_path}") + + def list_mesh_paths_under( + self, stage_or_path: Usd.Stage | str, parent_path: str = "/World/Meshes" + ) -> list[str]: + """ + List paths of all mesh prims under a parent path. + + Args: + stage_or_path: USD Stage or path to USD file + parent_path: Parent prim path (default: /World/Meshes) + + Returns: + List of mesh prim paths (e.g. 
["/World/Meshes/Mesh0", "/World/Meshes/Mesh1"]) + """ + if isinstance(stage_or_path, str): + stage = Usd.Stage.Open(stage_or_path) + else: + stage = stage_or_path + + parent = stage.GetPrimAtPath(parent_path) + if not parent.IsValid(): + return [] + result = [] + for prim in parent.GetAllChildren(): + if prim.IsA(UsdGeom.Mesh): + result.append(str(prim.GetPath())) + return result + def repair_mesh_primvar_element_sizes( self, stage_or_path: Usd.Stage | str, diff --git a/src/physiomotion4d/vtk_to_usd/__init__.py b/src/physiomotion4d/vtk_to_usd/__init__.py index e39f439..b6b97bd 100644 --- a/src/physiomotion4d/vtk_to_usd/__init__.py +++ b/src/physiomotion4d/vtk_to_usd/__init__.py @@ -33,6 +33,11 @@ VolumeData, ) from .material_manager import MaterialManager +from .mesh_utils import ( + cell_type_name_for_vertex_count, + split_mesh_data_by_cell_type, + split_mesh_data_by_connectivity, +) from .usd_mesh_converter import UsdMeshConverter from .usd_utils import ( compute_mesh_extent, @@ -76,6 +81,10 @@ "sanitize_primvar_name", "triangulate_face", "compute_mesh_extent", + # Mesh utils (cell type split) + "cell_type_name_for_vertex_count", + "split_mesh_data_by_cell_type", + "split_mesh_data_by_connectivity", # Readers "VTKReader", "PolyDataReader", diff --git a/src/physiomotion4d/vtk_to_usd/converter.py b/src/physiomotion4d/vtk_to_usd/converter.py index a63b186..b26bdfc 100644 --- a/src/physiomotion4d/vtk_to_usd/converter.py +++ b/src/physiomotion4d/vtk_to_usd/converter.py @@ -5,12 +5,13 @@ import logging from pathlib import Path -from typing import Any, Optional +from typing import Any, Optional, Sequence from pxr import Usd, UsdGeom from .data_structures import ConversionSettings, MaterialData, MeshData from .material_manager import MaterialManager +from .mesh_utils import split_mesh_data_by_cell_type, split_mesh_data_by_connectivity from .usd_mesh_converter import UsdMeshConverter from .vtk_reader import read_vtk_file @@ -86,11 +87,24 @@ def convert_file( if material 
is not None: material_mgr.get_or_create_material(material) - # Create mesh - mesh_path = f"/World/Meshes/{mesh_name}" - self._ensure_parent_path(mesh_path) - - mesh_converter.create_mesh(mesh_data, mesh_path, bind_material=True) + # Create mesh(es) - by connectivity, by cell type, or single + if self.settings.separate_objects_by_connectivity: + parts = split_mesh_data_by_connectivity(mesh_data) + for _idx, (part_data, base_name) in enumerate(parts): + mesh_path = f"/World/Meshes/{base_name}" + self._ensure_parent_path(mesh_path) + mesh_converter.create_mesh(part_data, mesh_path, bind_material=True) + elif self.settings.separate_objects_by_cell_type: + parts = split_mesh_data_by_cell_type(mesh_data) + for idx, (part_data, base_name) in enumerate(parts): + prim_name = f"{base_name}_{idx}" + mesh_path = f"/World/Meshes/{prim_name}" + self._ensure_parent_path(mesh_path) + mesh_converter.create_mesh(part_data, mesh_path, bind_material=True) + else: + mesh_path = f"/World/Meshes/{mesh_name}" + self._ensure_parent_path(mesh_path) + mesh_converter.create_mesh(mesh_data, mesh_path, bind_material=True) # Save stage stage.Save() @@ -100,7 +114,7 @@ def convert_file( def convert_sequence( self, - vtk_files: list[str | Path], + vtk_files: Sequence[str | Path], output_usd: str | Path, mesh_name: str = "Mesh", time_codes: Optional[list[float]] = None, @@ -160,13 +174,62 @@ def convert_sequence( stage.SetEndTimeCode(time_codes[-1]) stage.SetTimeCodesPerSecond(self.settings.times_per_second) - # Create time-varying mesh - mesh_path = f"/World/Meshes/{mesh_name}" - self._ensure_parent_path(mesh_path) - - mesh_converter.create_time_varying_mesh( - mesh_data_sequence, mesh_path, time_codes, bind_material=True - ) + # Create time-varying mesh(es) - by connectivity, by cell type, or single + if self.settings.separate_objects_by_connectivity: + parts_sequence = [ + split_mesh_data_by_connectivity(m) for m in mesh_data_sequence + ] + n_parts = len(parts_sequence[0]) + if not all(len(p) == 
n_parts for p in parts_sequence): + logger.warning( + "Connectivity split count varies across time steps; " + "outputting single mesh per frame instead of splitting by connectivity." + ) + mesh_path = f"/World/Meshes/{mesh_name}" + self._ensure_parent_path(mesh_path) + mesh_converter.create_time_varying_mesh( + mesh_data_sequence, mesh_path, time_codes, bind_material=True + ) + else: + for part_idx in range(n_parts): + part_sequence = [p[part_idx][0] for p in parts_sequence] + base_name = parts_sequence[0][part_idx][1] + mesh_path = f"/World/Meshes/{base_name}" + self._ensure_parent_path(mesh_path) + mesh_converter.create_time_varying_mesh( + part_sequence, mesh_path, time_codes, bind_material=True + ) + elif self.settings.separate_objects_by_cell_type: + parts_sequence = [ + split_mesh_data_by_cell_type(m) for m in mesh_data_sequence + ] + n_parts = len(parts_sequence[0]) + if not all(len(p) == n_parts for p in parts_sequence): + logger.warning( + "Cell type split count varies across time steps; " + "outputting single mesh per frame instead of splitting by cell type." 
+ ) + mesh_path = f"/World/Meshes/{mesh_name}" + self._ensure_parent_path(mesh_path) + mesh_converter.create_time_varying_mesh( + mesh_data_sequence, mesh_path, time_codes, bind_material=True + ) + else: + for part_idx in range(n_parts): + part_sequence = [p[part_idx][0] for p in parts_sequence] + base_name = parts_sequence[0][part_idx][1] + prim_name = f"{base_name}_{part_idx}" + mesh_path = f"/World/Meshes/{prim_name}" + self._ensure_parent_path(mesh_path) + mesh_converter.create_time_varying_mesh( + part_sequence, mesh_path, time_codes, bind_material=True + ) + else: + mesh_path = f"/World/Meshes/{mesh_name}" + self._ensure_parent_path(mesh_path) + mesh_converter.create_time_varying_mesh( + mesh_data_sequence, mesh_path, time_codes, bind_material=True + ) # Save stage stage.Save() @@ -212,11 +275,24 @@ def convert_mesh_data( if material is not None: material_mgr.get_or_create_material(material) - # Create mesh - mesh_path = f"/World/Meshes/{mesh_name}" - self._ensure_parent_path(mesh_path) - - mesh_converter.create_mesh(mesh_data, mesh_path, bind_material=True) + # Create mesh(es) - by connectivity, by cell type, or single + if self.settings.separate_objects_by_connectivity: + parts = split_mesh_data_by_connectivity(mesh_data) + for _idx, (part_data, base_name) in enumerate(parts): + mesh_path = f"/World/Meshes/{base_name}" + self._ensure_parent_path(mesh_path) + mesh_converter.create_mesh(part_data, mesh_path, bind_material=True) + elif self.settings.separate_objects_by_cell_type: + parts = split_mesh_data_by_cell_type(mesh_data) + for idx, (part_data, base_name) in enumerate(parts): + prim_name = f"{base_name}_{idx}" + mesh_path = f"/World/Meshes/{prim_name}" + self._ensure_parent_path(mesh_path) + mesh_converter.create_mesh(part_data, mesh_path, bind_material=True) + else: + mesh_path = f"/World/Meshes/{mesh_name}" + self._ensure_parent_path(mesh_path) + mesh_converter.create_mesh(mesh_data, mesh_path, bind_material=True) # Save stage stage.Save() @@ -283,13 
+359,62 @@ def convert_mesh_data_sequence( stage.SetEndTimeCode(time_codes[-1]) stage.SetTimeCodesPerSecond(self.settings.times_per_second) - # Create time-varying mesh - mesh_path = f"/World/Meshes/{mesh_name}" - self._ensure_parent_path(mesh_path) - - mesh_converter.create_time_varying_mesh( - mesh_data_sequence, mesh_path, time_codes, bind_material=True - ) + # Create time-varying mesh(es) - by connectivity, by cell type, or single + if self.settings.separate_objects_by_connectivity: + parts_sequence = [ + split_mesh_data_by_connectivity(m) for m in mesh_data_sequence + ] + n_parts = len(parts_sequence[0]) + if not all(len(p) == n_parts for p in parts_sequence): + logger.warning( + "Connectivity split count varies across time steps; " + "outputting single mesh per frame instead of splitting by connectivity." + ) + mesh_path = f"/World/Meshes/{mesh_name}" + self._ensure_parent_path(mesh_path) + mesh_converter.create_time_varying_mesh( + mesh_data_sequence, mesh_path, time_codes, bind_material=True + ) + else: + for part_idx in range(n_parts): + part_sequence = [p[part_idx][0] for p in parts_sequence] + base_name = parts_sequence[0][part_idx][1] + mesh_path = f"/World/Meshes/{base_name}" + self._ensure_parent_path(mesh_path) + mesh_converter.create_time_varying_mesh( + part_sequence, mesh_path, time_codes, bind_material=True + ) + elif self.settings.separate_objects_by_cell_type: + parts_sequence = [ + split_mesh_data_by_cell_type(m) for m in mesh_data_sequence + ] + n_parts = len(parts_sequence[0]) + if not all(len(p) == n_parts for p in parts_sequence): + logger.warning( + "Cell type split count varies across time steps; " + "outputting single mesh per frame instead of splitting by cell type." 
+ ) + mesh_path = f"/World/Meshes/{mesh_name}" + self._ensure_parent_path(mesh_path) + mesh_converter.create_time_varying_mesh( + mesh_data_sequence, mesh_path, time_codes, bind_material=True + ) + else: + for part_idx in range(n_parts): + part_sequence = [p[part_idx][0] for p in parts_sequence] + base_name = parts_sequence[0][part_idx][1] + prim_name = f"{base_name}_{part_idx}" + mesh_path = f"/World/Meshes/{prim_name}" + self._ensure_parent_path(mesh_path) + mesh_converter.create_time_varying_mesh( + part_sequence, mesh_path, time_codes, bind_material=True + ) + else: + mesh_path = f"/World/Meshes/{mesh_name}" + self._ensure_parent_path(mesh_path) + mesh_converter.create_time_varying_mesh( + mesh_data_sequence, mesh_path, time_codes, bind_material=True + ) # Save stage stage.Save() diff --git a/src/physiomotion4d/vtk_to_usd/data_structures.py b/src/physiomotion4d/vtk_to_usd/data_structures.py index 10c1128..3494a70 100644 --- a/src/physiomotion4d/vtk_to_usd/data_structures.py +++ b/src/physiomotion4d/vtk_to_usd/data_structures.py @@ -153,6 +153,8 @@ class ConversionSettings: compute_normals: bool = True preserve_point_arrays: bool = True preserve_cell_arrays: bool = True + separate_objects_by_cell_type: bool = False # Split into separate USD meshes by cell type (triangle/quad/tetra/hex etc.) + separate_objects_by_connectivity: bool = True # Split into separate USD meshes by connected components (object1, object2, ...). Mutually exclusive with separate_objects_by_cell_type. 
def split_mesh_data_by_cell_type(mesh_data: MeshData) -> list[tuple[MeshData, str]]:
    """Split MeshData into one mesh per distinct face vertex count (cell type).

    Each part is named by cell type (e.g. Triangle, Quad, Hexahedron). The caller
    should append a unique number to form final prim names (e.g. Triangle_0, Quad_0).

    Args:
        mesh_data: Single mesh that may contain mixed cell types.

    Returns:
        List of (MeshData, base_name) for each cell type present. base_name is
        the cell type name (e.g. "Triangle", "Quad").
    """
    counts = np.asarray(mesh_data.face_vertex_counts, dtype=np.int32)
    indices = np.asarray(mesh_data.face_vertex_indices, dtype=np.int32)
    points = np.asarray(mesh_data.points)
    n_points = len(points)
    n_faces = len(counts)

    # Empty mesh: nothing to split, pass through untouched.
    if n_faces == 0:
        return [(mesh_data, "Mesh")]

    unique_counts = np.unique(counts)
    if len(unique_counts) <= 1:
        # Single cell type: return the original mesh named after that type.
        return [(mesh_data, cell_type_name_for_vertex_count(int(counts[0])))]

    result: list[tuple[MeshData, str]] = []

    for count in unique_counts:
        count = int(count)
        face_mask = counts == count
        face_idxs = np.where(face_mask)[0]
        num_faces = len(face_idxs)

        # Boolean mask over the flat face-vertex index array selecting the
        # vertices of the chosen faces, in face order. Vectorized replacement
        # for the per-face slice-and-concatenate Python loop.
        flat_mask = np.repeat(face_mask, counts)
        used = indices[flat_mask]

        # Remap the original point ids to a compact 0..k-1 range for this part.
        unique_pts = np.unique(used)
        old_to_new = np.full(n_points, -1, dtype=np.int32)
        old_to_new[unique_pts] = np.arange(len(unique_pts), dtype=np.int32)

        new_points = points[unique_pts]
        new_counts = np.full(num_faces, count, dtype=np.int32)
        new_indices = old_to_new[used].astype(np.int32)

        # Subset normals: per-vertex (one per point) or per-face-vertex (flat).
        new_normals = None
        if mesh_data.normals is not None:
            arr = np.asarray(mesh_data.normals)
            if arr.ndim == 2 and arr.shape[0] == n_points:
                new_normals = arr[unique_pts]
            elif arr.ndim == 2 and arr.shape[0] == len(indices):
                new_normals = arr[flat_mask]

        # Subset per-vertex colors.
        new_colors = None
        if mesh_data.colors is not None:
            arr = np.asarray(mesh_data.colors)
            if arr.shape[0] == n_points:
                new_colors = arr[unique_pts]

        # Subset generic arrays: vertex arrays by point id, uniform by face id.
        # Arrays whose length matches neither are skipped (cannot subset safely).
        new_arrays: list[GenericArray] = []
        for arr in mesh_data.generic_arrays:
            data = np.asarray(arr.data)
            if arr.interpolation == "vertex":
                if data.shape[0] != n_points:
                    continue
                new_data = data[unique_pts]
            else:
                if data.shape[0] != n_faces:
                    continue
                new_data = data[face_idxs]
            new_arrays.append(
                GenericArray(
                    name=arr.name,
                    data=new_data,
                    num_components=arr.num_components,
                    data_type=arr.data_type,
                    interpolation=arr.interpolation,
                )
            )

        part = MeshData(
            points=new_points,
            face_vertex_counts=new_counts,
            face_vertex_indices=new_indices,
            normals=new_normals,
            uvs=None,
            colors=new_colors,
            generic_arrays=new_arrays,
            material_id=mesh_data.material_id,
        )
        result.append((part, cell_type_name_for_vertex_count(count)))

    return result
range(num_faces)] + ) + new_normals = flat + + # Subset colors (per-vertex) + new_colors = None + if mesh_data.colors is not None: + arr = np.asarray(mesh_data.colors) + if arr.shape[0] == n_points: + new_colors = arr[unique_pts] + + # Subset generic arrays: vertex by point index, uniform by face index + new_arrays: list[GenericArray] = [] + for arr in mesh_data.generic_arrays: + data = np.asarray(arr.data) + if arr.interpolation == "vertex": + if data.shape[0] == n_points: + new_data = data[unique_pts] + else: + continue + else: + if data.shape[0] == n_faces: + new_data = data[face_idxs] + else: + continue + new_arrays.append( + GenericArray( + name=arr.name, + data=new_data, + num_components=arr.num_components, + data_type=arr.data_type, + interpolation=arr.interpolation, + ) + ) + + part = MeshData( + points=new_points, + face_vertex_counts=new_counts, + face_vertex_indices=new_indices, + normals=new_normals, + uvs=None, + colors=new_colors, + generic_arrays=new_arrays, + material_id=mesh_data.material_id, + ) + name = cell_type_name_for_vertex_count(count) + result.append((part, name)) + + return result + + +def _connected_components_face_indices( + n_faces: int, + indices: NDArray, + cum: NDArray, +) -> list[list[int]]: + """Return list of face-index lists, one per connected component. + + Two faces are in the same component if they share at least one vertex. + Uses union-find on face indices. 
+ """ + # vertex -> list of face indices that use that vertex + vertex_to_faces: dict[int, list[int]] = defaultdict(list) + for i in range(n_faces): + start, end = int(cum[i]), int(cum[i + 1]) + for k in range(start, end): + v = int(indices[k]) + vertex_to_faces[v].append(i) + + # Union-find for faces + parent = list(range(n_faces)) + + def find(x: int) -> int: + if parent[x] != x: + parent[x] = find(parent[x]) + return parent[x] + + def union(x: int, y: int) -> None: + px, py = find(x), find(y) + if px != py: + parent[px] = py + + for face_list in vertex_to_faces.values(): + if len(face_list) < 2: + continue + r = find(face_list[0]) + for f in face_list[1:]: + union(r, find(f)) + + # Group face indices by component root + components: dict[int, list[int]] = defaultdict(list) + for i in range(n_faces): + components[find(i)].append(i) + + # Return as list of lists, sorted by min face index for stable order + return sorted(components.values(), key=lambda x: min(x)) + + +def _extract_mesh_part_by_face_indices( + mesh_data: MeshData, + face_idxs: list[int], + n_points: int, + n_faces: int, + counts: NDArray, + indices: NDArray, + cum: NDArray, + points: NDArray, +) -> MeshData: + """Build a new MeshData containing only the given faces (and their points).""" + face_idxs_arr = np.asarray(face_idxs, dtype=np.int32) + num_faces = len(face_idxs) + + seg_starts = cum[face_idxs_arr] + seg_ends = cum[face_idxs_arr + 1] + used = np.concatenate( + [indices[seg_starts[i] : seg_ends[i]] for i in range(num_faces)] + ) + unique_pts = np.unique(used) + old_to_new = np.full(n_points, -1, dtype=np.int32) + old_to_new[unique_pts] = np.arange(len(unique_pts), dtype=np.int32) + + new_points = points[unique_pts] + new_counts = counts[face_idxs_arr] + new_indices_list: list[int] = [] + for i in range(num_faces): + seg = indices[seg_starts[i] : seg_ends[i]] + new_indices_list.extend(old_to_new[seg].tolist()) + new_indices = np.array(new_indices_list, dtype=np.int32) + + new_normals = None + 
def split_mesh_data_by_connectivity(mesh_data: MeshData) -> list[tuple[MeshData, str]]:
    """Split MeshData into one mesh per connected component.

    A connected component is a maximal set of cells that share vertices (directly
    or transitively). Components are named object1, object2, etc.

    Args:
        mesh_data: Single mesh that may contain multiple disconnected parts.

    Returns:
        List of (MeshData, base_name) for each component. base_name is
        "object1", "object2", ...
    """
    face_counts = np.asarray(mesh_data.face_vertex_counts, dtype=np.int32)
    flat_indices = np.asarray(mesh_data.face_vertex_indices, dtype=np.int32)
    point_array = np.asarray(mesh_data.points)
    num_points = len(point_array)
    num_faces = len(face_counts)

    # An empty mesh cannot be split; pass it through as a single object.
    if num_faces == 0:
        return [(mesh_data, "object1")]

    offsets = np.concatenate([[0], np.cumsum(face_counts)]).astype(np.int64)

    components = _connected_components_face_indices(num_faces, flat_indices, offsets)
    # A single component also passes the original mesh through unchanged.
    if len(components) <= 1:
        return [(mesh_data, "object1")]

    return [
        (
            _extract_mesh_part_by_face_indices(
                mesh_data,
                component_faces,
                num_points,
                num_faces,
                face_counts,
                flat_indices,
                offsets,
                point_array,
            ),
            f"object{part_number}",
        )
        for part_number, component_faces in enumerate(components, start=1)
    ]
+""" + +import logging +import re +from pathlib import Path +from typing import Literal + + +from physiomotion4d.physiomotion4d_base import PhysioMotion4DBase +from physiomotion4d.usd_anatomy_tools import USDAnatomyTools +from physiomotion4d.usd_tools import USDTools +from physiomotion4d.vtk_to_usd import ( + ConversionSettings, + MaterialData, + VTKToUSDConverter, + read_vtk_file, + validate_time_series_topology, +) + + +def discover_time_series( + paths: list[Path], + pattern: str = r"\.t(\d+)\.(vtk|vtp|vtu)$", +) -> list[tuple[int, Path]]: + """Discover and sort time-series VTK files by extracted time index. + + Args: + paths: List of paths to VTK files + pattern: Regex with one group for time step number (default matches .t123.vtk) + + Returns: + Sorted list of (time_step, path) tuples. If no match, returns [(0, p) for p in paths]. + """ + time_series: list[tuple[int, Path]] = [] + regex = re.compile(pattern, re.IGNORECASE) + for p in paths: + match = regex.search(p.name) + if match: + time_series.append((int(match.group(1)), Path(p))) + else: + time_series.append((0, Path(p))) + time_series.sort(key=lambda x: (x[0], str(x[1]))) + return time_series + + +AppearanceKind = Literal["solid", "anatomy", "colormap"] + + +class WorkflowConvertVTKToUSD(PhysioMotion4DBase): + """ + Workflow to convert one or more VTK files to USD with configurable + splitting and appearance (solid color, anatomic material, or colormap). 
+ """ + + def __init__( + self, + vtk_files: list[str | Path], + output_usd: str | Path, + *, + separate_by_connectivity: bool = True, + separate_by_cell_type: bool = False, + mesh_name: str = "Mesh", + times_per_second: float = 60.0, + up_axis: str = "Y", + triangulate: bool = True, + extract_surface: bool = True, + time_series_pattern: str = r"\.t(\d+)\.(vtk|vtp|vtu)$", + appearance: AppearanceKind = "solid", + solid_color: tuple[float, float, float] = (0.8, 0.8, 0.8), + anatomy_type: str = "heart", + colormap_primvar: str | None = None, + colormap_name: str = "viridis", + colormap_intensity_range: tuple[float, float] | None = None, + log_level: int | str = logging.INFO, + ): + """ + Initialize the VTK-to-USD workflow. + + Args: + vtk_files: List of paths to VTK files (.vtk, .vtp, .vtu). One file = single frame; + multiple files = time series (ordered by time_series_pattern). + output_usd: Path to output USD file. + separate_by_connectivity: If True, split mesh into separate objects by connectivity. + separate_by_cell_type: If True, split mesh by cell type (triangle/quad/...). + Cannot be True when separate_by_connectivity is True. + mesh_name: Base name for the mesh (or first mesh when not splitting). + times_per_second: FPS for time-varying data. + up_axis: "Y" or "Z". + triangulate: Triangulate meshes. + extract_surface: For .vtu, extract surface before conversion. + time_series_pattern: Regex to extract time index from filenames (one group). + appearance: "solid" | "anatomy" | "colormap". + solid_color: RGB in [0,1] when appearance == "solid". + anatomy_type: Anatomy material name when appearance == "anatomy" + (e.g. heart, lung, bone, soft_tissue). + colormap_primvar: Primvar name for coloring when appearance == "colormap" + (e.g. vtk_point_stress_c0). If None, a candidate is auto-picked when possible. + colormap_name: Matplotlib colormap name when appearance == "colormap". 
+ colormap_intensity_range: Optional (vmin, vmax) for colormap; None = auto from data. + log_level: Logging level. + """ + super().__init__(class_name=self.__class__.__name__, log_level=log_level) + self.vtk_files = [Path(f) for f in vtk_files] + self.output_usd = Path(output_usd) + self.separate_by_connectivity = separate_by_connectivity + self.separate_by_cell_type = separate_by_cell_type + self.mesh_name = mesh_name + self.times_per_second = times_per_second + self.up_axis = up_axis + self.triangulate = triangulate + self.extract_surface = extract_surface + self.time_series_pattern = time_series_pattern + self.appearance = appearance + self.solid_color = solid_color + self.anatomy_type = anatomy_type + self.colormap_primvar = colormap_primvar + self.colormap_name = colormap_name + self.colormap_intensity_range = colormap_intensity_range + + if separate_by_connectivity and separate_by_cell_type: + raise ValueError( + "separate_by_connectivity and separate_by_cell_type cannot both be True" + ) + + def run(self) -> str: + """ + Run the full workflow: convert VTK to USD, then apply the chosen appearance. + + Returns: + Path to the created USD file (str). + """ + self.log_section("VTK to USD conversion workflow") + + if not self.vtk_files: + raise ValueError("vtk_files must not be empty") + + # Discover time series + time_series = discover_time_series( + self.vtk_files, pattern=self.time_series_pattern + ) + time_steps = [t for t, _ in time_series] + paths_ordered = [p for _, p in time_series] + n_frames = len(paths_ordered) + + self.log_info("Input: %d file(s), time steps: %s", n_frames, time_steps[:5]) + if n_frames > 5: + self.log_info(" ... 
and %d more", n_frames - 5) + self.log_info("Output: %s", self.output_usd) + + settings = ConversionSettings( + triangulate_meshes=self.triangulate, + compute_normals=False, + preserve_point_arrays=True, + preserve_cell_arrays=True, + separate_objects_by_connectivity=self.separate_by_connectivity, + separate_objects_by_cell_type=self.separate_by_cell_type, + up_axis=self.up_axis, + times_per_second=self.times_per_second, + use_time_samples=True, + ) + + converter = VTKToUSDConverter(settings) + default_material = MaterialData( + name="default_material", + diffuse_color=self.solid_color, + use_vertex_colors=False, + ) + + if n_frames == 1: + stage = converter.convert_file( + paths_ordered[0], + self.output_usd, + mesh_name=self.mesh_name, + material=default_material, + extract_surface=self.extract_surface, + ) + else: + time_codes = [float(t) for t in time_steps] + # Optional: validate topology consistency across frames + try: + mesh_sequence = [ + read_vtk_file(p, extract_surface=self.extract_surface) + for p in paths_ordered + ] + report = validate_time_series_topology(mesh_sequence) + if report.get("topology_changes"): + self.log_warning( + "Topology changes across %d frames", + len(report["topology_changes"]), + ) + except Exception as e: + self.log_debug("Time series validation skipped: %s", e) + + stage = converter.convert_sequence( + paths_ordered, + self.output_usd, + mesh_name=self.mesh_name, + time_codes=time_codes, + material=default_material, + extract_surface=self.extract_surface, + ) + + # Post-process: apply chosen appearance to all meshes under /World/Meshes + usd_tools = USDTools(log_level=self.log_level) + mesh_paths = usd_tools.list_mesh_paths_under( + str(self.output_usd), parent_path="/World/Meshes" + ) + if not mesh_paths: + self.log_warning("No mesh prims found under /World/Meshes") + return str(self.output_usd) + + self.log_info( + "Applying appearance '%s' to %d mesh(es)", self.appearance, len(mesh_paths) + ) + + if self.appearance == 
"solid": + time_codes_for_color: list[float] | None = None + if n_frames > 1 and stage.HasAuthoredTimeCodeRange(): + time_codes_for_color = [ + float(t) + for t in range( + int(stage.GetStartTimeCode()), + int(stage.GetEndTimeCode()) + 1, + ) + ] + for mesh_path in mesh_paths: + usd_tools.set_solid_display_color( + str(self.output_usd), + mesh_path, + self.solid_color, + time_codes=time_codes_for_color, + bind_vertex_color_material=True, + ) + + elif self.appearance == "anatomy": + anatomy_tools = USDAnatomyTools(stage, log_level=self.log_level) + for mesh_path in mesh_paths: + anatomy_tools.apply_anatomy_material_to_mesh( + mesh_path, self.anatomy_type + ) + stage.Save() + + elif self.appearance == "colormap": + primvar = self.colormap_primvar + for mesh_path in mesh_paths: + if primvar is None: + primvars = usd_tools.list_mesh_primvars( + str(self.output_usd), mesh_path + ) + primvar = usd_tools.pick_color_primvar(primvars) + if primvar is None: + self.log_warning( + "No color primvar found for %s; skip colormap", mesh_path + ) + primvar = self.colormap_primvar + continue + self.log_info( + "Applying colormap to %s from primvar %s", mesh_path, primvar + ) + usd_tools.apply_colormap_from_primvar( + str(self.output_usd), + mesh_path, + primvar, + cmap=self.colormap_name, + intensity_range=self.colormap_intensity_range, + write_default_at_t0=True, + bind_vertex_color_material=True, + ) + if self.colormap_primvar is None: + primvar = None # next mesh: auto-pick again + + self.log_info("Workflow complete: %s", self.output_usd) + return str(self.output_usd) diff --git a/tests/conftest.py b/tests/conftest.py index 620b897..4bdda9f 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -18,6 +18,7 @@ from physiomotion4d.contour_tools import ContourTools from physiomotion4d.convert_nrrd_4d_to_3d import ConvertNRRD4DTo3D from physiomotion4d.register_images_ants import RegisterImagesANTs +from physiomotion4d.register_images_greedy import RegisterImagesGreedy from 
physiomotion4d.register_images_icon import RegisterImagesICON from physiomotion4d.segment_chest_total_segmentator import SegmentChestTotalSegmentator from physiomotion4d.segment_chest_vista_3d import SegmentChestVista3D @@ -495,6 +496,12 @@ def registrar_ants(): return RegisterImagesANTs() +@pytest.fixture(scope="session") +def registrar_greedy(): + """Create a RegisterImagesGreedy instance.""" + return RegisterImagesGreedy() + + @pytest.fixture(scope="session") def registrar_icon(): """Create a RegisterImagesICON instance.""" diff --git a/tests/test_register_images_greedy.py b/tests/test_register_images_greedy.py new file mode 100644 index 0000000..22c239a --- /dev/null +++ b/tests/test_register_images_greedy.py @@ -0,0 +1,211 @@ +#!/usr/bin/env python +""" +Tests for Greedy-based image registration. + +Uses the same fixtures as test_register_images_ants (converted 3D CT images). +Requires the picsl-greedy package and test data. +""" + +import itk +import numpy as np +import pytest + +from physiomotion4d.transform_tools import TransformTools + + +@pytest.mark.requires_data +@pytest.mark.slow +class TestRegisterImagesGreedy: + """Test suite for Greedy-based image registration.""" + + def test_registrar_initialization(self, registrar_greedy) -> None: + """Test that RegisterImagesGreedy initializes correctly.""" + assert registrar_greedy is not None, "Registrar not initialized" + assert hasattr(registrar_greedy, "fixed_image"), "Missing fixed_image attribute" + assert hasattr(registrar_greedy, "fixed_mask"), "Missing fixed_mask attribute" + + print("\n✓ Greedy registrar initialized successfully") + + def test_set_modality(self, registrar_greedy) -> None: + """Test setting imaging modality.""" + registrar_greedy.set_modality("ct") + assert registrar_greedy.modality == "ct", "Modality not set correctly" + + registrar_greedy.set_modality("mr") + assert registrar_greedy.modality == "mr", "Modality change failed" + + print("\n✓ Modality setting works correctly") + + def 
test_set_transform_type_and_metric(self, registrar_greedy) -> None: + """Test setting transform type and metric.""" + registrar_greedy.set_transform_type("Rigid") + assert registrar_greedy.transform_type == "Rigid" + + registrar_greedy.set_transform_type("Affine") + assert registrar_greedy.transform_type == "Affine" + + registrar_greedy.set_transform_type("Deformable") + assert registrar_greedy.transform_type == "Deformable" + + registrar_greedy.set_metric("CC") + assert registrar_greedy.metric == "CC" + registrar_greedy.set_metric("Mattes") + assert registrar_greedy.metric == "Mattes" + registrar_greedy.set_metric("MeanSquares") + assert registrar_greedy.metric == "MeanSquares" + + with pytest.raises(ValueError, match="Invalid transform type"): + registrar_greedy.set_transform_type("Invalid") + with pytest.raises(ValueError, match="Invalid metric"): + registrar_greedy.set_metric("Invalid") + + print("\n✓ Transform type and metric setting work correctly") + + def test_set_fixed_image(self, registrar_greedy, test_images) -> None: + """Test setting fixed image.""" + fixed_image = test_images[0] + registrar_greedy.set_fixed_image(fixed_image) + assert registrar_greedy.fixed_image is not None, "Fixed image not set" + + print("\n✓ Fixed image set successfully") + print(f" Image size: {itk.size(registrar_greedy.fixed_image)}") + + def test_register_affine_without_mask( + self, registrar_greedy, test_images, test_directories + ) -> None: + """Test affine registration without masks.""" + output_dir = test_directories["output"] + reg_output_dir = output_dir / "registration_greedy" + reg_output_dir.mkdir(exist_ok=True) + + fixed_image = test_images[0] + moving_image = test_images[1] + + print("\nGreedy affine registration without mask...") + + registrar_greedy.set_modality("ct") + registrar_greedy.set_transform_type("Affine") + registrar_greedy.set_fixed_image(fixed_image) + + result = registrar_greedy.register(moving_image=moving_image) + + assert isinstance(result, dict), 
"Result should be a dictionary" + assert "inverse_transform" in result, "Missing inverse_transform in result" + assert "forward_transform" in result, "Missing forward_transform in result" + + inverse_transform = result["inverse_transform"] + forward_transform = result["forward_transform"] + + assert inverse_transform is not None, "inverse_transform is None" + assert forward_transform is not None, "forward_transform is None" + + print("✓ Greedy affine registration complete without mask") + + itk.transformwrite( + [inverse_transform], + str(reg_output_dir / "greedy_affine_inverse_no_mask.hdf"), + compression=True, + ) + itk.transformwrite( + [forward_transform], + str(reg_output_dir / "greedy_affine_forward_no_mask.hdf"), + compression=True, + ) + + def test_register_affine_with_mask( + self, registrar_greedy, test_images, test_directories + ) -> None: + """Test affine registration with binary masks.""" + output_dir = test_directories["output"] + reg_output_dir = output_dir / "registration_greedy" + reg_output_dir.mkdir(exist_ok=True) + + fixed_image = test_images[0] + moving_image = test_images[1] + + fixed_size_itk = itk.size(fixed_image) + moving_size_itk = itk.size(moving_image) + fixed_size = ( + int(fixed_size_itk[0]), + int(fixed_size_itk[1]), + int(fixed_size_itk[2]), + ) + moving_size = ( + int(moving_size_itk[0]), + int(moving_size_itk[1]), + int(moving_size_itk[2]), + ) + + fixed_mask_arr = np.zeros(fixed_size[::-1], dtype=np.uint8) + moving_mask_arr = np.zeros(moving_size[::-1], dtype=np.uint8) + fixed_mask_arr[ + fixed_size[2] // 4 : 3 * fixed_size[2] // 4, + fixed_size[1] // 4 : 3 * fixed_size[1] // 4, + fixed_size[0] // 4 : 3 * fixed_size[0] // 4, + ] = 1 + moving_mask_arr[ + moving_size[2] // 4 : 3 * moving_size[2] // 4, + moving_size[1] // 4 : 3 * moving_size[1] // 4, + moving_size[0] // 4 : 3 * moving_size[0] // 4, + ] = 1 + + fixed_mask = itk.image_from_array(fixed_mask_arr) + fixed_mask.CopyInformation(fixed_image) + moving_mask = 
itk.image_from_array(moving_mask_arr) + moving_mask.CopyInformation(moving_image) + + registrar_greedy.set_modality("ct") + registrar_greedy.set_transform_type("Affine") + registrar_greedy.set_fixed_image(fixed_image) + registrar_greedy.set_fixed_mask(fixed_mask) + + result = registrar_greedy.register( + moving_image=moving_image, moving_mask=moving_mask + ) + + assert isinstance(result, dict), "Result should be a dictionary" + assert result["inverse_transform"] is not None + assert result["forward_transform"] is not None + + print("✓ Greedy affine registration complete with masks") + + def test_transform_application( + self, registrar_greedy, test_images, test_directories + ) -> None: + """Test applying registration transform to moving image.""" + output_dir = test_directories["output"] + reg_output_dir = output_dir / "registration_greedy" + reg_output_dir.mkdir(exist_ok=True) + + fixed_image = test_images[0] + moving_image = test_images[1] + + registrar_greedy.set_modality("ct") + registrar_greedy.set_transform_type("Affine") + registrar_greedy.set_fixed_image(fixed_image) + result = registrar_greedy.register(moving_image=moving_image) + + forward_transform = result["forward_transform"] + transform_tools = TransformTools() + registered_image = transform_tools.transform_image( + moving_image, forward_transform, fixed_image, interpolation_method="linear" + ) + + assert registered_image is not None, "Registered image is None" + assert itk.size(registered_image) == itk.size(fixed_image), "Size mismatch" + + moving_arr = itk.array_from_image(moving_image) + registered_arr = itk.array_from_image(registered_image) + difference = np.sum( + np.abs(moving_arr.astype(float) - registered_arr.astype(float)) + ) + + print("✓ Greedy transform applied successfully") + print(f" Registered image size: {itk.size(registered_image)}") + print(f" Total difference: {difference:.2f}") + + itk.imwrite( + registered_image, + str(reg_output_dir / "greedy_registered_image.mha"), + 
compression=True, + ) From c640a3cb85c806508dd6005db0079ce2e119446b Mon Sep 17 00:00:00 2001 From: Stephen Aylward Date: Mon, 23 Feb 2026 10:41:44 -0500 Subject: [PATCH 02/10] DOC: Add missing member var descriptions --- tests/test_vtk_to_usd_library.py | 45 +++++++++++++++++++++++--------- 1 file changed, 33 insertions(+), 12 deletions(-) diff --git a/tests/test_vtk_to_usd_library.py b/tests/test_vtk_to_usd_library.py index 0617661..0470466 100644 --- a/tests/test_vtk_to_usd_library.py +++ b/tests/test_vtk_to_usd_library.py @@ -293,9 +293,13 @@ def test_single_file_conversion(self, test_directories, kcl_average_surface): # Get test data vtp_file = kcl_average_surface - # Convert to USD + # Single mesh (no split) so path is /World/Meshes/HeartSurface + settings = ConversionSettings( + separate_objects_by_connectivity=False, + separate_objects_by_cell_type=False, + ) output_usd = output_dir / "heart_surface.usd" - converter = VTKToUSDConverter() + converter = VTKToUSDConverter(settings) stage = converter.convert_file( vtp_file, output_usd, @@ -336,9 +340,13 @@ def test_conversion_with_material(self, test_directories, kcl_average_surface): metallic=0.0, ) - # Convert with material + # Single mesh so path is /World/Meshes/HeartSurface + settings = ConversionSettings( + separate_objects_by_connectivity=False, + separate_objects_by_cell_type=False, + ) output_usd = output_dir / "heart_with_material.usd" - converter = VTKToUSDConverter() + converter = VTKToUSDConverter(settings) stage = converter.convert_file( vtp_file, output_usd, @@ -369,12 +377,14 @@ def test_conversion_settings(self, test_directories, kcl_average_surface): vtp_file = kcl_average_surface - # Create custom settings + # Create custom settings (single mesh for predictable path) settings = ConversionSettings( triangulate_meshes=True, compute_normals=True, preserve_point_arrays=True, preserve_cell_arrays=True, + separate_objects_by_connectivity=False, + separate_objects_by_cell_type=False, 
meters_per_unit=0.001, # mm to meters up_axis="Y", ) @@ -382,7 +392,7 @@ def test_conversion_settings(self, test_directories, kcl_average_surface): # Convert with settings output_usd = output_dir / "heart_custom_settings.usd" converter = VTKToUSDConverter(settings) - stage = converter.convert_file(vtp_file, output_usd) + stage = converter.convert_file(vtp_file, output_usd, mesh_name="Mesh") # Verify stage metadata assert UsdGeom.GetStageMetersPerUnit(stage) == 0.001 @@ -404,10 +414,14 @@ def test_primvar_preservation(self, test_directories, kcl_average_surface): mesh_data = read_vtk_file(vtp_file) array_names = [arr.name for arr in mesh_data.generic_arrays] - # Convert to USD + # Single mesh so path is /World/Meshes/Mesh + settings = ConversionSettings( + separate_objects_by_connectivity=False, + separate_objects_by_cell_type=False, + ) output_usd = output_dir / "heart_with_primvars.usd" - converter = VTKToUSDConverter() - stage = converter.convert_file(vtp_file, output_usd) + converter = VTKToUSDConverter(settings) + stage = converter.convert_file(vtp_file, output_usd, mesh_name="Mesh") # Check primvars exist mesh_prim = stage.GetPrimAtPath("/World/Meshes/Mesh") @@ -441,13 +455,18 @@ def test_time_series_conversion(self, test_directories, kcl_average_surface): vtk_files = [vtp_file] * 3 time_codes = [0.0, 1.0, 2.0] - # Convert time series + # Single mesh so path is /World/Meshes/Mesh + settings = ConversionSettings( + separate_objects_by_connectivity=False, + separate_objects_by_cell_type=False, + ) output_usd = output_dir / "heart_time_series.usd" - converter = VTKToUSDConverter() + converter = VTKToUSDConverter(settings) stage = converter.convert_sequence( vtk_files=vtk_files, output_usd=output_usd, time_codes=time_codes, + mesh_name="Mesh", ) # Verify time range @@ -483,11 +502,13 @@ def test_end_to_end_conversion(self, test_directories, kcl_average_surface): vtp_file = kcl_average_surface - # Configure everything + # Configure everything (single mesh for 
predictable path) settings = ConversionSettings( triangulate_meshes=True, compute_normals=True, preserve_point_arrays=True, + separate_objects_by_connectivity=False, + separate_objects_by_cell_type=False, meters_per_unit=0.001, times_per_second=24.0, ) From e9cf7fd06ff1118a9a2cd7ca926d52184ea3deba Mon Sep 17 00:00:00 2001 From: Stephen Aylward Date: Wed, 25 Feb 2026 17:29:45 -0500 Subject: [PATCH 03/10] ENH: More precise control over colormap allocation --- .../convert_chop_valve_to_usd.ipynb | 47 ++++++++++++------- .../0-register_dirlab_4dct.ipynb | 2 +- .../1-make_dirlab_models.ipynb | 2 +- .../2-paint_dirlab_models.ipynb | 2 +- src/physiomotion4d/cli/convert_vtk_to_usd.py | 5 +- src/physiomotion4d/register_images_greedy.py | 25 ++++++---- .../segment_chest_total_segmentator.py | 22 ++++++++- src/physiomotion4d/usd_anatomy_tools.py | 6 +-- src/physiomotion4d/usd_tools.py | 44 +++++++++++------ src/physiomotion4d/vtk_to_usd/converter.py | 20 ++++---- src/physiomotion4d/vtk_to_usd/mesh_utils.py | 19 ++++---- .../workflow_convert_vtk_to_usd.py | 14 +----- 12 files changed, 129 insertions(+), 79 deletions(-) diff --git a/experiments/Convert_VTK_To_USD/convert_chop_valve_to_usd.ipynb b/experiments/Convert_VTK_To_USD/convert_chop_valve_to_usd.ipynb index 86de65a..0fbe890 100644 --- a/experiments/Convert_VTK_To_USD/convert_chop_valve_to_usd.ipynb +++ b/experiments/Convert_VTK_To_USD/convert_chop_valve_to_usd.ipynb @@ -33,7 +33,9 @@ "source": [ "from pathlib import Path\n", "import re\n", - "import time as time_module" + "import time as time_module\n", + "\n", + "import shutil" ] }, { @@ -89,6 +91,7 @@ "\n", "# Import USDTools for post-processing colormap\n", "from physiomotion4d.usd_tools import USDTools\n", + "from physiomotion4d.usd_anatomy_tools import USDAnatomyTools\n", "\n", "# Configure logging\n", "logging.basicConfig(level=logging.INFO, format=\"%(levelname)s: %(message)s\")" @@ -343,7 +346,9 @@ " mesh_name=\"TPV25Valve\",\n", " time_codes=tpv25_times,\n", " 
material=tpv25_material,\n", - " )" + " )\n", + "\n", + " shutil.copy(output_usd, output_usd.with_suffix(\".save.usd\"))" ] }, { @@ -356,43 +361,49 @@ " # Post-process: apply colormap visualization using USDTools\n", " if ENABLE_AUTO_COLORIZATION:\n", " usd_tools = USDTools()\n", + " usd_anatomy_tools = USDAnatomyTools(stage)\n", " if settings.separate_objects_by_connectivity is True:\n", - " mesh_path1 = \"/World/Meshes/object4\"\n", - " mesh_path2 = \"/World/Meshes/object3\"\n", + " mesh_path1 = \"/World/Meshes/TPV25Valve_object4\"\n", + " mesh_path2 = \"/World/Meshes/TPV25Valve_object3\"\n", " elif settings.separate_objects_by_cell_type is True:\n", - " mesh_path1 = \"/World/Meshes/triangle1\"\n", - " mesh_path2 = \"/World/Meshes/triangle1\"\n", + " mesh_path1 = \"/World/Meshes/TPV25Valve_triangle1\"\n", + " mesh_path2 = \"/World/Meshes/TPV25Valve_triangle1\"\n", " else:\n", " mesh_path1 = \"/World/Meshes/TPV25Valve\"\n", - " mesh_path2 = \"/World/Meshes/TPV25Valve\"\n", + " mesh_path2 = None\n", "\n", " # Inspect and select primvar for coloring\n", " primvars = usd_tools.list_mesh_primvars(str(output_usd), mesh_path1)\n", + " print(primvars)\n", " color_primvar = usd_tools.pick_color_primvar(\n", " primvars, keywords=(\"strain\", \"stress\")\n", " )\n", "\n", + " shutil.copy(output_usd.with_suffix(\".save.usd\"), Path(output_usd))\n", + "\n", " if color_primvar:\n", " print(f\"\\nApplying colormap to '{color_primvar}' using {DEFAULT_COLORMAP}\")\n", " usd_tools.apply_colormap_from_primvar(\n", " str(output_usd),\n", " mesh_path1,\n", " color_primvar,\n", + " # intensity_range=(75, 200),\n", " cmap=\"hot\",\n", + " # use_sigmoid_scale=True,\n", " bind_vertex_color_material=True,\n", " )\n", - " usd_tools.apply_colormap_from_primvar(\n", - " str(output_usd),\n", - " mesh_path2,\n", - " color_primvar,\n", - " cmap=\"gray\",\n", - " bind_vertex_color_material=True,\n", - " )\n", - " else:\n", - " print(\"\\nNo strain/stress primvar found for coloring\")\n", + " if 
mesh_path2 is not None:\n", + " mesh_prim = stage.GetPrimAtPath(mesh_path2)\n", + " usd_anatomy_tools.apply_anatomy_material_to_prim(\n", + " mesh_prim, usd_anatomy_tools.bone_params\n", + " )\n", "\n", - " # Read MeshData\n", - " mesh_data_sequence = [read_vtk_file(f, extract_surface=True) for f in tpv25_files]\n", + " if not validation_report[\"is_consistent\"]:\n", + " print(\n", + " f\"Warning: Found {len(validation_report['warnings'])} topology/primvar issues\"\n", + " )\n", + " if validation_report[\"topology_changes\"]:\n", + " print(\"\\nNo strain/stress primvar found for coloring\")\n", "\n", " print(f\" Size: {output_usd.stat().st_size / (1024 * 1024):.2f} MB\")\n", " print(f\" Time range: {stage.GetStartTimeCode()} - {stage.GetEndTimeCode()}\")\n", diff --git a/experiments/Lung-GatedCT_To_USD/0-register_dirlab_4dct.ipynb b/experiments/Lung-GatedCT_To_USD/0-register_dirlab_4dct.ipynb index e817549..0fce159 100644 --- a/experiments/Lung-GatedCT_To_USD/0-register_dirlab_4dct.ipynb +++ b/experiments/Lung-GatedCT_To_USD/0-register_dirlab_4dct.ipynb @@ -22,7 +22,7 @@ "heart_mask_dilation = 5\n", "\n", "case_names = DataDirLab4DCT().case_names\n", - "case_names = [case_names[0], case_names[1]]\n", + "case_names = [case_names[4]]\n", "images = range(10)\n", "# images = [1]\n", "\n", diff --git a/experiments/Lung-GatedCT_To_USD/1-make_dirlab_models.ipynb b/experiments/Lung-GatedCT_To_USD/1-make_dirlab_models.ipynb index 92ea3db..711b771 100644 --- a/experiments/Lung-GatedCT_To_USD/1-make_dirlab_models.ipynb +++ b/experiments/Lung-GatedCT_To_USD/1-make_dirlab_models.ipynb @@ -18,7 +18,7 @@ "\n", "\n", "case_names = DataDirLab4DCT().case_names\n", - "case_names = [case_names[0], case_names[1]]\n", + "case_names = [case_names[4]]\n", "\n", "base_timepoint = 30\n", "\n", diff --git a/experiments/Lung-GatedCT_To_USD/2-paint_dirlab_models.ipynb b/experiments/Lung-GatedCT_To_USD/2-paint_dirlab_models.ipynb index 67a061b..1f90253 100644 --- 
a/experiments/Lung-GatedCT_To_USD/2-paint_dirlab_models.ipynb +++ b/experiments/Lung-GatedCT_To_USD/2-paint_dirlab_models.ipynb @@ -16,7 +16,7 @@ "\n", "case_names = DataDirLab4DCT().case_names\n", "\n", - "case_names = [case_names[0], case_names[1]]\n", + "case_names = [case_names[4]]\n", "\n", "output_dir = \"./results\"" ] diff --git a/src/physiomotion4d/cli/convert_vtk_to_usd.py b/src/physiomotion4d/cli/convert_vtk_to_usd.py index 179c82e..382ec26 100644 --- a/src/physiomotion4d/cli/convert_vtk_to_usd.py +++ b/src/physiomotion4d/cli/convert_vtk_to_usd.py @@ -130,8 +130,9 @@ def main() -> int: ) parser.add_argument( "--color", - type=str, - metavar="R G B", + nargs=3, + type=float, + metavar=("R", "G", "B"), help="Solid color as R G B in [0,1] or [0,255] (default: 0.8 0.8 0.8). Used when --appearance solid.", ) parser.add_argument( diff --git a/src/physiomotion4d/register_images_greedy.py b/src/physiomotion4d/register_images_greedy.py index bd12bb2..f2153e6 100644 --- a/src/physiomotion4d/register_images_greedy.py +++ b/src/physiomotion4d/register_images_greedy.py @@ -48,7 +48,7 @@ class RegisterImagesGreedy(RegisterImagesBase): - Rigid and affine registration (-a -dof 6 or 12) - Deformable registration with multi-resolution (-n, -s) - Metrics: NMI, NCC, SSD (mapped from CC, Mattes, MeanSquares) - - Optional mask support (-gm) + - Optional mask support (-gm fixed, -mm moving when both provided) - SimpleITK in-memory interface via ImageTools Inherits from RegisterImagesBase: @@ -99,6 +99,10 @@ def set_transform_type(self, transform_type: str) -> None: def set_metric(self, metric: str) -> None: """Set the similarity metric (CC→NCC, Mattes→NMI, MeanSquares→SSD). + This metric is used for both affine and deformable registration stages. + Greedy recommends NCC or SSD for deformable registration; NMI works + well for affine but is less suited to deformable. + Args: metric: 'CC', 'Mattes', or 'MeanSquares'. 
""" @@ -188,8 +192,9 @@ def _registration_method_affine_or_rigid( "aff_out": None, } if fixed_mask_sitk is not None and moving_mask_sitk is not None: - cmd += " -gm fixed_mask" + cmd += " -gm fixed_mask -mm moving_mask" kwargs["fixed_mask"] = fixed_mask_sitk + kwargs["moving_mask"] = moving_mask_sitk if initial_affine is not None: cmd += " -ia aff_initial" kwargs["aff_initial"] = initial_affine @@ -210,27 +215,27 @@ def _registration_method_deformable( fixed_mask_sitk: Optional[Any], moving_mask_sitk: Optional[Any], iterations_str: str, + metric_str: str, initial_affine: Optional[NDArray[np.float64]] = None, ) -> tuple[Optional[NDArray[np.float64]], Any, float]: """Run Greedy deformable registration. Returns (affine 4x4 or None, warp_sitk, loss).""" Greedy3D = _try_import_greedy() g = Greedy3D() - # Optional affine init + # Optional affine init (uses configured metric) if initial_affine is None: - cmd_aff = ( - f"-i fixed moving -a -dof 6 -n {iterations_str} -m NMI -o aff_init" - ) + cmd_aff = f"-i fixed moving -a -dof 6 -n {iterations_str} -m {metric_str} -o aff_init" kwargs_aff = {"fixed": fixed_sitk, "moving": moving_sitk, "aff_init": None} if fixed_mask_sitk is not None and moving_mask_sitk is not None: - cmd_aff += " -gm fixed_mask" + cmd_aff += " -gm fixed_mask -mm moving_mask" kwargs_aff["fixed_mask"] = fixed_mask_sitk + kwargs_aff["moving_mask"] = moving_mask_sitk g.execute(cmd_aff, **kwargs_aff) initial_affine = np.array(g["aff_init"], dtype=np.float64) cmd_def = ( f"-i fixed moving -it aff_init -n {iterations_str} " - f"-m NCC 2x2x2 -s {self.deformable_smoothing} -o warp_out" + f"-m {metric_str} -s {self.deformable_smoothing} -o warp_out" ) kwargs_def = { "fixed": fixed_sitk, @@ -239,8 +244,9 @@ def _registration_method_deformable( "warp_out": None, } if fixed_mask_sitk is not None and moving_mask_sitk is not None: - cmd_def += " -gm fixed_mask" + cmd_def += " -gm fixed_mask -mm moving_mask" kwargs_def["fixed_mask"] = fixed_mask_sitk + 
kwargs_def["moving_mask"] = moving_mask_sitk g.execute(cmd_def, **kwargs_def) warp_out = g["warp_out"] @@ -342,6 +348,7 @@ def registration_method( fixed_mask_sitk, moving_mask_sitk, iterations_str, + metric_str, initial_affine=initial_affine, ) aff_tfm = ( diff --git a/src/physiomotion4d/segment_chest_total_segmentator.py b/src/physiomotion4d/segment_chest_total_segmentator.py index d8bb53b..14dc661 100644 --- a/src/physiomotion4d/segment_chest_total_segmentator.py +++ b/src/physiomotion4d/segment_chest_total_segmentator.py @@ -86,6 +86,7 @@ def __init__(self, log_level: int | str = logging.INFO): 60: "brachiocephalic_vein_right", 62: "superior_vena_cava", 63: "inferior_vena_cava", + 120: "lung_vessels", } self.lung_mask_ids = { @@ -94,8 +95,6 @@ def __init__(self, log_level: int | str = logging.INFO): 12: "lung_upper_lobe_right", 13: "lung_middle_lobe_right", 14: "lung_lower_lobe_right", - 15: "esophagus", - 16: "trachea", } self.bone_mask_ids = { @@ -187,6 +186,8 @@ def __init__(self, log_level: int | str = logging.INFO): 84: "gluteus_minimus_left", 85: "gluteus_minimus_right", 90: "brain", + 15: "esophagus", + 16: "trachea", 133: "soft_tissue", } @@ -241,6 +242,11 @@ def segmentation_method(self, preprocessed_image: itk.image) -> itk.image: output_nib_image2 = totalsegmentator(nib_image, task="body", device="gpu") labelmap_arr2 = output_nib_image2.get_fdata().astype(np.uint8) + output_nib_image3 = totalsegmentator( + nib_image, task="lung_vessels", device="gpu" + ) + labelmap_arr3 = output_nib_image3.get_fdata().astype(np.uint8) + # The data from nibabel is in RAS orientation with xyz axis order. # The combination logic can be performed on these numpy arrays. 
mask1 = labelmap_arr1 == 0 @@ -250,6 +256,18 @@ def segmentation_method(self, preprocessed_image: itk.image) -> itk.image: mask, list(self.soft_tissue_mask_ids.keys())[-1], labelmap_arr1 ) + mask3 = labelmap_arr3 == 1 + mask3_img = itk.image_from_array(mask3.astype(np.uint8)) + mask3_img.CopyInformation(preprocessed_image) + imMath = itk.TubeTK.ImageMath.New(mask3_img) + imMath.Dilate(1, 1, 0) + imMath.Erode(1, 1, 0) + mask3_img = imMath.GetOutputUChar() + mask3 = itk.array_from_image(mask3_img) + final_arr = np.where(mask3, 120, final_arr) # lung vessels + mask3 = labelmap_arr3 == 2 + final_arr = np.where(mask3, 16, final_arr) # trachea + # To create an ITK image, we save the result and read it back with # ITK. This correctly handles the coordinate system and data # layout conversions. diff --git a/src/physiomotion4d/usd_anatomy_tools.py b/src/physiomotion4d/usd_anatomy_tools.py index ea38729..79215ca 100644 --- a/src/physiomotion4d/usd_anatomy_tools.py +++ b/src/physiomotion4d/usd_anatomy_tools.py @@ -218,9 +218,9 @@ def apply_anatomy_material_to_mesh(self, mesh_path: str, anatomy_type: str) -> N raise ValueError(f"Invalid prim at path: {mesh_path}") if not prim.IsA(UsdGeom.Mesh): raise ValueError(f"Prim at {mesh_path} is not a Mesh") - self._apply_surgical_materials(prim, params) + self.apply_anatomy_material_to_prim(prim, params) - def _apply_surgical_materials( + def apply_anatomy_material_to_prim( self, prim: Any, material_params: Mapping[str, Any] ) -> None: """Corrected material application with Omniverse-specific fixes""" @@ -405,4 +405,4 @@ def enhance_meshes(self, segmentator: Any) -> None: assert anatomy_params is not None mesh_prim = UsdGeom.Mesh(prim) if mesh_prim: - self._apply_surgical_materials(prim, anatomy_params) + self.apply_anatomy_material_to_prim(prim, anatomy_params) diff --git a/src/physiomotion4d/usd_tools.py b/src/physiomotion4d/usd_tools.py index 0244c9a..ecfae15 100644 --- a/src/physiomotion4d/usd_tools.py +++ 
b/src/physiomotion4d/usd_tools.py @@ -721,6 +721,7 @@ def apply_colormap_from_primvar( cmap: str = "viridis", time_codes: list[float] | None = None, intensity_range: tuple[float, float] | None = None, + use_sigmoid_scale: bool = False, write_default_at_t0: bool = True, bind_vertex_color_material: bool = True, ) -> None: @@ -944,10 +945,15 @@ def apply_colormap_from_primvar( for idx, (tc, scalar) in enumerate(scalar_samples): # Normalize to [0, 1] + if vmax > vmin: normalized = (scalar - vmin) / (vmax - vmin) else: normalized = np.full_like(scalar, 0.5) + + if use_sigmoid_scale: + normalized = 1 / (1 + np.exp(-4 * normalized)) + normalized = np.clip(normalized, 0.0, 1.0) # Apply colormap @@ -1026,28 +1032,38 @@ def set_solid_display_color( points_attr = mesh.GetPointsAttr() # Resolve time codes: default only or at each sample - if time_codes is None: - time_codes = [Usd.TimeCode.Default().GetValue()] vec = Gf.Vec3f(float(color[0]), float(color[1]), float(color[2])) display_color_pv = primvars_api.CreatePrimvar( "displayColor", Sdf.ValueTypeNames.Color3fArray, UsdGeom.Tokens.vertex ) - for tc in time_codes: - # Get point count at this time - pts = points_attr.Get(Usd.TimeCode(tc)) + if time_codes is None: + # Default time: get points and set primvar without an explicit time code + pts = points_attr.Get() n_points = len(pts) if pts is not None else 0 - if n_points == 0 and tc == Usd.TimeCode.Default().GetValue(): - pts = points_attr.Get() - n_points = len(pts) if pts is not None else 0 - if n_points == 0: - continue - color_array = Vt.Vec3fArray([vec] * n_points) - if tc == Usd.TimeCode.Default().GetValue(): + if n_points > 0: + color_array = Vt.Vec3fArray([vec] * n_points) display_color_pv.Set(color_array) - else: - display_color_pv.Set(color_array, Usd.TimeCode(tc)) + else: + for tc in time_codes: + # Normalize to a Usd.TimeCode + usd_tc = tc if isinstance(tc, Usd.TimeCode) else Usd.TimeCode(tc) + + # Get point count at this time + pts = points_attr.Get(usd_tc) + 
n_points = len(pts) if pts is not None else 0 + if n_points == 0 and usd_tc.IsDefault(): + # Fallback: use time-independent points if default has no sample + pts = points_attr.Get() + n_points = len(pts) if pts is not None else 0 + if n_points == 0: + continue + color_array = Vt.Vec3fArray([vec] * n_points) + if usd_tc.IsDefault(): + display_color_pv.Set(color_array) + else: + display_color_pv.Set(color_array, usd_tc) if bind_vertex_color_material: self._ensure_vertex_color_material(stage, mesh_prim) diff --git a/src/physiomotion4d/vtk_to_usd/converter.py b/src/physiomotion4d/vtk_to_usd/converter.py index b26bdfc..2c6468e 100644 --- a/src/physiomotion4d/vtk_to_usd/converter.py +++ b/src/physiomotion4d/vtk_to_usd/converter.py @@ -89,13 +89,13 @@ def convert_file( # Create mesh(es) - by connectivity, by cell type, or single if self.settings.separate_objects_by_connectivity: - parts = split_mesh_data_by_connectivity(mesh_data) + parts = split_mesh_data_by_connectivity(mesh_data, mesh_name=mesh_name) for _idx, (part_data, base_name) in enumerate(parts): mesh_path = f"/World/Meshes/{base_name}" self._ensure_parent_path(mesh_path) mesh_converter.create_mesh(part_data, mesh_path, bind_material=True) elif self.settings.separate_objects_by_cell_type: - parts = split_mesh_data_by_cell_type(mesh_data) + parts = split_mesh_data_by_cell_type(mesh_data, mesh_name=mesh_name) for idx, (part_data, base_name) in enumerate(parts): prim_name = f"{base_name}_{idx}" mesh_path = f"/World/Meshes/{prim_name}" @@ -177,7 +177,8 @@ def convert_sequence( # Create time-varying mesh(es) - by connectivity, by cell type, or single if self.settings.separate_objects_by_connectivity: parts_sequence = [ - split_mesh_data_by_connectivity(m) for m in mesh_data_sequence + split_mesh_data_by_connectivity(m, mesh_name=mesh_name) + for m in mesh_data_sequence ] n_parts = len(parts_sequence[0]) if not all(len(p) == n_parts for p in parts_sequence): @@ -201,7 +202,8 @@ def convert_sequence( ) elif 
self.settings.separate_objects_by_cell_type: parts_sequence = [ - split_mesh_data_by_cell_type(m) for m in mesh_data_sequence + split_mesh_data_by_cell_type(m, mesh_name=mesh_name) + for m in mesh_data_sequence ] n_parts = len(parts_sequence[0]) if not all(len(p) == n_parts for p in parts_sequence): @@ -277,13 +279,13 @@ def convert_mesh_data( # Create mesh(es) - by connectivity, by cell type, or single if self.settings.separate_objects_by_connectivity: - parts = split_mesh_data_by_connectivity(mesh_data) + parts = split_mesh_data_by_connectivity(mesh_data, mesh_name=mesh_name) for _idx, (part_data, base_name) in enumerate(parts): mesh_path = f"/World/Meshes/{base_name}" self._ensure_parent_path(mesh_path) mesh_converter.create_mesh(part_data, mesh_path, bind_material=True) elif self.settings.separate_objects_by_cell_type: - parts = split_mesh_data_by_cell_type(mesh_data) + parts = split_mesh_data_by_cell_type(mesh_data, mesh_name=mesh_name) for idx, (part_data, base_name) in enumerate(parts): prim_name = f"{base_name}_{idx}" mesh_path = f"/World/Meshes/{prim_name}" @@ -362,7 +364,8 @@ def convert_mesh_data_sequence( # Create time-varying mesh(es) - by connectivity, by cell type, or single if self.settings.separate_objects_by_connectivity: parts_sequence = [ - split_mesh_data_by_connectivity(m) for m in mesh_data_sequence + split_mesh_data_by_connectivity(m, mesh_name=mesh_name) + for m in mesh_data_sequence ] n_parts = len(parts_sequence[0]) if not all(len(p) == n_parts for p in parts_sequence): @@ -386,7 +389,8 @@ def convert_mesh_data_sequence( ) elif self.settings.separate_objects_by_cell_type: parts_sequence = [ - split_mesh_data_by_cell_type(m) for m in mesh_data_sequence + split_mesh_data_by_cell_type(m, mesh_name=mesh_name) + for m in mesh_data_sequence ] n_parts = len(parts_sequence[0]) if not all(len(p) == n_parts for p in parts_sequence): diff --git a/src/physiomotion4d/vtk_to_usd/mesh_utils.py b/src/physiomotion4d/vtk_to_usd/mesh_utils.py index 
97d3c37..ba9c835 100644 --- a/src/physiomotion4d/vtk_to_usd/mesh_utils.py +++ b/src/physiomotion4d/vtk_to_usd/mesh_utils.py @@ -13,7 +13,6 @@ from .data_structures import GenericArray, MeshData - # Map face vertex count to cell type name (matches VTK semantics: triangle=3, quad=4, tetra=4, hex=8) # For 4 we use "Quad" (surface); volume tet would also be 4 - we don't distinguish here. CELL_TYPE_NAME_BY_VERTEX_COUNT: dict[int, str] = { @@ -30,7 +29,9 @@ def cell_type_name_for_vertex_count(count: int) -> str: return CELL_TYPE_NAME_BY_VERTEX_COUNT.get(count, f"Cell_{count}") -def split_mesh_data_by_cell_type(mesh_data: MeshData) -> list[tuple[MeshData, str]]: +def split_mesh_data_by_cell_type( + mesh_data: MeshData, mesh_name: str +) -> list[tuple[MeshData, str]]: """Split MeshData into one mesh per distinct face vertex count (cell type). Each part is named by cell type (e.g. Triangle, Quad, Hexahedron). The caller @@ -58,7 +59,7 @@ def split_mesh_data_by_cell_type(mesh_data: MeshData) -> list[tuple[MeshData, st if len(unique_counts) <= 1: # Single cell type: return one mesh with that type name name = cell_type_name_for_vertex_count(int(counts[0])) if n_faces else "Mesh" - return [(mesh_data, name)] + return [(mesh_data, f"{mesh_name}_{name}")] result: list[tuple[MeshData, str]] = [] @@ -140,7 +141,7 @@ def split_mesh_data_by_cell_type(mesh_data: MeshData) -> list[tuple[MeshData, st material_id=mesh_data.material_id, ) name = cell_type_name_for_vertex_count(count) - result.append((part, name)) + result.append((part, f"{mesh_name}_{name}")) return result @@ -275,7 +276,9 @@ def _extract_mesh_part_by_face_indices( ) -def split_mesh_data_by_connectivity(mesh_data: MeshData) -> list[tuple[MeshData, str]]: +def split_mesh_data_by_connectivity( + mesh_data: MeshData, mesh_name: str +) -> list[tuple[MeshData, str]]: """Split MeshData into one mesh per connected component. 
A connected component is a maximal set of cells that share vertices (directly @@ -295,19 +298,19 @@ def split_mesh_data_by_connectivity(mesh_data: MeshData) -> list[tuple[MeshData, n_faces = len(counts) if n_faces == 0: - return [(mesh_data, "object1")] + return [(mesh_data, f"{mesh_name}_object1")] cum = np.concatenate([[0], np.cumsum(counts)]).astype(np.int64) component_face_lists = _connected_components_face_indices(n_faces, indices, cum) if len(component_face_lists) <= 1: - return [(mesh_data, "object1")] + return [(mesh_data, f"{mesh_name}_object1")] result: list[tuple[MeshData, str]] = [] for k, face_idxs in enumerate(component_face_lists, start=1): part = _extract_mesh_part_by_face_indices( mesh_data, face_idxs, n_points, n_faces, counts, indices, cum, points ) - result.append((part, f"object{k}")) + result.append((part, f"{mesh_name}_object{k}")) return result diff --git a/src/physiomotion4d/workflow_convert_vtk_to_usd.py b/src/physiomotion4d/workflow_convert_vtk_to_usd.py index 5b1cd43..b4cb356 100644 --- a/src/physiomotion4d/workflow_convert_vtk_to_usd.py +++ b/src/physiomotion4d/workflow_convert_vtk_to_usd.py @@ -12,7 +12,6 @@ from pathlib import Path from typing import Literal - from physiomotion4d.physiomotion4d_base import PhysioMotion4DBase from physiomotion4d.usd_anatomy_tools import USDAnatomyTools from physiomotion4d.usd_tools import USDTools @@ -146,6 +145,7 @@ def run(self) -> str: self.vtk_files, pattern=self.time_series_pattern ) time_steps = [t for t, _ in time_series] + time_codes = [float(t) for t in time_steps] paths_ordered = [p for _, p in time_series] n_frames = len(paths_ordered) @@ -182,7 +182,6 @@ def run(self) -> str: extract_surface=self.extract_surface, ) else: - time_codes = [float(t) for t in time_steps] # Optional: validate topology consistency across frames try: mesh_sequence = [ @@ -221,21 +220,12 @@ def run(self) -> str: ) if self.appearance == "solid": - time_codes_for_color: list[float] | None = None - if n_frames > 1 and 
stage.HasAuthoredTimeCodeRange(): - time_codes_for_color = [ - float(t) - for t in range( - int(stage.GetStartTimeCode()), - int(stage.GetEndTimeCode()) + 1, - ) - ] for mesh_path in mesh_paths: usd_tools.set_solid_display_color( str(self.output_usd), mesh_path, self.solid_color, - time_codes=time_codes_for_color, + time_codes=time_codes, bind_vertex_color_material=True, ) From e6f602fefde12c6c2899f6369120207ac78a5586 Mon Sep 17 00:00:00 2001 From: Stephen Aylward Date: Wed, 25 Feb 2026 19:00:41 -0500 Subject: [PATCH 04/10] ENH: Script converting CHOP heart parts (from simpleware) to USD --- .../convert_chop_heart_vtk_to_usd.ipynb | 73 +++++++++++++++++++ .../convert_chop_valve_to_usd.ipynb | 2 +- 2 files changed, 74 insertions(+), 1 deletion(-) create mode 100644 experiments/Convert_VTK_To_USD/convert_chop_heart_vtk_to_usd.ipynb diff --git a/experiments/Convert_VTK_To_USD/convert_chop_heart_vtk_to_usd.ipynb b/experiments/Convert_VTK_To_USD/convert_chop_heart_vtk_to_usd.ipynb new file mode 100644 index 0000000..ef64038 --- /dev/null +++ b/experiments/Convert_VTK_To_USD/convert_chop_heart_vtk_to_usd.ipynb @@ -0,0 +1,73 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "id": "7070101e", + "metadata": {}, + "outputs": [], + "source": [ + "from pathlib import Path\n", + "\n", + "import os\n", + "\n", + "from physiomotion4d.workflow_convert_vtk_to_usd import WorkflowConvertVTKToUSD" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "e555c83a", + "metadata": {}, + "outputs": [], + "source": [ + "vtknames = [\"a\", \"la\", \"lca\", \"lv\", \"myo\", \"pa\", \"ra\", \"rv\"]\n", + "usdnames = [\n", + " \"Aorta\",\n", + " \"LeftAtrium\",\n", + " \"LeftCoronaryArtery\",\n", + " \"LeftVentricle\",\n", + " \"Myocardium\",\n", + " \"PulmonaryArtery\",\n", + " \"RightAtrium\",\n", + " \"RightVentricle\",\n", + "]\n", + "\n", + "for vtkname, usdname in zip(vtknames, usdnames):\n", + " if 
os.path.exists(Path.absolute(Path(f\"RVOT28-Dias-{usdname}.usd\"))):\n", + " os.remove(Path.absolute(Path(f\"RVOT28-Dias-{usdname}.usd\")))\n", + " converter = WorkflowConvertVTKToUSD(\n", + " vtk_files=[f\"../../data/CHOP-Valve4D/Simpleware/parts/{vtkname}.vtk\"],\n", + " output_usd=Path.absolute(Path(f\"RVOT28-Dias-{usdname}.usd\")),\n", + " separate_by_connectivity=False,\n", + " separate_by_cell_type=False,\n", + " mesh_name=f\"RVOT28Dias_{usdname}\",\n", + " appearance=\"anatomy\",\n", + " anatomy_type=\"heart\",\n", + " )\n", + " converter.run()" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "venv", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.11" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} \ No newline at end of file diff --git a/experiments/Convert_VTK_To_USD/convert_chop_valve_to_usd.ipynb b/experiments/Convert_VTK_To_USD/convert_chop_valve_to_usd.ipynb index 0fbe890..fdf10b5 100644 --- a/experiments/Convert_VTK_To_USD/convert_chop_valve_to_usd.ipynb +++ b/experiments/Convert_VTK_To_USD/convert_chop_valve_to_usd.ipynb @@ -986,4 +986,4 @@ }, "nbformat": 4, "nbformat_minor": 4 -} \ No newline at end of file +} From 0d9d083ae55ca10e50e3a3eb91b6c5dd2fa1dbef Mon Sep 17 00:00:00 2001 From: "Stephen R. 
Aylward" Date: Wed, 25 Feb 2026 19:54:06 -0500 Subject: [PATCH 05/10] Update src/physiomotion4d/cli/convert_vtk_to_usd.py Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- src/physiomotion4d/cli/convert_vtk_to_usd.py | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) diff --git a/src/physiomotion4d/cli/convert_vtk_to_usd.py b/src/physiomotion4d/cli/convert_vtk_to_usd.py index 382ec26..850334a 100644 --- a/src/physiomotion4d/cli/convert_vtk_to_usd.py +++ b/src/physiomotion4d/cli/convert_vtk_to_usd.py @@ -183,7 +183,21 @@ def main() -> int: solid_color = (0.8, 0.8, 0.8) if args.color: try: - solid_color = _parse_color(args.color) + # If argparse defined --color with nargs=3 and type=float, args.color will be a list of floats. + # Handle that case directly by normalizing into [0, 1] and forming an RGB tuple. + if isinstance(args.color, (list, tuple)): + components = [] + for v in args.color: + fv = float(v) + # If any component is > 1.0, interpret values as [0, 255] and normalize. + if fv > 1.0: + fv = fv / 255.0 + components.append(fv) + if len(components) != 3: + raise ValueError("Color must have exactly three components (R G B).") + solid_color = tuple(components) + else: + solid_color = _parse_color(args.color) except ValueError as e: print(f"Error: {e}") return 1 From 0c001ce0687a14d96b7fde39faf12a39064dfde7 Mon Sep 17 00:00:00 2001 From: "Stephen R. 
Aylward" Date: Wed, 25 Feb 2026 20:53:03 -0500 Subject: [PATCH 06/10] Update src/physiomotion4d/cli/convert_vtk_to_usd.py Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- src/physiomotion4d/cli/convert_vtk_to_usd.py | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/src/physiomotion4d/cli/convert_vtk_to_usd.py b/src/physiomotion4d/cli/convert_vtk_to_usd.py index 850334a..6881a74 100644 --- a/src/physiomotion4d/cli/convert_vtk_to_usd.py +++ b/src/physiomotion4d/cli/convert_vtk_to_usd.py @@ -186,16 +186,18 @@ def main() -> int: # If argparse defined --color with nargs=3 and type=float, args.color will be a list of floats. # Handle that case directly by normalizing into [0, 1] and forming an RGB tuple. if isinstance(args.color, (list, tuple)): - components = [] - for v in args.color: - fv = float(v) - # If any component is > 1.0, interpret values as [0, 255] and normalize. - if fv > 1.0: - fv = fv / 255.0 - components.append(fv) + components = [float(v) for v in args.color] if len(components) != 3: raise ValueError("Color must have exactly three components (R G B).") - solid_color = tuple(components) + # Interpret either as normalized [0, 1] or byte [0, 255] values, but do not mix scales. + if all(0.0 <= v <= 1.0 for v in components): + solid_color = tuple(components) + elif all(0.0 <= v <= 255.0 for v in components): + solid_color = tuple(v / 255.0 for v in components) + else: + raise ValueError( + "Color values must all be in [0, 1] or all in [0, 255]." 
+ ) else: solid_color = _parse_color(args.color) except ValueError as e: From b7dacec2106b7d43ec037792ef447cfaecf02955 Mon Sep 17 00:00:00 2001 From: Stephen Aylward Date: Wed, 25 Feb 2026 21:05:14 -0500 Subject: [PATCH 07/10] ENH: Update from copilot --- .../1-register_images.ipynb | 4 +- src/physiomotion4d/register_models_pca.py | 4 +- src/physiomotion4d/usd_tools.py | 27 ++++++- src/physiomotion4d/vtk_to_usd/converter.py | 77 +++++++++++++++++++ src/physiomotion4d/vtk_to_usd/mesh_utils.py | 13 ++-- .../workflow_convert_vtk_to_usd.py | 49 ++++++++---- 6 files changed, 149 insertions(+), 25 deletions(-) diff --git a/experiments/Heart-GatedCT_To_USD/1-register_images.ipynb b/experiments/Heart-GatedCT_To_USD/1-register_images.ipynb index bd005cf..2825d1b 100644 --- a/experiments/Heart-GatedCT_To_USD/1-register_images.ipynb +++ b/experiments/Heart-GatedCT_To_USD/1-register_images.ipynb @@ -103,7 +103,7 @@ "metadata": {}, "outputs": [], "source": [ - "for i in range(0, 21, 1): # Process every 4th slice to save time testing\n", + "for i in range(0, 21, 4): # Process every 4th slice to save time testing\n", " print(f\"Processing slice {i:03d}\")\n", " moving_image = itk.imread(os.path.join(data_dir, f\"slice_{i:03d}.mha\"))\n", " result = seg.segment(moving_image, contrast_enhanced_study=True)\n", @@ -239,4 +239,4 @@ }, "nbformat": 4, "nbformat_minor": 5 -} \ No newline at end of file +} diff --git a/src/physiomotion4d/register_models_pca.py b/src/physiomotion4d/register_models_pca.py index c4b65c6..0ed6977 100644 --- a/src/physiomotion4d/register_models_pca.py +++ b/src/physiomotion4d/register_models_pca.py @@ -81,7 +81,7 @@ def __init__( pca_template_model_point_subsample: int = 4, pre_pca_transform: Optional[itk.Transform] = None, fixed_distance_map: Optional[itk.Image] = None, - fixed_model: Optional[pv.UnstructuredGrid] = None, + fixed_model: Optional[pv.UnstructuredGrid | pv.PolyData] = None, reference_image: Optional[itk.Image] = None, log_level: int | str = 
logging.INFO, ): @@ -186,7 +186,7 @@ def from_json( pca_template_model_point_subsample: int = 4, pre_pca_transform: Optional[itk.Transform] = None, fixed_distance_map: Optional[itk.Image] = None, - fixed_model: Optional[pv.UnstructuredGrid] = None, + fixed_model: Optional[pv.UnstructuredGrid | pv.PolyData] = None, reference_image: Optional[itk.Image] = None, log_level: int | str = logging.INFO, ) -> Self: diff --git a/src/physiomotion4d/usd_tools.py b/src/physiomotion4d/usd_tools.py index ecfae15..5db9e81 100644 --- a/src/physiomotion4d/usd_tools.py +++ b/src/physiomotion4d/usd_tools.py @@ -909,7 +909,21 @@ def apply_colormap_from_primvar( # Value range: use provided intensity_range or compute from data if intensity_range is not None: - vmin, vmax = intensity_range + try: + vmin, vmax = float(intensity_range[0]), float(intensity_range[1]) + except (TypeError, IndexError) as e: + raise ValueError( + "intensity_range must be a sequence of two floats (vmin, vmax)" + ) from e + if not (np.isfinite(vmin) and np.isfinite(vmax)): + raise ValueError( + f"intensity_range values must be finite; got ({vmin}, {vmax})" + ) + if vmin >= vmax: + vmin, vmax = vmax, vmin + self.log_info( + f"intensity_range was (vmax, vmin); swapped to {vmin:.6g} to {vmax:.6g}" + ) self.log_info(f"Using specified intensity range: {vmin:.6g} to {vmax:.6g}") else: all_values = np.concatenate([s for _, s in scalar_samples]) @@ -952,7 +966,7 @@ def apply_colormap_from_primvar( normalized = np.full_like(scalar, 0.5) if use_sigmoid_scale: - normalized = 1 / (1 + np.exp(-4 * normalized)) + normalized = 1 / (1 + np.exp(-4 * (normalized - 0.5))) normalized = np.clip(normalized, 0.0, 1.0) @@ -1046,6 +1060,7 @@ def set_solid_display_color( color_array = Vt.Vec3fArray([vec] * n_points) display_color_pv.Set(color_array) else: + default_point_count: int | None = None for tc in time_codes: # Normalize to a Usd.TimeCode usd_tc = tc if isinstance(tc, Usd.TimeCode) else Usd.TimeCode(tc) @@ -1059,12 +1074,20 @@ def 
set_solid_display_color( n_points = len(pts) if pts is not None else 0 if n_points == 0: continue + if default_point_count is None: + default_point_count = n_points color_array = Vt.Vec3fArray([vec] * n_points) if usd_tc.IsDefault(): display_color_pv.Set(color_array) else: display_color_pv.Set(color_array, usd_tc) + # Author a default (time-independent) value so consumers that query the + # default when not time-scrubbing still see the solid color. + if default_point_count is not None: + default_color_array = Vt.Vec3fArray([vec] * default_point_count) + display_color_pv.Set(default_color_array) + if bind_vertex_color_material: self._ensure_vertex_color_material(stage, mesh_prim) if stage_path: diff --git a/src/physiomotion4d/vtk_to_usd/converter.py b/src/physiomotion4d/vtk_to_usd/converter.py index 2c6468e..f3ee070 100644 --- a/src/physiomotion4d/vtk_to_usd/converter.py +++ b/src/physiomotion4d/vtk_to_usd/converter.py @@ -112,6 +112,83 @@ def convert_file( return stage + def convert_files_static( + self, + vtk_files: Sequence[str | Path], + output_usd: str | Path, + mesh_name: str = "Mesh", + material: Optional[MaterialData] = None, + extract_surface: bool = True, + ) -> Usd.Stage: + """Convert multiple VTK files into one static USD stage (no time samples). + + All meshes from all files are added to the scene at default time. Use this + when multiple files are provided but filenames do not match a time-series + pattern, so they should be combined as a single static scene rather than + time steps. + + Args: + vtk_files: List of VTK file paths + output_usd: Path to output USD file + mesh_name: Base name for meshes (each file/part gets a unique name) + material: Optional material data. If None, uses default. 
+ extract_surface: For .vtu files, whether to extract surface + + Returns: + Usd.Stage: Created USD stage + """ + if len(vtk_files) == 0: + raise ValueError("Empty file list") + + logger.info( + "Converting %d files to static USD (no time samples): %s", + len(vtk_files), + output_usd, + ) + + # Create USD stage once (no time range) + self._create_stage(output_usd) + stage = self.stage + mesh_converter = self.mesh_converter + material_mgr = self.material_mgr + assert stage is not None + assert mesh_converter is not None + assert material_mgr is not None + + if material is not None: + material_mgr.get_or_create_material(material) + + for file_idx, vtk_file in enumerate(vtk_files): + mesh_data = read_vtk_file(vtk_file, extract_surface=extract_surface) + if material is not None: + mesh_data.material_id = material.name + + # Unique base per file to avoid prim path collisions + file_base = f"{mesh_name}_{file_idx}" + + if self.settings.separate_objects_by_connectivity: + parts = split_mesh_data_by_connectivity(mesh_data, mesh_name=file_base) + for _idx, (part_data, base_name) in enumerate(parts): + mesh_path = f"/World/Meshes/{base_name}" + self._ensure_parent_path(mesh_path) + mesh_converter.create_mesh(part_data, mesh_path, bind_material=True) + elif self.settings.separate_objects_by_cell_type: + parts = split_mesh_data_by_cell_type(mesh_data, mesh_name=file_base) + for idx, (part_data, base_name) in enumerate(parts): + prim_name = f"{base_name}_{idx}" + mesh_path = f"/World/Meshes/{prim_name}" + self._ensure_parent_path(mesh_path) + mesh_converter.create_mesh(part_data, mesh_path, bind_material=True) + else: + mesh_path = f"/World/Meshes/{file_base}" + self._ensure_parent_path(mesh_path) + mesh_converter.create_mesh(mesh_data, mesh_path, bind_material=True) + + stage.Save() + logger.info(f"Saved USD file: {output_usd}") + + return stage + def convert_sequence( self, vtk_files: Sequence[str | Path], diff --git a/src/physiomotion4d/vtk_to_usd/mesh_utils.py 
b/src/physiomotion4d/vtk_to_usd/mesh_utils.py index ba9c835..9130b13 100644 --- a/src/physiomotion4d/vtk_to_usd/mesh_utils.py +++ b/src/physiomotion4d/vtk_to_usd/mesh_utils.py @@ -34,15 +34,17 @@ def split_mesh_data_by_cell_type( ) -> list[tuple[MeshData, str]]: """Split MeshData into one mesh per distinct face vertex count (cell type). - Each part is named by cell type (e.g. Triangle, Quad, Hexahedron). The caller - should append a unique number to form final prim names (e.g. Triangle_0, Quad_0). + Each part is named as mesh_name plus the cell type (e.g. MeshName_Triangle, + MeshName_Quad). The caller should append a unique number to form final prim + names (e.g. MeshName_Triangle_0, MeshName_Quad_0). Args: mesh_data: Single mesh that may contain mixed cell types. + mesh_name: Name of the source mesh; used as prefix in returned base_name. Returns: List of (MeshData, base_name) for each cell type present. base_name is - the cell type name (e.g. "Triangle", "Quad"). + mesh_name + "_" + cell type name (e.g. "MeshName_Triangle", "MeshName_Quad"). """ counts = np.asarray(mesh_data.face_vertex_counts, dtype=np.int32) indices = np.asarray(mesh_data.face_vertex_indices, dtype=np.int32) @@ -282,14 +284,15 @@ def split_mesh_data_by_connectivity( """Split MeshData into one mesh per connected component. A connected component is a maximal set of cells that share vertices (directly - or transitively). Components are named object1, object2, etc. + or transitively). Components are named mesh_name_object1, mesh_name_object2, etc. Args: mesh_data: Single mesh that may contain multiple disconnected parts. + mesh_name: Name of the source mesh; used as prefix in returned base_name. Returns: List of (MeshData, base_name) for each component. base_name is - "object1", "object2", ... + mesh_name + "_objectN" (e.g. "MeshName_object1", "MeshName_object2", ...). 
""" counts = np.asarray(mesh_data.face_vertex_counts, dtype=np.int32) indices = np.asarray(mesh_data.face_vertex_indices, dtype=np.int32) diff --git a/src/physiomotion4d/workflow_convert_vtk_to_usd.py b/src/physiomotion4d/workflow_convert_vtk_to_usd.py index b4cb356..92cdf21 100644 --- a/src/physiomotion4d/workflow_convert_vtk_to_usd.py +++ b/src/physiomotion4d/workflow_convert_vtk_to_usd.py @@ -27,7 +27,7 @@ def discover_time_series( paths: list[Path], pattern: str = r"\.t(\d+)\.(vtk|vtp|vtu)$", -) -> list[tuple[int, Path]]: +) -> tuple[list[tuple[int, Path]], bool]: """Discover and sort time-series VTK files by extracted time index. Args: @@ -35,18 +35,22 @@ def discover_time_series( pattern: Regex with one group for time step number (default matches .t123.vtk) Returns: - Sorted list of (time_step, path) tuples. If no match, returns [(0, p) for p in paths]. + (time_series, pattern_matched): Sorted list of (time_step, path) tuples, and + a flag True if at least one path matched the pattern. If no path matches, + time_series is [(0, p) for p in paths] and pattern_matched is False. 
""" time_series: list[tuple[int, Path]] = [] regex = re.compile(pattern, re.IGNORECASE) + pattern_matched = False for p in paths: match = regex.search(p.name) if match: time_series.append((int(match.group(1)), Path(p))) + pattern_matched = True else: time_series.append((0, Path(p))) time_series.sort(key=lambda x: (x[0], str(x[1]))) - return time_series + return time_series, pattern_matched AppearanceKind = Literal["solid", "anatomy", "colormap"] @@ -141,7 +145,7 @@ def run(self) -> str: raise ValueError("vtk_files must not be empty") # Discover time series - time_series = discover_time_series( + time_series, pattern_matched = discover_time_series( self.vtk_files, pattern=self.time_series_pattern ) time_steps = [t for t, _ in time_series] @@ -149,9 +153,16 @@ def run(self) -> str: paths_ordered = [p for _, p in time_series] n_frames = len(paths_ordered) + # Multiple files but no pattern match: treat as static scene (all at time 0, no time samples) + is_static_merge = n_frames > 1 and not pattern_matched + self.log_info("Input: %d file(s), time steps: %s", n_frames, time_steps[:5]) if n_frames > 5: self.log_info(" ... 
and %d more", n_frames - 5) + if is_static_merge: + self.log_info( + "Filenames do not match time-series pattern; outputting static scene (no time samples)" + ) self.log_info("Output: %s", self.output_usd) settings = ConversionSettings( @@ -163,7 +174,7 @@ def run(self) -> str: separate_objects_by_cell_type=self.separate_by_cell_type, up_axis=self.up_axis, times_per_second=self.times_per_second, - use_time_samples=True, + use_time_samples=not is_static_merge, ) converter = VTKToUSDConverter(settings) @@ -181,13 +192,22 @@ def run(self) -> str: material=default_material, extract_surface=self.extract_surface, ) + elif is_static_merge: + stage = converter.convert_files_static( + paths_ordered, + self.output_usd, + mesh_name=self.mesh_name, + material=default_material, + extract_surface=self.extract_surface, + ) else: + # Load mesh sequence once for both validation and conversion (avoids double I/O) + mesh_sequence = [ + read_vtk_file(p, extract_surface=self.extract_surface) + for p in paths_ordered + ] # Optional: validate topology consistency across frames try: - mesh_sequence = [ - read_vtk_file(p, extract_surface=self.extract_surface) - for p in paths_ordered - ] report = validate_time_series_topology(mesh_sequence) if report.get("topology_changes"): self.log_warning( @@ -197,13 +217,12 @@ def run(self) -> str: except Exception as e: self.log_debug("Time series validation skipped: %s", e) - stage = converter.convert_sequence( - paths_ordered, + stage = converter.convert_mesh_data_sequence( + mesh_sequence, self.output_usd, mesh_name=self.mesh_name, time_codes=time_codes, material=default_material, - extract_surface=self.extract_surface, ) # Post-process: apply chosen appearance to all meshes under /World/Meshes @@ -215,6 +234,9 @@ def run(self) -> str: self.log_warning("No mesh prims found under /World/Meshes") return str(self.output_usd) + # Static merge has no time samples; pass None so only default time is used + appearance_time_codes = None if is_static_merge 
else time_codes + self.log_info( "Applying appearance '%s' to %d mesh(es)", self.appearance, len(mesh_paths) ) @@ -225,7 +247,7 @@ def run(self) -> str: str(self.output_usd), mesh_path, self.solid_color, - time_codes=time_codes, + time_codes=appearance_time_codes, bind_vertex_color_material=True, ) @@ -249,7 +271,6 @@ def run(self) -> str: self.log_warning( "No color primvar found for %s; skip colormap", mesh_path ) - primvar = self.colormap_primvar continue self.log_info( "Applying colormap to %s from primvar %s", mesh_path, primvar From 37a40f2d2a23f696356dfa1d4c37294b55caae59 Mon Sep 17 00:00:00 2001 From: Stephen Aylward Date: Wed, 25 Feb 2026 21:24:22 -0500 Subject: [PATCH 08/10] STYLE: ruff formatting fix --- src/physiomotion4d/cli/convert_vtk_to_usd.py | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/src/physiomotion4d/cli/convert_vtk_to_usd.py b/src/physiomotion4d/cli/convert_vtk_to_usd.py index 6881a74..9276203 100644 --- a/src/physiomotion4d/cli/convert_vtk_to_usd.py +++ b/src/physiomotion4d/cli/convert_vtk_to_usd.py @@ -188,12 +188,18 @@ def main() -> int: if isinstance(args.color, (list, tuple)): components = [float(v) for v in args.color] if len(components) != 3: - raise ValueError("Color must have exactly three components (R G B).") + raise ValueError( + "Color must have exactly three components (R G B)." + ) # Interpret either as normalized [0, 1] or byte [0, 255] values, but do not mix scales. if all(0.0 <= v <= 1.0 for v in components): - solid_color = tuple(components) + solid_color = (components[0], components[1], components[2]) elif all(0.0 <= v <= 255.0 for v in components): - solid_color = tuple(v / 255.0 for v in components) + solid_color = ( + components[0] / 255.0, + components[1] / 255.0, + components[2] / 255.0, + ) else: raise ValueError( "Color values must all be in [0, 1] or all in [0, 255]." 
From 2813a6b233e9ff406dc25b79606beb6ff30ce04f Mon Sep 17 00:00:00 2001 From: Stephen Aylward Date: Fri, 27 Feb 2026 12:04:25 -0500 Subject: [PATCH 09/10] ENH: Copilot doc suggestions. --- .pre-commit-config.yaml | 10 + .../convert_chop_alterra_valve_to_usd.ipynb | 434 ++++++++ ... => convert_chop_tpv25_valve_to_usd.ipynb} | 0 .../1-input_meshes_to_input_surfaces.ipynb | 102 -- ...2-input_surfaces_to_surfaces_aligned.ipynb | 478 --------- .../3-registration_based_correspondence.ipynb | 948 ------------------ ...ces_aligned_correspond_to_pca_inputs.ipynb | 948 ------------------ .../5-compute_pca_model.ipynb | 102 -- .../0-download_and_convert_4d_to_3d.ipynb | 2 +- .../1-register_images.ipynb | 2 +- .../2-generate_segmentation.ipynb | 104 +- .../4-merge_dynamic_and_static_usd.ipynb | 2 +- .../test_vista3d_class.ipynb | 368 ------- .../test_vista3d_inMem.ipynb | 368 ------- .../heart_model_to_model_icp_itk.ipynb | 102 -- ...eart_model_to_model_registration_pca.ipynb | 196 ---- .../heart_model_to_patient.ipynb | 196 ---- src/physiomotion4d/register_images_greedy.py | 3 +- src/physiomotion4d/usd_tools.py | 10 +- src/physiomotion4d/vtk_to_usd/converter.py | 15 +- src/physiomotion4d/vtk_to_usd/mesh_utils.py | 2 +- .../workflow_convert_vtk_to_usd.py | 69 +- tests/test_register_images_greedy.py | 4 + utils/prepare_notebooks_for_commit.py | 28 +- 24 files changed, 528 insertions(+), 3965 deletions(-) create mode 100644 experiments/Convert_VTK_To_USD/convert_chop_alterra_valve_to_usd.ipynb rename experiments/Convert_VTK_To_USD/{convert_chop_valve_to_usd.ipynb => convert_chop_tpv25_valve_to_usd.ipynb} (100%) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index a4de808..c999f0d 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -27,6 +27,16 @@ repos: pass_filenames: false files: ^src/ + # Strip notebook outputs and widget state when notebooks are committed + - repo: local + hooks: + - id: prepare-notebooks-for-commit + name: Prepare notebooks 
for commit (strip outputs and widget state) + entry: py utils/prepare_notebooks_for_commit.py + language: system + pass_filenames: false + files: \.ipynb$ + # Local tests (optional - can be disabled if too slow) - repo: local hooks: diff --git a/experiments/Convert_VTK_To_USD/convert_chop_alterra_valve_to_usd.ipynb b/experiments/Convert_VTK_To_USD/convert_chop_alterra_valve_to_usd.ipynb new file mode 100644 index 0000000..0288c61 --- /dev/null +++ b/experiments/Convert_VTK_To_USD/convert_chop_alterra_valve_to_usd.ipynb @@ -0,0 +1,434 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Cardiac Valve 4D Time-Series Conversion to USD\n", + "\n", + "This notebook demonstrates converting time-varying cardiac valve simulation data from VTK format to animated USD.\n", + "\n", + "## Dataset: CHOP-Valve4D (Alterra)\n", + "\n", + "One cardiac valve model with time-varying geometry:\n", + "\n", + "- **Alterra**: 265 time steps (cardiac cycle simulation)\n", + "\n", + "This dataset represents 4D (3D + time) simulation of a prosthetic heart valve during a cardiac cycle.\n", + "\n", + "## Goals\n", + "\n", + "1. Load and inspect time-varying VTK data\n", + "2. Convert entire time series to animated USD\n", + "3. Handle large datasets efficiently\n", + "4. Preserve all simulation data as USD primvars\n", + "5. Create multiple variations (full resolution, subsampled, etc.)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from pathlib import Path\n", + "import re\n", + "import time as time_module\n", + "\n", + "import shutil" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Configuration\n", + "\n", + "Control which time series conversions to compute." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Configuration: Control which conversions to run\n", + "# Set to True to compute full time series (all frames) - takes longer\n", + "# Set to False to only compute subsampled time series (faster, for preview)\n", + "COMPUTE_FULL_TIME_SERIES = True # Default: only subsampled\n", + "\n", + "print(\"Time Series Configuration:\")\n", + "print(f\" - Compute Full Time Series: {COMPUTE_FULL_TIME_SERIES}\")\n", + "print(\" - Compute Subsampled Time Series: Always enabled\")\n", + "print()\n", + "if not COMPUTE_FULL_TIME_SERIES:\n", + " print(\"⚠️ Full time series conversion is DISABLED for faster execution.\")\n", + " print(\" Set COMPUTE_FULL_TIME_SERIES = True to enable full conversion.\")\n", + "else:\n", + " print(\"✓ Full time series conversion is ENABLED (this will take longer).\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import logging\n", + "import numpy as np\n", + "\n", + "# Import the vtk_to_usd library\n", + "from physiomotion4d.vtk_to_usd import (\n", + " VTKToUSDConverter,\n", + " ConversionSettings,\n", + " MaterialData,\n", + " cell_type_name_for_vertex_count,\n", + " read_vtk_file,\n", + " validate_time_series_topology,\n", + ")\n", + "\n", + "# Import USDTools for post-processing colormap\n", + "from physiomotion4d.usd_tools import USDTools\n", + "from physiomotion4d.usd_anatomy_tools import USDAnatomyTools\n", + "\n", + "# Configure logging\n", + "logging.basicConfig(level=logging.INFO, format=\"%(levelname)s: %(message)s\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 1. 
Discover and Organize Time-Series Files" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Define data directories (Alterra only)\n", + "data_dir = Path.cwd().parent.parent / \"data\" / \"CHOP-Valve4D\"\n", + "Alterra_dir = data_dir / \"Alterra\"\n", + "output_dir = Path.cwd() / \"output\" / \"valve4d-alterra\"\n", + "output_dir.mkdir(parents=True, exist_ok=True)\n", + "\n", + "print(f\"Data directory: {data_dir}\")\n", + "print(f\"Output directory: {output_dir}\")\n", + "print(\"\\nDirectory status:\")\n", + "print(f\" Alterra: {'✓' if Alterra_dir.exists() else '✗'} {Alterra_dir}\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def discover_time_series(directory, pattern=r\"\\.t(\\d+)\\.vtk$\"):\n", + " \"\"\"Discover and sort time-series VTK files.\n", + "\n", + " Args:\n", + " directory: Directory containing VTK files\n", + " pattern: Regex pattern to extract time step number\n", + "\n", + " Returns:\n", + " list: Sorted list of (time_step, file_path) tuples\n", + " \"\"\"\n", + " vtk_files = list(Path(directory).glob(\"*.vtk\"))\n", + "\n", + " # Extract time step numbers and pair with files\n", + " time_series = []\n", + " for vtk_file in vtk_files:\n", + " match = re.search(pattern, vtk_file.name)\n", + " if match:\n", + " time_step = int(match.group(1))\n", + " time_series.append((time_step, vtk_file))\n", + "\n", + " # Sort by time step\n", + " time_series.sort(key=lambda x: x[0])\n", + "\n", + " return time_series\n", + "\n", + "\n", + "# Discover Alterra time series\n", + "Alterra_series = discover_time_series(Alterra_dir)\n", + "\n", + "print(\"=\" * 60)\n", + "print(\"Time-Series Discovery (Alterra)\")\n", + "print(\"=\" * 60)\n", + "print(\"\\nAlterra:\")\n", + "print(f\" Files found: {len(Alterra_series)}\")\n", + "if Alterra_series:\n", + " print(f\" Time range: t{Alterra_series[0][0]} to 
t{Alterra_series[-1][0]}\")\n", + " print(f\" First file: {Alterra_series[0][1].name}\")\n", + " print(f\" Last file: {Alterra_series[-1][1].name}\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 2. Inspect First Frame\n", + "\n", + "Examine the first time step to understand the data structure." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Read first frame of Alterra\n", + "if Alterra_series:\n", + " print(\"=\" * 60)\n", + " print(\"Alterra - First Frame Analysis\")\n", + " print(\"=\" * 60)\n", + "\n", + " first_file = Alterra_series[0][1]\n", + " mesh_data = read_vtk_file(first_file, extract_surface=True)\n", + "\n", + " print(f\"\\nFile: {first_file.name}\")\n", + " print(\"\\nGeometry:\")\n", + " print(f\" Points: {len(mesh_data.points):,}\")\n", + " print(f\" Faces: {len(mesh_data.face_vertex_counts):,}\")\n", + " print(f\" Normals: {'Yes' if mesh_data.normals is not None else 'No'}\")\n", + " print(f\" Colors: {'Yes' if mesh_data.colors is not None else 'No'}\")\n", + "\n", + " # Bounding box\n", + " bbox_min = np.min(mesh_data.points, axis=0)\n", + " bbox_max = np.max(mesh_data.points, axis=0)\n", + " bbox_size = bbox_max - bbox_min\n", + " print(\"\\nBounding Box:\")\n", + " print(f\" Min: [{bbox_min[0]:.3f}, {bbox_min[1]:.3f}, {bbox_min[2]:.3f}]\")\n", + " print(f\" Max: [{bbox_max[0]:.3f}, {bbox_max[1]:.3f}, {bbox_max[2]:.3f}]\")\n", + " print(f\" Size: [{bbox_size[0]:.3f}, {bbox_size[1]:.3f}, {bbox_size[2]:.3f}]\")\n", + "\n", + " print(f\"\\nData Arrays ({len(mesh_data.generic_arrays)}):\")\n", + " for i, array in enumerate(mesh_data.generic_arrays, 1):\n", + " print(f\" {i}. 
{array.name}:\")\n", + " print(f\" - Type: {array.data_type.value}\")\n", + " print(f\" - Components: {array.num_components}\")\n", + " print(f\" - Interpolation: {array.interpolation}\")\n", + " print(f\" - Elements: {len(array.data):,}\")\n", + " if array.data.size > 0:\n", + " print(f\" - Range: [{np.min(array.data):.6f}, {np.max(array.data):.6f}]\")\n", + "\n", + " # Cell types (face vertex count) - TPV data has multiple cell types (triangle, quad, etc.)\n", + " unique_counts, num_each = np.unique(\n", + " mesh_data.face_vertex_counts, return_counts=True\n", + " )\n", + " print(\"\\nCell types (faces by vertex count):\")\n", + " for u, n in zip(unique_counts, num_each):\n", + " name = cell_type_name_for_vertex_count(int(u))\n", + " print(f\" {name} ({u} vertices): {n:,} faces\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Note: Helper functions removed - now using USDTools for primvar inspection and colorization\n", + "# The workflow has changed to: convert to USD first, then apply colormap post-processing\n", + "\n", + "# Configuration: choose colormap for visualization\n", + "DEFAULT_COLORMAP = \"viridis\" # matplotlib colormap name\n", + "\n", + "# Enable automatic colorization (will pick strain/stress primvars if available)\n", + "ENABLE_AUTO_COLORIZATION = True\n", + "\n", + "print(\"Colorization will be applied after USD conversion using USDTools methods\")\n", + "print(\" - USDTools.list_mesh_primvars() for inspection\")\n", + "print(\" - USDTools.pick_color_primvar() for selection\")\n", + "print(\" - USDTools.apply_colormap_from_primvar() for coloring\")\n", + "print(f\" - Colormap: {DEFAULT_COLORMAP}\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "## 2. 
Configure Conversion Settings\n", + "\n", + "# Create converter settings\n", + "settings = ConversionSettings(\n", + " triangulate_meshes=True,\n", + " compute_normals=False, # Use existing normals if available\n", + " preserve_point_arrays=True,\n", + " preserve_cell_arrays=True,\n", + " separate_objects_by_cell_type=False,\n", + " separate_objects_by_connectivity=True,\n", + " up_axis=\"Y\",\n", + " times_per_second=60.0, # 60 FPS for smooth animation\n", + " use_time_samples=True,\n", + ")\n", + "\n", + "print(\"Conversion settings configured\")\n", + "print(f\" - Triangulate: {settings.triangulate_meshes}\")\n", + "print(f\" - Separate objects by cell type: {settings.separate_objects_by_cell_type}\")\n", + "print(f\" - FPS: {settings.times_per_second}\")\n", + "print(f\" - Up axis: {settings.up_axis}\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 3. Convert Full Time Series - TPV25" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Create material for Alterra\n", + "# Note: Vertex colors will be applied post-conversion by USDTools\n", + "Alterra_material = MaterialData(\n", + " name=\"Alterra_valve\",\n", + " diffuse_color=(0.85, 0.4, 0.4),\n", + " roughness=0.4,\n", + " metallic=0.0,\n", + " use_vertex_colors=False, # USDTools will bind vertex color material during colorization\n", + ")\n", + "\n", + "print(\"=\" * 60)\n", + "print(\"Converting Alterra Time Series\")\n", + "print(\"=\" * 60)\n", + "print(f\"Dataset: {len(Alterra_series)} frames\")\n", + "\n", + "# Convert Alterra (full resolution)\n", + "if COMPUTE_FULL_TIME_SERIES and Alterra_series:\n", + " converter = VTKToUSDConverter(settings)\n", + "\n", + " Alterra_files = [file_path for _, file_path in Alterra_series]\n", + " Alterra_times = [float(time_step) for time_step, _ in Alterra_series]\n", + "\n", + " output_usd = output_dir / \"Alterra_full.usd\"\n", + "\n", + " print(f\"\\nConverting to: 
{output_usd}\")\n", + " print(f\"Time codes: {Alterra_times[0]:.1f} to {Alterra_times[-1]:.1f}\")\n", + " print(\"\\nThis may take several minutes...\\n\")\n", + "\n", + " start_time = time_module.time()\n", + "\n", + " # Read MeshData\n", + " mesh_data_sequence = [read_vtk_file(f, extract_surface=True) for f in Alterra_files]\n", + "\n", + " # Validate topology consistency across time series\n", + " validation_report = validate_time_series_topology(\n", + " mesh_data_sequence, filenames=Alterra_files\n", + " )\n", + " if not validation_report[\"is_consistent\"]:\n", + " print(\n", + " f\"Warning: Found {len(validation_report['warnings'])} topology/primvar issues\"\n", + " )\n", + " if validation_report[\"topology_changes\"]:\n", + " print(\n", + " f\" Topology changes in {len(validation_report['topology_changes'])} frames\"\n", + " )\n", + "\n", + " # Convert to USD (preserves all primvars from VTK)\n", + " stage = converter.convert_mesh_data_sequence(\n", + " mesh_data_sequence=mesh_data_sequence,\n", + " output_usd=output_usd,\n", + " mesh_name=\"AlterraValve\",\n", + " time_codes=Alterra_times,\n", + " material=Alterra_material,\n", + " )\n", + "\n", + " shutil.copy(output_usd, output_usd.with_suffix(\".save.usd\"))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "if COMPUTE_FULL_TIME_SERIES and Alterra_series:\n", + " # Post-process: apply colormap visualization using USDTools\n", + " if ENABLE_AUTO_COLORIZATION:\n", + " usd_tools = USDTools()\n", + " usd_anatomy_tools = USDAnatomyTools(stage)\n", + " if settings.separate_objects_by_connectivity is True:\n", + " mesh_path1 = \"/World/Meshes/AlterraValve_object4\"\n", + " mesh_path2 = \"/World/Meshes/AlterraValve_object3\"\n", + " elif settings.separate_objects_by_cell_type is True:\n", + " mesh_path1 = \"/World/Meshes/AlterraValve_triangle1\"\n", + " mesh_path2 = \"/World/Meshes/AlterraValve_triangle1\"\n", + " else:\n", + " mesh_path1 = 
\"/World/Meshes/AlterraValve\"\n", + " mesh_path2 = None\n", + "\n", + " # Inspect and select primvar for coloring\n", + " primvars = usd_tools.list_mesh_primvars(str(output_usd), mesh_path1)\n", + " print(primvars)\n", + " color_primvar = usd_tools.pick_color_primvar(\n", + " primvars, keywords=(\"strain\", \"stress\")\n", + " )\n", + "\n", + " if color_primvar:\n", + " print(f\"\\nApplying colormap to '{color_primvar}'\")\n", + " usd_tools.apply_colormap_from_primvar(\n", + " str(output_usd),\n", + " mesh_path1,\n", + " color_primvar,\n", + " intensity_range=(0, 300),\n", + " cmap=\"hot\",\n", + " # use_sigmoid_scale=True,\n", + " bind_vertex_color_material=True,\n", + " )\n", + " if mesh_path2 is not None:\n", + " mesh_prim = stage.GetPrimAtPath(mesh_path2)\n", + " usd_anatomy_tools.apply_anatomy_material_to_prim(\n", + " mesh_prim, usd_anatomy_tools.bone_params\n", + " )\n", + "\n", + " if not validation_report[\"is_consistent\"]:\n", + " print(\n", + " f\"Warning: Found {len(validation_report['warnings'])} topology/primvar issues\"\n", + " )\n", + " if validation_report[\"topology_changes\"]:\n", + " print(\"\\nNo strain/stress primvar found for coloring\")\n", + "\n", + " print(f\" Size: {output_usd.stat().st_size / (1024 * 1024):.2f} MB\")\n", + " print(f\" Time range: {stage.GetStartTimeCode()} - {stage.GetEndTimeCode()}\")\n", + " print(\n", + " f\" Duration: {(stage.GetEndTimeCode() - stage.GetStartTimeCode()) / settings.times_per_second:.2f} seconds @ {settings.times_per_second} FPS\"\n", + " )\n", + "elif not COMPUTE_FULL_TIME_SERIES:\n", + " print(\"⏭️ Skipping Alterra full time series (COMPUTE_FULL_TIME_SERIES = False)\")" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "venv", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + 
"pygments_lexer": "ipython3", + "version": "3.10.11" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} \ No newline at end of file diff --git a/experiments/Convert_VTK_To_USD/convert_chop_valve_to_usd.ipynb b/experiments/Convert_VTK_To_USD/convert_chop_tpv25_valve_to_usd.ipynb similarity index 100% rename from experiments/Convert_VTK_To_USD/convert_chop_valve_to_usd.ipynb rename to experiments/Convert_VTK_To_USD/convert_chop_tpv25_valve_to_usd.ipynb diff --git a/experiments/Heart-Create_Statistical_Model/1-input_meshes_to_input_surfaces.ipynb b/experiments/Heart-Create_Statistical_Model/1-input_meshes_to_input_surfaces.ipynb index 3870e17..b2d4dd0 100644 --- a/experiments/Heart-Create_Statistical_Model/1-input_meshes_to_input_surfaces.ipynb +++ b/experiments/Heart-Create_Statistical_Model/1-input_meshes_to_input_surfaces.ipynb @@ -127,108 +127,6 @@ "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.10.11" - }, - "widgets": { - "application/vnd.jupyter.widget-state+json": { - "state": { - "72a09f7280cb459e8c5264ddb14bef6d": { - "model_module": "@jupyter-widgets/base", - "model_module_version": "2.0.0", - "model_name": "LayoutModel", - "state": { - "_model_module": "@jupyter-widgets/base", - "_model_module_version": "2.0.0", - "_model_name": "LayoutModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "2.0.0", - "_view_name": "LayoutView", - "align_content": null, - "align_items": null, - "align_self": null, - "border_bottom": null, - "border_left": null, - "border_right": null, - "border_top": null, - "bottom": null, - "display": null, - "flex": null, - "flex_flow": null, - "grid_area": null, - "grid_auto_columns": null, - "grid_auto_flow": null, - "grid_auto_rows": null, - "grid_column": null, - "grid_gap": null, - "grid_row": null, - "grid_template_areas": null, - "grid_template_columns": null, - "grid_template_rows": null, - "height": null, - "justify_content": null, - "justify_items": null, 
- "left": null, - "margin": null, - "max_height": null, - "max_width": null, - "min_height": null, - "min_width": null, - "object_fit": null, - "object_position": null, - "order": null, - "overflow": null, - "padding": null, - "right": null, - "top": null, - "visibility": null, - "width": null - } - }, - "a9362a50103a4743a2cff8555a53d5b2": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "2.0.0", - "model_name": "HTMLStyleModel", - "state": { - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "2.0.0", - "_model_name": "HTMLStyleModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "2.0.0", - "_view_name": "StyleView", - "background": null, - "description_width": "", - "font_size": null, - "text_color": null - } - }, - "ee42883b11f847a38b6393cc311ba765": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "2.0.0", - "model_name": "HTMLModel", - "state": { - "_dom_classes": [], - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "2.0.0", - "_model_name": "HTMLModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/controls", - "_view_module_version": "2.0.0", - "_view_name": "HTMLView", - "description": "", - "description_allow_html": false, - "layout": "IPY_MODEL_72a09f7280cb459e8c5264ddb14bef6d", - "placeholder": "​", - "style": "IPY_MODEL_a9362a50103a4743a2cff8555a53d5b2", - "tabbable": null, - "tooltip": null, - "value": "" - } - } - }, - "version_major": 2, - "version_minor": 0 - } } }, "nbformat": 4, diff --git a/experiments/Heart-Create_Statistical_Model/2-input_surfaces_to_surfaces_aligned.ipynb b/experiments/Heart-Create_Statistical_Model/2-input_surfaces_to_surfaces_aligned.ipynb index 4a3abc4..ba16248 100644 --- a/experiments/Heart-Create_Statistical_Model/2-input_surfaces_to_surfaces_aligned.ipynb +++ b/experiments/Heart-Create_Statistical_Model/2-input_surfaces_to_surfaces_aligned.ipynb @@ -364,484 
+364,6 @@ "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.10.11" - }, - "widgets": { - "application/vnd.jupyter.widget-state+json": { - "state": { - "2a107b02312643d3a6c08bebd65c7dee": { - "model_module": "@jupyter-widgets/base", - "model_module_version": "2.0.0", - "model_name": "LayoutModel", - "state": { - "_model_module": "@jupyter-widgets/base", - "_model_module_version": "2.0.0", - "_model_name": "LayoutModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "2.0.0", - "_view_name": "LayoutView", - "align_content": null, - "align_items": null, - "align_self": null, - "border_bottom": null, - "border_left": null, - "border_right": null, - "border_top": null, - "bottom": null, - "display": null, - "flex": null, - "flex_flow": null, - "grid_area": null, - "grid_auto_columns": null, - "grid_auto_flow": null, - "grid_auto_rows": null, - "grid_column": null, - "grid_gap": null, - "grid_row": null, - "grid_template_areas": null, - "grid_template_columns": null, - "grid_template_rows": null, - "height": null, - "justify_content": null, - "justify_items": null, - "left": null, - "margin": null, - "max_height": null, - "max_width": null, - "min_height": null, - "min_width": null, - "object_fit": null, - "object_position": null, - "order": null, - "overflow": null, - "padding": null, - "right": null, - "top": null, - "visibility": null, - "width": null - } - }, - "2db1769de6ee4cefa789c1dad5f4b788": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "2.0.0", - "model_name": "HTMLStyleModel", - "state": { - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "2.0.0", - "_model_name": "HTMLStyleModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "2.0.0", - "_view_name": "StyleView", - "background": null, - "description_width": "", - "font_size": null, - "text_color": null - } - }, - 
"34b2817742c34451817083f7686ea0c2": { - "model_module": "@jupyter-widgets/base", - "model_module_version": "2.0.0", - "model_name": "LayoutModel", - "state": { - "_model_module": "@jupyter-widgets/base", - "_model_module_version": "2.0.0", - "_model_name": "LayoutModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "2.0.0", - "_view_name": "LayoutView", - "align_content": null, - "align_items": null, - "align_self": null, - "border_bottom": null, - "border_left": null, - "border_right": null, - "border_top": null, - "bottom": null, - "display": null, - "flex": null, - "flex_flow": null, - "grid_area": null, - "grid_auto_columns": null, - "grid_auto_flow": null, - "grid_auto_rows": null, - "grid_column": null, - "grid_gap": null, - "grid_row": null, - "grid_template_areas": null, - "grid_template_columns": null, - "grid_template_rows": null, - "height": null, - "justify_content": null, - "justify_items": null, - "left": null, - "margin": null, - "max_height": null, - "max_width": null, - "min_height": null, - "min_width": null, - "object_fit": null, - "object_position": null, - "order": null, - "overflow": null, - "padding": null, - "right": null, - "top": null, - "visibility": null, - "width": null - } - }, - "54c874f02b5d4095b217608ffcdce7d1": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "2.0.0", - "model_name": "HTMLModel", - "state": { - "_dom_classes": [], - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "2.0.0", - "_model_name": "HTMLModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/controls", - "_view_module_version": "2.0.0", - "_view_name": "HTMLView", - "description": "", - "description_allow_html": false, - "layout": "IPY_MODEL_34b2817742c34451817083f7686ea0c2", - "placeholder": "​", - "style": "IPY_MODEL_b3b7d00ef3734c00b6fd21f6f0d2ad6f", - "tabbable": null, - "tooltip": null, - "value": "" - } - }, - "66c14b67ca204e03acd91edf9fe8ca17": 
{ - "model_module": "@jupyter-widgets/base", - "model_module_version": "2.0.0", - "model_name": "LayoutModel", - "state": { - "_model_module": "@jupyter-widgets/base", - "_model_module_version": "2.0.0", - "_model_name": "LayoutModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "2.0.0", - "_view_name": "LayoutView", - "align_content": null, - "align_items": null, - "align_self": null, - "border_bottom": null, - "border_left": null, - "border_right": null, - "border_top": null, - "bottom": null, - "display": null, - "flex": null, - "flex_flow": null, - "grid_area": null, - "grid_auto_columns": null, - "grid_auto_flow": null, - "grid_auto_rows": null, - "grid_column": null, - "grid_gap": null, - "grid_row": null, - "grid_template_areas": null, - "grid_template_columns": null, - "grid_template_rows": null, - "height": null, - "justify_content": null, - "justify_items": null, - "left": null, - "margin": null, - "max_height": null, - "max_width": null, - "min_height": null, - "min_width": null, - "object_fit": null, - "object_position": null, - "order": null, - "overflow": null, - "padding": null, - "right": null, - "top": null, - "visibility": null, - "width": null - } - }, - "67eca292a6a64066bd8bbd7a00d2399d": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "2.0.0", - "model_name": "HTMLStyleModel", - "state": { - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "2.0.0", - "_model_name": "HTMLStyleModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "2.0.0", - "_view_name": "StyleView", - "background": null, - "description_width": "", - "font_size": null, - "text_color": null - } - }, - "6a158880e2db43b791507435a478253d": { - "model_module": "@jupyter-widgets/base", - "model_module_version": "2.0.0", - "model_name": "LayoutModel", - "state": { - "_model_module": "@jupyter-widgets/base", - "_model_module_version": "2.0.0", - 
"_model_name": "LayoutModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "2.0.0", - "_view_name": "LayoutView", - "align_content": null, - "align_items": null, - "align_self": null, - "border_bottom": null, - "border_left": null, - "border_right": null, - "border_top": null, - "bottom": null, - "display": null, - "flex": null, - "flex_flow": null, - "grid_area": null, - "grid_auto_columns": null, - "grid_auto_flow": null, - "grid_auto_rows": null, - "grid_column": null, - "grid_gap": null, - "grid_row": null, - "grid_template_areas": null, - "grid_template_columns": null, - "grid_template_rows": null, - "height": null, - "justify_content": null, - "justify_items": null, - "left": null, - "margin": null, - "max_height": null, - "max_width": null, - "min_height": null, - "min_width": null, - "object_fit": null, - "object_position": null, - "order": null, - "overflow": null, - "padding": null, - "right": null, - "top": null, - "visibility": null, - "width": null - } - }, - "6ae99451028840d99431c3bae3422a85": { - "model_module": "@jupyter-widgets/base", - "model_module_version": "2.0.0", - "model_name": "LayoutModel", - "state": { - "_model_module": "@jupyter-widgets/base", - "_model_module_version": "2.0.0", - "_model_name": "LayoutModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "2.0.0", - "_view_name": "LayoutView", - "align_content": null, - "align_items": null, - "align_self": null, - "border_bottom": null, - "border_left": null, - "border_right": null, - "border_top": null, - "bottom": null, - "display": null, - "flex": null, - "flex_flow": null, - "grid_area": null, - "grid_auto_columns": null, - "grid_auto_flow": null, - "grid_auto_rows": null, - "grid_column": null, - "grid_gap": null, - "grid_row": null, - "grid_template_areas": null, - "grid_template_columns": null, - "grid_template_rows": null, - "height": null, - "justify_content": null, - "justify_items": 
null, - "left": null, - "margin": null, - "max_height": null, - "max_width": null, - "min_height": null, - "min_width": null, - "object_fit": null, - "object_position": null, - "order": null, - "overflow": null, - "padding": null, - "right": null, - "top": null, - "visibility": null, - "width": null - } - }, - "76b28ad52f984e75876c9d6d3629dd22": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "2.0.0", - "model_name": "HTMLModel", - "state": { - "_dom_classes": [], - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "2.0.0", - "_model_name": "HTMLModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/controls", - "_view_module_version": "2.0.0", - "_view_name": "HTMLView", - "description": "", - "description_allow_html": false, - "layout": "IPY_MODEL_2a107b02312643d3a6c08bebd65c7dee", - "placeholder": "​", - "style": "IPY_MODEL_2db1769de6ee4cefa789c1dad5f4b788", - "tabbable": null, - "tooltip": null, - "value": "" - } - }, - "7c479dadb5b84f36b2abc1360ef51fe3": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "2.0.0", - "model_name": "HTMLModel", - "state": { - "_dom_classes": [], - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "2.0.0", - "_model_name": "HTMLModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/controls", - "_view_module_version": "2.0.0", - "_view_name": "HTMLView", - "description": "", - "description_allow_html": false, - "layout": "IPY_MODEL_6ae99451028840d99431c3bae3422a85", - "placeholder": "​", - "style": "IPY_MODEL_899ecec8ed4f4aa8916299a0cc531f94", - "tabbable": null, - "tooltip": null, - "value": "" - } - }, - "850d698d2b40403abf91193e075c4715": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "2.0.0", - "model_name": "HTMLModel", - "state": { - "_dom_classes": [], - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "2.0.0", - "_model_name": "HTMLModel", - 
"_view_count": null, - "_view_module": "@jupyter-widgets/controls", - "_view_module_version": "2.0.0", - "_view_name": "HTMLView", - "description": "", - "description_allow_html": false, - "layout": "IPY_MODEL_66c14b67ca204e03acd91edf9fe8ca17", - "placeholder": "​", - "style": "IPY_MODEL_cd546a9f51664524a2193744cf5d65ef", - "tabbable": null, - "tooltip": null, - "value": "" - } - }, - "899ecec8ed4f4aa8916299a0cc531f94": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "2.0.0", - "model_name": "HTMLStyleModel", - "state": { - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "2.0.0", - "_model_name": "HTMLStyleModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "2.0.0", - "_view_name": "StyleView", - "background": null, - "description_width": "", - "font_size": null, - "text_color": null - } - }, - "b3b7d00ef3734c00b6fd21f6f0d2ad6f": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "2.0.0", - "model_name": "HTMLStyleModel", - "state": { - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "2.0.0", - "_model_name": "HTMLStyleModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "2.0.0", - "_view_name": "StyleView", - "background": null, - "description_width": "", - "font_size": null, - "text_color": null - } - }, - "cd546a9f51664524a2193744cf5d65ef": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "2.0.0", - "model_name": "HTMLStyleModel", - "state": { - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "2.0.0", - "_model_name": "HTMLStyleModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "2.0.0", - "_view_name": "StyleView", - "background": null, - "description_width": "", - "font_size": null, - "text_color": null - } - }, - "e59c15679df4488cb6fe077abd8303f3": { - "model_module": 
"@jupyter-widgets/controls", - "model_module_version": "2.0.0", - "model_name": "HTMLModel", - "state": { - "_dom_classes": [], - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "2.0.0", - "_model_name": "HTMLModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/controls", - "_view_module_version": "2.0.0", - "_view_name": "HTMLView", - "description": "", - "description_allow_html": false, - "layout": "IPY_MODEL_6a158880e2db43b791507435a478253d", - "placeholder": "​", - "style": "IPY_MODEL_67eca292a6a64066bd8bbd7a00d2399d", - "tabbable": null, - "tooltip": null, - "value": "" - } - } - }, - "version_major": 2, - "version_minor": 0 - } } }, "nbformat": 4, diff --git a/experiments/Heart-Create_Statistical_Model/3-registration_based_correspondence.ipynb b/experiments/Heart-Create_Statistical_Model/3-registration_based_correspondence.ipynb index 1c4744e..c3d40e1 100644 --- a/experiments/Heart-Create_Statistical_Model/3-registration_based_correspondence.ipynb +++ b/experiments/Heart-Create_Statistical_Model/3-registration_based_correspondence.ipynb @@ -440,954 +440,6 @@ "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.10.11" - }, - "widgets": { - "application/vnd.jupyter.widget-state+json": { - "state": { - "018c352e2d5349649c311a057774deb6": { - "model_module": "@jupyter-widgets/base", - "model_module_version": "2.0.0", - "model_name": "LayoutModel", - "state": { - "_model_module": "@jupyter-widgets/base", - "_model_module_version": "2.0.0", - "_model_name": "LayoutModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "2.0.0", - "_view_name": "LayoutView", - "align_content": null, - "align_items": null, - "align_self": null, - "border_bottom": null, - "border_left": null, - "border_right": null, - "border_top": null, - "bottom": null, - "display": null, - "flex": null, - "flex_flow": null, - "grid_area": null, - "grid_auto_columns": null, - "grid_auto_flow": 
null, - "grid_auto_rows": null, - "grid_column": null, - "grid_gap": null, - "grid_row": null, - "grid_template_areas": null, - "grid_template_columns": null, - "grid_template_rows": null, - "height": null, - "justify_content": null, - "justify_items": null, - "left": null, - "margin": null, - "max_height": null, - "max_width": null, - "min_height": null, - "min_width": null, - "object_fit": null, - "object_position": null, - "order": null, - "overflow": null, - "padding": null, - "right": null, - "top": null, - "visibility": null, - "width": null - } - }, - "0fddf96d6c284d279cbae4de8316eea4": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "2.0.0", - "model_name": "HTMLModel", - "state": { - "_dom_classes": [], - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "2.0.0", - "_model_name": "HTMLModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/controls", - "_view_module_version": "2.0.0", - "_view_name": "HTMLView", - "description": "", - "description_allow_html": false, - "layout": "IPY_MODEL_16e9aa253b3d4e07bfcc413ac296b468", - "placeholder": "​", - "style": "IPY_MODEL_1c1ac46562b24327806024e1a52deea7", - "tabbable": null, - "tooltip": null, - "value": "" - } - }, - "16e9aa253b3d4e07bfcc413ac296b468": { - "model_module": "@jupyter-widgets/base", - "model_module_version": "2.0.0", - "model_name": "LayoutModel", - "state": { - "_model_module": "@jupyter-widgets/base", - "_model_module_version": "2.0.0", - "_model_name": "LayoutModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "2.0.0", - "_view_name": "LayoutView", - "align_content": null, - "align_items": null, - "align_self": null, - "border_bottom": null, - "border_left": null, - "border_right": null, - "border_top": null, - "bottom": null, - "display": null, - "flex": null, - "flex_flow": null, - "grid_area": null, - "grid_auto_columns": null, - "grid_auto_flow": null, - "grid_auto_rows": null, - 
"grid_column": null, - "grid_gap": null, - "grid_row": null, - "grid_template_areas": null, - "grid_template_columns": null, - "grid_template_rows": null, - "height": null, - "justify_content": null, - "justify_items": null, - "left": null, - "margin": null, - "max_height": null, - "max_width": null, - "min_height": null, - "min_width": null, - "object_fit": null, - "object_position": null, - "order": null, - "overflow": null, - "padding": null, - "right": null, - "top": null, - "visibility": null, - "width": null - } - }, - "176d607211c64bb59293d625dabf7ecf": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "2.0.0", - "model_name": "HTMLModel", - "state": { - "_dom_classes": [], - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "2.0.0", - "_model_name": "HTMLModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/controls", - "_view_module_version": "2.0.0", - "_view_name": "HTMLView", - "description": "", - "description_allow_html": false, - "layout": "IPY_MODEL_7e4d3413b386453dae3a87b18343e086", - "placeholder": "​", - "style": "IPY_MODEL_d7862727680e4086b942352750bb5294", - "tabbable": null, - "tooltip": null, - "value": "" - } - }, - "1a82facb441a4aaaae306eae2752328b": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "2.0.0", - "model_name": "HTMLStyleModel", - "state": { - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "2.0.0", - "_model_name": "HTMLStyleModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "2.0.0", - "_view_name": "StyleView", - "background": null, - "description_width": "", - "font_size": null, - "text_color": null - } - }, - "1b9c4c56549444788790b08f39b40b57": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "2.0.0", - "model_name": "HTMLStyleModel", - "state": { - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "2.0.0", - 
"_model_name": "HTMLStyleModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "2.0.0", - "_view_name": "StyleView", - "background": null, - "description_width": "", - "font_size": null, - "text_color": null - } - }, - "1c1ac46562b24327806024e1a52deea7": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "2.0.0", - "model_name": "HTMLStyleModel", - "state": { - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "2.0.0", - "_model_name": "HTMLStyleModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "2.0.0", - "_view_name": "StyleView", - "background": null, - "description_width": "", - "font_size": null, - "text_color": null - } - }, - "3d09821a09c84aebad7e952bdefb689a": { - "model_module": "@jupyter-widgets/base", - "model_module_version": "2.0.0", - "model_name": "LayoutModel", - "state": { - "_model_module": "@jupyter-widgets/base", - "_model_module_version": "2.0.0", - "_model_name": "LayoutModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "2.0.0", - "_view_name": "LayoutView", - "align_content": null, - "align_items": null, - "align_self": null, - "border_bottom": null, - "border_left": null, - "border_right": null, - "border_top": null, - "bottom": null, - "display": null, - "flex": null, - "flex_flow": null, - "grid_area": null, - "grid_auto_columns": null, - "grid_auto_flow": null, - "grid_auto_rows": null, - "grid_column": null, - "grid_gap": null, - "grid_row": null, - "grid_template_areas": null, - "grid_template_columns": null, - "grid_template_rows": null, - "height": null, - "justify_content": null, - "justify_items": null, - "left": null, - "margin": null, - "max_height": null, - "max_width": null, - "min_height": null, - "min_width": null, - "object_fit": null, - "object_position": null, - "order": null, - "overflow": null, - "padding": null, - "right": null, - 
"top": null, - "visibility": null, - "width": null - } - }, - "481094da8b854776a78c14b1f878e732": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "2.0.0", - "model_name": "HTMLStyleModel", - "state": { - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "2.0.0", - "_model_name": "HTMLStyleModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "2.0.0", - "_view_name": "StyleView", - "background": null, - "description_width": "", - "font_size": null, - "text_color": null - } - }, - "59f04642582340d4959c3429f869a138": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "2.0.0", - "model_name": "HTMLStyleModel", - "state": { - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "2.0.0", - "_model_name": "HTMLStyleModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "2.0.0", - "_view_name": "StyleView", - "background": null, - "description_width": "", - "font_size": null, - "text_color": null - } - }, - "79c31a6452a245298089ccc5dc730437": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "2.0.0", - "model_name": "HTMLModel", - "state": { - "_dom_classes": [], - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "2.0.0", - "_model_name": "HTMLModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/controls", - "_view_module_version": "2.0.0", - "_view_name": "HTMLView", - "description": "", - "description_allow_html": false, - "layout": "IPY_MODEL_3d09821a09c84aebad7e952bdefb689a", - "placeholder": "​", - "style": "IPY_MODEL_deaf74c1b0ab4b6da6bc6d5c9f6e1f84", - "tabbable": null, - "tooltip": null, - "value": "" - } - }, - "7b06fda25e314054ac5b538c5c863475": { - "model_module": "@jupyter-widgets/base", - "model_module_version": "2.0.0", - "model_name": "LayoutModel", - "state": { - "_model_module": "@jupyter-widgets/base", - 
"_model_module_version": "2.0.0", - "_model_name": "LayoutModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "2.0.0", - "_view_name": "LayoutView", - "align_content": null, - "align_items": null, - "align_self": null, - "border_bottom": null, - "border_left": null, - "border_right": null, - "border_top": null, - "bottom": null, - "display": null, - "flex": null, - "flex_flow": null, - "grid_area": null, - "grid_auto_columns": null, - "grid_auto_flow": null, - "grid_auto_rows": null, - "grid_column": null, - "grid_gap": null, - "grid_row": null, - "grid_template_areas": null, - "grid_template_columns": null, - "grid_template_rows": null, - "height": null, - "justify_content": null, - "justify_items": null, - "left": null, - "margin": null, - "max_height": null, - "max_width": null, - "min_height": null, - "min_width": null, - "object_fit": null, - "object_position": null, - "order": null, - "overflow": null, - "padding": null, - "right": null, - "top": null, - "visibility": null, - "width": null - } - }, - "7e4d3413b386453dae3a87b18343e086": { - "model_module": "@jupyter-widgets/base", - "model_module_version": "2.0.0", - "model_name": "LayoutModel", - "state": { - "_model_module": "@jupyter-widgets/base", - "_model_module_version": "2.0.0", - "_model_name": "LayoutModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "2.0.0", - "_view_name": "LayoutView", - "align_content": null, - "align_items": null, - "align_self": null, - "border_bottom": null, - "border_left": null, - "border_right": null, - "border_top": null, - "bottom": null, - "display": null, - "flex": null, - "flex_flow": null, - "grid_area": null, - "grid_auto_columns": null, - "grid_auto_flow": null, - "grid_auto_rows": null, - "grid_column": null, - "grid_gap": null, - "grid_row": null, - "grid_template_areas": null, - "grid_template_columns": null, - "grid_template_rows": null, - "height": null, - 
"justify_content": null, - "justify_items": null, - "left": null, - "margin": null, - "max_height": null, - "max_width": null, - "min_height": null, - "min_width": null, - "object_fit": null, - "object_position": null, - "order": null, - "overflow": null, - "padding": null, - "right": null, - "top": null, - "visibility": null, - "width": null - } - }, - "83dbc3b7bf3f423ba6ba1e6abee3f5b1": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "2.0.0", - "model_name": "HTMLModel", - "state": { - "_dom_classes": [], - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "2.0.0", - "_model_name": "HTMLModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/controls", - "_view_module_version": "2.0.0", - "_view_name": "HTMLView", - "description": "", - "description_allow_html": false, - "layout": "IPY_MODEL_daeeca63c77c41fea4e94cefe429d6b8", - "placeholder": "​", - "style": "IPY_MODEL_a05f27fc17954d3696121483dc7f5c72", - "tabbable": null, - "tooltip": null, - "value": "" - } - }, - "87389b779d9e4f05893279aee32bd9b4": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "2.0.0", - "model_name": "HTMLModel", - "state": { - "_dom_classes": [], - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "2.0.0", - "_model_name": "HTMLModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/controls", - "_view_module_version": "2.0.0", - "_view_name": "HTMLView", - "description": "", - "description_allow_html": false, - "layout": "IPY_MODEL_af9f2b0a15a044509e0d729a04fc0be8", - "placeholder": "​", - "style": "IPY_MODEL_481094da8b854776a78c14b1f878e732", - "tabbable": null, - "tooltip": null, - "value": "" - } - }, - "8dd90efd5e934cd1a0dda189cb453d18": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "2.0.0", - "model_name": "HTMLStyleModel", - "state": { - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "2.0.0", - "_model_name": 
"HTMLStyleModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "2.0.0", - "_view_name": "StyleView", - "background": null, - "description_width": "", - "font_size": null, - "text_color": null - } - }, - "9380b4ba890545799d681a78dfd13773": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "2.0.0", - "model_name": "HTMLModel", - "state": { - "_dom_classes": [], - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "2.0.0", - "_model_name": "HTMLModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/controls", - "_view_module_version": "2.0.0", - "_view_name": "HTMLView", - "description": "", - "description_allow_html": false, - "layout": "IPY_MODEL_ea1f758ec83240f2ae7a55db3e93e8d1", - "placeholder": "​", - "style": "IPY_MODEL_1b9c4c56549444788790b08f39b40b57", - "tabbable": null, - "tooltip": null, - "value": "" - } - }, - "9d5cd4605b7543ee92f3ae16af22c1a6": { - "model_module": "@jupyter-widgets/base", - "model_module_version": "2.0.0", - "model_name": "LayoutModel", - "state": { - "_model_module": "@jupyter-widgets/base", - "_model_module_version": "2.0.0", - "_model_name": "LayoutModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "2.0.0", - "_view_name": "LayoutView", - "align_content": null, - "align_items": null, - "align_self": null, - "border_bottom": null, - "border_left": null, - "border_right": null, - "border_top": null, - "bottom": null, - "display": null, - "flex": null, - "flex_flow": null, - "grid_area": null, - "grid_auto_columns": null, - "grid_auto_flow": null, - "grid_auto_rows": null, - "grid_column": null, - "grid_gap": null, - "grid_row": null, - "grid_template_areas": null, - "grid_template_columns": null, - "grid_template_rows": null, - "height": null, - "justify_content": null, - "justify_items": null, - "left": null, - "margin": null, - "max_height": null, - "max_width": null, - 
"min_height": null, - "min_width": null, - "object_fit": null, - "object_position": null, - "order": null, - "overflow": null, - "padding": null, - "right": null, - "top": null, - "visibility": null, - "width": null - } - }, - "a05f27fc17954d3696121483dc7f5c72": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "2.0.0", - "model_name": "HTMLStyleModel", - "state": { - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "2.0.0", - "_model_name": "HTMLStyleModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "2.0.0", - "_view_name": "StyleView", - "background": null, - "description_width": "", - "font_size": null, - "text_color": null - } - }, - "a445f0eda6964f5b831717c19242a355": { - "model_module": "@jupyter-widgets/base", - "model_module_version": "2.0.0", - "model_name": "LayoutModel", - "state": { - "_model_module": "@jupyter-widgets/base", - "_model_module_version": "2.0.0", - "_model_name": "LayoutModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "2.0.0", - "_view_name": "LayoutView", - "align_content": null, - "align_items": null, - "align_self": null, - "border_bottom": null, - "border_left": null, - "border_right": null, - "border_top": null, - "bottom": null, - "display": null, - "flex": null, - "flex_flow": null, - "grid_area": null, - "grid_auto_columns": null, - "grid_auto_flow": null, - "grid_auto_rows": null, - "grid_column": null, - "grid_gap": null, - "grid_row": null, - "grid_template_areas": null, - "grid_template_columns": null, - "grid_template_rows": null, - "height": null, - "justify_content": null, - "justify_items": null, - "left": null, - "margin": null, - "max_height": null, - "max_width": null, - "min_height": null, - "min_width": null, - "object_fit": null, - "object_position": null, - "order": null, - "overflow": null, - "padding": null, - "right": null, - "top": null, - "visibility": null, - 
"width": null - } - }, - "a5445f650a6949e0ae48fcc7c583d819": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "2.0.0", - "model_name": "HTMLModel", - "state": { - "_dom_classes": [], - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "2.0.0", - "_model_name": "HTMLModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/controls", - "_view_module_version": "2.0.0", - "_view_name": "HTMLView", - "description": "", - "description_allow_html": false, - "layout": "IPY_MODEL_7b06fda25e314054ac5b538c5c863475", - "placeholder": "​", - "style": "IPY_MODEL_1a82facb441a4aaaae306eae2752328b", - "tabbable": null, - "tooltip": null, - "value": "" - } - }, - "af9f2b0a15a044509e0d729a04fc0be8": { - "model_module": "@jupyter-widgets/base", - "model_module_version": "2.0.0", - "model_name": "LayoutModel", - "state": { - "_model_module": "@jupyter-widgets/base", - "_model_module_version": "2.0.0", - "_model_name": "LayoutModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "2.0.0", - "_view_name": "LayoutView", - "align_content": null, - "align_items": null, - "align_self": null, - "border_bottom": null, - "border_left": null, - "border_right": null, - "border_top": null, - "bottom": null, - "display": null, - "flex": null, - "flex_flow": null, - "grid_area": null, - "grid_auto_columns": null, - "grid_auto_flow": null, - "grid_auto_rows": null, - "grid_column": null, - "grid_gap": null, - "grid_row": null, - "grid_template_areas": null, - "grid_template_columns": null, - "grid_template_rows": null, - "height": null, - "justify_content": null, - "justify_items": null, - "left": null, - "margin": null, - "max_height": null, - "max_width": null, - "min_height": null, - "min_width": null, - "object_fit": null, - "object_position": null, - "order": null, - "overflow": null, - "padding": null, - "right": null, - "top": null, - "visibility": null, - "width": null - } - }, - 
"b2bcfe9e296b4c679fa5b6d9220f195d": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "2.0.0", - "model_name": "HTMLModel", - "state": { - "_dom_classes": [], - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "2.0.0", - "_model_name": "HTMLModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/controls", - "_view_module_version": "2.0.0", - "_view_name": "HTMLView", - "description": "", - "description_allow_html": false, - "layout": "IPY_MODEL_9d5cd4605b7543ee92f3ae16af22c1a6", - "placeholder": "​", - "style": "IPY_MODEL_8dd90efd5e934cd1a0dda189cb453d18", - "tabbable": null, - "tooltip": null, - "value": "" - } - }, - "d56f11c416e34c48bae0d3a4bb68ab17": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "2.0.0", - "model_name": "HTMLStyleModel", - "state": { - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "2.0.0", - "_model_name": "HTMLStyleModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "2.0.0", - "_view_name": "StyleView", - "background": null, - "description_width": "", - "font_size": null, - "text_color": null - } - }, - "d7862727680e4086b942352750bb5294": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "2.0.0", - "model_name": "HTMLStyleModel", - "state": { - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "2.0.0", - "_model_name": "HTMLStyleModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "2.0.0", - "_view_name": "StyleView", - "background": null, - "description_width": "", - "font_size": null, - "text_color": null - } - }, - "daeeca63c77c41fea4e94cefe429d6b8": { - "model_module": "@jupyter-widgets/base", - "model_module_version": "2.0.0", - "model_name": "LayoutModel", - "state": { - "_model_module": "@jupyter-widgets/base", - "_model_module_version": "2.0.0", - "_model_name": "LayoutModel", - 
"_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "2.0.0", - "_view_name": "LayoutView", - "align_content": null, - "align_items": null, - "align_self": null, - "border_bottom": null, - "border_left": null, - "border_right": null, - "border_top": null, - "bottom": null, - "display": null, - "flex": null, - "flex_flow": null, - "grid_area": null, - "grid_auto_columns": null, - "grid_auto_flow": null, - "grid_auto_rows": null, - "grid_column": null, - "grid_gap": null, - "grid_row": null, - "grid_template_areas": null, - "grid_template_columns": null, - "grid_template_rows": null, - "height": null, - "justify_content": null, - "justify_items": null, - "left": null, - "margin": null, - "max_height": null, - "max_width": null, - "min_height": null, - "min_width": null, - "object_fit": null, - "object_position": null, - "order": null, - "overflow": null, - "padding": null, - "right": null, - "top": null, - "visibility": null, - "width": null - } - }, - "deaf74c1b0ab4b6da6bc6d5c9f6e1f84": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "2.0.0", - "model_name": "HTMLStyleModel", - "state": { - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "2.0.0", - "_model_name": "HTMLStyleModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "2.0.0", - "_view_name": "StyleView", - "background": null, - "description_width": "", - "font_size": null, - "text_color": null - } - }, - "e769edda54e242209e42179b4344f006": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "2.0.0", - "model_name": "HTMLModel", - "state": { - "_dom_classes": [], - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "2.0.0", - "_model_name": "HTMLModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/controls", - "_view_module_version": "2.0.0", - "_view_name": "HTMLView", - "description": "", - "description_allow_html": 
false, - "layout": "IPY_MODEL_018c352e2d5349649c311a057774deb6", - "placeholder": "​", - "style": "IPY_MODEL_59f04642582340d4959c3429f869a138", - "tabbable": null, - "tooltip": null, - "value": "" - } - }, - "ea1f758ec83240f2ae7a55db3e93e8d1": { - "model_module": "@jupyter-widgets/base", - "model_module_version": "2.0.0", - "model_name": "LayoutModel", - "state": { - "_model_module": "@jupyter-widgets/base", - "_model_module_version": "2.0.0", - "_model_name": "LayoutModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "2.0.0", - "_view_name": "LayoutView", - "align_content": null, - "align_items": null, - "align_self": null, - "border_bottom": null, - "border_left": null, - "border_right": null, - "border_top": null, - "bottom": null, - "display": null, - "flex": null, - "flex_flow": null, - "grid_area": null, - "grid_auto_columns": null, - "grid_auto_flow": null, - "grid_auto_rows": null, - "grid_column": null, - "grid_gap": null, - "grid_row": null, - "grid_template_areas": null, - "grid_template_columns": null, - "grid_template_rows": null, - "height": null, - "justify_content": null, - "justify_items": null, - "left": null, - "margin": null, - "max_height": null, - "max_width": null, - "min_height": null, - "min_width": null, - "object_fit": null, - "object_position": null, - "order": null, - "overflow": null, - "padding": null, - "right": null, - "top": null, - "visibility": null, - "width": null - } - }, - "f85a768680874ababb7839b68449fc64": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "2.0.0", - "model_name": "HTMLModel", - "state": { - "_dom_classes": [], - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "2.0.0", - "_model_name": "HTMLModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/controls", - "_view_module_version": "2.0.0", - "_view_name": "HTMLView", - "description": "", - "description_allow_html": false, - "layout": 
"IPY_MODEL_a445f0eda6964f5b831717c19242a355", - "placeholder": "​", - "style": "IPY_MODEL_d56f11c416e34c48bae0d3a4bb68ab17", - "tabbable": null, - "tooltip": null, - "value": "" - } - } - }, - "version_major": 2, - "version_minor": 0 - } } }, "nbformat": 4, diff --git a/experiments/Heart-Create_Statistical_Model/4-surfaces_aligned_correspond_to_pca_inputs.ipynb b/experiments/Heart-Create_Statistical_Model/4-surfaces_aligned_correspond_to_pca_inputs.ipynb index 7128e69..35d7a75 100644 --- a/experiments/Heart-Create_Statistical_Model/4-surfaces_aligned_correspond_to_pca_inputs.ipynb +++ b/experiments/Heart-Create_Statistical_Model/4-surfaces_aligned_correspond_to_pca_inputs.ipynb @@ -238,954 +238,6 @@ "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.10.11" - }, - "widgets": { - "application/vnd.jupyter.widget-state+json": { - "state": { - "09011b3e4de8408caf8817f3373bf33d": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "2.0.0", - "model_name": "HTMLModel", - "state": { - "_dom_classes": [], - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "2.0.0", - "_model_name": "HTMLModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/controls", - "_view_module_version": "2.0.0", - "_view_name": "HTMLView", - "description": "", - "description_allow_html": false, - "layout": "IPY_MODEL_a18362b33d9646ca83f93eacad94f01b", - "placeholder": "​", - "style": "IPY_MODEL_6683598b066640a19fd5be7e964e01af", - "tabbable": null, - "tooltip": null, - "value": "" - } - }, - "0b2bfa9cf9c9485fac8654dcce8f415e": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "2.0.0", - "model_name": "HTMLStyleModel", - "state": { - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "2.0.0", - "_model_name": "HTMLStyleModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "2.0.0", - "_view_name": "StyleView", - "background": 
null, - "description_width": "", - "font_size": null, - "text_color": null - } - }, - "183b078f89b44958ac0eadd48862e24c": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "2.0.0", - "model_name": "HTMLStyleModel", - "state": { - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "2.0.0", - "_model_name": "HTMLStyleModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "2.0.0", - "_view_name": "StyleView", - "background": null, - "description_width": "", - "font_size": null, - "text_color": null - } - }, - "2040914057aa4b15bc49e734809f0ca4": { - "model_module": "@jupyter-widgets/base", - "model_module_version": "2.0.0", - "model_name": "LayoutModel", - "state": { - "_model_module": "@jupyter-widgets/base", - "_model_module_version": "2.0.0", - "_model_name": "LayoutModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "2.0.0", - "_view_name": "LayoutView", - "align_content": null, - "align_items": null, - "align_self": null, - "border_bottom": null, - "border_left": null, - "border_right": null, - "border_top": null, - "bottom": null, - "display": null, - "flex": null, - "flex_flow": null, - "grid_area": null, - "grid_auto_columns": null, - "grid_auto_flow": null, - "grid_auto_rows": null, - "grid_column": null, - "grid_gap": null, - "grid_row": null, - "grid_template_areas": null, - "grid_template_columns": null, - "grid_template_rows": null, - "height": null, - "justify_content": null, - "justify_items": null, - "left": null, - "margin": null, - "max_height": null, - "max_width": null, - "min_height": null, - "min_width": null, - "object_fit": null, - "object_position": null, - "order": null, - "overflow": null, - "padding": null, - "right": null, - "top": null, - "visibility": null, - "width": null - } - }, - "3992d1518a7349d2910380c3f37f1fcb": { - "model_module": "@jupyter-widgets/base", - "model_module_version": "2.0.0", - 
"model_name": "LayoutModel", - "state": { - "_model_module": "@jupyter-widgets/base", - "_model_module_version": "2.0.0", - "_model_name": "LayoutModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "2.0.0", - "_view_name": "LayoutView", - "align_content": null, - "align_items": null, - "align_self": null, - "border_bottom": null, - "border_left": null, - "border_right": null, - "border_top": null, - "bottom": null, - "display": null, - "flex": null, - "flex_flow": null, - "grid_area": null, - "grid_auto_columns": null, - "grid_auto_flow": null, - "grid_auto_rows": null, - "grid_column": null, - "grid_gap": null, - "grid_row": null, - "grid_template_areas": null, - "grid_template_columns": null, - "grid_template_rows": null, - "height": null, - "justify_content": null, - "justify_items": null, - "left": null, - "margin": null, - "max_height": null, - "max_width": null, - "min_height": null, - "min_width": null, - "object_fit": null, - "object_position": null, - "order": null, - "overflow": null, - "padding": null, - "right": null, - "top": null, - "visibility": null, - "width": null - } - }, - "4723aecc20fc40f7991a2fac4b8a2502": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "2.0.0", - "model_name": "HTMLStyleModel", - "state": { - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "2.0.0", - "_model_name": "HTMLStyleModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "2.0.0", - "_view_name": "StyleView", - "background": null, - "description_width": "", - "font_size": null, - "text_color": null - } - }, - "4bf1b9a4cf344ab0b7dd06584aa3cc97": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "2.0.0", - "model_name": "HTMLModel", - "state": { - "_dom_classes": [], - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "2.0.0", - "_model_name": "HTMLModel", - "_view_count": null, - 
"_view_module": "@jupyter-widgets/controls", - "_view_module_version": "2.0.0", - "_view_name": "HTMLView", - "description": "", - "description_allow_html": false, - "layout": "IPY_MODEL_3992d1518a7349d2910380c3f37f1fcb", - "placeholder": "​", - "style": "IPY_MODEL_677b1b08fdb04a1195544c2aecc5a104", - "tabbable": null, - "tooltip": null, - "value": "" - } - }, - "4ca7dc14537d491eb1f5be5c443e587b": { - "model_module": "@jupyter-widgets/base", - "model_module_version": "2.0.0", - "model_name": "LayoutModel", - "state": { - "_model_module": "@jupyter-widgets/base", - "_model_module_version": "2.0.0", - "_model_name": "LayoutModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "2.0.0", - "_view_name": "LayoutView", - "align_content": null, - "align_items": null, - "align_self": null, - "border_bottom": null, - "border_left": null, - "border_right": null, - "border_top": null, - "bottom": null, - "display": null, - "flex": null, - "flex_flow": null, - "grid_area": null, - "grid_auto_columns": null, - "grid_auto_flow": null, - "grid_auto_rows": null, - "grid_column": null, - "grid_gap": null, - "grid_row": null, - "grid_template_areas": null, - "grid_template_columns": null, - "grid_template_rows": null, - "height": null, - "justify_content": null, - "justify_items": null, - "left": null, - "margin": null, - "max_height": null, - "max_width": null, - "min_height": null, - "min_width": null, - "object_fit": null, - "object_position": null, - "order": null, - "overflow": null, - "padding": null, - "right": null, - "top": null, - "visibility": null, - "width": null - } - }, - "4d591bfcf12748f7b84b710eb2c0b7fb": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "2.0.0", - "model_name": "HTMLModel", - "state": { - "_dom_classes": [], - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "2.0.0", - "_model_name": "HTMLModel", - "_view_count": null, - "_view_module": 
"@jupyter-widgets/controls", - "_view_module_version": "2.0.0", - "_view_name": "HTMLView", - "description": "", - "description_allow_html": false, - "layout": "IPY_MODEL_4ca7dc14537d491eb1f5be5c443e587b", - "placeholder": "​", - "style": "IPY_MODEL_9704bba2e1b24cffb6a4c201995ecf6e", - "tabbable": null, - "tooltip": null, - "value": "" - } - }, - "50f69e49dcd84c92bdee52c50a28271b": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "2.0.0", - "model_name": "HTMLModel", - "state": { - "_dom_classes": [], - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "2.0.0", - "_model_name": "HTMLModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/controls", - "_view_module_version": "2.0.0", - "_view_name": "HTMLView", - "description": "", - "description_allow_html": false, - "layout": "IPY_MODEL_89334229de5d4987892626fb6c7d6c8c", - "placeholder": "​", - "style": "IPY_MODEL_4723aecc20fc40f7991a2fac4b8a2502", - "tabbable": null, - "tooltip": null, - "value": "" - } - }, - "6683598b066640a19fd5be7e964e01af": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "2.0.0", - "model_name": "HTMLStyleModel", - "state": { - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "2.0.0", - "_model_name": "HTMLStyleModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "2.0.0", - "_view_name": "StyleView", - "background": null, - "description_width": "", - "font_size": null, - "text_color": null - } - }, - "677b1b08fdb04a1195544c2aecc5a104": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "2.0.0", - "model_name": "HTMLStyleModel", - "state": { - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "2.0.0", - "_model_name": "HTMLStyleModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "2.0.0", - "_view_name": "StyleView", - "background": null, - 
"description_width": "", - "font_size": null, - "text_color": null - } - }, - "69b6db60c32243d5a11fee09e66dded5": { - "model_module": "@jupyter-widgets/base", - "model_module_version": "2.0.0", - "model_name": "LayoutModel", - "state": { - "_model_module": "@jupyter-widgets/base", - "_model_module_version": "2.0.0", - "_model_name": "LayoutModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "2.0.0", - "_view_name": "LayoutView", - "align_content": null, - "align_items": null, - "align_self": null, - "border_bottom": null, - "border_left": null, - "border_right": null, - "border_top": null, - "bottom": null, - "display": null, - "flex": null, - "flex_flow": null, - "grid_area": null, - "grid_auto_columns": null, - "grid_auto_flow": null, - "grid_auto_rows": null, - "grid_column": null, - "grid_gap": null, - "grid_row": null, - "grid_template_areas": null, - "grid_template_columns": null, - "grid_template_rows": null, - "height": null, - "justify_content": null, - "justify_items": null, - "left": null, - "margin": null, - "max_height": null, - "max_width": null, - "min_height": null, - "min_width": null, - "object_fit": null, - "object_position": null, - "order": null, - "overflow": null, - "padding": null, - "right": null, - "top": null, - "visibility": null, - "width": null - } - }, - "72d9b3a829554ccbbb07c8f01f2c43d6": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "2.0.0", - "model_name": "HTMLModel", - "state": { - "_dom_classes": [], - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "2.0.0", - "_model_name": "HTMLModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/controls", - "_view_module_version": "2.0.0", - "_view_name": "HTMLView", - "description": "", - "description_allow_html": false, - "layout": "IPY_MODEL_2040914057aa4b15bc49e734809f0ca4", - "placeholder": "​", - "style": "IPY_MODEL_b8fb9072599240dabe556275c4aa37c3", - "tabbable": null, - 
"tooltip": null, - "value": "" - } - }, - "784f8df43d46452d9c01d2388eb630bd": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "2.0.0", - "model_name": "HTMLModel", - "state": { - "_dom_classes": [], - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "2.0.0", - "_model_name": "HTMLModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/controls", - "_view_module_version": "2.0.0", - "_view_name": "HTMLView", - "description": "", - "description_allow_html": false, - "layout": "IPY_MODEL_aedfedf1d8634a46a5a80ac1cbaf8e89", - "placeholder": "​", - "style": "IPY_MODEL_183b078f89b44958ac0eadd48862e24c", - "tabbable": null, - "tooltip": null, - "value": "" - } - }, - "8507d1ef8dbd4412974d69779372e0eb": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "2.0.0", - "model_name": "HTMLStyleModel", - "state": { - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "2.0.0", - "_model_name": "HTMLStyleModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "2.0.0", - "_view_name": "StyleView", - "background": null, - "description_width": "", - "font_size": null, - "text_color": null - } - }, - "89334229de5d4987892626fb6c7d6c8c": { - "model_module": "@jupyter-widgets/base", - "model_module_version": "2.0.0", - "model_name": "LayoutModel", - "state": { - "_model_module": "@jupyter-widgets/base", - "_model_module_version": "2.0.0", - "_model_name": "LayoutModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "2.0.0", - "_view_name": "LayoutView", - "align_content": null, - "align_items": null, - "align_self": null, - "border_bottom": null, - "border_left": null, - "border_right": null, - "border_top": null, - "bottom": null, - "display": null, - "flex": null, - "flex_flow": null, - "grid_area": null, - "grid_auto_columns": null, - "grid_auto_flow": null, - "grid_auto_rows": null, - 
"grid_column": null, - "grid_gap": null, - "grid_row": null, - "grid_template_areas": null, - "grid_template_columns": null, - "grid_template_rows": null, - "height": null, - "justify_content": null, - "justify_items": null, - "left": null, - "margin": null, - "max_height": null, - "max_width": null, - "min_height": null, - "min_width": null, - "object_fit": null, - "object_position": null, - "order": null, - "overflow": null, - "padding": null, - "right": null, - "top": null, - "visibility": null, - "width": null - } - }, - "8b6f908be4b44f6983e324eb2e5fe1a0": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "2.0.0", - "model_name": "HTMLModel", - "state": { - "_dom_classes": [], - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "2.0.0", - "_model_name": "HTMLModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/controls", - "_view_module_version": "2.0.0", - "_view_name": "HTMLView", - "description": "", - "description_allow_html": false, - "layout": "IPY_MODEL_d2bab64465b54ba781241fdb78910ee2", - "placeholder": "​", - "style": "IPY_MODEL_8ca28eae1a654520872bc71a63f313bc", - "tabbable": null, - "tooltip": null, - "value": "" - } - }, - "8ca28eae1a654520872bc71a63f313bc": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "2.0.0", - "model_name": "HTMLStyleModel", - "state": { - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "2.0.0", - "_model_name": "HTMLStyleModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "2.0.0", - "_view_name": "StyleView", - "background": null, - "description_width": "", - "font_size": null, - "text_color": null - } - }, - "9704bba2e1b24cffb6a4c201995ecf6e": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "2.0.0", - "model_name": "HTMLStyleModel", - "state": { - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "2.0.0", - 
"_model_name": "HTMLStyleModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "2.0.0", - "_view_name": "StyleView", - "background": null, - "description_width": "", - "font_size": null, - "text_color": null - } - }, - "a18362b33d9646ca83f93eacad94f01b": { - "model_module": "@jupyter-widgets/base", - "model_module_version": "2.0.0", - "model_name": "LayoutModel", - "state": { - "_model_module": "@jupyter-widgets/base", - "_model_module_version": "2.0.0", - "_model_name": "LayoutModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "2.0.0", - "_view_name": "LayoutView", - "align_content": null, - "align_items": null, - "align_self": null, - "border_bottom": null, - "border_left": null, - "border_right": null, - "border_top": null, - "bottom": null, - "display": null, - "flex": null, - "flex_flow": null, - "grid_area": null, - "grid_auto_columns": null, - "grid_auto_flow": null, - "grid_auto_rows": null, - "grid_column": null, - "grid_gap": null, - "grid_row": null, - "grid_template_areas": null, - "grid_template_columns": null, - "grid_template_rows": null, - "height": null, - "justify_content": null, - "justify_items": null, - "left": null, - "margin": null, - "max_height": null, - "max_width": null, - "min_height": null, - "min_width": null, - "object_fit": null, - "object_position": null, - "order": null, - "overflow": null, - "padding": null, - "right": null, - "top": null, - "visibility": null, - "width": null - } - }, - "a1a787c893b747179561fd0a458b1bbf": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "2.0.0", - "model_name": "HTMLModel", - "state": { - "_dom_classes": [], - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "2.0.0", - "_model_name": "HTMLModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/controls", - "_view_module_version": "2.0.0", - "_view_name": "HTMLView", - "description": "", - 
"description_allow_html": false, - "layout": "IPY_MODEL_b02fd007f7894f28b2fc0e3a1a6f932d", - "placeholder": "​", - "style": "IPY_MODEL_0b2bfa9cf9c9485fac8654dcce8f415e", - "tabbable": null, - "tooltip": null, - "value": "" - } - }, - "aedfedf1d8634a46a5a80ac1cbaf8e89": { - "model_module": "@jupyter-widgets/base", - "model_module_version": "2.0.0", - "model_name": "LayoutModel", - "state": { - "_model_module": "@jupyter-widgets/base", - "_model_module_version": "2.0.0", - "_model_name": "LayoutModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "2.0.0", - "_view_name": "LayoutView", - "align_content": null, - "align_items": null, - "align_self": null, - "border_bottom": null, - "border_left": null, - "border_right": null, - "border_top": null, - "bottom": null, - "display": null, - "flex": null, - "flex_flow": null, - "grid_area": null, - "grid_auto_columns": null, - "grid_auto_flow": null, - "grid_auto_rows": null, - "grid_column": null, - "grid_gap": null, - "grid_row": null, - "grid_template_areas": null, - "grid_template_columns": null, - "grid_template_rows": null, - "height": null, - "justify_content": null, - "justify_items": null, - "left": null, - "margin": null, - "max_height": null, - "max_width": null, - "min_height": null, - "min_width": null, - "object_fit": null, - "object_position": null, - "order": null, - "overflow": null, - "padding": null, - "right": null, - "top": null, - "visibility": null, - "width": null - } - }, - "b02fd007f7894f28b2fc0e3a1a6f932d": { - "model_module": "@jupyter-widgets/base", - "model_module_version": "2.0.0", - "model_name": "LayoutModel", - "state": { - "_model_module": "@jupyter-widgets/base", - "_model_module_version": "2.0.0", - "_model_name": "LayoutModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "2.0.0", - "_view_name": "LayoutView", - "align_content": null, - "align_items": null, - "align_self": null, - 
"border_bottom": null, - "border_left": null, - "border_right": null, - "border_top": null, - "bottom": null, - "display": null, - "flex": null, - "flex_flow": null, - "grid_area": null, - "grid_auto_columns": null, - "grid_auto_flow": null, - "grid_auto_rows": null, - "grid_column": null, - "grid_gap": null, - "grid_row": null, - "grid_template_areas": null, - "grid_template_columns": null, - "grid_template_rows": null, - "height": null, - "justify_content": null, - "justify_items": null, - "left": null, - "margin": null, - "max_height": null, - "max_width": null, - "min_height": null, - "min_width": null, - "object_fit": null, - "object_position": null, - "order": null, - "overflow": null, - "padding": null, - "right": null, - "top": null, - "visibility": null, - "width": null - } - }, - "b79178e3bcc04d4d8ddfb9b533fafbda": { - "model_module": "@jupyter-widgets/base", - "model_module_version": "2.0.0", - "model_name": "LayoutModel", - "state": { - "_model_module": "@jupyter-widgets/base", - "_model_module_version": "2.0.0", - "_model_name": "LayoutModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "2.0.0", - "_view_name": "LayoutView", - "align_content": null, - "align_items": null, - "align_self": null, - "border_bottom": null, - "border_left": null, - "border_right": null, - "border_top": null, - "bottom": null, - "display": null, - "flex": null, - "flex_flow": null, - "grid_area": null, - "grid_auto_columns": null, - "grid_auto_flow": null, - "grid_auto_rows": null, - "grid_column": null, - "grid_gap": null, - "grid_row": null, - "grid_template_areas": null, - "grid_template_columns": null, - "grid_template_rows": null, - "height": null, - "justify_content": null, - "justify_items": null, - "left": null, - "margin": null, - "max_height": null, - "max_width": null, - "min_height": null, - "min_width": null, - "object_fit": null, - "object_position": null, - "order": null, - "overflow": null, - "padding": null, - 
"right": null, - "top": null, - "visibility": null, - "width": null - } - }, - "b8fb9072599240dabe556275c4aa37c3": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "2.0.0", - "model_name": "HTMLStyleModel", - "state": { - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "2.0.0", - "_model_name": "HTMLStyleModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "2.0.0", - "_view_name": "StyleView", - "background": null, - "description_width": "", - "font_size": null, - "text_color": null - } - }, - "d2bab64465b54ba781241fdb78910ee2": { - "model_module": "@jupyter-widgets/base", - "model_module_version": "2.0.0", - "model_name": "LayoutModel", - "state": { - "_model_module": "@jupyter-widgets/base", - "_model_module_version": "2.0.0", - "_model_name": "LayoutModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "2.0.0", - "_view_name": "LayoutView", - "align_content": null, - "align_items": null, - "align_self": null, - "border_bottom": null, - "border_left": null, - "border_right": null, - "border_top": null, - "bottom": null, - "display": null, - "flex": null, - "flex_flow": null, - "grid_area": null, - "grid_auto_columns": null, - "grid_auto_flow": null, - "grid_auto_rows": null, - "grid_column": null, - "grid_gap": null, - "grid_row": null, - "grid_template_areas": null, - "grid_template_columns": null, - "grid_template_rows": null, - "height": null, - "justify_content": null, - "justify_items": null, - "left": null, - "margin": null, - "max_height": null, - "max_width": null, - "min_height": null, - "min_width": null, - "object_fit": null, - "object_position": null, - "order": null, - "overflow": null, - "padding": null, - "right": null, - "top": null, - "visibility": null, - "width": null - } - }, - "d9f21a58fab4469a96adf33ab447a593": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "2.0.0", - 
"model_name": "HTMLModel", - "state": { - "_dom_classes": [], - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "2.0.0", - "_model_name": "HTMLModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/controls", - "_view_module_version": "2.0.0", - "_view_name": "HTMLView", - "description": "", - "description_allow_html": false, - "layout": "IPY_MODEL_b79178e3bcc04d4d8ddfb9b533fafbda", - "placeholder": "​", - "style": "IPY_MODEL_ff9b72e9f3a74bde86061bcd1c95d53c", - "tabbable": null, - "tooltip": null, - "value": "" - } - }, - "df79b28641344a59a048345294ee6ef7": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "2.0.0", - "model_name": "HTMLModel", - "state": { - "_dom_classes": [], - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "2.0.0", - "_model_name": "HTMLModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/controls", - "_view_module_version": "2.0.0", - "_view_name": "HTMLView", - "description": "", - "description_allow_html": false, - "layout": "IPY_MODEL_69b6db60c32243d5a11fee09e66dded5", - "placeholder": "​", - "style": "IPY_MODEL_8507d1ef8dbd4412974d69779372e0eb", - "tabbable": null, - "tooltip": null, - "value": "" - } - }, - "ff9b72e9f3a74bde86061bcd1c95d53c": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "2.0.0", - "model_name": "HTMLStyleModel", - "state": { - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "2.0.0", - "_model_name": "HTMLStyleModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "2.0.0", - "_view_name": "StyleView", - "background": null, - "description_width": "", - "font_size": null, - "text_color": null - } - } - }, - "version_major": 2, - "version_minor": 0 - } } }, "nbformat": 4, diff --git a/experiments/Heart-Create_Statistical_Model/5-compute_pca_model.ipynb b/experiments/Heart-Create_Statistical_Model/5-compute_pca_model.ipynb index 
f0de0ae..f51079b 100644 --- a/experiments/Heart-Create_Statistical_Model/5-compute_pca_model.ipynb +++ b/experiments/Heart-Create_Statistical_Model/5-compute_pca_model.ipynb @@ -399,108 +399,6 @@ "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.10.11" - }, - "widgets": { - "application/vnd.jupyter.widget-state+json": { - "state": { - "33a81680252c47f09d3c1d8a201b5366": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "2.0.0", - "model_name": "HTMLStyleModel", - "state": { - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "2.0.0", - "_model_name": "HTMLStyleModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "2.0.0", - "_view_name": "StyleView", - "background": null, - "description_width": "", - "font_size": null, - "text_color": null - } - }, - "5e04c957f11e4ef182625871893ca05f": { - "model_module": "@jupyter-widgets/base", - "model_module_version": "2.0.0", - "model_name": "LayoutModel", - "state": { - "_model_module": "@jupyter-widgets/base", - "_model_module_version": "2.0.0", - "_model_name": "LayoutModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "2.0.0", - "_view_name": "LayoutView", - "align_content": null, - "align_items": null, - "align_self": null, - "border_bottom": null, - "border_left": null, - "border_right": null, - "border_top": null, - "bottom": null, - "display": null, - "flex": null, - "flex_flow": null, - "grid_area": null, - "grid_auto_columns": null, - "grid_auto_flow": null, - "grid_auto_rows": null, - "grid_column": null, - "grid_gap": null, - "grid_row": null, - "grid_template_areas": null, - "grid_template_columns": null, - "grid_template_rows": null, - "height": null, - "justify_content": null, - "justify_items": null, - "left": null, - "margin": null, - "max_height": null, - "max_width": null, - "min_height": null, - "min_width": null, - "object_fit": null, - 
"object_position": null, - "order": null, - "overflow": null, - "padding": null, - "right": null, - "top": null, - "visibility": null, - "width": null - } - }, - "629f7dd9dad0462988d24c738382db4c": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "2.0.0", - "model_name": "HTMLModel", - "state": { - "_dom_classes": [], - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "2.0.0", - "_model_name": "HTMLModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/controls", - "_view_module_version": "2.0.0", - "_view_name": "HTMLView", - "description": "", - "description_allow_html": false, - "layout": "IPY_MODEL_5e04c957f11e4ef182625871893ca05f", - "placeholder": "​", - "style": "IPY_MODEL_33a81680252c47f09d3c1d8a201b5366", - "tabbable": null, - "tooltip": null, - "value": "" - } - } - }, - "version_major": 2, - "version_minor": 0 - } } }, "nbformat": 4, diff --git a/experiments/Heart-GatedCT_To_USD/0-download_and_convert_4d_to_3d.ipynb b/experiments/Heart-GatedCT_To_USD/0-download_and_convert_4d_to_3d.ipynb index 1f67df8..62986a2 100644 --- a/experiments/Heart-GatedCT_To_USD/0-download_and_convert_4d_to_3d.ipynb +++ b/experiments/Heart-GatedCT_To_USD/0-download_and_convert_4d_to_3d.ipynb @@ -78,4 +78,4 @@ }, "nbformat": 4, "nbformat_minor": 2 -} +} \ No newline at end of file diff --git a/experiments/Heart-GatedCT_To_USD/1-register_images.ipynb b/experiments/Heart-GatedCT_To_USD/1-register_images.ipynb index 2825d1b..8ff96ff 100644 --- a/experiments/Heart-GatedCT_To_USD/1-register_images.ipynb +++ b/experiments/Heart-GatedCT_To_USD/1-register_images.ipynb @@ -239,4 +239,4 @@ }, "nbformat": 4, "nbformat_minor": 5 -} +} \ No newline at end of file diff --git a/experiments/Heart-GatedCT_To_USD/2-generate_segmentation.ipynb b/experiments/Heart-GatedCT_To_USD/2-generate_segmentation.ipynb index 3570e57..26bbf13 100644 --- a/experiments/Heart-GatedCT_To_USD/2-generate_segmentation.ipynb +++ 
b/experiments/Heart-GatedCT_To_USD/2-generate_segmentation.ipynb @@ -233,110 +233,8 @@ "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.10.11" - }, - "widgets": { - "application/vnd.jupyter.widget-state+json": { - "state": { - "41ca3cd40ea349ccb2bdac69a63fbd72": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "2.0.0", - "model_name": "HTMLStyleModel", - "state": { - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "2.0.0", - "_model_name": "HTMLStyleModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "2.0.0", - "_view_name": "StyleView", - "background": null, - "description_width": "", - "font_size": null, - "text_color": null - } - }, - "84acfe401b6241e19089c899f6d1fc80": { - "model_module": "@jupyter-widgets/base", - "model_module_version": "2.0.0", - "model_name": "LayoutModel", - "state": { - "_model_module": "@jupyter-widgets/base", - "_model_module_version": "2.0.0", - "_model_name": "LayoutModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "2.0.0", - "_view_name": "LayoutView", - "align_content": null, - "align_items": null, - "align_self": null, - "border_bottom": null, - "border_left": null, - "border_right": null, - "border_top": null, - "bottom": null, - "display": null, - "flex": null, - "flex_flow": null, - "grid_area": null, - "grid_auto_columns": null, - "grid_auto_flow": null, - "grid_auto_rows": null, - "grid_column": null, - "grid_gap": null, - "grid_row": null, - "grid_template_areas": null, - "grid_template_columns": null, - "grid_template_rows": null, - "height": null, - "justify_content": null, - "justify_items": null, - "left": null, - "margin": null, - "max_height": null, - "max_width": null, - "min_height": null, - "min_width": null, - "object_fit": null, - "object_position": null, - "order": null, - "overflow": null, - "padding": null, - "right": null, - "top": null, 
- "visibility": null, - "width": null - } - }, - "ad8c6e3abb014a5396fa2c5bc9442f6b": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "2.0.0", - "model_name": "HTMLModel", - "state": { - "_dom_classes": [], - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "2.0.0", - "_model_name": "HTMLModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/controls", - "_view_module_version": "2.0.0", - "_view_name": "HTMLView", - "description": "", - "description_allow_html": false, - "layout": "IPY_MODEL_84acfe401b6241e19089c899f6d1fc80", - "placeholder": "​", - "style": "IPY_MODEL_41ca3cd40ea349ccb2bdac69a63fbd72", - "tabbable": null, - "tooltip": null, - "value": "" - } - } - }, - "version_major": 2, - "version_minor": 0 - } } }, "nbformat": 4, "nbformat_minor": 5 -} +} \ No newline at end of file diff --git a/experiments/Heart-GatedCT_To_USD/4-merge_dynamic_and_static_usd.ipynb b/experiments/Heart-GatedCT_To_USD/4-merge_dynamic_and_static_usd.ipynb index 4f1fd2b..557eb5d 100644 --- a/experiments/Heart-GatedCT_To_USD/4-merge_dynamic_and_static_usd.ipynb +++ b/experiments/Heart-GatedCT_To_USD/4-merge_dynamic_and_static_usd.ipynb @@ -66,4 +66,4 @@ }, "nbformat": 4, "nbformat_minor": 5 -} +} \ No newline at end of file diff --git a/experiments/Heart-GatedCT_To_USD/test_vista3d_class.ipynb b/experiments/Heart-GatedCT_To_USD/test_vista3d_class.ipynb index 90e2c78..8bbd2f3 100644 --- a/experiments/Heart-GatedCT_To_USD/test_vista3d_class.ipynb +++ b/experiments/Heart-GatedCT_To_USD/test_vista3d_class.ipynb @@ -60,374 +60,6 @@ "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.10.11" - }, - "widgets": { - "application/vnd.jupyter.widget-state+json": { - "state": { - "1c1229349da843d2b975f78ff9d4f2e4": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "2.0.0", - "model_name": "HTMLStyleModel", - "state": { - "_model_module": "@jupyter-widgets/controls", - 
"_model_module_version": "2.0.0", - "_model_name": "HTMLStyleModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "2.0.0", - "_view_name": "StyleView", - "background": null, - "description_width": "", - "font_size": null, - "text_color": null - } - }, - "377bafd18157426e818c12183bd3640a": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "2.0.0", - "model_name": "HTMLModel", - "state": { - "_dom_classes": [], - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "2.0.0", - "_model_name": "HTMLModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/controls", - "_view_module_version": "2.0.0", - "_view_name": "HTMLView", - "description": "", - "description_allow_html": false, - "layout": "IPY_MODEL_d38a6f378345485489cd183f277650cd", - "placeholder": "​", - "style": "IPY_MODEL_1c1229349da843d2b975f78ff9d4f2e4", - "tabbable": null, - "tooltip": null, - "value": " 18/18 [00:00<00:00, 1060.61it/s]" - } - }, - "60f7ecaaf0c94c73ae23dbca80371267": { - "model_module": "@jupyter-widgets/base", - "model_module_version": "2.0.0", - "model_name": "LayoutModel", - "state": { - "_model_module": "@jupyter-widgets/base", - "_model_module_version": "2.0.0", - "_model_name": "LayoutModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "2.0.0", - "_view_name": "LayoutView", - "align_content": null, - "align_items": null, - "align_self": null, - "border_bottom": null, - "border_left": null, - "border_right": null, - "border_top": null, - "bottom": null, - "display": null, - "flex": null, - "flex_flow": null, - "grid_area": null, - "grid_auto_columns": null, - "grid_auto_flow": null, - "grid_auto_rows": null, - "grid_column": null, - "grid_gap": null, - "grid_row": null, - "grid_template_areas": null, - "grid_template_columns": null, - "grid_template_rows": null, - "height": null, - "justify_content": null, - "justify_items": null, - 
"left": null, - "margin": null, - "max_height": null, - "max_width": null, - "min_height": null, - "min_width": null, - "object_fit": null, - "object_position": null, - "order": null, - "overflow": null, - "padding": null, - "right": null, - "top": null, - "visibility": null, - "width": null - } - }, - "6e47567392a446feab3407f8307df492": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "2.0.0", - "model_name": "HTMLModel", - "state": { - "_dom_classes": [], - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "2.0.0", - "_model_name": "HTMLModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/controls", - "_view_module_version": "2.0.0", - "_view_name": "HTMLView", - "description": "", - "description_allow_html": false, - "layout": "IPY_MODEL_d2882c0f64334f1ca9b8b9253d0a49af", - "placeholder": "​", - "style": "IPY_MODEL_8beff594f02e48869662da2fcdfb6b5b", - "tabbable": null, - "tooltip": null, - "value": "Fetching 18 files: 100%" - } - }, - "8beff594f02e48869662da2fcdfb6b5b": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "2.0.0", - "model_name": "HTMLStyleModel", - "state": { - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "2.0.0", - "_model_name": "HTMLStyleModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "2.0.0", - "_view_name": "StyleView", - "background": null, - "description_width": "", - "font_size": null, - "text_color": null - } - }, - "94c791ac7bc54c62937e61046397b7ce": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "2.0.0", - "model_name": "HBoxModel", - "state": { - "_dom_classes": [], - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "2.0.0", - "_model_name": "HBoxModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/controls", - "_view_module_version": "2.0.0", - "_view_name": "HBoxView", - "box_style": "", - "children": [ 
- "IPY_MODEL_6e47567392a446feab3407f8307df492", - "IPY_MODEL_b73b5ff60bb340c387c94c09e97ff2da", - "IPY_MODEL_377bafd18157426e818c12183bd3640a" - ], - "layout": "IPY_MODEL_d6815ee70fd94a4dbd8edab0780d9d4d", - "tabbable": null, - "tooltip": null - } - }, - "b73b5ff60bb340c387c94c09e97ff2da": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "2.0.0", - "model_name": "FloatProgressModel", - "state": { - "_dom_classes": [], - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "2.0.0", - "_model_name": "FloatProgressModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/controls", - "_view_module_version": "2.0.0", - "_view_name": "ProgressView", - "bar_style": "success", - "description": "", - "description_allow_html": false, - "layout": "IPY_MODEL_60f7ecaaf0c94c73ae23dbca80371267", - "max": 18.0, - "min": 0.0, - "orientation": "horizontal", - "style": "IPY_MODEL_d8bcc2331fd44c1683b920d2a02a9890", - "tabbable": null, - "tooltip": null, - "value": 18.0 - } - }, - "d2882c0f64334f1ca9b8b9253d0a49af": { - "model_module": "@jupyter-widgets/base", - "model_module_version": "2.0.0", - "model_name": "LayoutModel", - "state": { - "_model_module": "@jupyter-widgets/base", - "_model_module_version": "2.0.0", - "_model_name": "LayoutModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "2.0.0", - "_view_name": "LayoutView", - "align_content": null, - "align_items": null, - "align_self": null, - "border_bottom": null, - "border_left": null, - "border_right": null, - "border_top": null, - "bottom": null, - "display": null, - "flex": null, - "flex_flow": null, - "grid_area": null, - "grid_auto_columns": null, - "grid_auto_flow": null, - "grid_auto_rows": null, - "grid_column": null, - "grid_gap": null, - "grid_row": null, - "grid_template_areas": null, - "grid_template_columns": null, - "grid_template_rows": null, - "height": null, - "justify_content": null, - "justify_items": 
null, - "left": null, - "margin": null, - "max_height": null, - "max_width": null, - "min_height": null, - "min_width": null, - "object_fit": null, - "object_position": null, - "order": null, - "overflow": null, - "padding": null, - "right": null, - "top": null, - "visibility": null, - "width": null - } - }, - "d38a6f378345485489cd183f277650cd": { - "model_module": "@jupyter-widgets/base", - "model_module_version": "2.0.0", - "model_name": "LayoutModel", - "state": { - "_model_module": "@jupyter-widgets/base", - "_model_module_version": "2.0.0", - "_model_name": "LayoutModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "2.0.0", - "_view_name": "LayoutView", - "align_content": null, - "align_items": null, - "align_self": null, - "border_bottom": null, - "border_left": null, - "border_right": null, - "border_top": null, - "bottom": null, - "display": null, - "flex": null, - "flex_flow": null, - "grid_area": null, - "grid_auto_columns": null, - "grid_auto_flow": null, - "grid_auto_rows": null, - "grid_column": null, - "grid_gap": null, - "grid_row": null, - "grid_template_areas": null, - "grid_template_columns": null, - "grid_template_rows": null, - "height": null, - "justify_content": null, - "justify_items": null, - "left": null, - "margin": null, - "max_height": null, - "max_width": null, - "min_height": null, - "min_width": null, - "object_fit": null, - "object_position": null, - "order": null, - "overflow": null, - "padding": null, - "right": null, - "top": null, - "visibility": null, - "width": null - } - }, - "d6815ee70fd94a4dbd8edab0780d9d4d": { - "model_module": "@jupyter-widgets/base", - "model_module_version": "2.0.0", - "model_name": "LayoutModel", - "state": { - "_model_module": "@jupyter-widgets/base", - "_model_module_version": "2.0.0", - "_model_name": "LayoutModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "2.0.0", - "_view_name": "LayoutView", - 
"align_content": null, - "align_items": null, - "align_self": null, - "border_bottom": null, - "border_left": null, - "border_right": null, - "border_top": null, - "bottom": null, - "display": null, - "flex": null, - "flex_flow": null, - "grid_area": null, - "grid_auto_columns": null, - "grid_auto_flow": null, - "grid_auto_rows": null, - "grid_column": null, - "grid_gap": null, - "grid_row": null, - "grid_template_areas": null, - "grid_template_columns": null, - "grid_template_rows": null, - "height": null, - "justify_content": null, - "justify_items": null, - "left": null, - "margin": null, - "max_height": null, - "max_width": null, - "min_height": null, - "min_width": null, - "object_fit": null, - "object_position": null, - "order": null, - "overflow": null, - "padding": null, - "right": null, - "top": null, - "visibility": null, - "width": null - } - }, - "d8bcc2331fd44c1683b920d2a02a9890": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "2.0.0", - "model_name": "ProgressStyleModel", - "state": { - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "2.0.0", - "_model_name": "ProgressStyleModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "2.0.0", - "_view_name": "StyleView", - "bar_color": null, - "description_width": "" - } - } - }, - "version_major": 2, - "version_minor": 0 - } } }, "nbformat": 4, diff --git a/experiments/Heart-GatedCT_To_USD/test_vista3d_inMem.ipynb b/experiments/Heart-GatedCT_To_USD/test_vista3d_inMem.ipynb index 27b19a9..37e1bb5 100644 --- a/experiments/Heart-GatedCT_To_USD/test_vista3d_inMem.ipynb +++ b/experiments/Heart-GatedCT_To_USD/test_vista3d_inMem.ipynb @@ -210,374 +210,6 @@ "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.10.11" - }, - "widgets": { - "application/vnd.jupyter.widget-state+json": { - "state": { - "0a0f0af3362d4ade93da7f9e544b305c": { - "model_module": "@jupyter-widgets/base", - 
"model_module_version": "2.0.0", - "model_name": "LayoutModel", - "state": { - "_model_module": "@jupyter-widgets/base", - "_model_module_version": "2.0.0", - "_model_name": "LayoutModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "2.0.0", - "_view_name": "LayoutView", - "align_content": null, - "align_items": null, - "align_self": null, - "border_bottom": null, - "border_left": null, - "border_right": null, - "border_top": null, - "bottom": null, - "display": null, - "flex": null, - "flex_flow": null, - "grid_area": null, - "grid_auto_columns": null, - "grid_auto_flow": null, - "grid_auto_rows": null, - "grid_column": null, - "grid_gap": null, - "grid_row": null, - "grid_template_areas": null, - "grid_template_columns": null, - "grid_template_rows": null, - "height": null, - "justify_content": null, - "justify_items": null, - "left": null, - "margin": null, - "max_height": null, - "max_width": null, - "min_height": null, - "min_width": null, - "object_fit": null, - "object_position": null, - "order": null, - "overflow": null, - "padding": null, - "right": null, - "top": null, - "visibility": null, - "width": null - } - }, - "3fbd6b7fdba448b9a6b0b03d1a9a0761": { - "model_module": "@jupyter-widgets/base", - "model_module_version": "2.0.0", - "model_name": "LayoutModel", - "state": { - "_model_module": "@jupyter-widgets/base", - "_model_module_version": "2.0.0", - "_model_name": "LayoutModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "2.0.0", - "_view_name": "LayoutView", - "align_content": null, - "align_items": null, - "align_self": null, - "border_bottom": null, - "border_left": null, - "border_right": null, - "border_top": null, - "bottom": null, - "display": null, - "flex": null, - "flex_flow": null, - "grid_area": null, - "grid_auto_columns": null, - "grid_auto_flow": null, - "grid_auto_rows": null, - "grid_column": null, - "grid_gap": null, - "grid_row": null, 
- "grid_template_areas": null, - "grid_template_columns": null, - "grid_template_rows": null, - "height": null, - "justify_content": null, - "justify_items": null, - "left": null, - "margin": null, - "max_height": null, - "max_width": null, - "min_height": null, - "min_width": null, - "object_fit": null, - "object_position": null, - "order": null, - "overflow": null, - "padding": null, - "right": null, - "top": null, - "visibility": null, - "width": null - } - }, - "417bc37263654fcf87c6cbeb2b58ccc0": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "2.0.0", - "model_name": "HTMLStyleModel", - "state": { - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "2.0.0", - "_model_name": "HTMLStyleModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "2.0.0", - "_view_name": "StyleView", - "background": null, - "description_width": "", - "font_size": null, - "text_color": null - } - }, - "4b2ae6dee4c8407e982655672140a37f": { - "model_module": "@jupyter-widgets/base", - "model_module_version": "2.0.0", - "model_name": "LayoutModel", - "state": { - "_model_module": "@jupyter-widgets/base", - "_model_module_version": "2.0.0", - "_model_name": "LayoutModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "2.0.0", - "_view_name": "LayoutView", - "align_content": null, - "align_items": null, - "align_self": null, - "border_bottom": null, - "border_left": null, - "border_right": null, - "border_top": null, - "bottom": null, - "display": null, - "flex": null, - "flex_flow": null, - "grid_area": null, - "grid_auto_columns": null, - "grid_auto_flow": null, - "grid_auto_rows": null, - "grid_column": null, - "grid_gap": null, - "grid_row": null, - "grid_template_areas": null, - "grid_template_columns": null, - "grid_template_rows": null, - "height": null, - "justify_content": null, - "justify_items": null, - "left": null, - "margin": null, - 
"max_height": null, - "max_width": null, - "min_height": null, - "min_width": null, - "object_fit": null, - "object_position": null, - "order": null, - "overflow": null, - "padding": null, - "right": null, - "top": null, - "visibility": null, - "width": null - } - }, - "56676a31f09f483881c71cf2c84fb750": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "2.0.0", - "model_name": "FloatProgressModel", - "state": { - "_dom_classes": [], - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "2.0.0", - "_model_name": "FloatProgressModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/controls", - "_view_module_version": "2.0.0", - "_view_name": "ProgressView", - "bar_style": "success", - "description": "", - "description_allow_html": false, - "layout": "IPY_MODEL_4b2ae6dee4c8407e982655672140a37f", - "max": 22.0, - "min": 0.0, - "orientation": "horizontal", - "style": "IPY_MODEL_939c13767a2f4524aa09575c646a6958", - "tabbable": null, - "tooltip": null, - "value": 22.0 - } - }, - "58479b6d1d744044a2bf527e17dbc8e6": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "2.0.0", - "model_name": "HTMLModel", - "state": { - "_dom_classes": [], - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "2.0.0", - "_model_name": "HTMLModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/controls", - "_view_module_version": "2.0.0", - "_view_name": "HTMLView", - "description": "", - "description_allow_html": false, - "layout": "IPY_MODEL_0a0f0af3362d4ade93da7f9e544b305c", - "placeholder": "​", - "style": "IPY_MODEL_9d2301c320504fcd98f599ee652d8215", - "tabbable": null, - "tooltip": null, - "value": "Fetching 22 files: 100%" - } - }, - "640ec13b532c44779310caae20047b53": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "2.0.0", - "model_name": "HTMLModel", - "state": { - "_dom_classes": [], - "_model_module": "@jupyter-widgets/controls", - 
"_model_module_version": "2.0.0", - "_model_name": "HTMLModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/controls", - "_view_module_version": "2.0.0", - "_view_name": "HTMLView", - "description": "", - "description_allow_html": false, - "layout": "IPY_MODEL_3fbd6b7fdba448b9a6b0b03d1a9a0761", - "placeholder": "​", - "style": "IPY_MODEL_417bc37263654fcf87c6cbeb2b58ccc0", - "tabbable": null, - "tooltip": null, - "value": " 22/22 [00:00<00:00, 981.30it/s]" - } - }, - "939c13767a2f4524aa09575c646a6958": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "2.0.0", - "model_name": "ProgressStyleModel", - "state": { - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "2.0.0", - "_model_name": "ProgressStyleModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "2.0.0", - "_view_name": "StyleView", - "bar_color": null, - "description_width": "" - } - }, - "9d2301c320504fcd98f599ee652d8215": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "2.0.0", - "model_name": "HTMLStyleModel", - "state": { - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "2.0.0", - "_model_name": "HTMLStyleModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "2.0.0", - "_view_name": "StyleView", - "background": null, - "description_width": "", - "font_size": null, - "text_color": null - } - }, - "b67e8a0959be4ad58157a63c7fb3efad": { - "model_module": "@jupyter-widgets/base", - "model_module_version": "2.0.0", - "model_name": "LayoutModel", - "state": { - "_model_module": "@jupyter-widgets/base", - "_model_module_version": "2.0.0", - "_model_name": "LayoutModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "2.0.0", - "_view_name": "LayoutView", - "align_content": null, - "align_items": null, - "align_self": null, - "border_bottom": null, - 
"border_left": null, - "border_right": null, - "border_top": null, - "bottom": null, - "display": null, - "flex": null, - "flex_flow": null, - "grid_area": null, - "grid_auto_columns": null, - "grid_auto_flow": null, - "grid_auto_rows": null, - "grid_column": null, - "grid_gap": null, - "grid_row": null, - "grid_template_areas": null, - "grid_template_columns": null, - "grid_template_rows": null, - "height": null, - "justify_content": null, - "justify_items": null, - "left": null, - "margin": null, - "max_height": null, - "max_width": null, - "min_height": null, - "min_width": null, - "object_fit": null, - "object_position": null, - "order": null, - "overflow": null, - "padding": null, - "right": null, - "top": null, - "visibility": null, - "width": null - } - }, - "d1c314d78d2141ff8df3c80315686077": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "2.0.0", - "model_name": "HBoxModel", - "state": { - "_dom_classes": [], - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "2.0.0", - "_model_name": "HBoxModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/controls", - "_view_module_version": "2.0.0", - "_view_name": "HBoxView", - "box_style": "", - "children": [ - "IPY_MODEL_58479b6d1d744044a2bf527e17dbc8e6", - "IPY_MODEL_56676a31f09f483881c71cf2c84fb750", - "IPY_MODEL_640ec13b532c44779310caae20047b53" - ], - "layout": "IPY_MODEL_b67e8a0959be4ad58157a63c7fb3efad", - "tabbable": null, - "tooltip": null - } - } - }, - "version_major": 2, - "version_minor": 0 - } } }, "nbformat": 4, diff --git a/experiments/Heart-Statistical_Model_To_Patient/heart_model_to_model_icp_itk.ipynb b/experiments/Heart-Statistical_Model_To_Patient/heart_model_to_model_icp_itk.ipynb index d5fed14..7aa5834 100644 --- a/experiments/Heart-Statistical_Model_To_Patient/heart_model_to_model_icp_itk.ipynb +++ b/experiments/Heart-Statistical_Model_To_Patient/heart_model_to_model_icp_itk.ipynb @@ -309,108 +309,6 @@ "nbconvert_exporter": 
"python", "pygments_lexer": "ipython3", "version": "3.10.11" - }, - "widgets": { - "application/vnd.jupyter.widget-state+json": { - "state": { - "0d689fed3b4741d5accd0ee829af7971": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "2.0.0", - "model_name": "HTMLModel", - "state": { - "_dom_classes": [], - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "2.0.0", - "_model_name": "HTMLModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/controls", - "_view_module_version": "2.0.0", - "_view_name": "HTMLView", - "description": "", - "description_allow_html": false, - "layout": "IPY_MODEL_916525245cf248d28a6bb67a181895cc", - "placeholder": "​", - "style": "IPY_MODEL_c16b7cc9f3bb441d8db907712ce92e56", - "tabbable": null, - "tooltip": null, - "value": "" - } - }, - "916525245cf248d28a6bb67a181895cc": { - "model_module": "@jupyter-widgets/base", - "model_module_version": "2.0.0", - "model_name": "LayoutModel", - "state": { - "_model_module": "@jupyter-widgets/base", - "_model_module_version": "2.0.0", - "_model_name": "LayoutModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "2.0.0", - "_view_name": "LayoutView", - "align_content": null, - "align_items": null, - "align_self": null, - "border_bottom": null, - "border_left": null, - "border_right": null, - "border_top": null, - "bottom": null, - "display": null, - "flex": null, - "flex_flow": null, - "grid_area": null, - "grid_auto_columns": null, - "grid_auto_flow": null, - "grid_auto_rows": null, - "grid_column": null, - "grid_gap": null, - "grid_row": null, - "grid_template_areas": null, - "grid_template_columns": null, - "grid_template_rows": null, - "height": null, - "justify_content": null, - "justify_items": null, - "left": null, - "margin": null, - "max_height": null, - "max_width": null, - "min_height": null, - "min_width": null, - "object_fit": null, - "object_position": null, - "order": null, - 
"overflow": null, - "padding": null, - "right": null, - "top": null, - "visibility": null, - "width": null - } - }, - "c16b7cc9f3bb441d8db907712ce92e56": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "2.0.0", - "model_name": "HTMLStyleModel", - "state": { - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "2.0.0", - "_model_name": "HTMLStyleModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "2.0.0", - "_view_name": "StyleView", - "background": null, - "description_width": "", - "font_size": null, - "text_color": null - } - } - }, - "version_major": 2, - "version_minor": 0 - } } }, "nbformat": 4, diff --git a/experiments/Heart-Statistical_Model_To_Patient/heart_model_to_model_registration_pca.ipynb b/experiments/Heart-Statistical_Model_To_Patient/heart_model_to_model_registration_pca.ipynb index d3dcf88..5a6be86 100644 --- a/experiments/Heart-Statistical_Model_To_Patient/heart_model_to_model_registration_pca.ipynb +++ b/experiments/Heart-Statistical_Model_To_Patient/heart_model_to_model_registration_pca.ipynb @@ -548,202 +548,6 @@ "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.10.11" - }, - "widgets": { - "application/vnd.jupyter.widget-state+json": { - "state": { - "12ebef1bb4134652baada92b4bf41e65": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "2.0.0", - "model_name": "HTMLModel", - "state": { - "_dom_classes": [], - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "2.0.0", - "_model_name": "HTMLModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/controls", - "_view_module_version": "2.0.0", - "_view_name": "HTMLView", - "description": "", - "description_allow_html": false, - "layout": "IPY_MODEL_d13b70b19456415a92798dc847c57042", - "placeholder": "​", - "style": "IPY_MODEL_8a5ce0cea571455f908701b9b74e615b", - "tabbable": null, - "tooltip": null, - "value": "" - } - 
}, - "8a5ce0cea571455f908701b9b74e615b": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "2.0.0", - "model_name": "HTMLStyleModel", - "state": { - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "2.0.0", - "_model_name": "HTMLStyleModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "2.0.0", - "_view_name": "StyleView", - "background": null, - "description_width": "", - "font_size": null, - "text_color": null - } - }, - "aa7837c5f1364c6c9086d42707ea1ac5": { - "model_module": "@jupyter-widgets/base", - "model_module_version": "2.0.0", - "model_name": "LayoutModel", - "state": { - "_model_module": "@jupyter-widgets/base", - "_model_module_version": "2.0.0", - "_model_name": "LayoutModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "2.0.0", - "_view_name": "LayoutView", - "align_content": null, - "align_items": null, - "align_self": null, - "border_bottom": null, - "border_left": null, - "border_right": null, - "border_top": null, - "bottom": null, - "display": null, - "flex": null, - "flex_flow": null, - "grid_area": null, - "grid_auto_columns": null, - "grid_auto_flow": null, - "grid_auto_rows": null, - "grid_column": null, - "grid_gap": null, - "grid_row": null, - "grid_template_areas": null, - "grid_template_columns": null, - "grid_template_rows": null, - "height": null, - "justify_content": null, - "justify_items": null, - "left": null, - "margin": null, - "max_height": null, - "max_width": null, - "min_height": null, - "min_width": null, - "object_fit": null, - "object_position": null, - "order": null, - "overflow": null, - "padding": null, - "right": null, - "top": null, - "visibility": null, - "width": null - } - }, - "b493ad90f44d494ebad4e99da0e2eb5f": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "2.0.0", - "model_name": "HTMLModel", - "state": { - "_dom_classes": [], - 
"_model_module": "@jupyter-widgets/controls", - "_model_module_version": "2.0.0", - "_model_name": "HTMLModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/controls", - "_view_module_version": "2.0.0", - "_view_name": "HTMLView", - "description": "", - "description_allow_html": false, - "layout": "IPY_MODEL_aa7837c5f1364c6c9086d42707ea1ac5", - "placeholder": "​", - "style": "IPY_MODEL_c21c90fea7bb4fa490ee1b0ca2073d14", - "tabbable": null, - "tooltip": null, - "value": "" - } - }, - "c21c90fea7bb4fa490ee1b0ca2073d14": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "2.0.0", - "model_name": "HTMLStyleModel", - "state": { - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "2.0.0", - "_model_name": "HTMLStyleModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "2.0.0", - "_view_name": "StyleView", - "background": null, - "description_width": "", - "font_size": null, - "text_color": null - } - }, - "d13b70b19456415a92798dc847c57042": { - "model_module": "@jupyter-widgets/base", - "model_module_version": "2.0.0", - "model_name": "LayoutModel", - "state": { - "_model_module": "@jupyter-widgets/base", - "_model_module_version": "2.0.0", - "_model_name": "LayoutModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "2.0.0", - "_view_name": "LayoutView", - "align_content": null, - "align_items": null, - "align_self": null, - "border_bottom": null, - "border_left": null, - "border_right": null, - "border_top": null, - "bottom": null, - "display": null, - "flex": null, - "flex_flow": null, - "grid_area": null, - "grid_auto_columns": null, - "grid_auto_flow": null, - "grid_auto_rows": null, - "grid_column": null, - "grid_gap": null, - "grid_row": null, - "grid_template_areas": null, - "grid_template_columns": null, - "grid_template_rows": null, - "height": null, - "justify_content": null, - "justify_items": null, - "left": 
null, - "margin": null, - "max_height": null, - "max_width": null, - "min_height": null, - "min_width": null, - "object_fit": null, - "object_position": null, - "order": null, - "overflow": null, - "padding": null, - "right": null, - "top": null, - "visibility": null, - "width": null - } - } - }, - "version_major": 2, - "version_minor": 0 - } } }, "nbformat": 4, diff --git a/experiments/Heart-Statistical_Model_To_Patient/heart_model_to_patient.ipynb b/experiments/Heart-Statistical_Model_To_Patient/heart_model_to_patient.ipynb index 77394b5..ae35b98 100644 --- a/experiments/Heart-Statistical_Model_To_Patient/heart_model_to_patient.ipynb +++ b/experiments/Heart-Statistical_Model_To_Patient/heart_model_to_patient.ipynb @@ -421,202 +421,6 @@ "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.10.11" - }, - "widgets": { - "application/vnd.jupyter.widget-state+json": { - "state": { - "1b91c7d4ece941f88ca78d57ef9733f1": { - "model_module": "@jupyter-widgets/base", - "model_module_version": "2.0.0", - "model_name": "LayoutModel", - "state": { - "_model_module": "@jupyter-widgets/base", - "_model_module_version": "2.0.0", - "_model_name": "LayoutModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "2.0.0", - "_view_name": "LayoutView", - "align_content": null, - "align_items": null, - "align_self": null, - "border_bottom": null, - "border_left": null, - "border_right": null, - "border_top": null, - "bottom": null, - "display": null, - "flex": null, - "flex_flow": null, - "grid_area": null, - "grid_auto_columns": null, - "grid_auto_flow": null, - "grid_auto_rows": null, - "grid_column": null, - "grid_gap": null, - "grid_row": null, - "grid_template_areas": null, - "grid_template_columns": null, - "grid_template_rows": null, - "height": null, - "justify_content": null, - "justify_items": null, - "left": null, - "margin": null, - "max_height": null, - "max_width": null, - "min_height": null, - "min_width": 
null, - "object_fit": null, - "object_position": null, - "order": null, - "overflow": null, - "padding": null, - "right": null, - "top": null, - "visibility": null, - "width": null - } - }, - "4bd0b4f38e7c4d25979155d254e3275e": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "2.0.0", - "model_name": "HTMLStyleModel", - "state": { - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "2.0.0", - "_model_name": "HTMLStyleModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "2.0.0", - "_view_name": "StyleView", - "background": null, - "description_width": "", - "font_size": null, - "text_color": null - } - }, - "5ca347e68fad49a9bf19ba3c411ab75b": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "2.0.0", - "model_name": "HTMLModel", - "state": { - "_dom_classes": [], - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "2.0.0", - "_model_name": "HTMLModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/controls", - "_view_module_version": "2.0.0", - "_view_name": "HTMLView", - "description": "", - "description_allow_html": false, - "layout": "IPY_MODEL_1b91c7d4ece941f88ca78d57ef9733f1", - "placeholder": "​", - "style": "IPY_MODEL_cf5183cea96a4f0a8976a8c015708585", - "tabbable": null, - "tooltip": null, - "value": "" - } - }, - "a5f282186aa4450f8e9548416359f2ff": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "2.0.0", - "model_name": "HTMLModel", - "state": { - "_dom_classes": [], - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "2.0.0", - "_model_name": "HTMLModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/controls", - "_view_module_version": "2.0.0", - "_view_name": "HTMLView", - "description": "", - "description_allow_html": false, - "layout": "IPY_MODEL_bd38cbf7c3e840a3b2d24f1339732f78", - "placeholder": "​", - "style": 
"IPY_MODEL_4bd0b4f38e7c4d25979155d254e3275e", - "tabbable": null, - "tooltip": null, - "value": "" - } - }, - "bd38cbf7c3e840a3b2d24f1339732f78": { - "model_module": "@jupyter-widgets/base", - "model_module_version": "2.0.0", - "model_name": "LayoutModel", - "state": { - "_model_module": "@jupyter-widgets/base", - "_model_module_version": "2.0.0", - "_model_name": "LayoutModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "2.0.0", - "_view_name": "LayoutView", - "align_content": null, - "align_items": null, - "align_self": null, - "border_bottom": null, - "border_left": null, - "border_right": null, - "border_top": null, - "bottom": null, - "display": null, - "flex": null, - "flex_flow": null, - "grid_area": null, - "grid_auto_columns": null, - "grid_auto_flow": null, - "grid_auto_rows": null, - "grid_column": null, - "grid_gap": null, - "grid_row": null, - "grid_template_areas": null, - "grid_template_columns": null, - "grid_template_rows": null, - "height": null, - "justify_content": null, - "justify_items": null, - "left": null, - "margin": null, - "max_height": null, - "max_width": null, - "min_height": null, - "min_width": null, - "object_fit": null, - "object_position": null, - "order": null, - "overflow": null, - "padding": null, - "right": null, - "top": null, - "visibility": null, - "width": null - } - }, - "cf5183cea96a4f0a8976a8c015708585": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "2.0.0", - "model_name": "HTMLStyleModel", - "state": { - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "2.0.0", - "_model_name": "HTMLStyleModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "2.0.0", - "_view_name": "StyleView", - "background": null, - "description_width": "", - "font_size": null, - "text_color": null - } - } - }, - "version_major": 2, - "version_minor": 0 - } } }, "nbformat": 4, diff --git 
a/src/physiomotion4d/register_images_greedy.py b/src/physiomotion4d/register_images_greedy.py index f2153e6..8b3813a 100644 --- a/src/physiomotion4d/register_images_greedy.py +++ b/src/physiomotion4d/register_images_greedy.py @@ -290,7 +290,6 @@ def registration_method( # Optional initial transform: convert ITK -> 4x4 for Greedy initial_affine: Optional[NDArray[np.float64]] = None if initial_forward_transform is not None: - transform_tools = TransformTools() # If it's affine-like, extract 4x4; else convert to displacement and skip for Greedy init if hasattr(initial_forward_transform, "GetMatrix"): M = np.eye(4, dtype=np.float64) @@ -371,7 +370,7 @@ def registration_method( ) disp_tfm = itk.DisplacementFieldTransform[itk.D, 3].New() disp_tfm.SetDisplacementField(disp_itk) - # Forward = warp then affine (moving -> fixed: first affine then deformable in Greedy) + # Forward = moving -> fixed: first affine then deformable in Greedy forward_composite = itk.CompositeTransform[itk.D, 3].New() if aff_tfm is not None: forward_composite.AddTransform(aff_tfm) diff --git a/src/physiomotion4d/usd_tools.py b/src/physiomotion4d/usd_tools.py index 5db9e81..c308d27 100644 --- a/src/physiomotion4d/usd_tools.py +++ b/src/physiomotion4d/usd_tools.py @@ -749,7 +749,9 @@ def apply_colormap_from_primvar( source_primvar: Name of primvar to visualize (e.g., "vtk_cell_stress") cmap: Matplotlib colormap name (default: "viridis") time_codes: List of time codes to process. If None, uses stage time range. - intensity_range: Optional (vmin, vmax) for colormap. If None, computed from data. + intensity_range: Optional (vmin, vmax) for colormap. If None, computed from + data. + use_sigmoid_scale: If True, use sigmoid scale for colormap normalization. 
write_default_at_t0: If True, also write default value at t=0 bind_vertex_color_material: If True, create/bind material using displayColor @@ -1051,6 +1053,10 @@ def set_solid_display_color( display_color_pv = primvars_api.CreatePrimvar( "displayColor", Sdf.ValueTypeNames.Color3fArray, UsdGeom.Tokens.vertex ) + display_color_attr = display_color_pv.GetAttr() + # Clear any existing authored default and time samples to avoid stale colors + if display_color_attr: + display_color_attr.Clear() if time_codes is None: # Default time: get points and set primvar without an explicit time code @@ -1086,7 +1092,7 @@ def set_solid_display_color( # default when not time-scrubbing still see the solid color. if default_point_count is not None: default_color_array = Vt.Vec3fArray([vec] * default_point_count) - display_color_pv.Set(default_color_array) + display_color_pv.Set(default_color_array) # , Usd.TimeCode.Default()) if bind_vertex_color_material: self._ensure_vertex_color_material(stage, mesh_prim) diff --git a/src/physiomotion4d/vtk_to_usd/converter.py b/src/physiomotion4d/vtk_to_usd/converter.py index f3ee070..f262b2e 100644 --- a/src/physiomotion4d/vtk_to_usd/converter.py +++ b/src/physiomotion4d/vtk_to_usd/converter.py @@ -97,8 +97,7 @@ def convert_file( elif self.settings.separate_objects_by_cell_type: parts = split_mesh_data_by_cell_type(mesh_data, mesh_name=mesh_name) for idx, (part_data, base_name) in enumerate(parts): - prim_name = f"{base_name}_{idx}" - mesh_path = f"/World/Meshes/{prim_name}" + mesh_path = f"/World/Meshes/{base_name}" self._ensure_parent_path(mesh_path) mesh_converter.create_mesh(part_data, mesh_path, bind_material=True) else: @@ -175,8 +174,7 @@ def convert_files_static( elif self.settings.separate_objects_by_cell_type: parts = split_mesh_data_by_cell_type(mesh_data, mesh_name=file_base) for idx, (part_data, base_name) in enumerate(parts): - prim_name = f"{base_name}_{idx}" - mesh_path = f"/World/Meshes/{prim_name}" + mesh_path = 
f"/World/Meshes/{base_name}" self._ensure_parent_path(mesh_path) mesh_converter.create_mesh(part_data, mesh_path, bind_material=True) else: @@ -297,8 +295,7 @@ def convert_sequence( for part_idx in range(n_parts): part_sequence = [p[part_idx][0] for p in parts_sequence] base_name = parts_sequence[0][part_idx][1] - prim_name = f"{base_name}_{part_idx}" - mesh_path = f"/World/Meshes/{prim_name}" + mesh_path = f"/World/Meshes/{base_name}" self._ensure_parent_path(mesh_path) mesh_converter.create_time_varying_mesh( part_sequence, mesh_path, time_codes, bind_material=True @@ -364,8 +361,7 @@ def convert_mesh_data( elif self.settings.separate_objects_by_cell_type: parts = split_mesh_data_by_cell_type(mesh_data, mesh_name=mesh_name) for idx, (part_data, base_name) in enumerate(parts): - prim_name = f"{base_name}_{idx}" - mesh_path = f"/World/Meshes/{prim_name}" + mesh_path = f"/World/Meshes/{base_name}" self._ensure_parent_path(mesh_path) mesh_converter.create_mesh(part_data, mesh_path, bind_material=True) else: @@ -484,8 +480,7 @@ def convert_mesh_data_sequence( for part_idx in range(n_parts): part_sequence = [p[part_idx][0] for p in parts_sequence] base_name = parts_sequence[0][part_idx][1] - prim_name = f"{base_name}_{part_idx}" - mesh_path = f"/World/Meshes/{prim_name}" + mesh_path = f"/World/Meshes/{base_name}" self._ensure_parent_path(mesh_path) mesh_converter.create_time_varying_mesh( part_sequence, mesh_path, time_codes, bind_material=True diff --git a/src/physiomotion4d/vtk_to_usd/mesh_utils.py b/src/physiomotion4d/vtk_to_usd/mesh_utils.py index 9130b13..3db7f5e 100644 --- a/src/physiomotion4d/vtk_to_usd/mesh_utils.py +++ b/src/physiomotion4d/vtk_to_usd/mesh_utils.py @@ -53,7 +53,7 @@ def split_mesh_data_by_cell_type( n_faces = len(counts) if n_faces == 0: - return [(mesh_data, "Mesh")] + return [(mesh_data, f"{mesh_name}_Empty")] cum = np.concatenate([[0], np.cumsum(counts)]).astype(np.int64) diff --git a/src/physiomotion4d/workflow_convert_vtk_to_usd.py 
b/src/physiomotion4d/workflow_convert_vtk_to_usd.py index 92cdf21..56b7600 100644 --- a/src/physiomotion4d/workflow_convert_vtk_to_usd.py +++ b/src/physiomotion4d/workflow_convert_vtk_to_usd.py @@ -23,36 +23,6 @@ validate_time_series_topology, ) - -def discover_time_series( - paths: list[Path], - pattern: str = r"\.t(\d+)\.(vtk|vtp|vtu)$", -) -> tuple[list[tuple[int, Path]], bool]: - """Discover and sort time-series VTK files by extracted time index. - - Args: - paths: List of paths to VTK files - pattern: Regex with one group for time step number (default matches .t123.vtk) - - Returns: - (time_series, pattern_matched): Sorted list of (time_step, path) tuples, and - a flag True if at least one path matched the pattern. If no path matches, - time_series is [(0, p) for p in paths] and pattern_matched is False. - """ - time_series: list[tuple[int, Path]] = [] - regex = re.compile(pattern, re.IGNORECASE) - pattern_matched = False - for p in paths: - match = regex.search(p.name) - if match: - time_series.append((int(match.group(1)), Path(p))) - pattern_matched = True - else: - time_series.append((0, Path(p))) - time_series.sort(key=lambda x: (x[0], str(x[1]))) - return time_series, pattern_matched - - AppearanceKind = Literal["solid", "anatomy", "colormap"] @@ -132,6 +102,42 @@ def __init__( "separate_by_connectivity and separate_by_cell_type cannot both be True" ) + def discover_time_series( + self, + paths: list[Path], + pattern: str = r"\.t(\d+)\.(vtk|vtp|vtu)$", + ) -> tuple[list[tuple[int, Path]], bool]: + """Discover and sort time-series VTK files by extracted time index. + + Args: + paths: List of paths to VTK files + pattern: Regex with one group for time step number (default matches .t123.vtk) + + Returns: + (time_series, pattern_matched): Sorted list of (time_step, path) tuples, and + a flag True only when every path matched the pattern (true time series). 
+ If any path does not match, time_series is [(0, p) for p in paths] and + pattern_matched is False (static merge). + """ + regex = re.compile(pattern, re.IGNORECASE) + invalid_series = False + parsed: list[tuple[int, Path]] = [] + for p in paths: + match = regex.search(p.name) + if match: + parsed.append((int(match.group(1)), Path(p))) + else: + parsed.append((-1, Path(p))) + invalid_series = True + # Only treat as time series when all paths match; otherwise static merge + if invalid_series: + self.log_warning("Not a time series: %s", paths) + time_series = [(0, p) for _, p in parsed] + return time_series, False + time_series = [(t, p) for t, p in parsed] + time_series.sort(key=lambda x: (x[0], str(x[1]))) + return time_series, True + def run(self) -> str: """ Run the full workflow: convert VTK to USD, then apply the chosen appearance. @@ -145,7 +151,7 @@ def run(self) -> str: raise ValueError("vtk_files must not be empty") # Discover time series - time_series, pattern_matched = discover_time_series( + time_series, pattern_matched = self.discover_time_series( self.vtk_files, pattern=self.time_series_pattern ) time_steps = [t for t, _ in time_series] @@ -271,6 +277,7 @@ def run(self) -> str: self.log_warning( "No color primvar found for %s; skip colormap", mesh_path ) + primvar = self.colormap_primvar continue self.log_info( "Applying colormap to %s from primvar %s", mesh_path, primvar diff --git a/tests/test_register_images_greedy.py b/tests/test_register_images_greedy.py index 22c239a..b757bba 100644 --- a/tests/test_register_images_greedy.py +++ b/tests/test_register_images_greedy.py @@ -209,3 +209,7 @@ def test_transform_application( str(reg_output_dir / "greedy_registered_image.mha"), compression=True, ) + + +if __name__ == "__main__": + pytest.main([__file__]) diff --git a/utils/prepare_notebooks_for_commit.py b/utils/prepare_notebooks_for_commit.py index c942ec7..f53076e 100644 --- a/utils/prepare_notebooks_for_commit.py +++ 
b/utils/prepare_notebooks_for_commit.py @@ -1,12 +1,13 @@ #!/usr/bin/env python """ -Clear all cell outputs in every Jupyter notebook in the project. +Clear cell outputs and widget state in every Jupyter notebook in the project. -Use this script before committing to GitHub to keep notebook diffs small and -avoid committing large output blobs (images, data, execution metadata). +Use this script before committing (or as a pre-commit hook) to keep notebook +diffs small and avoid committing large output blobs, execution metadata, and +ipywidget/PyVista widget state (application/vnd.jupyter.widget-state+json). Usage: - python clear_notebook_outputs.py [root_dir] + python prepare_notebooks_for_commit.py [root_dir] If root_dir is omitted, uses the parent of the directory containing this script (i.e. the physiomotion4d project root). @@ -28,9 +29,21 @@ def clear_cell_outputs(cell: dict) -> None: del cell["metadata"]["execution"] +def strip_widget_state(nb: dict) -> bool: + """ + Remove Jupyter widget state from notebook metadata (ipywidgets, PyVista, etc.). + Returns True if widget state was present and removed, False otherwise. + """ + meta = nb.get("metadata") + if not isinstance(meta, dict) or "widgets" not in meta: + return False + del meta["widgets"] + return True + + def clear_notebook(path: Path) -> bool: """ - Clear all cell outputs in a notebook file in place. + Clear all cell outputs and strip widget state in a notebook file in place. Returns True if the file was modified, False otherwise. 
""" try: @@ -64,6 +77,9 @@ def clear_notebook(path: Path) -> bool: clear_cell_outputs(cell) modified = True + if strip_widget_state(nb): + modified = True + if not modified: return False @@ -142,6 +158,8 @@ def main() -> int: ): would_modify = True break + if not would_modify and isinstance(nb.get("metadata"), dict): + would_modify = "widgets" in nb["metadata"] if would_modify: print(f" Would clear: {rel}") modified_count += 1 From 0758150735dee99059979fb1aaa155921ef88e39 Mon Sep 17 00:00:00 2001 From: Stephen Aylward Date: Fri, 27 Feb 2026 17:43:28 -0500 Subject: [PATCH 10/10] ENH: Copilot suggestions --- .pre-commit-config.yaml | 2 +- .../convert_chop_alterra_valve_to_usd.ipynb | 6 +++--- .../convert_chop_tpv25_valve_to_usd.ipynb | 4 +--- src/physiomotion4d/cli/convert_vtk_to_usd.py | 17 +---------------- src/physiomotion4d/register_images_greedy.py | 4 ++-- .../vtk_to_usd/data_structures.py | 2 +- src/physiomotion4d/vtk_to_usd/mesh_utils.py | 3 +-- 7 files changed, 10 insertions(+), 28 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index c999f0d..3dd328c 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -32,7 +32,7 @@ repos: hooks: - id: prepare-notebooks-for-commit name: Prepare notebooks for commit (strip outputs and widget state) - entry: py utils/prepare_notebooks_for_commit.py + entry: python utils/prepare_notebooks_for_commit.py language: system pass_filenames: false files: \.ipynb$ diff --git a/experiments/Convert_VTK_To_USD/convert_chop_alterra_valve_to_usd.ipynb b/experiments/Convert_VTK_To_USD/convert_chop_alterra_valve_to_usd.ipynb index 0288c61..45d83c2 100644 --- a/experiments/Convert_VTK_To_USD/convert_chop_alterra_valve_to_usd.ipynb +++ b/experiments/Convert_VTK_To_USD/convert_chop_alterra_valve_to_usd.ipynb @@ -360,8 +360,8 @@ " usd_tools = USDTools()\n", " usd_anatomy_tools = USDAnatomyTools(stage)\n", " if settings.separate_objects_by_connectivity is True:\n", - " mesh_path1 = 
\"/World/Meshes/AlterraValve_object4\"\n", - " mesh_path2 = \"/World/Meshes/AlterraValve_object3\"\n", + " mesh_path1 = \"/World/Meshes/AlterraValve_object3\"\n", + " mesh_path2 = \"/World/Meshes/AlterraValve_object4\"\n", " elif settings.separate_objects_by_cell_type is True:\n", " mesh_path1 = \"/World/Meshes/AlterraValve_triangle1\"\n", " mesh_path2 = \"/World/Meshes/AlterraValve_triangle1\"\n", @@ -382,7 +382,7 @@ " str(output_usd),\n", " mesh_path1,\n", " color_primvar,\n", - " intensity_range=(0, 300),\n", + " # intensity_range=(0, 300),\n", " cmap=\"hot\",\n", " # use_sigmoid_scale=True,\n", " bind_vertex_color_material=True,\n", diff --git a/experiments/Convert_VTK_To_USD/convert_chop_tpv25_valve_to_usd.ipynb b/experiments/Convert_VTK_To_USD/convert_chop_tpv25_valve_to_usd.ipynb index fdf10b5..9fa4b66 100644 --- a/experiments/Convert_VTK_To_USD/convert_chop_tpv25_valve_to_usd.ipynb +++ b/experiments/Convert_VTK_To_USD/convert_chop_tpv25_valve_to_usd.ipynb @@ -379,8 +379,6 @@ " primvars, keywords=(\"strain\", \"stress\")\n", " )\n", "\n", - " shutil.copy(output_usd.with_suffix(\".save.usd\"), Path(output_usd))\n", - "\n", " if color_primvar:\n", " print(f\"\\nApplying colormap to '{color_primvar}' using {DEFAULT_COLORMAP}\")\n", " usd_tools.apply_colormap_from_primvar(\n", @@ -986,4 +984,4 @@ }, "nbformat": 4, "nbformat_minor": 4 -} +} \ No newline at end of file diff --git a/src/physiomotion4d/cli/convert_vtk_to_usd.py b/src/physiomotion4d/cli/convert_vtk_to_usd.py index 9276203..7f42e26 100644 --- a/src/physiomotion4d/cli/convert_vtk_to_usd.py +++ b/src/physiomotion4d/cli/convert_vtk_to_usd.py @@ -27,19 +27,6 @@ ] -def _parse_color(s: str) -> tuple[float, float, float]: - """Parse 'R G B' or 'R,G,B' into (r,g,b) in [0,1].""" - parts = s.replace(",", " ").split() - if len(parts) != 3: - raise ValueError("Color must be three numbers (R G B) in [0,1] or [0,255]") - vals = [float(x) for x in parts] - if all(0 <= v <= 1 for v in vals): - return (vals[0], 
vals[1], vals[2]) - if all(0 <= v <= 255 for v in vals): - return (vals[0] / 255.0, vals[1] / 255.0, vals[2] / 255.0) - raise ValueError("Color values must be in [0,1] or [0,255]") - - def main() -> int: """Command-line interface for VTK to USD conversion.""" parser = argparse.ArgumentParser( @@ -183,8 +170,6 @@ def main() -> int: solid_color = (0.8, 0.8, 0.8) if args.color: try: - # If argparse defined --color with nargs=3 and type=float, args.color will be a list of floats. - # Handle that case directly by normalizing into [0, 1] and forming an RGB tuple. if isinstance(args.color, (list, tuple)): components = [float(v) for v in args.color] if len(components) != 3: @@ -205,7 +190,7 @@ def main() -> int: "Color values must all be in [0, 1] or all in [0, 255]." ) else: - solid_color = _parse_color(args.color) + raise ValueError("Color must be specified as a list of 3 float values") except ValueError as e: print(f"Error: {e}") return 1 diff --git a/src/physiomotion4d/register_images_greedy.py b/src/physiomotion4d/register_images_greedy.py index 8b3813a..3ca9767 100644 --- a/src/physiomotion4d/register_images_greedy.py +++ b/src/physiomotion4d/register_images_greedy.py @@ -91,10 +91,10 @@ def set_transform_type(self, transform_type: str) -> None: Args: transform_type: 'Deformable', 'Affine', or 'Rigid'. """ - self.transform_type = transform_type if transform_type not in ("Deformable", "Affine", "Rigid"): self.log_error("Invalid transform type: %s", transform_type) raise ValueError(f"Invalid transform type: {transform_type}") + self.transform_type = transform_type def set_metric(self, metric: str) -> None: """Set the similarity metric (CC→NCC, Mattes→NMI, MeanSquares→SSD). @@ -106,10 +106,10 @@ def set_metric(self, metric: str) -> None: Args: metric: 'CC', 'Mattes', or 'MeanSquares'. 
""" - self.metric = metric if metric not in ("CC", "Mattes", "MeanSquares"): self.log_error("Invalid metric: %s", metric) raise ValueError(f"Invalid metric: {metric}") + self.metric = metric def _itk_to_sitk(self, itk_image: itk.Image) -> Any: """Convert ITK image to SimpleITK (for Greedy).""" diff --git a/src/physiomotion4d/vtk_to_usd/data_structures.py b/src/physiomotion4d/vtk_to_usd/data_structures.py index 3494a70..4206a04 100644 --- a/src/physiomotion4d/vtk_to_usd/data_structures.py +++ b/src/physiomotion4d/vtk_to_usd/data_structures.py @@ -154,7 +154,7 @@ class ConversionSettings: preserve_point_arrays: bool = True preserve_cell_arrays: bool = True separate_objects_by_cell_type: bool = False # Split into separate USD meshes by cell type (triangle/quad/tetra/hex etc.) - separate_objects_by_connectivity: bool = True # Split into separate USD meshes by connected components (object1, object2, ...). Mutually exclusive with separate_objects_by_cell_type. + separate_objects_by_connectivity: bool = False # Split into separate USD meshes by connected components (object1, object2, ...). Mutually exclusive with separate_objects_by_cell_type. # Material settings use_preview_surface: bool = True diff --git a/src/physiomotion4d/vtk_to_usd/mesh_utils.py b/src/physiomotion4d/vtk_to_usd/mesh_utils.py index 3db7f5e..2abe057 100644 --- a/src/physiomotion4d/vtk_to_usd/mesh_utils.py +++ b/src/physiomotion4d/vtk_to_usd/mesh_utils.py @@ -35,8 +35,7 @@ def split_mesh_data_by_cell_type( """Split MeshData into one mesh per distinct face vertex count (cell type). Each part is named as mesh_name plus the cell type (e.g. MeshName_Triangle, - MeshName_Quad). The caller should append a unique number to form final prim - names (e.g. MeshName_Triangle_0, MeshName_Quad_0). + MeshName_Quad). Args: mesh_data: Single mesh that may contain mixed cell types.