From 0c8a7423a30db5ee6140171d8189fc73909ff090 Mon Sep 17 00:00:00 2001 From: Blanca Pablos Date: Wed, 4 Feb 2026 15:20:31 +0100 Subject: [PATCH 1/4] feat(application): Add option to summarize run describe --- src/aignostics/application/_cli.py | 11 +- src/aignostics/application/_utils.py | 72 +++++- tests/aignostics/application/utils_test.py | 245 +++++++++++++++++++++ 3 files changed, 318 insertions(+), 10 deletions(-) diff --git a/src/aignostics/application/_cli.py b/src/aignostics/application/_cli.py index 8446a6ec0..0418eeb6c 100644 --- a/src/aignostics/application/_cli.py +++ b/src/aignostics/application/_cli.py @@ -919,6 +919,13 @@ def run_describe( str, typer.Option(help="Output format: 'text' (default) or 'json'"), ] = "text", + summarize: Annotated[ + bool, + typer.Option( + "--summarize", + help="Show only run and item status summary (external ID, state, error message)", + ), + ] = False, ) -> None: """Describe run.""" logger.trace("Describing run with ID '{}'", run_id) @@ -931,7 +938,9 @@ def run_describe( run_details = run.details(hide_platform_queue_position=not user_info.is_internal_user) print(json.dumps(run_details.model_dump(mode="json"), indent=2, default=str)) else: - retrieve_and_print_run_details(run, hide_platform_queue_position=not user_info.is_internal_user) + retrieve_and_print_run_details( + run, hide_platform_queue_position=not user_info.is_internal_user, summarize=summarize + ) logger.debug("Described run with ID '{}'", run_id) except NotFoundException: logger.warning(f"Run with ID '{run_id}' not found.") diff --git a/src/aignostics/application/_utils.py b/src/aignostics/application/_utils.py index 0344c1ee2..c18f9855f 100644 --- a/src/aignostics/application/_utils.py +++ b/src/aignostics/application/_utils.py @@ -26,6 +26,7 @@ ) from aignostics.platform import ( InputArtifactData, + ItemState, OutputArtifactData, OutputArtifactElement, Run, @@ -174,17 +175,17 @@ class OutputFormat(StrEnum): JSON = "json" -def _format_status_string(state: RunState, termination_reason: str | None = None) -> str: +def _format_status_string(state: RunState | ItemState, termination_reason: str | None = None) -> str: """Format status string with optional termination reason. Args: - state (RunState): The run state + state (RunState | ItemState): The run or item state termination_reason (str | None): Optional termination reason Returns: str: Formatted status string """ - if state is RunState.TERMINATED and termination_reason: + if (state.value == RunState.TERMINATED or state.value == ItemState.TERMINATED) and termination_reason: return f"{state.value} ({termination_reason})" return f"{state.value}" @@ -277,21 +278,26 @@ def _format_run_details(run: RunData) -> str: return output -def retrieve_and_print_run_details(run_handle: Run, hide_platform_queue_position: bool) -> None: +def retrieve_and_print_run_details( + run_handle: Run, hide_platform_queue_position: bool, *, summarize: bool = False +) -> None: """Retrieve and print detailed information about a run. 
Args: run_handle (Run): The Run handle hide_platform_queue_position (bool): Whether to hide platform-wide queue position + summarize (bool): If True, show only status summary (external ID, state, error message) """ run = run_handle.details(hide_platform_queue_position=hide_platform_queue_position) - run_details = _format_run_details(run) - output = f"[bold]Run Details for {run.run_id}[/bold]\n{'=' * 80}\n{run_details}\n\n[bold]Items:[/bold]" - - console.print(output) - _retrieve_and_print_run_items(run_handle) + if summarize: + _print_run_summary(run, run_handle) + else: + run_details = _format_run_details(run) + output = f"[bold]Run Details for {run.run_id}[/bold]\n{'=' * 80}\n{run_details}\n\n[bold]Items:[/bold]" + console.print(output) + _retrieve_and_print_run_items(run_handle) def _retrieve_and_print_run_items(run_handle: Run) -> None: @@ -328,6 +334,54 @@ def _retrieve_and_print_run_items(run_handle: Run) -> None: console.print(f"{item_output}\n") +def _print_run_summary(run: RunData, run_handle: Run) -> None: + """Print a concise summary of run and item statuses. + + Shows only the essential status information: external ID, state, and error message + for each item, plus overall run statistics. + + Args: + run (RunData): Run data object + run_handle (Run): The Run handle for fetching item results + """ + status_str = _format_status_string(run.state, run.termination_reason) + duration_str = _format_duration_string(run.submitted_at, run.terminated_at) + + # Run summary header + output = ( + f"[bold]Run Summary for {run.run_id}[/bold]\n" + f"{'=' * 80}\n" + f"[bold]Application (Version):[/bold] {run.application_id} ({run.version_number})\n" + f"[bold]Status:[/bold] {status_str}\n" + f"[bold]Duration:[/bold] {duration_str}\n" + ) + + if run.error_message or run.error_code: + output += f"[bold]Error:[/bold] {run.error_message or 'N/A'} ({run.error_code or 'N/A'})\n" + + output += f"[bold]Statistics:[/bold]\n{_format_run_statistics(run.statistics)}\n" + console.print(output) + + # Items summary + console.print("[bold]Items:[/bold]") + results = run_handle.results() + if not results: + console.print(" No item results available.") + return + + for item in results: + item_status = _format_status_string(item.state, item.termination_reason) + item_line = f" [bold]{item.external_id}[/bold]: {item_status}" + + if item.error_message or item.error_code: + error_info = item.error_message or item.error_code or "" + if item.error_message and item.error_code: + error_info = f"{item.error_message} ({item.error_code})" + item_line += f" - [red]{error_info}[/red]" + + console.print(item_line) + + def print_runs_verbose(runs: list[RunData]) -> None: """Print detailed information about runs, sorted by submitted_at in descending order. 
diff --git a/tests/aignostics/application/utils_test.py b/tests/aignostics/application/utils_test.py index 65974d68c..7741e9a90 100644 --- a/tests/aignostics/application/utils_test.py +++ b/tests/aignostics/application/utils_test.py @@ -787,3 +787,248 @@ def test_queue_position_string_from_run_with_only_platform_position() -> None: num_preceding_items_platform=15, ) assert queue_position_string_from_run(run) == "15 items ahead across the entire platform" + + +# Tests for retrieve_and_print_run_details with summarize option + + +@pytest.mark.unit +@patch("aignostics.application._utils.console") +def test_retrieve_and_print_run_details_summarize_mode(mock_console: Mock) -> None: + """Test summarize mode shows concise output with external ID, state, and errors.""" + submitted_at = datetime(2025, 1, 1, 12, 0, 0, tzinfo=UTC) + terminated_at = datetime(2025, 1, 1, 13, 0, 0, tzinfo=UTC) + + run_data = RunData( + run_id="run-summarize-test", + application_id="he-tme", + version_number="1.0.0", + state=RunState.TERMINATED, + termination_reason=RunTerminationReason.ALL_ITEMS_PROCESSED, + output=RunOutput.FULL, + statistics=RunItemStatistics( + item_count=2, + item_pending_count=0, + item_processing_count=0, + item_skipped_count=0, + item_succeeded_count=1, + item_user_error_count=1, + item_system_error_count=0, + ), + submitted_at=submitted_at, + submitted_by="user@example.com", + terminated_at=terminated_at, + custom_metadata=None, + error_message=None, + error_code=None, + ) + + from aignx.codegen.models import ItemOutput + + item_success = ItemResult( + item_id="item-001", + external_id="slide-success.svs", + state=ItemState.TERMINATED, + termination_reason=ItemTerminationReason.SUCCEEDED, + output=ItemOutput.FULL, + error_message=None, + error_code=None, + custom_metadata=None, + custom_metadata_checksum=None, + terminated_at=terminated_at, + output_artifacts=[], + ) + + item_error = ItemResult( + item_id="item-002", + external_id="slide-error.svs", + state=ItemState.TERMINATED, + termination_reason=ItemTerminationReason.USER_ERROR, + output=ItemOutput.NONE, + error_message="Invalid file format", + error_code="INVALID_FORMAT", + custom_metadata=None, + custom_metadata_checksum=None, + terminated_at=terminated_at, + output_artifacts=[], + ) + + mock_run = MagicMock() + mock_run.details.return_value = run_data + mock_run.results.return_value = [item_success, item_error] + + retrieve_and_print_run_details(mock_run, hide_platform_queue_position=False, summarize=True) + + # Collect all printed output + all_output = " ".join(str(call) for call in mock_console.print.call_args_list) + + # Verify summary header is present + assert "Run Summary for run-summarize-test" in all_output + # Verify application info is present + assert "he-tme" in all_output + # Verify items are listed with external IDs + assert "slide-success.svs" in all_output + assert "slide-error.svs" in all_output + # Verify error message is shown for failed item + assert "Invalid file format" in all_output + # Verify artifact details are NOT shown (they are omitted in summary) + assert "Download URL" not in all_output + assert "Artifact ID" not in all_output + + +@pytest.mark.unit +@patch("aignostics.application._utils.console") +def test_retrieve_and_print_run_details_summarize_no_items(mock_console: Mock) -> None: + """Test summarize mode with no items shows appropriate message.""" + submitted_at = datetime(2025, 1, 1, 12, 0, 0, tzinfo=UTC) + + run_data = RunData( + run_id="run-no-items", + application_id="test-app", + 
version_number="0.0.1", + state=RunState.PENDING, + termination_reason=None, + output=RunOutput.NONE, + statistics=RunItemStatistics( + item_count=0, + item_pending_count=0, + item_processing_count=0, + item_skipped_count=0, + item_succeeded_count=0, + item_user_error_count=0, + item_system_error_count=0, + ), + submitted_at=submitted_at, + submitted_by="user@example.com", + terminated_at=None, + custom_metadata=None, + error_message=None, + error_code=None, + ) + + mock_run = MagicMock() + mock_run.details.return_value = run_data + mock_run.results.return_value = [] + + retrieve_and_print_run_details(mock_run, hide_platform_queue_position=False, summarize=True) + + all_output = " ".join(str(call) for call in mock_console.print.call_args_list) + assert "Run Summary for run-no-items" in all_output + assert "No item results available" in all_output + + +@pytest.mark.unit +@patch("aignostics.application._utils.console") +def test_retrieve_and_print_run_details_summarize_with_run_error(mock_console: Mock) -> None: + """Test summarize mode shows run-level errors.""" + submitted_at = datetime(2025, 1, 1, 12, 0, 0, tzinfo=UTC) + terminated_at = datetime(2025, 1, 1, 12, 5, 0, tzinfo=UTC) + + run_data = RunData( + run_id="run-with-error", + application_id="test-app", + version_number="0.0.1", + state=RunState.TERMINATED, + termination_reason=RunTerminationReason.CANCELED_BY_SYSTEM, + output=RunOutput.NONE, + statistics=RunItemStatistics( + item_count=1, + item_pending_count=0, + item_processing_count=0, + item_skipped_count=0, + item_succeeded_count=0, + item_user_error_count=0, + item_system_error_count=1, + ), + submitted_at=submitted_at, + submitted_by="user@example.com", + terminated_at=terminated_at, + custom_metadata=None, + error_message="System error occurred", + error_code="SYS_ERROR", + ) + + mock_run = MagicMock() + mock_run.details.return_value = run_data + mock_run.results.return_value = [] + + retrieve_and_print_run_details(mock_run, hide_platform_queue_position=False, summarize=True) + + all_output = " ".join(str(call) for call in mock_console.print.call_args_list) + assert "System error occurred" in all_output + assert "SYS_ERROR" in all_output + + +@pytest.mark.unit +@patch("aignostics.application._utils.console") +def test_retrieve_and_print_run_details_default_is_detailed(mock_console: Mock) -> None: + """Test that default mode (summarize=False) shows detailed output with artifacts.""" + submitted_at = datetime(2025, 1, 1, 12, 0, 0, tzinfo=UTC) + terminated_at = datetime(2025, 1, 1, 13, 0, 0, tzinfo=UTC) + + run_data = RunData( + run_id="run-detailed-test", + application_id="he-tme", + version_number="1.0.0", + state=RunState.TERMINATED, + termination_reason=RunTerminationReason.ALL_ITEMS_PROCESSED, + output=RunOutput.FULL, + statistics=RunItemStatistics( + item_count=1, + item_pending_count=0, + item_processing_count=0, + item_skipped_count=0, + item_succeeded_count=1, + item_user_error_count=0, + item_system_error_count=0, + ), + submitted_at=submitted_at, + submitted_by="user@example.com", + terminated_at=terminated_at, + custom_metadata=None, + error_message=None, + error_code=None, + ) + + from aignx.codegen.models import ArtifactOutput, ArtifactState, ArtifactTerminationReason, ItemOutput + + item_result = ItemResult( + item_id="item-123", + external_id="slide-001.svs", + state=ItemState.TERMINATED, + termination_reason=ItemTerminationReason.SUCCEEDED, + output=ItemOutput.FULL, + error_message=None, + error_code=None, + custom_metadata=None, + custom_metadata_checksum=None, 
+ terminated_at=terminated_at, + output_artifacts=[ + OutputArtifactElement( + output_artifact_id="artifact-abc", + name="result.parquet", + download_url="https://example.com/result.parquet", + metadata={"media_type": "application/vnd.apache.parquet"}, + state=ArtifactState.TERMINATED, + termination_reason=ArtifactTerminationReason.SUCCEEDED, + output=ArtifactOutput.AVAILABLE, + error_code=None, + error_message=None, + ) + ], + ) + + mock_run = MagicMock() + mock_run.details.return_value = run_data + mock_run.results.return_value = [item_result] + + # Call without summarize parameter (default is False) + retrieve_and_print_run_details(mock_run, hide_platform_queue_position=False) + + all_output = " ".join(str(call) for call in mock_console.print.call_args_list) + + # Verify detailed output shows "Run Details" not "Run Summary" + assert "Run Details for run-detailed-test" in all_output + # Verify artifact details ARE shown in detailed mode + assert "Download URL" in all_output + assert "Artifact ID" in all_output From efa1bb26c65d8db00c611cd6edc59348684ebcdf Mon Sep 17 00:00:00 2001 From: Blanca Pablos Date: Wed, 4 Feb 2026 17:15:45 +0100 Subject: [PATCH 2/4] Apply suggestion from @olivermeyer Co-authored-by: Oliver Meyer <42039965+olivermeyer@users.noreply.github.com> --- src/aignostics/application/_cli.py | 1 + 1 file changed, 1 insertion(+) diff --git a/src/aignostics/application/_cli.py b/src/aignostics/application/_cli.py index 0418eeb6c..0b41e95fe 100644 --- a/src/aignostics/application/_cli.py +++ b/src/aignostics/application/_cli.py @@ -923,6 +923,7 @@ def run_describe( bool, typer.Option( "--summarize", + "-s", help="Show only run and item status summary (external ID, state, error message)", ), ] = False, From afec82c23c0858425b7a4ca8c8898750de80bc06 Mon Sep 17 00:00:00 2001 From: Blanca Pablos Date: Wed, 4 Feb 2026 17:27:11 +0100 Subject: [PATCH 3/4] fix(application): Address Oliver's review --- CLI_REFERENCE.md | 1 + src/aignostics/application/_utils.py | 64 +++------------------- tests/aignostics/application/utils_test.py | 6 +- 3 files changed, 11 insertions(+), 60 deletions(-) diff --git a/CLI_REFERENCE.md b/CLI_REFERENCE.md index 2eadb8b7d..011db6cad 100644 --- a/CLI_REFERENCE.md +++ b/CLI_REFERENCE.md @@ -356,6 +356,7 @@ $ aignostics application run describe [OPTIONS] RUN_ID **Options**: * `--format TEXT`: Output format: 'text' (default) or 'json' [default: text] +* `--summarize, -s`: Show run details without output artifacts for items * `--help`: Show this message and exit. 
#### `aignostics application run dump-metadata` diff --git a/src/aignostics/application/_utils.py b/src/aignostics/application/_utils.py index c18f9855f..b4b2e9f72 100644 --- a/src/aignostics/application/_utils.py +++ b/src/aignostics/application/_utils.py @@ -291,20 +291,18 @@ def retrieve_and_print_run_details( """ run = run_handle.details(hide_platform_queue_position=hide_platform_queue_position) - if summarize: - _print_run_summary(run, run_handle) - else: - run_details = _format_run_details(run) - output = f"[bold]Run Details for {run.run_id}[/bold]\n{'=' * 80}\n{run_details}\n\n[bold]Items:[/bold]" - console.print(output) - _retrieve_and_print_run_items(run_handle) + run_details = _format_run_details(run) + output = f"[bold]Run Details for {run.run_id}[/bold]\n{'=' * 80}\n{run_details}\n\n[bold]Items:[/bold]" + console.print(output) + _retrieve_and_print_run_items(run_handle, summarize) -def _retrieve_and_print_run_items(run_handle: Run) -> None: +def _retrieve_and_print_run_items(run_handle: Run, summarize: bool = False) -> None: """Retrieve and print information about items in a run. Args: run_handle (Run): The Run handle + summarize (bool): If True, show only status summary without output artifacts """ results = run_handle.results() if not results: @@ -320,7 +318,7 @@ def _retrieve_and_print_run_items(run_handle: Run) -> None: f" [bold]Custom Metadata:[/bold] {item.custom_metadata or 'None'}" ) - if item.output_artifacts: + if not summarize and item.output_artifacts: artifacts_output = "\n [bold]Output Artifacts:[/bold]" for artifact in item.output_artifacts: artifacts_output += ( @@ -334,54 +332,6 @@ def _retrieve_and_print_run_items(run_handle: Run) -> None: console.print(f"{item_output}\n") -def _print_run_summary(run: RunData, run_handle: Run) -> None: - """Print a concise summary of run and item statuses. - - Shows only the essential status information: external ID, state, and error message - for each item, plus overall run statistics. - - Args: - run (RunData): Run data object - run_handle (Run): The Run handle for fetching item results - """ - status_str = _format_status_string(run.state, run.termination_reason) - duration_str = _format_duration_string(run.submitted_at, run.terminated_at) - - # Run summary header - output = ( - f"[bold]Run Summary for {run.run_id}[/bold]\n" - f"{'=' * 80}\n" - f"[bold]Application (Version):[/bold] {run.application_id} ({run.version_number})\n" - f"[bold]Status:[/bold] {status_str}\n" - f"[bold]Duration:[/bold] {duration_str}\n" - ) - - if run.error_message or run.error_code: - output += f"[bold]Error:[/bold] {run.error_message or 'N/A'} ({run.error_code or 'N/A'})\n" - - output += f"[bold]Statistics:[/bold]\n{_format_run_statistics(run.statistics)}\n" - console.print(output) - - # Items summary - console.print("[bold]Items:[/bold]") - results = run_handle.results() - if not results: - console.print(" No item results available.") - return - - for item in results: - item_status = _format_status_string(item.state, item.termination_reason) - item_line = f" [bold]{item.external_id}[/bold]: {item_status}" - - if item.error_message or item.error_code: - error_info = item.error_message or item.error_code or "" - if item.error_message and item.error_code: - error_info = f"{item.error_message} ({item.error_code})" - item_line += f" - [red]{error_info}[/red]" - - console.print(item_line) - - def print_runs_verbose(runs: list[RunData]) -> None: """Print detailed information about runs, sorted by submitted_at in descending order. 
diff --git a/tests/aignostics/application/utils_test.py b/tests/aignostics/application/utils_test.py index 7741e9a90..1935ad724 100644 --- a/tests/aignostics/application/utils_test.py +++ b/tests/aignostics/application/utils_test.py @@ -862,8 +862,8 @@ def test_retrieve_and_print_run_details_summarize_mode(mock_console: Mock) -> No # Collect all printed output all_output = " ".join(str(call) for call in mock_console.print.call_args_list) - # Verify summary header is present - assert "Run Summary for run-summarize-test" in all_output + # Verify run details header is present + assert "Run Details for run-summarize-test" in all_output # Verify application info is present assert "he-tme" in all_output # Verify items are listed with external IDs @@ -913,7 +913,7 @@ def test_retrieve_and_print_run_details_summarize_no_items(mock_console: Mock) - retrieve_and_print_run_details(mock_run, hide_platform_queue_position=False, summarize=True) all_output = " ".join(str(call) for call in mock_console.print.call_args_list) - assert "Run Summary for run-no-items" in all_output + assert "Run Details for run-no-items" in all_output assert "No item results available" in all_output From 89168688f8c3d5aaecb1cd0ba39597c6b454f028 Mon Sep 17 00:00:00 2001 From: Blanca Pablos Date: Fri, 6 Feb 2026 18:02:36 +0100 Subject: [PATCH 4/4] lint --- src/aignostics/application/_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/aignostics/application/_utils.py b/src/aignostics/application/_utils.py index b4b2e9f72..1538a230e 100644 --- a/src/aignostics/application/_utils.py +++ b/src/aignostics/application/_utils.py @@ -185,7 +185,7 @@ def _format_status_string(state: RunState | ItemState, termination_reason: str | Returns: str: Formatted status string """ - if (state.value == RunState.TERMINATED or state.value == ItemState.TERMINATED) and termination_reason: + if state.value in {RunState.TERMINATED, ItemState.TERMINATED} and termination_reason: return f"{state.value} ({termination_reason})" return f"{state.value}"
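
For reference, a minimal usage sketch of the option this series adds, following the `aignostics application run describe [OPTIONS] RUN_ID` usage shown in the CLI_REFERENCE.md hunk; `RUN_ID` is a placeholder for a real run identifier:

```console
# Default: full run details plus per-item details including output artifacts
$ aignostics application run describe RUN_ID

# Summarized: run details and per-item status, output artifacts omitted (behavior after PATCH 3/4)
$ aignostics application run describe RUN_ID --summarize

# Short flag introduced in PATCH 2/4
$ aignostics application run describe RUN_ID -s
```

Programmatically, the same switch is the keyword-only `summarize` argument of `retrieve_and_print_run_details`, which after PATCH 3/4 is forwarded to `_retrieve_and_print_run_items` to skip the output-artifact section for each item.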