Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
14 changes: 14 additions & 0 deletions python/packages/core/agent_framework/_types.py
Original file line number Diff line number Diff line change
Expand Up @@ -1866,6 +1866,9 @@ def _process_update(response: ChatResponse | AgentResponse, update: ChatResponse
response.finish_reason = update.finish_reason
if update.model_id is not None:
response.model_id = update.model_id
if isinstance(response, AgentResponse) and isinstance(update, AgentResponseUpdate):
if update.finish_reason is not None:
response.finish_reason = update.finish_reason
response.continuation_token = update.continuation_token


Expand Down Expand Up @@ -2369,6 +2372,7 @@ def __init__(
response_id: str | None = None,
agent_id: str | None = None,
created_at: CreatedAtT | None = None,
finish_reason: FinishReasonLiteral | FinishReason | None = None,
usage_details: UsageDetails | None = None,
value: ResponseModelT | None = None,
response_format: type[BaseModel] | None = None,
Expand All @@ -2384,6 +2388,9 @@ def __init__(
agent_id: The identifier of the agent that produced this response. Useful in multi-agent
scenarios to track which agent generated the response.
created_at: A timestamp for the chat response.
finish_reason: The reason the model stopped generating. Common values include
``"stop"`` (natural completion), ``"length"`` (token limit), and
``"tool_calls"`` (the model invoked a tool).
usage_details: The usage details for the chat response.
value: The structured output of the agent run response, if applicable.
response_format: Optional response format for the agent response.
Expand All @@ -2410,6 +2417,7 @@ def __init__(
self.response_id = response_id
self.agent_id = agent_id
self.created_at = created_at
self.finish_reason = finish_reason
self.usage_details = usage_details
self._value: ResponseModelT | None = value
self._response_format: type[BaseModel] | None = response_format
Expand Down Expand Up @@ -2604,6 +2612,7 @@ def __init__(
response_id: str | None = None,
message_id: str | None = None,
created_at: CreatedAtT | None = None,
finish_reason: FinishReasonLiteral | FinishReason | None = None,
continuation_token: ContinuationToken | None = None,
additional_properties: dict[str, Any] | None = None,
raw_representation: Any | None = None,
Expand All @@ -2619,6 +2628,9 @@ def __init__(
response_id: Optional ID of the response of which this update is a part.
message_id: Optional ID of the message of which this update is a part.
created_at: Optional timestamp for the chat response update.
finish_reason: The reason the model stopped generating. Common values include
``"stop"`` (natural completion), ``"length"`` (token limit), and
``"tool_calls"`` (the model invoked a tool).
continuation_token: Optional token for resuming a long-running background operation.
When present, indicates the operation is still in progress.
additional_properties: Optional additional properties associated with the chat response update.
Expand All @@ -2645,6 +2657,7 @@ def __init__(
self.response_id = response_id
self.message_id = message_id
self.created_at = created_at
self.finish_reason = finish_reason
self.continuation_token = continuation_token
self.additional_properties = _restore_compaction_annotation_in_additional_properties(
additional_properties,
Expand Down Expand Up @@ -2677,6 +2690,7 @@ def map_chat_to_agent_update(update: ChatResponseUpdate, agent_name: str | None)
response_id=update.response_id,
message_id=update.message_id,
created_at=update.created_at,
finish_reason=update.finish_reason,
continuation_token=update.continuation_token,
additional_properties=update.additional_properties,
raw_representation=update,
Expand Down
102 changes: 102 additions & 0 deletions python/packages/core/tests/core/test_finish_reason.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,102 @@
# Copyright (c) Microsoft. All rights reserved.

from agent_framework import (
AgentResponse,
AgentResponseUpdate,
ChatResponseUpdate,
Content,
Message,
)
from agent_framework._types import _process_update, map_chat_to_agent_update


def test_agent_response_init_with_finish_reason() -> None:
    """Verify that a finish_reason passed to the AgentResponse constructor is stored as-is."""
    messages = [Message("assistant", [Content.from_text("test")])]
    result = AgentResponse(messages=messages, finish_reason="stop")
    assert result.finish_reason == "stop"


def test_agent_response_update_init_with_finish_reason() -> None:
    """Verify that a finish_reason passed to the AgentResponseUpdate constructor is stored as-is."""
    payload = [Content.from_text("test")]
    result = AgentResponseUpdate(contents=payload, role="assistant", finish_reason="stop")
    assert result.finish_reason == "stop"


def test_map_chat_to_agent_update_forwards_finish_reason() -> None:
    """Verify that map_chat_to_agent_update carries finish_reason over from the chat update."""
    source = ChatResponseUpdate(
        contents=[Content.from_text("test")],
        finish_reason="length",
    )

    mapped = map_chat_to_agent_update(source, agent_name="test_agent")

    # Both the stop reason and the agent attribution must survive the mapping.
    assert mapped.finish_reason == "length"
    assert mapped.author_name == "test_agent"


def test_process_update_propagates_finish_reason_to_agent_response() -> None:
    """Verify that _process_update copies a non-None finish_reason from an update onto the response."""
    target = AgentResponse(messages=[Message("assistant", [Content.from_text("test")])])
    incoming = AgentResponseUpdate(
        contents=[Content.from_text("more text")],
        role="assistant",
        finish_reason="stop",
    )

    _process_update(target, incoming)

    assert target.finish_reason == "stop"


def test_process_update_does_not_overwrite_with_none() -> None:
    """Verify that a None finish_reason on an update leaves the response's existing value intact."""
    target = AgentResponse(
        messages=[Message("assistant", [Content.from_text("test")])],
        finish_reason="length",
    )
    incoming = AgentResponseUpdate(
        contents=[Content.from_text("more text")],
        role="assistant",
        finish_reason=None,
    )

    _process_update(target, incoming)

    # The pre-existing reason must survive an update that carries no reason.
    assert target.finish_reason == "length"


def test_agent_response_serialization_includes_finish_reason() -> None:
    """Verify that AgentResponse.to_dict() emits the finish_reason field."""
    response = AgentResponse(
        messages=[Message("assistant", [Content.from_text("test")])],
        response_id="test_123",
        finish_reason="stop",
    )

    serialized = response.to_dict()

    # The key must be present and hold the value given at construction time.
    assert "finish_reason" in serialized
    assert serialized["finish_reason"] == "stop"


def test_agent_response_update_serialization_includes_finish_reason() -> None:
    """Verify that AgentResponseUpdate.to_dict() emits the finish_reason field."""
    chunk = AgentResponseUpdate(
        contents=[Content.from_text("test")],
        role="assistant",
        response_id="test_456",
        finish_reason="tool_calls",
    )

    serialized = chunk.to_dict()

    # The key must be present and hold the value given at construction time.
    assert "finish_reason" in serialized
    assert serialized["finish_reason"] == "tool_calls"