demo_reflection.py
82 lines (68 loc) · 2.62 KB
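"""Demo of the agent's reflection / robustness loop.

A mock LLM that stops making progress after a few ReAct steps is used to
trigger the agent's reflection (critical-reviewer) pass; the script then
prints the reflection and thought events recorded in the run history.
"""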
import asyncio
import os
from dotenv import load_dotenv
from llm.provider import LLMProvider, Message, LLMResponse
from core.agent import Agent
from tools.base import Tool
from state.persistence import StatePersistence

# Load environment variables (e.g. API keys) from a .env file
load_dotenv()


class MockLLMWithCritique(LLMProvider):
    """Mock provider that stalls mid-task so the agent's reflection pass fires."""

    def __init__(self, model: str):
        self.model = model
        self.step = 0

    def supports_tool_calling(self) -> bool:
        return True

    async def stream_generate(self, messages, **kwargs):
        yield "Final."

    async def generate(self, messages, tools=None, **kwargs):
        content = messages[-1].content
        self.step += 1

        # Reflection request: the agent asks for a critique of its own progress
        if "You are a critical reviewer" in content:
            return LLMResponse(
                content='{"is_progressing": false, "critique": "You are iterating but not solving.", "suggestion": "Try reverse calculation."}',
                role="assistant"
            )

        # Normal agent flow
        if self.step < 3:
            return LLMResponse(
                content=f'Thought: I am trying step {self.step}.\nAction: test_tool\nAction Input: {{"try": {self.step}}}',
                role="assistant"
            )
        elif self.step == 3:
            # Trigger reflection in the loop by returning an incomplete response
            return LLMResponse(
                content='Thought: I am stuck.',
                role="assistant"
            )
        else:
            return LLMResponse(
                content='Thought: Based on reflection, I will solve it.\nFinal Answer: Solved after reflection.',
                role="assistant"
            )


class TestTool(Tool):
    name: str = "test_tool"
    description: str = "Testing"
    parameters: dict = {"type": "object", "properties": {"try": {"type": "integer"}}}

    async def _run(self, **kwargs):
        return "Executed"


async def demo_reflection():
    print("=" * 60)
    print("REFLECTION & ROBUSTNESS DEMO")
    print("=" * 60)

    llm = MockLLMWithCritique(model="mock-reflect")
    tools = [TestTool()]
    agent = Agent(llm=llm, tools=tools)

    print("\n[START] Running task that requires reflection...")
    result = await agent.run("Solve a hard problem", pattern="react")

    print(f"\n[RESULT] {result.get('output')}")

    print("\n[HISTORY]")
    history = result['state']['history']
    for event in history:
        if event['event'] == 'reflection':
            print(f"🧠 REFLECTION: {event['data']}")
        elif event['event'] == 'thought':
            print(f"💭 THOUGHT: {event['data']}")


if __name__ == "__main__":
    asyncio.run(demo_reflection())
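To try the demo, run python demo_reflection.py from the project root so that the llm, core, tools, and state packages resolve; python-dotenv must be installed, but no real API key should be needed since the LLM provider is mocked.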