From f529ed67b93b41f847ed1fc0e733ff01d3f4fe77 Mon Sep 17 00:00:00 2001 From: Mourya Balabhadra Date: Fri, 3 Apr 2026 21:57:13 -0700 Subject: [PATCH] Add working claude agent mcp server v2 --- mcp/python-react-agent-simple-ui/README.md | 335 ++++++++++++------ mcp/python-react-agent-simple-ui/env.template | 8 +- .../server/claude_agent_mcp_server_v2.py | 263 ++++++++++++++ .../server/requirements.txt | 3 + 4 files changed, 497 insertions(+), 112 deletions(-) create mode 100644 mcp/python-react-agent-simple-ui/server/claude_agent_mcp_server_v2.py diff --git a/mcp/python-react-agent-simple-ui/README.md b/mcp/python-react-agent-simple-ui/README.md index 625d652..1e4adab 100644 --- a/mcp/python-react-agent-simple-ui/README.md +++ b/mcp/python-react-agent-simple-ui/README.md @@ -1,59 +1,68 @@ # Python Agent with Simple React UI -A full-stack example that pairs a **Python (FastAPI) agent** powered by the OpenAI Responses API and ThoughtSpot MCP server with a **React chat UI**. +A full-stack example that pairs a **Python (FastAPI) agent** with a **React chat UI**. Supports two backend implementations: -The backend streams responses to the frontend using Server-Sent Events (SSE), giving users a real-time chat experience while the agent queries ThoughtSpot for data insights and displays ThoughtSpot charts in -an embed. Supports trusted auth and other standard thoughtspot customizations. 
+| Backend | File | AI Provider | MCP Integration | +|-----------------|----------------------------------------|-----------------------|------------------------------------------------| +| **v1 (OpenAI)** | `server/agent.py` | Azure OpenAI / OpenAI | Server-side (OpenAI manages MCP) | +| **v2 (Claude)** | `server/claude_agent_mcp_server_v2.py` | Anthropic Claude | Client-side (FastAPI connects to MCP directly) | +| **v2 (OpenAI)** | _(coming soon)_ | Azure OpenAI / OpenAI | Client-side (FastAPI connects to MCP directly) | + + + +The backend streams responses to the frontend using Server-Sent Events (SSE), giving users a real-time chat experience while the agent queries ThoughtSpot for data insights and displays ThoughtSpot charts in an embed. ## Screenshot ![Python Agent with Simple React UI](Screenshot.png) -## Architecture +--- + +## MCP Server v2: Claude + Client-side MCP (Recommended) + +`claude_agent_mcp_server_v2.py` uses Anthropic's Claude API with a **client-side agentic loop** — the FastAPI process connects directly to the ThoughtSpot MCP server using custom HTTP headers (`Authorization` + `x-ts-host`). This approach is required because Anthropic's server-side MCP integration does not support custom headers. + +### Architecture (v2) ``` -┌──────────────┐ SSE stream ┌──────────────┐ MCP ┌─────────────┐ -│ React Chat │ ◄────────────► │ FastAPI │ ◄──────► │ ThoughtSpot │ -│ (Vite) │ /api/chat │ + OpenAI │ │ MCP Server │ -└──────────────┘ └──────────────┘ └─────────────┘ - :5173 :8000 agent.thoughtspot.app +┌──────────────┐ SSE stream ┌──────────────────────┐ MCP (streamable-http) ┌─────────────┐ +│ React Chat │ ◄────────────► │ FastAPI + Claude │ ◄──────────────────────────────► │ ThoughtSpot │ +│ (Vite) │ /api/chat │ / OpenAI │ Authorization + x-ts-host │ MCP Server │ +└──────────────┘ └──────────────────────┘ └─────────────┘ + :5173 :8000 agent.thoughtspot.app ``` +**Request flow:** + 1. User sends a message from the React UI -2. 
FastAPI backend forwards it to OpenAI Responses API with the ThoughtSpot MCP tool -3. OpenAI calls ThoughtSpot tools as needed, and streams the response -4. Backend forwards text deltas and status events to the UI over SSE -5. React renders the streamed markdown response in real time +2. FastAPI opens a new MCP session to `agent.thoughtspot.app` with auth headers +3. Claude receives the user message + ThoughtSpot tool definitions +4. Claude calls ThoughtSpot tools as needed; FastAPI executes each call via the MCP session +5. The agentic loop continues until Claude stops calling tools +6. Text deltas and status events are streamed to the UI over SSE in real time -## Prerequisites +### Prerequisites (v2) - Python 3.10+ - Node.js 18+ -- Azure OpenAI endpoint and API key (or an OpenAI-compatible endpoint) -- ThoughtSpot instance with: - - Host URL - - Authentication token (Bearer token) - -## Quick Start - -### 1. Clone the repository - -```bash -git clone https://github.com/thoughtspot/developer-examples.git -cd developer-examples/mcp/python-react-agent-simple-ui -``` +- Anthropic API key +- ThoughtSpot instance with a host URL and Authentication token(bearer token) -### 2. 
Configure environment +### Environment Setup (v2) From the project root (`python-react-agent-simple-ui/`): @@ -61,35 +70,34 @@ From the project root (`python-react-agent-simple-ui/`): cp env.template .env ``` -Edit `.env` with your credentials: +Edit `.env` — v2 uses these variables: ```env -# Server-side (used by the Python agent) -AZURE_OPENAI_ENDPOINT=https://your-resource.openai.azure.com/openai/v1 -AZURE_OPENAI_KEY=your_azure_openai_key_here +# Server-side — used by the Claude agent +ANTHROPIC_API_KEY=your_anthropic_api_key_here -# Client-side (prefix with VITE_ so Vite exposes them to the browser) +# ThoughtSpot credentials (VITE_ prefix makes them available to the React client too) VITE_TS_HOST=your-instance.thoughtspot.cloud VITE_TS_AUTH_TOKEN=your_thoughtspot_bearer_token ``` -The Python server reads `AZURE_OPENAI_*` and `VITE_TS_*` variables from this `.env` file. The Vite client reads variables prefixed with `VITE_` via `import.meta.env`. +> **Note:** `VITE_TS_HOST` / `VITE_TS_AUTH_TOKEN` are read by both the Python server and the React client. You can also set them without the `VITE_` prefix as `TS_HOST` / `TS_AUTH_TOKEN` if you only need server-side access. -### 3. Start the backend +> **Warning:** Using a static bearer token is for development and demo purposes only. For production, implement the [Trusted Authentication](https://developers.thoughtspot.com/docs/trusted-auth) flow where your backend generates short-lived tokens per user. + +### Running v2 + +**Backend:** ```bash cd server python -m venv .venv source .venv/bin/activate # Windows: .venv\Scripts\activate pip install -r requirements.txt -uvicorn agent:app --reload +uvicorn claude_agent_mcp_server_v2:app --reload ``` -The API will be running at `http://localhost:8000`. - -### 4. Start the frontend - -In a separate terminal: +**Frontend** (separate terminal): ```bash cd client @@ -97,108 +105,206 @@ npm install npm run dev ``` -Open `http://localhost:5173` in your browser. 
The Vite dev server proxies `/api` requests to the FastAPI backend. +Open `http://localhost:5173`. The Vite dev server proxies `/api` to the FastAPI backend on port 8000. -## Project Structure +### How v2 Works + +#### Conversation management + +v2 maintains full conversation history (including all tool interactions) in an in-memory dict keyed by `conv_id`. Each `/api/chat` request either starts a new conversation or continues an existing one by passing the `response_id` returned in the previous `done` event. ``` -python-react-agent-simple-ui/ -├── .env # Shared env vars (create from env.template) -├── env.template # Environment variable template -├── server/ -│ ├── agent.py # FastAPI server with OpenAI Responses API + MCP -│ └── requirements.txt # Python dependencies -├── client/ -│ ├── package.json # Node dependencies -│ ├── vite.config.js # Vite config with API proxy + envDir -│ ├── index.html # HTML entry point -│ └── src/ -│ ├── main.jsx # React entry point -│ ├── App.jsx # Chat UI component -│ └── App.css # Styles -└── README.md +conversations: { conv_id → [user msg, assistant msg + tool calls, tool results, ...] } ``` -## How It Works +#### Analytical session continuity -### Backend (`server/agent.py`) +When the Claude model calls `create_analysis_session`, the server stores the returned `analytical_session_id` and injects it into the system prompt for all follow-up turns. This lets `send_session_message` / `get_session_updates` calls reference the same ThoughtSpot analytical session across multiple questions. 
-- Uses the **OpenAI Responses API** with `stream=True` for real-time generation -- Configures the **ThoughtSpot MCP server** as a tool — OpenAI handles MCP communication server-side -- Supports **multi-turn conversations** via `previous_response_id` — the API maintains conversation context automatically -- Streams **SSE events** to the frontend: `delta` (text), `status` (tool call progress), `done` (response complete), `error` +``` +analytical_sessions: { conv_id → analytical_session_id } +``` -### Frontend (`client/src/App.jsx`) +#### Agentic loop -- Reads the SSE stream using `fetch` + `ReadableStream` API -- Renders assistant responses as **markdown** with table and code block support -- Shows **real-time status** while the agent connects to and queries ThoughtSpot -- Tracks `response_id` across turns for seamless multi-turn conversation +The loop inside `agent_loop()` runs until `stop_reason != "tool_use"`: -## Customizations +1. Call `claude_client.messages.stream(...)` with tools +2. Stream text deltas to the queue as SSE `delta` events +3. On `tool_use` stop: execute every tool call via the live MCP session +4. Append `assistant` turn + `tool_result` user turn to `current_messages` +5. Repeat from step 1 -### 1. OpenAI Configuration (`server/agent.py`) +### Customization (v2) -#### Change the model +#### Change the Claude model -Modify the `model` parameter in the `/api/chat` handler: +In `claude_agent_mcp_server_v2.py`, update the `model` parameter in the `claude_client.messages.stream(...)` call: ```python -"model": "gpt-5", # or gpt-5-mini, gpt-4.1-mini, etc. +async with claude_client.messages.stream( + model="claude-sonnet-4-6", # or claude-haiku-4-5-20251001, etc. + ... 
+) ``` -#### Use Standard OpenAI Instead of Azure +#### System prompt -Update the `.env` variables and client initialization: +Edit `SYSTEM_PROMPT` to change agent behavior — tone, focus, or datasource: ```python -openai_client = OpenAI( - api_key=os.getenv("OPENAI_API_KEY"), +SYSTEM_PROMPT = ( + "You are a helpful data analyst assistant powered by ThoughtSpot. " + # ... + # Uncomment to force a specific datasource for all queries: + # "Use this datasource: cd252e5c-b552-49a8-821d-3eadaa049cca to answer all data questions." ) ``` +#### Restrict available tools + +Set `ALLOWED_TOOLS` to a list of tool names to limit what the agent can call. Set to `None` to allow all tools. + +```python +# Allow all tools (default) +ALLOWED_TOOLS = None + +# Restrict to specific tools +ALLOWED_TOOLS = ["check_connectivity", "create_analysis_session", "send_session_message", "get_session_updates"] +``` + +#### Available ThoughtSpot MCP tools (v2) + +The v2 MCP server exposes an **analytical session workflow** — a three-step loop to query data and retrieve visualizations: + +| Tool | Inputs | Outputs | Description | +|---------------------------|-----------------------------------------------------------------------|-------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `check_connectivity` | — | `success` | Test connectivity and authentication. Call this if other tools are failing to verify the connection. | +| `create_analysis_session` | `data_source_id` _(optional)_ | `analytical_session_id` | Start a new analytical session. Provide `data_source_id` when the user has specified a source; omit it to let the Analytics Agent auto-select. Returns a session ID used in all subsequent calls. 
Sessions are conversational — ask follow-up questions without creating a new one. | +| `send_session_message` | `analytical_session_id`, `message`, `additional_context` _(optional)_ | `success` | Send a natural-language question to an active session. The response is not returned immediately — poll with `get_session_updates`. Wait for `is_done: true` before sending another message. Use `additional_context` for background the Agent wouldn't otherwise know (e.g. "fiscal year starts in April"). | +| `get_session_updates` | `analytical_session_id` | `session_updates` (list), `is_done` | Poll for the latest incremental updates from the session. Call repeatedly until `is_done: true`. An empty `session_updates` list with `is_done: false` means the Agent is still thinking — keep polling. Each update has a `type` field: | +| `create_dashboard` | `title`, `answers` (list of `answer_id`s), `note_tile` | `link` | Create a dashboard from one or more answers returned by `get_session_updates`. Returns a URL to the created dashboard. | + +**`session_update` type definition** (items in the `session_updates` list): + +| Field | Present when | Description | +|----------------|----------------------------------|---------------------------------------------------------------------------------------------------------------------------| +| `type` | always | `"text"`, `"text_chunk"`, or `"answer"` | +| `text` | `type` is `text` or `text_chunk` | Natural-language message from the Analytics Agent. Concatenate `text_chunk` values in order to form the complete message. | +| `answer_id` | `type` is `answer` | Unique identifier for the answer — use with `create_dashboard`. | +| `answer_title` | `type` is `answer` | Human-readable title describing what the answer shows. | +| `answer_query` | `type` is `answer` | The search query the Analytics Agent used to generate the answer. 
| +| `iframe_url` | `type` is `answer` | Embeddable URL for rendering the answer as an interactive chart or table in an `" # important! + "Do not ask to create charts, as thoughtspot will already create interactive charts for you." + "Respond in an engaging markdown format, with html tags when needed." + "Keep the response short and to the point." + # "Use this datasource: cd252e5c-b552-49a8-821d-3eadaa049cca to answer all data questions." +) + +# ThoughtSpot MCP Tools (v2): +# To restrict which tools are accessible to the agent, set ALLOWED_TOOLS to a list of tool names. +# Set to None to allow all tools. +# +# The v2 MCP server uses an analytical session workflow: +# 1. check_connectivity - Test connectivity and authentication. No inputs. +# 2. create_analysis_session - Start a session. Optional: data_source_id. +# Returns: analytical_session_id. +# 3. send_session_message - Send a natural-language question to the session. +# Inputs: analytical_session_id, message, additional_context (optional). +# 4. get_session_updates - Poll for incremental updates. Inputs: analytical_session_id. +# Returns: session_updates (list), is_done (bool). +# Poll until is_done=True. Each update has type: text | text_chunk | answer. +# Answer updates include: answer_id, answer_title, answer_query, iframe_url. +# 5. create_dashboard - Create a dashboard from answer IDs. +# Inputs: title, answers (list of answer_ids), note_tile. +# Returns: link. +ALLOWED_TOOLS = None +# ALLOWED_TOOLS = ["check_connectivity", "create_analysis_session", "send_session_message", "get_session_updates", "create_dashboard"] + +# In-memory conversation store: conv_id -> full message history (including tool interactions) +conversations: dict[str, list] = {} + +# 1:1 mapping: conv_id -> analytical_session_id returned by create_analysis_session tool. +# Passed to Claude via system prompt so follow-up send_session_message / get_session_updates +# calls use the same ThoughtSpot analytical session. 
# 1:1 mapping: conv_id -> analytical_session_id returned by the
# create_analysis_session tool. Injected into the system prompt on follow-up
# turns so send_session_message / get_session_updates calls reuse the same
# ThoughtSpot analytical session.
analytical_sessions: dict[str, str] = {}

# Strong references to fire-and-forget tasks. asyncio holds only a weak
# reference to tasks, so an otherwise-unreferenced task may be garbage
# collected before it finishes; see the asyncio.create_task docs.
_background_tasks: set[asyncio.Task] = set()


class ChatRequest(BaseModel):
    # The user's chat message for this turn.
    message: str
    # Conversation id returned in the previous "done" SSE event;
    # None starts a new conversation.
    response_id: str | None = None


def format_sse(data: dict) -> str:
    """Serialize *data* as a single Server-Sent-Events frame."""
    return f"data: {json.dumps(data)}\n\n"


async def agent_loop(messages: list, queue: asyncio.Queue, conv_id: str) -> None:
    """
    Client-side agentic loop.

    Connects to the ThoughtSpot MCP server directly (with Authorization +
    x-ts-host headers), fetches tool definitions, then runs the Claude
    tool-use loop until the model stops calling tools. SSE event dicts are
    put into *queue* for streaming to the frontend; the stream is always
    terminated with a "done" or "error" event.

    Args:
        messages: Prior conversation history plus the new user message.
        queue: Async queue consumed by the /api/chat SSE generator.
        conv_id: Conversation id; keys ``conversations`` and
            ``analytical_sessions``.
    """
    try:
        headers = dict(MCP_HEADERS)

        print(f"[MCP] Connecting to {MCP_URL}")
        async with streamablehttp_client(MCP_URL, headers=headers) as (read, write, _):
            async with ClientSession(read, write) as session:
                print("[MCP] Initializing session...")
                await session.initialize()
                print("[MCP] Session initialized. Fetching tools...")

                # Fetch tool definitions from the ThoughtSpot MCP server,
                # optionally restricted by ALLOWED_TOOLS.
                tools_result = await session.list_tools()
                print(f"[MCP] Got {len(tools_result.tools)} tools")
                available_tools = tools_result.tools
                if ALLOWED_TOOLS is not None:
                    available_tools = [t for t in available_tools if t.name in ALLOWED_TOOLS]

                # Convert MCP tool definitions to Anthropic's tool format.
                anthropic_tools = [
                    {
                        "name": t.name,
                        "description": t.description or "",
                        "input_schema": t.inputSchema,
                    }
                    for t in available_tools
                ]

                current_messages = messages[:]

                # Build the system prompt, injecting the analytical session id
                # on follow-up turns so the model continues the same session.
                system = SYSTEM_PROMPT
                existing_session_id = analytical_sessions.get(conv_id)
                if existing_session_id:
                    system += (
                        f"\n\nActive ThoughtSpot analytical session ID: {existing_session_id}. "
                        "Use this ID when calling send_session_message or get_session_updates "
                        "so follow-up questions continue in the same session."
                    )

                while True:
                    # NOTE(review): the README customization section documents
                    # claude-sonnet-4-6 here — confirm which model is intended.
                    async with claude_client.messages.stream(
                        model="claude-opus-4-6",
                        max_tokens=16000,
                        system=system,
                        messages=current_messages,
                        tools=anthropic_tools,
                    ) as stream:
                        async for event in stream:
                            t = getattr(event, "type", None)
                            if t == "content_block_start":
                                if getattr(event.content_block, "type", None) == "tool_use":
                                    await queue.put({"type": "status", "message": "Querying ThoughtSpot..."})
                            elif t == "content_block_delta":
                                delta = event.delta
                                if getattr(delta, "type", None) == "text_delta":
                                    # Forward text deltas to the UI as they arrive.
                                    await queue.put({"type": "delta", "text": delta.text})

                        final_message = await stream.get_final_message()

                    # Loop ends when the model stops requesting tools.
                    if final_message.stop_reason != "tool_use":
                        break

                    # Execute every tool call via the live MCP session and feed
                    # the results back to Claude as tool_result blocks.
                    tool_results = []
                    for block in final_message.content:
                        if getattr(block, "type", None) != "tool_use":
                            continue
                        try:
                            mcp_result = await session.call_tool(block.name, block.input)
                            print(f"[MCP] Tool {block.name} and input {block.input} returned: {mcp_result}")
                            result_text = " ".join(
                                getattr(c, "text", str(c)) for c in mcp_result.content
                            ) if mcp_result.content else ""
                            is_error = getattr(mcp_result, "isError", False)

                            # Store analytical_session_id (1:1 with conv_id) so
                            # follow-up requests can reference the same
                            # ThoughtSpot session.
                            if block.name == "create_analysis_session" and not analytical_sessions.get(conv_id):
                                try:
                                    sid = json.loads(result_text).get("analytical_session_id")
                                    if sid:
                                        analytical_sessions[conv_id] = sid
                                        print(f"[MCP] Stored analytical_session_id for conv {conv_id}: {sid}")
                                except (ValueError, AttributeError):
                                    # Result was not a JSON object; nothing to store.
                                    pass

                        except Exception as e:
                            # Covers McpError and any other transport failure.
                            # Report it to the model as an errored tool_result
                            # instead of aborting the whole turn.
                            print(f"[MCP] Tool {block.name} failed: {e}")
                            result_text = f"Tool call failed: {e}"
                            is_error = True
                        tool_results.append({
                            "type": "tool_result",
                            "tool_use_id": block.id,
                            "content": result_text,
                            "is_error": is_error,
                        })

                    # Append assistant turn + tool results and continue the loop.
                    current_messages = current_messages + [
                        {"role": "assistant", "content": final_message.content},
                        {"role": "user", "content": tool_results},
                    ]

                # Persist full conversation history (including tool interactions)
                # so follow-up turns have complete context (e.g. the
                # analytical_session_id in prior results).
                conversations[conv_id] = current_messages + [
                    {"role": "assistant", "content": final_message.content}
                ]
                await queue.put({"type": "done", "response_id": conv_id})

    except asyncio.CancelledError:
        # Never swallow task cancellation (e.g. on server shutdown).
        raise
    except BaseException as e:
        # anyio task groups (used by the MCP transport) raise
        # BaseExceptionGroup, hence the broad catch at this boundary.
        traceback.print_exc()
        # Recursively unwrap ExceptionGroups to surface the root cause.
        err = e
        while hasattr(err, "exceptions") and getattr(err, "exceptions", None):
            err = err.exceptions[0]
        await queue.put({"type": "error", "message": f"{type(err).__name__}: {err}"})


@app.post("/api/chat")
async def chat(request: ChatRequest):
    """Start or continue a conversation and stream agent output over SSE."""
    conv_id = request.response_id or str(uuid.uuid4())
    print(f"[Chat] Received message for conv_id {conv_id}: {request.response_id}")
    history = conversations.get(conv_id, [])
    messages = history + [{"role": "user", "content": request.message}]

    queue: asyncio.Queue = asyncio.Queue()
    # Keep a strong reference to the background task so it is not
    # garbage-collected before completing.
    task = asyncio.create_task(agent_loop(messages, queue, conv_id))
    _background_tasks.add(task)
    task.add_done_callback(_background_tasks.discard)

    async def event_stream() -> AsyncGenerator[str, None]:
        # Drain the queue until the loop signals completion or failure.
        while True:
            item = await queue.get()
            yield format_sse(item)
            if item.get("type") in ("done", "error"):
                break

    return StreamingResponse(event_stream(), media_type="text/event-stream")


@app.get("/api/health")
async def health():
    """Liveness probe."""
    return {"status": "ok"}