Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
24 commits
Select commit Hold shift + click to select a range
6a3cc8a
fix(ai-proxy): validate model tool support at Router init (fail fast)
Feb 5, 2026
c9329d8
test(ai-proxy): add end-to-end integration tests with real OpenAI API
Feb 4, 2026
7779fe7
test(ai-proxy): add comprehensive integration tests for production re…
Feb 4, 2026
041412c
fix(ai-proxy): disable langchain retries by default
Feb 4, 2026
af3ef2e
test(ai-proxy): improve integration test quality based on PR review
Feb 4, 2026
264e0f2
test(ai-proxy): fix tool_choice with specific function name test
Feb 4, 2026
90c3bf8
refactor(ai-proxy): use generic error message for AINotConfiguredError
Feb 4, 2026
21910c4
fix(ai-proxy): properly close MCP server in tests to avoid forceExit
Feb 4, 2026
354ea8f
test(ai-proxy): reduce test timeouts from 30s to 10s
Feb 4, 2026
f854570
feat(ai-proxy): add Anthropic LLM provider support
Jan 23, 2026
f3f81c6
fix(ai-proxy): handle null content and JSON parse errors in Anthropic…
Jan 23, 2026
32a3933
test(ai-proxy): add Anthropic integration tests
Feb 4, 2026
f926352
refactor(ai-proxy): consolidate LLM integration tests
Feb 4, 2026
09873e8
fix(ai-proxy): fix lint errors in provider-dispatcher
Feb 4, 2026
916d802
test(ai-proxy): increase timeout for MCP tool enrichment test
Feb 4, 2026
18e2629
test(ai-proxy): add model compatibility tests for all OpenAI and Anth…
Feb 4, 2026
6db6a4b
fix: package
Feb 5, 2026
f371caf
refactor(ai-proxy): use Anthropic SDK's Model type instead of custom …
Feb 5, 2026
d76b087
fix(ai-proxy): fix ANTHROPIC_MODELS_TO_TEST typo
Feb 5, 2026
c2cc1fe
refactor(ai-proxy): move isModelSupportingTools to router.ts
Feb 5, 2026
565361c
refactor(ai-proxy): extract isModelSupportingTools tests to dedicated…
Feb 5, 2026
b0f978a
fix(ai-proxy): improve Anthropic error handling and input validation
Feb 6, 2026
b7956eb
revert(ai-proxy): restore llm.integration.test.ts to main version
Feb 6, 2026
4bd08ce
test(ai-proxy): add Anthropic model compatibility integration tests
Feb 7, 2026
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions .github/workflows/build.yml
Original file line number Diff line number Diff line change
Expand Up @@ -141,6 +141,7 @@ jobs:
run: yarn workspace @forestadmin/ai-proxy test --testPathPattern='llm.integration'
env:
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }}

send-coverage:
name: Send Coverage
Expand Down
8 changes: 7 additions & 1 deletion packages/_example/src/forest/agent.ts
Original file line number Diff line number Diff line change
Expand Up @@ -94,5 +94,11 @@ export default function makeAgent() {
.customizeCollection('post', customizePost)
.customizeCollection('comment', customizeComment)
.customizeCollection('review', customizeReview)
.customizeCollection('sales', customizeSales);
.customizeCollection('sales', customizeSales)
.addAi({
model: 'gpt-4o',
provider: 'openai',
name: 'test',
apiKey: process.env.OPENAI_API_KEY,
});
}
4 changes: 4 additions & 0 deletions packages/ai-proxy/jest.config.ts
Original file line number Diff line number Diff line change
Expand Up @@ -6,4 +6,8 @@ export default {
collectCoverageFrom: ['<rootDir>/src/**/*.ts'],
testMatch: ['<rootDir>/test/**/*.test.ts'],
setupFiles: ['<rootDir>/test/setup-env.ts'],
// Fix module resolution for @anthropic-ai/sdk submodules (peer dep of @langchain/anthropic)
moduleNameMapper: {
'^@anthropic-ai/sdk/(.*)$': '<rootDir>/../../node_modules/@anthropic-ai/sdk/$1',
},
};
1 change: 1 addition & 0 deletions packages/ai-proxy/package.json
Original file line number Diff line number Diff line change
Expand Up @@ -13,6 +13,7 @@
},
"dependencies": {
"@forestadmin/datasource-toolkit": "1.50.1",
"@langchain/anthropic": "1.3.14",
"@langchain/community": "1.1.4",
"@langchain/core": "1.1.15",
"@langchain/langgraph": "^1.1.0",
Expand Down
7 changes: 7 additions & 0 deletions packages/ai-proxy/src/errors.ts
Original file line number Diff line number Diff line change
Expand Up @@ -69,6 +69,13 @@ export class OpenAIUnprocessableError extends AIUnprocessableError {
}
}

// Raised when a call to the Anthropic provider fails (rate limit,
// authentication failure, or any other API error).
export class AnthropicUnprocessableError extends AIUnprocessableError {
  constructor(message: string) {
    super(message);
    // NOTE(review): `name` ('AnthropicError') does not match the class name
    // ('AnthropicUnprocessableError'). Confirm this mirrors the naming used by
    // the sibling OpenAI error class; otherwise align it with the class name.
    this.name = 'AnthropicError';
  }
}

export class AIToolUnprocessableError extends AIUnprocessableError {
constructor(message: string) {
super(message);
Expand Down
223 changes: 214 additions & 9 deletions packages/ai-proxy/src/provider-dispatcher.ts
Original file line number Diff line number Diff line change
@@ -1,17 +1,31 @@
import type { AiConfiguration, ChatCompletionResponse, ChatCompletionTool } from './provider';
import type {
AiConfiguration,
ChatCompletionResponse,
ChatCompletionTool,
ChatCompletionToolChoice,
} from './provider';
import type { RemoteTools } from './remote-tools';
import type { DispatchBody } from './schemas/route';
import type { BaseMessageLike } from '@langchain/core/messages';
import type { BaseMessage, BaseMessageLike } from '@langchain/core/messages';

import { ChatAnthropic } from '@langchain/anthropic';
import { AIMessage, HumanMessage, SystemMessage, ToolMessage } from '@langchain/core/messages';
import { convertToOpenAIFunction } from '@langchain/core/utils/function_calling';
import { ChatOpenAI } from '@langchain/openai';

import { AINotConfiguredError, OpenAIUnprocessableError } from './errors';
import {
AIBadRequestError,
AINotConfiguredError,
AnthropicUnprocessableError,
OpenAIUnprocessableError,
} from './errors';

// Re-export types for consumers
export type {
AiConfiguration,
AiProvider,
AnthropicConfiguration,
AnthropicModel,
BaseAiConfiguration,
ChatCompletionMessage,
ChatCompletionResponse,
Expand All @@ -21,8 +35,24 @@
} from './provider';
export type { DispatchBody } from './schemas/route';

/**
 * Minimal shape of an OpenAI-format chat message as received by the proxy.
 * Used to translate incoming messages into LangChain message objects.
 */
interface OpenAIMessage {
  // Message author; only these four roles are accepted by the converter.
  role: 'system' | 'user' | 'assistant' | 'tool';
  // Text content; OpenAI allows null (e.g. assistant messages that only carry tool calls).
  content: string | null;
  // Present on assistant messages that requested tool invocations.
  tool_calls?: Array<{
    id: string;
    function: {
      name: string;
      // JSON-encoded arguments string, parsed before forwarding.
      arguments: string;
    };
  }>;
  // Required on 'tool' role messages: links the result to its originating call.
  tool_call_id?: string;
}
export class ProviderDispatcher {
private readonly chatModel: ChatOpenAI | null = null;
private readonly openaiModel: ChatOpenAI | null = null;

private readonly anthropicModel: ChatAnthropic | null = null;

private readonly modelName: string | null = null;

private readonly remoteTools: RemoteTools;

Expand All @@ -31,19 +61,35 @@

if (configuration?.provider === 'openai') {
const { provider, name, ...chatOpenAIOptions } = configuration;
this.chatModel = new ChatOpenAI({
this.openaiModel = new ChatOpenAI({
maxRetries: 0, // No retries by default - this lib is a passthrough
...chatOpenAIOptions,
__includeRawResponse: true,
});
} else if (configuration?.provider === 'anthropic') {
const { provider, name, model, ...clientOptions } = configuration;
this.anthropicModel = new ChatAnthropic({
maxRetries: 0, // No retries by default - this lib is a passthrough
...clientOptions,
model,
});
this.modelName = model;
}
}

async dispatch(body: DispatchBody): Promise<ChatCompletionResponse> {
if (!this.chatModel) {
throw new AINotConfiguredError();
if (this.openaiModel) {
return this.dispatchOpenAI(body);
}

if (this.anthropicModel) {
return this.dispatchAnthropic(body);
}

throw new AINotConfiguredError();
}

private async dispatchOpenAI(body: DispatchBody): Promise<ChatCompletionResponse> {
const {
tools,
messages,
Expand All @@ -53,11 +99,11 @@

const enrichedTools = this.enrichToolDefinitions(tools);
const model = enrichedTools?.length
? this.chatModel.bindTools(enrichedTools, {
? this.openaiModel!.bindTools(enrichedTools, {

Check warning on line 102 in packages/ai-proxy/src/provider-dispatcher.ts

View workflow job for this annotation

GitHub Actions / Linting & Testing (ai-proxy)

Forbidden non-null assertion

Check warning on line 102 in packages/ai-proxy/src/provider-dispatcher.ts

View workflow job for this annotation

GitHub Actions / Linting & Testing (ai-proxy)

Forbidden non-null assertion
tool_choice: toolChoice,
parallel_tool_calls: parallelToolCalls,
})
: this.chatModel;
: this.openaiModel!;

Check warning on line 106 in packages/ai-proxy/src/provider-dispatcher.ts

View workflow job for this annotation

GitHub Actions / Linting & Testing (ai-proxy)

Forbidden non-null assertion

Check warning on line 106 in packages/ai-proxy/src/provider-dispatcher.ts

View workflow job for this annotation

GitHub Actions / Linting & Testing (ai-proxy)

Forbidden non-null assertion

try {
const response = await model.invoke(messages as BaseMessageLike[]);
Expand Down Expand Up @@ -89,6 +135,165 @@
}
}

private async dispatchAnthropic(body: DispatchBody): Promise<ChatCompletionResponse> {
const { tools, messages, tool_choice: toolChoice } = body;

// Convert messages outside try-catch so input validation errors propagate directly
const langChainMessages = this.convertMessagesToLangChain(messages as OpenAIMessage[]);
const enhancedTools = tools ? this.enrichToolDefinitions(tools) : undefined;

try {
let response: AIMessage;

if (enhancedTools?.length) {
const langChainTools = this.convertToolsToLangChain(enhancedTools);
const clientWithTools = this.anthropicModel!.bindTools(langChainTools, {

Check warning on line 150 in packages/ai-proxy/src/provider-dispatcher.ts

View workflow job for this annotation

GitHub Actions / Linting & Testing (ai-proxy)

Forbidden non-null assertion

Check warning on line 150 in packages/ai-proxy/src/provider-dispatcher.ts

View workflow job for this annotation

GitHub Actions / Linting & Testing (ai-proxy)

Forbidden non-null assertion
tool_choice: this.convertToolChoiceToLangChain(toolChoice),
});
response = await clientWithTools.invoke(langChainMessages);
} else {
response = await this.anthropicModel!.invoke(langChainMessages);

Check warning on line 155 in packages/ai-proxy/src/provider-dispatcher.ts

View workflow job for this annotation

GitHub Actions / Linting & Testing (ai-proxy)

Forbidden non-null assertion

Check warning on line 155 in packages/ai-proxy/src/provider-dispatcher.ts

View workflow job for this annotation

GitHub Actions / Linting & Testing (ai-proxy)

Forbidden non-null assertion
}

return this.convertLangChainResponseToOpenAI(response);
} catch (error) {
if (error instanceof AnthropicUnprocessableError) throw error;

const err = error as Error & { status?: number };

if (err.status === 429) {
throw new AnthropicUnprocessableError(`Rate limit exceeded: ${err.message}`);
}

if (err.status === 401) {
throw new AnthropicUnprocessableError(`Authentication failed: ${err.message}`);
}

throw new AnthropicUnprocessableError(`Error while calling Anthropic: ${err.message}`);
}
}

private convertMessagesToLangChain(messages: OpenAIMessage[]): BaseMessage[] {
return messages.map(msg => {
switch (msg.role) {
case 'system':
return new SystemMessage(msg.content || '');
case 'user':
return new HumanMessage(msg.content || '');
case 'assistant':
if (msg.tool_calls) {
return new AIMessage({
content: msg.content || '',
tool_calls: msg.tool_calls.map(tc => ({
id: tc.id,
name: tc.function.name,
args: ProviderDispatcher.parseToolArguments(
tc.function.name,
tc.function.arguments,
),
})),
});
}

return new AIMessage(msg.content || '');
case 'tool':
if (!msg.tool_call_id) {
throw new AIBadRequestError('Tool message is missing required "tool_call_id" field.');
}

return new ToolMessage({
content: msg.content || '',
tool_call_id: msg.tool_call_id,
});
default:
throw new AIBadRequestError(
`Unsupported message role '${msg.role}'. Expected: system, user, assistant, or tool.`,
);
}
});
}

private static parseToolArguments(toolName: string, args: string): Record<string, unknown> {
try {
return JSON.parse(args);
} catch {
throw new AIBadRequestError(
`Invalid JSON in tool_calls arguments for tool '${toolName}': ${args}`,
);
}
}

private convertToolsToLangChain(tools: ChatCompletionTool[]): Array<{
type: 'function';
function: { name: string; description?: string; parameters?: Record<string, unknown> };
}> {
return tools
.filter((tool): tool is ChatCompletionTool & { type: 'function' } => tool.type === 'function')
.map(tool => ({
type: 'function' as const,
function: {
name: tool.function.name,
description: tool.function.description,
parameters: tool.function.parameters as Record<string, unknown> | undefined,
},
}));
}

private convertToolChoiceToLangChain(
toolChoice: ChatCompletionToolChoice | undefined,
): 'auto' | 'any' | 'none' | { type: 'tool'; name: string } | undefined {
if (!toolChoice) return undefined;
if (toolChoice === 'auto') return 'auto';
if (toolChoice === 'none') return 'none';
if (toolChoice === 'required') return 'any';

if (typeof toolChoice === 'object' && toolChoice.type === 'function') {
return { type: 'tool', name: toolChoice.function.name };
}

return undefined;
}

private convertLangChainResponseToOpenAI(response: AIMessage): ChatCompletionResponse {
const toolCalls = response.tool_calls?.map(tc => ({
id: tc.id || `call_${Date.now()}`,
type: 'function' as const,
function: {
name: tc.name,
arguments: JSON.stringify(tc.args),
},
}));

const usageMetadata = response.usage_metadata as
| { input_tokens?: number; output_tokens?: number; total_tokens?: number }
| undefined;

return {
id: response.id || `msg_${Date.now()}`,
object: 'chat.completion',
created: Math.floor(Date.now() / 1000),
model: this.modelName!,

Check warning on line 275 in packages/ai-proxy/src/provider-dispatcher.ts

View workflow job for this annotation

GitHub Actions / Linting & Testing (ai-proxy)

Forbidden non-null assertion

Check warning on line 275 in packages/ai-proxy/src/provider-dispatcher.ts

View workflow job for this annotation

GitHub Actions / Linting & Testing (ai-proxy)

Forbidden non-null assertion
choices: [
{
index: 0,
message: {
role: 'assistant',
content: typeof response.content === 'string' ? response.content : null,
refusal: null,
tool_calls: toolCalls?.length ? toolCalls : undefined,
},
finish_reason: toolCalls?.length ? 'tool_calls' : 'stop',
logprobs: null,
},
],
usage: {
prompt_tokens: usageMetadata?.input_tokens ?? 0,
completion_tokens: usageMetadata?.output_tokens ?? 0,
total_tokens: usageMetadata?.total_tokens ?? 0,
},
};
}

private enrichToolDefinitions(tools?: ChatCompletionTool[]) {
if (!tools || !Array.isArray(tools)) return tools;

Expand Down
22 changes: 19 additions & 3 deletions packages/ai-proxy/src/provider.ts
Original file line number Diff line number Diff line change
@@ -1,3 +1,4 @@
import type { AnthropicInput, AnthropicMessagesModelId } from '@langchain/anthropic';
import type { ChatOpenAIFields, OpenAIChatModelId } from '@langchain/openai';
import type OpenAI from 'openai';

Expand All @@ -7,8 +8,12 @@ export type ChatCompletionMessage = OpenAI.Chat.Completions.ChatCompletionMessag
export type ChatCompletionTool = OpenAI.Chat.Completions.ChatCompletionTool;
export type ChatCompletionToolChoice = OpenAI.Chat.Completions.ChatCompletionToolChoiceOption;

// Anthropic model type from langchain (auto-updated when SDK updates)
// Includes known models for autocomplete + allows custom strings
export type AnthropicModel = AnthropicMessagesModelId;

// AI Provider types
// (the scraped diff showed both the pre- and post-change declaration; only
// the post-change union is valid — two declarations of the same alias would
// not compile)
export type AiProvider = 'openai' | 'anthropic';

/**
* Base configuration common to all AI providers.
Expand All @@ -24,12 +29,23 @@ export type BaseAiConfiguration = {
* OpenAI-specific configuration.
* Extends base with all ChatOpenAI options (temperature, maxTokens, configuration, etc.)
*/
export type OpenAiConfiguration = BaseAiConfiguration &
export type OpenAiConfiguration = Omit<BaseAiConfiguration, 'model'> &
Omit<ChatOpenAIFields, 'model' | 'apiKey'> & {
provider: 'openai';
// OpenAIChatModelId provides autocomplete for known models (gpt-4o, gpt-4-turbo, etc.)
// (string & NonNullable<unknown>) allows custom model strings without losing autocomplete
model: OpenAIChatModelId | (string & NonNullable<unknown>);
};

/**
 * Anthropic-specific configuration.
 * Extends base with all ChatAnthropic options (temperature, maxTokens, etc.)
 * Supports both `apiKey` (unified) and `anthropicApiKey` (native) for flexibility.
 */
export type AnthropicConfiguration = BaseAiConfiguration &
  Omit<AnthropicInput, 'model' | 'apiKey'> & {
    provider: 'anthropic';
    model: AnthropicModel;
  };

// Union of all supported provider configurations, discriminated by `provider`.
export type AiConfiguration = OpenAiConfiguration | AnthropicConfiguration;
2 changes: 1 addition & 1 deletion packages/ai-proxy/src/router.ts
Original file line number Diff line number Diff line change
Expand Up @@ -45,7 +45,7 @@ export class Router {

private validateConfigurations(): void {
for (const config of this.aiConfigurations) {
if (!isModelSupportingTools(config.model)) {
if (config.provider === 'openai' && !isModelSupportingTools(config.model)) {
throw new AIModelNotSupportedError(config.model);
}
}
Expand Down
1 change: 1 addition & 0 deletions packages/ai-proxy/test/.env-test.example
Original file line number Diff line number Diff line change
Expand Up @@ -2,3 +2,4 @@
# This file is used for integration tests

OPENAI_API_KEY=sk-your-openai-api-key-here
ANTHROPIC_API_KEY=sk-ant-your-anthropic-api-key-here
Loading
Loading