From 090ad8f9ce7b279dad1117799ccca2e10a0ab490 Mon Sep 17 00:00:00 2001 From: Sreeram Sreedhar Date: Fri, 3 Apr 2026 21:38:41 -0700 Subject: [PATCH 1/2] refactor mastra to object-based API (v2.0.0) --- apps/docs/integrations/mastra.mdx | 57 +- packages/tools/README.md | 82 ++- packages/tools/package.json | 2 +- packages/tools/src/mastra/processor.ts | 134 ++-- packages/tools/src/mastra/types.ts | 10 +- packages/tools/src/mastra/wrapper.ts | 14 +- packages/tools/src/shared/cache.ts | 6 +- packages/tools/src/shared/types.ts | 4 +- .../tools/test/mastra/integration.test.ts | 535 +++++++-------- packages/tools/test/mastra/unit.test.ts | 611 ++++++++---------- 10 files changed, 708 insertions(+), 747 deletions(-) diff --git a/apps/docs/integrations/mastra.mdx b/apps/docs/integrations/mastra.mdx index 0874c763e..972bd1871 100644 --- a/apps/docs/integrations/mastra.mdx +++ b/apps/docs/integrations/mastra.mdx @@ -34,11 +34,11 @@ const agent = new Agent(withSupermemory( model: openai("gpt-4o"), instructions: "You are a helpful assistant.", }, - "user-123", // containerTag - scopes memories to this user + "user-123", // containerTag - scopes memories to this user + "conv-456", // conversationId - groups messages into the same document { mode: "full", addMemory: "always", - threadId: "conv-456", } )) @@ -52,10 +52,8 @@ const response = await agent.generate("What do you know about me?") const agent = new Agent(withSupermemory( { id: "my-assistant", model: openai("gpt-4o"), ... 
}, "user-123", - { - addMemory: "always", - threadId: "conv-456" // Required for conversation grouping - } + "conv-456", + { addMemory: "always" } )) ``` @@ -100,7 +98,6 @@ sequenceDiagram | `baseUrl` | `string` | `https://api.supermemory.ai` | Custom API endpoint | | `mode` | `"profile" \| "query" \| "full"` | `"profile"` | Memory search mode | | `addMemory` | `"always" \| "never"` | `"never"` | Auto-save conversations | -| `threadId` | `string` | - | Conversation ID for grouping messages | | `verbose` | `boolean` | `false` | Enable debug logging | | `promptTemplate` | `function` | - | Custom memory formatting | @@ -111,19 +108,19 @@ sequenceDiagram **Profile Mode (Default)** - Retrieves the user's complete profile without query-based filtering: ```typescript -const agent = new Agent(withSupermemory(config, "user-123", { mode: "profile" })) +const agent = new Agent(withSupermemory(config, "user-123", "conv-456", { mode: "profile" })) ``` **Query Mode** - Searches memories based on the user's message: ```typescript -const agent = new Agent(withSupermemory(config, "user-123", { mode: "query" })) +const agent = new Agent(withSupermemory(config, "user-123", "conv-456", { mode: "query" })) ``` **Full Mode** - Combines profile AND query-based search for maximum context: ```typescript -const agent = new Agent(withSupermemory(config, "user-123", { mode: "full" })) +const agent = new Agent(withSupermemory(config, "user-123", "conv-456", { mode: "full" })) ### Mode Comparison @@ -137,16 +134,14 @@ const agent = new Agent(withSupermemory(config, "user-123", { mode: "full" })) ## Saving Conversations -Enable automatic conversation saving with `addMemory: "always"`. A `threadId` is required to group messages: +Enable automatic conversation saving with `addMemory: "always"`. The `conversationId` parameter groups messages into the same document: ```typescript const agent = new Agent(withSupermemory( { id: "my-assistant", model: openai("gpt-4o"), instructions: "..." 
}, "user-123", - { - addMemory: "always", - threadId: "conv-456", - } + "conv-456", + { addMemory: "always" } )) // All messages in this conversation are saved @@ -154,10 +149,6 @@ await agent.generate("I prefer TypeScript over JavaScript") await agent.generate("My favorite framework is Next.js") ``` - - Without a `threadId`, the output processor will log a warning and skip saving. Always provide a `threadId` when using `addMemory: "always"`. - - --- ## Custom Prompt Templates @@ -183,6 +174,7 @@ const claudePrompt = (data: MemoryPromptData) => ` const agent = new Agent(withSupermemory( { id: "my-assistant", model: openai("gpt-4o"), instructions: "..." }, "user-123", + "conv-456", { mode: "full", promptTemplate: claudePrompt, @@ -210,7 +202,7 @@ const agent = new Agent({ name: "My Assistant", model: openai("gpt-4o"), inputProcessors: [ - createSupermemoryProcessor("user-123", { + createSupermemoryProcessor("user-123", "conv-456", { mode: "full", verbose: true, }), @@ -232,9 +224,8 @@ const agent = new Agent({ name: "My Assistant", model: openai("gpt-4o"), outputProcessors: [ - createSupermemoryOutputProcessor("user-123", { + createSupermemoryOutputProcessor("user-123", "conv-456", { addMemory: "always", - threadId: "conv-456", }), ], }) @@ -249,10 +240,9 @@ import { Agent } from "@mastra/core/agent" import { createSupermemoryProcessors } from "@supermemory/tools/mastra" import { openai } from "@ai-sdk/openai" -const { input, output } = createSupermemoryProcessors("user-123", { +const { input, output } = createSupermemoryProcessors("user-123", "conv-456", { mode: "full", addMemory: "always", - threadId: "conv-456", verbose: true, }) @@ -269,7 +259,7 @@ const agent = new Agent({ ## Using RequestContext -Mastra's `RequestContext` can provide `threadId` dynamically: +Mastra's `RequestContext` can provide a dynamic conversation ID override: ```typescript import { Agent } from "@mastra/core/agent" @@ -280,14 +270,14 @@ import { openai } from "@ai-sdk/openai" const agent 
= new Agent(withSupermemory( { id: "my-assistant", model: openai("gpt-4o"), instructions: "..." }, "user-123", + "default-conv-id", { mode: "full", addMemory: "always", - // threadId not set - will use RequestContext } )) -// Set threadId dynamically via RequestContext +// Override conversationId dynamically via RequestContext const ctx = new RequestContext() ctx.set(MASTRA_THREAD_ID_KEY, "dynamic-thread-id") @@ -304,6 +294,7 @@ Enable detailed logging for debugging: const agent = new Agent(withSupermemory( { id: "my-assistant", model: openai("gpt-4o"), instructions: "..." }, "user-123", + "conv-456", { verbose: true } )) @@ -330,7 +321,8 @@ const agent = new Agent(withSupermemory( inputProcessors: [myLoggingProcessor], outputProcessors: [myAnalyticsProcessor], }, - "user-123" + "user-123", + "conv-456" )) ``` @@ -346,6 +338,7 @@ Enhances a Mastra agent config with memory capabilities. function withSupermemory( config: T, containerTag: string, + conversationId: string, options?: SupermemoryMastraOptions ): T ``` @@ -353,6 +346,7 @@ function withSupermemory( **Parameters:** - `config` - The Mastra agent configuration object - `containerTag` - User/container ID for scoping memories +- `conversationId` - Conversation ID to group messages into the same document - `options` - Configuration options **Returns:** Enhanced config with Supermemory processors injected @@ -364,6 +358,7 @@ Creates an input processor for memory injection. ```typescript function createSupermemoryProcessor( containerTag: string, + conversationId: string, options?: SupermemoryMastraOptions ): SupermemoryInputProcessor ``` @@ -375,6 +370,7 @@ Creates an output processor for conversation saving. ```typescript function createSupermemoryOutputProcessor( containerTag: string, + conversationId: string, options?: SupermemoryMastraOptions ): SupermemoryOutputProcessor ``` @@ -386,6 +382,7 @@ Creates both processors with shared configuration. 
```typescript function createSupermemoryProcessors( containerTag: string, + conversationId: string, options?: SupermemoryMastraOptions ): { input: SupermemoryInputProcessor @@ -401,7 +398,6 @@ interface SupermemoryMastraOptions { baseUrl?: string mode?: "profile" | "query" | "full" addMemory?: "always" | "never" - threadId?: string verbose?: boolean promptTemplate?: (data: MemoryPromptData) => string } @@ -423,13 +419,14 @@ Processors gracefully handle errors without breaking the agent: - **API errors** - Logged and skipped; agent continues without memories - **Missing API key** - Throws immediately with helpful error message -- **Missing threadId** - Warns in console; skips saving +- **Empty conversationId** - Throws immediately with helpful `[supermemory]`-prefixed error message ```typescript // Missing API key throws immediately const agent = new Agent(withSupermemory( { id: "my-assistant", model: openai("gpt-4o"), instructions: "..." }, "user-123", + "conv-456", { apiKey: undefined } // Will check SUPERMEMORY_API_KEY env )) // Error: SUPERMEMORY_API_KEY is not set diff --git a/packages/tools/README.md b/packages/tools/README.md index 2d03411d3..c35d223b5 100644 --- a/packages/tools/README.md +++ b/packages/tools/README.md @@ -409,10 +409,42 @@ const addResult = await tools.addMemory({ ### Mastra Usage +> **⚠️ Breaking Change in v2.0.0**: The Mastra integration API has been updated to use object-based parameters instead of positional parameters for better clarity. See the migration guide below. + Add persistent memory to [Mastra](https://mastra.ai) AI agents. 
The integration provides processors that: - **Input Processor**: Fetches relevant memories and injects them into the system prompt before LLM calls - **Output Processor**: Optionally saves conversations to Supermemory after responses +#### Migration from v1.x to v2.0.0 + +**v1.x (old API with positional parameters):** +```typescript +// Old API - DEPRECATED +withSupermemory(config, "user-123", "conv-456", { mode: "full" }) +new SupermemoryInputProcessor("user-123", "conv-456", { mode: "full" }) +createSupermemoryProcessors("user-123", "conv-456", { mode: "full" }) +``` + +**v2.0.0 (new API with object parameters):** +```typescript +// New API - clearer with explicit key-value pairs +withSupermemory(config, { + containerTag: "user-123", + conversationId: "conv-456", + mode: "full" +}) +new SupermemoryInputProcessor({ + containerTag: "user-123", + conversationId: "conv-456", + mode: "full" +}) +createSupermemoryProcessors({ + containerTag: "user-123", + conversationId: "conv-456", + mode: "full" +}) +``` + #### Quick Start with `withSupermemory` Wrapper The simplest way to add memory to a Mastra agent - wrap your config before creating the Agent: @@ -430,11 +462,11 @@ const agent = new Agent(withSupermemory( model: openai("gpt-4o"), instructions: "You are a helpful assistant.", }, - "user-123", // containerTag - scopes memories to this user { + containerTag: "user-123", // scopes memories to this user + conversationId: "conv-456", // groups messages into the same document mode: "full", addMemory: "always", - threadId: "conv-456", } )) @@ -451,10 +483,11 @@ import { Agent } from "@mastra/core/agent" import { createSupermemoryProcessors } from "@supermemory/tools/mastra" import { openai } from "@ai-sdk/openai" -const { input, output } = createSupermemoryProcessors("user-123", { +const { input, output } = createSupermemoryProcessors({ + containerTag: "user-123", + conversationId: "conv-456", mode: "full", addMemory: "always", - threadId: "conv-456", verbose: true, // 
Enable logging }) @@ -481,12 +514,13 @@ import { openai } from "@ai-sdk/openai" async function main() { const userId = "user-alex-123" - const threadId = `thread-${Date.now()}` + const conversationId = `conv-${Date.now()}` - const { input, output } = createSupermemoryProcessors(userId, { + const { input, output } = createSupermemoryProcessors({ + containerTag: userId, + conversationId: conversationId, mode: "profile", // Fetch user profile memories addMemory: "always", // Save all conversations - threadId, verbose: true, }) @@ -522,13 +556,25 @@ main() ```typescript // Profile mode - good for general personalization -const { input } = createSupermemoryProcessors("user-123", { mode: "profile" }) +const { input } = createSupermemoryProcessors({ + containerTag: "user-123", + conversationId: "conv-456", + mode: "profile" +}) // Query mode - good for specific lookups -const { input } = createSupermemoryProcessors("user-123", { mode: "query" }) +const { input } = createSupermemoryProcessors({ + containerTag: "user-123", + conversationId: "conv-456", + mode: "query" +}) // Full mode - comprehensive context -const { input } = createSupermemoryProcessors("user-123", { mode: "full" }) +const { input } = createSupermemoryProcessors({ + containerTag: "user-123", + conversationId: "conv-456", + mode: "full" +}) ``` #### Custom Prompt Templates @@ -545,25 +591,28 @@ ${data.generalSearchMemories} `.trim() -const { input, output } = createSupermemoryProcessors("user-123", { +const { input, output } = createSupermemoryProcessors({ + containerTag: "user-123", + conversationId: "conv-456", mode: "full", promptTemplate: customTemplate, }) ``` -#### Using RequestContext for Dynamic Thread IDs +#### Using RequestContext for Dynamic Conversation IDs -Instead of hardcoding `threadId`, use Mastra's RequestContext for dynamic values: +Mastra's RequestContext can override the `conversationId` dynamically per request: ```typescript import { Agent } from "@mastra/core/agent" import { 
RequestContext, MASTRA_THREAD_ID_KEY } from "@mastra/core/request-context" import { createSupermemoryProcessors } from "@supermemory/tools/mastra" -const { input, output } = createSupermemoryProcessors("user-123", { +const { input, output } = createSupermemoryProcessors({ + containerTag: "user-123", + conversationId: "default-conv-id", mode: "profile", addMemory: "always", - // threadId not set here - will be read from RequestContext }) const agent = new Agent({ @@ -574,7 +623,7 @@ const agent = new Agent({ outputProcessors: [output], }) -// Set threadId dynamically per request +// Override conversationId dynamically per request const ctx = new RequestContext() ctx.set(MASTRA_THREAD_ID_KEY, "dynamic-thread-123") @@ -589,7 +638,6 @@ interface SupermemoryMastraOptions { baseUrl?: string // Custom API endpoint mode?: "profile" | "query" | "full" // Memory search mode (default: "profile") addMemory?: "always" | "never" // Auto-save conversations (default: "never") - threadId?: string // Conversation ID for grouping messages verbose?: boolean // Enable debug logging (default: false) promptTemplate?: (data: MemoryPromptData) => string // Custom memory formatting } diff --git a/packages/tools/package.json b/packages/tools/package.json index 8d192aba6..80004a6d1 100644 --- a/packages/tools/package.json +++ b/packages/tools/package.json @@ -1,7 +1,7 @@ { "name": "@supermemory/tools", "type": "module", - "version": "1.4.01", + "version": "2.0.0", "description": "Memory tools for AI SDK and OpenAI function calling with supermemory", "scripts": { "build": "tsdown", diff --git a/packages/tools/src/mastra/processor.ts b/packages/tools/src/mastra/processor.ts index d2975127d..d6cf08554 100644 --- a/packages/tools/src/mastra/processor.ts +++ b/packages/tools/src/mastra/processor.ts @@ -43,11 +43,11 @@ import type { */ interface ProcessorContext { containerTag: string + conversationId: string apiKey: string baseUrl: string mode: MemoryMode addMemory: "always" | "never" - threadId?: 
string logger: Logger promptTemplate?: PromptTemplate memoryCache: MemoryCache @@ -57,20 +57,41 @@ interface ProcessorContext { * Creates the shared processor context from options. */ function createProcessorContext( - containerTag: string, - options: SupermemoryMastraOptions = {}, + options: SupermemoryMastraOptions, ): ProcessorContext { + const { containerTag, conversationId } = options + + if ( + !containerTag || + typeof containerTag !== "string" || + !containerTag.trim() + ) { + throw new Error( + "[supermemory] containerTag is required and must be a non-empty string. " + + "Pass it in the options object: new SupermemoryInputProcessor({ containerTag: 'user-123', conversationId: 'conv-456' })", + ) + } + + if (typeof conversationId !== "string" || !conversationId.trim()) { + throw new Error( + "[supermemory] conversationId is required and must be a non-empty string. " + + "Pass a unique identifier (e.g., session ID, chat ID) in the options object. " + + "This ensures messages are grouped into the same document for a conversation. " + + "Example: new SupermemoryInputProcessor({ containerTag: 'user-123', conversationId: 'conversation-456' })", + ) + } + const apiKey = validateApiKey(options.apiKey) const baseUrl = normalizeBaseUrl(options.baseUrl) const logger = createLogger(options.verbose ?? false) return { containerTag, + conversationId, apiKey, baseUrl, mode: options.mode ?? "profile", addMemory: options.addMemory ?? "never", - threadId: options.threadId, logger, promptTemplate: options.promptTemplate, memoryCache: new MemoryCache(), @@ -78,19 +99,25 @@ function createProcessorContext( } /** - * Gets the effective threadId from options or RequestContext. + * Gets the effective conversationId from context or RequestContext. + * + * Priority order: + * 1. RequestContext + * 2. 
Default conversationId from processor options */ -function getEffectiveThreadId( +function getEffectiveConversationId( ctx: ProcessorContext, requestContext?: RequestContext, -): string | undefined { - if (ctx.threadId) { - return ctx.threadId - } +): string { + // Check RequestContext FIRST to allow dynamic per-request override if (requestContext) { - return requestContext.get(MASTRA_THREAD_ID_KEY) as string | undefined + const fromCtx = requestContext.get(MASTRA_THREAD_ID_KEY) as + | string + | undefined + if (fromCtx) return fromCtx } - return undefined + // Fall back to required default conversationId + return ctx.conversationId } /** @@ -111,7 +138,9 @@ function getEffectiveThreadId( * name: "My Agent", * model: openai("gpt-4o"), * inputProcessors: [ - * new SupermemoryInputProcessor("user-123", { + * new SupermemoryInputProcessor({ + * containerTag: "user-123", + * conversationId: "conv-456", * mode: "full", * verbose: true, * }), @@ -125,8 +154,8 @@ export class SupermemoryInputProcessor implements Processor { private ctx: ProcessorContext - constructor(containerTag: string, options: SupermemoryMastraOptions = {}) { - this.ctx = createProcessorContext(containerTag, options) + constructor(options: SupermemoryMastraOptions) { + this.ctx = createProcessorContext(options) } async processInput(args: ProcessInputArgs): Promise { @@ -146,10 +175,13 @@ export class SupermemoryInputProcessor implements Processor { return messageList } - const effectiveThreadId = getEffectiveThreadId(this.ctx, requestContext) + const effectiveConversationId = getEffectiveConversationId( + this.ctx, + requestContext, + ) const turnKey = MemoryCache.makeTurnKey( this.ctx.containerTag, - effectiveThreadId, + effectiveConversationId, this.ctx.mode, queryText || "", ) @@ -163,7 +195,7 @@ export class SupermemoryInputProcessor implements Processor { this.ctx.logger.info("Starting memory search", { containerTag: this.ctx.containerTag, - threadId: effectiveThreadId, + conversationId: 
effectiveConversationId, mode: this.ctx.mode, }) @@ -213,9 +245,10 @@ export class SupermemoryInputProcessor implements Processor { * name: "My Agent", * model: openai("gpt-4o"), * outputProcessors: [ - * new SupermemoryOutputProcessor("user-123", { + * new SupermemoryOutputProcessor({ + * containerTag: "user-123", + * conversationId: "conv-456", * addMemory: "always", - * threadId: "conv-456", * }), * ], * }) @@ -227,26 +260,23 @@ export class SupermemoryOutputProcessor implements Processor { private ctx: ProcessorContext - constructor(containerTag: string, options: SupermemoryMastraOptions = {}) { - this.ctx = createProcessorContext(containerTag, options) + constructor(options: SupermemoryMastraOptions) { + this.ctx = createProcessorContext(options) } async processOutputResult( args: ProcessOutputResultArgs, ): Promise { - const { messages, messageList, requestContext } = args + const { messages, requestContext } = args if (this.ctx.addMemory !== "always") { return messages } - const effectiveThreadId = getEffectiveThreadId(this.ctx, requestContext) - if (!effectiveThreadId) { - this.ctx.logger.warn( - "No threadId provided for conversation save. 
Provide via options.threadId or RequestContext.", - ) - return messages - } + const effectiveConversationId = getEffectiveConversationId( + this.ctx, + requestContext, + ) try { const conversationMessages = this.convertToConversationMessages(messages) @@ -257,7 +287,7 @@ export class SupermemoryOutputProcessor implements Processor { } const response = await addConversation({ - conversationId: effectiveThreadId, + conversationId: effectiveConversationId, messages: conversationMessages, containerTags: [this.ctx.containerTag], apiKey: this.ctx.apiKey, @@ -266,7 +296,7 @@ export class SupermemoryOutputProcessor implements Processor { this.ctx.logger.info("Conversation saved successfully", { containerTag: this.ctx.containerTag, - conversationId: effectiveThreadId, + conversationId: effectiveConversationId, messageCount: conversationMessages.length, responseId: response.id, }) @@ -323,8 +353,7 @@ export class SupermemoryOutputProcessor implements Processor { /** * Creates a Supermemory input processor for memory injection. 
* - * @param containerTag - The container tag/user ID for scoping memories - * @param options - Configuration options + * @param options - Configuration options including containerTag and conversationId * @returns Configured SupermemoryInputProcessor instance * * @example @@ -333,7 +362,9 @@ export class SupermemoryOutputProcessor implements Processor { * import { createSupermemoryProcessor } from "@supermemory/tools/mastra" * import { openai } from "@ai-sdk/openai" * - * const processor = createSupermemoryProcessor("user-123", { + * const processor = createSupermemoryProcessor({ + * containerTag: "user-123", + * conversationId: "conv-456", * mode: "full", * verbose: true, * }) @@ -347,17 +378,15 @@ export class SupermemoryOutputProcessor implements Processor { * ``` */ export function createSupermemoryProcessor( - containerTag: string, - options: SupermemoryMastraOptions = {}, + options: SupermemoryMastraOptions, ): SupermemoryInputProcessor { - return new SupermemoryInputProcessor(containerTag, options) + return new SupermemoryInputProcessor(options) } /** * Creates a Supermemory output processor for saving conversations. 
* - * @param containerTag - The container tag/user ID for scoping memories - * @param options - Configuration options + * @param options - Configuration options including containerTag and conversationId * @returns Configured SupermemoryOutputProcessor instance * * @example @@ -366,9 +395,10 @@ export function createSupermemoryProcessor( * import { createSupermemoryOutputProcessor } from "@supermemory/tools/mastra" * import { openai } from "@ai-sdk/openai" * - * const processor = createSupermemoryOutputProcessor("user-123", { + * const processor = createSupermemoryOutputProcessor({ + * containerTag: "user-123", + * conversationId: "conv-456", * addMemory: "always", - * threadId: "conv-456", * }) * * const agent = new Agent({ @@ -380,10 +410,9 @@ export function createSupermemoryProcessor( * ``` */ export function createSupermemoryOutputProcessor( - containerTag: string, - options: SupermemoryMastraOptions = {}, + options: SupermemoryMastraOptions, ): SupermemoryOutputProcessor { - return new SupermemoryOutputProcessor(containerTag, options) + return new SupermemoryOutputProcessor(options) } /** @@ -392,8 +421,7 @@ export function createSupermemoryOutputProcessor( * Use this when you want both memory injection and conversation saving * with consistent settings across both processors. 
* - * @param containerTag - The container tag/user ID for scoping memories - * @param options - Configuration options shared by both processors + * @param options - Configuration options shared by both processors including containerTag and conversationId * @returns Object containing both input and output processors * * @example @@ -402,10 +430,11 @@ export function createSupermemoryOutputProcessor( * import { createSupermemoryProcessors } from "@supermemory/tools/mastra" * import { openai } from "@ai-sdk/openai" * - * const { input, output } = createSupermemoryProcessors("user-123", { + * const { input, output } = createSupermemoryProcessors({ + * containerTag: "user-123", + * conversationId: "conv-456", * mode: "full", * addMemory: "always", - * threadId: "conv-456", * }) * * const agent = new Agent({ @@ -418,14 +447,13 @@ export function createSupermemoryOutputProcessor( * ``` */ export function createSupermemoryProcessors( - containerTag: string, - options: SupermemoryMastraOptions = {}, + options: SupermemoryMastraOptions, ): { input: SupermemoryInputProcessor output: SupermemoryOutputProcessor } { return { - input: new SupermemoryInputProcessor(containerTag, options), - output: new SupermemoryOutputProcessor(containerTag, options), + input: new SupermemoryInputProcessor(options), + output: new SupermemoryOutputProcessor(options), } } diff --git a/packages/tools/src/mastra/types.ts b/packages/tools/src/mastra/types.ts index efe2e7e4e..49d7d2ee0 100644 --- a/packages/tools/src/mastra/types.ts +++ b/packages/tools/src/mastra/types.ts @@ -38,10 +38,14 @@ export type { RequestContext } from "@mastra/core/request-context" */ export interface SupermemoryMastraOptions extends SupermemoryBaseOptions { /** - * When using the output processor, set this to enable automatic conversation saving. - * The threadId is used to group messages into a single conversation. 
+ * Container tag for scoping memories (e.g., user ID) */ - threadId?: string + containerTag: string + + /** + * Conversation ID for grouping messages into the same document + */ + conversationId: string } export type { PromptTemplate, MemoryMode, AddMemoryMode, MemoryPromptData } diff --git a/packages/tools/src/mastra/wrapper.ts b/packages/tools/src/mastra/wrapper.ts index 8137e7eed..515656738 100644 --- a/packages/tools/src/mastra/wrapper.ts +++ b/packages/tools/src/mastra/wrapper.ts @@ -37,8 +37,7 @@ interface AgentConfig { * - Output processor: Optionally saves conversations after responses * * @param config - The Mastra agent configuration to enhance - * @param containerTag - The container tag/user ID for scoping memories - * @param options - Configuration options for memory behavior + * @param options - Configuration options including containerTag, conversationId, and memory behavior * @returns Enhanced agent config with Supermemory processors injected * * @example @@ -54,11 +53,11 @@ interface AgentConfig { * model: openai("gpt-4o"), * instructions: "You are a helpful assistant.", * }, - * "user-123", * { + * containerTag: "user-123", + * conversationId: "conv-456", * mode: "full", * addMemory: "always", - * threadId: "conv-456", * } * ) * @@ -69,13 +68,12 @@ interface AgentConfig { */ export function withSupermemory( config: T, - containerTag: string, - options: SupermemoryMastraOptions = {}, + options: SupermemoryMastraOptions, ): T { validateApiKey(options.apiKey) - const inputProcessor = new SupermemoryInputProcessor(containerTag, options) - const outputProcessor = new SupermemoryOutputProcessor(containerTag, options) + const inputProcessor = new SupermemoryInputProcessor(options) + const outputProcessor = new SupermemoryOutputProcessor(options) const existingInputProcessors = config.inputProcessors ?? [] const existingOutputProcessors = config.outputProcessors ?? 
[] diff --git a/packages/tools/src/shared/cache.ts b/packages/tools/src/shared/cache.ts index 15ce16fe4..002b02dad 100644 --- a/packages/tools/src/shared/cache.ts +++ b/packages/tools/src/shared/cache.ts @@ -13,19 +13,19 @@ export class MemoryCache { * Normalizes the message by trimming and collapsing whitespace. * * @param containerTag - The container tag/user ID - * @param threadId - Optional thread/conversation ID + * @param conversationId - Optional conversation ID * @param mode - The memory retrieval mode * @param message - The user message content * @returns A unique cache key for this turn */ static makeTurnKey( containerTag: string, - threadId: string | undefined, + conversationId: string | undefined, mode: MemoryMode, message: string, ): string { const normalizedMessage = message.trim().replace(/\s+/g, " ") - return `${containerTag}:${threadId || ""}:${mode}:${normalizedMessage}` + return `${containerTag}:${conversationId || ""}:${mode}:${normalizedMessage}` } /** diff --git a/packages/tools/src/shared/types.ts b/packages/tools/src/shared/types.ts index 421785f52..3103a88fd 100644 --- a/packages/tools/src/shared/types.ts +++ b/packages/tools/src/shared/types.ts @@ -113,8 +113,8 @@ export interface SupermemoryBaseOptions { apiKey?: string /** Custom Supermemory API base URL */ baseUrl?: string - /** Optional conversation/thread ID to group messages for contextual memory generation */ - threadId?: string + /** Conversation ID to group messages into the same document for a conversation */ + conversationId?: string /** Memory retrieval mode */ mode?: MemoryMode /** Memory persistence mode */ diff --git a/packages/tools/test/mastra/integration.test.ts b/packages/tools/test/mastra/integration.test.ts index f33b974ea..2e1d573fb 100644 --- a/packages/tools/test/mastra/integration.test.ts +++ b/packages/tools/test/mastra/integration.test.ts @@ -37,6 +37,7 @@ const INTEGRATION_CONFIG = { apiKey: process.env.SUPERMEMORY_API_KEY || "", baseUrl: 
process.env.SUPERMEMORY_BASE_URL || "https://api.supermemory.ai", containerTag: "integration-test-mastra", + conversationId: `integration-test-${Date.now()}`, } const shouldRunIntegration = !!process.env.SUPERMEMORY_API_KEY @@ -95,32 +96,49 @@ const createIntegrationMessageList = (): MessageList & { } } +const createInputArgs = ( + overrides: Partial = {}, +): ProcessInputArgs => + ({ + messages: [], + systemMessages: [], + messageList: createIntegrationMessageList(), + abort: vi.fn() as never, + retryCount: 0, + ...overrides, + }) as ProcessInputArgs + +const createOutputArgs = ( + overrides: Partial = {}, +): ProcessOutputResultArgs => + ({ + messages: [], + messageList: createIntegrationMessageList(), + abort: vi.fn() as never, + retryCount: 0, + ...overrides, + }) as ProcessOutputResultArgs + describe.skipIf(!shouldRunIntegration)( "Integration: Mastra processors with real API", () => { describe("SupermemoryInputProcessor", () => { it("should fetch real memories and inject into messageList", async () => { - const processor = new SupermemoryInputProcessor( - INTEGRATION_CONFIG.containerTag, - { - apiKey: INTEGRATION_CONFIG.apiKey, - baseUrl: INTEGRATION_CONFIG.baseUrl, - mode: "profile", - }, - ) + const processor = new SupermemoryInputProcessor({ + containerTag: INTEGRATION_CONFIG.containerTag, + conversationId: INTEGRATION_CONFIG.conversationId, + apiKey: INTEGRATION_CONFIG.apiKey, + baseUrl: INTEGRATION_CONFIG.baseUrl, + mode: "profile", + }) const messageList = createIntegrationMessageList() - const messages: MastraDBMessage[] = [ - createMessage("user", "Hello, what do you know about me?"), - ] - - const args: ProcessInputArgs = { - messages, - systemMessages: [], + const args = createInputArgs({ + messages: [ + createMessage("user", "Hello, what do you know about me?"), + ], messageList, - abort: vi.fn() as never, - retryCount: 0, - } + }) await processor.processInput(args) @@ -132,31 +150,25 @@ describe.skipIf(!shouldRunIntegration)( it("should use query 
mode with user message as search query", async () => { const fetchSpy = vi.spyOn(globalThis, "fetch") - const processor = new SupermemoryInputProcessor( - INTEGRATION_CONFIG.containerTag, - { - apiKey: INTEGRATION_CONFIG.apiKey, - baseUrl: INTEGRATION_CONFIG.baseUrl, - mode: "query", - }, + const processor = new SupermemoryInputProcessor({ + containerTag: INTEGRATION_CONFIG.containerTag, + conversationId: INTEGRATION_CONFIG.conversationId, + apiKey: INTEGRATION_CONFIG.apiKey, + baseUrl: INTEGRATION_CONFIG.baseUrl, + mode: "query", + }) + + await processor.processInput( + createInputArgs({ + messages: [ + createMessage( + "user", + "What are my favorite programming languages?", + ), + ], + }), ) - const messageList = createIntegrationMessageList() - const args: ProcessInputArgs = { - messages: [ - createMessage( - "user", - "What are my favorite programming languages?", - ), - ], - systemMessages: [], - messageList, - abort: vi.fn() as never, - retryCount: 0, - } - - await processor.processInput(args) - const profileCalls = fetchSpy.mock.calls.filter( (call) => typeof call[0] === "string" && call[0].includes("/v4/profile"), @@ -177,26 +189,20 @@ describe.skipIf(!shouldRunIntegration)( it("should use full mode with both profile and query", async () => { const fetchSpy = vi.spyOn(globalThis, "fetch") - const processor = new SupermemoryInputProcessor( - INTEGRATION_CONFIG.containerTag, - { - apiKey: INTEGRATION_CONFIG.apiKey, - baseUrl: INTEGRATION_CONFIG.baseUrl, - mode: "full", - }, + const processor = new SupermemoryInputProcessor({ + containerTag: INTEGRATION_CONFIG.containerTag, + conversationId: INTEGRATION_CONFIG.conversationId, + apiKey: INTEGRATION_CONFIG.apiKey, + baseUrl: INTEGRATION_CONFIG.baseUrl, + mode: "full", + }) + + await processor.processInput( + createInputArgs({ + messages: [createMessage("user", "Full mode test query")], + }), ) - const messageList = createIntegrationMessageList() - const args: ProcessInputArgs = { - messages: 
[createMessage("user", "Full mode test query")], - systemMessages: [], - messageList, - abort: vi.fn() as never, - retryCount: 0, - } - - await processor.processInput(args) - const profileCalls = fetchSpy.mock.calls.filter( (call) => typeof call[0] === "string" && call[0].includes("/v4/profile"), @@ -217,42 +223,25 @@ describe.skipIf(!shouldRunIntegration)( it("should cache memories for repeated calls with same message", async () => { const fetchSpy = vi.spyOn(globalThis, "fetch") - const processor = new SupermemoryInputProcessor( - INTEGRATION_CONFIG.containerTag, - { - apiKey: INTEGRATION_CONFIG.apiKey, - baseUrl: INTEGRATION_CONFIG.baseUrl, - mode: "profile", - }, - ) + const processor = new SupermemoryInputProcessor({ + containerTag: INTEGRATION_CONFIG.containerTag, + conversationId: INTEGRATION_CONFIG.conversationId, + apiKey: INTEGRATION_CONFIG.apiKey, + baseUrl: INTEGRATION_CONFIG.baseUrl, + mode: "profile", + }) const messages: MastraDBMessage[] = [ createMessage("user", "Cache test message"), ] - const args1: ProcessInputArgs = { - messages, - systemMessages: [], - messageList: createIntegrationMessageList(), - abort: vi.fn() as never, - retryCount: 0, - } - - await processor.processInput(args1) + await processor.processInput(createInputArgs({ messages })) const callsAfterFirst = fetchSpy.mock.calls.filter( (call) => typeof call[0] === "string" && call[0].includes("/v4/profile"), ).length - const args2: ProcessInputArgs = { - messages, - systemMessages: [], - messageList: createIntegrationMessageList(), - abort: vi.fn() as never, - retryCount: 0, - } - - await processor.processInput(args2) + await processor.processInput(createInputArgs({ messages })) const callsAfterSecond = fetchSpy.mock.calls.filter( (call) => typeof call[0] === "string" && call[0].includes("/v4/profile"), @@ -269,26 +258,22 @@ describe.skipIf(!shouldRunIntegration)( generalSearchMemories: string }) => `${data.userMemories}` - const processor = new SupermemoryInputProcessor( - 
INTEGRATION_CONFIG.containerTag, - { - apiKey: INTEGRATION_CONFIG.apiKey, - baseUrl: INTEGRATION_CONFIG.baseUrl, - mode: "profile", - promptTemplate: customTemplate, - }, - ) + const processor = new SupermemoryInputProcessor({ + containerTag: INTEGRATION_CONFIG.containerTag, + conversationId: INTEGRATION_CONFIG.conversationId, + apiKey: INTEGRATION_CONFIG.apiKey, + baseUrl: INTEGRATION_CONFIG.baseUrl, + mode: "profile", + promptTemplate: customTemplate, + }) const messageList = createIntegrationMessageList() - const args: ProcessInputArgs = { - messages: [createMessage("user", "Custom template test")], - systemMessages: [], - messageList, - abort: vi.fn() as never, - retryCount: 0, - } - - await processor.processInput(args) + await processor.processInput( + createInputArgs({ + messages: [createMessage("user", "Custom template test")], + messageList, + }), + ) const systemContent = messageList.getSystemContent() expect(systemContent).toMatch(/.*<\/mastra-memories>/s) @@ -299,30 +284,25 @@ describe.skipIf(!shouldRunIntegration)( it("should save conversation when addMemory is always", async () => { const fetchSpy = vi.spyOn(globalThis, "fetch") - const threadId = `test-mastra-${Date.now()}` - - const processor = new SupermemoryOutputProcessor( - INTEGRATION_CONFIG.containerTag, - { - apiKey: INTEGRATION_CONFIG.apiKey, - baseUrl: INTEGRATION_CONFIG.baseUrl, - addMemory: "always", - threadId, - }, + const conversationId = `test-mastra-${Date.now()}` + + const processor = new SupermemoryOutputProcessor({ + containerTag: INTEGRATION_CONFIG.containerTag, + conversationId: conversationId, + apiKey: INTEGRATION_CONFIG.apiKey, + baseUrl: INTEGRATION_CONFIG.baseUrl, + addMemory: "always", + }) + + await processor.processOutputResult( + createOutputArgs({ + messages: [ + createMessage("user", "Hello from Mastra integration test"), + createMessage("assistant", "Hi! 
I'm responding to the test."), + ], + }), ) - const args: ProcessOutputResultArgs = { - messages: [ - createMessage("user", "Hello from Mastra integration test"), - createMessage("assistant", "Hi! I'm responding to the test."), - ], - messageList: createIntegrationMessageList(), - abort: vi.fn() as never, - retryCount: 0, - } - - await processor.processOutputResult(args) - const conversationCalls = fetchSpy.mock.calls.filter( (call) => typeof call[0] === "string" && @@ -336,28 +316,23 @@ describe.skipIf(!shouldRunIntegration)( it("should not save when addMemory is never", async () => { const fetchSpy = vi.spyOn(globalThis, "fetch") - const processor = new SupermemoryOutputProcessor( - INTEGRATION_CONFIG.containerTag, - { - apiKey: INTEGRATION_CONFIG.apiKey, - baseUrl: INTEGRATION_CONFIG.baseUrl, - addMemory: "never", - threadId: "test-thread", - }, + const processor = new SupermemoryOutputProcessor({ + containerTag: INTEGRATION_CONFIG.containerTag, + conversationId: "test-thread", + apiKey: INTEGRATION_CONFIG.apiKey, + baseUrl: INTEGRATION_CONFIG.baseUrl, + addMemory: "never", + }) + + await processor.processOutputResult( + createOutputArgs({ + messages: [ + createMessage("user", "This should not be saved"), + createMessage("assistant", "Agreed"), + ], + }), ) - const args: ProcessOutputResultArgs = { - messages: [ - createMessage("user", "This should not be saved"), - createMessage("assistant", "Agreed"), - ], - messageList: createIntegrationMessageList(), - abort: vi.fn() as never, - retryCount: 0, - } - - await processor.processOutputResult(args) - const conversationCalls = fetchSpy.mock.calls.filter( (call) => typeof call[0] === "string" && @@ -368,34 +343,30 @@ describe.skipIf(!shouldRunIntegration)( fetchSpy.mockRestore() }) - it("should use threadId from RequestContext when not in options", async () => { + it("should use conversationId from RequestContext when available", async () => { const fetchSpy = vi.spyOn(globalThis, "fetch") - const processor = new 
SupermemoryOutputProcessor( - INTEGRATION_CONFIG.containerTag, - { - apiKey: INTEGRATION_CONFIG.apiKey, - baseUrl: INTEGRATION_CONFIG.baseUrl, - addMemory: "always", - }, - ) + const processor = new SupermemoryOutputProcessor({ + containerTag: INTEGRATION_CONFIG.containerTag, + conversationId: INTEGRATION_CONFIG.conversationId, + apiKey: INTEGRATION_CONFIG.apiKey, + baseUrl: INTEGRATION_CONFIG.baseUrl, + addMemory: "always", + }) const contextThreadId = `context-thread-${Date.now()}` const requestContext = new RequestContext() requestContext.set(MASTRA_THREAD_ID_KEY, contextThreadId) - const args: ProcessOutputResultArgs = { - messages: [ - createMessage("user", "Test with RequestContext threadId"), - createMessage("assistant", "Got it!"), - ], - messageList: createIntegrationMessageList(), - abort: vi.fn() as never, - retryCount: 0, - requestContext, - } - - await processor.processOutputResult(args) + await processor.processOutputResult( + createOutputArgs({ + messages: [ + createMessage("user", "Test with RequestContext conversationId"), + createMessage("assistant", "Got it!"), + ], + requestContext, + }), + ) const conversationCalls = fetchSpy.mock.calls.filter( (call) => @@ -410,40 +381,32 @@ describe.skipIf(!shouldRunIntegration)( describe("createSupermemoryProcessors", () => { it("should create working input and output processors", async () => { - const { input, output } = createSupermemoryProcessors( - INTEGRATION_CONFIG.containerTag, - { - apiKey: INTEGRATION_CONFIG.apiKey, - baseUrl: INTEGRATION_CONFIG.baseUrl, - mode: "profile", - addMemory: "always", - threadId: `processors-test-${Date.now()}`, - }, - ) + const { input, output } = createSupermemoryProcessors({ + containerTag: INTEGRATION_CONFIG.containerTag, + conversationId: `processors-test-${Date.now()}`, + apiKey: INTEGRATION_CONFIG.apiKey, + baseUrl: INTEGRATION_CONFIG.baseUrl, + mode: "profile", + addMemory: "always", + }) const messageList = createIntegrationMessageList() - const inputArgs: 
ProcessInputArgs = { - messages: [createMessage("user", "Test processors factory")], - systemMessages: [], - messageList, - abort: vi.fn() as never, - retryCount: 0, - } - - await input.processInput(inputArgs) + await input.processInput( + createInputArgs({ + messages: [createMessage("user", "Test processors factory")], + messageList, + }), + ) expect(messageList.addSystem).toHaveBeenCalled() - const outputArgs: ProcessOutputResultArgs = { - messages: [ - createMessage("user", "Test processors factory"), - createMessage("assistant", "Response"), - ], - messageList: createIntegrationMessageList(), - abort: vi.fn() as never, - retryCount: 0, - } - - await output.processOutputResult(outputArgs) + await output.processOutputResult( + createOutputArgs({ + messages: [ + createMessage("user", "Test processors factory"), + createMessage("assistant", "Response"), + ], + }), + ) }) }) @@ -455,17 +418,14 @@ describe.skipIf(!shouldRunIntegration)( model: "gpt-4o", } - const enhanced = withSupermemory( - config, - INTEGRATION_CONFIG.containerTag, - { - apiKey: INTEGRATION_CONFIG.apiKey, - baseUrl: INTEGRATION_CONFIG.baseUrl, - mode: "profile", - addMemory: "always", - threadId: `wrapper-test-${Date.now()}`, - }, - ) + const enhanced = withSupermemory(config, { + containerTag: INTEGRATION_CONFIG.containerTag, + conversationId: `wrapper-test-${Date.now()}`, + apiKey: INTEGRATION_CONFIG.apiKey, + baseUrl: INTEGRATION_CONFIG.baseUrl, + mode: "profile", + addMemory: "always", + }) expect(enhanced.id).toBe("test-mastra-agent") expect(enhanced.name).toBe("Test Mastra Agent") @@ -478,15 +438,12 @@ describe.skipIf(!shouldRunIntegration)( if (inputProcessor?.processInput) { const messageList = createIntegrationMessageList() - const args: ProcessInputArgs = { - messages: [createMessage("user", "Wrapper test")], - systemMessages: [], - messageList, - abort: vi.fn() as never, - retryCount: 0, - } - - await inputProcessor.processInput(args) + await inputProcessor.processInput( + 
createInputArgs({ + messages: [createMessage("user", "Wrapper test")], + messageList, + }), + ) expect(messageList.addSystem).toHaveBeenCalled() } }) @@ -511,15 +468,13 @@ describe.skipIf(!shouldRunIntegration)( outputProcessors: [existingOutputProcessor], } - const enhanced = withSupermemory( - config, - INTEGRATION_CONFIG.containerTag, - { - apiKey: INTEGRATION_CONFIG.apiKey, - baseUrl: INTEGRATION_CONFIG.baseUrl, - mode: "profile", - }, - ) + const enhanced = withSupermemory(config, { + containerTag: INTEGRATION_CONFIG.containerTag, + conversationId: INTEGRATION_CONFIG.conversationId, + apiKey: INTEGRATION_CONFIG.apiKey, + baseUrl: INTEGRATION_CONFIG.baseUrl, + mode: "profile", + }) expect(enhanced.inputProcessors).toHaveLength(2) expect(enhanced.outputProcessors).toHaveLength(2) @@ -534,26 +489,22 @@ describe.skipIf(!shouldRunIntegration)( describe("Options", () => { it("verbose mode should not break functionality", async () => { - const processor = new SupermemoryInputProcessor( - INTEGRATION_CONFIG.containerTag, - { - apiKey: INTEGRATION_CONFIG.apiKey, - baseUrl: INTEGRATION_CONFIG.baseUrl, - mode: "profile", - verbose: true, - }, - ) + const processor = new SupermemoryInputProcessor({ + containerTag: INTEGRATION_CONFIG.containerTag, + conversationId: INTEGRATION_CONFIG.conversationId, + apiKey: INTEGRATION_CONFIG.apiKey, + baseUrl: INTEGRATION_CONFIG.baseUrl, + mode: "profile", + verbose: true, + }) const messageList = createIntegrationMessageList() - const args: ProcessInputArgs = { - messages: [createMessage("user", "Verbose mode test")], - systemMessages: [], - messageList, - abort: vi.fn() as never, - retryCount: 0, - } - - await processor.processInput(args) + await processor.processInput( + createInputArgs({ + messages: [createMessage("user", "Verbose mode test")], + messageList, + }), + ) expect(messageList.addSystem).toHaveBeenCalled() }) @@ -561,25 +512,20 @@ describe.skipIf(!shouldRunIntegration)( it("custom baseUrl should be used for API calls", 
async () => { const fetchSpy = vi.spyOn(globalThis, "fetch") - const processor = new SupermemoryInputProcessor( - INTEGRATION_CONFIG.containerTag, - { - apiKey: INTEGRATION_CONFIG.apiKey, - baseUrl: INTEGRATION_CONFIG.baseUrl, - mode: "profile", - }, + const processor = new SupermemoryInputProcessor({ + containerTag: INTEGRATION_CONFIG.containerTag, + conversationId: INTEGRATION_CONFIG.conversationId, + apiKey: INTEGRATION_CONFIG.apiKey, + baseUrl: INTEGRATION_CONFIG.baseUrl, + mode: "profile", + }) + + await processor.processInput( + createInputArgs({ + messages: [createMessage("user", "Base URL test")], + }), ) - const args: ProcessInputArgs = { - messages: [createMessage("user", "Base URL test")], - systemMessages: [], - messageList: createIntegrationMessageList(), - abort: vi.fn() as never, - retryCount: 0, - } - - await processor.processInput(args) - const profileCalls = fetchSpy.mock.calls.filter( (call) => typeof call[0] === "string" && call[0].includes("/v4/profile"), @@ -595,51 +541,44 @@ describe.skipIf(!shouldRunIntegration)( describe("Error handling", () => { it("should handle invalid API key gracefully", async () => { - const processor = new SupermemoryInputProcessor( - INTEGRATION_CONFIG.containerTag, - { - apiKey: "invalid-api-key-12345", - baseUrl: INTEGRATION_CONFIG.baseUrl, - mode: "profile", - }, - ) + const processor = new SupermemoryInputProcessor({ + containerTag: INTEGRATION_CONFIG.containerTag, + conversationId: INTEGRATION_CONFIG.conversationId, + apiKey: "invalid-api-key-12345", + baseUrl: INTEGRATION_CONFIG.baseUrl, + mode: "profile", + }) const messageList = createIntegrationMessageList() - const args: ProcessInputArgs = { - messages: [createMessage("user", "Invalid key test")], - systemMessages: [], - messageList, - abort: vi.fn() as never, - retryCount: 0, - } - - const result = await processor.processInput(args) + const result = await processor.processInput( + createInputArgs({ + messages: [createMessage("user", "Invalid key test")], 
+ messageList, + }), + ) expect(result).toBe(messageList) expect(messageList.addSystem).not.toHaveBeenCalled() }) it("output processor should handle save errors gracefully", async () => { - const processor = new SupermemoryOutputProcessor( - INTEGRATION_CONFIG.containerTag, - { - apiKey: "invalid-api-key-12345", - baseUrl: INTEGRATION_CONFIG.baseUrl, - addMemory: "always", - threadId: "error-test", - }, - ) - - const args: ProcessOutputResultArgs = { - messages: [ - createMessage("user", "Error test"), - createMessage("assistant", "Response"), - ], - messageList: createIntegrationMessageList(), - abort: vi.fn() as never, - retryCount: 0, - } - - await expect(processor.processOutputResult(args)).resolves.toBeDefined() + const processor = new SupermemoryOutputProcessor({ + containerTag: INTEGRATION_CONFIG.containerTag, + conversationId: "error-test", + apiKey: "invalid-api-key-12345", + baseUrl: INTEGRATION_CONFIG.baseUrl, + addMemory: "always", + }) + + await expect( + processor.processOutputResult( + createOutputArgs({ + messages: [ + createMessage("user", "Error test"), + createMessage("assistant", "Response"), + ], + }), + ), + ).resolves.toBeDefined() }) }) }, diff --git a/packages/tools/test/mastra/unit.test.ts b/packages/tools/test/mastra/unit.test.ts index 0161546dc..9a087c3d7 100644 --- a/packages/tools/test/mastra/unit.test.ts +++ b/packages/tools/test/mastra/unit.test.ts @@ -29,6 +29,7 @@ const TEST_CONFIG = { apiKey: "test-api-key", baseUrl: "https://api.supermemory.ai", containerTag: "test-mastra-user", + conversationId: "test-conv-123", } interface MockAgentConfig { @@ -104,6 +105,29 @@ const createMockConversationResponse = () => ({ status: "created", }) +const createInputArgs = ( + overrides: Partial = {}, +): ProcessInputArgs => + ({ + messages: [], + systemMessages: [], + messageList: createMockMessageList(), + abort: vi.fn() as never, + retryCount: 0, + ...overrides, + }) as ProcessInputArgs + +const createOutputArgs = ( + overrides: Partial = {}, 
+): ProcessOutputResultArgs => + ({ + messages: [], + messageList: createMockMessageList(), + abort: vi.fn() as never, + retryCount: 0, + ...overrides, + }) as ProcessOutputResultArgs + describe("SupermemoryInputProcessor", () => { let originalEnv: string | undefined let originalFetch: typeof globalThis.fetch @@ -129,7 +153,10 @@ describe("SupermemoryInputProcessor", () => { describe("constructor", () => { it("should create processor with default options", () => { - const processor = new SupermemoryInputProcessor(TEST_CONFIG.containerTag) + const processor = new SupermemoryInputProcessor({ + containerTag: TEST_CONFIG.containerTag, + conversationId: TEST_CONFIG.conversationId, + }) expect(processor.id).toBe("supermemory-input") expect(processor.name).toBe("Supermemory Memory Injection") }) @@ -138,21 +165,41 @@ describe("SupermemoryInputProcessor", () => { delete process.env.SUPERMEMORY_API_KEY expect(() => { - new SupermemoryInputProcessor(TEST_CONFIG.containerTag) + new SupermemoryInputProcessor({ + containerTag: TEST_CONFIG.containerTag, + conversationId: TEST_CONFIG.conversationId, + }) }).toThrow("SUPERMEMORY_API_KEY is not set") }) it("should accept API key via options", () => { delete process.env.SUPERMEMORY_API_KEY - const processor = new SupermemoryInputProcessor( - TEST_CONFIG.containerTag, - { - apiKey: "custom-key", - }, - ) + const processor = new SupermemoryInputProcessor({ + containerTag: TEST_CONFIG.containerTag, + conversationId: TEST_CONFIG.conversationId, + apiKey: "custom-key", + }) expect(processor.id).toBe("supermemory-input") }) + + it("should throw error if conversationId is empty", () => { + expect(() => { + new SupermemoryInputProcessor({ + containerTag: TEST_CONFIG.containerTag, + conversationId: "", + }) + }).toThrow("[supermemory] conversationId is required") + }) + + it("should throw error if conversationId is whitespace", () => { + expect(() => { + new SupermemoryInputProcessor({ + containerTag: TEST_CONFIG.containerTag, + 
conversationId: " ", + }) + }).toThrow("[supermemory] conversationId is required") + }) }) describe("processInput", () => { @@ -168,24 +215,18 @@ describe("SupermemoryInputProcessor", () => { ), }) - const processor = new SupermemoryInputProcessor( - TEST_CONFIG.containerTag, - { - apiKey: TEST_CONFIG.apiKey, - mode: "profile", - }, - ) + const processor = new SupermemoryInputProcessor({ + containerTag: TEST_CONFIG.containerTag, + conversationId: TEST_CONFIG.conversationId, + apiKey: TEST_CONFIG.apiKey, + mode: "profile", + }) const messageList = createMockMessageList() - const messages: MastraDBMessage[] = [createMessage("user", "Hello")] - - const args: ProcessInputArgs = { - messages, - systemMessages: [], + const args = createInputArgs({ + messages: [createMessage("user", "Hello")], messageList, - abort: vi.fn() as never, - retryCount: 0, - } + }) await processor.processInput(args) @@ -203,36 +244,19 @@ describe("SupermemoryInputProcessor", () => { Promise.resolve(createMockProfileResponse(["Cached memory"])), }) - const processor = new SupermemoryInputProcessor( - TEST_CONFIG.containerTag, - { - apiKey: TEST_CONFIG.apiKey, - mode: "profile", - }, - ) + const processor = new SupermemoryInputProcessor({ + containerTag: TEST_CONFIG.containerTag, + conversationId: TEST_CONFIG.conversationId, + apiKey: TEST_CONFIG.apiKey, + mode: "profile", + }) const messages: MastraDBMessage[] = [createMessage("user", "Hello")] - const args1: ProcessInputArgs = { - messages, - systemMessages: [], - messageList: createMockMessageList(), - abort: vi.fn() as never, - retryCount: 0, - } - - await processor.processInput(args1) + await processor.processInput(createInputArgs({ messages })) expect(fetchMock).toHaveBeenCalledTimes(1) - const args2: ProcessInputArgs = { - messages, - systemMessages: [], - messageList: createMockMessageList(), - abort: vi.fn() as never, - retryCount: 0, - } - - await processor.processInput(args2) + await processor.processInput(createInputArgs({ messages })) 
expect(fetchMock).toHaveBeenCalledTimes(1) }) @@ -249,56 +273,40 @@ describe("SupermemoryInputProcessor", () => { }) }) - const processor = new SupermemoryInputProcessor( - TEST_CONFIG.containerTag, - { - apiKey: TEST_CONFIG.apiKey, - mode: "query", - }, - ) - - const args1: ProcessInputArgs = { - messages: [createMessage("user", "First message")], - systemMessages: [], - messageList: createMockMessageList(), - abort: vi.fn() as never, - retryCount: 0, - } + const processor = new SupermemoryInputProcessor({ + containerTag: TEST_CONFIG.containerTag, + conversationId: TEST_CONFIG.conversationId, + apiKey: TEST_CONFIG.apiKey, + mode: "query", + }) - await processor.processInput(args1) + await processor.processInput( + createInputArgs({ + messages: [createMessage("user", "First message")], + }), + ) expect(fetchMock).toHaveBeenCalledTimes(1) - const args2: ProcessInputArgs = { - messages: [createMessage("user", "Different message")], - systemMessages: [], - messageList: createMockMessageList(), - abort: vi.fn() as never, - retryCount: 0, - } - - await processor.processInput(args2) + await processor.processInput( + createInputArgs({ + messages: [createMessage("user", "Different message")], + }), + ) expect(fetchMock).toHaveBeenCalledTimes(2) }) it("should return messageList in query mode when no user message", async () => { - const processor = new SupermemoryInputProcessor( - TEST_CONFIG.containerTag, - { - apiKey: TEST_CONFIG.apiKey, - mode: "query", - }, - ) + const processor = new SupermemoryInputProcessor({ + containerTag: TEST_CONFIG.containerTag, + conversationId: TEST_CONFIG.conversationId, + apiKey: TEST_CONFIG.apiKey, + mode: "query", + }) const messageList = createMockMessageList() - const args: ProcessInputArgs = { - messages: [], - systemMessages: [], - messageList, - abort: vi.fn() as never, - retryCount: 0, - } - - const result = await processor.processInput(args) + const result = await processor.processInput( + createInputArgs({ messages: [], messageList 
}), + ) expect(result).toBe(messageList) expect(fetchMock).not.toHaveBeenCalled() @@ -313,84 +321,47 @@ describe("SupermemoryInputProcessor", () => { text: () => Promise.resolve("Server error"), }) - const processor = new SupermemoryInputProcessor( - TEST_CONFIG.containerTag, - { - apiKey: TEST_CONFIG.apiKey, - mode: "profile", - }, - ) + const processor = new SupermemoryInputProcessor({ + containerTag: TEST_CONFIG.containerTag, + conversationId: TEST_CONFIG.conversationId, + apiKey: TEST_CONFIG.apiKey, + mode: "profile", + }) const messageList = createMockMessageList() - const args: ProcessInputArgs = { - messages: [createMessage("user", "Hello")], - systemMessages: [], - messageList, - abort: vi.fn() as never, - retryCount: 0, - } - - const result = await processor.processInput(args) + const result = await processor.processInput( + createInputArgs({ + messages: [createMessage("user", "Hello")], + messageList, + }), + ) expect(result).toBe(messageList) expect(messageList.addSystem).not.toHaveBeenCalled() }) - it("should use threadId from options", async () => { + it("should use conversationId from requestContext fallback", async () => { fetchMock.mockResolvedValue({ ok: true, json: () => Promise.resolve(createMockProfileResponse(["Memory"])), }) - const processor = new SupermemoryInputProcessor( - TEST_CONFIG.containerTag, - { - apiKey: TEST_CONFIG.apiKey, - threadId: "thread-123", - mode: "profile", - }, - ) - - const args: ProcessInputArgs = { - messages: [createMessage("user", "Hello")], - systemMessages: [], - messageList: createMockMessageList(), - abort: vi.fn() as never, - retryCount: 0, - } - - await processor.processInput(args) - - expect(fetchMock).toHaveBeenCalled() - }) - - it("should use threadId from requestContext when not in options", async () => { - fetchMock.mockResolvedValue({ - ok: true, - json: () => Promise.resolve(createMockProfileResponse(["Memory"])), + const processor = new SupermemoryInputProcessor({ + containerTag: 
TEST_CONFIG.containerTag, + conversationId: TEST_CONFIG.conversationId, + apiKey: TEST_CONFIG.apiKey, + mode: "profile", }) - const processor = new SupermemoryInputProcessor( - TEST_CONFIG.containerTag, - { - apiKey: TEST_CONFIG.apiKey, - mode: "profile", - }, - ) - const requestContext = new RequestContext() requestContext.set(MASTRA_THREAD_ID_KEY, "ctx-thread-456") - const args: ProcessInputArgs = { - messages: [createMessage("user", "Hello")], - systemMessages: [], - messageList: createMockMessageList(), - abort: vi.fn() as never, - retryCount: 0, - requestContext, - } - - await processor.processInput(args) + await processor.processInput( + createInputArgs({ + messages: [createMessage("user", "Hello")], + requestContext, + }), + ) expect(fetchMock).toHaveBeenCalled() }) @@ -401,13 +372,12 @@ describe("SupermemoryInputProcessor", () => { json: () => Promise.resolve(createMockProfileResponse(["Memory"])), }) - const processor = new SupermemoryInputProcessor( - TEST_CONFIG.containerTag, - { - apiKey: TEST_CONFIG.apiKey, - mode: "query", - }, - ) + const processor = new SupermemoryInputProcessor({ + containerTag: TEST_CONFIG.containerTag, + conversationId: TEST_CONFIG.conversationId, + apiKey: TEST_CONFIG.apiKey, + mode: "query", + }) const messages: MastraDBMessage[] = [ { @@ -424,16 +394,7 @@ describe("SupermemoryInputProcessor", () => { }, ] - const messageList = createMockMessageList() - const args: ProcessInputArgs = { - messages, - systemMessages: [], - messageList, - abort: vi.fn() as never, - retryCount: 0, - } - - await processor.processInput(args) + await processor.processInput(createInputArgs({ messages })) expect(fetchMock).toHaveBeenCalled() }) @@ -465,7 +426,10 @@ describe("SupermemoryOutputProcessor", () => { describe("constructor", () => { it("should create processor with default options", () => { - const processor = new SupermemoryOutputProcessor(TEST_CONFIG.containerTag) + const processor = new SupermemoryOutputProcessor({ + containerTag: 
TEST_CONFIG.containerTag, + conversationId: TEST_CONFIG.conversationId, + }) expect(processor.id).toBe("supermemory-output") expect(processor.name).toBe("Supermemory Conversation Save") }) @@ -478,26 +442,19 @@ describe("SupermemoryOutputProcessor", () => { json: () => Promise.resolve(createMockConversationResponse()), }) - const processor = new SupermemoryOutputProcessor( - TEST_CONFIG.containerTag, - { - apiKey: TEST_CONFIG.apiKey, - addMemory: "always", - threadId: "conv-456", - }, - ) - - const messages: MastraDBMessage[] = [ - createMessage("user", "Hello"), - createMessage("assistant", "Hi there!"), - ] + const processor = new SupermemoryOutputProcessor({ + containerTag: TEST_CONFIG.containerTag, + conversationId: "conv-456", + apiKey: TEST_CONFIG.apiKey, + addMemory: "always", + }) - const args: ProcessOutputResultArgs = { - messages, - messageList: createMockMessageList(), - abort: vi.fn() as never, - retryCount: 0, - } + const args = createOutputArgs({ + messages: [ + createMessage("user", "Hello"), + createMessage("assistant", "Hi there!"), + ], + }) await processor.processOutputResult(args) @@ -522,81 +479,48 @@ describe("SupermemoryOutputProcessor", () => { }) it("should not save conversation when addMemory is never", async () => { - const processor = new SupermemoryOutputProcessor( - TEST_CONFIG.containerTag, - { - apiKey: TEST_CONFIG.apiKey, - addMemory: "never", - threadId: "conv-456", - }, - ) - - const args: ProcessOutputResultArgs = { - messages: [ - createMessage("user", "Hello"), - createMessage("assistant", "Hi!"), - ], - messageList: createMockMessageList(), - abort: vi.fn() as never, - retryCount: 0, - } - - await processor.processOutputResult(args) - - expect(fetchMock).not.toHaveBeenCalled() - }) - - it("should not save when no threadId provided", async () => { - const processor = new SupermemoryOutputProcessor( - TEST_CONFIG.containerTag, - { - apiKey: TEST_CONFIG.apiKey, - addMemory: "always", - }, - ) + const processor = new 
SupermemoryOutputProcessor({ + containerTag: TEST_CONFIG.containerTag, + conversationId: "conv-456", + apiKey: TEST_CONFIG.apiKey, + addMemory: "never", + }) - const args: ProcessOutputResultArgs = { + const args = createOutputArgs({ messages: [ createMessage("user", "Hello"), createMessage("assistant", "Hi!"), ], - messageList: createMockMessageList(), - abort: vi.fn() as never, - retryCount: 0, - } + }) await processor.processOutputResult(args) expect(fetchMock).not.toHaveBeenCalled() }) - it("should use threadId from requestContext", async () => { + it("should use conversationId from requestContext", async () => { fetchMock.mockResolvedValue({ ok: true, json: () => Promise.resolve(createMockConversationResponse()), }) - const processor = new SupermemoryOutputProcessor( - TEST_CONFIG.containerTag, - { - apiKey: TEST_CONFIG.apiKey, - addMemory: "always", - }, - ) + const processor = new SupermemoryOutputProcessor({ + containerTag: TEST_CONFIG.containerTag, + conversationId: TEST_CONFIG.conversationId, + apiKey: TEST_CONFIG.apiKey, + addMemory: "always", + }) const requestContext = new RequestContext() requestContext.set(MASTRA_THREAD_ID_KEY, "ctx-thread-789") - const args: ProcessOutputResultArgs = { + const args = createOutputArgs({ messages: [ createMessage("user", "Hello"), createMessage("assistant", "Hi!"), ], - messageList: createMockMessageList(), - abort: vi.fn() as never, - retryCount: 0, requestContext, - } + }) await processor.processOutputResult(args) @@ -604,6 +528,7 @@ describe("SupermemoryOutputProcessor", () => { const callBody = JSON.parse( (fetchMock.mock.calls[0]?.[1] as { body: string }).body, ) + // Should use the RequestContext override, not the default conversationId expect(callBody.conversationId).toBe("ctx-thread-789") }) @@ -613,27 +538,20 @@ describe("SupermemoryOutputProcessor", () => { json: () => Promise.resolve(createMockConversationResponse()), }) - const processor = new SupermemoryOutputProcessor( - TEST_CONFIG.containerTag, - { - 
apiKey: TEST_CONFIG.apiKey, - addMemory: "always", - threadId: "conv-456", - }, - ) - - const messages: MastraDBMessage[] = [ - createMessage("system", "You are a helpful assistant"), - createMessage("user", "Hello"), - createMessage("assistant", "Hi there!"), - ] + const processor = new SupermemoryOutputProcessor({ + containerTag: TEST_CONFIG.containerTag, + conversationId: "conv-456", + apiKey: TEST_CONFIG.apiKey, + addMemory: "always", + }) - const args: ProcessOutputResultArgs = { - messages, - messageList: createMockMessageList(), - abort: vi.fn() as never, - retryCount: 0, - } + const args = createOutputArgs({ + messages: [ + createMessage("system", "You are a helpful assistant"), + createMessage("user", "Hello"), + createMessage("assistant", "Hi there!"), + ], + }) await processor.processOutputResult(args) @@ -652,14 +570,12 @@ describe("SupermemoryOutputProcessor", () => { json: () => Promise.resolve(createMockConversationResponse()), }) - const processor = new SupermemoryOutputProcessor( - TEST_CONFIG.containerTag, - { - apiKey: TEST_CONFIG.apiKey, - addMemory: "always", - threadId: "conv-456", - }, - ) + const processor = new SupermemoryOutputProcessor({ + containerTag: TEST_CONFIG.containerTag, + conversationId: "conv-456", + apiKey: TEST_CONFIG.apiKey, + addMemory: "always", + }) const messages: MastraDBMessage[] = [ { @@ -685,12 +601,7 @@ describe("SupermemoryOutputProcessor", () => { }, ] - const args: ProcessOutputResultArgs = { - messages, - messageList: createMockMessageList(), - abort: vi.fn() as never, - retryCount: 0, - } + const args = createOutputArgs({ messages }) await processor.processOutputResult(args) @@ -708,45 +619,33 @@ describe("SupermemoryOutputProcessor", () => { text: () => Promise.resolve("Server error"), }) - const processor = new SupermemoryOutputProcessor( - TEST_CONFIG.containerTag, - { - apiKey: TEST_CONFIG.apiKey, - addMemory: "always", - threadId: "conv-456", - }, - ) + const processor = new SupermemoryOutputProcessor({ + 
containerTag: TEST_CONFIG.containerTag, + conversationId: "conv-456", + apiKey: TEST_CONFIG.apiKey, + addMemory: "always", + }) - const args: ProcessOutputResultArgs = { + const args = createOutputArgs({ messages: [ createMessage("user", "Hello"), createMessage("assistant", "Hi!"), ], - messageList: createMockMessageList(), - abort: vi.fn() as never, - retryCount: 0, - } + }) // Should not throw await expect(processor.processOutputResult(args)).resolves.toBeDefined() }) it("should not save when no messages to save", async () => { - const processor = new SupermemoryOutputProcessor( - TEST_CONFIG.containerTag, - { - apiKey: TEST_CONFIG.apiKey, - addMemory: "always", - threadId: "conv-456", - }, - ) + const processor = new SupermemoryOutputProcessor({ + containerTag: TEST_CONFIG.containerTag, + conversationId: "conv-456", + apiKey: TEST_CONFIG.apiKey, + addMemory: "always", + }) - const args: ProcessOutputResultArgs = { - messages: [], - messageList: createMockMessageList(), - abort: vi.fn() as never, - retryCount: 0, - } + const args = createOutputArgs({ messages: [] }) await processor.processOutputResult(args) @@ -773,13 +672,18 @@ describe("Factory functions", () => { describe("createSupermemoryProcessor", () => { it("should create input processor", () => { - const processor = createSupermemoryProcessor(TEST_CONFIG.containerTag) + const processor = createSupermemoryProcessor({ + containerTag: TEST_CONFIG.containerTag, + conversationId: TEST_CONFIG.conversationId, + }) expect(processor).toBeInstanceOf(SupermemoryInputProcessor) expect(processor.id).toBe("supermemory-input") }) it("should pass options to processor", () => { - const processor = createSupermemoryProcessor(TEST_CONFIG.containerTag, { + const processor = createSupermemoryProcessor({ + containerTag: TEST_CONFIG.containerTag, + conversationId: TEST_CONFIG.conversationId, apiKey: "custom-key", mode: "full", }) @@ -789,45 +693,43 @@ describe("Factory functions", () => { 
describe("createSupermemoryOutputProcessor", () => { it("should create output processor", () => { - const processor = createSupermemoryOutputProcessor( - TEST_CONFIG.containerTag, - ) + const processor = createSupermemoryOutputProcessor({ + containerTag: TEST_CONFIG.containerTag, + conversationId: TEST_CONFIG.conversationId, + }) expect(processor).toBeInstanceOf(SupermemoryOutputProcessor) expect(processor.id).toBe("supermemory-output") }) it("should pass options to processor", () => { - const processor = createSupermemoryOutputProcessor( - TEST_CONFIG.containerTag, - { - apiKey: "custom-key", - addMemory: "always", - threadId: "conv-123", - }, - ) + const processor = createSupermemoryOutputProcessor({ + containerTag: TEST_CONFIG.containerTag, + conversationId: TEST_CONFIG.conversationId, + apiKey: "custom-key", + addMemory: "always", + }) expect(processor).toBeInstanceOf(SupermemoryOutputProcessor) }) }) describe("createSupermemoryProcessors", () => { it("should create both input and output processors", () => { - const { input, output } = createSupermemoryProcessors( - TEST_CONFIG.containerTag, - ) + const { input, output } = createSupermemoryProcessors({ + containerTag: TEST_CONFIG.containerTag, + conversationId: TEST_CONFIG.conversationId, + }) expect(input).toBeInstanceOf(SupermemoryInputProcessor) expect(output).toBeInstanceOf(SupermemoryOutputProcessor) }) it("should share options between processors", () => { - const { input, output } = createSupermemoryProcessors( - TEST_CONFIG.containerTag, - { - apiKey: "custom-key", - mode: "full", - addMemory: "always", - threadId: "conv-123", - }, - ) + const { input, output } = createSupermemoryProcessors({ + containerTag: TEST_CONFIG.containerTag, + conversationId: TEST_CONFIG.conversationId, + apiKey: "custom-key", + mode: "full", + addMemory: "always", + }) expect(input.id).toBe("supermemory-input") expect(output.id).toBe("supermemory-output") }) @@ -857,7 +759,10 @@ describe("withSupermemory", () => { const config: 
MockAgentConfig = { id: "test-agent", name: "Test Agent" } expect(() => { - withSupermemory(config, TEST_CONFIG.containerTag) + withSupermemory(config, { + containerTag: TEST_CONFIG.containerTag, + conversationId: TEST_CONFIG.conversationId, + }) }).toThrow("SUPERMEMORY_API_KEY is not set") }) @@ -865,7 +770,9 @@ describe("withSupermemory", () => { delete process.env.SUPERMEMORY_API_KEY const config: MockAgentConfig = { id: "test-agent", name: "Test Agent" } - const enhanced = withSupermemory(config, TEST_CONFIG.containerTag, { + const enhanced = withSupermemory(config, { + containerTag: TEST_CONFIG.containerTag, + conversationId: TEST_CONFIG.conversationId, apiKey: "custom-key", }) @@ -877,7 +784,10 @@ describe("withSupermemory", () => { describe("processor injection", () => { it("should inject input and output processors", () => { const config: MockAgentConfig = { id: "test-agent", name: "Test Agent" } - const enhanced = withSupermemory(config, TEST_CONFIG.containerTag) + const enhanced = withSupermemory(config, { + containerTag: TEST_CONFIG.containerTag, + conversationId: TEST_CONFIG.conversationId, + }) expect(enhanced.inputProcessors).toHaveLength(1) expect(enhanced.outputProcessors).toHaveLength(1) @@ -892,7 +802,10 @@ describe("withSupermemory", () => { model: "gpt-4", customProp: "value", } - const enhanced = withSupermemory(config, TEST_CONFIG.containerTag) + const enhanced = withSupermemory(config, { + containerTag: TEST_CONFIG.containerTag, + conversationId: TEST_CONFIG.conversationId, + }) expect(enhanced.id).toBe("test-agent") expect(enhanced.name).toBe("Test Agent") @@ -911,7 +824,10 @@ describe("withSupermemory", () => { inputProcessors: [existingInputProcessor], } - const enhanced = withSupermemory(config, TEST_CONFIG.containerTag) + const enhanced = withSupermemory(config, { + containerTag: TEST_CONFIG.containerTag, + conversationId: TEST_CONFIG.conversationId, + }) expect(enhanced.inputProcessors).toHaveLength(2) 
expect(enhanced.inputProcessors?.[0]?.id).toBe("supermemory-input") @@ -929,7 +845,10 @@ describe("withSupermemory", () => { outputProcessors: [existingOutputProcessor], } - const enhanced = withSupermemory(config, TEST_CONFIG.containerTag) + const enhanced = withSupermemory(config, { + containerTag: TEST_CONFIG.containerTag, + conversationId: TEST_CONFIG.conversationId, + }) expect(enhanced.outputProcessors).toHaveLength(2) expect(enhanced.outputProcessors?.[0]?.id).toBe("existing-output") @@ -946,7 +865,10 @@ describe("withSupermemory", () => { outputProcessors: [existingOutput], } - const enhanced = withSupermemory(config, TEST_CONFIG.containerTag) + const enhanced = withSupermemory(config, { + containerTag: TEST_CONFIG.containerTag, + conversationId: TEST_CONFIG.conversationId, + }) expect(enhanced.inputProcessors).toHaveLength(2) expect(enhanced.outputProcessors).toHaveLength(2) @@ -960,10 +882,11 @@ describe("withSupermemory", () => { describe("options passthrough", () => { it("should pass options to processors", () => { const config: MockAgentConfig = { id: "test-agent", name: "Test Agent" } - const enhanced = withSupermemory(config, TEST_CONFIG.containerTag, { + const enhanced = withSupermemory(config, { + containerTag: TEST_CONFIG.containerTag, + conversationId: TEST_CONFIG.conversationId, mode: "full", addMemory: "always", - threadId: "conv-123", verbose: true, }) @@ -971,4 +894,28 @@ describe("withSupermemory", () => { expect(enhanced.outputProcessors).toHaveLength(1) }) }) + + describe("conversationId validation", () => { + it("should throw error if conversationId is empty", () => { + const config: MockAgentConfig = { id: "test-agent", name: "Test Agent" } + + expect(() => { + withSupermemory(config, { + containerTag: TEST_CONFIG.containerTag, + conversationId: "", + }) + }).toThrow("[supermemory] conversationId is required") + }) + + it("should throw error if conversationId is whitespace", () => { + const config: MockAgentConfig = { id: "test-agent", 
name: "Test Agent" }
+
+      expect(() => {
+        withSupermemory(config, {
+          containerTag: TEST_CONFIG.containerTag,
+          conversationId: "   ",
+        })
+      }).toThrow("[supermemory] conversationId is required")
+    })
+  })
 })

From 83baa43a9f7b49adb97541cca75d27246cffbed Mon Sep 17 00:00:00 2001
From: "claude[bot]" <41898282+claude[bot]@users.noreply.github.com>
Date: Sat, 4 Apr 2026 04:53:55 +0000
Subject: [PATCH 2/2] refactor: rename conversationId to customId

Co-Authored-By: Claude Opus 4.5
---
 apps/docs/integrations/mastra.mdx         |  98 +++++++++--------
 packages/tools/README.md                  |  30 +++---
 packages/tools/src/mastra/processor.ts    |  60 +++++------
 packages/tools/src/mastra/types.ts        |   4 +-
 packages/tools/src/mastra/wrapper.ts      |   4 +-
 .../tools/test/mastra/integration.test.ts |  38 +++----
 packages/tools/test/mastra/unit.test.ts   | 100 +++++++++---------
 7 files changed, 168 insertions(+), 166 deletions(-)

diff --git a/apps/docs/integrations/mastra.mdx b/apps/docs/integrations/mastra.mdx
index 972bd1871..715367490 100644
--- a/apps/docs/integrations/mastra.mdx
+++ b/apps/docs/integrations/mastra.mdx
@@ -34,9 +34,9 @@ const agent = new Agent(withSupermemory(
     model: openai("gpt-4o"),
     instructions: "You are a helpful assistant.",
   },
-  "user-123", // containerTag - scopes memories to this user
-  "conv-456", // conversationId - groups messages into the same document
   {
+    containerTag: "user-123", // scopes memories to this user
+    customId: "conv-456", // groups messages into the same document
     mode: "full",
     addMemory: "always",
   }
@@ -51,9 +51,11 @@ const response = await agent.generate("What do you know about me?")
 ```typescript
 const agent = new Agent(withSupermemory(
   { id: "my-assistant", model: openai("gpt-4o"), ... 
}, - "user-123", - "conv-456", - { addMemory: "always" } + { + containerTag: "user-123", + customId: "conv-456", + addMemory: "always", + } )) ``` @@ -108,19 +110,19 @@ sequenceDiagram **Profile Mode (Default)** - Retrieves the user's complete profile without query-based filtering: ```typescript -const agent = new Agent(withSupermemory(config, "user-123", "conv-456", { mode: "profile" })) +const agent = new Agent(withSupermemory(config, { containerTag: "user-123", customId: "conv-456", mode: "profile" })) ``` **Query Mode** - Searches memories based on the user's message: ```typescript -const agent = new Agent(withSupermemory(config, "user-123", "conv-456", { mode: "query" })) +const agent = new Agent(withSupermemory(config, { containerTag: "user-123", customId: "conv-456", mode: "query" })) ``` **Full Mode** - Combines profile AND query-based search for maximum context: ```typescript -const agent = new Agent(withSupermemory(config, "user-123", "conv-456", { mode: "full" })) +const agent = new Agent(withSupermemory(config, { containerTag: "user-123", customId: "conv-456", mode: "full" })) ### Mode Comparison @@ -134,14 +136,16 @@ const agent = new Agent(withSupermemory(config, "user-123", "conv-456", { mode: ## Saving Conversations -Enable automatic conversation saving with `addMemory: "always"`. The `conversationId` parameter groups messages into the same document: +Enable automatic conversation saving with `addMemory: "always"`. The `customId` parameter groups messages into the same document: ```typescript const agent = new Agent(withSupermemory( { id: "my-assistant", model: openai("gpt-4o"), instructions: "..." 
}, - "user-123", - "conv-456", - { addMemory: "always" } + { + containerTag: "user-123", + customId: "conv-456", + addMemory: "always", + } )) // All messages in this conversation are saved @@ -173,9 +177,9 @@ const claudePrompt = (data: MemoryPromptData) => ` const agent = new Agent(withSupermemory( { id: "my-assistant", model: openai("gpt-4o"), instructions: "..." }, - "user-123", - "conv-456", { + containerTag: "user-123", + customId: "conv-456", mode: "full", promptTemplate: claudePrompt, } @@ -202,7 +206,9 @@ const agent = new Agent({ name: "My Assistant", model: openai("gpt-4o"), inputProcessors: [ - createSupermemoryProcessor("user-123", "conv-456", { + createSupermemoryProcessor({ + containerTag: "user-123", + customId: "conv-456", mode: "full", verbose: true, }), @@ -224,7 +230,9 @@ const agent = new Agent({ name: "My Assistant", model: openai("gpt-4o"), outputProcessors: [ - createSupermemoryOutputProcessor("user-123", "conv-456", { + createSupermemoryOutputProcessor({ + containerTag: "user-123", + customId: "conv-456", addMemory: "always", }), ], @@ -240,7 +248,9 @@ import { Agent } from "@mastra/core/agent" import { createSupermemoryProcessors } from "@supermemory/tools/mastra" import { openai } from "@ai-sdk/openai" -const { input, output } = createSupermemoryProcessors("user-123", "conv-456", { +const { input, output } = createSupermemoryProcessors({ + containerTag: "user-123", + customId: "conv-456", mode: "full", addMemory: "always", verbose: true, @@ -259,7 +269,7 @@ const agent = new Agent({ ## Using RequestContext -Mastra's `RequestContext` can provide a dynamic conversation ID override: +Mastra's `RequestContext` can provide a dynamic custom ID override: ```typescript import { Agent } from "@mastra/core/agent" @@ -269,15 +279,15 @@ import { openai } from "@ai-sdk/openai" const agent = new Agent(withSupermemory( { id: "my-assistant", model: openai("gpt-4o"), instructions: "..." 
}, - "user-123", - "default-conv-id", { + containerTag: "user-123", + customId: "default-conv-id", mode: "full", addMemory: "always", } )) -// Override conversationId dynamically via RequestContext +// Override customId dynamically via RequestContext const ctx = new RequestContext() ctx.set(MASTRA_THREAD_ID_KEY, "dynamic-thread-id") @@ -293,9 +303,11 @@ Enable detailed logging for debugging: ```typescript const agent = new Agent(withSupermemory( { id: "my-assistant", model: openai("gpt-4o"), instructions: "..." }, - "user-123", - "conv-456", - { verbose: true } + { + containerTag: "user-123", + customId: "conv-456", + verbose: true, + } )) // Console output: @@ -321,8 +333,10 @@ const agent = new Agent(withSupermemory( inputProcessors: [myLoggingProcessor], outputProcessors: [myAnalyticsProcessor], }, - "user-123", - "conv-456" + { + containerTag: "user-123", + customId: "conv-456", + } )) ``` @@ -337,17 +351,13 @@ Enhances a Mastra agent config with memory capabilities. ```typescript function withSupermemory( config: T, - containerTag: string, - conversationId: string, - options?: SupermemoryMastraOptions + options: SupermemoryMastraOptions ): T ``` **Parameters:** - `config` - The Mastra agent configuration object -- `containerTag` - User/container ID for scoping memories -- `conversationId` - Conversation ID to group messages into the same document -- `options` - Configuration options +- `options` - Configuration options including `containerTag` and `customId` **Returns:** Enhanced config with Supermemory processors injected @@ -357,9 +367,7 @@ Creates an input processor for memory injection. ```typescript function createSupermemoryProcessor( - containerTag: string, - conversationId: string, - options?: SupermemoryMastraOptions + options: SupermemoryMastraOptions ): SupermemoryInputProcessor ``` @@ -369,9 +377,7 @@ Creates an output processor for conversation saving. 
```typescript function createSupermemoryOutputProcessor( - containerTag: string, - conversationId: string, - options?: SupermemoryMastraOptions + options: SupermemoryMastraOptions ): SupermemoryOutputProcessor ``` @@ -381,9 +387,7 @@ Creates both processors with shared configuration. ```typescript function createSupermemoryProcessors( - containerTag: string, - conversationId: string, - options?: SupermemoryMastraOptions + options: SupermemoryMastraOptions ): { input: SupermemoryInputProcessor output: SupermemoryOutputProcessor @@ -394,6 +398,8 @@ function createSupermemoryProcessors( ```typescript interface SupermemoryMastraOptions { + containerTag: string // User/container ID for scoping memories + customId: string // Custom ID to group messages into the same document apiKey?: string baseUrl?: string mode?: "profile" | "query" | "full" @@ -419,15 +425,17 @@ Processors gracefully handle errors without breaking the agent: - **API errors** - Logged and skipped; agent continues without memories - **Missing API key** - Throws immediately with helpful error message -- **Empty conversationId** - Throws immediately with helpful `[supermemory]`-prefixed error message +- **Empty customId** - Throws immediately with helpful `[supermemory]`-prefixed error message ```typescript // Missing API key throws immediately const agent = new Agent(withSupermemory( { id: "my-assistant", model: openai("gpt-4o"), instructions: "..." 
}, - "user-123", - "conv-456", - { apiKey: undefined } // Will check SUPERMEMORY_API_KEY env + { + containerTag: "user-123", + customId: "conv-456", + apiKey: undefined, // Will check SUPERMEMORY_API_KEY env + } )) // Error: SUPERMEMORY_API_KEY is not set ``` diff --git a/packages/tools/README.md b/packages/tools/README.md index c35d223b5..d3306a8f1 100644 --- a/packages/tools/README.md +++ b/packages/tools/README.md @@ -430,17 +430,17 @@ createSupermemoryProcessors("user-123", "conv-456", { mode: "full" }) // New API - clearer with explicit key-value pairs withSupermemory(config, { containerTag: "user-123", - conversationId: "conv-456", + customId: "conv-456", mode: "full" }) new SupermemoryInputProcessor({ containerTag: "user-123", - conversationId: "conv-456", + customId: "conv-456", mode: "full" }) createSupermemoryProcessors({ containerTag: "user-123", - conversationId: "conv-456", + customId: "conv-456", mode: "full" }) ``` @@ -464,7 +464,7 @@ const agent = new Agent(withSupermemory( }, { containerTag: "user-123", // scopes memories to this user - conversationId: "conv-456", // groups messages into the same document + customId: "conv-456", // groups messages into the same document mode: "full", addMemory: "always", } @@ -485,7 +485,7 @@ import { openai } from "@ai-sdk/openai" const { input, output } = createSupermemoryProcessors({ containerTag: "user-123", - conversationId: "conv-456", + customId: "conv-456", mode: "full", addMemory: "always", verbose: true, // Enable logging @@ -514,11 +514,11 @@ import { openai } from "@ai-sdk/openai" async function main() { const userId = "user-alex-123" - const conversationId = `conv-${Date.now()}` + const customId = `conv-${Date.now()}` const { input, output } = createSupermemoryProcessors({ containerTag: userId, - conversationId: conversationId, + customId: customId, mode: "profile", // Fetch user profile memories addMemory: "always", // Save all conversations verbose: true, @@ -558,21 +558,21 @@ main() // Profile mode 
- good for general personalization const { input } = createSupermemoryProcessors({ containerTag: "user-123", - conversationId: "conv-456", + customId: "conv-456", mode: "profile" }) // Query mode - good for specific lookups const { input } = createSupermemoryProcessors({ containerTag: "user-123", - conversationId: "conv-456", + customId: "conv-456", mode: "query" }) // Full mode - comprehensive context const { input } = createSupermemoryProcessors({ containerTag: "user-123", - conversationId: "conv-456", + customId: "conv-456", mode: "full" }) ``` @@ -593,15 +593,15 @@ ${data.generalSearchMemories} const { input, output } = createSupermemoryProcessors({ containerTag: "user-123", - conversationId: "conv-456", + customId: "conv-456", mode: "full", promptTemplate: customTemplate, }) ``` -#### Using RequestContext for Dynamic Conversation IDs +#### Using RequestContext for Dynamic Custom IDs -Mastra's RequestContext can override the `conversationId` dynamically per request: +Mastra's RequestContext can override the `customId` dynamically per request: ```typescript import { Agent } from "@mastra/core/agent" @@ -610,7 +610,7 @@ import { createSupermemoryProcessors } from "@supermemory/tools/mastra" const { input, output } = createSupermemoryProcessors({ containerTag: "user-123", - conversationId: "default-conv-id", + customId: "default-conv-id", mode: "profile", addMemory: "always", }) @@ -623,7 +623,7 @@ const agent = new Agent({ outputProcessors: [output], }) -// Override conversationId dynamically per request +// Override customId dynamically per request const ctx = new RequestContext() ctx.set(MASTRA_THREAD_ID_KEY, "dynamic-thread-123") diff --git a/packages/tools/src/mastra/processor.ts b/packages/tools/src/mastra/processor.ts index d6cf08554..f3ada2862 100644 --- a/packages/tools/src/mastra/processor.ts +++ b/packages/tools/src/mastra/processor.ts @@ -43,7 +43,7 @@ import type { */ interface ProcessorContext { containerTag: string - conversationId: string + 
customId: string apiKey: string baseUrl: string mode: MemoryMode @@ -59,7 +59,7 @@ interface ProcessorContext { function createProcessorContext( options: SupermemoryMastraOptions, ): ProcessorContext { - const { containerTag, conversationId } = options + const { containerTag, customId } = options if ( !containerTag || @@ -68,16 +68,16 @@ function createProcessorContext( ) { throw new Error( "[supermemory] containerTag is required and must be a non-empty string. " + - "Pass it in the options object: new SupermemoryInputProcessor({ containerTag: 'user-123', conversationId: 'conv-456' })", + "Pass it in the options object: new SupermemoryInputProcessor({ containerTag: 'user-123', customId: 'conv-456' })", ) } - if (typeof conversationId !== "string" || !conversationId.trim()) { + if (typeof customId !== "string" || !customId.trim()) { throw new Error( - "[supermemory] conversationId is required and must be a non-empty string. " + + "[supermemory] customId is required and must be a non-empty string. " + "Pass a unique identifier (e.g., session ID, chat ID) in the options object. " + "This ensures messages are grouped into the same document for a conversation. " + - "Example: new SupermemoryInputProcessor({ containerTag: 'user-123', conversationId: 'conversation-456' })", + "Example: new SupermemoryInputProcessor({ containerTag: 'user-123', customId: 'conversation-456' })", ) } @@ -87,7 +87,7 @@ function createProcessorContext( return { containerTag, - conversationId, + customId, apiKey, baseUrl, mode: options.mode ?? "profile", @@ -99,13 +99,13 @@ function createProcessorContext( } /** - * Gets the effective conversationId from context or RequestContext. + * Gets the effective customId from context or RequestContext. * * Priority order: - * 1. RequestContext - * 2. Default conversationId from processor options + * 1. RequestContext + * 2. 
Default customId from processor options */ -function getEffectiveConversationId( +function getEffectiveCustomId( ctx: ProcessorContext, requestContext?: RequestContext, ): string { @@ -116,8 +116,8 @@ function getEffectiveConversationId( | undefined if (fromCtx) return fromCtx } - // Fall back to required default conversationId - return ctx.conversationId + // Fall back to required default customId + return ctx.customId } /** @@ -140,7 +140,7 @@ function getEffectiveConversationId( * inputProcessors: [ * new SupermemoryInputProcessor({ * containerTag: "user-123", - * conversationId: "conv-456", + * customId: "conv-456", * mode: "full", * verbose: true, * }), @@ -175,13 +175,10 @@ export class SupermemoryInputProcessor implements Processor { return messageList } - const effectiveConversationId = getEffectiveConversationId( - this.ctx, - requestContext, - ) + const effectiveCustomId = getEffectiveCustomId(this.ctx, requestContext) const turnKey = MemoryCache.makeTurnKey( this.ctx.containerTag, - effectiveConversationId, + effectiveCustomId, this.ctx.mode, queryText || "", ) @@ -195,7 +192,7 @@ export class SupermemoryInputProcessor implements Processor { this.ctx.logger.info("Starting memory search", { containerTag: this.ctx.containerTag, - conversationId: effectiveConversationId, + customId: effectiveCustomId, mode: this.ctx.mode, }) @@ -247,7 +244,7 @@ export class SupermemoryInputProcessor implements Processor { * outputProcessors: [ * new SupermemoryOutputProcessor({ * containerTag: "user-123", - * conversationId: "conv-456", + * customId: "conv-456", * addMemory: "always", * }), * ], @@ -273,10 +270,7 @@ export class SupermemoryOutputProcessor implements Processor { return messages } - const effectiveConversationId = getEffectiveConversationId( - this.ctx, - requestContext, - ) + const effectiveCustomId = getEffectiveCustomId(this.ctx, requestContext) try { const conversationMessages = this.convertToConversationMessages(messages) @@ -287,7 +281,7 @@ export class 
SupermemoryOutputProcessor implements Processor { } const response = await addConversation({ - conversationId: effectiveConversationId, + conversationId: effectiveCustomId, messages: conversationMessages, containerTags: [this.ctx.containerTag], apiKey: this.ctx.apiKey, @@ -296,7 +290,7 @@ export class SupermemoryOutputProcessor implements Processor { this.ctx.logger.info("Conversation saved successfully", { containerTag: this.ctx.containerTag, - conversationId: effectiveConversationId, + customId: effectiveCustomId, messageCount: conversationMessages.length, responseId: response.id, }) @@ -353,7 +347,7 @@ export class SupermemoryOutputProcessor implements Processor { /** * Creates a Supermemory input processor for memory injection. * - * @param options - Configuration options including containerTag and conversationId + * @param options - Configuration options including containerTag and customId * @returns Configured SupermemoryInputProcessor instance * * @example @@ -364,7 +358,7 @@ export class SupermemoryOutputProcessor implements Processor { * * const processor = createSupermemoryProcessor({ * containerTag: "user-123", - * conversationId: "conv-456", + * customId: "conv-456", * mode: "full", * verbose: true, * }) @@ -386,7 +380,7 @@ export function createSupermemoryProcessor( /** * Creates a Supermemory output processor for saving conversations. 
* - * @param options - Configuration options including containerTag and conversationId + * @param options - Configuration options including containerTag and customId * @returns Configured SupermemoryOutputProcessor instance * * @example @@ -397,7 +391,7 @@ export function createSupermemoryProcessor( * * const processor = createSupermemoryOutputProcessor({ * containerTag: "user-123", - * conversationId: "conv-456", + * customId: "conv-456", * addMemory: "always", * }) * @@ -421,7 +415,7 @@ export function createSupermemoryOutputProcessor( * Use this when you want both memory injection and conversation saving * with consistent settings across both processors. * - * @param options - Configuration options shared by both processors including containerTag and conversationId + * @param options - Configuration options shared by both processors including containerTag and customId * @returns Object containing both input and output processors * * @example @@ -432,7 +426,7 @@ export function createSupermemoryOutputProcessor( * * const { input, output } = createSupermemoryProcessors({ * containerTag: "user-123", - * conversationId: "conv-456", + * customId: "conv-456", * mode: "full", * addMemory: "always", * }) diff --git a/packages/tools/src/mastra/types.ts b/packages/tools/src/mastra/types.ts index 49d7d2ee0..9870ea3ad 100644 --- a/packages/tools/src/mastra/types.ts +++ b/packages/tools/src/mastra/types.ts @@ -43,9 +43,9 @@ export interface SupermemoryMastraOptions extends SupermemoryBaseOptions { containerTag: string /** - * Conversation ID for grouping messages into the same document + * Custom ID for grouping messages into the same document (e.g., conversation ID) */ - conversationId: string + customId: string } export type { PromptTemplate, MemoryMode, AddMemoryMode, MemoryPromptData } diff --git a/packages/tools/src/mastra/wrapper.ts b/packages/tools/src/mastra/wrapper.ts index 515656738..8710200f1 100644 --- a/packages/tools/src/mastra/wrapper.ts +++ 
b/packages/tools/src/mastra/wrapper.ts @@ -37,7 +37,7 @@ interface AgentConfig { * - Output processor: Optionally saves conversations after responses * * @param config - The Mastra agent configuration to enhance - * @param options - Configuration options including containerTag, conversationId, and memory behavior + * @param options - Configuration options including containerTag, customId, and memory behavior * @returns Enhanced agent config with Supermemory processors injected * * @example @@ -55,7 +55,7 @@ interface AgentConfig { * }, * { * containerTag: "user-123", - * conversationId: "conv-456", + * customId: "conv-456", * mode: "full", * addMemory: "always", * } diff --git a/packages/tools/test/mastra/integration.test.ts b/packages/tools/test/mastra/integration.test.ts index 2e1d573fb..88546e08a 100644 --- a/packages/tools/test/mastra/integration.test.ts +++ b/packages/tools/test/mastra/integration.test.ts @@ -37,7 +37,7 @@ const INTEGRATION_CONFIG = { apiKey: process.env.SUPERMEMORY_API_KEY || "", baseUrl: process.env.SUPERMEMORY_BASE_URL || "https://api.supermemory.ai", containerTag: "integration-test-mastra", - conversationId: `integration-test-${Date.now()}`, + customId: `integration-test-${Date.now()}`, } const shouldRunIntegration = !!process.env.SUPERMEMORY_API_KEY @@ -126,7 +126,7 @@ describe.skipIf(!shouldRunIntegration)( it("should fetch real memories and inject into messageList", async () => { const processor = new SupermemoryInputProcessor({ containerTag: INTEGRATION_CONFIG.containerTag, - conversationId: INTEGRATION_CONFIG.conversationId, + customId: INTEGRATION_CONFIG.customId, apiKey: INTEGRATION_CONFIG.apiKey, baseUrl: INTEGRATION_CONFIG.baseUrl, mode: "profile", @@ -152,7 +152,7 @@ describe.skipIf(!shouldRunIntegration)( const processor = new SupermemoryInputProcessor({ containerTag: INTEGRATION_CONFIG.containerTag, - conversationId: INTEGRATION_CONFIG.conversationId, + customId: INTEGRATION_CONFIG.customId, apiKey: INTEGRATION_CONFIG.apiKey, 
baseUrl: INTEGRATION_CONFIG.baseUrl, mode: "query", @@ -191,7 +191,7 @@ describe.skipIf(!shouldRunIntegration)( const processor = new SupermemoryInputProcessor({ containerTag: INTEGRATION_CONFIG.containerTag, - conversationId: INTEGRATION_CONFIG.conversationId, + customId: INTEGRATION_CONFIG.customId, apiKey: INTEGRATION_CONFIG.apiKey, baseUrl: INTEGRATION_CONFIG.baseUrl, mode: "full", @@ -225,7 +225,7 @@ describe.skipIf(!shouldRunIntegration)( const processor = new SupermemoryInputProcessor({ containerTag: INTEGRATION_CONFIG.containerTag, - conversationId: INTEGRATION_CONFIG.conversationId, + customId: INTEGRATION_CONFIG.customId, apiKey: INTEGRATION_CONFIG.apiKey, baseUrl: INTEGRATION_CONFIG.baseUrl, mode: "profile", @@ -260,7 +260,7 @@ describe.skipIf(!shouldRunIntegration)( const processor = new SupermemoryInputProcessor({ containerTag: INTEGRATION_CONFIG.containerTag, - conversationId: INTEGRATION_CONFIG.conversationId, + customId: INTEGRATION_CONFIG.customId, apiKey: INTEGRATION_CONFIG.apiKey, baseUrl: INTEGRATION_CONFIG.baseUrl, mode: "profile", @@ -284,11 +284,11 @@ describe.skipIf(!shouldRunIntegration)( it("should save conversation when addMemory is always", async () => { const fetchSpy = vi.spyOn(globalThis, "fetch") - const conversationId = `test-mastra-${Date.now()}` + const customId = `test-mastra-${Date.now()}` const processor = new SupermemoryOutputProcessor({ containerTag: INTEGRATION_CONFIG.containerTag, - conversationId: conversationId, + customId: customId, apiKey: INTEGRATION_CONFIG.apiKey, baseUrl: INTEGRATION_CONFIG.baseUrl, addMemory: "always", @@ -318,7 +318,7 @@ describe.skipIf(!shouldRunIntegration)( const processor = new SupermemoryOutputProcessor({ containerTag: INTEGRATION_CONFIG.containerTag, - conversationId: "test-thread", + customId: "test-thread", apiKey: INTEGRATION_CONFIG.apiKey, baseUrl: INTEGRATION_CONFIG.baseUrl, addMemory: "never", @@ -343,12 +343,12 @@ describe.skipIf(!shouldRunIntegration)( fetchSpy.mockRestore() }) - 
it("should use conversationId from RequestContext when available", async () => { + it("should use customId from RequestContext when available", async () => { const fetchSpy = vi.spyOn(globalThis, "fetch") const processor = new SupermemoryOutputProcessor({ containerTag: INTEGRATION_CONFIG.containerTag, - conversationId: INTEGRATION_CONFIG.conversationId, + customId: INTEGRATION_CONFIG.customId, apiKey: INTEGRATION_CONFIG.apiKey, baseUrl: INTEGRATION_CONFIG.baseUrl, addMemory: "always", @@ -361,7 +361,7 @@ describe.skipIf(!shouldRunIntegration)( await processor.processOutputResult( createOutputArgs({ messages: [ - createMessage("user", "Test with RequestContext conversationId"), + createMessage("user", "Test with RequestContext customId"), createMessage("assistant", "Got it!"), ], requestContext, @@ -383,7 +383,7 @@ describe.skipIf(!shouldRunIntegration)( it("should create working input and output processors", async () => { const { input, output } = createSupermemoryProcessors({ containerTag: INTEGRATION_CONFIG.containerTag, - conversationId: `processors-test-${Date.now()}`, + customId: `processors-test-${Date.now()}`, apiKey: INTEGRATION_CONFIG.apiKey, baseUrl: INTEGRATION_CONFIG.baseUrl, mode: "profile", @@ -420,7 +420,7 @@ describe.skipIf(!shouldRunIntegration)( const enhanced = withSupermemory(config, { containerTag: INTEGRATION_CONFIG.containerTag, - conversationId: `wrapper-test-${Date.now()}`, + customId: `wrapper-test-${Date.now()}`, apiKey: INTEGRATION_CONFIG.apiKey, baseUrl: INTEGRATION_CONFIG.baseUrl, mode: "profile", @@ -470,7 +470,7 @@ describe.skipIf(!shouldRunIntegration)( const enhanced = withSupermemory(config, { containerTag: INTEGRATION_CONFIG.containerTag, - conversationId: INTEGRATION_CONFIG.conversationId, + customId: INTEGRATION_CONFIG.customId, apiKey: INTEGRATION_CONFIG.apiKey, baseUrl: INTEGRATION_CONFIG.baseUrl, mode: "profile", @@ -491,7 +491,7 @@ describe.skipIf(!shouldRunIntegration)( it("verbose mode should not break functionality", 
async () => { const processor = new SupermemoryInputProcessor({ containerTag: INTEGRATION_CONFIG.containerTag, - conversationId: INTEGRATION_CONFIG.conversationId, + customId: INTEGRATION_CONFIG.customId, apiKey: INTEGRATION_CONFIG.apiKey, baseUrl: INTEGRATION_CONFIG.baseUrl, mode: "profile", @@ -514,7 +514,7 @@ describe.skipIf(!shouldRunIntegration)( const processor = new SupermemoryInputProcessor({ containerTag: INTEGRATION_CONFIG.containerTag, - conversationId: INTEGRATION_CONFIG.conversationId, + customId: INTEGRATION_CONFIG.customId, apiKey: INTEGRATION_CONFIG.apiKey, baseUrl: INTEGRATION_CONFIG.baseUrl, mode: "profile", @@ -543,7 +543,7 @@ describe.skipIf(!shouldRunIntegration)( it("should handle invalid API key gracefully", async () => { const processor = new SupermemoryInputProcessor({ containerTag: INTEGRATION_CONFIG.containerTag, - conversationId: INTEGRATION_CONFIG.conversationId, + customId: INTEGRATION_CONFIG.customId, apiKey: "invalid-api-key-12345", baseUrl: INTEGRATION_CONFIG.baseUrl, mode: "profile", @@ -563,7 +563,7 @@ describe.skipIf(!shouldRunIntegration)( it("output processor should handle save errors gracefully", async () => { const processor = new SupermemoryOutputProcessor({ containerTag: INTEGRATION_CONFIG.containerTag, - conversationId: "error-test", + customId: "error-test", apiKey: "invalid-api-key-12345", baseUrl: INTEGRATION_CONFIG.baseUrl, addMemory: "always", diff --git a/packages/tools/test/mastra/unit.test.ts b/packages/tools/test/mastra/unit.test.ts index 9a087c3d7..606cb16eb 100644 --- a/packages/tools/test/mastra/unit.test.ts +++ b/packages/tools/test/mastra/unit.test.ts @@ -29,7 +29,7 @@ const TEST_CONFIG = { apiKey: "test-api-key", baseUrl: "https://api.supermemory.ai", containerTag: "test-mastra-user", - conversationId: "test-conv-123", + customId: "test-conv-123", } interface MockAgentConfig { @@ -101,7 +101,7 @@ const createMockProfileResponse = ( const createMockConversationResponse = () => ({ id: "mem-123", - 
conversationId: "conv-456", + customId: "conv-456", status: "created", }) @@ -155,7 +155,7 @@ describe("SupermemoryInputProcessor", () => { it("should create processor with default options", () => { const processor = new SupermemoryInputProcessor({ containerTag: TEST_CONFIG.containerTag, - conversationId: TEST_CONFIG.conversationId, + customId: TEST_CONFIG.customId, }) expect(processor.id).toBe("supermemory-input") expect(processor.name).toBe("Supermemory Memory Injection") @@ -167,7 +167,7 @@ describe("SupermemoryInputProcessor", () => { expect(() => { new SupermemoryInputProcessor({ containerTag: TEST_CONFIG.containerTag, - conversationId: TEST_CONFIG.conversationId, + customId: TEST_CONFIG.customId, }) }).toThrow("SUPERMEMORY_API_KEY is not set") }) @@ -177,28 +177,28 @@ describe("SupermemoryInputProcessor", () => { const processor = new SupermemoryInputProcessor({ containerTag: TEST_CONFIG.containerTag, - conversationId: TEST_CONFIG.conversationId, + customId: TEST_CONFIG.customId, apiKey: "custom-key", }) expect(processor.id).toBe("supermemory-input") }) - it("should throw error if conversationId is empty", () => { + it("should throw error if customId is empty", () => { expect(() => { new SupermemoryInputProcessor({ containerTag: TEST_CONFIG.containerTag, - conversationId: "", + customId: "", }) - }).toThrow("[supermemory] conversationId is required") + }).toThrow("[supermemory] customId is required") }) - it("should throw error if conversationId is whitespace", () => { + it("should throw error if customId is whitespace", () => { expect(() => { new SupermemoryInputProcessor({ containerTag: TEST_CONFIG.containerTag, - conversationId: " ", + customId: " ", }) - }).toThrow("[supermemory] conversationId is required") + }).toThrow("[supermemory] customId is required") }) }) @@ -217,7 +217,7 @@ describe("SupermemoryInputProcessor", () => { const processor = new SupermemoryInputProcessor({ containerTag: TEST_CONFIG.containerTag, - conversationId: 
TEST_CONFIG.conversationId, + customId: TEST_CONFIG.customId, apiKey: TEST_CONFIG.apiKey, mode: "profile", }) @@ -246,7 +246,7 @@ describe("SupermemoryInputProcessor", () => { const processor = new SupermemoryInputProcessor({ containerTag: TEST_CONFIG.containerTag, - conversationId: TEST_CONFIG.conversationId, + customId: TEST_CONFIG.customId, apiKey: TEST_CONFIG.apiKey, mode: "profile", }) @@ -275,7 +275,7 @@ describe("SupermemoryInputProcessor", () => { const processor = new SupermemoryInputProcessor({ containerTag: TEST_CONFIG.containerTag, - conversationId: TEST_CONFIG.conversationId, + customId: TEST_CONFIG.customId, apiKey: TEST_CONFIG.apiKey, mode: "query", }) @@ -298,7 +298,7 @@ describe("SupermemoryInputProcessor", () => { it("should return messageList in query mode when no user message", async () => { const processor = new SupermemoryInputProcessor({ containerTag: TEST_CONFIG.containerTag, - conversationId: TEST_CONFIG.conversationId, + customId: TEST_CONFIG.customId, apiKey: TEST_CONFIG.apiKey, mode: "query", }) @@ -323,7 +323,7 @@ describe("SupermemoryInputProcessor", () => { const processor = new SupermemoryInputProcessor({ containerTag: TEST_CONFIG.containerTag, - conversationId: TEST_CONFIG.conversationId, + customId: TEST_CONFIG.customId, apiKey: TEST_CONFIG.apiKey, mode: "profile", }) @@ -340,7 +340,7 @@ describe("SupermemoryInputProcessor", () => { expect(messageList.addSystem).not.toHaveBeenCalled() }) - it("should use conversationId from requestContext fallback", async () => { + it("should use customId from requestContext fallback", async () => { fetchMock.mockResolvedValue({ ok: true, json: () => Promise.resolve(createMockProfileResponse(["Memory"])), @@ -348,7 +348,7 @@ describe("SupermemoryInputProcessor", () => { const processor = new SupermemoryInputProcessor({ containerTag: TEST_CONFIG.containerTag, - conversationId: TEST_CONFIG.conversationId, + customId: TEST_CONFIG.customId, apiKey: TEST_CONFIG.apiKey, mode: "profile", }) @@ -374,7 
+374,7 @@ describe("SupermemoryInputProcessor", () => { const processor = new SupermemoryInputProcessor({ containerTag: TEST_CONFIG.containerTag, - conversationId: TEST_CONFIG.conversationId, + customId: TEST_CONFIG.customId, apiKey: TEST_CONFIG.apiKey, mode: "query", }) @@ -428,7 +428,7 @@ describe("SupermemoryOutputProcessor", () => { it("should create processor with default options", () => { const processor = new SupermemoryOutputProcessor({ containerTag: TEST_CONFIG.containerTag, - conversationId: TEST_CONFIG.conversationId, + customId: TEST_CONFIG.customId, }) expect(processor.id).toBe("supermemory-output") expect(processor.name).toBe("Supermemory Conversation Save") @@ -444,7 +444,7 @@ describe("SupermemoryOutputProcessor", () => { const processor = new SupermemoryOutputProcessor({ containerTag: TEST_CONFIG.containerTag, - conversationId: "conv-456", + customId: "conv-456", apiKey: TEST_CONFIG.apiKey, addMemory: "always", }) @@ -481,7 +481,7 @@ describe("SupermemoryOutputProcessor", () => { it("should not save conversation when addMemory is never", async () => { const processor = new SupermemoryOutputProcessor({ containerTag: TEST_CONFIG.containerTag, - conversationId: "conv-456", + customId: "conv-456", apiKey: TEST_CONFIG.apiKey, addMemory: "never", }) @@ -498,7 +498,7 @@ describe("SupermemoryOutputProcessor", () => { expect(fetchMock).not.toHaveBeenCalled() }) - it("should use conversationId from requestContext", async () => { + it("should use customId from requestContext", async () => { fetchMock.mockResolvedValue({ ok: true, json: () => Promise.resolve(createMockConversationResponse()), @@ -506,7 +506,7 @@ describe("SupermemoryOutputProcessor", () => { const processor = new SupermemoryOutputProcessor({ containerTag: TEST_CONFIG.containerTag, - conversationId: TEST_CONFIG.conversationId, + customId: TEST_CONFIG.customId, apiKey: TEST_CONFIG.apiKey, addMemory: "always", }) @@ -528,7 +528,7 @@ describe("SupermemoryOutputProcessor", () => { const callBody = 
JSON.parse( (fetchMock.mock.calls[0]?.[1] as { body: string }).body, ) - // Should use the RequestContext override, not the default conversationId + // Should use the RequestContext override, not the default customId expect(callBody.conversationId).toBe("ctx-thread-789") }) @@ -540,7 +540,7 @@ describe("SupermemoryOutputProcessor", () => { const processor = new SupermemoryOutputProcessor({ containerTag: TEST_CONFIG.containerTag, - conversationId: "conv-456", + customId: "conv-456", apiKey: TEST_CONFIG.apiKey, addMemory: "always", }) @@ -572,7 +572,7 @@ describe("SupermemoryOutputProcessor", () => { const processor = new SupermemoryOutputProcessor({ containerTag: TEST_CONFIG.containerTag, - conversationId: "conv-456", + customId: "conv-456", apiKey: TEST_CONFIG.apiKey, addMemory: "always", }) @@ -621,7 +621,7 @@ describe("SupermemoryOutputProcessor", () => { const processor = new SupermemoryOutputProcessor({ containerTag: TEST_CONFIG.containerTag, - conversationId: "conv-456", + customId: "conv-456", apiKey: TEST_CONFIG.apiKey, addMemory: "always", }) @@ -640,7 +640,7 @@ describe("SupermemoryOutputProcessor", () => { it("should not save when no messages to save", async () => { const processor = new SupermemoryOutputProcessor({ containerTag: TEST_CONFIG.containerTag, - conversationId: "conv-456", + customId: "conv-456", apiKey: TEST_CONFIG.apiKey, addMemory: "always", }) @@ -674,7 +674,7 @@ describe("Factory functions", () => { it("should create input processor", () => { const processor = createSupermemoryProcessor({ containerTag: TEST_CONFIG.containerTag, - conversationId: TEST_CONFIG.conversationId, + customId: TEST_CONFIG.customId, }) expect(processor).toBeInstanceOf(SupermemoryInputProcessor) expect(processor.id).toBe("supermemory-input") @@ -683,7 +683,7 @@ describe("Factory functions", () => { it("should pass options to processor", () => { const processor = createSupermemoryProcessor({ containerTag: TEST_CONFIG.containerTag, - conversationId: 
TEST_CONFIG.conversationId, + customId: TEST_CONFIG.customId, apiKey: "custom-key", mode: "full", }) @@ -695,7 +695,7 @@ describe("Factory functions", () => { it("should create output processor", () => { const processor = createSupermemoryOutputProcessor({ containerTag: TEST_CONFIG.containerTag, - conversationId: TEST_CONFIG.conversationId, + customId: TEST_CONFIG.customId, }) expect(processor).toBeInstanceOf(SupermemoryOutputProcessor) expect(processor.id).toBe("supermemory-output") @@ -704,7 +704,7 @@ describe("Factory functions", () => { it("should pass options to processor", () => { const processor = createSupermemoryOutputProcessor({ containerTag: TEST_CONFIG.containerTag, - conversationId: TEST_CONFIG.conversationId, + customId: TEST_CONFIG.customId, apiKey: "custom-key", addMemory: "always", }) @@ -716,7 +716,7 @@ describe("Factory functions", () => { it("should create both input and output processors", () => { const { input, output } = createSupermemoryProcessors({ containerTag: TEST_CONFIG.containerTag, - conversationId: TEST_CONFIG.conversationId, + customId: TEST_CONFIG.customId, }) expect(input).toBeInstanceOf(SupermemoryInputProcessor) expect(output).toBeInstanceOf(SupermemoryOutputProcessor) @@ -725,7 +725,7 @@ describe("Factory functions", () => { it("should share options between processors", () => { const { input, output } = createSupermemoryProcessors({ containerTag: TEST_CONFIG.containerTag, - conversationId: TEST_CONFIG.conversationId, + customId: TEST_CONFIG.customId, apiKey: "custom-key", mode: "full", addMemory: "always", @@ -761,7 +761,7 @@ describe("withSupermemory", () => { expect(() => { withSupermemory(config, { containerTag: TEST_CONFIG.containerTag, - conversationId: TEST_CONFIG.conversationId, + customId: TEST_CONFIG.customId, }) }).toThrow("SUPERMEMORY_API_KEY is not set") }) @@ -772,7 +772,7 @@ describe("withSupermemory", () => { const config: MockAgentConfig = { id: "test-agent", name: "Test Agent" } const enhanced = 
withSupermemory(config, { containerTag: TEST_CONFIG.containerTag, - conversationId: TEST_CONFIG.conversationId, + customId: TEST_CONFIG.customId, apiKey: "custom-key", }) @@ -786,7 +786,7 @@ describe("withSupermemory", () => { const config: MockAgentConfig = { id: "test-agent", name: "Test Agent" } const enhanced = withSupermemory(config, { containerTag: TEST_CONFIG.containerTag, - conversationId: TEST_CONFIG.conversationId, + customId: TEST_CONFIG.customId, }) expect(enhanced.inputProcessors).toHaveLength(1) @@ -804,7 +804,7 @@ describe("withSupermemory", () => { } const enhanced = withSupermemory(config, { containerTag: TEST_CONFIG.containerTag, - conversationId: TEST_CONFIG.conversationId, + customId: TEST_CONFIG.customId, }) expect(enhanced.id).toBe("test-agent") @@ -826,7 +826,7 @@ describe("withSupermemory", () => { const enhanced = withSupermemory(config, { containerTag: TEST_CONFIG.containerTag, - conversationId: TEST_CONFIG.conversationId, + customId: TEST_CONFIG.customId, }) expect(enhanced.inputProcessors).toHaveLength(2) @@ -847,7 +847,7 @@ describe("withSupermemory", () => { const enhanced = withSupermemory(config, { containerTag: TEST_CONFIG.containerTag, - conversationId: TEST_CONFIG.conversationId, + customId: TEST_CONFIG.customId, }) expect(enhanced.outputProcessors).toHaveLength(2) @@ -867,7 +867,7 @@ describe("withSupermemory", () => { const enhanced = withSupermemory(config, { containerTag: TEST_CONFIG.containerTag, - conversationId: TEST_CONFIG.conversationId, + customId: TEST_CONFIG.customId, }) expect(enhanced.inputProcessors).toHaveLength(2) @@ -884,7 +884,7 @@ describe("withSupermemory", () => { const config: MockAgentConfig = { id: "test-agent", name: "Test Agent" } const enhanced = withSupermemory(config, { containerTag: TEST_CONFIG.containerTag, - conversationId: TEST_CONFIG.conversationId, + customId: TEST_CONFIG.customId, mode: "full", addMemory: "always", verbose: true, @@ -895,27 +895,27 @@ describe("withSupermemory", () => { }) }) - 
describe("conversationId validation", () => { - it("should throw error if conversationId is empty", () => { + describe("customId validation", () => { + it("should throw error if customId is empty", () => { const config: MockAgentConfig = { id: "test-agent", name: "Test Agent" } expect(() => { withSupermemory(config, { containerTag: TEST_CONFIG.containerTag, - conversationId: "", + customId: "", }) - }).toThrow("[supermemory] conversationId is required") + }).toThrow("[supermemory] customId is required") }) - it("should throw error if conversationId is whitespace", () => { + it("should throw error if customId is whitespace", () => { const config: MockAgentConfig = { id: "test-agent", name: "Test Agent" } expect(() => { withSupermemory(config, { containerTag: TEST_CONFIG.containerTag, - conversationId: " ", + customId: " ", }) - }).toThrow("[supermemory] conversationId is required") + }).toThrow("[supermemory] customId is required") }) }) })