diff --git a/apps/docs/integrations/mastra.mdx b/apps/docs/integrations/mastra.mdx
index 0874c763e..715367490 100644
--- a/apps/docs/integrations/mastra.mdx
+++ b/apps/docs/integrations/mastra.mdx
@@ -34,11 +34,11 @@ const agent = new Agent(withSupermemory(
model: openai("gpt-4o"),
instructions: "You are a helpful assistant.",
},
- "user-123", // containerTag - scopes memories to this user
{
+ containerTag: "user-123", // scopes memories to this user
+ customId: "conv-456", // groups messages into the same document
mode: "full",
addMemory: "always",
- threadId: "conv-456",
}
))
@@ -51,10 +51,10 @@ const response = await agent.generate("What do you know about me?")
```typescript
const agent = new Agent(withSupermemory(
{ id: "my-assistant", model: openai("gpt-4o"), ... },
- "user-123",
{
+ containerTag: "user-123",
+ customId: "conv-456",
addMemory: "always",
- threadId: "conv-456" // Required for conversation grouping
}
))
```
@@ -100,7 +100,8 @@ sequenceDiagram
 | `baseUrl` | `string` | `https://api.supermemory.ai` | Custom API endpoint |
 | `mode` | `"profile" \| "query" \| "full"` | `"profile"` | Memory search mode |
 | `addMemory` | `"always" \| "never"` | `"never"` | Auto-save conversations |
-| `threadId` | `string` | - | Conversation ID for grouping messages |
+| `containerTag` | `string` | - | **Required.** User/container ID for scoping memories |
+| `customId` | `string` | - | **Required.** Conversation ID for grouping messages into one document |
| `verbose` | `boolean` | `false` | Enable debug logging |
| `promptTemplate` | `function` | - | Custom memory formatting |
@@ -111,19 +110,19 @@ sequenceDiagram
**Profile Mode (Default)** - Retrieves the user's complete profile without query-based filtering:
```typescript
-const agent = new Agent(withSupermemory(config, "user-123", { mode: "profile" }))
+const agent = new Agent(withSupermemory(config, { containerTag: "user-123", customId: "conv-456", mode: "profile" }))
```
**Query Mode** - Searches memories based on the user's message:
```typescript
-const agent = new Agent(withSupermemory(config, "user-123", { mode: "query" }))
+const agent = new Agent(withSupermemory(config, { containerTag: "user-123", customId: "conv-456", mode: "query" }))
```
**Full Mode** - Combines profile AND query-based search for maximum context:
```typescript
-const agent = new Agent(withSupermemory(config, "user-123", { mode: "full" }))
+const agent = new Agent(withSupermemory(config, { containerTag: "user-123", customId: "conv-456", mode: "full" }))
### Mode Comparison
@@ -137,15 +136,15 @@ const agent = new Agent(withSupermemory(config, "user-123", { mode: "full" }))
## Saving Conversations
-Enable automatic conversation saving with `addMemory: "always"`. A `threadId` is required to group messages:
+Enable automatic conversation saving with `addMemory: "always"`. The `customId` parameter groups messages into the same document:
```typescript
const agent = new Agent(withSupermemory(
{ id: "my-assistant", model: openai("gpt-4o"), instructions: "..." },
- "user-123",
{
+ containerTag: "user-123",
+ customId: "conv-456",
addMemory: "always",
- threadId: "conv-456",
}
))
@@ -154,10 +153,6 @@ await agent.generate("I prefer TypeScript over JavaScript")
await agent.generate("My favorite framework is Next.js")
```
-
- Without a `threadId`, the output processor will log a warning and skip saving. Always provide a `threadId` when using `addMemory: "always"`.
-
-
---
## Custom Prompt Templates
@@ -182,8 +177,9 @@ const claudePrompt = (data: MemoryPromptData) => `
const agent = new Agent(withSupermemory(
{ id: "my-assistant", model: openai("gpt-4o"), instructions: "..." },
- "user-123",
{
+ containerTag: "user-123",
+ customId: "conv-456",
mode: "full",
promptTemplate: claudePrompt,
}
@@ -210,7 +206,9 @@ const agent = new Agent({
name: "My Assistant",
model: openai("gpt-4o"),
inputProcessors: [
- createSupermemoryProcessor("user-123", {
+ createSupermemoryProcessor({
+ containerTag: "user-123",
+ customId: "conv-456",
mode: "full",
verbose: true,
}),
@@ -232,9 +230,10 @@ const agent = new Agent({
name: "My Assistant",
model: openai("gpt-4o"),
outputProcessors: [
- createSupermemoryOutputProcessor("user-123", {
+ createSupermemoryOutputProcessor({
+ containerTag: "user-123",
+ customId: "conv-456",
addMemory: "always",
- threadId: "conv-456",
}),
],
})
@@ -249,10 +248,11 @@ import { Agent } from "@mastra/core/agent"
import { createSupermemoryProcessors } from "@supermemory/tools/mastra"
import { openai } from "@ai-sdk/openai"
-const { input, output } = createSupermemoryProcessors("user-123", {
+const { input, output } = createSupermemoryProcessors({
+ containerTag: "user-123",
+ customId: "conv-456",
mode: "full",
addMemory: "always",
- threadId: "conv-456",
verbose: true,
})
@@ -269,7 +269,7 @@ const agent = new Agent({
## Using RequestContext
-Mastra's `RequestContext` can provide `threadId` dynamically:
+Mastra's `RequestContext` can provide a dynamic custom ID override:
```typescript
import { Agent } from "@mastra/core/agent"
@@ -279,15 +279,15 @@ import { openai } from "@ai-sdk/openai"
const agent = new Agent(withSupermemory(
{ id: "my-assistant", model: openai("gpt-4o"), instructions: "..." },
- "user-123",
{
+ containerTag: "user-123",
+ customId: "default-conv-id",
mode: "full",
addMemory: "always",
- // threadId not set - will use RequestContext
}
))
-// Set threadId dynamically via RequestContext
+// Override customId dynamically via RequestContext
const ctx = new RequestContext()
ctx.set(MASTRA_THREAD_ID_KEY, "dynamic-thread-id")
@@ -303,8 +303,11 @@ Enable detailed logging for debugging:
```typescript
const agent = new Agent(withSupermemory(
{ id: "my-assistant", model: openai("gpt-4o"), instructions: "..." },
- "user-123",
- { verbose: true }
+ {
+ containerTag: "user-123",
+ customId: "conv-456",
+ verbose: true,
+ }
))
// Console output:
@@ -330,7 +333,10 @@ const agent = new Agent(withSupermemory(
inputProcessors: [myLoggingProcessor],
outputProcessors: [myAnalyticsProcessor],
},
- "user-123"
+ {
+ containerTag: "user-123",
+ customId: "conv-456",
+ }
))
```
@@ -345,15 +351,13 @@ Enhances a Mastra agent config with memory capabilities.
```typescript
function withSupermemory(
config: T,
- containerTag: string,
- options?: SupermemoryMastraOptions
+ options: SupermemoryMastraOptions
): T
```
**Parameters:**
- `config` - The Mastra agent configuration object
-- `containerTag` - User/container ID for scoping memories
-- `options` - Configuration options
+- `options` - Configuration options including `containerTag` and `customId`
**Returns:** Enhanced config with Supermemory processors injected
@@ -363,8 +367,7 @@ Creates an input processor for memory injection.
```typescript
function createSupermemoryProcessor(
- containerTag: string,
- options?: SupermemoryMastraOptions
+ options: SupermemoryMastraOptions
): SupermemoryInputProcessor
```
@@ -374,8 +377,7 @@ Creates an output processor for conversation saving.
```typescript
function createSupermemoryOutputProcessor(
- containerTag: string,
- options?: SupermemoryMastraOptions
+ options: SupermemoryMastraOptions
): SupermemoryOutputProcessor
```
@@ -385,8 +387,7 @@ Creates both processors with shared configuration.
```typescript
function createSupermemoryProcessors(
- containerTag: string,
- options?: SupermemoryMastraOptions
+ options: SupermemoryMastraOptions
): {
input: SupermemoryInputProcessor
output: SupermemoryOutputProcessor
@@ -397,11 +398,12 @@ function createSupermemoryProcessors(
```typescript
interface SupermemoryMastraOptions {
+ containerTag: string // User/container ID for scoping memories
+ customId: string // Custom ID to group messages into the same document
apiKey?: string
baseUrl?: string
mode?: "profile" | "query" | "full"
addMemory?: "always" | "never"
- threadId?: string
verbose?: boolean
promptTemplate?: (data: MemoryPromptData) => string
}
@@ -423,14 +425,17 @@ Processors gracefully handle errors without breaking the agent:
- **API errors** - Logged and skipped; agent continues without memories
- **Missing API key** - Throws immediately with helpful error message
-- **Missing threadId** - Warns in console; skips saving
+- **Missing or empty `containerTag`/`customId`** - Throws immediately with a helpful `[supermemory]`-prefixed error message
```typescript
// Missing API key throws immediately
const agent = new Agent(withSupermemory(
{ id: "my-assistant", model: openai("gpt-4o"), instructions: "..." },
- "user-123",
- { apiKey: undefined } // Will check SUPERMEMORY_API_KEY env
+ {
+ containerTag: "user-123",
+ customId: "conv-456",
+ apiKey: undefined, // Will check SUPERMEMORY_API_KEY env
+ }
))
// Error: SUPERMEMORY_API_KEY is not set
```
diff --git a/packages/tools/README.md b/packages/tools/README.md
index 2d03411d3..d3306a8f1 100644
--- a/packages/tools/README.md
+++ b/packages/tools/README.md
@@ -409,10 +409,42 @@ const addResult = await tools.addMemory({
### Mastra Usage
+> **⚠️ Breaking Change in v2.0.0**: The Mastra integration API has been updated to use object-based parameters instead of positional parameters for better clarity. See the migration guide below.
+
Add persistent memory to [Mastra](https://mastra.ai) AI agents. The integration provides processors that:
- **Input Processor**: Fetches relevant memories and injects them into the system prompt before LLM calls
- **Output Processor**: Optionally saves conversations to Supermemory after responses
+#### Migration from v1.x to v2.0.0
+
+**v1.x (old API with positional parameters):**
+```typescript
+// Old API - DEPRECATED
+withSupermemory(config, "user-123", { mode: "full", threadId: "conv-456" })
+new SupermemoryInputProcessor("user-123", { mode: "full", threadId: "conv-456" })
+createSupermemoryProcessors("user-123", { mode: "full", threadId: "conv-456" })
+```
+
+**v2.0.0 (new API with object parameters):**
+```typescript
+// New API - clearer with explicit key-value pairs
+withSupermemory(config, {
+ containerTag: "user-123",
+ customId: "conv-456",
+ mode: "full"
+})
+new SupermemoryInputProcessor({
+ containerTag: "user-123",
+ customId: "conv-456",
+ mode: "full"
+})
+createSupermemoryProcessors({
+ containerTag: "user-123",
+ customId: "conv-456",
+ mode: "full"
+})
+```
+
#### Quick Start with `withSupermemory` Wrapper
The simplest way to add memory to a Mastra agent - wrap your config before creating the Agent:
@@ -430,11 +462,11 @@ const agent = new Agent(withSupermemory(
model: openai("gpt-4o"),
instructions: "You are a helpful assistant.",
},
- "user-123", // containerTag - scopes memories to this user
{
+ containerTag: "user-123", // scopes memories to this user
+ customId: "conv-456", // groups messages into the same document
mode: "full",
addMemory: "always",
- threadId: "conv-456",
}
))
@@ -451,10 +483,11 @@ import { Agent } from "@mastra/core/agent"
import { createSupermemoryProcessors } from "@supermemory/tools/mastra"
import { openai } from "@ai-sdk/openai"
-const { input, output } = createSupermemoryProcessors("user-123", {
+const { input, output } = createSupermemoryProcessors({
+ containerTag: "user-123",
+ customId: "conv-456",
mode: "full",
addMemory: "always",
- threadId: "conv-456",
verbose: true, // Enable logging
})
@@ -481,12 +514,13 @@ import { openai } from "@ai-sdk/openai"
async function main() {
const userId = "user-alex-123"
- const threadId = `thread-${Date.now()}`
+ const customId = `conv-${Date.now()}`
- const { input, output } = createSupermemoryProcessors(userId, {
+ const { input, output } = createSupermemoryProcessors({
+ containerTag: userId,
+ customId: customId,
mode: "profile", // Fetch user profile memories
addMemory: "always", // Save all conversations
- threadId,
verbose: true,
})
@@ -522,13 +556,25 @@ main()
```typescript
// Profile mode - good for general personalization
-const { input } = createSupermemoryProcessors("user-123", { mode: "profile" })
+const { input } = createSupermemoryProcessors({
+ containerTag: "user-123",
+ customId: "conv-456",
+ mode: "profile"
+})
// Query mode - good for specific lookups
-const { input } = createSupermemoryProcessors("user-123", { mode: "query" })
+const { input } = createSupermemoryProcessors({
+ containerTag: "user-123",
+ customId: "conv-456",
+ mode: "query"
+})
// Full mode - comprehensive context
-const { input } = createSupermemoryProcessors("user-123", { mode: "full" })
+const { input } = createSupermemoryProcessors({
+ containerTag: "user-123",
+ customId: "conv-456",
+ mode: "full"
+})
```
#### Custom Prompt Templates
@@ -545,25 +591,28 @@ ${data.generalSearchMemories}
`.trim()
-const { input, output } = createSupermemoryProcessors("user-123", {
+const { input, output } = createSupermemoryProcessors({
+ containerTag: "user-123",
+ customId: "conv-456",
mode: "full",
promptTemplate: customTemplate,
})
```
-#### Using RequestContext for Dynamic Thread IDs
+#### Using RequestContext for Dynamic Custom IDs
-Instead of hardcoding `threadId`, use Mastra's RequestContext for dynamic values:
+Mastra's RequestContext can override the `customId` dynamically per request:
```typescript
import { Agent } from "@mastra/core/agent"
import { RequestContext, MASTRA_THREAD_ID_KEY } from "@mastra/core/request-context"
import { createSupermemoryProcessors } from "@supermemory/tools/mastra"
-const { input, output } = createSupermemoryProcessors("user-123", {
+const { input, output } = createSupermemoryProcessors({
+ containerTag: "user-123",
+ customId: "default-conv-id",
mode: "profile",
addMemory: "always",
- // threadId not set here - will be read from RequestContext
})
const agent = new Agent({
@@ -574,7 +623,7 @@ const agent = new Agent({
outputProcessors: [output],
})
-// Set threadId dynamically per request
+// Override customId dynamically per request
const ctx = new RequestContext()
ctx.set(MASTRA_THREAD_ID_KEY, "dynamic-thread-123")
@@ -589,7 +638,8 @@ interface SupermemoryMastraOptions {
   baseUrl?: string // Custom API endpoint
   mode?: "profile" | "query" | "full" // Memory search mode (default: "profile")
   addMemory?: "always" | "never" // Auto-save conversations (default: "never")
-  threadId?: string // Conversation ID for grouping messages
+  containerTag: string // User/container ID for scoping memories (required)
+  customId: string // Conversation ID for grouping messages into one document (required)
verbose?: boolean // Enable debug logging (default: false)
promptTemplate?: (data: MemoryPromptData) => string // Custom memory formatting
}
diff --git a/packages/tools/package.json b/packages/tools/package.json
index 8d192aba6..80004a6d1 100644
--- a/packages/tools/package.json
+++ b/packages/tools/package.json
@@ -1,7 +1,7 @@
{
"name": "@supermemory/tools",
"type": "module",
- "version": "1.4.01",
+ "version": "2.0.0",
"description": "Memory tools for AI SDK and OpenAI function calling with supermemory",
"scripts": {
"build": "tsdown",
diff --git a/packages/tools/src/mastra/processor.ts b/packages/tools/src/mastra/processor.ts
index d2975127d..f3ada2862 100644
--- a/packages/tools/src/mastra/processor.ts
+++ b/packages/tools/src/mastra/processor.ts
@@ -43,11 +43,11 @@ import type {
*/
interface ProcessorContext {
containerTag: string
+ customId: string
apiKey: string
baseUrl: string
mode: MemoryMode
addMemory: "always" | "never"
- threadId?: string
logger: Logger
promptTemplate?: PromptTemplate
memoryCache: MemoryCache
@@ -57,20 +57,41 @@ interface ProcessorContext {
* Creates the shared processor context from options.
*/
function createProcessorContext(
- containerTag: string,
- options: SupermemoryMastraOptions = {},
+ options: SupermemoryMastraOptions,
): ProcessorContext {
+ const { containerTag, customId } = options
+
+ if (
+ !containerTag ||
+ typeof containerTag !== "string" ||
+ !containerTag.trim()
+ ) {
+ throw new Error(
+ "[supermemory] containerTag is required and must be a non-empty string. " +
+ "Pass it in the options object: new SupermemoryInputProcessor({ containerTag: 'user-123', customId: 'conv-456' })",
+ )
+ }
+
+ if (typeof customId !== "string" || !customId.trim()) {
+ throw new Error(
+ "[supermemory] customId is required and must be a non-empty string. " +
+ "Pass a unique identifier (e.g., session ID, chat ID) in the options object. " +
+ "This ensures messages are grouped into the same document for a conversation. " +
+ "Example: new SupermemoryInputProcessor({ containerTag: 'user-123', customId: 'conversation-456' })",
+ )
+ }
+
const apiKey = validateApiKey(options.apiKey)
const baseUrl = normalizeBaseUrl(options.baseUrl)
const logger = createLogger(options.verbose ?? false)
return {
containerTag,
+ customId,
apiKey,
baseUrl,
mode: options.mode ?? "profile",
addMemory: options.addMemory ?? "never",
- threadId: options.threadId,
logger,
promptTemplate: options.promptTemplate,
memoryCache: new MemoryCache(),
@@ -78,19 +99,25 @@ function createProcessorContext(
}
/**
- * Gets the effective threadId from options or RequestContext.
+ * Gets the effective customId from context or RequestContext.
+ *
+ * Priority order:
+ * 1. RequestContext
+ * 2. Default customId from processor options
*/
-function getEffectiveThreadId(
+function getEffectiveCustomId(
ctx: ProcessorContext,
requestContext?: RequestContext,
-): string | undefined {
- if (ctx.threadId) {
- return ctx.threadId
- }
+): string {
+ // Check RequestContext FIRST to allow dynamic per-request override
if (requestContext) {
- return requestContext.get(MASTRA_THREAD_ID_KEY) as string | undefined
+ const fromCtx = requestContext.get(MASTRA_THREAD_ID_KEY) as
+ | string
+ | undefined
+ if (fromCtx) return fromCtx
}
- return undefined
+ // Fall back to required default customId
+ return ctx.customId
}
/**
@@ -111,7 +138,9 @@ function getEffectiveThreadId(
* name: "My Agent",
* model: openai("gpt-4o"),
* inputProcessors: [
- * new SupermemoryInputProcessor("user-123", {
+ * new SupermemoryInputProcessor({
+ * containerTag: "user-123",
+ * customId: "conv-456",
* mode: "full",
* verbose: true,
* }),
@@ -125,8 +154,8 @@ export class SupermemoryInputProcessor implements Processor {
private ctx: ProcessorContext
- constructor(containerTag: string, options: SupermemoryMastraOptions = {}) {
- this.ctx = createProcessorContext(containerTag, options)
+ constructor(options: SupermemoryMastraOptions) {
+ this.ctx = createProcessorContext(options)
}
async processInput(args: ProcessInputArgs): Promise {
@@ -146,10 +175,10 @@ export class SupermemoryInputProcessor implements Processor {
return messageList
}
- const effectiveThreadId = getEffectiveThreadId(this.ctx, requestContext)
+ const effectiveCustomId = getEffectiveCustomId(this.ctx, requestContext)
const turnKey = MemoryCache.makeTurnKey(
this.ctx.containerTag,
- effectiveThreadId,
+ effectiveCustomId,
this.ctx.mode,
queryText || "",
)
@@ -163,7 +192,7 @@ export class SupermemoryInputProcessor implements Processor {
this.ctx.logger.info("Starting memory search", {
containerTag: this.ctx.containerTag,
- threadId: effectiveThreadId,
+ customId: effectiveCustomId,
mode: this.ctx.mode,
})
@@ -213,9 +242,10 @@ export class SupermemoryInputProcessor implements Processor {
* name: "My Agent",
* model: openai("gpt-4o"),
* outputProcessors: [
- * new SupermemoryOutputProcessor("user-123", {
+ * new SupermemoryOutputProcessor({
+ * containerTag: "user-123",
+ * customId: "conv-456",
* addMemory: "always",
- * threadId: "conv-456",
* }),
* ],
* })
@@ -227,26 +257,20 @@ export class SupermemoryOutputProcessor implements Processor {
private ctx: ProcessorContext
- constructor(containerTag: string, options: SupermemoryMastraOptions = {}) {
- this.ctx = createProcessorContext(containerTag, options)
+ constructor(options: SupermemoryMastraOptions) {
+ this.ctx = createProcessorContext(options)
}
async processOutputResult(
args: ProcessOutputResultArgs,
): Promise {
- const { messages, messageList, requestContext } = args
+ const { messages, requestContext } = args
if (this.ctx.addMemory !== "always") {
return messages
}
- const effectiveThreadId = getEffectiveThreadId(this.ctx, requestContext)
- if (!effectiveThreadId) {
- this.ctx.logger.warn(
- "No threadId provided for conversation save. Provide via options.threadId or RequestContext.",
- )
- return messages
- }
+ const effectiveCustomId = getEffectiveCustomId(this.ctx, requestContext)
try {
const conversationMessages = this.convertToConversationMessages(messages)
@@ -257,7 +281,7 @@ export class SupermemoryOutputProcessor implements Processor {
}
const response = await addConversation({
- conversationId: effectiveThreadId,
+ conversationId: effectiveCustomId,
messages: conversationMessages,
containerTags: [this.ctx.containerTag],
apiKey: this.ctx.apiKey,
@@ -266,7 +290,7 @@ export class SupermemoryOutputProcessor implements Processor {
this.ctx.logger.info("Conversation saved successfully", {
containerTag: this.ctx.containerTag,
- conversationId: effectiveThreadId,
+ customId: effectiveCustomId,
messageCount: conversationMessages.length,
responseId: response.id,
})
@@ -323,8 +347,7 @@ export class SupermemoryOutputProcessor implements Processor {
/**
* Creates a Supermemory input processor for memory injection.
*
- * @param containerTag - The container tag/user ID for scoping memories
- * @param options - Configuration options
+ * @param options - Configuration options including containerTag and customId
* @returns Configured SupermemoryInputProcessor instance
*
* @example
@@ -333,7 +356,9 @@ export class SupermemoryOutputProcessor implements Processor {
* import { createSupermemoryProcessor } from "@supermemory/tools/mastra"
* import { openai } from "@ai-sdk/openai"
*
- * const processor = createSupermemoryProcessor("user-123", {
+ * const processor = createSupermemoryProcessor({
+ * containerTag: "user-123",
+ * customId: "conv-456",
* mode: "full",
* verbose: true,
* })
@@ -347,17 +372,15 @@ export class SupermemoryOutputProcessor implements Processor {
* ```
*/
export function createSupermemoryProcessor(
- containerTag: string,
- options: SupermemoryMastraOptions = {},
+ options: SupermemoryMastraOptions,
): SupermemoryInputProcessor {
- return new SupermemoryInputProcessor(containerTag, options)
+ return new SupermemoryInputProcessor(options)
}
/**
* Creates a Supermemory output processor for saving conversations.
*
- * @param containerTag - The container tag/user ID for scoping memories
- * @param options - Configuration options
+ * @param options - Configuration options including containerTag and customId
* @returns Configured SupermemoryOutputProcessor instance
*
* @example
@@ -366,9 +389,10 @@ export function createSupermemoryProcessor(
* import { createSupermemoryOutputProcessor } from "@supermemory/tools/mastra"
* import { openai } from "@ai-sdk/openai"
*
- * const processor = createSupermemoryOutputProcessor("user-123", {
+ * const processor = createSupermemoryOutputProcessor({
+ * containerTag: "user-123",
+ * customId: "conv-456",
* addMemory: "always",
- * threadId: "conv-456",
* })
*
* const agent = new Agent({
@@ -380,10 +404,9 @@ export function createSupermemoryProcessor(
* ```
*/
export function createSupermemoryOutputProcessor(
- containerTag: string,
- options: SupermemoryMastraOptions = {},
+ options: SupermemoryMastraOptions,
): SupermemoryOutputProcessor {
- return new SupermemoryOutputProcessor(containerTag, options)
+ return new SupermemoryOutputProcessor(options)
}
/**
@@ -392,8 +415,7 @@ export function createSupermemoryOutputProcessor(
* Use this when you want both memory injection and conversation saving
* with consistent settings across both processors.
*
- * @param containerTag - The container tag/user ID for scoping memories
- * @param options - Configuration options shared by both processors
+ * @param options - Configuration options shared by both processors including containerTag and customId
* @returns Object containing both input and output processors
*
* @example
@@ -402,10 +424,11 @@ export function createSupermemoryOutputProcessor(
* import { createSupermemoryProcessors } from "@supermemory/tools/mastra"
* import { openai } from "@ai-sdk/openai"
*
- * const { input, output } = createSupermemoryProcessors("user-123", {
+ * const { input, output } = createSupermemoryProcessors({
+ * containerTag: "user-123",
+ * customId: "conv-456",
* mode: "full",
* addMemory: "always",
- * threadId: "conv-456",
* })
*
* const agent = new Agent({
@@ -418,14 +441,13 @@ export function createSupermemoryOutputProcessor(
* ```
*/
export function createSupermemoryProcessors(
- containerTag: string,
- options: SupermemoryMastraOptions = {},
+ options: SupermemoryMastraOptions,
): {
input: SupermemoryInputProcessor
output: SupermemoryOutputProcessor
} {
return {
- input: new SupermemoryInputProcessor(containerTag, options),
- output: new SupermemoryOutputProcessor(containerTag, options),
+ input: new SupermemoryInputProcessor(options),
+ output: new SupermemoryOutputProcessor(options),
}
}
diff --git a/packages/tools/src/mastra/types.ts b/packages/tools/src/mastra/types.ts
index efe2e7e4e..9870ea3ad 100644
--- a/packages/tools/src/mastra/types.ts
+++ b/packages/tools/src/mastra/types.ts
@@ -38,10 +38,14 @@ export type { RequestContext } from "@mastra/core/request-context"
*/
export interface SupermemoryMastraOptions extends SupermemoryBaseOptions {
/**
- * When using the output processor, set this to enable automatic conversation saving.
- * The threadId is used to group messages into a single conversation.
+ * Container tag for scoping memories (e.g., user ID)
*/
- threadId?: string
+ containerTag: string
+
+ /**
+ * Custom ID for grouping messages into the same document (e.g., conversation ID)
+ */
+ customId: string
}
export type { PromptTemplate, MemoryMode, AddMemoryMode, MemoryPromptData }
diff --git a/packages/tools/src/mastra/wrapper.ts b/packages/tools/src/mastra/wrapper.ts
index 8137e7eed..8710200f1 100644
--- a/packages/tools/src/mastra/wrapper.ts
+++ b/packages/tools/src/mastra/wrapper.ts
@@ -37,8 +37,7 @@ interface AgentConfig {
* - Output processor: Optionally saves conversations after responses
*
* @param config - The Mastra agent configuration to enhance
- * @param containerTag - The container tag/user ID for scoping memories
- * @param options - Configuration options for memory behavior
+ * @param options - Configuration options including containerTag, customId, and memory behavior
* @returns Enhanced agent config with Supermemory processors injected
*
* @example
@@ -54,11 +53,11 @@ interface AgentConfig {
* model: openai("gpt-4o"),
* instructions: "You are a helpful assistant.",
* },
- * "user-123",
* {
+ * containerTag: "user-123",
+ * customId: "conv-456",
* mode: "full",
* addMemory: "always",
- * threadId: "conv-456",
* }
* )
*
@@ -69,13 +68,12 @@ interface AgentConfig {
*/
export function withSupermemory(
config: T,
- containerTag: string,
- options: SupermemoryMastraOptions = {},
+ options: SupermemoryMastraOptions,
): T {
validateApiKey(options.apiKey)
- const inputProcessor = new SupermemoryInputProcessor(containerTag, options)
- const outputProcessor = new SupermemoryOutputProcessor(containerTag, options)
+ const inputProcessor = new SupermemoryInputProcessor(options)
+ const outputProcessor = new SupermemoryOutputProcessor(options)
const existingInputProcessors = config.inputProcessors ?? []
const existingOutputProcessors = config.outputProcessors ?? []
diff --git a/packages/tools/src/shared/cache.ts b/packages/tools/src/shared/cache.ts
index 15ce16fe4..002b02dad 100644
--- a/packages/tools/src/shared/cache.ts
+++ b/packages/tools/src/shared/cache.ts
@@ -13,19 +13,19 @@ export class MemoryCache {
* Normalizes the message by trimming and collapsing whitespace.
*
* @param containerTag - The container tag/user ID
- * @param threadId - Optional thread/conversation ID
+ * @param conversationId - Optional conversation ID
* @param mode - The memory retrieval mode
* @param message - The user message content
* @returns A unique cache key for this turn
*/
static makeTurnKey(
containerTag: string,
- threadId: string | undefined,
+ conversationId: string | undefined,
mode: MemoryMode,
message: string,
): string {
const normalizedMessage = message.trim().replace(/\s+/g, " ")
- return `${containerTag}:${threadId || ""}:${mode}:${normalizedMessage}`
+ return `${containerTag}:${conversationId || ""}:${mode}:${normalizedMessage}`
}
/**
diff --git a/packages/tools/src/shared/types.ts b/packages/tools/src/shared/types.ts
index 421785f52..3103a88fd 100644
--- a/packages/tools/src/shared/types.ts
+++ b/packages/tools/src/shared/types.ts
@@ -113,8 +113,8 @@ export interface SupermemoryBaseOptions {
apiKey?: string
/** Custom Supermemory API base URL */
baseUrl?: string
- /** Optional conversation/thread ID to group messages for contextual memory generation */
- threadId?: string
+  /** Optional conversation ID used to group related messages into a single document */
+ conversationId?: string
/** Memory retrieval mode */
mode?: MemoryMode
/** Memory persistence mode */
diff --git a/packages/tools/test/mastra/integration.test.ts b/packages/tools/test/mastra/integration.test.ts
index f33b974ea..88546e08a 100644
--- a/packages/tools/test/mastra/integration.test.ts
+++ b/packages/tools/test/mastra/integration.test.ts
@@ -37,6 +37,7 @@ const INTEGRATION_CONFIG = {
apiKey: process.env.SUPERMEMORY_API_KEY || "",
baseUrl: process.env.SUPERMEMORY_BASE_URL || "https://api.supermemory.ai",
containerTag: "integration-test-mastra",
+ customId: `integration-test-${Date.now()}`,
}
const shouldRunIntegration = !!process.env.SUPERMEMORY_API_KEY
@@ -95,32 +96,49 @@ const createIntegrationMessageList = (): MessageList & {
}
}
+const createInputArgs = (
+ overrides: Partial = {},
+): ProcessInputArgs =>
+ ({
+ messages: [],
+ systemMessages: [],
+ messageList: createIntegrationMessageList(),
+ abort: vi.fn() as never,
+ retryCount: 0,
+ ...overrides,
+ }) as ProcessInputArgs
+
+const createOutputArgs = (
+ overrides: Partial = {},
+): ProcessOutputResultArgs =>
+ ({
+ messages: [],
+ messageList: createIntegrationMessageList(),
+ abort: vi.fn() as never,
+ retryCount: 0,
+ ...overrides,
+ }) as ProcessOutputResultArgs
+
describe.skipIf(!shouldRunIntegration)(
"Integration: Mastra processors with real API",
() => {
describe("SupermemoryInputProcessor", () => {
it("should fetch real memories and inject into messageList", async () => {
- const processor = new SupermemoryInputProcessor(
- INTEGRATION_CONFIG.containerTag,
- {
- apiKey: INTEGRATION_CONFIG.apiKey,
- baseUrl: INTEGRATION_CONFIG.baseUrl,
- mode: "profile",
- },
- )
+ const processor = new SupermemoryInputProcessor({
+ containerTag: INTEGRATION_CONFIG.containerTag,
+ customId: INTEGRATION_CONFIG.customId,
+ apiKey: INTEGRATION_CONFIG.apiKey,
+ baseUrl: INTEGRATION_CONFIG.baseUrl,
+ mode: "profile",
+ })
const messageList = createIntegrationMessageList()
- const messages: MastraDBMessage[] = [
- createMessage("user", "Hello, what do you know about me?"),
- ]
-
- const args: ProcessInputArgs = {
- messages,
- systemMessages: [],
+ const args = createInputArgs({
+ messages: [
+ createMessage("user", "Hello, what do you know about me?"),
+ ],
messageList,
- abort: vi.fn() as never,
- retryCount: 0,
- }
+ })
await processor.processInput(args)
@@ -132,31 +150,25 @@ describe.skipIf(!shouldRunIntegration)(
it("should use query mode with user message as search query", async () => {
const fetchSpy = vi.spyOn(globalThis, "fetch")
- const processor = new SupermemoryInputProcessor(
- INTEGRATION_CONFIG.containerTag,
- {
- apiKey: INTEGRATION_CONFIG.apiKey,
- baseUrl: INTEGRATION_CONFIG.baseUrl,
- mode: "query",
- },
+ const processor = new SupermemoryInputProcessor({
+ containerTag: INTEGRATION_CONFIG.containerTag,
+ customId: INTEGRATION_CONFIG.customId,
+ apiKey: INTEGRATION_CONFIG.apiKey,
+ baseUrl: INTEGRATION_CONFIG.baseUrl,
+ mode: "query",
+ })
+
+ await processor.processInput(
+ createInputArgs({
+ messages: [
+ createMessage(
+ "user",
+ "What are my favorite programming languages?",
+ ),
+ ],
+ }),
)
- const messageList = createIntegrationMessageList()
- const args: ProcessInputArgs = {
- messages: [
- createMessage(
- "user",
- "What are my favorite programming languages?",
- ),
- ],
- systemMessages: [],
- messageList,
- abort: vi.fn() as never,
- retryCount: 0,
- }
-
- await processor.processInput(args)
-
const profileCalls = fetchSpy.mock.calls.filter(
(call) =>
typeof call[0] === "string" && call[0].includes("/v4/profile"),
@@ -177,26 +189,20 @@ describe.skipIf(!shouldRunIntegration)(
it("should use full mode with both profile and query", async () => {
const fetchSpy = vi.spyOn(globalThis, "fetch")
- const processor = new SupermemoryInputProcessor(
- INTEGRATION_CONFIG.containerTag,
- {
- apiKey: INTEGRATION_CONFIG.apiKey,
- baseUrl: INTEGRATION_CONFIG.baseUrl,
- mode: "full",
- },
+ const processor = new SupermemoryInputProcessor({
+ containerTag: INTEGRATION_CONFIG.containerTag,
+ customId: INTEGRATION_CONFIG.customId,
+ apiKey: INTEGRATION_CONFIG.apiKey,
+ baseUrl: INTEGRATION_CONFIG.baseUrl,
+ mode: "full",
+ })
+
+ await processor.processInput(
+ createInputArgs({
+ messages: [createMessage("user", "Full mode test query")],
+ }),
)
- const messageList = createIntegrationMessageList()
- const args: ProcessInputArgs = {
- messages: [createMessage("user", "Full mode test query")],
- systemMessages: [],
- messageList,
- abort: vi.fn() as never,
- retryCount: 0,
- }
-
- await processor.processInput(args)
-
const profileCalls = fetchSpy.mock.calls.filter(
(call) =>
typeof call[0] === "string" && call[0].includes("/v4/profile"),
@@ -217,42 +223,25 @@ describe.skipIf(!shouldRunIntegration)(
it("should cache memories for repeated calls with same message", async () => {
const fetchSpy = vi.spyOn(globalThis, "fetch")
- const processor = new SupermemoryInputProcessor(
- INTEGRATION_CONFIG.containerTag,
- {
- apiKey: INTEGRATION_CONFIG.apiKey,
- baseUrl: INTEGRATION_CONFIG.baseUrl,
- mode: "profile",
- },
- )
+ const processor = new SupermemoryInputProcessor({
+ containerTag: INTEGRATION_CONFIG.containerTag,
+ customId: INTEGRATION_CONFIG.customId,
+ apiKey: INTEGRATION_CONFIG.apiKey,
+ baseUrl: INTEGRATION_CONFIG.baseUrl,
+ mode: "profile",
+ })
const messages: MastraDBMessage[] = [
createMessage("user", "Cache test message"),
]
- const args1: ProcessInputArgs = {
- messages,
- systemMessages: [],
- messageList: createIntegrationMessageList(),
- abort: vi.fn() as never,
- retryCount: 0,
- }
-
- await processor.processInput(args1)
+ await processor.processInput(createInputArgs({ messages }))
const callsAfterFirst = fetchSpy.mock.calls.filter(
(call) =>
typeof call[0] === "string" && call[0].includes("/v4/profile"),
).length
- const args2: ProcessInputArgs = {
- messages,
- systemMessages: [],
- messageList: createIntegrationMessageList(),
- abort: vi.fn() as never,
- retryCount: 0,
- }
-
- await processor.processInput(args2)
+ await processor.processInput(createInputArgs({ messages }))
const callsAfterSecond = fetchSpy.mock.calls.filter(
(call) =>
typeof call[0] === "string" && call[0].includes("/v4/profile"),
@@ -269,26 +258,22 @@ describe.skipIf(!shouldRunIntegration)(
generalSearchMemories: string
}) => `${data.userMemories}`
- const processor = new SupermemoryInputProcessor(
- INTEGRATION_CONFIG.containerTag,
- {
- apiKey: INTEGRATION_CONFIG.apiKey,
- baseUrl: INTEGRATION_CONFIG.baseUrl,
- mode: "profile",
- promptTemplate: customTemplate,
- },
- )
+ const processor = new SupermemoryInputProcessor({
+ containerTag: INTEGRATION_CONFIG.containerTag,
+ customId: INTEGRATION_CONFIG.customId,
+ apiKey: INTEGRATION_CONFIG.apiKey,
+ baseUrl: INTEGRATION_CONFIG.baseUrl,
+ mode: "profile",
+ promptTemplate: customTemplate,
+ })
const messageList = createIntegrationMessageList()
- const args: ProcessInputArgs = {
- messages: [createMessage("user", "Custom template test")],
- systemMessages: [],
- messageList,
- abort: vi.fn() as never,
- retryCount: 0,
- }
-
- await processor.processInput(args)
+ await processor.processInput(
+ createInputArgs({
+ messages: [createMessage("user", "Custom template test")],
+ messageList,
+ }),
+ )
const systemContent = messageList.getSystemContent()
expect(systemContent).toMatch(/.*<\/mastra-memories>/s)
@@ -299,30 +284,25 @@ describe.skipIf(!shouldRunIntegration)(
it("should save conversation when addMemory is always", async () => {
const fetchSpy = vi.spyOn(globalThis, "fetch")
- const threadId = `test-mastra-${Date.now()}`
-
- const processor = new SupermemoryOutputProcessor(
- INTEGRATION_CONFIG.containerTag,
- {
- apiKey: INTEGRATION_CONFIG.apiKey,
- baseUrl: INTEGRATION_CONFIG.baseUrl,
- addMemory: "always",
- threadId,
- },
+ const customId = `test-mastra-${Date.now()}`
+
+ const processor = new SupermemoryOutputProcessor({
+ containerTag: INTEGRATION_CONFIG.containerTag,
+ customId: customId,
+ apiKey: INTEGRATION_CONFIG.apiKey,
+ baseUrl: INTEGRATION_CONFIG.baseUrl,
+ addMemory: "always",
+ })
+
+ await processor.processOutputResult(
+ createOutputArgs({
+ messages: [
+ createMessage("user", "Hello from Mastra integration test"),
+ createMessage("assistant", "Hi! I'm responding to the test."),
+ ],
+ }),
)
- const args: ProcessOutputResultArgs = {
- messages: [
- createMessage("user", "Hello from Mastra integration test"),
- createMessage("assistant", "Hi! I'm responding to the test."),
- ],
- messageList: createIntegrationMessageList(),
- abort: vi.fn() as never,
- retryCount: 0,
- }
-
- await processor.processOutputResult(args)
-
const conversationCalls = fetchSpy.mock.calls.filter(
(call) =>
typeof call[0] === "string" &&
@@ -336,28 +316,23 @@ describe.skipIf(!shouldRunIntegration)(
it("should not save when addMemory is never", async () => {
const fetchSpy = vi.spyOn(globalThis, "fetch")
- const processor = new SupermemoryOutputProcessor(
- INTEGRATION_CONFIG.containerTag,
- {
- apiKey: INTEGRATION_CONFIG.apiKey,
- baseUrl: INTEGRATION_CONFIG.baseUrl,
- addMemory: "never",
- threadId: "test-thread",
- },
+ const processor = new SupermemoryOutputProcessor({
+ containerTag: INTEGRATION_CONFIG.containerTag,
+ customId: "test-thread",
+ apiKey: INTEGRATION_CONFIG.apiKey,
+ baseUrl: INTEGRATION_CONFIG.baseUrl,
+ addMemory: "never",
+ })
+
+ await processor.processOutputResult(
+ createOutputArgs({
+ messages: [
+ createMessage("user", "This should not be saved"),
+ createMessage("assistant", "Agreed"),
+ ],
+ }),
)
- const args: ProcessOutputResultArgs = {
- messages: [
- createMessage("user", "This should not be saved"),
- createMessage("assistant", "Agreed"),
- ],
- messageList: createIntegrationMessageList(),
- abort: vi.fn() as never,
- retryCount: 0,
- }
-
- await processor.processOutputResult(args)
-
const conversationCalls = fetchSpy.mock.calls.filter(
(call) =>
typeof call[0] === "string" &&
@@ -368,34 +343,30 @@ describe.skipIf(!shouldRunIntegration)(
fetchSpy.mockRestore()
})
- it("should use threadId from RequestContext when not in options", async () => {
+ it("should use customId from RequestContext when available", async () => {
const fetchSpy = vi.spyOn(globalThis, "fetch")
- const processor = new SupermemoryOutputProcessor(
- INTEGRATION_CONFIG.containerTag,
- {
- apiKey: INTEGRATION_CONFIG.apiKey,
- baseUrl: INTEGRATION_CONFIG.baseUrl,
- addMemory: "always",
- },
- )
+ const processor = new SupermemoryOutputProcessor({
+ containerTag: INTEGRATION_CONFIG.containerTag,
+ customId: INTEGRATION_CONFIG.customId,
+ apiKey: INTEGRATION_CONFIG.apiKey,
+ baseUrl: INTEGRATION_CONFIG.baseUrl,
+ addMemory: "always",
+ })
const contextThreadId = `context-thread-${Date.now()}`
const requestContext = new RequestContext()
requestContext.set(MASTRA_THREAD_ID_KEY, contextThreadId)
- const args: ProcessOutputResultArgs = {
- messages: [
- createMessage("user", "Test with RequestContext threadId"),
- createMessage("assistant", "Got it!"),
- ],
- messageList: createIntegrationMessageList(),
- abort: vi.fn() as never,
- retryCount: 0,
- requestContext,
- }
-
- await processor.processOutputResult(args)
+ await processor.processOutputResult(
+ createOutputArgs({
+ messages: [
+ createMessage("user", "Test with RequestContext customId"),
+ createMessage("assistant", "Got it!"),
+ ],
+ requestContext,
+ }),
+ )
const conversationCalls = fetchSpy.mock.calls.filter(
(call) =>
@@ -410,40 +381,32 @@ describe.skipIf(!shouldRunIntegration)(
describe("createSupermemoryProcessors", () => {
it("should create working input and output processors", async () => {
- const { input, output } = createSupermemoryProcessors(
- INTEGRATION_CONFIG.containerTag,
- {
- apiKey: INTEGRATION_CONFIG.apiKey,
- baseUrl: INTEGRATION_CONFIG.baseUrl,
- mode: "profile",
- addMemory: "always",
- threadId: `processors-test-${Date.now()}`,
- },
- )
+ const { input, output } = createSupermemoryProcessors({
+ containerTag: INTEGRATION_CONFIG.containerTag,
+ customId: `processors-test-${Date.now()}`,
+ apiKey: INTEGRATION_CONFIG.apiKey,
+ baseUrl: INTEGRATION_CONFIG.baseUrl,
+ mode: "profile",
+ addMemory: "always",
+ })
const messageList = createIntegrationMessageList()
- const inputArgs: ProcessInputArgs = {
- messages: [createMessage("user", "Test processors factory")],
- systemMessages: [],
- messageList,
- abort: vi.fn() as never,
- retryCount: 0,
- }
-
- await input.processInput(inputArgs)
+ await input.processInput(
+ createInputArgs({
+ messages: [createMessage("user", "Test processors factory")],
+ messageList,
+ }),
+ )
expect(messageList.addSystem).toHaveBeenCalled()
- const outputArgs: ProcessOutputResultArgs = {
- messages: [
- createMessage("user", "Test processors factory"),
- createMessage("assistant", "Response"),
- ],
- messageList: createIntegrationMessageList(),
- abort: vi.fn() as never,
- retryCount: 0,
- }
-
- await output.processOutputResult(outputArgs)
+ await output.processOutputResult(
+ createOutputArgs({
+ messages: [
+ createMessage("user", "Test processors factory"),
+ createMessage("assistant", "Response"),
+ ],
+ }),
+ )
})
})
@@ -455,17 +418,14 @@ describe.skipIf(!shouldRunIntegration)(
model: "gpt-4o",
}
- const enhanced = withSupermemory(
- config,
- INTEGRATION_CONFIG.containerTag,
- {
- apiKey: INTEGRATION_CONFIG.apiKey,
- baseUrl: INTEGRATION_CONFIG.baseUrl,
- mode: "profile",
- addMemory: "always",
- threadId: `wrapper-test-${Date.now()}`,
- },
- )
+ const enhanced = withSupermemory(config, {
+ containerTag: INTEGRATION_CONFIG.containerTag,
+ customId: `wrapper-test-${Date.now()}`,
+ apiKey: INTEGRATION_CONFIG.apiKey,
+ baseUrl: INTEGRATION_CONFIG.baseUrl,
+ mode: "profile",
+ addMemory: "always",
+ })
expect(enhanced.id).toBe("test-mastra-agent")
expect(enhanced.name).toBe("Test Mastra Agent")
@@ -478,15 +438,12 @@ describe.skipIf(!shouldRunIntegration)(
if (inputProcessor?.processInput) {
const messageList = createIntegrationMessageList()
- const args: ProcessInputArgs = {
- messages: [createMessage("user", "Wrapper test")],
- systemMessages: [],
- messageList,
- abort: vi.fn() as never,
- retryCount: 0,
- }
-
- await inputProcessor.processInput(args)
+ await inputProcessor.processInput(
+ createInputArgs({
+ messages: [createMessage("user", "Wrapper test")],
+ messageList,
+ }),
+ )
expect(messageList.addSystem).toHaveBeenCalled()
}
})
@@ -511,15 +468,13 @@ describe.skipIf(!shouldRunIntegration)(
outputProcessors: [existingOutputProcessor],
}
- const enhanced = withSupermemory(
- config,
- INTEGRATION_CONFIG.containerTag,
- {
- apiKey: INTEGRATION_CONFIG.apiKey,
- baseUrl: INTEGRATION_CONFIG.baseUrl,
- mode: "profile",
- },
- )
+ const enhanced = withSupermemory(config, {
+ containerTag: INTEGRATION_CONFIG.containerTag,
+ customId: INTEGRATION_CONFIG.customId,
+ apiKey: INTEGRATION_CONFIG.apiKey,
+ baseUrl: INTEGRATION_CONFIG.baseUrl,
+ mode: "profile",
+ })
expect(enhanced.inputProcessors).toHaveLength(2)
expect(enhanced.outputProcessors).toHaveLength(2)
@@ -534,26 +489,22 @@ describe.skipIf(!shouldRunIntegration)(
describe("Options", () => {
it("verbose mode should not break functionality", async () => {
- const processor = new SupermemoryInputProcessor(
- INTEGRATION_CONFIG.containerTag,
- {
- apiKey: INTEGRATION_CONFIG.apiKey,
- baseUrl: INTEGRATION_CONFIG.baseUrl,
- mode: "profile",
- verbose: true,
- },
- )
+ const processor = new SupermemoryInputProcessor({
+ containerTag: INTEGRATION_CONFIG.containerTag,
+ customId: INTEGRATION_CONFIG.customId,
+ apiKey: INTEGRATION_CONFIG.apiKey,
+ baseUrl: INTEGRATION_CONFIG.baseUrl,
+ mode: "profile",
+ verbose: true,
+ })
const messageList = createIntegrationMessageList()
- const args: ProcessInputArgs = {
- messages: [createMessage("user", "Verbose mode test")],
- systemMessages: [],
- messageList,
- abort: vi.fn() as never,
- retryCount: 0,
- }
-
- await processor.processInput(args)
+ await processor.processInput(
+ createInputArgs({
+ messages: [createMessage("user", "Verbose mode test")],
+ messageList,
+ }),
+ )
expect(messageList.addSystem).toHaveBeenCalled()
})
@@ -561,25 +512,20 @@ describe.skipIf(!shouldRunIntegration)(
it("custom baseUrl should be used for API calls", async () => {
const fetchSpy = vi.spyOn(globalThis, "fetch")
- const processor = new SupermemoryInputProcessor(
- INTEGRATION_CONFIG.containerTag,
- {
- apiKey: INTEGRATION_CONFIG.apiKey,
- baseUrl: INTEGRATION_CONFIG.baseUrl,
- mode: "profile",
- },
+ const processor = new SupermemoryInputProcessor({
+ containerTag: INTEGRATION_CONFIG.containerTag,
+ customId: INTEGRATION_CONFIG.customId,
+ apiKey: INTEGRATION_CONFIG.apiKey,
+ baseUrl: INTEGRATION_CONFIG.baseUrl,
+ mode: "profile",
+ })
+
+ await processor.processInput(
+ createInputArgs({
+ messages: [createMessage("user", "Base URL test")],
+ }),
)
- const args: ProcessInputArgs = {
- messages: [createMessage("user", "Base URL test")],
- systemMessages: [],
- messageList: createIntegrationMessageList(),
- abort: vi.fn() as never,
- retryCount: 0,
- }
-
- await processor.processInput(args)
-
const profileCalls = fetchSpy.mock.calls.filter(
(call) =>
typeof call[0] === "string" && call[0].includes("/v4/profile"),
@@ -595,51 +541,44 @@ describe.skipIf(!shouldRunIntegration)(
describe("Error handling", () => {
it("should handle invalid API key gracefully", async () => {
- const processor = new SupermemoryInputProcessor(
- INTEGRATION_CONFIG.containerTag,
- {
- apiKey: "invalid-api-key-12345",
- baseUrl: INTEGRATION_CONFIG.baseUrl,
- mode: "profile",
- },
- )
+ const processor = new SupermemoryInputProcessor({
+ containerTag: INTEGRATION_CONFIG.containerTag,
+ customId: INTEGRATION_CONFIG.customId,
+ apiKey: "invalid-api-key-12345",
+ baseUrl: INTEGRATION_CONFIG.baseUrl,
+ mode: "profile",
+ })
const messageList = createIntegrationMessageList()
- const args: ProcessInputArgs = {
- messages: [createMessage("user", "Invalid key test")],
- systemMessages: [],
- messageList,
- abort: vi.fn() as never,
- retryCount: 0,
- }
-
- const result = await processor.processInput(args)
+ const result = await processor.processInput(
+ createInputArgs({
+ messages: [createMessage("user", "Invalid key test")],
+ messageList,
+ }),
+ )
expect(result).toBe(messageList)
expect(messageList.addSystem).not.toHaveBeenCalled()
})
it("output processor should handle save errors gracefully", async () => {
- const processor = new SupermemoryOutputProcessor(
- INTEGRATION_CONFIG.containerTag,
- {
- apiKey: "invalid-api-key-12345",
- baseUrl: INTEGRATION_CONFIG.baseUrl,
- addMemory: "always",
- threadId: "error-test",
- },
- )
-
- const args: ProcessOutputResultArgs = {
- messages: [
- createMessage("user", "Error test"),
- createMessage("assistant", "Response"),
- ],
- messageList: createIntegrationMessageList(),
- abort: vi.fn() as never,
- retryCount: 0,
- }
-
- await expect(processor.processOutputResult(args)).resolves.toBeDefined()
+ const processor = new SupermemoryOutputProcessor({
+ containerTag: INTEGRATION_CONFIG.containerTag,
+ customId: "error-test",
+ apiKey: "invalid-api-key-12345",
+ baseUrl: INTEGRATION_CONFIG.baseUrl,
+ addMemory: "always",
+ })
+
+ await expect(
+ processor.processOutputResult(
+ createOutputArgs({
+ messages: [
+ createMessage("user", "Error test"),
+ createMessage("assistant", "Response"),
+ ],
+ }),
+ ),
+ ).resolves.toBeDefined()
})
})
},
diff --git a/packages/tools/test/mastra/unit.test.ts b/packages/tools/test/mastra/unit.test.ts
index 0161546dc..606cb16eb 100644
--- a/packages/tools/test/mastra/unit.test.ts
+++ b/packages/tools/test/mastra/unit.test.ts
@@ -29,6 +29,7 @@ const TEST_CONFIG = {
apiKey: "test-api-key",
baseUrl: "https://api.supermemory.ai",
containerTag: "test-mastra-user",
+ customId: "test-conv-123",
}
interface MockAgentConfig {
@@ -100,10 +101,33 @@ const createMockProfileResponse = (
const createMockConversationResponse = () => ({
id: "mem-123",
- conversationId: "conv-456",
+ customId: "conv-456",
status: "created",
})
+const createInputArgs = (
+ overrides: Partial = {},
+): ProcessInputArgs =>
+ ({
+ messages: [],
+ systemMessages: [],
+ messageList: createMockMessageList(),
+ abort: vi.fn() as never,
+ retryCount: 0,
+ ...overrides,
+ }) as ProcessInputArgs
+
+const createOutputArgs = (
+ overrides: Partial = {},
+): ProcessOutputResultArgs =>
+ ({
+ messages: [],
+ messageList: createMockMessageList(),
+ abort: vi.fn() as never,
+ retryCount: 0,
+ ...overrides,
+ }) as ProcessOutputResultArgs
+
describe("SupermemoryInputProcessor", () => {
let originalEnv: string | undefined
let originalFetch: typeof globalThis.fetch
@@ -129,7 +153,10 @@ describe("SupermemoryInputProcessor", () => {
describe("constructor", () => {
it("should create processor with default options", () => {
- const processor = new SupermemoryInputProcessor(TEST_CONFIG.containerTag)
+ const processor = new SupermemoryInputProcessor({
+ containerTag: TEST_CONFIG.containerTag,
+ customId: TEST_CONFIG.customId,
+ })
expect(processor.id).toBe("supermemory-input")
expect(processor.name).toBe("Supermemory Memory Injection")
})
@@ -138,21 +165,41 @@ describe("SupermemoryInputProcessor", () => {
delete process.env.SUPERMEMORY_API_KEY
expect(() => {
- new SupermemoryInputProcessor(TEST_CONFIG.containerTag)
+ new SupermemoryInputProcessor({
+ containerTag: TEST_CONFIG.containerTag,
+ customId: TEST_CONFIG.customId,
+ })
}).toThrow("SUPERMEMORY_API_KEY is not set")
})
it("should accept API key via options", () => {
delete process.env.SUPERMEMORY_API_KEY
- const processor = new SupermemoryInputProcessor(
- TEST_CONFIG.containerTag,
- {
- apiKey: "custom-key",
- },
- )
+ const processor = new SupermemoryInputProcessor({
+ containerTag: TEST_CONFIG.containerTag,
+ customId: TEST_CONFIG.customId,
+ apiKey: "custom-key",
+ })
expect(processor.id).toBe("supermemory-input")
})
+
+ it("should throw error if customId is empty", () => {
+ expect(() => {
+ new SupermemoryInputProcessor({
+ containerTag: TEST_CONFIG.containerTag,
+ customId: "",
+ })
+ }).toThrow("[supermemory] customId is required")
+ })
+
+ it("should throw error if customId is whitespace", () => {
+ expect(() => {
+ new SupermemoryInputProcessor({
+ containerTag: TEST_CONFIG.containerTag,
+ customId: " ",
+ })
+ }).toThrow("[supermemory] customId is required")
+ })
})
describe("processInput", () => {
@@ -168,24 +215,18 @@ describe("SupermemoryInputProcessor", () => {
),
})
- const processor = new SupermemoryInputProcessor(
- TEST_CONFIG.containerTag,
- {
- apiKey: TEST_CONFIG.apiKey,
- mode: "profile",
- },
- )
+ const processor = new SupermemoryInputProcessor({
+ containerTag: TEST_CONFIG.containerTag,
+ customId: TEST_CONFIG.customId,
+ apiKey: TEST_CONFIG.apiKey,
+ mode: "profile",
+ })
const messageList = createMockMessageList()
- const messages: MastraDBMessage[] = [createMessage("user", "Hello")]
-
- const args: ProcessInputArgs = {
- messages,
- systemMessages: [],
+ const args = createInputArgs({
+ messages: [createMessage("user", "Hello")],
messageList,
- abort: vi.fn() as never,
- retryCount: 0,
- }
+ })
await processor.processInput(args)
@@ -203,36 +244,19 @@ describe("SupermemoryInputProcessor", () => {
Promise.resolve(createMockProfileResponse(["Cached memory"])),
})
- const processor = new SupermemoryInputProcessor(
- TEST_CONFIG.containerTag,
- {
- apiKey: TEST_CONFIG.apiKey,
- mode: "profile",
- },
- )
+ const processor = new SupermemoryInputProcessor({
+ containerTag: TEST_CONFIG.containerTag,
+ customId: TEST_CONFIG.customId,
+ apiKey: TEST_CONFIG.apiKey,
+ mode: "profile",
+ })
const messages: MastraDBMessage[] = [createMessage("user", "Hello")]
- const args1: ProcessInputArgs = {
- messages,
- systemMessages: [],
- messageList: createMockMessageList(),
- abort: vi.fn() as never,
- retryCount: 0,
- }
-
- await processor.processInput(args1)
+ await processor.processInput(createInputArgs({ messages }))
expect(fetchMock).toHaveBeenCalledTimes(1)
- const args2: ProcessInputArgs = {
- messages,
- systemMessages: [],
- messageList: createMockMessageList(),
- abort: vi.fn() as never,
- retryCount: 0,
- }
-
- await processor.processInput(args2)
+ await processor.processInput(createInputArgs({ messages }))
expect(fetchMock).toHaveBeenCalledTimes(1)
})
@@ -249,56 +273,40 @@ describe("SupermemoryInputProcessor", () => {
})
})
- const processor = new SupermemoryInputProcessor(
- TEST_CONFIG.containerTag,
- {
- apiKey: TEST_CONFIG.apiKey,
- mode: "query",
- },
- )
-
- const args1: ProcessInputArgs = {
- messages: [createMessage("user", "First message")],
- systemMessages: [],
- messageList: createMockMessageList(),
- abort: vi.fn() as never,
- retryCount: 0,
- }
+ const processor = new SupermemoryInputProcessor({
+ containerTag: TEST_CONFIG.containerTag,
+ customId: TEST_CONFIG.customId,
+ apiKey: TEST_CONFIG.apiKey,
+ mode: "query",
+ })
- await processor.processInput(args1)
+ await processor.processInput(
+ createInputArgs({
+ messages: [createMessage("user", "First message")],
+ }),
+ )
expect(fetchMock).toHaveBeenCalledTimes(1)
- const args2: ProcessInputArgs = {
- messages: [createMessage("user", "Different message")],
- systemMessages: [],
- messageList: createMockMessageList(),
- abort: vi.fn() as never,
- retryCount: 0,
- }
-
- await processor.processInput(args2)
+ await processor.processInput(
+ createInputArgs({
+ messages: [createMessage("user", "Different message")],
+ }),
+ )
expect(fetchMock).toHaveBeenCalledTimes(2)
})
it("should return messageList in query mode when no user message", async () => {
- const processor = new SupermemoryInputProcessor(
- TEST_CONFIG.containerTag,
- {
- apiKey: TEST_CONFIG.apiKey,
- mode: "query",
- },
- )
+ const processor = new SupermemoryInputProcessor({
+ containerTag: TEST_CONFIG.containerTag,
+ customId: TEST_CONFIG.customId,
+ apiKey: TEST_CONFIG.apiKey,
+ mode: "query",
+ })
const messageList = createMockMessageList()
- const args: ProcessInputArgs = {
- messages: [],
- systemMessages: [],
- messageList,
- abort: vi.fn() as never,
- retryCount: 0,
- }
-
- const result = await processor.processInput(args)
+ const result = await processor.processInput(
+ createInputArgs({ messages: [], messageList }),
+ )
expect(result).toBe(messageList)
expect(fetchMock).not.toHaveBeenCalled()
@@ -313,84 +321,47 @@ describe("SupermemoryInputProcessor", () => {
text: () => Promise.resolve("Server error"),
})
- const processor = new SupermemoryInputProcessor(
- TEST_CONFIG.containerTag,
- {
- apiKey: TEST_CONFIG.apiKey,
- mode: "profile",
- },
- )
+ const processor = new SupermemoryInputProcessor({
+ containerTag: TEST_CONFIG.containerTag,
+ customId: TEST_CONFIG.customId,
+ apiKey: TEST_CONFIG.apiKey,
+ mode: "profile",
+ })
const messageList = createMockMessageList()
- const args: ProcessInputArgs = {
- messages: [createMessage("user", "Hello")],
- systemMessages: [],
- messageList,
- abort: vi.fn() as never,
- retryCount: 0,
- }
-
- const result = await processor.processInput(args)
+ const result = await processor.processInput(
+ createInputArgs({
+ messages: [createMessage("user", "Hello")],
+ messageList,
+ }),
+ )
expect(result).toBe(messageList)
expect(messageList.addSystem).not.toHaveBeenCalled()
})
- it("should use threadId from options", async () => {
+ it("should use customId from requestContext fallback", async () => {
fetchMock.mockResolvedValue({
ok: true,
json: () => Promise.resolve(createMockProfileResponse(["Memory"])),
})
- const processor = new SupermemoryInputProcessor(
- TEST_CONFIG.containerTag,
- {
- apiKey: TEST_CONFIG.apiKey,
- threadId: "thread-123",
- mode: "profile",
- },
- )
-
- const args: ProcessInputArgs = {
- messages: [createMessage("user", "Hello")],
- systemMessages: [],
- messageList: createMockMessageList(),
- abort: vi.fn() as never,
- retryCount: 0,
- }
-
- await processor.processInput(args)
-
- expect(fetchMock).toHaveBeenCalled()
- })
-
- it("should use threadId from requestContext when not in options", async () => {
- fetchMock.mockResolvedValue({
- ok: true,
- json: () => Promise.resolve(createMockProfileResponse(["Memory"])),
+ const processor = new SupermemoryInputProcessor({
+ containerTag: TEST_CONFIG.containerTag,
+ customId: TEST_CONFIG.customId,
+ apiKey: TEST_CONFIG.apiKey,
+ mode: "profile",
})
- const processor = new SupermemoryInputProcessor(
- TEST_CONFIG.containerTag,
- {
- apiKey: TEST_CONFIG.apiKey,
- mode: "profile",
- },
- )
-
const requestContext = new RequestContext()
requestContext.set(MASTRA_THREAD_ID_KEY, "ctx-thread-456")
- const args: ProcessInputArgs = {
- messages: [createMessage("user", "Hello")],
- systemMessages: [],
- messageList: createMockMessageList(),
- abort: vi.fn() as never,
- retryCount: 0,
- requestContext,
- }
-
- await processor.processInput(args)
+ await processor.processInput(
+ createInputArgs({
+ messages: [createMessage("user", "Hello")],
+ requestContext,
+ }),
+ )
expect(fetchMock).toHaveBeenCalled()
})
@@ -401,13 +372,12 @@ describe("SupermemoryInputProcessor", () => {
json: () => Promise.resolve(createMockProfileResponse(["Memory"])),
})
- const processor = new SupermemoryInputProcessor(
- TEST_CONFIG.containerTag,
- {
- apiKey: TEST_CONFIG.apiKey,
- mode: "query",
- },
- )
+ const processor = new SupermemoryInputProcessor({
+ containerTag: TEST_CONFIG.containerTag,
+ customId: TEST_CONFIG.customId,
+ apiKey: TEST_CONFIG.apiKey,
+ mode: "query",
+ })
const messages: MastraDBMessage[] = [
{
@@ -424,16 +394,7 @@ describe("SupermemoryInputProcessor", () => {
},
]
- const messageList = createMockMessageList()
- const args: ProcessInputArgs = {
- messages,
- systemMessages: [],
- messageList,
- abort: vi.fn() as never,
- retryCount: 0,
- }
-
- await processor.processInput(args)
+ await processor.processInput(createInputArgs({ messages }))
expect(fetchMock).toHaveBeenCalled()
})
@@ -465,7 +426,10 @@ describe("SupermemoryOutputProcessor", () => {
describe("constructor", () => {
it("should create processor with default options", () => {
- const processor = new SupermemoryOutputProcessor(TEST_CONFIG.containerTag)
+ const processor = new SupermemoryOutputProcessor({
+ containerTag: TEST_CONFIG.containerTag,
+ customId: TEST_CONFIG.customId,
+ })
expect(processor.id).toBe("supermemory-output")
expect(processor.name).toBe("Supermemory Conversation Save")
})
@@ -478,26 +442,19 @@ describe("SupermemoryOutputProcessor", () => {
json: () => Promise.resolve(createMockConversationResponse()),
})
- const processor = new SupermemoryOutputProcessor(
- TEST_CONFIG.containerTag,
- {
- apiKey: TEST_CONFIG.apiKey,
- addMemory: "always",
- threadId: "conv-456",
- },
- )
-
- const messages: MastraDBMessage[] = [
- createMessage("user", "Hello"),
- createMessage("assistant", "Hi there!"),
- ]
+ const processor = new SupermemoryOutputProcessor({
+ containerTag: TEST_CONFIG.containerTag,
+ customId: "conv-456",
+ apiKey: TEST_CONFIG.apiKey,
+ addMemory: "always",
+ })
- const args: ProcessOutputResultArgs = {
- messages,
- messageList: createMockMessageList(),
- abort: vi.fn() as never,
- retryCount: 0,
- }
+ const args = createOutputArgs({
+ messages: [
+ createMessage("user", "Hello"),
+ createMessage("assistant", "Hi there!"),
+ ],
+ })
await processor.processOutputResult(args)
@@ -522,81 +479,48 @@ describe("SupermemoryOutputProcessor", () => {
})
it("should not save conversation when addMemory is never", async () => {
- const processor = new SupermemoryOutputProcessor(
- TEST_CONFIG.containerTag,
- {
- apiKey: TEST_CONFIG.apiKey,
- addMemory: "never",
- threadId: "conv-456",
- },
- )
-
- const args: ProcessOutputResultArgs = {
- messages: [
- createMessage("user", "Hello"),
- createMessage("assistant", "Hi!"),
- ],
- messageList: createMockMessageList(),
- abort: vi.fn() as never,
- retryCount: 0,
- }
-
- await processor.processOutputResult(args)
-
- expect(fetchMock).not.toHaveBeenCalled()
- })
-
- it("should not save when no threadId provided", async () => {
- const processor = new SupermemoryOutputProcessor(
- TEST_CONFIG.containerTag,
- {
- apiKey: TEST_CONFIG.apiKey,
- addMemory: "always",
- },
- )
+ const processor = new SupermemoryOutputProcessor({
+ containerTag: TEST_CONFIG.containerTag,
+ customId: "conv-456",
+ apiKey: TEST_CONFIG.apiKey,
+ addMemory: "never",
+ })
- const args: ProcessOutputResultArgs = {
+ const args = createOutputArgs({
messages: [
createMessage("user", "Hello"),
createMessage("assistant", "Hi!"),
],
- messageList: createMockMessageList(),
- abort: vi.fn() as never,
- retryCount: 0,
- }
+ })
await processor.processOutputResult(args)
expect(fetchMock).not.toHaveBeenCalled()
})
- it("should use threadId from requestContext", async () => {
+ it("should use customId from requestContext", async () => {
fetchMock.mockResolvedValue({
ok: true,
json: () => Promise.resolve(createMockConversationResponse()),
})
- const processor = new SupermemoryOutputProcessor(
- TEST_CONFIG.containerTag,
- {
- apiKey: TEST_CONFIG.apiKey,
- addMemory: "always",
- },
- )
+ const processor = new SupermemoryOutputProcessor({
+ containerTag: TEST_CONFIG.containerTag,
+ customId: TEST_CONFIG.customId,
+ apiKey: TEST_CONFIG.apiKey,
+ addMemory: "always",
+ })
const requestContext = new RequestContext()
requestContext.set(MASTRA_THREAD_ID_KEY, "ctx-thread-789")
- const args: ProcessOutputResultArgs = {
+ const args = createOutputArgs({
messages: [
createMessage("user", "Hello"),
createMessage("assistant", "Hi!"),
],
- messageList: createMockMessageList(),
- abort: vi.fn() as never,
- retryCount: 0,
requestContext,
- }
+ })
await processor.processOutputResult(args)
@@ -604,6 +528,7 @@ describe("SupermemoryOutputProcessor", () => {
const callBody = JSON.parse(
(fetchMock.mock.calls[0]?.[1] as { body: string }).body,
)
+ // Should use the RequestContext override, not the default customId
expect(callBody.conversationId).toBe("ctx-thread-789")
})
@@ -613,27 +538,20 @@ describe("SupermemoryOutputProcessor", () => {
json: () => Promise.resolve(createMockConversationResponse()),
})
- const processor = new SupermemoryOutputProcessor(
- TEST_CONFIG.containerTag,
- {
- apiKey: TEST_CONFIG.apiKey,
- addMemory: "always",
- threadId: "conv-456",
- },
- )
-
- const messages: MastraDBMessage[] = [
- createMessage("system", "You are a helpful assistant"),
- createMessage("user", "Hello"),
- createMessage("assistant", "Hi there!"),
- ]
+ const processor = new SupermemoryOutputProcessor({
+ containerTag: TEST_CONFIG.containerTag,
+ customId: "conv-456",
+ apiKey: TEST_CONFIG.apiKey,
+ addMemory: "always",
+ })
- const args: ProcessOutputResultArgs = {
- messages,
- messageList: createMockMessageList(),
- abort: vi.fn() as never,
- retryCount: 0,
- }
+ const args = createOutputArgs({
+ messages: [
+ createMessage("system", "You are a helpful assistant"),
+ createMessage("user", "Hello"),
+ createMessage("assistant", "Hi there!"),
+ ],
+ })
await processor.processOutputResult(args)
@@ -652,14 +570,12 @@ describe("SupermemoryOutputProcessor", () => {
json: () => Promise.resolve(createMockConversationResponse()),
})
- const processor = new SupermemoryOutputProcessor(
- TEST_CONFIG.containerTag,
- {
- apiKey: TEST_CONFIG.apiKey,
- addMemory: "always",
- threadId: "conv-456",
- },
- )
+ const processor = new SupermemoryOutputProcessor({
+ containerTag: TEST_CONFIG.containerTag,
+ customId: "conv-456",
+ apiKey: TEST_CONFIG.apiKey,
+ addMemory: "always",
+ })
const messages: MastraDBMessage[] = [
{
@@ -685,12 +601,7 @@ describe("SupermemoryOutputProcessor", () => {
},
]
- const args: ProcessOutputResultArgs = {
- messages,
- messageList: createMockMessageList(),
- abort: vi.fn() as never,
- retryCount: 0,
- }
+ const args = createOutputArgs({ messages })
await processor.processOutputResult(args)
@@ -708,45 +619,33 @@ describe("SupermemoryOutputProcessor", () => {
text: () => Promise.resolve("Server error"),
})
- const processor = new SupermemoryOutputProcessor(
- TEST_CONFIG.containerTag,
- {
- apiKey: TEST_CONFIG.apiKey,
- addMemory: "always",
- threadId: "conv-456",
- },
- )
+ const processor = new SupermemoryOutputProcessor({
+ containerTag: TEST_CONFIG.containerTag,
+ customId: "conv-456",
+ apiKey: TEST_CONFIG.apiKey,
+ addMemory: "always",
+ })
- const args: ProcessOutputResultArgs = {
+ const args = createOutputArgs({
messages: [
createMessage("user", "Hello"),
createMessage("assistant", "Hi!"),
],
- messageList: createMockMessageList(),
- abort: vi.fn() as never,
- retryCount: 0,
- }
+ })
// Should not throw
await expect(processor.processOutputResult(args)).resolves.toBeDefined()
})
it("should not save when no messages to save", async () => {
- const processor = new SupermemoryOutputProcessor(
- TEST_CONFIG.containerTag,
- {
- apiKey: TEST_CONFIG.apiKey,
- addMemory: "always",
- threadId: "conv-456",
- },
- )
+ const processor = new SupermemoryOutputProcessor({
+ containerTag: TEST_CONFIG.containerTag,
+ customId: "conv-456",
+ apiKey: TEST_CONFIG.apiKey,
+ addMemory: "always",
+ })
- const args: ProcessOutputResultArgs = {
- messages: [],
- messageList: createMockMessageList(),
- abort: vi.fn() as never,
- retryCount: 0,
- }
+ const args = createOutputArgs({ messages: [] })
await processor.processOutputResult(args)
@@ -773,13 +672,18 @@ describe("Factory functions", () => {
describe("createSupermemoryProcessor", () => {
it("should create input processor", () => {
- const processor = createSupermemoryProcessor(TEST_CONFIG.containerTag)
+ const processor = createSupermemoryProcessor({
+ containerTag: TEST_CONFIG.containerTag,
+ customId: TEST_CONFIG.customId,
+ })
expect(processor).toBeInstanceOf(SupermemoryInputProcessor)
expect(processor.id).toBe("supermemory-input")
})
it("should pass options to processor", () => {
- const processor = createSupermemoryProcessor(TEST_CONFIG.containerTag, {
+ const processor = createSupermemoryProcessor({
+ containerTag: TEST_CONFIG.containerTag,
+ customId: TEST_CONFIG.customId,
apiKey: "custom-key",
mode: "full",
})
@@ -789,45 +693,43 @@ describe("Factory functions", () => {
describe("createSupermemoryOutputProcessor", () => {
it("should create output processor", () => {
- const processor = createSupermemoryOutputProcessor(
- TEST_CONFIG.containerTag,
- )
+ const processor = createSupermemoryOutputProcessor({
+ containerTag: TEST_CONFIG.containerTag,
+ customId: TEST_CONFIG.customId,
+ })
expect(processor).toBeInstanceOf(SupermemoryOutputProcessor)
expect(processor.id).toBe("supermemory-output")
})
it("should pass options to processor", () => {
- const processor = createSupermemoryOutputProcessor(
- TEST_CONFIG.containerTag,
- {
- apiKey: "custom-key",
- addMemory: "always",
- threadId: "conv-123",
- },
- )
+ const processor = createSupermemoryOutputProcessor({
+ containerTag: TEST_CONFIG.containerTag,
+ customId: TEST_CONFIG.customId,
+ apiKey: "custom-key",
+ addMemory: "always",
+ })
expect(processor).toBeInstanceOf(SupermemoryOutputProcessor)
})
})
describe("createSupermemoryProcessors", () => {
it("should create both input and output processors", () => {
- const { input, output } = createSupermemoryProcessors(
- TEST_CONFIG.containerTag,
- )
+ const { input, output } = createSupermemoryProcessors({
+ containerTag: TEST_CONFIG.containerTag,
+ customId: TEST_CONFIG.customId,
+ })
expect(input).toBeInstanceOf(SupermemoryInputProcessor)
expect(output).toBeInstanceOf(SupermemoryOutputProcessor)
})
it("should share options between processors", () => {
- const { input, output } = createSupermemoryProcessors(
- TEST_CONFIG.containerTag,
- {
- apiKey: "custom-key",
- mode: "full",
- addMemory: "always",
- threadId: "conv-123",
- },
- )
+ const { input, output } = createSupermemoryProcessors({
+ containerTag: TEST_CONFIG.containerTag,
+ customId: TEST_CONFIG.customId,
+ apiKey: "custom-key",
+ mode: "full",
+ addMemory: "always",
+ })
expect(input.id).toBe("supermemory-input")
expect(output.id).toBe("supermemory-output")
})
@@ -857,7 +759,10 @@ describe("withSupermemory", () => {
const config: MockAgentConfig = { id: "test-agent", name: "Test Agent" }
expect(() => {
- withSupermemory(config, TEST_CONFIG.containerTag)
+ withSupermemory(config, {
+ containerTag: TEST_CONFIG.containerTag,
+ customId: TEST_CONFIG.customId,
+ })
}).toThrow("SUPERMEMORY_API_KEY is not set")
})
@@ -865,7 +770,9 @@ describe("withSupermemory", () => {
delete process.env.SUPERMEMORY_API_KEY
const config: MockAgentConfig = { id: "test-agent", name: "Test Agent" }
- const enhanced = withSupermemory(config, TEST_CONFIG.containerTag, {
+ const enhanced = withSupermemory(config, {
+ containerTag: TEST_CONFIG.containerTag,
+ customId: TEST_CONFIG.customId,
apiKey: "custom-key",
})
@@ -877,7 +784,10 @@ describe("withSupermemory", () => {
describe("processor injection", () => {
it("should inject input and output processors", () => {
const config: MockAgentConfig = { id: "test-agent", name: "Test Agent" }
- const enhanced = withSupermemory(config, TEST_CONFIG.containerTag)
+ const enhanced = withSupermemory(config, {
+ containerTag: TEST_CONFIG.containerTag,
+ customId: TEST_CONFIG.customId,
+ })
expect(enhanced.inputProcessors).toHaveLength(1)
expect(enhanced.outputProcessors).toHaveLength(1)
@@ -892,7 +802,10 @@ describe("withSupermemory", () => {
model: "gpt-4",
customProp: "value",
}
- const enhanced = withSupermemory(config, TEST_CONFIG.containerTag)
+ const enhanced = withSupermemory(config, {
+ containerTag: TEST_CONFIG.containerTag,
+ customId: TEST_CONFIG.customId,
+ })
expect(enhanced.id).toBe("test-agent")
expect(enhanced.name).toBe("Test Agent")
@@ -911,7 +824,10 @@ describe("withSupermemory", () => {
inputProcessors: [existingInputProcessor],
}
- const enhanced = withSupermemory(config, TEST_CONFIG.containerTag)
+ const enhanced = withSupermemory(config, {
+ containerTag: TEST_CONFIG.containerTag,
+ customId: TEST_CONFIG.customId,
+ })
expect(enhanced.inputProcessors).toHaveLength(2)
expect(enhanced.inputProcessors?.[0]?.id).toBe("supermemory-input")
@@ -929,7 +845,10 @@ describe("withSupermemory", () => {
outputProcessors: [existingOutputProcessor],
}
- const enhanced = withSupermemory(config, TEST_CONFIG.containerTag)
+ const enhanced = withSupermemory(config, {
+ containerTag: TEST_CONFIG.containerTag,
+ customId: TEST_CONFIG.customId,
+ })
expect(enhanced.outputProcessors).toHaveLength(2)
expect(enhanced.outputProcessors?.[0]?.id).toBe("existing-output")
@@ -946,7 +865,10 @@ describe("withSupermemory", () => {
outputProcessors: [existingOutput],
}
- const enhanced = withSupermemory(config, TEST_CONFIG.containerTag)
+ const enhanced = withSupermemory(config, {
+ containerTag: TEST_CONFIG.containerTag,
+ customId: TEST_CONFIG.customId,
+ })
expect(enhanced.inputProcessors).toHaveLength(2)
expect(enhanced.outputProcessors).toHaveLength(2)
@@ -960,10 +882,11 @@ describe("withSupermemory", () => {
describe("options passthrough", () => {
it("should pass options to processors", () => {
const config: MockAgentConfig = { id: "test-agent", name: "Test Agent" }
- const enhanced = withSupermemory(config, TEST_CONFIG.containerTag, {
+ const enhanced = withSupermemory(config, {
+ containerTag: TEST_CONFIG.containerTag,
+ customId: TEST_CONFIG.customId,
mode: "full",
addMemory: "always",
- threadId: "conv-123",
verbose: true,
})
@@ -971,4 +894,28 @@ describe("withSupermemory", () => {
expect(enhanced.outputProcessors).toHaveLength(1)
})
})
+
+ describe("customId validation", () => {
+ it("should throw error if customId is empty", () => {
+ const config: MockAgentConfig = { id: "test-agent", name: "Test Agent" }
+
+ expect(() => {
+ withSupermemory(config, {
+ containerTag: TEST_CONFIG.containerTag,
+ customId: "",
+ })
+ }).toThrow("[supermemory] customId is required")
+ })
+
+ it("should throw error if customId is whitespace", () => {
+ const config: MockAgentConfig = { id: "test-agent", name: "Test Agent" }
+
+ expect(() => {
+ withSupermemory(config, {
+ containerTag: TEST_CONFIG.containerTag,
+ customId: " ",
+ })
+ }).toThrow("[supermemory] customId is required")
+ })
+ })
})