context-intelligence.ts
189 lines (157 loc) · 5.68 KB

/**
* Context Intelligence Layer - Production-Safe Example
* Demonstrates Level 3 caching with context optimization (input-only)
*/

import { NeuroCache } from '../src';
import { OpenAIProvider } from '../src/providers/OpenAIProvider';
import { MemoryStore } from '../src/store/MemoryStore';
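
// Demo 1: Level 3 cache with deduplication and content normalization enabled;
// duplicate messages are stripped from the context before the provider call.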
async function contextIntelligenceDemo(): Promise<void> {
  const apiKey = process.env.OPENAI_API_KEY;
  if (!apiKey) {
    console.error('Error: OPENAI_API_KEY not set');
    process.exit(1);
  }

  console.log('=== Context Intelligence Layer Demo (Production-Safe) ===\n');

  // Create provider
  const provider = new OpenAIProvider({ apiKey });

  // Create cache with Context Intelligence enabled (production-safe)
  const cache = new NeuroCache({
    provider,
    store: new MemoryStore(),
    enableContextIntelligence: true, // Enable Level 3
    contextOptimizationStrategy: {
      enableDeduplication: true,     // Remove duplicate messages
      normalizeContent: true,        // Clean up whitespace
      enableHistoryTrimming: false,  // Optional: trim old history
      preserveSystemMessages: true   // Never remove system messages
    },
    minOptimizationThreshold: 5,     // Min tokens to trigger optimization
    ttl: 3600,
    logging: true
  });

  console.log('Step 1: First request with duplicate messages');
  console.log('-----------------------------------------------\n');

  const response1 = await cache.generate({
    model: 'gpt-3.5-turbo',
    messages: [
      { role: 'system', content: 'You are a helpful assistant. Give concise answers.' },
      { role: 'user', content: 'What is the capital of France?' },
      { role: 'assistant', content: 'Paris.' },
      { role: 'user', content: 'What is the capital of France?' } // Duplicate - will be removed!
    ]
  });

  console.log(`Response: ${response1.content}\n`);
  console.log('Note: Duplicate user message was automatically removed before sending to LLM\n');

  console.log('\nStep 2: Another request with repeated context');
  console.log('----------------------------------------------\n');

  const response2 = await cache.generate({
    model: 'gpt-3.5-turbo',
    messages: [
      { role: 'user', content: 'Count to 3' },
      { role: 'assistant', content: '1, 2, 3' },
      { role: 'user', content: 'Count to 3' }, // Duplicate
      { role: 'assistant', content: '1, 2, 3' },
      { role: 'user', content: 'Now count to 5' }
    ]
  });

  console.log(`Response: ${response2.content}\n`);

  console.log('\n=== Metrics ===');
  console.log(cache.getMetricsSummary());
}
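
// Demo 2: deduplication with the optimization threshold lowered to 1 token,
// so even this short conversation is optimized before the LLM call.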
async function deduplicationDemo(): Promise<void> {
  const apiKey = process.env.OPENAI_API_KEY;
  if (!apiKey) return;

  console.log('\n\n=== Context Deduplication Demo ===\n');

  const provider = new OpenAIProvider({ apiKey });
  const cache = new NeuroCache({
    provider,
    store: new MemoryStore(),
    enableContextIntelligence: true,
    contextOptimizationStrategy: {
      enableDeduplication: true,
      normalizeContent: true
    },
    minOptimizationThreshold: 1,
    logging: false
  });

  // Conversation with repeated user/assistant turns
  console.log('Original messages: 5');
  console.log('After deduplication: 3 (duplicates removed)\n');

  const response = await cache.generate({
    model: 'gpt-3.5-turbo',
    messages: [
      { role: 'user', content: 'Count to 3' },
      { role: 'assistant', content: '1, 2, 3' },
      { role: 'user', content: 'Count to 3' },   // Duplicate - removed
      { role: 'assistant', content: '1, 2, 3' }, // Duplicate - removed
      { role: 'user', content: 'Now count to 5' }
    ]
  });

  console.log(`Response: ${response.content}\n`);

  console.log('Metrics:');
  console.log(cache.getMetricsSummary());
}
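
// Demo 3: side-by-side comparison of the same duplicated request with
// Context Intelligence disabled vs. enabled.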
async function comparisonDemo(): Promise<void> {
  const apiKey = process.env.OPENAI_API_KEY;
  if (!apiKey) return;

  console.log('\n\n=== Performance Comparison ===\n');

  const provider = new OpenAIProvider({ apiKey });

  // WITHOUT Context Intelligence
  console.log('Testing WITHOUT Context Intelligence:');
  const cacheBasic = new NeuroCache({
    provider,
    store: new MemoryStore(),
    enableContextIntelligence: false,
    logging: false
  });

  const start1 = Date.now();

  // Request with duplicates (no optimization)
  await cacheBasic.generate({
    model: 'gpt-3.5-turbo',
    messages: [
      { role: 'user', content: 'Hi' },
      { role: 'user', content: 'Hi' } // Duplicate sent to LLM
    ]
  });

  const duration1 = Date.now() - start1;
  console.log(`Duration: ${duration1}ms`);
  console.log(`Cache hits: ${cacheBasic.getMetrics().cacheHits}`);
  console.log('Duplicates removed: 0 (no optimization)\n');

  // WITH Context Intelligence
  console.log('Testing WITH Context Intelligence:');
  const cacheIntelligent = new NeuroCache({
    provider,
    store: new MemoryStore(),
    enableContextIntelligence: true,
    contextOptimizationStrategy: {
      enableDeduplication: true,
      normalizeContent: true
    },
    minOptimizationThreshold: 1,
    logging: false
  });

  const start2 = Date.now();

  // Same request, but duplicates are removed before the LLM call
  await cacheIntelligent.generate({
    model: 'gpt-3.5-turbo',
    messages: [
      { role: 'user', content: 'Hi' },
      { role: 'user', content: 'Hi' } // Automatically removed!
    ]
  });

  const duration2 = Date.now() - start2;
  const ciMetrics = cacheIntelligent.getMetrics();
  console.log(`Duration: ${duration2}ms`);
  console.log(`Cache hits: ${ciMetrics.cacheHits}`);
  console.log(`Tokens saved: ${ciMetrics.tokensSaved}\n`);
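
  // Note: a single-request wall-clock comparison is noisy; the tokens-saved
  // metric above is the more reliable signal of what the optimization did.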
}
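
// Entry point: run the three demos in sequence; any error aborts the run.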
async function main(): Promise<void> {
  try {
    await contextIntelligenceDemo();
    await deduplicationDemo();
    await comparisonDemo();
  } catch (error) {
    console.error('Error:', error);
  }
}

main();