
Commit fc432a9

fix: correct OpenAI GPT-5.1 Responses API parsing and provider defaults
Fixed parsing errors for OpenAI GPT-5.1 and OpenAI-compatible providers.

OpenAI GPT-5.1 Response Structure:
- Changed from `type` to `object` field
- Updated output array structure: output[{type: "message", content: [{type: "output_text", text: "..."}]}]
- Fixed usage token field names (input_tokens instead of prompt_tokens)
- Added proper content extraction from the nested message/content structure

OpenAI-Compatible Providers:
- Set Ollama to use the Chat Completions API (doesn't support the Responses API)
- Set LM Studio to use the Chat Completions API (doesn't support the Responses API)
- Changed the default to the Chat Completions API for broader compatibility
- Added debug logging to capture raw JSON responses for troubleshooting

Build & Test Improvements:
- Updated Makefile to include all LLM provider features (anthropic, openai-llm, openai-compatible)
- Enhanced test_agentic_mcp.py with traceback printing for better error visibility
- Added LIBRARY_PATH for FAISS linking on macOS

Files changed:
- crates/codegraph-ai/src/openai_llm_provider.rs: Fixed GPT-5.1 response structs and parsing
- crates/codegraph-ai/src/openai_compatible_provider.rs: Set Chat Completions as default, added debug logging
- Makefile: Added all LLM provider features to build command
- test_agentic_mcp.py: Added traceback printing for error details
1 parent dacbc03 commit fc432a9
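
For reference, the GPT-5.1 Responses API shape this commit targets looks like the sketch below. It is illustrative only, built from the structure described in the commit message rather than captured from a real API call; all field values are placeholders.

    // Illustrative GPT-5.1 Responses API payload (placeholder values),
    // mirroring the commit message: object, output[].content[], usage.
    let sample = serde_json::json!({
        "id": "resp_abc123",
        "object": "response",
        "status": "completed",
        "output": [{
            "type": "message",
            "content": [{ "type": "output_text", "text": "..." }]
        }],
        "usage": { "input_tokens": 12, "output_tokens": 5, "total_tokens": 17 }
    });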

4 files changed: +74 −40 lines


Makefile

Lines changed: 1 addition & 1 deletion

@@ -13,7 +13,7 @@ build-release:
 
 # Build MCP server with AutoAgents experimental feature
 build-mcp-autoagents:
-	cargo build --release -p codegraph-mcp --bin codegraph --features "ai-enhanced,autoagents-experimental,faiss,embeddings-ollama"
+	LIBRARY_PATH=/opt/homebrew/lib:$$LIBRARY_PATH cargo build --release -p codegraph-mcp --bin codegraph --features "ai-enhanced,autoagents-experimental,faiss,embeddings-ollama,codegraph-ai/anthropic,codegraph-ai/openai-llm,codegraph-ai/openai-compatible"
 
 # Build MCP HTTP server with experimental HTTP transport
 .PHONY: build-mcp-http
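
Note: the LIBRARY_PATH prefix assumes FAISS was installed via Homebrew on Apple Silicon, where libraries live under /opt/homebrew/lib; on Intel Macs Homebrew typically installs to /usr/local/lib, so the path may need adjusting before running make build-mcp-autoagents.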

crates/codegraph-ai/src/openai_compatible_provider.rs

Lines changed: 23 additions & 11 deletions

@@ -36,7 +36,7 @@ impl Default for OpenAICompatibleConfig {
             max_retries: 3,
             api_key: None,
             provider_name: "openai-compatible".to_string(),
-            use_responses_api: true, // All providers support Responses API
+            use_responses_api: false, // Most providers only support Chat Completions API
         }
     }
 }
@@ -49,7 +49,7 @@ impl OpenAICompatibleConfig {
             model,
             context_window: 256_000,
             provider_name: "lmstudio".to_string(),
-            use_responses_api: true,
+            use_responses_api: false, // LM Studio doesn't support Responses API
             ..Default::default()
         }
     }
@@ -61,7 +61,7 @@ impl OpenAICompatibleConfig {
             model,
             context_window: 256_000,
             provider_name: "ollama".to_string(),
-            use_responses_api: true,
+            use_responses_api: false, // Ollama doesn't support Responses API
             ..Default::default()
         }
     }
@@ -72,7 +72,7 @@ impl OpenAICompatibleConfig {
             base_url,
             model,
             provider_name,
-            use_responses_api: true,
+            use_responses_api: false, // Default to Chat Completions API for compatibility
             ..Default::default()
         }
     }
@@ -214,13 +214,25 @@ impl OpenAICompatibleProvider {
             ));
         }
 
-        response
-            .json::<ResponseAPIResponse>()
-            .await
-            .context(format!(
-                "Failed to parse {} Responses API response",
-                self.config.provider_name
-            ))
+        // Get raw response text for debugging
+        let response_text = response.text().await.context(format!(
+            "Failed to read {} Responses API response body",
+            self.config.provider_name
+        ))?;
+
+        // Log the raw response for debugging
+        tracing::debug!(
+            provider = %self.config.provider_name,
+            response = %response_text,
+            "Raw Responses API response"
+        );
+
+        // Parse the response
+        serde_json::from_str::<ResponseAPIResponse>(&response_text).context(format!(
+            "Failed to parse {} Responses API response. Raw response: {}",
+            self.config.provider_name,
+            response_text
+        ))
     }
 
     /// Try request using Chat Completions API (fallback for older systems)
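
The raw-response log added above is emitted at debug level, so it only appears if the consuming binary installs a tracing subscriber that lets it through. A minimal sketch, assuming tracing_subscriber (with its env-filter feature) is available and that the crate's module path is codegraph_ai:

    // Minimal subscriber setup so the debug-level raw-response logs appear.
    // "codegraph_ai=debug" is an assumed filter target, not from this diff.
    fn main() {
        tracing_subscriber::fmt()
            .with_env_filter("codegraph_ai=debug")
            .init();
        // ... construct the provider and send a request; raw Responses API
        // bodies will now show up in the logs.
    }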

crates/codegraph-ai/src/openai_llm_provider.rs

Lines changed: 44 additions & 28 deletions

@@ -179,10 +179,24 @@ impl OpenAIProvider {
             return Err(anyhow!("OpenAI API error ({}): {}", status, error_text));
         }
 
-        response
-            .json::<OpenAIResponse>()
+        // Get raw response text for debugging
+        let response_text = response
+            .text()
             .await
-            .context("Failed to parse OpenAI Responses API response")
+            .context("Failed to read OpenAI Responses API response body")?;
+
+        // Log the raw response for debugging
+        tracing::debug!(
+            model = %self.config.model,
+            response = %response_text,
+            "Raw OpenAI Responses API response"
+        );
+
+        // Parse the response
+        serde_json::from_str::<OpenAIResponse>(&response_text).context(format!(
+            "Failed to parse OpenAI Responses API response. Raw response: {}",
+            response_text
+        ))
     }
 }
 
@@ -196,22 +210,22 @@ impl LLMProvider for OpenAIProvider {
         let start = Instant::now();
         let response = self.send_request(messages, config).await?;
 
-        // Handle both old output_text field and new output array format
-        let content = if !response.output_text.is_empty() {
-            response.output_text
-        } else if !response.output.is_empty() {
-            response.output.iter()
-                .map(|o| o.content.as_str())
-                .collect::<Vec<_>>()
-                .join("\n")
-        } else {
-            String::new()
-        };
+        // Extract text from output array
+        // OpenAI GPT-5 returns: output[{type: "message", content: [{type: "output_text", text: "..."}]}]
+        let content = response
+            .output
+            .iter()
+            .filter(|item| item.output_type == "message")
+            .flat_map(|item| &item.content)
+            .filter(|c| c.content_type == "output_text")
+            .map(|c| c.text.as_str())
+            .collect::<Vec<_>>()
+            .join("\n");
 
         Ok(LLMResponse {
             content,
             total_tokens: response.usage.as_ref().map(|u| u.total_tokens),
-            prompt_tokens: response.usage.as_ref().map(|u| u.prompt_tokens),
+            prompt_tokens: response.usage.as_ref().map(|u| u.input_tokens),
             completion_tokens: response.usage.as_ref().map(|u| u.output_tokens),
             finish_reason: response.status.clone(),
             model: self.config.model.clone(),
@@ -356,34 +370,36 @@ struct OpenAIRequest {
 #[derive(Debug, Deserialize)]
 struct OpenAIResponse {
     id: String,
-    #[serde(rename = "type")]
-    response_type: String,
+    object: String,
     #[serde(default)]
     status: Option<String>,
     #[serde(default)]
-    output_text: String,
-    #[serde(default)]
-    output: Vec<ResponseOutput>,
+    output: Vec<OutputItem>,
     #[serde(default)]
-    usage: Option<Usage>,
+    usage: Option<OpenAIUsage>,
 }
 
 #[derive(Debug, Deserialize)]
-struct ResponseOutput {
+struct OutputItem {
     #[serde(rename = "type")]
     output_type: String,
     #[serde(default)]
-    content: String,
+    content: Vec<OutputContent>,
 }
 
 #[derive(Debug, Deserialize)]
-struct Usage {
-    prompt_tokens: usize,
-    #[serde(alias = "completion_tokens")]
+struct OutputContent {
+    #[serde(rename = "type")]
+    content_type: String,
+    #[serde(default)]
+    text: String,
+}
+
+#[derive(Debug, Deserialize)]
+struct OpenAIUsage {
+    input_tokens: usize,
     output_tokens: usize,
     total_tokens: usize,
-    #[serde(default)]
-    reasoning_tokens: Option<usize>,
 }
 
 #[cfg(test)]
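
Since the hunk ends right at the #[cfg(test)] marker, a regression test for the new shape fits naturally here. A hedged sketch, assuming the structs above are in scope and serde_json is available as a dev-dependency; the extraction mirrors the second hunk above:

    #[test]
    fn parses_gpt51_message_output() {
        // Sample payload in the new GPT-5.1 shape (placeholder values).
        let raw = r#"{
            "id": "resp_abc123",
            "object": "response",
            "status": "completed",
            "output": [{"type": "message",
                        "content": [{"type": "output_text", "text": "hi"}]}],
            "usage": {"input_tokens": 1, "output_tokens": 2, "total_tokens": 3}
        }"#;
        let resp: OpenAIResponse = serde_json::from_str(raw).expect("should parse");

        // Same pipeline as the parsing change above:
        // message items -> output_text parts -> joined text.
        let content = resp
            .output
            .iter()
            .filter(|item| item.output_type == "message")
            .flat_map(|item| &item.content)
            .filter(|c| c.content_type == "output_text")
            .map(|c| c.text.as_str())
            .collect::<Vec<_>>()
            .join("\n");

        assert_eq!(content, "hi");
        assert_eq!(resp.usage.unwrap().total_tokens, 3);
    }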

test_agentic_mcp.py

Lines changed: 6 additions & 0 deletions

@@ -282,6 +282,9 @@ async def run_stdio_tests():
         except Exception as e:
             duration = asyncio.get_event_loop().time() - start_time
             print(f"   ❌ ERROR: {e}")
+            print(f"\n   📋 Full error details:")
+            import traceback
+            traceback.print_exc()
             results.append({
                 "test": tool_name,
                 "success": False,
@@ -413,6 +416,9 @@ async def run_http_tests():
         except Exception as e:
             duration = asyncio.get_event_loop().time() - start_time
             print(f"   ❌ ERROR: {e}")
+            print(f"\n   📋 Full error details:")
+            import traceback
+            traceback.print_exc()
             results.append({
                 "test": tool_name,
                 "success": False,
