
Commit 2fa7c00

wip: adding models and Configuration
1 parent 02d3446 commit 2fa7c00

File tree: 7 files changed, +225 −34 lines changed


Sources/CompilerSwiftAI/CompilerClient.swift

Lines changed: 42 additions & 6 deletions
@@ -4,10 +4,28 @@ import OSLog
 
 /// Primary interface for interacting with Compiler's Back End
 public final actor CompilerClient {
+    public struct Configuration {
+        /// Current streaming configuration
+        public var streamingChat: StreamConfiguration
+        /// Whether to enable debug logging
+        public var enableDebugLogging: Bool
+
+        public init(
+            streamingChat: StreamConfiguration = .openAI(.gpt4o),
+            enableDebugLogging: Bool = false
+        ) {
+            self.streamingChat = streamingChat
+            self.enableDebugLogging = enableDebugLogging
+        }
+    }
+
     /// Application ID (retrievable from the Compiler Developer Dashboard)
     let appID: UUID
+
+    private(set) var configuration: Configuration
 
-    internal let baseURL: String = "https://backend.compiler.inc"
+    // internal let baseURL: String = "https://backend.compiler.inc"
+    internal let baseURL: String = "http://localhost:3000"
     internal let keychain: KeychainHelper = KeychainHelper.standard
     internal let functionLogger: DebugLogger
     internal let modelLogger: DebugLogger
@@ -16,11 +34,29 @@ public final actor CompilerClient {
     /// Initialize the Compiler Client
     /// - Parameters:
     ///   - appID: Application ID (retrievable from the Compiler Developer Dashboard)
-    ///   - enableDebugLogging: Whether or not to log debug info
-    public init(appID: UUID, enableDebugLogging: Bool = false) {
+    ///   - configuration: Client configuration including streaming chat settings and debug options
+    public init(
+        appID: UUID,
+        configuration: Configuration = Configuration()
+    ) {
         self.appID = appID
-        self.functionLogger = DebugLogger(Logger.functionCalls, isEnabled: enableDebugLogging)
-        self.modelLogger = DebugLogger(Logger.modelCalls, isEnabled: enableDebugLogging)
-        self.authLogger = DebugLogger(Logger.auth, isEnabled: enableDebugLogging)
+        self.configuration = configuration
+        self.functionLogger = DebugLogger(Logger.functionCalls, isEnabled: configuration.enableDebugLogging)
+        self.modelLogger = DebugLogger(Logger.modelCalls, isEnabled: configuration.enableDebugLogging)
+        self.authLogger = DebugLogger(Logger.auth, isEnabled: configuration.enableDebugLogging)
+    }
+
+    /// Update streaming chat configuration
+    /// - Parameter update: Closure that takes an inout StreamConfiguration parameter
+    public func updateStreamingChat(
+        _ update: (inout StreamConfiguration) -> Void
+    ) {
+        update(&configuration.streamingChat)
+    }
+
+    /// Creates an immutable streaming session configuration.
+    /// This captures the current streaming configuration at a point in time.
+    public func makeStreamingSession() -> StreamConfiguration {
+        configuration.streamingChat
     }
 }
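
A minimal usage sketch of the new Configuration API introduced above (the app ID here is a placeholder, not a real credential):

    import Foundation

    // Hypothetical app ID for illustration only
    let appID = UUID()

    // Default configuration: .openAI(.gpt4o) streaming, debug logging off
    let client = CompilerClient(appID: appID)

    // Customizing at init time
    let verboseClient = CompilerClient(
        appID: appID,
        configuration: .init(
            streamingChat: .anthropic(.claudeSonnet, temperature: 0.7),
            enableDebugLogging: true
        )
    )

    // Swapping the streaming model later goes through the actor
    Task {
        await verboseClient.updateStreamingChat { config in
            config = .openAI(.gpt4oMini)
        }
    }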

Sources/CompilerSwiftAI/Model Calling/CompilerClient+Streaming.swift

Lines changed: 8 additions & 4 deletions
@@ -54,10 +54,11 @@ extension CompilerClient {
         do {
             let encoder = JSONEncoder()
             encoder.outputFormatting = [.prettyPrinted, .sortedKeys]
-            let jsonData = try encoder.encode(body)
-            request.httpBody = jsonData
 
-            modelLogger.debug("Streaming request body JSON: \(String(data: jsonData, encoding: .utf8) ?? "nil")")
+            // AppId is only in the endpoint URL, not in query params or body
+            request.httpBody = try encoder.encode(body)
+
+            modelLogger.debug("Streaming request body JSON: \(String(data: request.httpBody ?? Data(), encoding: .utf8) ?? "nil")")
         } catch {
             modelLogger.error("Failed to encode request: \(error)")
             return AsyncThrowingStream { $0.finish(throwing: error) }
@@ -105,9 +106,12 @@ extension CompilerClient {
             // For non-empty content, trim just the leading space after "data:"
             let trimmedContent = content.hasPrefix(" ") ? String(content.dropFirst()) : content
             modelLogger.debug("Content: \(trimmedContent.debugDescription)")
+            modelLogger.debug("Yielding content of length: \(trimmedContent.count)")
 
-            // Yield the content
+            // Yield the content directly - server handles JSON extraction
            continuation.yield(trimmedContent)
+
+            modelLogger.debug("Content yielded successfully")
         }
 
         modelLogger.debug("SSE stream complete")

Sources/CompilerSwiftAI/Model Calling/Message.swift

Lines changed: 26 additions & 17 deletions
@@ -9,6 +9,24 @@ struct Message: Codable, Sendable, Identifiable, Equatable {
     let content: [Content]
     var state: MessageState
 
+    private enum CodingKeys: String, CodingKey {
+        case role, content
+    }
+
+    init(from decoder: Decoder) throws {
+        let container = try decoder.container(keyedBy: CodingKeys.self)
+        self.id = UUID() // Generate new UUID on decode since we don't send it
+        self.role = try container.decode(Role.self, forKey: .role)
+        self.content = try container.decode([Content].self, forKey: .content)
+        self.state = .complete // Default to complete state when decoding
+    }
+
+    func encode(to encoder: Encoder) throws {
+        var container = encoder.container(keyedBy: CodingKeys.self)
+        try container.encode(role, forKey: .role)
+        try container.encode(content, forKey: .content)
+    }
+
     enum Role: String, Codable, Sendable {
         case system
         case user
@@ -28,33 +46,24 @@ struct Message: Codable, Sendable, Identifiable, Equatable {
         case text(String)
         case image(ImageContent)
 
-        private enum CodingKeys: String, CodingKey {
-            case type, content
-        }
-
         func encode(to encoder: Encoder) throws {
-            var container = encoder.container(keyedBy: CodingKeys.self)
+            var container = encoder.singleValueContainer()
             switch self {
             case .text(let text):
-                try container.encode(text, forKey: .content)
+                try container.encode(text)
             case .image(let imageContent):
-                try container.encode(imageContent, forKey: .content)
+                try container.encode(imageContent)
             }
         }
 
         init(from decoder: Decoder) throws {
-            let container = try decoder.container(keyedBy: CodingKeys.self)
-            let type = try container.decode(String.self, forKey: .type)
-
-            switch type {
-            case "text":
-                let text = try container.decode(String.self, forKey: .content)
+            let container = try decoder.singleValueContainer()
+            if let text = try? container.decode(String.self) {
                 self = .text(text)
-            case "image":
-                let imageContent = try container.decode(ImageContent.self, forKey: .content)
+            } else if let imageContent = try? container.decode(ImageContent.self) {
                 self = .image(imageContent)
-            default:
-                throw DecodingError.dataCorruptedError(forKey: .type, in: container, debugDescription: "Unknown content type")
+            } else {
+                throw DecodingError.dataCorruptedError(in: container, debugDescription: "Unable to decode ContentData")
             }
         }
     }
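
To make the wire-format change concrete: with the keyed Message coding and the new single-value Content coding, a user message round-trips roughly as sketched below. These are internal types, so this is an in-module sketch, and the exact server schema is not shown in this diff:

    // Decoding regenerates id and defaults state to .complete:
    let json = #"{"role":"user","content":["Hello"]}"#
    let message = try JSONDecoder().decode(Message.self, from: Data(json.utf8))

    // Encoding sends only role and content; the previous format wrapped
    // each element as {"type":"text","content":"Hello"} instead of "Hello".
    let data = try JSONEncoder().encode(message)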

Sources/CompilerSwiftAI/Model Calling/ModelCall.swift

Lines changed: 17 additions & 0 deletions
@@ -52,6 +52,23 @@ struct StreamRequest: ModelCallRequestBase {
 
         Logger.modelCalls.debug("Preparing stream request with \(messages.count) messages")
     }
+
+    private enum CodingKeys: String, CodingKey {
+        case provider, model, messages, temperature, maxTokens
+    }
+
+    func encode(to encoder: Encoder) throws {
+        var container = encoder.container(keyedBy: CodingKeys.self)
+        try container.encode(provider, forKey: .provider)
+        try container.encode(model, forKey: .model)
+        try container.encode(messages, forKey: .messages)
+        if let temperature = temperature {
+            try container.encode(temperature, forKey: .temperature)
+        }
+        if let maxTokens = maxTokens {
+            try container.encode(maxTokens, forKey: .maxTokens)
+        }
+    }
 }
 
 /// Response format for completion calls
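
A hedged illustration of what the custom encode(to:) above produces, with hypothetical field values: given a temperature of 0.7 and a nil maxTokens, the encoded body looks like

    {"messages":[...],"model":"chatgpt-4o-latest","provider":"openai","temperature":0.7}

(key order shown sorted for readability; a plain JSONEncoder does not guarantee ordering). "maxTokens" is simply absent rather than encoded as null, because the optionals are only encoded when non-nil.
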
Lines changed: 6 additions & 6 deletions
@@ -1,23 +1,23 @@
 // Copyright © 2025 Compiler, Inc. All rights reserved.
 
 /// AI Models supported by Compiler
-enum ModelProvider: String, Codable, Sendable, Equatable {
+public enum ModelProvider: String, Codable, Sendable, Equatable {
     case openai
     case anthropic
     case perplexity
     case deepseek
     case gemini
 }
 
-enum OpenAIModel: String, Codable {
+public enum OpenAIModel: String, Codable {
     case gpt4o = "chatgpt-4o-latest"
     case gpt4oMini = "gpt-4o-mini"
     case o1 = "o1"
     case o1Mini = "o1-mini"
     case o3Mini = "o3-mini"
 }
 
-enum GeminiModel: String, Codable {
+public enum GeminiModel: String, Codable {
     case flash = "gemini-2.0-flash"
     case flashLitePreview = "gemini-2.0-flash-lite-preview-02-05"
     case flash15 = "gemini-1.5-flash"
@@ -26,18 +26,18 @@ enum GeminiModel: String, Codable {
     case textEmbedding = "text-embedding-004"
 }
 
-enum AnthropicModel: String, Codable {
+public enum AnthropicModel: String, Codable {
     case claudeSonnet = "claude-3-5-sonnet-latest"
     case claudeHaiku = "claude-3-5-haiku-latest"
     case claudeOpus = "claude-3-5-opus-latest"
 }
 
-enum PerplexityModel: String, Codable {
+public enum PerplexityModel: String, Codable {
     case sonarReasoning = "sonar-reasoning"
     case sonarPro = "sonar-pro"
     case sonar = "sonar"
 }
-enum DeepSeekModel: String, Codable {
+public enum DeepSeekModel: String, Codable {
     case chat = "deepseek-chat"
     case reasoner = "deepseek-reasoner"
 }
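
Since these enums are now public, SDK consumers can reference providers and models directly; a quick sketch:

    let model: OpenAIModel = .gpt4o
    // model.rawValue == "chatgpt-4o-latest"

    let provider: ModelProvider = .anthropic
    // provider.rawValue == "anthropic"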
Lines changed: 123 additions & 0 deletions
@@ -0,0 +1,123 @@
+// Copyright © 2025 Compiler, Inc. All rights reserved.
+
+import Foundation
+
+/// Configuration for streaming chat sessions
+public struct StreamConfiguration: Sendable {
+    // Internal access to metadata
+    internal let metadata: ModelMetadata
+
+    // Internal init for SDK use
+    internal init(metadata: ModelMetadata) {
+        self.metadata = metadata
+    }
+
+    /// Create a streaming configuration with raw values
+    public init(
+        provider: ModelProvider,
+        model: Model,
+        temperature: Float? = nil,
+        maxTokens: Int? = nil
+    ) {
+        self.metadata = ModelMetadata(
+            provider: provider,
+            capabilities: [.chat],
+            model: model,
+            temperature: temperature,
+            maxTokens: maxTokens
+        )
+    }
+}
+
+// MARK: - Public Factory Methods
+public extension StreamConfiguration {
+    /// Create an OpenAI streaming configuration
+    /// - Parameters:
+    ///   - model: The OpenAI model to use
+    ///   - temperature: Optional temperature parameter (0.0 - 1.0)
+    ///   - maxTokens: Optional maximum tokens to generate
+    static func openAI(
+        _ model: OpenAIModel,
+        temperature: Float? = nil,
+        maxTokens: Int? = nil
+    ) -> StreamConfiguration {
+        .init(metadata: ModelMetadata(
+            provider: .openai,
+            model: model.rawValue,
+            temperature: temperature,
+            maxTokens: maxTokens
+        ))
+    }
+
+    /// Create an Anthropic streaming configuration
+    /// - Parameters:
+    ///   - model: The Anthropic model to use
+    ///   - temperature: Optional temperature parameter (0.0 - 1.0)
+    ///   - maxTokens: Optional maximum tokens to generate
+    static func anthropic(
+        _ model: AnthropicModel,
+        temperature: Float? = nil,
+        maxTokens: Int? = nil
+    ) -> StreamConfiguration {
+        .init(metadata: ModelMetadata(
+            provider: .anthropic,
+            model: model.rawValue,
+            temperature: temperature,
+            maxTokens: maxTokens
+        ))
+    }
+
+    /// Create a Perplexity streaming configuration
+    /// - Parameters:
+    ///   - model: The Perplexity model to use
+    ///   - temperature: Optional temperature parameter (0.0 - 1.0)
+    ///   - maxTokens: Optional maximum tokens to generate
+    static func perplexity(
+        _ model: PerplexityModel,
+        temperature: Float? = nil,
+        maxTokens: Int? = nil
+    ) -> StreamConfiguration {
+        .init(metadata: ModelMetadata(
+            provider: .perplexity,
+            model: model.rawValue,
+            temperature: temperature,
+            maxTokens: maxTokens
+        ))
+    }
+
+    /// Create a DeepSeek streaming configuration
+    /// - Parameters:
+    ///   - model: The DeepSeek model to use
+    ///   - temperature: Optional temperature parameter (0.0 - 1.0)
+    ///   - maxTokens: Optional maximum tokens to generate
+    static func deepseek(
+        _ model: DeepSeekModel,
+        temperature: Float? = nil,
+        maxTokens: Int? = nil
+    ) -> StreamConfiguration {
+        .init(metadata: ModelMetadata(
+            provider: .deepseek,
+            model: model.rawValue,
+            temperature: temperature,
+            maxTokens: maxTokens
+        ))
+    }
+
+    /// Create a Gemini streaming configuration
+    /// - Parameters:
+    ///   - model: The Gemini model to use
+    ///   - temperature: Optional temperature parameter (0.0 - 1.0)
+    ///   - maxTokens: Optional maximum tokens to generate
+    static func gemini(
+        _ model: GeminiModel,
+        temperature: Float? = nil,
+        maxTokens: Int? = nil
+    ) -> StreamConfiguration {
+        .init(metadata: ModelMetadata(
+            provider: .gemini,
+            model: model.rawValue,
+            temperature: temperature,
+            maxTokens: maxTokens
+        ))
+    }
+}
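
A short sketch of the factory methods defined above (values hypothetical). The Model type taken by the raw-value initializer is not shown in this diff, so that path is left commented out as an assumption:

    let openAI = StreamConfiguration.openAI(.gpt4oMini, temperature: 0.5)
    let anthropic = StreamConfiguration.anthropic(.claudeHaiku, maxTokens: 1024)
    let gemini = StreamConfiguration.gemini(.flash)

    // Raw-value form, assuming Model is string-like:
    // let custom = StreamConfiguration(provider: .openai, model: "o3-mini", temperature: 0.2)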

Sources/CompilerSwiftAI/UI/Chat/ChatView/ChatViewModel.swift

Lines changed: 3 additions & 1 deletion
@@ -193,7 +193,9 @@ class ChatViewModel: Transcribable {
             let messagesSoFar = await self.chatHistory.messages
             self.logger.log("Calling service.streamModelResponse with \(messagesSoFar.count) messages.")
 
-            let stream = await self.client.streamModelResponse(using: .openAI(.gpt4oMini), messages: messagesSoFar)
+            // Get immutable streaming configuration
+            let config = await self.client.makeStreamingSession()
+            let stream = await self.client.streamModelResponse(using: config.metadata, messages: messagesSoFar)
 
             var chunkCount = 0
             for try await partialMessage in stream {
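
With this change, the model behind the chat view can be swapped at runtime without touching the view model; a sketch assuming a client reference is in scope:

    Task {
        await client.updateStreamingChat { config in
            config = .deepseek(.chat)
        }
        // The next makeStreamingSession() call captures the new provider.
    }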

0 commit comments
