@@ -2,8 +2,8 @@
 
 import org.springframework.beans.factory.annotation.Autowired;
 import org.springframework.beans.factory.annotation.Qualifier;
-import org.springframework.security.access.annotation.Secured;
 import org.springframework.http.MediaType;
+import org.springframework.security.access.annotation.Secured;
 import org.springframework.web.bind.annotation.PostMapping;
 import org.springframework.web.bind.annotation.RequestBody;
 import org.springframework.web.bind.annotation.RequestMapping;
@@ -2,8 +2,8 @@
 
 import org.springframework.beans.factory.annotation.Autowired;
 import org.springframework.beans.factory.annotation.Qualifier;
-import org.springframework.security.access.annotation.Secured;
 import org.springframework.http.MediaType;
+import org.springframework.security.access.annotation.Secured;
 import org.springframework.web.bind.annotation.PostMapping;
 import org.springframework.web.bind.annotation.RequestBody;
 import org.springframework.web.bind.annotation.RequestMapping;
19 changes: 8 additions & 11 deletions src/main/java/org/wise/portal/service/llm/LlmProvider.java
@@ -1,14 +1,11 @@
 package org.wise.portal.service.llm;
 
 /**
- * Abstraction for AI Language Model providers that support chat completion.
+ * Abstraction for an AI chat-completion backend.
  *
- * <p>Concrete implementations wrap a specific backend (e.g. OpenAI API, AWS Bedrock,
- * or a local OpenAI-compatible gateway such as Ollama/vLLM) while exposing a uniform
- * interface to callers.
- *
- * <p>Future providers (Gemini, Claude, OpenAI-compatible local models) should implement
- * this interface and be registered as Spring beans via {@link LlmProviderConfig}.
+ * <p>Implementations wrap a specific HTTP endpoint while exposing a uniform
+ * interface to callers. Concrete providers are wired as named Spring beans
+ * in {@link LlmProviderConfig}.
  *
  * @author WISE Contributors
  */
@@ -17,15 +14,15 @@ public interface LlmProvider {
   /**
    * Send a chat-completion request and return the provider's raw JSON response.
    *
-   * @param requestBody JSON request body in the OpenAI chat-completion format
+   * @param requestBody JSON request body in the chat-completion format
    * @return raw JSON response string from the provider
-   * @throws RuntimeException if the provider is not configured or the upstream call fails
+   * @throws RuntimeException if the provider is misconfigured or the upstream call fails
    */
   String chat(String requestBody);
 
   /**
-   * Short, human-readable identifier for this provider (e.g. {@code "aws-bedrock"},
-   * {@code "openai"}). Used for logging and future capability-based routing decisions.
+   * Short identifier for this provider (e.g. {@code "aws-bedrock"}, {@code "openai"}).
+   * Used for logging.
    */
   String getName();
 }
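
Any class that satisfies this two-method contract can serve as a provider. A minimal sketch of a custom implementation (CannedLlmProvider is a hypothetical name, shown here as a possible test double, not part of this change set):

package org.wise.portal.service.llm;

/** Hypothetical provider that ignores the request and returns a canned JSON response. */
public class CannedLlmProvider implements LlmProvider {

  private final String cannedResponse;

  public CannedLlmProvider(String cannedResponse) {
    this.cannedResponse = cannedResponse;
  }

  @Override
  public String chat(String requestBody) {
    // No HTTP call; just return the configured response verbatim.
    return cannedResponse;
  }

  @Override
  public String getName() {
    return "canned";
  }
}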
25 changes: 7 additions & 18 deletions src/main/java/org/wise/portal/service/llm/LlmProviderConfig.java
@@ -3,27 +3,17 @@
 import org.springframework.beans.factory.annotation.Value;
 import org.springframework.context.annotation.Bean;
 import org.springframework.context.annotation.Configuration;
-import org.wise.portal.service.llm.impl.OpenAiCompatibleLlmProvider;
+import org.wise.portal.service.llm.impl.HttpChatCompletionLlmProvider;
 
 /**
  * Spring configuration that creates named {@link LlmProvider} beans from application properties.
  *
  * <p>Each AI endpoint used by WISE gets its own named bean so that controllers can inject the
- * right provider without knowing implementation details. Adding a new provider in the future
- * (e.g. Gemini, Claude, or a local Ollama gateway) requires only:
- * <ol>
- *   <li>A new {@link LlmProvider} implementation class (or reuse {@link OpenAiCompatibleLlmProvider}
- *       for any OpenAI-compatible endpoint), and</li>
- *   <li>A new {@code @Bean} method below wired from the corresponding properties.</li>
- * </ol>
- *
- * <p>Relevant application properties:
+ * right provider via {@code @Qualifier}. The relevant properties are:
 * <pre>
 * # AWS Bedrock (OpenAI-compatible runtime)
 * aws.bedrock.api.key=
 * aws.bedrock.runtime.endpoint=
 *
 * # OpenAI
 * openai.api.key=
 * openai.chat.api.url=https://api.openai.com/v1/chat/completions
 * </pre>
@@ -36,8 +26,7 @@ public class LlmProviderConfig {
   /**
    * Provider backed by AWS Bedrock's OpenAI-compatible runtime endpoint.
    *
-   * <p>Bedrock exposes an {@code /openai/v1/chat/completions} path on top of the configured
-   * runtime endpoint, making it compatible with the same HTTP adapter used for OpenAI.
+   * <p>Bedrock appends {@code /openai/v1/chat/completions} to the configured runtime endpoint.
    */
   @Bean("bedrockLlmProvider")
   public LlmProvider bedrockLlmProvider(
@@ -46,17 +35,17 @@ public LlmProvider bedrockLlmProvider(
     String chatApiUrl = (runtimeEndpoint == null || runtimeEndpoint.isEmpty())
         ? ""
         : runtimeEndpoint + "/openai/v1/chat/completions";
-    return new OpenAiCompatibleLlmProvider("aws-bedrock", apiKey, chatApiUrl);
+    return new HttpChatCompletionLlmProvider("aws-bedrock", apiKey, chatApiUrl);
   }
 
   /**
-   * Provider backed by the OpenAI API (or any OpenAI-compatible endpoint configured via
-   * {@code openai.chat.api.url}, e.g. a local Ollama/vLLM gateway).
+   * Provider backed by the OpenAI API. The {@code openai.chat.api.url} property may be
+   * overridden to point at any OpenAI-compatible endpoint.
    */
   @Bean("openAiLlmProvider")
   public LlmProvider openAiLlmProvider(
       @Value("${openai.api.key:}") String apiKey,
       @Value("${openai.chat.api.url:https://api.openai.com/v1/chat/completions}") String chatApiUrl) {
-    return new OpenAiCompatibleLlmProvider("openai", apiKey, chatApiUrl);
+    return new HttpChatCompletionLlmProvider("openai", apiKey, chatApiUrl);
   }
 }
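
A caller obtains one of these beans by name. A minimal sketch of the injection site (ChatController, its package, and the /api/chat mapping are hypothetical illustrations; only the @Qualifier wiring reflects this change):

package org.wise.portal.presentation.web.controllers;

import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Qualifier;
import org.springframework.http.MediaType;
import org.springframework.web.bind.annotation.PostMapping;
import org.springframework.web.bind.annotation.RequestBody;
import org.springframework.web.bind.annotation.RestController;
import org.wise.portal.service.llm.LlmProvider;

@RestController
public class ChatController {

  // Swap the qualifier to "bedrockLlmProvider" to route requests through AWS Bedrock.
  @Autowired
  @Qualifier("openAiLlmProvider")
  private LlmProvider llmProvider;

  @PostMapping(value = "/api/chat", produces = MediaType.APPLICATION_JSON_VALUE)
  public String chat(@RequestBody String requestBody) {
    return llmProvider.chat(requestBody);
  }
}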
@@ -10,32 +10,29 @@
 import org.wise.portal.service.llm.LlmProvider;
 
 /**
- * {@link LlmProvider} implementation for any OpenAI-compatible chat-completion endpoint.
+ * {@link LlmProvider} implementation that sends chat-completion requests over HTTP
+ * using a Bearer-token Authorization header.
  *
- * <p>This single class covers:
- * <ul>
- *   <li>The real OpenAI API ({@code https://api.openai.com/v1/chat/completions})</li>
- *   <li>AWS Bedrock's OpenAI-compatible runtime endpoint</li>
- *   <li>Local OpenAI-compatible gateways such as Ollama, vLLM, LM Studio, or LocalAI</li>
- * </ul>
+ * <p>Works with any endpoint that accepts OpenAI-compatible chat-completion requests,
+ * including AWS Bedrock's runtime endpoint and the OpenAI API.
  *
 * <p>Instances are created by {@link org.wise.portal.service.llm.LlmProviderConfig}
 * and injected into controllers by name.
 *
 * @author WISE Contributors
 */
-public class OpenAiCompatibleLlmProvider implements LlmProvider {
+public class HttpChatCompletionLlmProvider implements LlmProvider {
 
   private final String name;
   private final String apiKey;
   private final String chatApiUrl;
 
   /**
-   * @param name short provider identifier used in logs and routing (e.g. {@code "openai"})
-   * @param apiKey bearer token / API key sent in the {@code Authorization} header
+   * @param name short provider identifier used in logs (e.g. {@code "aws-bedrock"})
+   * @param apiKey bearer token sent in the {@code Authorization} header
    * @param chatApiUrl full URL of the chat-completion endpoint
    */
-  public OpenAiCompatibleLlmProvider(String name, String apiKey, String chatApiUrl) {
+  public HttpChatCompletionLlmProvider(String name, String apiKey, String chatApiUrl) {
     this.name = name;
     this.apiKey = apiKey;
     this.chatApiUrl = chatApiUrl;
@@ -47,7 +44,8 @@ public String chat(String requestBody) {
       throw new RuntimeException("API key is not configured for LLM provider: " + name);
     }
     if (chatApiUrl == null || chatApiUrl.isEmpty()) {
-      throw new RuntimeException("Chat API URL is not configured for LLM provider: " + name);
+      throw new RuntimeException(
+          "Chat API URL is not configured for LLM provider: " + name);
     }
     try {
       URL url = new URL(chatApiUrl);
@@ -57,18 +55,17 @@
       connection.setRequestProperty("Content-Type", "application/json; charset=utf-8");
       connection.setRequestProperty("Accept-Charset", "UTF-8");
       connection.setDoOutput(true);
-      OutputStreamWriter writer = new OutputStreamWriter(connection.getOutputStream());
-      writer.write(requestBody);
-      writer.flush();
-      writer.close();
-      BufferedReader br = new BufferedReader(
-          new InputStreamReader(connection.getInputStream(), "UTF-8"));
-      String line;
+      try (OutputStreamWriter writer = new OutputStreamWriter(connection.getOutputStream())) {
+        writer.write(requestBody);
+      }
       StringBuilder response = new StringBuilder();
-      while ((line = br.readLine()) != null) {
-        response.append(line);
+      try (BufferedReader br = new BufferedReader(
+          new InputStreamReader(connection.getInputStream(), "UTF-8"))) {
+        String line;
+        while ((line = br.readLine()) != null) {
+          response.append(line);
+        }
       }
-      br.close();
       return response.toString();
     } catch (IOException e) {
       throw new RuntimeException("Chat request failed for LLM provider: " + name, e);
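
End to end, the provider relays an OpenAI-format chat-completion body and returns the raw JSON. A sketch of direct usage (the model name, environment variable, and ChatSmokeTest class are illustrative assumptions, not part of this change):

import org.wise.portal.service.llm.LlmProvider;
import org.wise.portal.service.llm.impl.HttpChatCompletionLlmProvider;

public class ChatSmokeTest {

  public static void main(String[] args) {
    // Hypothetical manual smoke test; the API key is read from the environment.
    LlmProvider provider = new HttpChatCompletionLlmProvider("openai",
        System.getenv("OPENAI_API_KEY"),
        "https://api.openai.com/v1/chat/completions");
    String response = provider.chat(
        "{\"model\": \"gpt-4o-mini\", \"messages\": "
            + "[{\"role\": \"user\", \"content\": \"Say hello.\"}]}");
    System.out.println(response);
  }
}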
26 changes: 5 additions & 21 deletions src/main/resources/application_sample.properties
@@ -223,26 +223,10 @@ aws.accessKeyId=
 aws.secretAccessKey=
 aws.region=
 
-########## AI / LLM Provider Configuration (optional) ##########
-#
-# WISE supports multiple AI providers through a unified LlmProvider abstraction.
-# Configure one or more of the following backends by uncommenting and filling in the values.
-#
-# --- AWS Bedrock (OpenAI-compatible runtime) ---
-# aws.bedrock.api.key= bearer token for the Bedrock runtime endpoint
-# aws.bedrock.runtime.endpoint= base URL of the Bedrock runtime
-#     (path /openai/v1/chat/completions is appended automatically)
-#
-# --- OpenAI (or any OpenAI-compatible endpoint) ---
-# openai.api.key= API key issued by OpenAI (or your local gateway)
-# openai.chat.api.url= full URL of the chat-completions endpoint
-#     Default: https://api.openai.com/v1/chat/completions
-#     Override with a local gateway URL (e.g. Ollama, vLLM) to avoid
-#     sending data to a public cloud.
-#
-# Future providers (Gemini, Claude, etc.) will follow the same pattern:
-# add the corresponding properties and a new @Bean in LlmProviderConfig.
-#openai.api.key=
-#openai.chat.api.url=
+# AI / LLM provider settings (optional)
+# AWS Bedrock (OpenAI-compatible runtime)
+#aws.bedrock.api.key=
+#aws.bedrock.runtime.endpoint=
+# OpenAI (or any OpenAI-compatible endpoint)
+#openai.api.key=
+#openai.chat.api.url=https://api.openai.com/v1/chat/completions
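
Because openai.chat.api.url accepts any OpenAI-compatible endpoint, the same two properties can point WISE at a local gateway instead of the public API. A sketch (the URL shown is Ollama's usual default and the placeholder key is an assumption; many local gateways ignore the key, but the provider requires a non-empty value):

#openai.api.key=local-placeholder
#openai.chat.api.url=http://localhost:11434/v1/chat/completions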
@@ -0,0 +1,55 @@
+package org.wise.portal.service.llm;
+
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+
+import org.junit.jupiter.api.Test;
+import org.wise.portal.service.llm.impl.HttpChatCompletionLlmProvider;
+
+/**
+ * Unit tests for {@link LlmProviderConfig} bean factory methods.
+ */
+public class LlmProviderConfigTest {
+
+  private final LlmProviderConfig config = new LlmProviderConfig();
+
+  @Test
+  public void bedrockLlmProvider_WithValidConfig_ReturnsNamedProvider() {
+    LlmProvider provider = config.bedrockLlmProvider("my-api-key",
+        "https://bedrock.example.com");
+    assertNotNull(provider);
+    assertEquals("aws-bedrock", provider.getName());
+  }
+
+  @Test
+  public void bedrockLlmProvider_AppendsOpenAiPathToRuntimeEndpoint() {
+    LlmProvider provider = config.bedrockLlmProvider("key", "https://bedrock.example.com");
+    assertNotNull(provider);
+    // Name is accessible; URL construction is verified via chat() misconfiguration test in
+    // HttpChatCompletionLlmProviderTest
+    assertEquals("aws-bedrock", provider.getName());
+  }
+
+  @Test
+  public void bedrockLlmProvider_EmptyEndpoint_ReturnsProviderWithEmptyUrl() {
+    LlmProvider provider = config.bedrockLlmProvider("key", "");
+    assertNotNull(provider);
+    assertEquals("aws-bedrock", provider.getName());
+  }
+
+  @Test
+  public void openAiLlmProvider_WithValidConfig_ReturnsNamedProvider() {
+    LlmProvider provider = config.openAiLlmProvider("sk-test",
+        "https://api.openai.com/v1/chat/completions");
+    assertNotNull(provider);
+    assertEquals("openai", provider.getName());
+  }
+
+  @Test
+  public void openAiLlmProvider_ReturnsHttpChatCompletionLlmProvider() {
+    LlmProvider provider = config.openAiLlmProvider("sk-test",
+        "https://api.openai.com/v1/chat/completions");
+    assertNotNull(provider);
+    assertEquals(HttpChatCompletionLlmProvider.class, provider.getClass());
+  }
+}
@@ -8,53 +8,53 @@
 import org.wise.portal.service.llm.LlmProvider;
 
 /**
- * Unit tests for {@link OpenAiCompatibleLlmProvider}.
+ * Unit tests for {@link HttpChatCompletionLlmProvider}.
  */
-public class OpenAiCompatibleLlmProviderTest {
+public class HttpChatCompletionLlmProviderTest {
 
   @Test
   public void getName_ReturnsConfiguredName() {
-    LlmProvider provider = new OpenAiCompatibleLlmProvider("openai", "test-key",
+    LlmProvider provider = new HttpChatCompletionLlmProvider("openai", "test-key",
         "https://api.openai.com/v1/chat/completions");
     assertEquals("openai", provider.getName());
   }
 
   @Test
   public void getName_BedrockProviderName_ReturnsCorrectName() {
-    LlmProvider provider = new OpenAiCompatibleLlmProvider("aws-bedrock", "test-key",
+    LlmProvider provider = new HttpChatCompletionLlmProvider("aws-bedrock", "test-key",
         "https://bedrock.example.com/openai/v1/chat/completions");
     assertEquals("aws-bedrock", provider.getName());
   }
 
   @Test
   public void chat_MissingApiKey_ThrowsRuntimeException() {
-    LlmProvider provider = new OpenAiCompatibleLlmProvider("openai", "",
+    LlmProvider provider = new HttpChatCompletionLlmProvider("openai", "",
         "https://api.openai.com/v1/chat/completions");
     RuntimeException ex = assertThrows(RuntimeException.class, () -> provider.chat("{}"));
     assertTrue(ex.getMessage().contains("API key is not configured for LLM provider: openai"));
   }
 
   @Test
   public void chat_NullApiKey_ThrowsRuntimeException() {
-    LlmProvider provider = new OpenAiCompatibleLlmProvider("openai", null,
+    LlmProvider provider = new HttpChatCompletionLlmProvider("openai", null,
         "https://api.openai.com/v1/chat/completions");
     RuntimeException ex = assertThrows(RuntimeException.class, () -> provider.chat("{}"));
     assertTrue(ex.getMessage().contains("API key is not configured for LLM provider: openai"));
   }
 
   @Test
   public void chat_MissingChatApiUrl_ThrowsRuntimeException() {
-    LlmProvider provider = new OpenAiCompatibleLlmProvider("aws-bedrock", "test-key", "");
+    LlmProvider provider = new HttpChatCompletionLlmProvider("aws-bedrock", "test-key", "");
     RuntimeException ex = assertThrows(RuntimeException.class, () -> provider.chat("{}"));
-    assertTrue(
-        ex.getMessage().contains("Chat API URL is not configured for LLM provider: aws-bedrock"));
+    assertTrue(ex.getMessage()
+        .contains("Chat API URL is not configured for LLM provider: aws-bedrock"));
   }
 
   @Test
   public void chat_NullChatApiUrl_ThrowsRuntimeException() {
-    LlmProvider provider = new OpenAiCompatibleLlmProvider("aws-bedrock", "test-key", null);
+    LlmProvider provider = new HttpChatCompletionLlmProvider("aws-bedrock", "test-key", null);
     RuntimeException ex = assertThrows(RuntimeException.class, () -> provider.chat("{}"));
-    assertTrue(
-        ex.getMessage().contains("Chat API URL is not configured for LLM provider: aws-bedrock"));
+    assertTrue(ex.getMessage()
+        .contains("Chat API URL is not configured for LLM provider: aws-bedrock"));
  }
 }