Skip to content
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
@@ -1,67 +1,37 @@
package org.wise.portal.presentation.web;

import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.OutputStreamWriter;
import java.net.HttpURLConnection;
import java.net.URL;

import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.core.env.Environment;
import org.springframework.beans.factory.annotation.Qualifier;
import org.springframework.security.access.annotation.Secured;
import org.springframework.http.MediaType;
import org.springframework.web.bind.annotation.PostMapping;
import org.springframework.web.bind.annotation.RequestBody;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.ResponseBody;
import org.springframework.web.bind.annotation.RestController;
import org.wise.portal.service.llm.LlmProvider;

/**
 * REST endpoint that forwards chat-completion requests to the AWS Bedrock LLM provider.
 *
 * <p>The actual HTTP call is delegated to the {@link LlmProvider} abstraction, keeping
 * this controller free of provider-specific details. To switch or extend the underlying
 * AI backend, register a different {@link LlmProvider} bean named {@code "bedrockLlmProvider"}
 * in {@link org.wise.portal.service.llm.LlmProviderConfig}.
 */
@RestController
@RequestMapping("/api/aws-bedrock/chat")
public class AWSBedrockController {

  // @Autowired is required here: @Qualifier alone only disambiguates an injection
  // point, it does not trigger injection, so without it this field stays null.
  @Autowired
  @Qualifier("bedrockLlmProvider")
  private LlmProvider llmProvider;

  /**
   * Forwards an OpenAI-format chat-completion request to the AWS Bedrock provider.
   *
   * @param body JSON request body in the OpenAI chat-completion format
   * @return the provider's raw JSON response
   * @throws RuntimeException if the provider is not configured or the upstream call fails
   */
  @ResponseBody
  @Secured("ROLE_USER")
  @PostMapping(produces = MediaType.APPLICATION_JSON_VALUE)
  protected String sendChatMessage(@RequestBody String body) {
    return llmProvider.chat(body);
  }

}
Original file line number Diff line number Diff line change
@@ -1,59 +1,37 @@
package org.wise.portal.presentation.web.controllers;

import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.OutputStreamWriter;
import java.net.HttpURLConnection;
import java.net.URL;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Qualifier;
import org.springframework.security.access.annotation.Secured;
import org.springframework.http.MediaType;
import org.springframework.web.bind.annotation.PostMapping;
import org.springframework.web.bind.annotation.RequestBody;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.ResponseBody;
import org.springframework.web.bind.annotation.RestController;
import org.wise.portal.service.llm.LlmProvider;

/**
 * REST endpoint that forwards chat-completion requests to the OpenAI LLM provider.
 *
 * <p>The actual HTTP call is delegated to the {@link LlmProvider} abstraction. The
 * {@code openai.chat.api.url} property may point to any OpenAI-compatible endpoint,
 * including local gateways such as Ollama or vLLM.
 *
 * @see org.wise.portal.service.llm.LlmProviderConfig
 */
@RestController
@RequestMapping("/api/chat-gpt")
public class ChatGptController {

  @Autowired
  @Qualifier("openAiLlmProvider")
  private LlmProvider llmProvider;

  /**
   * Forwards an OpenAI-format chat-completion request to the configured OpenAI
   * (or OpenAI-compatible) provider.
   *
   * @param body JSON request body in the OpenAI chat-completion format
   * @return the provider's raw JSON response
   * @throws RuntimeException if the provider is not configured or the upstream call fails
   */
  @ResponseBody
  @Secured("ROLE_USER")
  @PostMapping(produces = MediaType.APPLICATION_JSON_VALUE)
  protected String sendChatMessage(@RequestBody String body) {
    return llmProvider.chat(body);
  }
}
31 changes: 31 additions & 0 deletions src/main/java/org/wise/portal/service/llm/LlmProvider.java
Original file line number Diff line number Diff line change
@@ -0,0 +1,31 @@
package org.wise.portal.service.llm;

/**
 * Common contract for chat-capable AI language-model backends.
 *
 * <p>Each concrete implementation adapts one specific service — the public OpenAI API,
 * AWS Bedrock, or a locally hosted OpenAI-compatible gateway (Ollama, vLLM, etc.) —
 * behind this single interface so that callers never depend on provider details.
 *
 * <p>To add a backend (Gemini, Claude, another local model server), implement this
 * interface and register the implementation as a Spring bean in {@link LlmProviderConfig}.
 *
 * @author WISE Contributors
 */
public interface LlmProvider {

  /**
   * Submits a chat-completion request to the backend and hands back its raw JSON reply.
   *
   * @param requestBody JSON request body in the OpenAI chat-completion format
   * @return raw JSON response string from the provider
   * @throws RuntimeException if the provider is not configured or the upstream call fails
   */
  String chat(String requestBody);

  /**
   * Short, human-readable identifier for this provider (e.g. {@code "aws-bedrock"},
   * {@code "openai"}). Used for logging and future capability-based routing decisions.
   */
  String getName();
}
62 changes: 62 additions & 0 deletions src/main/java/org/wise/portal/service/llm/LlmProviderConfig.java
Original file line number Diff line number Diff line change
@@ -0,0 +1,62 @@
package org.wise.portal.service.llm;

import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.wise.portal.service.llm.impl.OpenAiCompatibleLlmProvider;

/**
 * Spring configuration that creates named {@link LlmProvider} beans from application properties.
 *
 * <p>Each AI endpoint used by WISE gets its own named bean so that controllers can inject the
 * right provider without knowing implementation details. Adding a new provider in the future
 * (e.g. Gemini, Claude, or a local Ollama gateway) requires only:
 * <ol>
 *   <li>A new {@link LlmProvider} implementation class (or reuse {@link OpenAiCompatibleLlmProvider}
 *   for any OpenAI-compatible endpoint), and</li>
 *   <li>A new {@code @Bean} method below wired from the corresponding properties.</li>
 * </ol>
 *
 * <p>Relevant application properties:
 * <pre>
 * # AWS Bedrock (OpenAI-compatible runtime)
 * aws.bedrock.api.key=
 * aws.bedrock.runtime.endpoint=
 *
 * # OpenAI
 * openai.api.key=
 * openai.chat.api.url=https://api.openai.com/v1/chat/completions
 * </pre>
 *
 * @author WISE Contributors
 */
@Configuration
public class LlmProviderConfig {

  /** Chat-completions path Bedrock exposes on top of the configured runtime endpoint. */
  private static final String BEDROCK_CHAT_PATH = "/openai/v1/chat/completions";

  /**
   * Provider backed by AWS Bedrock's OpenAI-compatible runtime endpoint.
   *
   * <p>Bedrock exposes an {@code /openai/v1/chat/completions} path on top of the configured
   * runtime endpoint, making it compatible with the same HTTP adapter used for OpenAI.
   * A trailing slash on the configured endpoint is tolerated.
   */
  @Bean("bedrockLlmProvider")
  public LlmProvider bedrockLlmProvider(
      @Value("${aws.bedrock.api.key:}") String apiKey,
      @Value("${aws.bedrock.runtime.endpoint:}") String runtimeEndpoint) {
    String chatApiUrl = "";
    if (runtimeEndpoint != null && !runtimeEndpoint.isEmpty()) {
      // Normalize a trailing slash so the appended path never produces "…//openai/…".
      String base = runtimeEndpoint.endsWith("/")
          ? runtimeEndpoint.substring(0, runtimeEndpoint.length() - 1)
          : runtimeEndpoint;
      chatApiUrl = base + BEDROCK_CHAT_PATH;
    }
    return new OpenAiCompatibleLlmProvider("aws-bedrock", apiKey, chatApiUrl);
  }

  /**
   * Provider backed by the OpenAI API (or any OpenAI-compatible endpoint configured via
   * {@code openai.chat.api.url}, e.g. a local Ollama/vLLM gateway).
   */
  @Bean("openAiLlmProvider")
  public LlmProvider openAiLlmProvider(
      @Value("${openai.api.key:}") String apiKey,
      @Value("${openai.chat.api.url:https://api.openai.com/v1/chat/completions}") String chatApiUrl) {
    return new OpenAiCompatibleLlmProvider("openai", apiKey, chatApiUrl);
  }
}
Original file line number Diff line number Diff line change
@@ -0,0 +1,82 @@
package org.wise.portal.service.llm.impl;

import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.OutputStreamWriter;
import java.net.HttpURLConnection;
import java.net.URL;
import java.nio.charset.StandardCharsets;

import org.wise.portal.service.llm.LlmProvider;

/**
 * {@link LlmProvider} implementation for any OpenAI-compatible chat-completion endpoint.
 *
 * <p>This single class covers:
 * <ul>
 *   <li>The real OpenAI API ({@code https://api.openai.com/v1/chat/completions})</li>
 *   <li>AWS Bedrock's OpenAI-compatible runtime endpoint</li>
 *   <li>Local OpenAI-compatible gateways such as Ollama, vLLM, LM Studio, or LocalAI</li>
 * </ul>
 *
 * <p>Instances are created by {@link org.wise.portal.service.llm.LlmProviderConfig}
 * and injected into controllers by name.
 *
 * @author WISE Contributors
 */
public class OpenAiCompatibleLlmProvider implements LlmProvider {

  private final String name;
  private final String apiKey;
  private final String chatApiUrl;

  /**
   * @param name short provider identifier used in logs and routing (e.g. {@code "openai"})
   * @param apiKey bearer token / API key sent in the {@code Authorization} header
   * @param chatApiUrl full URL of the chat-completion endpoint
   */
  public OpenAiCompatibleLlmProvider(String name, String apiKey, String chatApiUrl) {
    this.name = name;
    this.apiKey = apiKey;
    this.chatApiUrl = chatApiUrl;
  }

  /**
   * POSTs the request body to the configured endpoint and returns the raw JSON response
   * with line breaks removed.
   *
   * @param requestBody JSON request body in the OpenAI chat-completion format
   * @return raw JSON response string from the provider
   * @throws RuntimeException if the provider is unconfigured, the upstream returns a
   *         non-2xx status, or an I/O error occurs
   */
  @Override
  public String chat(String requestBody) {
    if (apiKey == null || apiKey.isEmpty()) {
      throw new RuntimeException("API key is not configured for LLM provider: " + name);
    }
    if (chatApiUrl == null || chatApiUrl.isEmpty()) {
      throw new RuntimeException("Chat API URL is not configured for LLM provider: " + name);
    }
    try {
      URL url = new URL(chatApiUrl);
      HttpURLConnection connection = (HttpURLConnection) url.openConnection();
      connection.setRequestMethod("POST");
      connection.setRequestProperty("Authorization", "Bearer " + apiKey);
      connection.setRequestProperty("Content-Type", "application/json; charset=utf-8");
      connection.setRequestProperty("Accept-Charset", "UTF-8");
      connection.setDoOutput(true);
      // Write in UTF-8 explicitly: a bare OutputStreamWriter uses the platform default
      // charset, which would contradict the charset declared in the Content-Type header.
      try (OutputStreamWriter writer =
          new OutputStreamWriter(connection.getOutputStream(), StandardCharsets.UTF_8)) {
        writer.write(requestBody);
      }
      int status = connection.getResponseCode();
      if (status >= 200 && status < 300) {
        return readFully(connection.getInputStream());
      }
      // On HTTP errors getInputStream() would throw and the provider's error body would
      // be lost; surface the status and error body in the exception message instead.
      InputStream errorStream = connection.getErrorStream();
      String errorBody = (errorStream == null) ? "" : readFully(errorStream);
      throw new RuntimeException("Chat request failed for LLM provider: " + name
          + " (HTTP " + status + "): " + errorBody);
    } catch (IOException e) {
      throw new RuntimeException("Chat request failed for LLM provider: " + name, e);
    }
  }

  /** Reads a stream to exhaustion as UTF-8 text, concatenating lines without separators. */
  private static String readFully(InputStream in) throws IOException {
    // try-with-resources guarantees the stream is closed even if reading fails.
    try (BufferedReader br = new BufferedReader(
        new InputStreamReader(in, StandardCharsets.UTF_8))) {
      StringBuilder response = new StringBuilder();
      String line;
      while ((line = br.readLine()) != null) {
        response.append(line);
      }
      return response.toString();
    }
  }

  @Override
  public String getName() {
    return name;
  }
}
20 changes: 19 additions & 1 deletion src/main/resources/application_sample.properties
Original file line number Diff line number Diff line change
Expand Up @@ -223,7 +223,25 @@ aws.accessKeyId=
aws.secretAccessKey=
aws.region=

# OpenAI and AWS Bedrock Chat endpoints (optional)
########## AI / LLM Provider Configuration (optional) ##########
#
# WISE supports multiple AI providers through a unified LlmProvider abstraction.
# Configure one or more of the following backends by uncommenting and filling in the values.
#
# --- AWS Bedrock (OpenAI-compatible runtime) ---
# aws.bedrock.api.key= bearer token for the Bedrock runtime endpoint
# aws.bedrock.runtime.endpoint= base URL of the Bedrock runtime
# (path /openai/v1/chat/completions is appended automatically)
#
# --- OpenAI (or any OpenAI-compatible endpoint) ---
# openai.api.key= API key issued by OpenAI (or your local gateway)
# openai.chat.api.url= full URL of the chat-completions endpoint
# Default: https://api.openai.com/v1/chat/completions
# Override with a local gateway URL (e.g. Ollama, vLLM) to avoid
# sending data to a public cloud.
#
# Future providers (Gemini, Claude, etc.) will follow the same pattern:
# add the corresponding properties and a new @Bean in LlmProviderConfig.
#openai.api.key=
#openai.chat.api.url=
#aws.bedrock.api.key=
Expand Down
Loading