Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
12 changes: 12 additions & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,18 @@

All notable changes to `uipath_llm_client` (core package) will be documented in this file.

## [1.8.0] - 2026-04-04

### Added
- `UiPathLiteLLM` — LiteLLM client wrapper for routing completions and embeddings through UiPath LLM Gateway
- Sync/async completions via `completion()` / `acompletion()`
- Sync/async embeddings via `embedding()` / `aembedding()`
- Injects UiPath-configured OpenAI client as HTTP transport
- Added `litellm` optional dependency (`uipath-llm-client[litellm]`)

### Changed
- Bumped dependencies: `uipath-platform>=0.1.18`, `anthropic>=0.89.0`, `litellm>=1.83.1`

## [1.7.0] - 2026-04-03

### Added
Expand Down
8 changes: 7 additions & 1 deletion packages/uipath_langchain_client/CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -2,11 +2,17 @@

All notable changes to `uipath_langchain_client` will be documented in this file.

## [1.7.1] - 2026-04-04
## [1.8.0] - 2026-04-04

### Added
- `UiPathChatLiteLLM` — LangChain chat model wrapping LiteLLM routed through UiPath LLM Gateway
- `UiPathLiteLLMEmbeddings` — LangChain embeddings wrapping LiteLLM routed through UiPath LLM Gateway
- Added `litellm` optional dependency (`uipath-langchain-client[litellm]`)
- `custom_class` parameter in `get_chat_model()` and `get_embedding_model()` factory functions to allow instantiating a user-provided class instead of the auto-detected one

### Changed
- Bumped dependencies: `langchain>=1.2.15`, `anthropic>=0.89.0`, `langchain-aws>=1.4.3`, `langchain-litellm>=0.6.4`

## [1.7.0] - 2026-04-03

### Added
Expand Down
17 changes: 10 additions & 7 deletions packages/uipath_langchain_client/pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -5,35 +5,38 @@ dynamic = ["version"]
readme = "README.md"
requires-python = ">=3.11"
dependencies = [
"langchain>=1.2.13",
"uipath-llm-client>=1.7.0",
"langchain>=1.2.15",
"uipath-llm-client>=1.8.0",
]

[project.optional-dependencies]
openai = [
"langchain-openai>=1.1.11",
"langchain-openai>=1.1.12",
]
google = [
"langchain-google-genai>=4.2.1",
]
anthropic = [
"langchain-anthropic>=1.4.0",
"anthropic[bedrock,vertex]>=0.86.0",
"anthropic[bedrock,vertex]>=0.89.0",
]
aws = [
"langchain-aws[anthropic]>=1.4.1",
"langchain-aws[anthropic]>=1.4.3",
]
vertexai = [
"langchain-google-vertexai>=3.2.2",
]
azure = [
"langchain-azure-ai>=1.1.1",
"langchain-azure-ai>=1.2.1",
]
fireworks = [
"langchain-fireworks>=1.1.0",
]
litellm = [
"langchain-litellm>=0.6.4",
]
all = [
"uipath-langchain-client[openai,aws,google,anthropic,azure,vertexai,fireworks]"
"uipath-langchain-client[openai,aws,google,anthropic,azure,vertexai,fireworks,litellm]"
]

[build-system]
Expand Down
Original file line number Diff line number Diff line change
@@ -1,3 +1,3 @@
__title__ = "UiPath LangChain Client"
__description__ = "A Python client for interacting with UiPath's LLM services via LangChain."
__version__ = "1.7.1"
__version__ = "1.8.0"
Original file line number Diff line number Diff line change
@@ -0,0 +1,7 @@
"""Public exports for the UiPath LiteLLM LangChain integration."""

from uipath_langchain_client.clients.litellm.chat_models import UiPathChatLiteLLM
from uipath_langchain_client.clients.litellm.embeddings import UiPathLiteLLMEmbeddings

__all__ = [
    "UiPathChatLiteLLM",
    "UiPathLiteLLMEmbeddings",
]
Original file line number Diff line number Diff line change
@@ -0,0 +1,82 @@
from typing import Any, Optional, Self

from pydantic import Field, model_validator

from uipath_langchain_client.base_client import UiPathBaseChatModel

try:
from langchain_litellm import ChatLiteLLM
from openai import AsyncOpenAI, OpenAI
except ImportError as e:
raise ImportError(
"The 'litellm' extra is required to use UiPathChatLiteLLM. "
"Install it with: uv add uipath-langchain-client[litellm]"
) from e

from langchain_core.callbacks import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)


class UiPathChatLiteLLM(UiPathBaseChatModel, ChatLiteLLM):  # type: ignore[override]
    """LiteLLM chat model routed through UiPath LLM Gateway.

    Combines UiPath's authentication and routing with LiteLLM's
    multi-provider chat interface. The api_config must be provided
    by the caller — provider routing is not auto-detected from model names.
    """

    # Override ChatLiteLLM's model field to align with
    # UiPathBaseLLMClient.model_name (alias="model").
    model: str = Field(default="", alias="model_name")

    # OpenAI SDK clients whose HTTP transport is the UiPath-authenticated
    # client; populated by setup_uipath_client after model validation.
    # `X | None` unions match the sibling UiPathLiteLLMEmbeddings module.
    _uipath_openai_client: OpenAI | None = None
    _uipath_async_openai_client: AsyncOpenAI | None = None

    @model_validator(mode="after")
    def setup_uipath_client(self) -> Self:
        """Build sync/async OpenAI clients backed by UiPath HTTP transports.

        The api_key is a placeholder: authentication is handled by the
        injected UiPath http_client. max_retries=0 so retrying is left to
        the langchain-litellm retry decorators applied in the call paths.
        """
        base_url = str(self.uipath_sync_client.base_url).rstrip("/")

        self._uipath_openai_client = OpenAI(
            api_key="PLACEHOLDER",
            max_retries=0,
            http_client=self.uipath_sync_client,
            base_url=base_url,
        )
        self._uipath_async_openai_client = AsyncOpenAI(
            api_key="PLACEHOLDER",
            max_retries=0,
            http_client=self.uipath_async_client,
            base_url=base_url,
        )
        return self

    def completion_with_retry(
        self, run_manager: CallbackManagerForLLMRun | None = None, **kwargs: Any
    ) -> Any:
        """Override to inject the UiPath OpenAI client into litellm calls.

        Args:
            run_manager: Optional LangChain callback manager for this run.
            **kwargs: Forwarded to the underlying litellm completion call.

        Returns:
            The litellm completion response.
        """
        from langchain_litellm.chat_models.litellm import _create_retry_decorator

        # litellm routes the request through this client object, so the call
        # inherits UiPath auth/routing from its HTTP transport.
        kwargs["client"] = self._uipath_openai_client
        retry_decorator = _create_retry_decorator(self, run_manager=run_manager)

        @retry_decorator
        def _completion_with_retry(**kwargs: Any) -> Any:
            # self.client is set up by ChatLiteLLM — presumably the litellm
            # module; confirm against langchain_litellm internals.
            return self.client.completion(**kwargs)

        return _completion_with_retry(**kwargs)

    async def acompletion_with_retry(
        self, run_manager: AsyncCallbackManagerForLLMRun | None = None, **kwargs: Any
    ) -> Any:
        """Override to inject the UiPath async OpenAI client into litellm calls.

        Args:
            run_manager: Optional async LangChain callback manager for this run.
            **kwargs: Forwarded to the underlying litellm async completion call.

        Returns:
            The litellm completion response.
        """
        from langchain_litellm.chat_models.litellm import _create_retry_decorator

        kwargs["client"] = self._uipath_async_openai_client
        retry_decorator = _create_retry_decorator(self, run_manager=run_manager)

        @retry_decorator
        async def _completion_with_retry(**kwargs: Any) -> Any:
            return await self.client.acompletion(**kwargs)

        return await _completion_with_retry(**kwargs)
Original file line number Diff line number Diff line change
@@ -0,0 +1,75 @@
from typing import Any, Self

from pydantic import Field, model_validator

from uipath_langchain_client.base_client import UiPathBaseEmbeddings

try:
from langchain_litellm import LiteLLMEmbeddings
from openai import AsyncOpenAI, OpenAI
except ImportError as e:
raise ImportError(
"The 'litellm' extra is required to use UiPathLiteLLMEmbeddings. "
"Install it with: uv add uipath-langchain-client[litellm]"
) from e


class UiPathLiteLLMEmbeddings(UiPathBaseEmbeddings, LiteLLMEmbeddings):  # type: ignore[override]
    """LiteLLM embeddings routed through UiPath LLM Gateway.

    Combines UiPath's authentication and routing with LiteLLM's
    multi-provider embeddings interface. The api_config must be provided
    by the caller.
    """

    # Override LiteLLMEmbeddings' model field to align with UiPathBaseLLMClient.model_name
    model: str = Field(default="", alias="model_name")

    _uipath_openai_client: OpenAI | None = None
    _uipath_async_openai_client: AsyncOpenAI | None = None

    @model_validator(mode="after")
    def setup_uipath_client(self) -> Self:
        """Attach sync and async OpenAI clients that transport through UiPath."""
        gateway_base = str(self.uipath_sync_client.base_url).rstrip("/")

        # api_key is a placeholder (auth lives in the UiPath http_client);
        # max_retries=0 defers retrying to the decorators below.
        self._uipath_openai_client = OpenAI(
            base_url=gateway_base,
            http_client=self.uipath_sync_client,
            api_key="PLACEHOLDER",
            max_retries=0,
        )
        self._uipath_async_openai_client = AsyncOpenAI(
            base_url=gateway_base,
            http_client=self.uipath_async_client,
            api_key="PLACEHOLDER",
            max_retries=0,
        )
        return self

    def _embedding_with_retry(self, **kwargs: Any) -> Any:
        """Run a litellm embedding call through the UiPath-backed client with retries."""
        import litellm
        from langchain_litellm.embeddings.litellm import _create_retry_decorator

        wrap_with_retries = _create_retry_decorator(self.max_retries)
        kwargs["client"] = self._uipath_openai_client

        @wrap_with_retries
        def _invoke() -> Any:
            return litellm.embedding(**kwargs)

        return _invoke()

    async def _aembedding_with_retry(self, **kwargs: Any) -> Any:
        """Run an async litellm embedding call through the UiPath-backed client with retries."""
        import litellm
        from langchain_litellm.embeddings.litellm import _create_retry_decorator

        wrap_with_retries = _create_retry_decorator(self.max_retries)
        kwargs["client"] = self._uipath_async_openai_client

        @wrap_with_retries
        async def _invoke() -> Any:
            return await litellm.aembedding(**kwargs)

        return await _invoke()
17 changes: 10 additions & 7 deletions pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -9,7 +9,7 @@ dependencies = [
"tenacity>=9.1.4",
"pydantic>=2.12.5",
"pydantic-settings>=2.13.1",
"uipath-platform>=0.1.2",
"uipath-platform>=0.1.18",
]

authors = [
Expand All @@ -19,16 +19,19 @@ authors = [

[project.optional-dependencies]
openai = [
"openai>=2.29.0",
"openai>=2.30.0",
]
google = [
"google-genai>=1.68.0",
"google-genai>=1.70.0",
]
anthropic = [
"anthropic>=0.86.0",
"anthropic>=0.89.0",
]
litellm = [
"litellm>=1.83.1",
]
all = [
"uipath-llm-client[openai,google,anthropic]",
"uipath-llm-client[openai,google,anthropic,litellm]",
]

[dependency-groups]
Expand All @@ -38,10 +41,10 @@ dev = [
"pytest-asyncio>=1.3.0",
"pytest-recording>=0.13.4",
"pyright>=1.1.408",
"ruff>=0.15.7",
"ruff>=0.15.9",
"openinference-instrumentation-langchain>=0.1.62",
"uipath-llm-client[all]",
"uipath_langchain_client[all]",
"openinference-instrumentation-langchain>=0.1.61",
]

[tool.uv.workspace]
Expand Down
2 changes: 1 addition & 1 deletion src/uipath/llm_client/__version__.py
Original file line number Diff line number Diff line change
@@ -1,3 +1,3 @@
__title__ = "UiPath LLM Client"
__description__ = "A Python client for interacting with UiPath's LLM services."
__version__ = "1.7.0"
__version__ = "1.8.0"
5 changes: 5 additions & 0 deletions src/uipath/llm_client/clients/litellm/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,5 @@
"""Public exports for the UiPath LiteLLM core client."""

from uipath.llm_client.clients.litellm.client import UiPathLiteLLM

__all__ = [
    "UiPathLiteLLM",
]
Loading