
Commit a5115b0

add base normalized client and fix langchain normalized client (#54)
1 parent b72c997 · commit a5115b0

16 files changed

Lines changed: 3063 additions & 39 deletions


CHANGELOG.md

Lines changed: 13 additions & 0 deletions
```diff
@@ -2,6 +2,19 @@
 
 All notable changes to `uipath_llm_client` (core package) will be documented in this file.
 
+## [1.7.0] - 2026-04-03
+
+### Added
+- `UiPathNormalizedClient` — provider-agnostic LLM client with no optional dependencies
+- `client.completions.create/acreate/stream/astream` for chat completions
+- `client.embeddings.create/acreate` for embeddings
+- Structured output via `response_format` (Pydantic, TypedDict, dict, json_object)
+- Tool calling with dicts, Pydantic models, or callables
+- Streaming with SSE parsing
+- Full vendor parameter coverage: OpenAI (reasoning, logprobs, logit_bias), Anthropic (thinking, top_k), Google (thinking_level/budget, safety_settings, cached_content)
+- Typed response models: `ChatCompletion`, `ChatCompletionChunk`, `EmbeddingResponse`
+- Accepts both dict and Pydantic model messages
+
 ## [1.6.0] - 2026-04-03
 
 ### Fixed
```
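For orientation, here is a minimal usage sketch of the `UiPathNormalizedClient` surface described in this entry. The method names (`completions.create`, `embeddings.create`, `response_format`) come from the changelog above; the model names, constructor arguments, and message content are illustrative assumptions rather than confirmed signatures.

```python
from pydantic import BaseModel

from uipath.llm_client import UiPathNormalizedClient


class Weather(BaseModel):
    """Illustrative structured-output schema."""
    city: str
    temperature_c: float


# Assumes default settings resolve credentials and the base URL from the environment.
client = UiPathNormalizedClient()

# Chat completion with structured output; "gpt-4o" is a placeholder model name.
completion = client.completions.create(
    model="gpt-4o",
    messages=[{"role": "user", "content": "What is the weather in Oslo?"}],
    response_format=Weather,
)

# Embeddings; the model name is again a placeholder.
embedding = client.embeddings.create(
    model="text-embedding-3-small",
    input=["hello world"],
)
```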

packages/uipath_langchain_client/CHANGELOG.md

Lines changed: 7 additions & 0 deletions
```diff
@@ -2,6 +2,13 @@
 
 All notable changes to `uipath_langchain_client` will be documented in this file.
 
+## [1.7.0] - 2026-04-03
+
+### Added
+- `UiPathChat.with_structured_output()` — supports `function_calling`, `json_schema`, and `json_mode` methods
+- `UiPathChat.bind_tools()` — added `parallel_tool_calls` parameter
+- Added vendor-specific parameters to `UiPathChat`: `logit_bias`, `logprobs`, `top_logprobs`, `parallel_tool_calls`, `top_k`, `safety_settings`, `cached_content`, `labels`, `seed`
+
 ## [1.6.0] - 2026-04-03
 
 ### Fixed
```
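A brief sketch of how the LangChain-side additions might be used. The `with_structured_output` and `bind_tools` signatures follow the `chat_models.py` diff further down; the model name, schema, and tool are illustrative assumptions.

```python
from pydantic import BaseModel

from uipath_langchain_client.clients.normalized.chat_models import UiPathChat


class Person(BaseModel):
    name: str
    age: int


chat = UiPathChat(model="gpt-4o")  # placeholder model name

# Structured output via the normalized API's json_schema response_format.
structured = chat.with_structured_output(Person, method="json_schema", strict=True)
person = structured.invoke("Extract the person: Ada Lovelace, age 36.")


def get_weather(city: str) -> str:
    """Illustrative tool: return the weather for a city."""
    return f"Sunny in {city}"


# Tool binding with the new parallel_tool_calls flag.
chat_with_tools = chat.bind_tools([get_weather], parallel_tool_calls=False)
```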

packages/uipath_langchain_client/pyproject.toml

Lines changed: 1 addition & 1 deletion
```diff
@@ -6,7 +6,7 @@ readme = "README.md"
 requires-python = ">=3.11"
 dependencies = [
     "langchain>=1.2.13",
-    "uipath-llm-client>=1.5.10",
+    "uipath-llm-client>=1.7.0",
 ]
 
 [project.optional-dependencies]
```
Lines changed: 1 addition & 1 deletion
```diff
@@ -1,3 +1,3 @@
 __title__ = "UiPath LangChain Client"
 __description__ = "A Python client for interacting with UiPath's LLM services via LangChain."
-__version__ = "1.6.0"
+__version__ = "1.7.0"
```

packages/uipath_langchain_client/src/uipath_langchain_client/clients/normalized/chat_models.py

Lines changed: 221 additions & 36 deletions
```diff
@@ -25,7 +25,8 @@
 
 import json
 from collections.abc import AsyncGenerator, Callable, Generator, Sequence
-from typing import Any
+from functools import partial
+from typing import Any, Literal, Union, cast
 
 from langchain_core.callbacks import (
     AsyncCallbackManagerForLLMRun,
@@ -44,21 +45,76 @@
     UsageMetadata,
 )
 from langchain_core.messages.utils import convert_to_openai_messages
+from langchain_core.output_parsers import JsonOutputParser
+from langchain_core.output_parsers.openai_tools import (
+    JsonOutputKeyToolsParser,
+    PydanticToolsParser,
+)
 from langchain_core.outputs import (
     ChatGeneration,
     ChatGenerationChunk,
     ChatResult,
 )
-from langchain_core.runnables import Runnable
+from langchain_core.runnables import Runnable, RunnableLambda, RunnablePassthrough
 from langchain_core.tools import BaseTool
 from langchain_core.utils.function_calling import (
     convert_to_openai_function,
+    convert_to_openai_tool,
 )
-from pydantic import Field
+from langchain_core.utils.pydantic import is_basemodel_subclass
+from pydantic import AliasChoices, BaseModel, Field
 
 from uipath_langchain_client.base_client import UiPathBaseChatModel
 from uipath_langchain_client.settings import ApiType, RoutingMode, UiPathAPIConfig
 
+_DictOrPydanticClass = Union[dict[str, Any], type[BaseModel], type]
+_DictOrPydantic = Union[dict[str, Any], BaseModel]
+
+
+def _oai_structured_outputs_parser(ai_msg: AIMessage, schema: type[BaseModel]) -> BaseModel:
+    if not ai_msg.content:
+        raise ValueError("Expected non-empty content from model.")
+    content = ai_msg.content
+    if isinstance(content, list):
+        # Extract the first text block from content parts
+        content = next((c for c in content if isinstance(c, str)), str(content[0]))
+    parsed = json.loads(content)
+    return schema.model_validate(parsed)
+
+
+def _build_normalized_response_format(
+    schema: _DictOrPydanticClass, strict: bool | None = None
+) -> dict[str, Any]:
+    """Build response_format for the normalized API from a schema."""
+    if isinstance(schema, dict):
+        return {"type": "json_schema", "json_schema": schema}
+
+    if isinstance(schema, type) and issubclass(schema, BaseModel):
+        json_schema = schema.model_json_schema()
+        rf: dict[str, Any] = {
+            "type": "json_schema",
+            "json_schema": {
+                "name": schema.__name__,
+                "schema": json_schema,
+            },
+        }
+        if strict is not None:
+            rf["json_schema"]["strict"] = strict
+        return rf
+
+    # TypedDict or other type — convert via openai tool schema
+    tool_schema = convert_to_openai_tool(schema)
+    rf = {
+        "type": "json_schema",
+        "json_schema": {
+            "name": tool_schema["function"]["name"],
+            "schema": tool_schema["function"]["parameters"],
+        },
+    }
+    if strict is not None:
+        rf["json_schema"]["strict"] = strict
+    return rf
+
 
 class UiPathChat(UiPathBaseChatModel):
     """LangChain chat model using UiPath's normalized (provider-agnostic) API.
@@ -101,33 +157,48 @@ class UiPathChat(UiPathBaseChatModel):
         freeze_base_url=True,
     )
 
-    # Standard LLM parameters
-    max_tokens: int | None = None
+    # Common
+    max_tokens: int | None = Field(
+        default=None,
+        validation_alias=AliasChoices("max_tokens", "max_output_tokens", "max_completion_tokens"),
+    )
     temperature: float | None = None
-    stop: list[str] | str | None = Field(default=None, alias="stop_sequences")
+    top_p: float | None = None
+    top_k: int | None = None
+    stop: list[str] | str | None = Field(
+        default=None,
+        validation_alias=AliasChoices("stop", "stop_sequences"),
+    )
+    n: int | None = Field(
+        default=None,
+        validation_alias=AliasChoices("n", "candidate_count"),
+    )
+    frequency_penalty: float | None = None
+    presence_penalty: float | None = None
+    seed: int | None = None
 
-    n: int | None = None  # Number of completions to generate
-    top_p: float | None = None  # Nucleus sampling probability mass
-    presence_penalty: float | None = None  # Penalty for repeated tokens
-    frequency_penalty: float | None = None  # Frequency-based repetition penalty
-    verbosity: str | None = None  # Response verbosity: "low", "medium", or "high"
+    model_kwargs: dict[str, Any] = Field(default_factory=dict)
+    disabled_params: dict[str, Any] | None = None
 
-    model_kwargs: dict[str, Any] = Field(
-        default_factory=dict
-    )  # Additional model-specific parameters
-    disabled_params: dict[str, Any] | None = None  # Parameters to exclude from requests
+    # OpenAI
+    logit_bias: dict[str, int] | None = None
+    logprobs: bool | None = None
+    top_logprobs: int | None = None
+    parallel_tool_calls: bool | None = None
+    reasoning_effort: str | None = None
+    reasoning: dict[str, Any] | None = None
 
-    # OpenAI o1/o3 reasoning parameters
-    reasoning: dict[str, Any] | None = None  # {"effort": "low"|"medium"|"high", "summary": ...}
-    reasoning_effort: str | None = None  # "minimal", "low", "medium", or "high"
+    # Anthropic
+    thinking: dict[str, Any] | None = None
 
-    # Anthropic Claude extended thinking parameters
-    thinking: dict[str, Any] | None = None  # {"type": "enabled"|"disabled", "budget_tokens": N}
+    # Google
+    thinking_level: str | None = None
+    thinking_budget: int | None = None
+    include_thoughts: bool | None = None
+    safety_settings: list[dict[str, Any]] | None = None
 
-    # Google Gemini thinking parameters
-    thinking_level: str | None = None  # Thinking depth level
-    thinking_budget: int | None = None  # Token budget for thinking
-    include_thoughts: bool | None = None  # Include thinking in response
+    # Shared
+    verbosity: str | None = None
 
     @property
     def _llm_type(self) -> str:
@@ -138,20 +209,31 @@ def _llm_type(self) -> str:
     def _default_params(self) -> dict[str, Any]:
         """Get the default parameters for the normalized API request."""
         exclude_if_none = {
-            "frequency_penalty": self.frequency_penalty,
-            "presence_penalty": self.presence_penalty,
-            "top_p": self.top_p,
-            "stop": self.stop or None,  # Also exclude empty list for this
-            "n": self.n,
             "max_tokens": self.max_tokens,
             "temperature": self.temperature,
-            "verbosity": self.verbosity,
-            "reasoning": self.reasoning,
+            "top_p": self.top_p,
+            "top_k": self.top_k,
+            "stop": self.stop or None,
+            "n": self.n,
+            "frequency_penalty": self.frequency_penalty,
+            "presence_penalty": self.presence_penalty,
+            "seed": self.seed,
+            # OpenAI
+            "logit_bias": self.logit_bias,
+            "logprobs": self.logprobs,
+            "top_logprobs": self.top_logprobs,
+            "parallel_tool_calls": self.parallel_tool_calls,
             "reasoning_effort": self.reasoning_effort,
+            "reasoning": self.reasoning,
+            # Anthropic
             "thinking": self.thinking,
+            # Google
             "thinking_level": self.thinking_level,
             "thinking_budget": self.thinking_budget,
             "include_thoughts": self.include_thoughts,
+            "safety_settings": self.safety_settings,
+            # Shared
+            "verbosity": self.verbosity,
         }
 
         return {
@@ -181,6 +263,7 @@ def bind_tools(
         *,
         tool_choice: str | None = None,
         strict: bool | None = None,
+        parallel_tool_calls: bool | None = None,
         **kwargs: Any,
     ) -> Runnable[LanguageModelInput, AIMessage]:
         """Bind tools to the model with automatic tool choice detection."""
@@ -197,7 +280,7 @@ def bind_tools(
             tool_choice = "auto"
 
         if tool_choice in ["required", "auto"]:
-            tool_choice_object = {
+            tool_choice_object: dict[str, Any] = {
                 "type": tool_choice,
             }
         else:
@@ -206,11 +289,113 @@
                 "name": tool_choice,
             }
 
-        return super().bind(
-            tools=formatted_tools,
-            tool_choice=tool_choice_object,
+        bind_kwargs: dict[str, Any] = {
+            "tools": formatted_tools,
+            "tool_choice": tool_choice_object,
             **kwargs,
-        )
+        }
+        if parallel_tool_calls is not None:
+            bind_kwargs["parallel_tool_calls"] = parallel_tool_calls
+
+        return super().bind(**bind_kwargs)
+
+    def with_structured_output(
+        self,
+        schema: _DictOrPydanticClass | None = None,
+        *,
+        method: Literal["function_calling", "json_mode", "json_schema"] = "function_calling",
+        include_raw: bool = False,
+        strict: bool | None = None,
+        **kwargs: Any,
+    ) -> Runnable[LanguageModelInput, _DictOrPydantic]:
+        """Model wrapper that returns outputs formatted to match the given schema.
+
+        Args:
+            schema: The output schema as a Pydantic class, TypedDict, JSON Schema dict,
+                or OpenAI function schema.
+            method: Either "json_schema" (uses response_format) or "function_calling"
+                (uses tool calling to force the schema).
+            include_raw: If True, returns dict with 'raw', 'parsed', and 'parsing_error'.
+            strict: If True, model output is guaranteed to match the schema exactly.
+            **kwargs: Additional arguments passed to bind().
+
+        Returns:
+            A Runnable that parses the model output into the given schema.
+        """
+        if schema is None:
+            raise ValueError("schema must be specified.")
+
+        is_pydantic = isinstance(schema, type) and is_basemodel_subclass(schema)
+
+        if method == "function_calling":
+            tool_name = convert_to_openai_tool(schema)["function"]["name"]
+            llm = self.bind_tools(
+                [schema],
+                tool_choice="any",
+                strict=strict,
+                ls_structured_output_format={
+                    "kwargs": {"method": "function_calling", "strict": strict},
+                    "schema": schema,
+                },
+                **kwargs,
+            )
+            if is_pydantic:
+                output_parser: Runnable = PydanticToolsParser(
+                    tools=[schema],  # type: ignore[list-item]
+                    first_tool_only=True,
+                )
+            else:
+                output_parser = JsonOutputKeyToolsParser(key_name=tool_name, first_tool_only=True)
+        elif method == "json_mode":
+            llm = self.bind(
+                response_format={"type": "json_object"},
+                ls_structured_output_format={
+                    "kwargs": {"method": method},
+                    "schema": schema,
+                },
+                **kwargs,
+            )
+            if is_pydantic:
+                from langchain_core.output_parsers import PydanticOutputParser
+
+                output_parser = PydanticOutputParser(pydantic_object=schema)  # type: ignore[arg-type]
+            else:
+                output_parser = JsonOutputParser()
+        elif method == "json_schema":
+            response_format = _build_normalized_response_format(schema, strict=strict)
+            llm = self.bind(
+                response_format=response_format,
+                ls_structured_output_format={
+                    "kwargs": {"method": method, "strict": strict},
+                    "schema": convert_to_openai_tool(schema),
+                },
+                **kwargs,
+            )
+            if is_pydantic:
+                output_parser = RunnableLambda(
+                    partial(_oai_structured_outputs_parser, schema=cast(type, schema))
+                ).with_types(output_type=cast(type, schema))
+            else:
+                output_parser = JsonOutputParser()
+        else:
+            raise ValueError(
+                f"Unrecognized method: '{method}'. "
+                "Expected 'function_calling', 'json_mode', or 'json_schema'."
+            )
+
+        if include_raw:
+            parser_assign = RunnablePassthrough.assign(
+                parsed=lambda x: output_parser.invoke(x["raw"]),
+                parsing_error=lambda _: None,
+            )
+            parser_none = RunnablePassthrough.assign(
+                parsed=lambda _: None,
+            )
+            parser_with_fallback = parser_assign.with_fallbacks(
+                [parser_none], exception_key="parsing_error"
+            )
+            return RunnablePassthrough.assign(raw=llm) | parser_with_fallback  # type: ignore[return-value]
+        return llm | output_parser  # type: ignore[return-value]
 
     def _preprocess_request(
         self, messages: list[BaseMessage], stop: list[str] | None = None, **kwargs: Any
```
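To make the new `json_schema` path concrete, the following standalone sketch shows the `response_format` payload that the Pydantic branch of `_build_normalized_response_format` builds. The logic is repeated inline so the snippet runs without the package; the `Invoice` schema is illustrative.

```python
from pydantic import BaseModel


class Invoice(BaseModel):
    """Illustrative schema."""
    vendor: str
    total: float


# Mirrors the Pydantic branch of _build_normalized_response_format in the diff above.
response_format = {
    "type": "json_schema",
    "json_schema": {
        "name": Invoice.__name__,
        "schema": Invoice.model_json_schema(),
        "strict": True,  # only included when strict is not None
    },
}

assert response_format["json_schema"]["name"] == "Invoice"
```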

src/uipath/llm_client/__init__.py

Lines changed: 3 additions & 0 deletions
```diff
@@ -28,6 +28,7 @@
 """
 
 from uipath.llm_client.__version__ import __version__
+from uipath.llm_client.clients.normalized import UiPathNormalizedClient
 from uipath.llm_client.httpx_client import (
     UiPathHttpxAsyncClient,
     UiPathHttpxClient,
@@ -60,6 +61,8 @@
     "get_default_client_settings",
     "PlatformSettings",
     "LLMGatewaySettings",
+    # Normalized client
+    "UiPathNormalizedClient",
     # HTTPX clients
     "UiPathHttpxClient",
     "UiPathHttpxAsyncClient",
```