From fc1ab1b7c80d988392520bba785115dd912bae7e Mon Sep 17 00:00:00 2001 From: Your Name Date: Wed, 11 Mar 2026 21:49:10 -0400 Subject: [PATCH 01/14] feat: add Anima model support --- anima-plan.md | 955 ++++++ invokeai/app/api/dependencies.py | 2 + invokeai/app/invocations/anima_denoise.py | 351 +++ .../app/invocations/anima_latents_to_image.py | 111 + .../app/invocations/anima_model_loader.py | 98 + .../app/invocations/anima_text_encoder.py | 189 ++ invokeai/app/invocations/fields.py | 10 + invokeai/app/invocations/primitives.py | 12 + invokeai/app/util/step_callback.py | 27 + invokeai/backend/anima/__init__.py | 7 + invokeai/backend/anima/anima_transformer.py | 1058 +++++++ invokeai/backend/anima/conditioning_data.py | 30 + invokeai/backend/flux/schedulers.py | 20 + .../backend/model_manager/configs/factory.py | 4 + .../backend/model_manager/configs/main.py | 61 + .../model_manager/configs/qwen3_encoder.py | 11 +- invokeai/backend/model_manager/configs/vae.py | 37 + .../model_manager/load/model_loaders/anima.py | 127 + .../model_manager/load/model_loaders/vae.py | 11 +- .../backend/model_manager/starter_models.py | 40 + invokeai/backend/model_manager/taxonomy.py | 5 + .../diffusion/conditioning_data.py | 26 + .../controlLayers/store/paramsSlice.ts | 32 + .../src/features/controlLayers/store/types.ts | 7 + .../web/src/features/modelManagerV2/models.ts | 5 + .../web/src/features/nodes/types/common.ts | 5 +- .../web/src/features/nodes/types/constants.ts | 1 + .../util/graph/generation/buildAnimaGraph.ts | 184 ++ .../nodes/util/graph/graphBuilderUtils.ts | 3 +- .../src/features/nodes/util/graph/types.ts | 6 +- .../features/parameters/types/constants.ts | 4 + .../parameters/util/optimalDimension.ts | 6 +- .../queue/hooks/useEnqueueGenerate.ts | 3 + .../frontend/web/src/services/api/schema.ts | 2801 ++++------------- 34 files changed, 4081 insertions(+), 2168 deletions(-) create mode 100644 anima-plan.md create mode 100644 invokeai/app/invocations/anima_denoise.py 
create mode 100644 invokeai/app/invocations/anima_latents_to_image.py create mode 100644 invokeai/app/invocations/anima_model_loader.py create mode 100644 invokeai/app/invocations/anima_text_encoder.py create mode 100644 invokeai/backend/anima/__init__.py create mode 100644 invokeai/backend/anima/anima_transformer.py create mode 100644 invokeai/backend/anima/conditioning_data.py create mode 100644 invokeai/backend/model_manager/load/model_loaders/anima.py create mode 100644 invokeai/frontend/web/src/features/nodes/util/graph/generation/buildAnimaGraph.ts diff --git a/anima-plan.md b/anima-plan.md new file mode 100644 index 00000000000..95ef366e79b --- /dev/null +++ b/anima-plan.md @@ -0,0 +1,955 @@ +# Comprehensive Plan: Adding Anima Model Support to InvokeAI + +## 1. Executive Summary + +**Anima** is a 2-billion-parameter anime-focused text-to-image model created by CircleStone Labs and Comfy Org, built on top of NVIDIA's Cosmos Predict2 architecture. It uses a **Cosmos DiT backbone** (`MiniTrainDIT`), a **Qwen3 0.6B text encoder**, a custom **LLM Adapter** (6-layer cross-attention transformer that fuses Qwen3 hidden states with learned T5-XXL token embeddings), and a **Qwen-Image VAE** (`AutoencoderKLQwenImage` — a fine-tuned Wan 2.1 VAE with 16 latent channels). + +The model uses **rectified flow** sampling (shift=3.0, multiplier=1000) — the same `CONST` + `ModelSamplingDiscreteFlow` formulation used by Flux and Z-Image, meaning the existing `FlowMatchEulerDiscreteScheduler` and `FlowMatchHeunDiscreteScheduler` can be reused. The initial implementation targets **basic text-to-image generation only** — LoRA, ControlNet, inpainting, regional prompting, and img2img will come later. + +**Key architectural difference from all existing InvokeAI models**: The LLM Adapter is a custom component embedded inside the diffusion model that cross-attends between Qwen3 encoder hidden states and learned T5-XXL token ID embeddings. 
This means the text encoding pipeline produces *two* outputs (Qwen3 hidden states + T5 token IDs) that are both fed into the transformer. + +--- + +## 2. Model Architecture Reference + +### 2.1 Components Overview + +| Component | Architecture | Source | Size | +|-----------|-------------|--------|------| +| **Diffusion Transformer** | `MiniTrainDIT` (Cosmos Predict2 DiT) + `LLMAdapter` | Single-file checkpoint (`anima-preview2.safetensors`) | ~2B params | +| **Text Encoder** | Qwen3 0.6B (causal LM, hidden states extracted) | Single-file (`qwen_3_06b_base.safetensors`) | ~0.6B params | +| **T5-XXL Tokenizer** | SentencePiece tokenizer only (no T5 model weights needed) | Bundled with transformers library | ~2MB | +| **VAE** | `AutoencoderKLQwenImage` (fine-tuned Wan 2.1 VAE) | Single-file (`qwen_image_vae.safetensors`) | ~100M params | + +### 2.2 Text Conditioning Pipeline + +``` +User Prompt + ├──> Qwen3 0.6B Tokenizer (Qwen2Tokenizer) + │ └──> Qwen3 0.6B Model → second-to-last hidden states [seq_len, 1024] + │ + ├──> T5-XXL Tokenizer (T5TokenizerFast) → token IDs [seq_len] (no T5 model needed) + │ + └──> LLM Adapter (inside transformer) + ├── Embed T5 token IDs via learned Embedding(32128, 1024) + ├── Cross-attend T5 embeddings ← Qwen3 hidden states (6 transformer layers with RoPE) + └── Output: [512, 1024] conditioning tensor (zero-padded if < 512 tokens) + └──> Fed to Cosmos DiT cross-attention layers +``` + +### 2.3 Latent Space + +- **Channels**: 16 +- **Spatial compression**: 8× (VAE downsamples by 2^3) +- **Dimensions**: 3D (`[B, C, T, H, W]`) — temporal dim is 1 for single images +- **Normalization**: Mean/std normalization using Wan 2.1 constants (not simple scaling) + - `process_in(latent) = (latent - latents_mean) / latents_std` + - `process_out(latent) = latent * latents_std + latents_mean` + +### 2.4 Sampling / Noise Schedule + +- **Type**: Rectified Flow (`CONST` model — `denoised = input - output * sigma`) +- **Shift**: 3.0 (via 
`time_snr_shift(alpha=3.0, t)` — same formula as Flux) +- **Multiplier**: 1000 +- **Sigma range**: 0.0 (clean) to 1.0 (noise), shifted by factor 3.0 +- **Compatible schedulers**: `FlowMatchEulerDiscreteScheduler`, `FlowMatchHeunDiscreteScheduler` (already in InvokeAI for Z-Image/Flux) +- **Recommended settings**: CFG 4–5, 30–50 steps + +### 2.5 Default Model Configuration (from ComfyUI) + +```python +# MiniTrainDIT default constructor args for Anima: +model_channels = 2048 # Transformer hidden dim +num_blocks = 28 # Number of DiT blocks +num_heads = 32 # Attention heads +crossattn_emb_channels = 1024 # Must match LLM Adapter output dim +patch_spatial = 2 # Spatial patch size +patch_temporal = 1 # Temporal patch size (1 for images) +in_channels = 16 # Latent channels +out_channels = 16 # Output channels +max_img_h = 240 # Max height in patches (240 * 2 * 8 = 3840px) +max_img_w = 240 # Max width in patches +max_frames = 1 # Single image +``` + +--- + +## 3. ComfyUI Reference Implementation + +The following ComfyUI source files contain the complete Anima implementation and should be reverse-engineered: + +| File | URL | Purpose | +|------|-----|---------| +| **Anima model** | [comfy/ldm/anima/model.py](https://github.com/comfyanonymous/ComfyUI/blob/master/comfy/ldm/anima/model.py) | `Anima(MiniTrainDIT)` + `LLMAdapter` + `RotaryEmbedding` + `TransformerBlock` + `Attention` | +| **Cosmos DiT base** | [comfy/ldm/cosmos/predict2.py](https://github.com/comfyanonymous/ComfyUI/blob/master/comfy/ldm/cosmos/predict2.py) | `MiniTrainDIT` — the Cosmos Predict2 backbone that Anima extends | +| **Text encoder** | [comfy/text_encoders/anima.py](https://github.com/comfyanonymous/ComfyUI/blob/master/comfy/text_encoders/anima.py) | Dual tokenizer (Qwen3 + T5-XXL), `AnimaTEModel` that outputs Qwen3 hidden states + T5 token IDs | +| **Model registration** | [comfy/supported_models.py](https://github.com/comfyanonymous/ComfyUI/blob/master/comfy/supported_models.py) | `Anima` config 
class: shift=3.0, `Wan21` latent format, dtype support | +| **Model base** | [comfy/model_base.py](https://github.com/comfyanonymous/ComfyUI/blob/master/comfy/model_base.py) | `Anima(BaseModel)` — `ModelType.FLOW`, pre-processes text embeds via LLM adapter in `extra_conds()` | +| **Latent format** | [comfy/latent_formats.py](https://github.com/comfyanonymous/ComfyUI/blob/master/comfy/latent_formats.py) | `Wan21` — 16ch, 3D, mean/std normalization constants | +| **Sampling** | [comfy/model_sampling.py](https://github.com/comfyanonymous/ComfyUI/blob/master/comfy/model_sampling.py) | `CONST` + `ModelSamplingDiscreteFlow` — rectified flow with shift | + +### 3.1 LLM Adapter Architecture (from ComfyUI source) + +The `LLMAdapter` is the critical custom component. From [comfy/ldm/anima/model.py](https://github.com/comfyanonymous/ComfyUI/blob/master/comfy/ldm/anima/model.py): + +- **Input**: `source_hidden_states` (Qwen3 output, dim=1024) + `target_input_ids` (T5-XXL token IDs) +- **Embedding**: `Embedding(32128, 1024)` — maps T5 token IDs to dense vectors +- **Projection**: `in_proj` (identity when `model_dim == target_dim`) +- **Positional encoding**: `RotaryEmbedding(head_dim=64)` — applied separately to query (target) and key (source) sequences +- **Blocks**: 6 × `TransformerBlock` each containing: + - Self-attention on the target (T5 embedding) sequence + - Cross-attention: target queries attend to source (Qwen3) keys/values + - MLP with GELU activation (4× expansion) + - RMSNorm (eps=1e-6) before each sub-layer +- **Output**: `norm(out_proj(x))` → `[batch, seq_len, 1024]`, zero-padded to 512 tokens + +### 3.2 Key Insight: LLM Adapter Lives Inside the Checkpoint + +The LLM Adapter weights are stored under the `llm_adapter.*` prefix in the main checkpoint file (`anima-preview2.safetensors`). They are **not** a separate file. The Anima class's `forward()` calls `preprocess_text_embeds()` which runs the adapter before passing to `MiniTrainDIT.forward()`. 
+ +### 3.3 Full ComfyUI Anima Model Source + +```python +# comfy/ldm/anima/model.py — FULL SOURCE +from comfy.ldm.cosmos.predict2 import MiniTrainDIT +import torch +from torch import nn +import torch.nn.functional as F + + +def rotate_half(x): + x1 = x[..., : x.shape[-1] // 2] + x2 = x[..., x.shape[-1] // 2 :] + return torch.cat((-x2, x1), dim=-1) + + +def apply_rotary_pos_emb(x, cos, sin, unsqueeze_dim=1): + cos = cos.unsqueeze(unsqueeze_dim) + sin = sin.unsqueeze(unsqueeze_dim) + x_embed = (x * cos) + (rotate_half(x) * sin) + return x_embed + + +class RotaryEmbedding(nn.Module): + def __init__(self, head_dim): + super().__init__() + self.rope_theta = 10000 + inv_freq = 1.0 / (self.rope_theta ** (torch.arange(0, head_dim, 2, dtype=torch.int64).to(dtype=torch.float) / head_dim)) + self.register_buffer("inv_freq", inv_freq, persistent=False) + + @torch.no_grad() + def forward(self, x, position_ids): + inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device) + position_ids_expanded = position_ids[:, None, :].float() + + device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu" + with torch.autocast(device_type=device_type, enabled=False): # Force float32 + freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2) + emb = torch.cat((freqs, freqs), dim=-1) + cos = emb.cos() + sin = emb.sin() + + return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype) + + +class Attention(nn.Module): + def __init__(self, query_dim, context_dim, n_heads, head_dim, device=None, dtype=None, operations=None): + super().__init__() + + inner_dim = head_dim * n_heads + self.n_heads = n_heads + self.head_dim = head_dim + self.query_dim = query_dim + self.context_dim = context_dim + + self.q_proj = operations.Linear(query_dim, inner_dim, bias=False, device=device, dtype=dtype) + self.q_norm = operations.RMSNorm(self.head_dim, eps=1e-6, device=device, dtype=dtype) + + self.k_proj = 
operations.Linear(context_dim, inner_dim, bias=False, device=device, dtype=dtype) + self.k_norm = operations.RMSNorm(self.head_dim, eps=1e-6, device=device, dtype=dtype) + + self.v_proj = operations.Linear(context_dim, inner_dim, bias=False, device=device, dtype=dtype) + + self.o_proj = operations.Linear(inner_dim, query_dim, bias=False, device=device, dtype=dtype) + + def forward(self, x, mask=None, context=None, position_embeddings=None, position_embeddings_context=None): + context = x if context is None else context + input_shape = x.shape[:-1] + q_shape = (*input_shape, self.n_heads, self.head_dim) + context_shape = context.shape[:-1] + kv_shape = (*context_shape, self.n_heads, self.head_dim) + + query_states = self.q_norm(self.q_proj(x).view(q_shape)).transpose(1, 2) + key_states = self.k_norm(self.k_proj(context).view(kv_shape)).transpose(1, 2) + value_states = self.v_proj(context).view(kv_shape).transpose(1, 2) + + if position_embeddings is not None: + assert position_embeddings_context is not None + cos, sin = position_embeddings + query_states = apply_rotary_pos_emb(query_states, cos, sin) + cos, sin = position_embeddings_context + key_states = apply_rotary_pos_emb(key_states, cos, sin) + + attn_output = F.scaled_dot_product_attention(query_states, key_states, value_states, attn_mask=mask) + + attn_output = attn_output.transpose(1, 2).reshape(*input_shape, -1).contiguous() + attn_output = self.o_proj(attn_output) + return attn_output + + def init_weights(self): + torch.nn.init.zeros_(self.o_proj.weight) + + +class TransformerBlock(nn.Module): + def __init__(self, source_dim, model_dim, num_heads=16, mlp_ratio=4.0, use_self_attn=False, layer_norm=False, device=None, dtype=None, operations=None): + super().__init__() + self.use_self_attn = use_self_attn + + if self.use_self_attn: + self.norm_self_attn = operations.LayerNorm(model_dim, device=device, dtype=dtype) if layer_norm else operations.RMSNorm(model_dim, eps=1e-6, device=device, dtype=dtype) + 
self.self_attn = Attention( + query_dim=model_dim, + context_dim=model_dim, + n_heads=num_heads, + head_dim=model_dim//num_heads, + device=device, + dtype=dtype, + operations=operations, + ) + + self.norm_cross_attn = operations.LayerNorm(model_dim, device=device, dtype=dtype) if layer_norm else operations.RMSNorm(model_dim, eps=1e-6, device=device, dtype=dtype) + self.cross_attn = Attention( + query_dim=model_dim, + context_dim=source_dim, + n_heads=num_heads, + head_dim=model_dim//num_heads, + device=device, + dtype=dtype, + operations=operations, + ) + + self.norm_mlp = operations.LayerNorm(model_dim, device=device, dtype=dtype) if layer_norm else operations.RMSNorm(model_dim, eps=1e-6, device=device, dtype=dtype) + self.mlp = nn.Sequential( + operations.Linear(model_dim, int(model_dim * mlp_ratio), device=device, dtype=dtype), + nn.GELU(), + operations.Linear(int(model_dim * mlp_ratio), model_dim, device=device, dtype=dtype) + ) + + def forward(self, x, context, target_attention_mask=None, source_attention_mask=None, position_embeddings=None, position_embeddings_context=None): + if self.use_self_attn: + normed = self.norm_self_attn(x) + attn_out = self.self_attn(normed, mask=target_attention_mask, position_embeddings=position_embeddings, position_embeddings_context=position_embeddings) + x = x + attn_out + + normed = self.norm_cross_attn(x) + attn_out = self.cross_attn(normed, mask=source_attention_mask, context=context, position_embeddings=position_embeddings, position_embeddings_context=position_embeddings_context) + x = x + attn_out + + x = x + self.mlp(self.norm_mlp(x)) + return x + + def init_weights(self): + torch.nn.init.zeros_(self.mlp[2].weight) + self.cross_attn.init_weights() + + +class LLMAdapter(nn.Module): + def __init__( + self, + source_dim=1024, + target_dim=1024, + model_dim=1024, + num_layers=6, + num_heads=16, + use_self_attn=True, + layer_norm=False, + device=None, + dtype=None, + operations=None, + ): + super().__init__() + + self.embed = 
operations.Embedding(32128, target_dim, device=device, dtype=dtype) + if model_dim != target_dim: + self.in_proj = operations.Linear(target_dim, model_dim, device=device, dtype=dtype) + else: + self.in_proj = nn.Identity() + self.rotary_emb = RotaryEmbedding(model_dim//num_heads) + self.blocks = nn.ModuleList([ + TransformerBlock(source_dim, model_dim, num_heads=num_heads, use_self_attn=use_self_attn, layer_norm=layer_norm, device=device, dtype=dtype, operations=operations) + for _ in range(num_layers) + ]) + self.out_proj = operations.Linear(model_dim, target_dim, device=device, dtype=dtype) + self.norm = operations.RMSNorm(target_dim, eps=1e-6, device=device, dtype=dtype) + + def forward(self, source_hidden_states, target_input_ids, target_attention_mask=None, source_attention_mask=None): + if target_attention_mask is not None: + target_attention_mask = target_attention_mask.to(torch.bool) + if target_attention_mask.ndim == 2: + target_attention_mask = target_attention_mask.unsqueeze(1).unsqueeze(1) + + if source_attention_mask is not None: + source_attention_mask = source_attention_mask.to(torch.bool) + if source_attention_mask.ndim == 2: + source_attention_mask = source_attention_mask.unsqueeze(1).unsqueeze(1) + + context = source_hidden_states + x = self.in_proj(self.embed(target_input_ids, out_dtype=context.dtype)) + position_ids = torch.arange(x.shape[1], device=x.device).unsqueeze(0) + position_ids_context = torch.arange(context.shape[1], device=x.device).unsqueeze(0) + position_embeddings = self.rotary_emb(x, position_ids) + position_embeddings_context = self.rotary_emb(x, position_ids_context) + for block in self.blocks: + x = block(x, context, + target_attention_mask=target_attention_mask, + source_attention_mask=source_attention_mask, + position_embeddings=position_embeddings, + position_embeddings_context=position_embeddings_context) + return self.norm(self.out_proj(x)) + + +class Anima(MiniTrainDIT): + def __init__(self, *args, **kwargs): + 
super().__init__(*args, **kwargs) + self.llm_adapter = LLMAdapter(device=kwargs.get("device"), dtype=kwargs.get("dtype"), operations=kwargs.get("operations")) + + def preprocess_text_embeds(self, text_embeds, text_ids, t5xxl_weights=None): + if text_ids is not None: + out = self.llm_adapter(text_embeds, text_ids) + if t5xxl_weights is not None: + out = out * t5xxl_weights + + if out.shape[1] < 512: + out = torch.nn.functional.pad(out, (0, 0, 0, 512 - out.shape[1])) + return out + else: + return text_embeds + + def forward(self, x, timesteps, context, **kwargs): + t5xxl_ids = kwargs.pop("t5xxl_ids", None) + if t5xxl_ids is not None: + context = self.preprocess_text_embeds(context, t5xxl_ids, t5xxl_weights=kwargs.pop("t5xxl_weights", None)) + return super().forward(x, timesteps, context, **kwargs) +``` + +### 3.4 ComfyUI Text Encoder Source + +```python +# comfy/text_encoders/anima.py — FULL SOURCE +from transformers import Qwen2Tokenizer, T5TokenizerFast +import comfy.text_encoders.llama +from comfy import sd1_clip +import os +import torch + + +class Qwen3Tokenizer(sd1_clip.SDTokenizer): + def __init__(self, embedding_directory=None, tokenizer_data={}): + tokenizer_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "qwen25_tokenizer") + super().__init__(tokenizer_path, + pad_with_end=False, embedding_directory=embedding_directory, embedding_size=1024, + embedding_key='qwen3_06b', tokenizer_class=Qwen2Tokenizer, + has_start_token=False, has_end_token=False, pad_to_max_length=False, max_length=99999999, + min_length=1, pad_token=151643, tokenizer_data=tokenizer_data) + +class T5XXLTokenizer(sd1_clip.SDTokenizer): + def __init__(self, embedding_directory=None, tokenizer_data={}): + tokenizer_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "t5_tokenizer") + super().__init__(tokenizer_path, + embedding_directory=embedding_directory, pad_with_end=False, embedding_size=4096, + embedding_key='t5xxl', tokenizer_class=T5TokenizerFast, 
has_start_token=False, + pad_to_max_length=False, max_length=99999999, min_length=1, + tokenizer_data=tokenizer_data) + +class AnimaTokenizer: + def __init__(self, embedding_directory=None, tokenizer_data={}): + self.qwen3_06b = Qwen3Tokenizer(embedding_directory=embedding_directory, tokenizer_data=tokenizer_data) + self.t5xxl = T5XXLTokenizer(embedding_directory=embedding_directory, tokenizer_data=tokenizer_data) + + def tokenize_with_weights(self, text:str, return_word_ids=False, **kwargs): + out = {} + qwen_ids = self.qwen3_06b.tokenize_with_weights(text, return_word_ids, **kwargs) + out["qwen3_06b"] = [[(k[0], 1.0, k[2]) if return_word_ids else (k[0], 1.0) for k in inner_list] for inner_list in qwen_ids] + out["t5xxl"] = self.t5xxl.tokenize_with_weights(text, return_word_ids, **kwargs) + return out + + def untokenize(self, token_weight_pair): + return self.t5xxl.untokenize(token_weight_pair) + + def state_dict(self): + return {} + + def decode(self, token_ids, **kwargs): + return self.qwen3_06b.decode(token_ids, **kwargs) + +class Qwen3_06BModel(sd1_clip.SDClipModel): + def __init__(self, device="cpu", layer="last", layer_idx=None, dtype=None, attention_mask=True, model_options={}): + super().__init__(device=device, layer=layer, layer_idx=layer_idx, textmodel_json_config={}, + dtype=dtype, special_tokens={"pad": 151643}, layer_norm_hidden_state=False, + model_class=comfy.text_encoders.llama.Qwen3_06B, enable_attention_masks=attention_mask, + return_attention_masks=attention_mask, model_options=model_options) + + +class AnimaTEModel(sd1_clip.SD1ClipModel): + def __init__(self, device="cpu", dtype=None, model_options={}): + super().__init__(device=device, dtype=dtype, + name="qwen3_06b", clip_model=Qwen3_06BModel, model_options=model_options) + + def encode_token_weights(self, token_weight_pairs): + out = super().encode_token_weights(token_weight_pairs) + out[2]["t5xxl_ids"] = torch.tensor(list(map(lambda a: a[0], token_weight_pairs["t5xxl"][0])), 
dtype=torch.int) + out[2]["t5xxl_weights"] = torch.tensor(list(map(lambda a: a[1], token_weight_pairs["t5xxl"][0]))) + return out + +def te(dtype_llama=None, llama_quantization_metadata=None): + class AnimaTEModel_(AnimaTEModel): + def __init__(self, device="cpu", dtype=None, model_options={}): + if dtype_llama is not None: + dtype = dtype_llama + if llama_quantization_metadata is not None: + model_options = model_options.copy() + model_options["quantization_metadata"] = llama_quantization_metadata + super().__init__(device=device, dtype=dtype, model_options=model_options) + return AnimaTEModel_ +``` + +### 3.5 ComfyUI Model Registration and Base + +```python +# comfy/supported_models.py — Anima class (excerpt) +class Anima(supported_models_base.BASE): + unet_config = { + "image_model": "anima", + } + + sampling_settings = { + "multiplier": 1.0, + "shift": 3.0, + } + + unet_extra_config = {} + latent_format = latent_formats.Wan21 + + memory_usage_factor = 1.0 + + supported_inference_dtypes = [torch.bfloat16, torch.float16, torch.float32] + + def get_model(self, state_dict, prefix="", device=None): + out = model_base.Anima(self, device=device) + return out + + def clip_target(self, state_dict={}): + pref = self.text_encoder_key_prefix[0] + detect = comfy.text_encoders.hunyuan_video.llama_detect(state_dict, + "{}qwen3_06b.transformer.".format(pref)) + return supported_models_base.ClipTarget( + comfy.text_encoders.anima.AnimaTokenizer, + comfy.text_encoders.anima.te(**detect)) + + def set_inference_dtype(self, dtype, manual_cast_dtype, **kwargs): + self.memory_usage_factor = (self.unet_config.get("model_channels", 2048) / 2048) * 0.95 + if dtype is torch.float16: + self.memory_usage_factor *= 1.4 + return super().set_inference_dtype(dtype, manual_cast_dtype, **kwargs) +``` + +```python +# comfy/model_base.py — Anima class (excerpt) +class Anima(BaseModel): + def __init__(self, model_config, model_type=ModelType.FLOW, device=None): + super().__init__(model_config, 
model_type, device=device, + unet_model=comfy.ldm.anima.model.Anima) + + def extra_conds(self, **kwargs): + out = super().extra_conds(**kwargs) + cross_attn = kwargs.get("cross_attn", None) + t5xxl_ids = kwargs.get("t5xxl_ids", None) + t5xxl_weights = kwargs.get("t5xxl_weights", None) + device = kwargs["device"] + if cross_attn is not None: + if t5xxl_ids is not None: + if t5xxl_weights is not None: + t5xxl_weights = t5xxl_weights.unsqueeze(0).unsqueeze(-1).to(cross_attn) + t5xxl_ids = t5xxl_ids.unsqueeze(0) + + if torch.is_inference_mode_enabled(): # if not we are training + cross_attn = self.diffusion_model.preprocess_text_embeds( + cross_attn.to(device=device, dtype=self.get_dtype_inference()), + t5xxl_ids.to(device=device), + t5xxl_weights=t5xxl_weights.to(device=device, dtype=self.get_dtype_inference())) + else: + out['t5xxl_ids'] = comfy.conds.CONDRegular(t5xxl_ids) + out['t5xxl_weights'] = comfy.conds.CONDRegular(t5xxl_weights) + + out['c_crossattn'] = comfy.conds.CONDRegular(cross_attn) + return out +``` + +### 3.6 ComfyUI Latent Format (Wan21) + +```python +# comfy/latent_formats.py — Wan21 class (used by Anima) +class Wan21(LatentFormat): + latent_channels = 16 + latent_dimensions = 3 + + latent_rgb_factors = [ + [-0.1299, -0.1692, 0.2932], + [ 0.0671, 0.0406, 0.0442], + [ 0.3568, 0.2548, 0.1747], + [ 0.0372, 0.2344, 0.1420], + [ 0.0313, 0.0189, -0.0328], + [ 0.0296, -0.0956, -0.0665], + [-0.3477, -0.4059, -0.2925], + [ 0.0166, 0.1902, 0.1975], + [-0.0412, 0.0267, -0.1364], + [-0.1293, 0.0740, 0.1636], + [ 0.0680, 0.3019, 0.1128], + [ 0.0032, 0.0581, 0.0639], + [-0.1251, 0.0927, 0.1699], + [ 0.0060, -0.0633, 0.0005], + [ 0.3477, 0.2275, 0.2950], + [ 0.1984, 0.0913, 0.1861] + ] + + latent_rgb_factors_bias = [-0.1835, -0.0868, -0.3360] + + def __init__(self): + self.scale_factor = 1.0 + self.latents_mean = torch.tensor([ + -0.7571, -0.7089, -0.9113, 0.1075, -0.1745, 0.9653, -0.1517, 1.5508, + 0.4134, -0.0715, 0.5517, -0.3632, -0.1922, -0.9497, 0.2503, 
-0.2921 + ]).view(1, 16, 1, 1, 1) + self.latents_std = torch.tensor([ + 2.8184, 1.4541, 2.3275, 2.6558, 1.2196, 1.7708, 2.6052, 2.0743, + 3.2687, 2.1526, 2.8652, 1.5579, 1.6382, 1.1253, 2.8251, 1.9160 + ]).view(1, 16, 1, 1, 1) + + self.taesd_decoder_name = "lighttaew2_1" + + def process_in(self, latent): + latents_mean = self.latents_mean.to(latent.device, latent.dtype) + latents_std = self.latents_std.to(latent.device, latent.dtype) + return (latent - latents_mean) * self.scale_factor / latents_std + + def process_out(self, latent): + latents_mean = self.latents_mean.to(latent.device, latent.dtype) + latents_std = self.latents_std.to(latent.device, latent.dtype) + return latent * latents_std / self.scale_factor + latents_mean +``` + +### 3.7 ComfyUI Sampling Constants + +```python +# comfy/model_sampling.py — relevant classes + +def time_snr_shift(alpha, t): + if alpha == 1.0: + return t + return alpha * t / (1 + (alpha - 1) * t) + +class CONST: + def calculate_input(self, sigma, noise): + return noise + + def calculate_denoised(self, sigma, model_output, model_input): + sigma = reshape_sigma(sigma, model_output.ndim) + return model_input - model_output * sigma + + def noise_scaling(self, sigma, noise, latent_image, max_denoise=False): + sigma = reshape_sigma(sigma, noise.ndim) + return sigma * noise + (1.0 - sigma) * latent_image + + def inverse_noise_scaling(self, sigma, latent): + sigma = reshape_sigma(sigma, latent.ndim) + return latent / (1.0 - sigma) + +class ModelSamplingDiscreteFlow(torch.nn.Module): + def __init__(self, model_config=None): + super().__init__() + if model_config is not None: + sampling_settings = model_config.sampling_settings + else: + sampling_settings = {} + self.set_parameters(shift=sampling_settings.get("shift", 1.0), multiplier=sampling_settings.get("multiplier", 1000)) + + def set_parameters(self, shift=1.0, timesteps=1000, multiplier=1000): + self.shift = shift + self.multiplier = multiplier + ts = self.sigma((torch.arange(1, 
timesteps + 1, 1) / timesteps) * multiplier) + self.register_buffer('sigmas', ts) + + def timestep(self, sigma): + return sigma * self.multiplier + + def sigma(self, timestep): + return time_snr_shift(self.shift, timestep / self.multiplier) + + def percent_to_sigma(self, percent): + if percent <= 0.0: + return 1.0 + if percent >= 1.0: + return 0.0 + return time_snr_shift(self.shift, 1.0 - percent) +``` + +--- + +## 4. Existing InvokeAI Patterns (Z-Image as Template) + +The Z-Image integration is the closest architectural template. Here are the key files and patterns: + +### 4.1 Taxonomy / Enums + +- `invokeai/backend/model_manager/config/enums.py` — `BaseModelType` enum (Z-Image at line ~32), `ModelType` enum, `Qwen3Variant` enum (line ~75, has `Qwen3_4B` and `Qwen3_8B`) + +### 4.2 Model Configs + +- `invokeai/backend/model_manager/config/configs/main.py` — Z-Image configs at lines 1079–1167: `Main_Diffusers_ZImage_Config`, `Main_Checkpoint_ZImage_Config`, `Main_GGUFQuantized_ZImage_Config`, each with `probe()` and `_validate_z_image_checkpoint()` helper +- `invokeai/backend/model_manager/config/configs/qwen3_encoder.py` — Qwen3 encoder configs (lines 19–269): directory, checkpoint, and GGUF formats, with variant detection by `hidden_size` +- `invokeai/backend/model_manager/config/configs/factory.py` — `AnyModelConfig` discriminated union (lines 149–255) + +### 4.3 Model Loaders + +- `invokeai/backend/model_manager/load/model_loaders/z_image.py` — 1063 lines: `ZImageDiffusersLoader`, `ZImageCheckpointLoader`, `ZImageGGUFLoader`, plus Qwen3 encoder loaders and ControlNet loader + +### 4.4 Invocation Nodes + +| File | Node | Purpose | +|------|------|---------| +| `invokeai/app/invocations/z_image_model_loader.py` | `ZImageModelLoaderInvocation` | Loads transformer + Qwen3 encoder + VAE | +| `invokeai/app/invocations/z_image_text_encoder.py` | `ZImageTextEncoderInvocation` | Qwen3 with chat template → second-to-last hidden state | +| 
`invokeai/app/invocations/z_image_denoise.py` | `ZImageDenoiseInvocation` | Full denoising loop (771 lines) with flow matching | +| `invokeai/app/invocations/z_image_latents_to_image.py` | `ZImageLatentsToImageInvocation` | VAE decode (supports both AutoencoderKL and FluxAutoEncoder) | +| `invokeai/app/invocations/z_image_image_to_latents.py` | `ZImageImageToLatentsInvocation` | VAE encode | + +### 4.5 Backend Module + +- `invokeai/backend/z_image/` — `conditioning_data.py`, `z_image_patchify.py`, `z_image_regional_prompting.py`, control extensions, etc. + +### 4.6 Frontend + +- `frontend/web/src/features/nodes/util/graph/generation/buildZImageGraph.ts` — Graph builder for Z-Image generation +- `frontend/web/src/features/nodes/types/constants.ts` — UI constants (color, display name, grid size, features per base) +- `frontend/web/src/features/nodes/util/graph/generation/buildGraph.ts` — Main dispatch switch + +### 4.7 Schedulers + +- `invokeai/backend/flux/flow_match_schedulers.py` — `FLOW_MATCH_SCHEDULER_MAP`, `FLOW_MATCH_SCHEDULER_LABEL_MAP`: Euler, Heun, LCM — shared by Flux and Z-Image + +### 4.8 Starter Models + +- `invokeai/app/services/model_install/model_install_default.py` — Z-Image starter models at lines 803–860, Qwen3 encoder starters at lines 1017+ + +--- + +## 5. Diffusers Compatibility + +**Critical finding**: All needed diffusers classes exist in the pinned **v0.36.0**: + +| Class | Module | Purpose | +|-------|--------|---------| +| `CosmosTransformer3DModel` | `diffusers.models.transformers.transformer_cosmos` | Backbone transformer (but see note below) | +| `AutoencoderKLQwenImage` | `diffusers.models.autoencoders.autoencoder_kl_qwenimage` | VAE (fine-tuned Wan 2.1) | +| `AutoencoderKLWan` | `diffusers.models.autoencoders.autoencoder_kl_wan` | Alternative VAE class | + +**⚠️ Important caveat**: The diffusers `CosmosTransformer3DModel` is the *vanilla* Cosmos Predict2 model. Anima extends it with the custom `LLMAdapter`. We have two options: + +1. 
**Don't use diffusers' `CosmosTransformer3DModel` at all** — implement the full `MiniTrainDIT` + `LLMAdapter` as custom PyTorch modules (reverse-engineered from ComfyUI). This is the safer approach since the ComfyUI implementation is the reference. +2. **Use diffusers' `CosmosTransformer3DModel` for the backbone** and bolt on the `LLMAdapter` separately — requires key remapping between ComfyUI checkpoint format and diffusers' expected format. + +**Recommendation**: Option 1 (custom implementation) is recommended for the initial version. The checkpoint is in ComfyUI format and guaranteed to load. Key remapping is error-prone and the model is not officially in diffusers anyway. Diffusers compatibility can be added later as a second format option. + +--- + +## 6. Implementation Steps + +### Step 1: Register Anima Base Type and Qwen3 0.6B Variant + +**Files to modify:** + +- `invokeai/backend/model_manager/config/enums.py` + - Add `Anima = "anima"` to `BaseModelType` enum (after `ZImage`) + - Add `Qwen3_06B = "qwen3-0.6b"` to `Qwen3Variant` enum + +- `invokeai/backend/model_manager/config/configs/qwen3_encoder.py` + - Update the variant detection logic to recognize hidden_size ~1024 → `Qwen3_06B` + - The existing logic maps 2560 → `Qwen3_4B` and 4096 → `Qwen3_8B`; add 1024 → `Qwen3_06B` + +### Step 2: Create Model Config Classes + +**Files to modify:** + +- `invokeai/backend/model_manager/config/configs/main.py` + - Add `Main_Checkpoint_Anima_Config` class with: + - `base = BaseModelType.Anima`, `type = ModelType.Main`, `format = ModelFormat.Checkpoint` + - `probe()` method that validates state dict keys: look for `llm_adapter.` prefix (unique to Anima) plus Cosmos-style keys (`blocks.`, `t_embedder.`, `x_embedder.`, `final_layer.`) + - Default generation settings: `width=1024`, `height=1024`, `steps=35`, `cfg_scale=4.5` + +- `invokeai/backend/model_manager/config/configs/vae.py` + - Add a config class for the QwenImage VAE (if needed as a standalone model type), or 
handle it within the main loader + +- `invokeai/backend/model_manager/config/configs/factory.py` + - Add `Main_Checkpoint_Anima_Config` (and any VAE configs) to the `AnyModelConfig` union + +### Step 3: Create Backend Module + +**New directory**: `invokeai/backend/anima/` + +**New files:** + +- `invokeai/backend/anima/__init__.py` + +- `invokeai/backend/anima/llm_adapter.py` + - Port the `LLMAdapter`, `TransformerBlock`, `Attention`, and `RotaryEmbedding` classes from [comfy/ldm/anima/model.py](https://github.com/comfyanonymous/ComfyUI/blob/master/comfy/ldm/anima/model.py) + - These are standard PyTorch `nn.Module` classes using `nn.Linear`, `nn.Embedding`, `nn.RMSNorm`, `F.scaled_dot_product_attention` + - Replace ComfyUI's `operations.Linear` / `operations.RMSNorm` / `operations.Embedding` / `operations.LayerNorm` with standard `torch.nn` equivalents + - Key architecture: `Embedding(32128, 1024)` → `in_proj` → 6 × `TransformerBlock(source_dim=1024, model_dim=1024, num_heads=16, use_self_attn=True)` → `out_proj` → `RMSNorm` + +- `invokeai/backend/anima/anima_transformer.py` + - Two approaches (see Section 5 recommendation): + - **Option A (recommended)**: Port `MiniTrainDIT` from [comfy/ldm/cosmos/predict2.py](https://github.com/comfyanonymous/ComfyUI/blob/master/comfy/ldm/cosmos/predict2.py), create `AnimaTransformer` that extends it and adds the `LLMAdapter` + - **Option B**: Use `CosmosTransformer3DModel` from diffusers as backbone, wrap it with the `LLMAdapter`, implement key remapping + - Whichever approach: the `forward()` must accept `(x, timesteps, context, t5xxl_ids=None, t5xxl_weights=None)` and run `preprocess_text_embeds()` before the DiT forward pass + +- `invokeai/backend/anima/conditioning_data.py` + - Define `AnimaConditioningData` dataclass holding: + - `qwen3_embeds: torch.Tensor` — shape `[seq_len, 1024]` + - `t5xxl_ids: torch.Tensor` — shape `[seq_len]` (T5 token IDs) + - `t5xxl_weights: Optional[torch.Tensor]` — shape `[seq_len]` (token 
weights for prompt weighting) + - Follow the pattern in `invokeai/backend/z_image/conditioning_data.py` + +### Step 4: Create Model Loader + +**New file**: `invokeai/backend/model_manager/load/model_loaders/anima.py` + +- Register `AnimaCheckpointLoader` via `@ModelLoaderRegistry.register(base=BaseModelType.Anima, type=ModelType.Main, format=ModelFormat.Checkpoint)` +- **Loading logic**: + 1. Load the safetensors state dict + 2. Separate keys into two groups by prefix: + - `llm_adapter.*` → `LLMAdapter` weights + - Everything else (`blocks.*`, `t_embedder.*`, `x_embedder.*`, `final_layer.*`, etc.) → `MiniTrainDIT` / `CosmosTransformer3DModel` weights + 3. Instantiate the `AnimaTransformer` (which contains both components) + 4. Load state dict +- **VAE loading**: Register a loader for `AutoencoderKLQwenImage` from diffusers + - Load from single-file safetensors + - The VAE is a 3D causal conv VAE (processes single images as `[B, C, 1, H, W]`) + - Latent normalization uses the Wan 2.1 `latents_mean` / `latents_std` constants +- **Qwen3 0.6B**: Reuse the existing `Qwen3EncoderCheckpointLoader` from the Z-Image loader — it already handles single-file Qwen3 encoders via `Qwen3ForCausalLM`. Just ensure the config detection maps `hidden_size=1024` to the new `Qwen3_06B` variant. + +### Step 5: Create Invocation Nodes + +**New files in `invokeai/app/invocations/`:** + +- **`anima_model_loader.py`** — `AnimaModelLoaderInvocation` + - Inputs: `model` (Anima main model identifier), optional `qwen3_encoder` (standalone Qwen3 0.6B), optional `vae` (standalone QwenImage VAE) + - Outputs: `AnimaModelLoaderOutput` with `transformer: TransformerField`, `qwen3_encoder: Qwen3EncoderField`, `vae: VAEField` + - Follow pattern of `invokeai/app/invocations/z_image_model_loader.py` + +- **`anima_text_encoder.py`** — `AnimaTextEncoderInvocation` + - Inputs: `prompt` (string), `qwen3_encoder` (Qwen3EncoderField) + - Processing: + 1. 
Tokenize prompt with Qwen3 tokenizer (using chat template: `[{"role": "user", "content": prompt}]`) + 2. Run Qwen3 0.6B model → extract second-to-last hidden state → filter by attention mask + 3. Tokenize same prompt with T5-XXL tokenizer → get token IDs (no T5 model needed) + 4. Store both as conditioning tensors + - Output: conditioning info containing `qwen3_embeds`, `t5xxl_ids`, `t5xxl_weights` + - Follow pattern of `invokeai/app/invocations/z_image_text_encoder.py` for Qwen3 encoding + - **New aspect**: Must also produce T5 token IDs. Need to bundle `T5TokenizerFast` — the `sentencepiece` dependency is already in `pyproject.toml` (line 46), and `T5TokenizerFast` is used elsewhere in InvokeAI (for Flux/SD3 text encoding) + +- **`anima_denoise.py`** — `AnimaDenoiseInvocation` + - Inputs: `transformer`, `positive_conditioning`, `negative_conditioning`, `width`, `height`, `num_steps`, `guidance_scale`, `seed`, `scheduler` (Euler/Heun from existing flow match scheduler map) + - Processing: + 1. Generate random noise in latent space: `[1, 16, 1, H//8, W//8]` (note: 3D latents with T=1) + 2. Apply Wan 2.1 `process_in()` normalization if doing img2img (for txt2img, start from pure noise) + 3. Create sigma schedule using rectified flow with shift=3.0 (same `time_snr_shift` as Flux/Z-Image) + 4. Denoising loop: for each timestep, run transformer forward with conditioning, compute `denoised = input - output * sigma` + 5. CFG: when `guidance_scale > 1.0`, run both conditional and unconditional forward passes, blend: `output = uncond + guidance * (cond - uncond)` + 6. Apply scheduler step (Euler or Heun) + - Output: latents tensor + - Follow the flow-matching denoising pattern from `invokeai/app/invocations/z_image_denoise.py` (simplified: no regional prompting, no ControlNet, no inpainting for initial version) + - **Key difference from Z-Image**: The transformer expects `[B, C, T, H, W]` 5D input (Cosmos format), not `[B, C, H, W]` 4D. Temporal dim = 1 for images. 
+ +- **`anima_latents_to_image.py`** — `AnimaLatentsToImageInvocation` + - Inputs: `latents`, `vae` (VAEField) + - Processing: + 1. Load `AutoencoderKLQwenImage` from diffusers + 2. Apply Wan 2.1 `process_out()` denormalization: `latent * latents_std + latents_mean` + 3. Decode: VAE expects `[B, C, T, H, W]` → outputs `[B, C, T, H, W]` → squeeze temporal dim → convert to image + - Output: PIL Image + - Follow pattern of `invokeai/app/invocations/z_image_latents_to_image.py`, but adapted for `AutoencoderKLQwenImage` instead of `FluxAutoEncoder`/`AutoencoderKL` + +### Step 6: Update Frontend + +**Files to modify:** + +- `frontend/web/src/features/nodes/types/constants.ts` + - Add `'anima'` to `BASE_COLOR_MAP` (suggest a unique color, e.g., `'pink'` or `'rose'` for anime association) + - Add `'anima'` to `BASE_LABEL_MAP` with display name `'Anima'` + - Add `'anima'` to feature support arrays (only `SUPPORTS_CFG_RESCALE_BASE_MODELS` and similar that apply; omit from LoRA/ControlNet/IP-Adapter arrays initially) + +- `frontend/web/src/features/parameters/hooks/useMainModelDefaultSettings.ts` (or equivalent) + - Add `'anima'` with defaults: width=1024, height=1024, steps=35, cfg_scale=4.5 + +- **New file**: `frontend/web/src/features/nodes/util/graph/generation/buildAnimaGraph.ts` + - Create the graph builder function that assembles: `anima_model_loader` → `anima_text_encoder` (positive + negative) → `anima_denoise` → `anima_latents_to_image` + - Follow pattern of `frontend/web/src/features/nodes/util/graph/generation/buildZImageGraph.ts` but simplified (no ControlNet, no regional prompting, no img2img initially) + +- `frontend/web/src/features/nodes/util/graph/generation/buildGraph.ts` + - Add `case 'anima': return await buildAnimaGraph(arg);` to the dispatch switch + +- Various Zod schema files and node type union files — these will need `'anima'` added wherever `'z-image'` appears, following the same pattern + +- Grid size / scale factor mappings: + - Grid size: **8** 
(spatial compression is 8×, unlike Flux/Z-Image's 16×) + - Default dimensions: 1024×1024 + +### Step 7: Register Starter Models + +**File to modify:** + +- `invokeai/app/services/model_install/model_install_default.py` + - Add starter model entries for: + - Anima Preview2 transformer: `circlestone-labs/Anima` → `anima-preview2.safetensors` + - Qwen3 0.6B text encoder: `circlestone-labs/Anima` → `qwen_3_06b_base.safetensors` + - QwenImage VAE: `circlestone-labs/Anima` → `qwen_image_vae.safetensors` + - Follow the pattern of Z-Image starter models at lines 803–860 + +### Step 8: Regenerate OpenAPI Schema + +- After all backend changes, run the schema generation script to update the auto-generated OpenAPI schema that the frontend consumes +- This is typically done via `python scripts/generate_openapi_schema.py` + +--- + +## 7. Key Technical Challenges & Decisions + +### 7.1 Cosmos DiT Implementation Strategy + +**Decision needed**: Use diffusers' `CosmosTransformer3DModel` or port ComfyUI's `MiniTrainDIT`? + +| Approach | Pros | Cons | +|----------|------|------| +| **Port MiniTrainDIT from ComfyUI** | Exact checkpoint compatibility, no key remapping, reference implementation | More code to maintain, must port supporting classes (`Block`, `PatchEmbed`, `FinalLayer`, `Timesteps`, etc.) | +| **Use diffusers CosmosTransformer3DModel** | Less custom code, maintained by diffusers team | Key names may differ from checkpoint, needs investigation, may have subtle behavioral differences | + +**Recommendation**: Start with porting from ComfyUI. The checkpoint is in ComfyUI format and guaranteed to load. Diffusers compatibility can be added later as a second format option. + +### 7.2 T5 Tokenizer Handling + +The LLM Adapter needs T5-XXL token IDs but *not* the T5-XXL model. InvokeAI already has `T5TokenizerFast` usage for Flux/SD3 (see `invokeai/backend/flux/text_conditioning.py`). The tokenizer files are small (~2MB) and can be loaded from the `transformers` library cache. 
+ +**Approach**: Load `T5TokenizerFast` in the text encoder invocation using `T5TokenizerFast.from_pretrained("google/t5-v1_1-xxl")` (or bundle tokenizer files). No T5 model weights are needed. + +### 7.3 VAE 3D Tensor Handling + +The `AutoencoderKLQwenImage` is a 3D causal conv VAE that expects `[B, C, T, H, W]` tensors. For single images, `T=1`. The encode/decode calls must: +- **Encode**: `image_tensor.unsqueeze(2)` → `[B, C, 1, H, W]` → VAE encode → latents `[B, 16, 1, H//8, W//8]` +- **Decode**: latents `[B, 16, 1, H//8, W//8]` → VAE decode → `[B, C, 1, H, W]` → `.squeeze(2)` → `[B, C, H, W]` + +Apply Wan 2.1 mean/std normalization (not simple scaling): +```python +latents_mean = torch.tensor([-0.7571, -0.7089, -0.9113, 0.1075, -0.1745, 0.9653, -0.1517, 1.5508, + 0.4134, -0.0715, 0.5517, -0.3632, -0.1922, -0.9497, 0.2503, -0.2921]) +latents_std = torch.tensor([2.8184, 1.4541, 2.3275, 2.6558, 1.2196, 1.7708, 2.6052, 2.0743, + 3.2687, 2.1526, 2.8652, 1.5579, 1.6382, 1.1253, 2.8251, 1.9160]) +``` + +### 7.4 Noise Schedule: Rectified Flow with Shift=3.0 + +The sigma schedule uses the same `time_snr_shift` formula as Flux: +```python +def time_snr_shift(alpha, t): + if alpha == 1.0: + return t + return alpha * t / (1 + (alpha - 1) * t) +``` + +With `alpha=3.0` and `multiplier=1.0` — raw sigma values are passed directly as timesteps, per the ComfyUI Cosmos Predict2 sampling config (unlike Flux's ×1000 timestep scaling). The existing `FlowMatchEulerDiscreteScheduler` in `invokeai/backend/flux/flow_match_schedulers.py` should work, but may need the shift parameter exposed or configured. Check if the scheduler's `shift` parameter matches Anima's 3.0 (Flux uses a different shift value). + +### 7.5 Qwen3 0.6B vs 4B/8B Differences + +The Qwen3 0.6B model has `hidden_size=1024` compared to 4B's 2560 and 8B's 4096. The existing Qwen3 encoder infrastructure in InvokeAI handles 4B and 8B.
Adding 0.6B requires: +- New variant enum value +- Updated variant detection (hidden_size → variant mapping) +- The model class (`Qwen3ForCausalLM` from transformers) should work for any size — it's architecture-agnostic + +### 7.6 State Dict Key Mapping (Checkpoint → Model) + +The Anima checkpoint likely uses keys like: +``` +llm_adapter.embed.weight +llm_adapter.blocks.0.self_attn.q_proj.weight +llm_adapter.blocks.0.cross_attn.k_proj.weight +llm_adapter.blocks.0.mlp.0.weight +llm_adapter.out_proj.weight +llm_adapter.norm.weight +llm_adapter.rotary_emb.inv_freq +blocks.0.attn.to_q.weight (Cosmos DiT attention) +blocks.0.attn.to_k.weight +blocks.0.crossattn.to_q.weight (Cosmos DiT cross-attention) +t_embedder.0.freqs (Timestep embedding) +t_embedder.1.linear_1.weight +x_embedder.proj.weight (Patch embedding) +final_layer.linear.weight +``` + +**This key structure must be verified by inspecting the actual checkpoint file.** The loader must correctly instantiate the model architecture and load these keys. If using the ComfyUI `MiniTrainDIT` port, keys should match directly. If using diffusers' `CosmosTransformer3DModel`, a key remapping function will be needed. + +--- + +## 8. 
File Change Summary + +### New Files (Backend — Python) + +| File | Purpose | +|------|---------| +| `invokeai/backend/anima/__init__.py` | Package init | +| `invokeai/backend/anima/llm_adapter.py` | `LLMAdapter`, `TransformerBlock`, `Attention`, `RotaryEmbedding` | +| `invokeai/backend/anima/anima_transformer.py` | `AnimaTransformer` (MiniTrainDIT + LLMAdapter) or wrapper around `CosmosTransformer3DModel` | +| `invokeai/backend/anima/conditioning_data.py` | `AnimaConditioningData` dataclass | +| `invokeai/backend/model_manager/load/model_loaders/anima.py` | `AnimaCheckpointLoader`, VAE loader | +| `invokeai/app/invocations/anima_model_loader.py` | `AnimaModelLoaderInvocation` | +| `invokeai/app/invocations/anima_text_encoder.py` | `AnimaTextEncoderInvocation` | +| `invokeai/app/invocations/anima_denoise.py` | `AnimaDenoiseInvocation` | +| `invokeai/app/invocations/anima_latents_to_image.py` | `AnimaLatentsToImageInvocation` | + +### Modified Files (Backend — Python) + +| File | Change | +|------|--------| +| `invokeai/backend/model_manager/config/enums.py` | Add `Anima` to `BaseModelType`, `Qwen3_06B` to `Qwen3Variant` | +| `invokeai/backend/model_manager/config/configs/main.py` | Add `Main_Checkpoint_Anima_Config` | +| `invokeai/backend/model_manager/config/configs/qwen3_encoder.py` | Add hidden_size=1024 → `Qwen3_06B` detection | +| `invokeai/backend/model_manager/config/configs/factory.py` | Add Anima configs to `AnyModelConfig` union | +| `invokeai/app/services/model_install/model_install_default.py` | Add Anima starter models | + +### New Files (Frontend — TypeScript) + +| File | Purpose | +|------|---------| +| `frontend/web/src/features/nodes/util/graph/generation/buildAnimaGraph.ts` | Anima graph builder | + +### Modified Files (Frontend — TypeScript) + +| File | Change | +|------|--------| +| `frontend/web/src/features/nodes/types/constants.ts` | Add `'anima'` to all base model maps | +| 
`frontend/web/src/features/nodes/util/graph/generation/buildGraph.ts` | Add `'anima'` case to dispatch switch | +| Default settings hook | Add Anima defaults (1024×1024, CFG 4.5, 35 steps) | +| Zod schemas / node type unions | Add `'anima'` entries | + +--- + +## 9. Out of Scope (Future Work) + +The following features are explicitly deferred to follow-up implementations: + +- **LoRA support** — requires LoRA config classes, patcher logic, and a loader node +- **ControlNet** — requires Cosmos ControlNet support (available in diffusers 0.37.0 as `CosmosControlNetModel`) +- **Inpainting / Outpainting** — requires latent masking and noise injection logic +- **Image-to-Image** — requires VAE encode path + denoising from partial noise +- **Regional Prompting** — requires mask-based attention manipulation +- **IP Adapter** — architecture-specific, if even applicable to Cosmos-based models +- **GGUF / Quantized model support** — can be added later following Z-Image's GGUF loader pattern +- **Diffusers format loading** — if/when an official Anima diffusers pipeline is created diff --git a/invokeai/app/api/dependencies.py b/invokeai/app/api/dependencies.py index 339a0ceadb4..65522637a27 100644 --- a/invokeai/app/api/dependencies.py +++ b/invokeai/app/api/dependencies.py @@ -46,6 +46,7 @@ from invokeai.app.services.workflow_records.workflow_records_sqlite import SqliteWorkflowRecordsStorage from invokeai.app.services.workflow_thumbnails.workflow_thumbnails_disk import WorkflowThumbnailFileStorageDisk from invokeai.backend.stable_diffusion.diffusion.conditioning_data import ( + AnimaConditioningInfo, BasicConditioningInfo, CogView4ConditioningInfo, ConditioningFieldData, @@ -140,6 +141,7 @@ def initialize( SD3ConditioningInfo, CogView4ConditioningInfo, ZImageConditioningInfo, + AnimaConditioningInfo, ], ephemeral=True, ), diff --git a/invokeai/app/invocations/anima_denoise.py b/invokeai/app/invocations/anima_denoise.py new file mode 100644 index 00000000000..b16ad97b03d --- 
/dev/null +++ b/invokeai/app/invocations/anima_denoise.py @@ -0,0 +1,351 @@ +"""Anima denoising invocation. + +Implements the rectified flow denoising loop for Anima models: +- CONST model type: denoised = input - output * sigma +- Fixed shift=3.0 via time_snr_shift (same formula as Flux) +- Timestep convention: timestep = sigma * 1.0 (raw sigma, NOT 1-sigma like Z-Image) +- NO v-prediction negation (unlike Z-Image) +- 3D latent space: [B, C, T, H, W] with T=1 for images +- 16 latent channels, 8x spatial compression + +Key differences from Z-Image denoise: +- Anima uses fixed shift=3.0, Z-Image uses dynamic shift based on resolution +- Anima: timestep = sigma (raw), Z-Image: model_t = 1.0 - sigma +- Anima: noise_pred = model_output (CONST), Z-Image: noise_pred = -model_output (v-pred) +- Anima transformer takes (x, timesteps, context, t5xxl_ids, t5xxl_weights) +- Anima uses 3D latents directly, Z-Image converts 4D -> list of 5D +""" + +import inspect +import math +from contextlib import ExitStack +from typing import Callable + +import torch +from diffusers.schedulers.scheduling_utils import SchedulerMixin +from tqdm import tqdm + +from invokeai.app.invocations.baseinvocation import BaseInvocation, Classification, invocation +from invokeai.app.invocations.fields import ( + AnimaConditioningField, + FieldDescriptions, + Input, + InputField, + LatentsField, +) +from invokeai.app.invocations.model import TransformerField +from invokeai.app.invocations.primitives import LatentsOutput +from invokeai.app.services.shared.invocation_context import InvocationContext +from invokeai.backend.flux.schedulers import ANIMA_SCHEDULER_LABELS, ANIMA_SCHEDULER_MAP, ANIMA_SCHEDULER_NAME_VALUES +from invokeai.backend.model_manager.taxonomy import BaseModelType +from invokeai.backend.stable_diffusion.diffusion.conditioning_data import AnimaConditioningInfo +from invokeai.backend.stable_diffusion.diffusers_pipeline import PipelineIntermediateState +from invokeai.backend.util.devices 
import TorchDevice + +# Anima uses 8x spatial compression (VAE downsamples by 2^3) +ANIMA_LATENT_SCALE_FACTOR = 8 +# Anima uses 16 latent channels +ANIMA_LATENT_CHANNELS = 16 +# Anima uses fixed shift=3.0 for the rectified flow schedule +ANIMA_SHIFT = 3.0 +# Anima uses multiplier=1.0 (raw sigma values as timesteps, per ComfyUI config) +ANIMA_MULTIPLIER = 1.0 + + +def time_snr_shift(alpha: float, t: float) -> float: + """Apply time-SNR shift to a timestep value. + + This is the same formula used by Flux and ComfyUI's ModelSamplingDiscreteFlow. + With alpha=3.0, this shifts the noise schedule to spend more time at higher noise levels. + + Args: + alpha: Shift factor (3.0 for Anima). + t: Timestep value in [0, 1]. + + Returns: + Shifted timestep value. + """ + if alpha == 1.0: + return t + return alpha * t / (1 + (alpha - 1) * t) + + +@invocation( + "anima_denoise", + title="Denoise - Anima", + tags=["image", "anima"], + category="image", + version="1.0.0", + classification=Classification.Prototype, +) +class AnimaDenoiseInvocation(BaseInvocation): + """Run the denoising process with an Anima model. + + Uses rectified flow sampling with shift=3.0 and the Cosmos Predict2 DiT + backbone with integrated LLM Adapter for text conditioning. + """ + + transformer: TransformerField = InputField( + description="Anima transformer model.", input=Input.Connection, title="Transformer" + ) + positive_conditioning: AnimaConditioningField = InputField( + description=FieldDescriptions.positive_cond, input=Input.Connection + ) + negative_conditioning: AnimaConditioningField | None = InputField( + default=None, description=FieldDescriptions.negative_cond, input=Input.Connection + ) + guidance_scale: float = InputField( + default=4.5, + ge=1.0, + description="Guidance scale for classifier-free guidance. 
Recommended: 4.0-5.0 for Anima.", + title="Guidance Scale", + ) + width: int = InputField(default=1024, multiple_of=8, description="Width of the generated image.") + height: int = InputField(default=1024, multiple_of=8, description="Height of the generated image.") + steps: int = InputField(default=30, gt=0, description="Number of denoising steps. 30 recommended for Anima.") + seed: int = InputField(default=0, description="Randomness seed for reproducibility.") + scheduler: ANIMA_SCHEDULER_NAME_VALUES = InputField( + default="euler", + description="Scheduler (sampler) for the denoising process.", + ui_choice_labels=ANIMA_SCHEDULER_LABELS, + ) + + @torch.no_grad() + def invoke(self, context: InvocationContext) -> LatentsOutput: + latents = self._run_diffusion(context) + latents = latents.detach().to("cpu") + name = context.tensors.save(tensor=latents) + return LatentsOutput.build(latents_name=name, latents=latents, seed=None) + + def _get_noise( + self, + height: int, + width: int, + dtype: torch.dtype, + device: torch.device, + seed: int, + ) -> torch.Tensor: + """Generate initial noise tensor in 3D latent space [B, C, T, H, W].""" + rand_device = "cpu" + return torch.randn( + 1, + ANIMA_LATENT_CHANNELS, + 1, # T=1 for single image + height // ANIMA_LATENT_SCALE_FACTOR, + width // ANIMA_LATENT_SCALE_FACTOR, + device=rand_device, + dtype=torch.float32, + generator=torch.Generator(device=rand_device).manual_seed(seed), + ).to(device=device, dtype=dtype) + + def _get_sigmas(self, num_steps: int) -> list[float]: + """Generate sigma schedule with fixed shift=3.0. + + Uses the same time_snr_shift formula as Flux/ComfyUI but with + a fixed shift factor of 3.0 (no dynamic resolution-based shift). + + Returns: + List of num_steps + 1 sigma values from ~1.0 (noise) to 0.0 (clean). 
+ """ + sigmas = [] + for i in range(num_steps + 1): + t = 1.0 - i / num_steps + sigma = time_snr_shift(ANIMA_SHIFT, t) + sigmas.append(sigma) + return sigmas + + def _load_conditioning( + self, + context: InvocationContext, + cond_field: AnimaConditioningField, + dtype: torch.dtype, + device: torch.device, + ) -> AnimaConditioningInfo: + """Load Anima conditioning data from storage.""" + cond_data = context.conditioning.load(cond_field.conditioning_name) + assert len(cond_data.conditionings) == 1 + cond_info = cond_data.conditionings[0] + assert isinstance(cond_info, AnimaConditioningInfo) + return cond_info.to(dtype=dtype, device=device) + + def _run_diffusion(self, context: InvocationContext) -> torch.Tensor: + device = TorchDevice.choose_torch_device() + inference_dtype = TorchDevice.choose_bfloat16_safe_dtype(device) + + transformer_info = context.models.load(self.transformer.transformer) + + # Load positive conditioning + pos_cond = self._load_conditioning(context, self.positive_conditioning, inference_dtype, device) + pos_qwen3_embeds = pos_cond.qwen3_embeds.unsqueeze(0) # Add batch dim: (1, seq_len, 1024) + pos_t5xxl_ids = pos_cond.t5xxl_ids.unsqueeze(0) # Add batch dim: (1, seq_len) + pos_t5xxl_weights = None + if pos_cond.t5xxl_weights is not None: + pos_t5xxl_weights = pos_cond.t5xxl_weights.unsqueeze(0).unsqueeze(-1) # (1, seq_len, 1) + + # Load negative conditioning if CFG is enabled + do_cfg = not math.isclose(self.guidance_scale, 1.0) and self.negative_conditioning is not None + neg_qwen3_embeds = None + neg_t5xxl_ids = None + neg_t5xxl_weights = None + if do_cfg: + assert self.negative_conditioning is not None + neg_cond = self._load_conditioning(context, self.negative_conditioning, inference_dtype, device) + neg_qwen3_embeds = neg_cond.qwen3_embeds.unsqueeze(0) + neg_t5xxl_ids = neg_cond.t5xxl_ids.unsqueeze(0) + if neg_cond.t5xxl_weights is not None: + neg_t5xxl_weights = neg_cond.t5xxl_weights.unsqueeze(0).unsqueeze(-1) + + # Generate sigma 
schedule + sigmas = self._get_sigmas(self.steps) + total_steps = len(sigmas) - 1 + + # Generate initial noise (3D latent: [B, C, T, H, W]) + latents = self._get_noise(self.height, self.width, inference_dtype, device, self.seed) + + if total_steps <= 0: + return latents.squeeze(2) # Remove temporal dim for output + + step_callback = self._build_step_callback(context) + + # Initialize diffusers scheduler if not using built-in Euler + scheduler: SchedulerMixin | None = None + use_scheduler = self.scheduler != "euler" + + if use_scheduler: + scheduler_class = ANIMA_SCHEDULER_MAP[self.scheduler] + scheduler = scheduler_class(num_train_timesteps=1000, shift=1.0) + is_lcm = self.scheduler == "lcm" + set_timesteps_sig = inspect.signature(scheduler.set_timesteps) + if not is_lcm and "sigmas" in set_timesteps_sig.parameters: + scheduler.set_timesteps(sigmas=sigmas, device=device) + else: + scheduler.set_timesteps(num_inference_steps=total_steps, device=device) + num_scheduler_steps = len(scheduler.timesteps) + else: + num_scheduler_steps = total_steps + + with ExitStack() as exit_stack: + (cached_weights, transformer) = exit_stack.enter_context(transformer_info.model_on_device()) + + if use_scheduler and scheduler is not None: + # Scheduler-based denoising + user_step = 0 + pbar = tqdm(total=total_steps, desc="Denoising (Anima)") + for step_index in range(num_scheduler_steps): + sched_timestep = scheduler.timesteps[step_index] + sigma_curr = sched_timestep.item() / scheduler.config.num_train_timesteps + + is_heun = hasattr(scheduler, "state_in_first_order") + in_first_order = scheduler.state_in_first_order if is_heun else True + + # Anima timestep convention: timestep = sigma * multiplier (1.0) + timestep = torch.tensor( + [sigma_curr * ANIMA_MULTIPLIER], device=device, dtype=inference_dtype + ).expand(latents.shape[0]) + + # Run transformer (positive) + model_output = transformer( + x=latents.to(transformer.dtype if hasattr(transformer, 'dtype') else inference_dtype), + 
timesteps=timestep, + context=pos_qwen3_embeds, + t5xxl_ids=pos_t5xxl_ids, + t5xxl_weights=pos_t5xxl_weights, + ) + noise_pred_cond = model_output.float() + + # Apply CFG + if do_cfg and neg_qwen3_embeds is not None: + model_output_uncond = transformer( + x=latents.to(transformer.dtype if hasattr(transformer, 'dtype') else inference_dtype), + timesteps=timestep, + context=neg_qwen3_embeds, + t5xxl_ids=neg_t5xxl_ids, + t5xxl_weights=neg_t5xxl_weights, + ) + noise_pred_uncond = model_output_uncond.float() + noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_cond - noise_pred_uncond) + else: + noise_pred = noise_pred_cond + + step_output = scheduler.step(model_output=noise_pred, timestep=sched_timestep, sample=latents) + latents = step_output.prev_sample + + if is_heun: + if not in_first_order: + user_step += 1 + if user_step <= total_steps: + pbar.update(1) + step_callback(PipelineIntermediateState( + step=user_step, order=2, total_steps=total_steps, + timestep=int(sigma_curr * 1000), latents=latents.squeeze(2), + )) + else: + user_step += 1 + if user_step <= total_steps: + pbar.update(1) + step_callback(PipelineIntermediateState( + step=user_step, order=1, total_steps=total_steps, + timestep=int(sigma_curr * 1000), latents=latents.squeeze(2), + )) + pbar.close() + else: + # Built-in Euler implementation (default for Anima) + for step_idx in tqdm(range(total_steps), desc="Denoising (Anima)"): + sigma_curr = sigmas[step_idx] + sigma_prev = sigmas[step_idx + 1] + + # Anima timestep: sigma * multiplier (1.0 = raw sigma) + timestep = torch.tensor( + [sigma_curr * ANIMA_MULTIPLIER], device=device, dtype=inference_dtype + ).expand(latents.shape[0]) + + # Run transformer (positive) + model_output = transformer( + x=latents.to(transformer.dtype if hasattr(transformer, 'dtype') else inference_dtype), + timesteps=timestep, + context=pos_qwen3_embeds, + t5xxl_ids=pos_t5xxl_ids, + t5xxl_weights=pos_t5xxl_weights, + ) + + # CONST model: noise_pred = model_output 
(NO negation, unlike Z-Image v-pred) + noise_pred_cond = model_output.float() + + # Apply CFG + if do_cfg and neg_qwen3_embeds is not None: + model_output_uncond = transformer( + x=latents.to(transformer.dtype if hasattr(transformer, 'dtype') else inference_dtype), + timesteps=timestep, + context=neg_qwen3_embeds, + t5xxl_ids=neg_t5xxl_ids, + t5xxl_weights=neg_t5xxl_weights, + ) + noise_pred_uncond = model_output_uncond.float() + noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_cond - noise_pred_uncond) + else: + noise_pred = noise_pred_cond + + # Euler step: x_{t-1} = x_t + (sigma_{t-1} - sigma_t) * model_output + latents_dtype = latents.dtype + latents = latents.to(dtype=torch.float32) + latents = latents + (sigma_prev - sigma_curr) * noise_pred + latents = latents.to(dtype=latents_dtype) + + step_callback( + PipelineIntermediateState( + step=step_idx + 1, + order=1, + total_steps=total_steps, + timestep=int(sigma_curr * 1000), + latents=latents.squeeze(2), # Remove temporal dim for preview + ), + ) + + # Remove temporal dimension for output: [B, C, 1, H, W] -> [B, C, H, W] + return latents.squeeze(2) + + def _build_step_callback(self, context: InvocationContext) -> Callable[[PipelineIntermediateState], None]: + def step_callback(state: PipelineIntermediateState) -> None: + context.util.sd_step_callback(state, BaseModelType.Anima) + + return step_callback diff --git a/invokeai/app/invocations/anima_latents_to_image.py b/invokeai/app/invocations/anima_latents_to_image.py new file mode 100644 index 00000000000..7eb03ebd766 --- /dev/null +++ b/invokeai/app/invocations/anima_latents_to_image.py @@ -0,0 +1,111 @@ +"""Anima latents-to-image invocation. + +Decodes Anima latents using the QwenImage VAE (AutoencoderKLWan) or +compatible FLUX VAE as fallback. + +Latents from the denoiser are in normalized space (zero-centered). 
Before +VAE decode, they must be denormalized using the Wan 2.1 per-channel +mean/std: latents = latents * std + mean (matching diffusers WanPipeline +and ComfyUI's Wan21 latent_format.process_out). + +The VAE expects 5D latents [B, C, T, H, W] — for single images, T=1. +""" + +import torch +from diffusers.models.autoencoders import AutoencoderKLWan +from einops import rearrange +from PIL import Image + +from invokeai.app.invocations.baseinvocation import BaseInvocation, Classification, invocation +from invokeai.app.invocations.fields import ( + FieldDescriptions, + Input, + InputField, + LatentsField, + WithBoard, + WithMetadata, +) +from invokeai.app.invocations.model import VAEField +from invokeai.app.invocations.primitives import ImageOutput +from invokeai.app.services.shared.invocation_context import InvocationContext +from invokeai.backend.flux.modules.autoencoder import AutoEncoder as FluxAutoEncoder +from invokeai.backend.util.devices import TorchDevice +from invokeai.backend.util.vae_working_memory import estimate_vae_working_memory_flux + + +@invocation( + "anima_l2i", + title="Latents to Image - Anima", + tags=["latents", "image", "vae", "l2i", "anima"], + category="latents", + version="1.0.2", + classification=Classification.Prototype, +) +class AnimaLatentsToImageInvocation(BaseInvocation, WithMetadata, WithBoard): + """Generates an image from latents using the Anima VAE. + + Supports the Wan 2.1 QwenImage VAE (AutoencoderKLWan) with explicit + latent denormalization, and FLUX VAE as fallback. 
+ """ + + latents: LatentsField = InputField(description=FieldDescriptions.latents, input=Input.Connection) + vae: VAEField = InputField(description=FieldDescriptions.vae, input=Input.Connection) + + @torch.no_grad() + def invoke(self, context: InvocationContext) -> ImageOutput: + latents = context.tensors.load(self.latents.latents_name) + + vae_info = context.models.load(self.vae.vae) + if not isinstance(vae_info.model, (AutoencoderKLWan, FluxAutoEncoder)): + raise TypeError( + f"Expected AutoencoderKLWan or FluxAutoEncoder for Anima VAE, got {type(vae_info.model).__name__}." + ) + + is_flux_vae = isinstance(vae_info.model, FluxAutoEncoder) + + estimated_working_memory = estimate_vae_working_memory_flux( + operation="decode", + image_tensor=latents, + vae=vae_info.model, + ) + + with vae_info.model_on_device(working_mem_bytes=estimated_working_memory) as (_, vae): + context.util.signal_progress("Running Anima VAE decode") + if not isinstance(vae, (AutoencoderKLWan, FluxAutoEncoder)): + raise TypeError(f"Expected AutoencoderKLWan or FluxAutoEncoder, got {type(vae).__name__}.") + + vae_dtype = next(iter(vae.parameters())).dtype + latents = latents.to(device=TorchDevice.choose_torch_device(), dtype=vae_dtype) + + TorchDevice.empty_cache() + + with torch.inference_mode(): + if isinstance(vae, FluxAutoEncoder): + # FLUX VAE handles scaling internally, expects 4D [B, C, H, W] + img = vae.decode(latents) + else: + # Expects 5D latents [B, C, T, H, W] + if latents.ndim == 4: + latents = latents.unsqueeze(2) # [B, C, H, W] -> [B, C, 1, H, W] + + # Denormalize from denoiser space to raw VAE space + # (same as diffusers WanPipeline and ComfyUI Wan21.process_out) + latents_mean = torch.tensor(vae.config.latents_mean).view(1, -1, 1, 1, 1).to(latents) + latents_std = torch.tensor(vae.config.latents_std).view(1, -1, 1, 1, 1).to(latents) + latents = latents * latents_std + latents_mean + + decoded = vae.decode(latents, return_dict=False)[0] + + # Output is 5D [B, C, T, H, W] — 
squeeze temporal dim + if decoded.ndim == 5: + decoded = decoded.squeeze(2) + img = decoded + + img = img.clamp(-1, 1) + img = rearrange(img[0], "c h w -> h w c") + img_pil = Image.fromarray((127.5 * (img + 1.0)).byte().cpu().numpy()) + + TorchDevice.empty_cache() + + image_dto = context.images.save(image=img_pil) + return ImageOutput.build(image_dto) diff --git a/invokeai/app/invocations/anima_model_loader.py b/invokeai/app/invocations/anima_model_loader.py new file mode 100644 index 00000000000..3c134631805 --- /dev/null +++ b/invokeai/app/invocations/anima_model_loader.py @@ -0,0 +1,98 @@ +from typing import Optional + +from invokeai.app.invocations.baseinvocation import ( + BaseInvocation, + BaseInvocationOutput, + Classification, + invocation, + invocation_output, +) +from invokeai.app.invocations.fields import FieldDescriptions, Input, InputField, OutputField +from invokeai.app.invocations.model import ( + ModelIdentifierField, + Qwen3EncoderField, + TransformerField, + VAEField, +) +from invokeai.app.services.shared.invocation_context import InvocationContext +from invokeai.backend.model_manager.taxonomy import BaseModelType, ModelType, SubModelType + + +@invocation_output("anima_model_loader_output") +class AnimaModelLoaderOutput(BaseInvocationOutput): + """Anima model loader output.""" + + transformer: TransformerField = OutputField(description=FieldDescriptions.transformer, title="Transformer") + qwen3_encoder: Qwen3EncoderField = OutputField(description=FieldDescriptions.qwen3_encoder, title="Qwen3 Encoder") + vae: VAEField = OutputField(description=FieldDescriptions.vae, title="VAE") + + +@invocation( + "anima_model_loader", + title="Main Model - Anima", + tags=["model", "anima"], + category="model", + version="1.0.0", + classification=Classification.Prototype, +) +class AnimaModelLoaderInvocation(BaseInvocation): + """Loads an Anima model, outputting its submodels. 
+ + Anima uses: + - Transformer: Cosmos Predict2 DiT + LLM Adapter (from single-file checkpoint) + - Qwen3 Encoder: Qwen3 0.6B (standalone single-file) + - VAE: AutoencoderKLQwenImage / Wan 2.1 VAE (standalone single-file or FLUX VAE) + """ + + model: ModelIdentifierField = InputField( + description="Anima main model (transformer + LLM adapter).", + input=Input.Direct, + ui_model_base=BaseModelType.Anima, + ui_model_type=ModelType.Main, + title="Transformer", + ) + + vae_model: Optional[ModelIdentifierField] = InputField( + default=None, + description="Standalone VAE model. Anima uses a Wan 2.1 / QwenImage VAE (16-channel). " + "If not provided, a FLUX VAE can be used as a fallback.", + input=Input.Direct, + ui_model_type=ModelType.VAE, + title="VAE", + ) + + qwen3_encoder_model: Optional[ModelIdentifierField] = InputField( + default=None, + description="Standalone Qwen3 0.6B Encoder model.", + input=Input.Direct, + ui_model_type=ModelType.Qwen3Encoder, + title="Qwen3 Encoder", + ) + + def invoke(self, context: InvocationContext) -> AnimaModelLoaderOutput: + # Transformer always comes from the main model + transformer = self.model.model_copy(update={"submodel_type": SubModelType.Transformer}) + + # VAE + if self.vae_model is not None: + vae = self.vae_model.model_copy(update={"submodel_type": SubModelType.VAE}) + else: + raise ValueError( + "No VAE source provided. Set 'VAE' to a compatible VAE model " + "(Wan 2.1 QwenImage VAE or FLUX VAE)." + ) + + # Qwen3 Encoder + if self.qwen3_encoder_model is not None: + qwen3_tokenizer = self.qwen3_encoder_model.model_copy(update={"submodel_type": SubModelType.Tokenizer}) + qwen3_encoder = self.qwen3_encoder_model.model_copy(update={"submodel_type": SubModelType.TextEncoder}) + else: + raise ValueError( + "No Qwen3 Encoder source provided. Set 'Qwen3 Encoder' to a Qwen3 0.6B model." 
+ ) + + return AnimaModelLoaderOutput( + transformer=TransformerField(transformer=transformer, loras=[]), + qwen3_encoder=Qwen3EncoderField(tokenizer=qwen3_tokenizer, text_encoder=qwen3_encoder), + vae=VAEField(vae=vae), + ) diff --git a/invokeai/app/invocations/anima_text_encoder.py b/invokeai/app/invocations/anima_text_encoder.py new file mode 100644 index 00000000000..df724a66d11 --- /dev/null +++ b/invokeai/app/invocations/anima_text_encoder.py @@ -0,0 +1,189 @@ +"""Anima text encoder invocation. + +Encodes text using the dual-conditioning pipeline: +1. Qwen3 0.6B: Produces hidden states (last layer) +2. T5-XXL Tokenizer: Produces token IDs only (no T5 model needed) + +Both outputs are stored together in AnimaConditioningInfo and used by +the LLM Adapter inside the transformer during denoising. + +Key differences from Z-Image text encoder: +- Anima uses Qwen3 0.6B (base model, NOT instruct) — no chat template +- Anima additionally tokenizes with T5-XXL tokenizer to get token IDs +- Qwen3 output includes all positions (including padding) to match ComfyUI +""" + +from contextlib import ExitStack + +import torch +from transformers import PreTrainedModel, PreTrainedTokenizerBase, T5TokenizerFast + +from invokeai.app.invocations.baseinvocation import BaseInvocation, Classification, invocation +from invokeai.app.invocations.fields import ( + AnimaConditioningField, + FieldDescriptions, + Input, + InputField, + UIComponent, +) +from invokeai.app.invocations.model import Qwen3EncoderField +from invokeai.app.invocations.primitives import AnimaConditioningOutput +from invokeai.app.services.shared.invocation_context import InvocationContext +from invokeai.backend.stable_diffusion.diffusion.conditioning_data import ( + AnimaConditioningInfo, + ConditioningFieldData, +) + +# Qwen3 max sequence length — ComfyUI's SDClipModel uses max_length=77 for Qwen3. 
+# We match this to ensure the LLM Adapter's cross-attention sees the same number of +# source positions (including padding) as during training. +QWEN3_MAX_SEQ_LEN = 77 + +# T5-XXL max sequence length for token IDs +T5_MAX_SEQ_LEN = 512 + +# T5-XXL tokenizer source (same vocabulary regardless of T5 model variant) +T5_TOKENIZER_NAME = "google/t5-v1_1-xxl" + + +@invocation( + "anima_text_encoder", + title="Prompt - Anima", + tags=["prompt", "conditioning", "anima"], + category="conditioning", + version="1.0.1", + classification=Classification.Prototype, +) +class AnimaTextEncoderInvocation(BaseInvocation): + """Encodes and preps a prompt for an Anima image. + + Uses Qwen3 0.6B for hidden state extraction and T5-XXL tokenizer for + token IDs (no T5 model weights needed). Both are combined by the + LLM Adapter inside the Anima transformer during denoising. + """ + + prompt: str = InputField(description="Text prompt to encode.", ui_component=UIComponent.Textarea) + qwen3_encoder: Qwen3EncoderField = InputField( + title="Qwen3 Encoder", + description=FieldDescriptions.qwen3_encoder, + input=Input.Connection, + ) + + @torch.no_grad() + def invoke(self, context: InvocationContext) -> AnimaConditioningOutput: + qwen3_embeds, t5xxl_ids, t5xxl_weights = self._encode_prompt(context) + + # Move to CPU for storage + qwen3_embeds = qwen3_embeds.detach().to("cpu") + t5xxl_ids = t5xxl_ids.detach().to("cpu") + t5xxl_weights = t5xxl_weights.detach().to("cpu") if t5xxl_weights is not None else None + + conditioning_data = ConditioningFieldData( + conditionings=[ + AnimaConditioningInfo( + qwen3_embeds=qwen3_embeds, + t5xxl_ids=t5xxl_ids, + t5xxl_weights=t5xxl_weights, + ) + ] + ) + conditioning_name = context.conditioning.save(conditioning_data) + return AnimaConditioningOutput( + conditioning=AnimaConditioningField(conditioning_name=conditioning_name) + ) + + def _encode_prompt( + self, + context: InvocationContext, + ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor | None]: + 
"""Encode prompt using Qwen3 0.6B and T5-XXL tokenizer. + + Returns: + Tuple of (qwen3_embeds, t5xxl_ids, t5xxl_weights). + - qwen3_embeds: Shape (max_seq_len, 1024) — includes all positions (including padding) + to match ComfyUI's SDClipModel behavior. + - t5xxl_ids: Shape (seq_len,) — T5-XXL token IDs (unpadded). + - t5xxl_weights: None (uniform weights for now). + """ + prompt = self.prompt + + # --- Step 1: Encode with Qwen3 0.6B --- + text_encoder_info = context.models.load(self.qwen3_encoder.text_encoder) + tokenizer_info = context.models.load(self.qwen3_encoder.tokenizer) + + with ExitStack() as exit_stack: + (_, text_encoder) = exit_stack.enter_context(text_encoder_info.model_on_device()) + (_, tokenizer) = exit_stack.enter_context(tokenizer_info.model_on_device()) + + device = text_encoder.device + + if not isinstance(text_encoder, PreTrainedModel): + raise TypeError( + f"Expected PreTrainedModel for text encoder, got {type(text_encoder).__name__}." + ) + if not isinstance(tokenizer, PreTrainedTokenizerBase): + raise TypeError( + f"Expected PreTrainedTokenizerBase for tokenizer, got {type(tokenizer).__name__}." 
+ ) + + context.util.signal_progress("Running Qwen3 0.6B text encoder") + + # Anima uses base Qwen3 (not instruct) — tokenize directly, no chat template + # ComfyUI uses max_length=77 (SDClipModel default) for Qwen3 + text_inputs = tokenizer( + prompt, + padding="max_length", + max_length=QWEN3_MAX_SEQ_LEN, + truncation=True, + return_attention_mask=True, + return_tensors="pt", + ) + + text_input_ids = text_inputs.input_ids + attention_mask = text_inputs.attention_mask + if not isinstance(text_input_ids, torch.Tensor) or not isinstance(attention_mask, torch.Tensor): + raise TypeError("Tokenizer returned unexpected types.") + + # Check for truncation + untruncated_ids = tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + text_input_ids, untruncated_ids + ): + removed_text = tokenizer.batch_decode(untruncated_ids[:, QWEN3_MAX_SEQ_LEN - 1 : -1]) + context.logger.warning( + f"Prompt truncated at {QWEN3_MAX_SEQ_LEN} tokens. Removed: {removed_text}" + ) + + # Get last hidden state from Qwen3 (ComfyUI uses layer="last") + # Pass attention mask so padding tokens don't attend to each other, + # but keep ALL positions in the output (including padding) to match + # ComfyUI's SDClipModel which returns full padded sequences. + prompt_mask = attention_mask.to(device).bool() + outputs = text_encoder( + text_input_ids.to(device), + attention_mask=prompt_mask, + output_hidden_states=True, + ) + + if not hasattr(outputs, "hidden_states") or outputs.hidden_states is None: + raise RuntimeError("Text encoder did not return hidden_states.") + if len(outputs.hidden_states) < 1: + raise RuntimeError(f"Expected at least 1 hidden state, got {len(outputs.hidden_states)}.") + + # Use last hidden state — keep all positions (including padding) + # ComfyUI's SDClipModel returns all positions without filtering. 
+ qwen3_embeds = outputs.hidden_states[-1][0] # Shape: (QWEN3_MAX_SEQ_LEN, 1024) + + # --- Step 2: Tokenize with T5-XXL tokenizer (IDs only, no model) --- + context.util.signal_progress("Tokenizing with T5-XXL") + t5_tokenizer = T5TokenizerFast.from_pretrained(T5_TOKENIZER_NAME) + t5_tokens = t5_tokenizer( + prompt, + padding=False, + truncation=True, + max_length=T5_MAX_SEQ_LEN, + return_tensors="pt", + ) + t5xxl_ids = t5_tokens.input_ids[0] # Shape: (seq_len,) + + return qwen3_embeds, t5xxl_ids, None diff --git a/invokeai/app/invocations/fields.py b/invokeai/app/invocations/fields.py index cca09a059d5..d888b8927b5 100644 --- a/invokeai/app/invocations/fields.py +++ b/invokeai/app/invocations/fields.py @@ -340,6 +340,16 @@ class ZImageConditioningField(BaseModel): ) +class AnimaConditioningField(BaseModel): + """An Anima conditioning tensor primitive value. + + Anima conditioning contains Qwen3 0.6B hidden states and T5-XXL token IDs, + which are combined by the LLM Adapter inside the transformer. 
+ """ + + conditioning_name: str = Field(description="The name of conditioning tensor") + + class ConditioningField(BaseModel): """A conditioning tensor primitive value""" diff --git a/invokeai/app/invocations/primitives.py b/invokeai/app/invocations/primitives.py index dcb1fc6a45f..2f404d16ba8 100644 --- a/invokeai/app/invocations/primitives.py +++ b/invokeai/app/invocations/primitives.py @@ -12,6 +12,7 @@ ) from invokeai.app.invocations.constants import LATENT_SCALE_FACTOR from invokeai.app.invocations.fields import ( + AnimaConditioningField, BoundingBoxField, CogView4ConditioningField, ColorField, @@ -473,6 +474,17 @@ def build(cls, conditioning_name: str) -> "ZImageConditioningOutput": return cls(conditioning=ZImageConditioningField(conditioning_name=conditioning_name)) +@invocation_output("anima_conditioning_output") +class AnimaConditioningOutput(BaseInvocationOutput): + """Base class for nodes that output an Anima text conditioning tensor.""" + + conditioning: AnimaConditioningField = OutputField(description=FieldDescriptions.cond) + + @classmethod + def build(cls, conditioning_name: str) -> "AnimaConditioningOutput": + return cls(conditioning=AnimaConditioningField(conditioning_name=conditioning_name)) + + @invocation_output("conditioning_output") class ConditioningOutput(BaseInvocationOutput): """Base class for nodes that output a single conditioning tensor""" diff --git a/invokeai/app/util/step_callback.py b/invokeai/app/util/step_callback.py index 990fdd51d8b..0e2faeca391 100644 --- a/invokeai/app/util/step_callback.py +++ b/invokeai/app/util/step_callback.py @@ -133,6 +133,29 @@ FLUX2_LATENT_RGB_BIAS = [-0.0329, -0.0718, -0.0851] +# Anima uses Wan 2.1 VAE with 16 latent channels. 
+# Factors from ComfyUI: https://github.com/Comfy-Org/ComfyUI/blob/main/comfy/latent_formats.py +ANIMA_LATENT_RGB_FACTORS = [ + [-0.1299, -0.1692, 0.2932], + [0.0671, 0.0406, 0.0442], + [0.3568, 0.2548, 0.1747], + [0.0372, 0.2344, 0.1420], + [0.0313, 0.0189, -0.0328], + [0.0296, -0.0956, -0.0665], + [-0.3477, -0.4059, -0.2925], + [0.0166, 0.1902, 0.1975], + [-0.0412, 0.0267, -0.1364], + [-0.1293, 0.0740, 0.1636], + [0.0680, 0.3019, 0.1128], + [0.0032, 0.0581, 0.0639], + [-0.1251, 0.0927, 0.1699], + [0.0060, -0.0633, 0.0005], + [0.3477, 0.2275, 0.2950], + [0.1984, 0.0913, 0.1861], +] + +ANIMA_LATENT_RGB_BIAS = [-0.1835, -0.0868, -0.3360] + def sample_to_lowres_estimated_image( samples: torch.Tensor, @@ -217,6 +240,10 @@ def diffusion_step_callback( elif base_model == BaseModelType.ZImage: # Z-Image uses FLUX-compatible VAE with 16 latent channels latent_rgb_factors = FLUX_LATENT_RGB_FACTORS + elif base_model == BaseModelType.Anima: + # Anima uses Wan 2.1 VAE with 16 latent channels + latent_rgb_factors = ANIMA_LATENT_RGB_FACTORS + latent_rgb_bias = ANIMA_LATENT_RGB_BIAS else: raise ValueError(f"Unsupported base model: {base_model}") diff --git a/invokeai/backend/anima/__init__.py b/invokeai/backend/anima/__init__.py new file mode 100644 index 00000000000..380f67416b7 --- /dev/null +++ b/invokeai/backend/anima/__init__.py @@ -0,0 +1,7 @@ +# Copyright (c) 2024, Thhe InvokeAI Development Team +"""Anima model backend module. + +Anima is a 2B-parameter anime-focused text-to-image model built on NVIDIA's +Cosmos Predict2 DiT architecture with a custom LLM Adapter that bridges Qwen3 +0.6B text encoder outputs to the DiT backbone. +""" diff --git a/invokeai/backend/anima/anima_transformer.py b/invokeai/backend/anima/anima_transformer.py new file mode 100644 index 00000000000..e8779582b5c --- /dev/null +++ b/invokeai/backend/anima/anima_transformer.py @@ -0,0 +1,1058 @@ +"""Anima transformer model: Cosmos Predict2 MiniTrainDIT + LLM Adapter. 
+ +Ported from the ComfyUI implementation: +- comfy/ldm/cosmos/predict2.py (MiniTrainDIT backbone) +- comfy/ldm/anima/model.py (LLMAdapter + Anima wrapper) + +The Anima architecture combines: +1. MiniTrainDIT: A Cosmos Predict2 DiT backbone with 28 blocks, 2048-dim hidden state, + and 3D RoPE positional embeddings. +2. LLMAdapter: A 6-layer cross-attention transformer that fuses Qwen3 0.6B hidden states + with learned T5-XXL token embeddings to produce conditioning for the DiT. + +References: +- https://github.com/comfyanonymous/ComfyUI/blob/master/comfy/ldm/cosmos/predict2.py +- https://github.com/comfyanonymous/ComfyUI/blob/master/comfy/ldm/anima/model.py +- https://github.com/nvidia-cosmos/cosmos-predict2 +""" + +import logging +import math +from typing import Optional, Tuple + +import torch +import torch.nn.functional as F +from einops import rearrange, repeat +from einops.layers.torch import Rearrange +from torch import nn + +logger = logging.getLogger(__name__) + + +# ============================================================================ +# Positional Embeddings (from comfy/ldm/cosmos/position_embedding.py) +# ============================================================================ + + +class VideoRopePosition3DEmb(nn.Module): + """3D Rotary Position Embedding for video/image transformers. + + Generates rotary embeddings with separate frequency components for + height, width, and temporal dimensions. 
+ """ + + def __init__( + self, + *, + head_dim: int, + len_h: int, + len_w: int, + len_t: int, + base_fps: int = 24, + h_extrapolation_ratio: float = 1.0, + w_extrapolation_ratio: float = 1.0, + t_extrapolation_ratio: float = 1.0, + enable_fps_modulation: bool = True, + device: Optional[torch.device] = None, + **kwargs, + ): + super().__init__() + self.base_fps = base_fps + self.max_h = len_h + self.max_w = len_w + self.enable_fps_modulation = enable_fps_modulation + + dim = head_dim + dim_h = dim // 6 * 2 + dim_w = dim_h + dim_t = dim - 2 * dim_h + assert dim == dim_h + dim_w + dim_t, f"bad dim: {dim} != {dim_h} + {dim_w} + {dim_t}" + + self.register_buffer( + "dim_spatial_range", + torch.arange(0, dim_h, 2, device=device)[: (dim_h // 2)].float() / dim_h, + persistent=False, + ) + self.register_buffer( + "dim_temporal_range", + torch.arange(0, dim_t, 2, device=device)[: (dim_t // 2)].float() / dim_t, + persistent=False, + ) + + self.h_ntk_factor = h_extrapolation_ratio ** (dim_h / (dim_h - 2)) + self.w_ntk_factor = w_extrapolation_ratio ** (dim_w / (dim_w - 2)) + self.t_ntk_factor = t_extrapolation_ratio ** (dim_t / (dim_t - 2)) + + def forward( + self, + x_B_T_H_W_C: torch.Tensor, + fps: Optional[torch.Tensor] = None, + device: Optional[torch.device] = None, + ) -> torch.Tensor: + return self.generate_embeddings(x_B_T_H_W_C.shape, fps=fps, device=device) + + def generate_embeddings( + self, + B_T_H_W_C: torch.Size, + fps: Optional[torch.Tensor] = None, + device: Optional[torch.device] = None, + dtype: Optional[torch.dtype] = None, + ) -> torch.Tensor: + h_theta = 10000.0 * self.h_ntk_factor + w_theta = 10000.0 * self.w_ntk_factor + t_theta = 10000.0 * self.t_ntk_factor + + h_spatial_freqs = 1.0 / (h_theta ** self.dim_spatial_range.to(device=device)) + w_spatial_freqs = 1.0 / (w_theta ** self.dim_spatial_range.to(device=device)) + temporal_freqs = 1.0 / (t_theta ** self.dim_temporal_range.to(device=device)) + + B, T, H, W, _ = B_T_H_W_C + seq = 
torch.arange(max(H, W, T), dtype=torch.float, device=device) + + half_emb_h = torch.outer(seq[:H].to(device=device), h_spatial_freqs) + half_emb_w = torch.outer(seq[:W].to(device=device), w_spatial_freqs) + + if fps is None or self.enable_fps_modulation is False: + half_emb_t = torch.outer(seq[:T].to(device=device), temporal_freqs) + else: + half_emb_t = torch.outer(seq[:T].to(device=device) / fps * self.base_fps, temporal_freqs) + + half_emb_h = torch.stack( + [torch.cos(half_emb_h), -torch.sin(half_emb_h), torch.sin(half_emb_h), torch.cos(half_emb_h)], dim=-1 + ) + half_emb_w = torch.stack( + [torch.cos(half_emb_w), -torch.sin(half_emb_w), torch.sin(half_emb_w), torch.cos(half_emb_w)], dim=-1 + ) + half_emb_t = torch.stack( + [torch.cos(half_emb_t), -torch.sin(half_emb_t), torch.sin(half_emb_t), torch.cos(half_emb_t)], dim=-1 + ) + + em_T_H_W_D = torch.cat( + [ + repeat(half_emb_t, "t d x -> t h w d x", h=H, w=W), + repeat(half_emb_h, "h d x -> t h w d x", t=T, w=W), + repeat(half_emb_w, "w d x -> t h w d x", t=T, h=H), + ], + dim=-2, + ) + + return rearrange(em_T_H_W_D, "t h w d (i j) -> (t h w) d i j", i=2, j=2).float() + + +def _normalize(x: torch.Tensor, dim: Optional[list[int]] = None, eps: float = 0) -> torch.Tensor: + if dim is None: + dim = list(range(1, x.ndim)) + norm = torch.linalg.vector_norm(x, dim=dim, keepdim=True, dtype=torch.float32) + norm = torch.add(eps, norm, alpha=math.sqrt(norm.numel() / x.numel())) + return x / norm.to(x.dtype) + + +class LearnablePosEmbAxis(nn.Module): + """Learnable per-axis positional embeddings.""" + + def __init__( + self, + *, + model_channels: int, + len_h: int, + len_w: int, + len_t: int, + device: Optional[torch.device] = None, + dtype: Optional[torch.dtype] = None, + **kwargs, + ): + super().__init__() + self.pos_emb_h = nn.Parameter(torch.empty(len_h, model_channels, device=device, dtype=dtype)) + self.pos_emb_w = nn.Parameter(torch.empty(len_w, model_channels, device=device, dtype=dtype)) + self.pos_emb_t = 
nn.Parameter(torch.empty(len_t, model_channels, device=device, dtype=dtype)) + + def forward( + self, + x_B_T_H_W_C: torch.Tensor, + fps: Optional[torch.Tensor] = None, + device: Optional[torch.device] = None, + dtype: Optional[torch.dtype] = None, + ) -> torch.Tensor: + return self.generate_embeddings(x_B_T_H_W_C.shape, device=device, dtype=dtype) + + def generate_embeddings( + self, + B_T_H_W_C: torch.Size, + fps: Optional[torch.Tensor] = None, + device: Optional[torch.device] = None, + dtype: Optional[torch.dtype] = None, + ) -> torch.Tensor: + B, T, H, W, _ = B_T_H_W_C + emb_h_H = self.pos_emb_h[:H].to(device=device, dtype=dtype) + emb_w_W = self.pos_emb_w[:W].to(device=device, dtype=dtype) + emb_t_T = self.pos_emb_t[:T].to(device=device, dtype=dtype) + emb = ( + repeat(emb_t_T, "t d -> b t h w d", b=B, h=H, w=W) + + repeat(emb_h_H, "h d -> b t h w d", b=B, t=T, w=W) + + repeat(emb_w_W, "w d -> b t h w d", b=B, t=T, h=H) + ) + return _normalize(emb, dim=-1, eps=1e-6) + + +# ============================================================================ +# Cosmos Predict2 MiniTrainDIT (from comfy/ldm/cosmos/predict2.py) +# ============================================================================ + + +def apply_rotary_pos_emb_cosmos(t: torch.Tensor, freqs: torch.Tensor) -> torch.Tensor: + """Apply rotary position embeddings in Cosmos format (2x2 rotation matrices).""" + t_ = t.reshape(*t.shape[:-1], 2, -1).movedim(-2, -1).unsqueeze(-2).float() + t_out = freqs[..., 0] * t_[..., 0] + freqs[..., 1] * t_[..., 1] + t_out = t_out.movedim(-1, -2).reshape(*t.shape).type_as(t) + return t_out + + +class GPT2FeedForward(nn.Module): + def __init__(self, d_model: int, d_ff: int) -> None: + super().__init__() + self.activation = nn.GELU() + self.layer1 = nn.Linear(d_model, d_ff, bias=False) + self.layer2 = nn.Linear(d_ff, d_model, bias=False) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + return self.layer2(self.activation(self.layer1(x))) + + +class 
CosmosAttention(nn.Module): + """Multi-head attention for the Cosmos DiT backbone. + + Supports both self-attention and cross-attention with QK normalization + and rotary position embeddings. + """ + + def __init__( + self, + query_dim: int, + context_dim: Optional[int] = None, + n_heads: int = 8, + head_dim: int = 64, + dropout: float = 0.0, + ) -> None: + super().__init__() + self.is_selfattn = context_dim is None + context_dim = query_dim if context_dim is None else context_dim + inner_dim = head_dim * n_heads + + self.n_heads = n_heads + self.head_dim = head_dim + + self.q_proj = nn.Linear(query_dim, inner_dim, bias=False) + self.q_norm = nn.RMSNorm(head_dim, eps=1e-6) + + self.k_proj = nn.Linear(context_dim, inner_dim, bias=False) + self.k_norm = nn.RMSNorm(head_dim, eps=1e-6) + + self.v_proj = nn.Linear(context_dim, inner_dim, bias=False) + self.v_norm = nn.Identity() + + self.output_proj = nn.Linear(inner_dim, query_dim, bias=False) + self.output_dropout = nn.Dropout(dropout) if dropout > 1e-4 else nn.Identity() + + def forward( + self, + x: torch.Tensor, + context: Optional[torch.Tensor] = None, + rope_emb: Optional[torch.Tensor] = None, + ) -> torch.Tensor: + q = self.q_proj(x) + context = x if context is None else context + k = self.k_proj(context) + v = self.v_proj(context) + q, k, v = (rearrange(t, "b ... (h d) -> b ... h d", h=self.n_heads, d=self.head_dim) for t in (q, k, v)) + + q = self.q_norm(q) + k = self.k_norm(k) + v = self.v_norm(v) + + if self.is_selfattn and rope_emb is not None: + q = apply_rotary_pos_emb_cosmos(q, rope_emb) + k = apply_rotary_pos_emb_cosmos(k, rope_emb) + + # Reshape for scaled_dot_product_attention: (B, heads, seq, dim) + in_q_shape = q.shape + in_k_shape = k.shape + q = rearrange(q, "b ... h d -> b h ... d").reshape(in_q_shape[0], in_q_shape[-2], -1, in_q_shape[-1]) + k = rearrange(k, "b ... h d -> b h ... d").reshape(in_k_shape[0], in_k_shape[-2], -1, in_k_shape[-1]) + v = rearrange(v, "b ... h d -> b h ... 
d").reshape(in_k_shape[0], in_k_shape[-2], -1, in_k_shape[-1]) + + result = F.scaled_dot_product_attention(q, k, v) + result = rearrange(result, "b h s d -> b s (h d)") + return self.output_dropout(self.output_proj(result)) + + +class Timesteps(nn.Module): + """Sinusoidal timestep embeddings.""" + + def __init__(self, num_channels: int): + super().__init__() + self.num_channels = num_channels + + def forward(self, timesteps_B_T: torch.Tensor) -> torch.Tensor: + assert timesteps_B_T.ndim == 2 + timesteps = timesteps_B_T.flatten().float() + half_dim = self.num_channels // 2 + exponent = -math.log(10000) * torch.arange(half_dim, dtype=torch.float32, device=timesteps.device) / half_dim + emb = timesteps[:, None].float() * torch.exp(exponent)[None, :] + emb = torch.cat([torch.cos(emb), torch.sin(emb)], dim=-1) + return rearrange(emb, "(b t) d -> b t d", b=timesteps_B_T.shape[0], t=timesteps_B_T.shape[1]) + + +class TimestepEmbedding(nn.Module): + """Projects sinusoidal timestep embeddings to model dimension.""" + + def __init__(self, in_features: int, out_features: int, use_adaln_lora: bool = False): + super().__init__() + self.use_adaln_lora = use_adaln_lora + self.linear_1 = nn.Linear(in_features, out_features, bias=not use_adaln_lora) + self.activation = nn.SiLU() + if use_adaln_lora: + self.linear_2 = nn.Linear(out_features, 3 * out_features, bias=False) + else: + self.linear_2 = nn.Linear(out_features, out_features, bias=False) + + def forward(self, sample: torch.Tensor) -> Tuple[torch.Tensor, Optional[torch.Tensor]]: + emb = self.linear_2(self.activation(self.linear_1(sample))) + if self.use_adaln_lora: + return sample, emb + return emb, None + + +class PatchEmbed(nn.Module): + """Patchify input tensor via rearrange + linear projection.""" + + def __init__( + self, + spatial_patch_size: int, + temporal_patch_size: int, + in_channels: int = 3, + out_channels: int = 768, + ): + super().__init__() + self.spatial_patch_size = spatial_patch_size + 
self.temporal_patch_size = temporal_patch_size + self.proj = nn.Sequential( + Rearrange( + "b c (t r) (h m) (w n) -> b t h w (c r m n)", + r=temporal_patch_size, + m=spatial_patch_size, + n=spatial_patch_size, + ), + nn.Linear( + in_channels * spatial_patch_size * spatial_patch_size * temporal_patch_size, + out_channels, + bias=False, + ), + ) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + assert x.dim() == 5 + return self.proj(x) + + +class FinalLayer(nn.Module): + """Final AdaLN-modulated output projection.""" + + def __init__( + self, + hidden_size: int, + spatial_patch_size: int, + temporal_patch_size: int, + out_channels: int, + use_adaln_lora: bool = False, + adaln_lora_dim: int = 256, + ): + super().__init__() + self.layer_norm = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6) + self.linear = nn.Linear( + hidden_size, spatial_patch_size * spatial_patch_size * temporal_patch_size * out_channels, bias=False + ) + self.hidden_size = hidden_size + self.use_adaln_lora = use_adaln_lora + + if use_adaln_lora: + self.adaln_modulation = nn.Sequential( + nn.SiLU(), + nn.Linear(hidden_size, adaln_lora_dim, bias=False), + nn.Linear(adaln_lora_dim, 2 * hidden_size, bias=False), + ) + else: + self.adaln_modulation = nn.Sequential( + nn.SiLU(), + nn.Linear(hidden_size, 2 * hidden_size, bias=False), + ) + + def forward( + self, + x_B_T_H_W_D: torch.Tensor, + emb_B_T_D: torch.Tensor, + adaln_lora_B_T_3D: Optional[torch.Tensor] = None, + ) -> torch.Tensor: + if self.use_adaln_lora: + assert adaln_lora_B_T_3D is not None + shift, scale = ( + self.adaln_modulation(emb_B_T_D) + adaln_lora_B_T_3D[:, :, : 2 * self.hidden_size] + ).chunk(2, dim=-1) + else: + shift, scale = self.adaln_modulation(emb_B_T_D).chunk(2, dim=-1) + + shift = rearrange(shift, "b t d -> b t 1 1 d") + scale = rearrange(scale, "b t d -> b t 1 1 d") + + x_B_T_H_W_D = self.layer_norm(x_B_T_H_W_D) * (1 + scale) + shift + return self.linear(x_B_T_H_W_D) + + +class DiTBlock(nn.Module): + 
"""Cosmos DiT transformer block with self-attention, cross-attention, and MLP. + + Each component uses AdaLN (Adaptive Layer Normalization) modulation from + the timestep embedding. + """ + + def __init__( + self, + x_dim: int, + context_dim: int, + num_heads: int, + mlp_ratio: float = 4.0, + use_adaln_lora: bool = False, + adaln_lora_dim: int = 256, + ): + super().__init__() + self.x_dim = x_dim + self.use_adaln_lora = use_adaln_lora + + self.layer_norm_self_attn = nn.LayerNorm(x_dim, elementwise_affine=False, eps=1e-6) + self.self_attn = CosmosAttention(x_dim, None, num_heads, x_dim // num_heads) + + self.layer_norm_cross_attn = nn.LayerNorm(x_dim, elementwise_affine=False, eps=1e-6) + self.cross_attn = CosmosAttention(x_dim, context_dim, num_heads, x_dim // num_heads) + + self.layer_norm_mlp = nn.LayerNorm(x_dim, elementwise_affine=False, eps=1e-6) + self.mlp = GPT2FeedForward(x_dim, int(x_dim * mlp_ratio)) + + # AdaLN modulation layers (shift, scale, gate for each of 3 components) + if use_adaln_lora: + self.adaln_modulation_self_attn = nn.Sequential( + nn.SiLU(), nn.Linear(x_dim, adaln_lora_dim, bias=False), nn.Linear(adaln_lora_dim, 3 * x_dim, bias=False) + ) + self.adaln_modulation_cross_attn = nn.Sequential( + nn.SiLU(), nn.Linear(x_dim, adaln_lora_dim, bias=False), nn.Linear(adaln_lora_dim, 3 * x_dim, bias=False) + ) + self.adaln_modulation_mlp = nn.Sequential( + nn.SiLU(), nn.Linear(x_dim, adaln_lora_dim, bias=False), nn.Linear(adaln_lora_dim, 3 * x_dim, bias=False) + ) + else: + self.adaln_modulation_self_attn = nn.Sequential(nn.SiLU(), nn.Linear(x_dim, 3 * x_dim, bias=False)) + self.adaln_modulation_cross_attn = nn.Sequential(nn.SiLU(), nn.Linear(x_dim, 3 * x_dim, bias=False)) + self.adaln_modulation_mlp = nn.Sequential(nn.SiLU(), nn.Linear(x_dim, 3 * x_dim, bias=False)) + + def forward( + self, + x_B_T_H_W_D: torch.Tensor, + emb_B_T_D: torch.Tensor, + crossattn_emb: torch.Tensor, + rope_emb_L_1_1_D: Optional[torch.Tensor] = None, + adaln_lora_B_T_3D: 
Optional[torch.Tensor] = None, + extra_per_block_pos_emb: Optional[torch.Tensor] = None, + ) -> torch.Tensor: + residual_dtype = x_B_T_H_W_D.dtype + compute_dtype = emb_B_T_D.dtype + + if extra_per_block_pos_emb is not None: + x_B_T_H_W_D = x_B_T_H_W_D + extra_per_block_pos_emb + + # Compute AdaLN modulations + if self.use_adaln_lora: + assert adaln_lora_B_T_3D is not None + shift_sa, scale_sa, gate_sa = ( + self.adaln_modulation_self_attn(emb_B_T_D) + adaln_lora_B_T_3D + ).chunk(3, dim=-1) + shift_ca, scale_ca, gate_ca = ( + self.adaln_modulation_cross_attn(emb_B_T_D) + adaln_lora_B_T_3D + ).chunk(3, dim=-1) + shift_mlp, scale_mlp, gate_mlp = ( + self.adaln_modulation_mlp(emb_B_T_D) + adaln_lora_B_T_3D + ).chunk(3, dim=-1) + else: + shift_sa, scale_sa, gate_sa = self.adaln_modulation_self_attn(emb_B_T_D).chunk(3, dim=-1) + shift_ca, scale_ca, gate_ca = self.adaln_modulation_cross_attn(emb_B_T_D).chunk(3, dim=-1) + shift_mlp, scale_mlp, gate_mlp = self.adaln_modulation_mlp(emb_B_T_D).chunk(3, dim=-1) + + # Reshape for broadcasting: (B, T, D) -> (B, T, 1, 1, D) + shift_sa, scale_sa, gate_sa = (rearrange(t, "b t d -> b t 1 1 d") for t in (shift_sa, scale_sa, gate_sa)) + shift_ca, scale_ca, gate_ca = (rearrange(t, "b t d -> b t 1 1 d") for t in (shift_ca, scale_ca, gate_ca)) + shift_mlp, scale_mlp, gate_mlp = (rearrange(t, "b t d -> b t 1 1 d") for t in (shift_mlp, scale_mlp, gate_mlp)) + + B, T, H, W, D = x_B_T_H_W_D.shape + + def _adaln(x: torch.Tensor, norm: nn.Module, scale: torch.Tensor, shift: torch.Tensor) -> torch.Tensor: + return norm(x) * (1 + scale) + shift + + # Self-attention + normed = _adaln(x_B_T_H_W_D, self.layer_norm_self_attn, scale_sa, shift_sa) + result = rearrange( + self.self_attn(rearrange(normed.to(compute_dtype), "b t h w d -> b (t h w) d"), None, rope_emb=rope_emb_L_1_1_D), + "b (t h w) d -> b t h w d", t=T, h=H, w=W, + ) + x_B_T_H_W_D = x_B_T_H_W_D + gate_sa.to(residual_dtype) * result.to(residual_dtype) + + # Cross-attention + normed = 
_adaln(x_B_T_H_W_D, self.layer_norm_cross_attn, scale_ca, shift_ca) + result = rearrange( + self.cross_attn(rearrange(normed.to(compute_dtype), "b t h w d -> b (t h w) d"), crossattn_emb, rope_emb=rope_emb_L_1_1_D), + "b (t h w) d -> b t h w d", t=T, h=H, w=W, + ) + x_B_T_H_W_D = result.to(residual_dtype) * gate_ca.to(residual_dtype) + x_B_T_H_W_D + + # MLP + normed = _adaln(x_B_T_H_W_D, self.layer_norm_mlp, scale_mlp, shift_mlp) + result = self.mlp(normed.to(compute_dtype)) + x_B_T_H_W_D = x_B_T_H_W_D + gate_mlp.to(residual_dtype) * result.to(residual_dtype) + + return x_B_T_H_W_D + + +class MiniTrainDIT(nn.Module): + """Cosmos Predict2 DiT backbone for video/image generation. + + This is the core transformer architecture that Anima extends. It processes + 3D latent tensors (B, C, T, H, W) with patch embedding, positional encoding, + and adaptive layer normalization. + + Args: + max_img_h: Maximum image height in pixels. + max_img_w: Maximum image width in pixels. + max_frames: Maximum number of video frames. + in_channels: Number of input latent channels. + out_channels: Number of output channels. + patch_spatial: Spatial patch size. + patch_temporal: Temporal patch size. + concat_padding_mask: Whether to concatenate a padding mask channel. + model_channels: Hidden dimension of the transformer. + num_blocks: Number of DiT blocks. + num_heads: Number of attention heads. + mlp_ratio: MLP expansion ratio. + crossattn_emb_channels: Cross-attention context dimension. + use_adaln_lora: Whether to use AdaLN-LoRA. + adaln_lora_dim: AdaLN-LoRA bottleneck dimension. + extra_per_block_abs_pos_emb: Whether to use extra learnable positional embeddings. 
+ """ + + def __init__( + self, + max_img_h: int = 240, + max_img_w: int = 240, + max_frames: int = 1, + in_channels: int = 16, + out_channels: int = 16, + patch_spatial: int = 2, + patch_temporal: int = 1, + concat_padding_mask: bool = True, + model_channels: int = 2048, + num_blocks: int = 28, + num_heads: int = 16, + mlp_ratio: float = 4.0, + crossattn_emb_channels: int = 1024, + pos_emb_cls: str = "rope3d", + pos_emb_learnable: bool = False, + pos_emb_interpolation: str = "crop", + min_fps: int = 1, + max_fps: int = 30, + use_adaln_lora: bool = False, + adaln_lora_dim: int = 256, + rope_h_extrapolation_ratio: float = 1.0, + rope_w_extrapolation_ratio: float = 1.0, + rope_t_extrapolation_ratio: float = 1.0, + extra_per_block_abs_pos_emb: bool = False, + extra_h_extrapolation_ratio: float = 1.0, + extra_w_extrapolation_ratio: float = 1.0, + extra_t_extrapolation_ratio: float = 1.0, + rope_enable_fps_modulation: bool = True, + image_model: Optional[str] = None, + ) -> None: + super().__init__() + self.max_img_h = max_img_h + self.max_img_w = max_img_w + self.max_frames = max_frames + self.in_channels = in_channels + self.out_channels = out_channels + self.patch_spatial = patch_spatial + self.patch_temporal = patch_temporal + self.num_heads = num_heads + self.num_blocks = num_blocks + self.model_channels = model_channels + self.concat_padding_mask = concat_padding_mask + self.pos_emb_cls = pos_emb_cls + self.extra_per_block_abs_pos_emb = extra_per_block_abs_pos_emb + + # Positional embeddings + rope_kwargs = dict( + head_dim=model_channels // num_heads, + len_h=max_img_h // patch_spatial, + len_w=max_img_w // patch_spatial, + len_t=max_frames // patch_temporal, + max_fps=max_fps, + min_fps=min_fps, + h_extrapolation_ratio=rope_h_extrapolation_ratio, + w_extrapolation_ratio=rope_w_extrapolation_ratio, + t_extrapolation_ratio=rope_t_extrapolation_ratio, + enable_fps_modulation=rope_enable_fps_modulation, + ) + self.pos_embedder = VideoRopePosition3DEmb(**rope_kwargs) 
+ + if extra_per_block_abs_pos_emb: + self.extra_pos_embedder = LearnablePosEmbAxis( + model_channels=model_channels, + len_h=max_img_h // patch_spatial, + len_w=max_img_w // patch_spatial, + len_t=max_frames // patch_temporal, + ) + + self.use_adaln_lora = use_adaln_lora + self.adaln_lora_dim = adaln_lora_dim + + # Timestep embedding + self.t_embedder = nn.Sequential( + Timesteps(model_channels), + TimestepEmbedding(model_channels, model_channels, use_adaln_lora=use_adaln_lora), + ) + self.t_embedding_norm = nn.RMSNorm(model_channels, eps=1e-6) + + # Patch embedding + embed_in_channels = in_channels + 1 if concat_padding_mask else in_channels + self.x_embedder = PatchEmbed( + spatial_patch_size=patch_spatial, + temporal_patch_size=patch_temporal, + in_channels=embed_in_channels, + out_channels=model_channels, + ) + + # Transformer blocks + self.blocks = nn.ModuleList([ + DiTBlock( + x_dim=model_channels, + context_dim=crossattn_emb_channels, + num_heads=num_heads, + mlp_ratio=mlp_ratio, + use_adaln_lora=use_adaln_lora, + adaln_lora_dim=adaln_lora_dim, + ) + for _ in range(num_blocks) + ]) + + # Final output layer + self.final_layer = FinalLayer( + hidden_size=model_channels, + spatial_patch_size=patch_spatial, + temporal_patch_size=patch_temporal, + out_channels=out_channels, + use_adaln_lora=use_adaln_lora, + adaln_lora_dim=adaln_lora_dim, + ) + + def _pad_to_patch_size(self, x: torch.Tensor) -> torch.Tensor: + """Pad input tensor so dimensions are divisible by patch sizes.""" + _, _, T, H, W = x.shape + pad_t = (self.patch_temporal - T % self.patch_temporal) % self.patch_temporal + pad_h = (self.patch_spatial - H % self.patch_spatial) % self.patch_spatial + pad_w = (self.patch_spatial - W % self.patch_spatial) % self.patch_spatial + if pad_t > 0 or pad_h > 0 or pad_w > 0: + x = F.pad(x, (0, pad_w, 0, pad_h, 0, pad_t)) + return x + + def prepare_embedded_sequence( + self, + x_B_C_T_H_W: torch.Tensor, + fps: Optional[torch.Tensor] = None, + padding_mask: 
Optional[torch.Tensor] = None, + ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[torch.Tensor]]: + if self.concat_padding_mask: + if padding_mask is None: + padding_mask = torch.zeros( + x_B_C_T_H_W.shape[0], 1, x_B_C_T_H_W.shape[3], x_B_C_T_H_W.shape[4], + dtype=x_B_C_T_H_W.dtype, device=x_B_C_T_H_W.device, + ) + x_B_C_T_H_W = torch.cat( + [x_B_C_T_H_W, padding_mask.unsqueeze(1).repeat(1, 1, x_B_C_T_H_W.shape[2], 1, 1)], dim=1 + ) + + x_B_T_H_W_D = self.x_embedder(x_B_C_T_H_W) + + extra_pos_emb = None + if self.extra_per_block_abs_pos_emb: + extra_pos_emb = self.extra_pos_embedder(x_B_T_H_W_D, fps=fps, device=x_B_C_T_H_W.device, dtype=x_B_C_T_H_W.dtype) + + if "rope" in self.pos_emb_cls.lower(): + return x_B_T_H_W_D, self.pos_embedder(x_B_T_H_W_D, fps=fps, device=x_B_C_T_H_W.device), extra_pos_emb + + return x_B_T_H_W_D, None, extra_pos_emb + + def unpatchify(self, x_B_T_H_W_M: torch.Tensor) -> torch.Tensor: + return rearrange( + x_B_T_H_W_M, + "B T H W (p1 p2 t C) -> B C (T t) (H p1) (W p2)", + p1=self.patch_spatial, + p2=self.patch_spatial, + t=self.patch_temporal, + ) + + def forward( + self, + x: torch.Tensor, + timesteps: torch.Tensor, + context: torch.Tensor, + fps: Optional[torch.Tensor] = None, + padding_mask: Optional[torch.Tensor] = None, + **kwargs, + ) -> torch.Tensor: + orig_shape = list(x.shape) + x = self._pad_to_patch_size(x) + + x_B_T_H_W_D, rope_emb_L_1_1_D, extra_pos_emb = self.prepare_embedded_sequence(x, fps=fps, padding_mask=padding_mask) + + if timesteps.ndim == 1: + timesteps = timesteps.unsqueeze(1) + t_emb, adaln_lora = self.t_embedder[1](self.t_embedder[0](timesteps).to(x_B_T_H_W_D.dtype)) + t_emb = self.t_embedding_norm(t_emb) + + block_kwargs = { + "rope_emb_L_1_1_D": rope_emb_L_1_1_D.unsqueeze(1).unsqueeze(0) if rope_emb_L_1_1_D is not None else None, + "adaln_lora_B_T_3D": adaln_lora, + "extra_per_block_pos_emb": extra_pos_emb, + } + + # Keep residual stream in fp32 for numerical stability with fp16 compute + if 
x_B_T_H_W_D.dtype == torch.float16: + x_B_T_H_W_D = x_B_T_H_W_D.float() + + for block in self.blocks: + x_B_T_H_W_D = block(x_B_T_H_W_D, t_emb, context, **block_kwargs) + + x_out = self.final_layer(x_B_T_H_W_D.to(context.dtype), t_emb, adaln_lora_B_T_3D=adaln_lora) + x_out = self.unpatchify(x_out)[:, :, : orig_shape[-3], : orig_shape[-2], : orig_shape[-1]] + return x_out + + +# ============================================================================ +# LLM Adapter (from comfy/ldm/anima/model.py) +# ============================================================================ + + +def _rotate_half(x: torch.Tensor) -> torch.Tensor: + x1 = x[..., : x.shape[-1] // 2] + x2 = x[..., x.shape[-1] // 2 :] + return torch.cat((-x2, x1), dim=-1) + + +def _apply_rotary_pos_emb_llm( + x: torch.Tensor, cos: torch.Tensor, sin: torch.Tensor, unsqueeze_dim: int = 1 +) -> torch.Tensor: + cos = cos.unsqueeze(unsqueeze_dim) + sin = sin.unsqueeze(unsqueeze_dim) + return (x * cos) + (_rotate_half(x) * sin) + + +class LLMAdapterRotaryEmbedding(nn.Module): + """Rotary position embedding for the LLM Adapter's attention layers.""" + + def __init__(self, head_dim: int): + super().__init__() + self.rope_theta = 10000 + inv_freq = 1.0 / (self.rope_theta ** (torch.arange(0, head_dim, 2, dtype=torch.int64).float() / head_dim)) + self.register_buffer("inv_freq", inv_freq, persistent=False) + + @torch.no_grad() + def forward(self, x: torch.Tensor, position_ids: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: + inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device) + position_ids_expanded = position_ids[:, None, :].float() + + device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu" + with torch.autocast(device_type=device_type, enabled=False): + freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2) + emb = torch.cat((freqs, freqs), dim=-1) + cos = emb.cos() + sin = 
emb.sin() + + return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype) + + +class LLMAdapterAttention(nn.Module): + """Attention for the LLM Adapter's transformer blocks. + + Supports both self-attention and cross-attention with separate rotary + position embeddings for query and key sequences. + """ + + def __init__(self, query_dim: int, context_dim: int, n_heads: int, head_dim: int): + super().__init__() + inner_dim = head_dim * n_heads + self.n_heads = n_heads + self.head_dim = head_dim + + self.q_proj = nn.Linear(query_dim, inner_dim, bias=False) + self.q_norm = nn.RMSNorm(head_dim, eps=1e-6) + self.k_proj = nn.Linear(context_dim, inner_dim, bias=False) + self.k_norm = nn.RMSNorm(head_dim, eps=1e-6) + self.v_proj = nn.Linear(context_dim, inner_dim, bias=False) + self.o_proj = nn.Linear(inner_dim, query_dim, bias=False) + + def forward( + self, + x: torch.Tensor, + mask: Optional[torch.Tensor] = None, + context: Optional[torch.Tensor] = None, + position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, + position_embeddings_context: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, + ) -> torch.Tensor: + context = x if context is None else context + input_shape = x.shape[:-1] + q_shape = (*input_shape, self.n_heads, self.head_dim) + context_shape = context.shape[:-1] + kv_shape = (*context_shape, self.n_heads, self.head_dim) + + query_states = self.q_norm(self.q_proj(x).view(q_shape)).transpose(1, 2) + key_states = self.k_norm(self.k_proj(context).view(kv_shape)).transpose(1, 2) + value_states = self.v_proj(context).view(kv_shape).transpose(1, 2) + + if position_embeddings is not None: + assert position_embeddings_context is not None + cos, sin = position_embeddings + query_states = _apply_rotary_pos_emb_llm(query_states, cos, sin) + cos, sin = position_embeddings_context + key_states = _apply_rotary_pos_emb_llm(key_states, cos, sin) + + attn_output = F.scaled_dot_product_attention(query_states, key_states, value_states, attn_mask=mask) + attn_output 
= attn_output.transpose(1, 2).reshape(*input_shape, -1).contiguous() + return self.o_proj(attn_output) + + +class LLMAdapterTransformerBlock(nn.Module): + """Single transformer block in the LLM Adapter. + + Each block contains: + - Optional self-attention on the target (T5 embedding) sequence + - Cross-attention: target queries attend to source (Qwen3) keys/values + - MLP with GELU activation + """ + + def __init__( + self, + source_dim: int, + model_dim: int, + num_heads: int = 16, + mlp_ratio: float = 4.0, + use_self_attn: bool = False, + ): + super().__init__() + self.use_self_attn = use_self_attn + head_dim = model_dim // num_heads + + if self.use_self_attn: + self.norm_self_attn = nn.RMSNorm(model_dim, eps=1e-6) + self.self_attn = LLMAdapterAttention(model_dim, model_dim, num_heads, head_dim) + + self.norm_cross_attn = nn.RMSNorm(model_dim, eps=1e-6) + self.cross_attn = LLMAdapterAttention(model_dim, source_dim, num_heads, head_dim) + + self.norm_mlp = nn.RMSNorm(model_dim, eps=1e-6) + self.mlp = nn.Sequential( + nn.Linear(model_dim, int(model_dim * mlp_ratio)), + nn.GELU(), + nn.Linear(int(model_dim * mlp_ratio), model_dim), + ) + + def forward( + self, + x: torch.Tensor, + context: torch.Tensor, + target_attention_mask: Optional[torch.Tensor] = None, + source_attention_mask: Optional[torch.Tensor] = None, + position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, + position_embeddings_context: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, + ) -> torch.Tensor: + if self.use_self_attn: + normed = self.norm_self_attn(x) + attn_out = self.self_attn( + normed, mask=target_attention_mask, + position_embeddings=position_embeddings, position_embeddings_context=position_embeddings, + ) + x = x + attn_out + + normed = self.norm_cross_attn(x) + attn_out = self.cross_attn( + normed, mask=source_attention_mask, context=context, + position_embeddings=position_embeddings, position_embeddings_context=position_embeddings_context, + ) + x = x + attn_out 
class LLMAdapter(nn.Module):
    """Bridge between Qwen3 hidden states and learned T5-XXL token embeddings.

    Anima's key custom component. Given Qwen3 0.6B hidden states
    (`source_hidden_states`, dim=1024) and T5-XXL token IDs
    (`target_input_ids`), it:

    1. Embeds the T5 token IDs with a learned Embedding(32128, target_dim).
    2. Runs the embedded tokens through `num_layers` transformer blocks that
       cross-attend to the Qwen3 hidden states.
    3. Projects and RMS-normalizes the result.

    The output is the conditioning fed to the Cosmos DiT cross-attention.

    Args:
        source_dim: Dimension of source (Qwen3) hidden states.
        target_dim: Dimension of target (T5) embeddings.
        model_dim: Internal model dimension.
        num_layers: Number of transformer layers.
        num_heads: Number of attention heads.
        use_self_attn: Whether blocks include self-attention.
    """

    def __init__(
        self,
        source_dim: int = 1024,
        target_dim: int = 1024,
        model_dim: int = 1024,
        num_layers: int = 6,
        num_heads: int = 16,
        use_self_attn: bool = True,
    ):
        super().__init__()
        # 32128 is the T5-XXL vocabulary size.
        self.embed = nn.Embedding(32128, target_dim)
        # Only project when the internal width differs from the embedding width.
        self.in_proj = nn.Identity() if model_dim == target_dim else nn.Linear(target_dim, model_dim)
        self.rotary_emb = LLMAdapterRotaryEmbedding(model_dim // num_heads)
        self.blocks = nn.ModuleList(
            LLMAdapterTransformerBlock(source_dim, model_dim, num_heads=num_heads, use_self_attn=use_self_attn)
            for _ in range(num_layers)
        )
        self.out_proj = nn.Linear(model_dim, target_dim)
        self.norm = nn.RMSNorm(target_dim, eps=1e-6)

    @staticmethod
    def _expand_mask(mask: Optional[torch.Tensor]) -> Optional[torch.Tensor]:
        """Cast a mask to bool and broadcast (B, S) -> (B, 1, 1, S) for SDPA."""
        if mask is None:
            return None
        mask = mask.to(torch.bool)
        if mask.ndim == 2:
            mask = mask.unsqueeze(1).unsqueeze(1)
        return mask

    def forward(
        self,
        source_hidden_states: torch.Tensor,
        target_input_ids: torch.Tensor,
        target_attention_mask: Optional[torch.Tensor] = None,
        source_attention_mask: Optional[torch.Tensor] = None,
    ) -> torch.Tensor:
        target_attention_mask = self._expand_mask(target_attention_mask)
        source_attention_mask = self._expand_mask(source_attention_mask)

        context = source_hidden_states
        # nn.Embedding has no out_dtype argument; cast the embedded tokens afterwards.
        x = self.in_proj(self.embed(target_input_ids).to(dtype=context.dtype))

        tgt_pos = torch.arange(x.shape[1], device=x.device).unsqueeze(0)
        src_pos = torch.arange(context.shape[1], device=x.device).unsqueeze(0)
        rope_target = self.rotary_emb(x, tgt_pos)
        rope_source = self.rotary_emb(x, src_pos)

        for block in self.blocks:
            x = block(
                x,
                context,
                target_attention_mask=target_attention_mask,
                source_attention_mask=source_attention_mask,
                position_embeddings=rope_target,
                position_embeddings_context=rope_source,
            )
        return self.norm(self.out_proj(x))
Passes the conditioning to MiniTrainDIT's cross-attention + + Default configuration for Anima: + - model_channels=2048, num_blocks=28, num_heads=16 + - crossattn_emb_channels=1024, patch_spatial=2, patch_temporal=1 + - in_channels=16, out_channels=16 + - use_adaln_lora=True, adaln_lora_dim=256 + - extra_per_block_abs_pos_emb=True + """ + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.llm_adapter = LLMAdapter() + + def preprocess_text_embeds( + self, + text_embeds: torch.Tensor, + text_ids: Optional[torch.Tensor], + t5xxl_weights: Optional[torch.Tensor] = None, + ) -> torch.Tensor: + """Run the LLM Adapter to produce conditioning for the DiT. + + Args: + text_embeds: Qwen3 hidden states. Shape: (batch, seq_len, 1024). + text_ids: T5-XXL token IDs. Shape: (batch, seq_len). If None, returns text_embeds directly. + t5xxl_weights: Optional per-token weights. Shape: (batch, seq_len, 1). + + Returns: + Conditioning tensor. Shape: (batch, 512, 1024), zero-padded if needed. + """ + if text_ids is not None: + out = self.llm_adapter(text_embeds, text_ids) + if t5xxl_weights is not None: + out = out * t5xxl_weights + if out.shape[1] < 512: + out = F.pad(out, (0, 0, 0, 512 - out.shape[1])) + return out + else: + return text_embeds + + def forward( + self, + x: torch.Tensor, + timesteps: torch.Tensor, + context: torch.Tensor, + t5xxl_ids: Optional[torch.Tensor] = None, + t5xxl_weights: Optional[torch.Tensor] = None, + **kwargs, + ) -> torch.Tensor: + """Forward pass with LLM Adapter preprocessing. + + Args: + x: Input latent tensor. Shape: (B, C, T, H, W). + timesteps: Timestep values. Shape: (B,) or (B, T). + context: Qwen3 hidden states. Shape: (B, seq_len, 1024). + t5xxl_ids: T5-XXL token IDs. Shape: (B, seq_len). + t5xxl_weights: Per-token weights. Shape: (B, seq_len, 1). + + Returns: + Denoised output. Shape: (B, C, T, H, W). 
+ """ + if t5xxl_ids is not None: + context = self.preprocess_text_embeds(context, t5xxl_ids, t5xxl_weights=t5xxl_weights) + return super().forward(x, timesteps, context, **kwargs) diff --git a/invokeai/backend/anima/conditioning_data.py b/invokeai/backend/anima/conditioning_data.py new file mode 100644 index 00000000000..067b1d29768 --- /dev/null +++ b/invokeai/backend/anima/conditioning_data.py @@ -0,0 +1,30 @@ +"""Anima text conditioning data structures. + +Anima uses a dual-conditioning scheme: +- Qwen3 0.6B hidden states (continuous embeddings) +- T5-XXL token IDs (discrete IDs, embedded by the LLM Adapter inside the transformer) + +Both are produced by the text encoder invocation and stored together. +""" + +from dataclasses import dataclass + +import torch + + +@dataclass +class AnimaTextConditioning: + """Anima text conditioning with Qwen3 hidden states and T5-XXL token IDs. + + Attributes: + qwen3_embeds: Text embeddings from Qwen3 0.6B encoder. + Shape: (seq_len, hidden_size) where hidden_size=1024. + t5xxl_ids: T5-XXL token IDs for the same prompt. + Shape: (seq_len,). + t5xxl_weights: Per-token weights for prompt weighting. + Shape: (seq_len,). Defaults to all ones if not provided. + """ + + qwen3_embeds: torch.Tensor + t5xxl_ids: torch.Tensor + t5xxl_weights: torch.Tensor | None = None diff --git a/invokeai/backend/flux/schedulers.py b/invokeai/backend/flux/schedulers.py index e5a8a7137c2..05e6bb085f0 100644 --- a/invokeai/backend/flux/schedulers.py +++ b/invokeai/backend/flux/schedulers.py @@ -60,3 +60,23 @@ if _HAS_LCM: ZIMAGE_SCHEDULER_MAP["lcm"] = FlowMatchLCMScheduler + + +# Anima scheduler types (same Flow Matching schedulers as Flux/Z-Image) +# Anima uses rectified flow with shift=3.0 and multiplier=1000. +# Recommended: 30 steps with Euler, CFG 4-5. 
+ANIMA_SCHEDULER_NAME_VALUES = Literal["euler", "heun", "lcm"] + +ANIMA_SCHEDULER_LABELS: dict[str, str] = { + "euler": "Euler", + "heun": "Heun (2nd order)", + "lcm": "LCM", +} + +ANIMA_SCHEDULER_MAP: dict[str, Type[SchedulerMixin]] = { + "euler": FlowMatchEulerDiscreteScheduler, + "heun": FlowMatchHeunDiscreteScheduler, +} + +if _HAS_LCM: + ANIMA_SCHEDULER_MAP["lcm"] = FlowMatchLCMScheduler diff --git a/invokeai/backend/model_manager/configs/factory.py b/invokeai/backend/model_manager/configs/factory.py index 7702d4a5d9b..e5b096a931f 100644 --- a/invokeai/backend/model_manager/configs/factory.py +++ b/invokeai/backend/model_manager/configs/factory.py @@ -58,6 +58,7 @@ ) from invokeai.backend.model_manager.configs.main import ( Main_BnBNF4_FLUX_Config, + Main_Checkpoint_Anima_Config, Main_Checkpoint_Flux2_Config, Main_Checkpoint_FLUX_Config, Main_Checkpoint_SD1_Config, @@ -101,6 +102,7 @@ ) from invokeai.backend.model_manager.configs.unknown import Unknown_Config from invokeai.backend.model_manager.configs.vae import ( + VAE_Checkpoint_Anima_Config, VAE_Checkpoint_Flux2_Config, VAE_Checkpoint_FLUX_Config, VAE_Checkpoint_SD1_Config, @@ -170,6 +172,7 @@ Annotated[Main_Checkpoint_Flux2_Config, Main_Checkpoint_Flux2_Config.get_tag()], Annotated[Main_Checkpoint_FLUX_Config, Main_Checkpoint_FLUX_Config.get_tag()], Annotated[Main_Checkpoint_ZImage_Config, Main_Checkpoint_ZImage_Config.get_tag()], + Annotated[Main_Checkpoint_Anima_Config, Main_Checkpoint_Anima_Config.get_tag()], # Main (Pipeline) - quantized formats # IMPORTANT: FLUX.2 must be checked BEFORE FLUX.1 because FLUX.2 has specific validation # that will reject FLUX.1 models, but FLUX.1 validation may incorrectly match FLUX.2 models @@ -183,6 +186,7 @@ Annotated[VAE_Checkpoint_SDXL_Config, VAE_Checkpoint_SDXL_Config.get_tag()], Annotated[VAE_Checkpoint_FLUX_Config, VAE_Checkpoint_FLUX_Config.get_tag()], Annotated[VAE_Checkpoint_Flux2_Config, VAE_Checkpoint_Flux2_Config.get_tag()], + 
Annotated[VAE_Checkpoint_Anima_Config, VAE_Checkpoint_Anima_Config.get_tag()], # VAE - diffusers format Annotated[VAE_Diffusers_SD1_Config, VAE_Diffusers_SD1_Config.get_tag()], Annotated[VAE_Diffusers_SDXL_Config, VAE_Diffusers_SDXL_Config.get_tag()], diff --git a/invokeai/backend/model_manager/configs/main.py b/invokeai/backend/model_manager/configs/main.py index 6f737ceb92d..b46451dbd77 100644 --- a/invokeai/backend/model_manager/configs/main.py +++ b/invokeai/backend/model_manager/configs/main.py @@ -76,6 +76,8 @@ def from_base( else: # Turbo (distilled) uses fewer steps, no CFG return cls(steps=9, cfg_scale=1.0, width=1024, height=1024) + case BaseModelType.Anima: + return cls(steps=35, cfg_scale=4.5, width=1024, height=1024) case BaseModelType.Flux2: # Different defaults based on variant if variant == Flux2VariantType.Klein9BBase: @@ -1084,6 +1086,38 @@ def from_model_on_disk(cls, mod: ModelOnDisk, override_fields: dict[str, Any]) - ) +def _has_anima_keys(state_dict: dict[str | int, Any]) -> bool: + """Check if state dict contains Anima model keys. + + Anima models are identified by the presence of `llm_adapter` keys + (unique to Anima - the LLM Adapter that bridges Qwen3 text encoder to the Cosmos DiT) + alongside Cosmos Predict2 DiT keys (blocks, t_embedder, x_embedder, final_layer). + + The checkpoint keys may have a `net.` prefix (e.g. `net.llm_adapter.`, `net.blocks.`). 
+ """ + has_llm_adapter = False + has_cosmos_dit = False + + # Cosmos DiT key prefixes — support both with and without `net.` prefix + cosmos_prefixes = ( + "blocks.", "t_embedder.", "x_embedder.", "final_layer.", + "net.blocks.", "net.t_embedder.", "net.x_embedder.", "net.final_layer.", + ) + + for key in state_dict.keys(): + if isinstance(key, int): + continue + if key.startswith("llm_adapter.") or key.startswith("net.llm_adapter."): + has_llm_adapter = True + for prefix in cosmos_prefixes: + if key.startswith(prefix): + has_cosmos_dit = True + if has_llm_adapter and has_cosmos_dit: + return True + + return False + + class Main_Diffusers_ZImage_Config(Diffusers_Config_Base, Main_Config_Base, Config_Base): """Model config for Z-Image diffusers models (Z-Image-Turbo, Z-Image-Base).""" @@ -1199,3 +1233,30 @@ def _validate_looks_like_gguf_quantized(cls, mod: ModelOnDisk) -> None: has_ggml_tensors = _has_ggml_tensors(mod.load_state_dict()) if not has_ggml_tensors: raise NotAMatchError("state dict does not look like GGUF quantized") + + +class Main_Checkpoint_Anima_Config(Checkpoint_Config_Base, Main_Config_Base, Config_Base): + """Model config for Anima single-file checkpoint models (safetensors). + + Anima is built on NVIDIA Cosmos Predict2 DiT with a custom LLM Adapter + that bridges Qwen3 0.6B text encoder outputs to the DiT. 
+ """ + + base: Literal[BaseModelType.Anima] = Field(default=BaseModelType.Anima) + format: Literal[ModelFormat.Checkpoint] = Field(default=ModelFormat.Checkpoint) + + @classmethod + def from_model_on_disk(cls, mod: ModelOnDisk, override_fields: dict[str, Any]) -> Self: + raise_if_not_file(mod) + + raise_for_override_fields(cls, override_fields) + + cls._validate_looks_like_anima_model(mod) + + return cls(**override_fields) + + @classmethod + def _validate_looks_like_anima_model(cls, mod: ModelOnDisk) -> None: + has_anima_keys = _has_anima_keys(mod.load_state_dict()) + if not has_anima_keys: + raise NotAMatchError("state dict does not look like an Anima model") diff --git a/invokeai/backend/model_manager/configs/qwen3_encoder.py b/invokeai/backend/model_manager/configs/qwen3_encoder.py index 2e24fee9185..82cf3b62c8f 100644 --- a/invokeai/backend/model_manager/configs/qwen3_encoder.py +++ b/invokeai/backend/model_manager/configs/qwen3_encoder.py @@ -47,9 +47,10 @@ def _has_ggml_tensors(state_dict: dict[str | int, Any]) -> bool: def _get_qwen3_variant_from_state_dict(state_dict: dict[str | int, Any]) -> Optional[Qwen3VariantType]: - """Determine Qwen3 variant (4B vs 8B) from state dict based on hidden_size. + """Determine Qwen3 variant (0.6B, 4B, or 8B) from state dict based on hidden_size. The hidden_size can be determined from the embed_tokens.weight tensor shape: + - Qwen3 0.6B: hidden_size = 1024 - Qwen3 4B: hidden_size = 2560 - Qwen3 8B: hidden_size = 4096 @@ -57,6 +58,7 @@ def _get_qwen3_variant_from_state_dict(state_dict: dict[str | int, Any]) -> Opti For PyTorch format, the key is 'model.embed_tokens.weight'. 
""" # Hidden size thresholds + QWEN3_06B_HIDDEN_SIZE = 1024 QWEN3_4B_HIDDEN_SIZE = 2560 QWEN3_8B_HIDDEN_SIZE = 4096 @@ -91,7 +93,9 @@ def _get_qwen3_variant_from_state_dict(state_dict: dict[str | int, Any]) -> Opti return None # Determine variant based on hidden_size - if hidden_size == QWEN3_4B_HIDDEN_SIZE: + if hidden_size == QWEN3_06B_HIDDEN_SIZE: + return Qwen3VariantType.Qwen3_06B + elif hidden_size == QWEN3_4B_HIDDEN_SIZE: return Qwen3VariantType.Qwen3_4B elif hidden_size == QWEN3_8B_HIDDEN_SIZE: return Qwen3VariantType.Qwen3_8B @@ -206,6 +210,7 @@ def from_model_on_disk(cls, mod: ModelOnDisk, override_fields: dict[str, Any]) - @classmethod def _get_variant_from_config(cls, config_path) -> Qwen3VariantType: """Get variant from config.json based on hidden_size.""" + QWEN3_06B_HIDDEN_SIZE = 1024 QWEN3_4B_HIDDEN_SIZE = 2560 QWEN3_8B_HIDDEN_SIZE = 4096 @@ -217,6 +222,8 @@ def _get_variant_from_config(cls, config_path) -> Qwen3VariantType: return Qwen3VariantType.Qwen3_8B elif hidden_size == QWEN3_4B_HIDDEN_SIZE: return Qwen3VariantType.Qwen3_4B + elif hidden_size == QWEN3_06B_HIDDEN_SIZE: + return Qwen3VariantType.Qwen3_06B else: # Default to 4B for unknown sizes return Qwen3VariantType.Qwen3_4B diff --git a/invokeai/backend/model_manager/configs/vae.py b/invokeai/backend/model_manager/configs/vae.py index cc079cb9aae..ce26a94a6e9 100644 --- a/invokeai/backend/model_manager/configs/vae.py +++ b/invokeai/backend/model_manager/configs/vae.py @@ -175,6 +175,43 @@ def _validate_is_flux2_vae(cls, mod: ModelOnDisk) -> None: raise NotAMatchError("state dict does not look like a FLUX.2 VAE") +def _has_anima_vae_keys(state_dict: dict[str | int, Any]) -> bool: + """Check if state dict looks like an Anima QwenImage VAE (AutoencoderKLQwenImage). 
+ + The Anima VAE has a distinctive structure with: + - encoder.downsamples.* (instead of encoder.down_blocks) + - decoder.upsamples.* (instead of decoder.up_blocks) + - decoder.head.* / decoder.middle.* + - Top-level conv1/conv2 weights + """ + required_prefixes = { + "encoder.downsamples.", + "decoder.upsamples.", + "decoder.middle.", + } + return all(any(str(k).startswith(prefix) for k in state_dict) for prefix in required_prefixes) + + +class VAE_Checkpoint_Anima_Config(Checkpoint_Config_Base, Config_Base): + """Model config for Anima QwenImage VAE checkpoint models (AutoencoderKLQwenImage).""" + + type: Literal[ModelType.VAE] = Field(default=ModelType.VAE) + format: Literal[ModelFormat.Checkpoint] = Field(default=ModelFormat.Checkpoint) + base: Literal[BaseModelType.Anima] = Field(default=BaseModelType.Anima) + + @classmethod + def from_model_on_disk(cls, mod: ModelOnDisk, override_fields: dict[str, Any]) -> Self: + raise_if_not_file(mod) + + raise_for_override_fields(cls, override_fields) + + state_dict = mod.load_state_dict() + if not _has_anima_vae_keys(state_dict): + raise NotAMatchError("state dict does not look like an Anima QwenImage VAE") + + return cls(**override_fields) + + class VAE_Diffusers_Config_Base(Diffusers_Config_Base): """Model config for standalone VAE models (diffusers version).""" diff --git a/invokeai/backend/model_manager/load/model_loaders/anima.py b/invokeai/backend/model_manager/load/model_loaders/anima.py new file mode 100644 index 00000000000..e0e8ded029c --- /dev/null +++ b/invokeai/backend/model_manager/load/model_loaders/anima.py @@ -0,0 +1,127 @@ +# Copyright (c) 2024, Lincoln D. 
@ModelLoaderRegistry.register(base=BaseModelType.Anima, type=ModelType.Main, format=ModelFormat.Checkpoint)
class AnimaCheckpointModel(ModelLoader):
    """Loader for Anima transformer models stored as single-file checkpoints.

    An Anima checkpoint contains both the MiniTrainDIT backbone and the LLM
    Adapter, with every key under a shared `net.` prefix. The loader strips
    that prefix, instantiates an empty AnimaTransformer with Anima's
    architecture parameters, and materializes the checkpoint weights into it.
    """

    def _load_model(
        self,
        config: AnyModelConfig,
        submodel_type: Optional[SubModelType] = None,
    ) -> AnyModel:
        """Load the requested submodel. Only the transformer is supported.

        Raises:
            ValueError: If the config is not checkpoint-based or an
                unsupported submodel type is requested.
        """
        if not isinstance(config, Checkpoint_Config_Base):
            raise ValueError("Only CheckpointConfigBase models are currently supported here.")

        if submodel_type == SubModelType.Transformer:
            return self._load_from_singlefile(config)

        raise ValueError(
            "Only Transformer submodels are currently supported. "
            f"Received: {submodel_type.value if submodel_type else 'None'}"
        )

    def _load_from_singlefile(
        self,
        config: AnyModelConfig,
    ) -> AnyModel:
        """Build an AnimaTransformer from a single safetensors checkpoint file."""
        from safetensors.torch import load_file

        from invokeai.backend.anima.anima_transformer import AnimaTransformer

        if not isinstance(config, Main_Checkpoint_Anima_Config):
            raise TypeError(
                f"Expected Main_Checkpoint_Anima_Config, got {type(config).__name__}. "
                "Model configuration type mismatch."
            )

        sd = load_file(Path(config.path))

        # All Anima checkpoint keys carry a `net.` prefix, e.g.
        # "net.blocks.0.self_attn.q_proj.weight" -> "blocks.0.self_attn.q_proj.weight".
        prefix = "net."
        if any(isinstance(k, str) and k.startswith(prefix) for k in sd):
            sd = {
                (k[len(prefix) :] if isinstance(k, str) and k.startswith(prefix) else k): v
                for k, v in sd.items()
            }

        # Create an empty AnimaTransformer with Anima's default architecture parameters.
        # NOTE(review): the AnimaTransformer docstring/plan lists
        # extra_per_block_abs_pos_emb=True for Anima, but False is passed here —
        # confirm which value matches the released checkpoint.
        with accelerate.init_empty_weights():
            model = AnimaTransformer(
                max_img_h=240,
                max_img_w=240,
                max_frames=1,
                in_channels=16,
                out_channels=16,
                patch_spatial=2,
                patch_temporal=1,
                concat_padding_mask=True,
                model_channels=2048,
                num_blocks=28,
                num_heads=16,
                mlp_ratio=4.0,
                crossattn_emb_channels=1024,
                pos_emb_cls="rope3d",
                use_adaln_lora=True,
                adaln_lora_dim=256,
                extra_per_block_abs_pos_emb=False,
                image_model="anima",
            )

        # Pick a dtype that is safe on the target device (bf16 where supported).
        target_device = TorchDevice.choose_torch_device()
        model_dtype = TorchDevice.choose_bfloat16_safe_dtype(target_device)

        # Ask the RAM cache to make room for the converted weights. The size is
        # estimated with the target dtype's itemsize for every tensor.
        new_sd_size = sum(ten.nelement() * model_dtype.itemsize for ten in sd.values())
        self._ram_cache.make_room(new_sd_size)

        # Single pass: drop rotary `inv_freq` buffers (they are regenerated at
        # runtime) and cast floating-point tensors to the target dtype, leaving
        # non-float tensors (e.g. embedding indices) untouched.
        sd = {
            k: (v.to(model_dtype) if v.is_floating_point() else v)
            for k, v in sd.items()
            if not k.endswith(".inv_freq")
        }

        # strict=False: non-persistent buffers are absent from the checkpoint by design.
        model.load_state_dict(sd, assign=True, strict=False)
        return model
a/invokeai/backend/model_manager/starter_models.py +++ b/invokeai/backend/model_manager/starter_models.py @@ -862,6 +862,36 @@ class StarterModelBundle(BaseModel): ) # endregion +# region Anima +anima_qwen3_encoder = StarterModel( + name="Anima Qwen3 0.6B Text Encoder", + base=BaseModelType.Any, + source="https://huggingface.co/circlestone-labs/Anima/resolve/main/split_files/text_encoders/qwen_3_06b_base.safetensors", + description="Qwen3 0.6B text encoder for Anima. ~1.2GB", + type=ModelType.Qwen3Encoder, + format=ModelFormat.Checkpoint, +) + +anima_vae = StarterModel( + name="Anima QwenImage VAE", + base=BaseModelType.Anima, + source="https://huggingface.co/circlestone-labs/Anima/resolve/main/split_files/vae/qwen_image_vae.safetensors", + description="QwenImage VAE for Anima (fine-tuned Wan 2.1 VAE, 16 latent channels). ~200MB", + type=ModelType.VAE, + format=ModelFormat.Checkpoint, +) + +anima_preview2 = StarterModel( + name="Anima Preview 2", + base=BaseModelType.Anima, + source="https://huggingface.co/circlestone-labs/Anima/resolve/main/split_files/diffusion_models/anima-preview2.safetensors", + description="Anima Preview 2 - 2B parameter anime-focused text-to-image model built on Cosmos Predict2 DiT. ~4.5GB", + type=ModelType.Main, + format=ModelFormat.Checkpoint, + dependencies=[anima_qwen3_encoder, anima_vae], +) +# endregion + # List of starter models, displayed on the frontend. # The order/sort of this list is not changed by the frontend - set it how you want it here. 
STARTER_MODELS: list[StarterModel] = [ @@ -957,6 +987,9 @@ class StarterModelBundle(BaseModel): z_image_qwen3_encoder_quantized, z_image_controlnet_union, z_image_controlnet_tile, + anima_preview2, + anima_qwen3_encoder, + anima_vae, ] sd1_bundle: list[StarterModel] = [ @@ -1025,12 +1058,19 @@ class StarterModelBundle(BaseModel): flux2_klein_qwen3_4b_encoder, ] +anima_bundle: list[StarterModel] = [ + anima_preview2, + anima_qwen3_encoder, + anima_vae, +] + STARTER_BUNDLES: dict[str, StarterModelBundle] = { BaseModelType.StableDiffusion1: StarterModelBundle(name="Stable Diffusion 1.5", models=sd1_bundle), BaseModelType.StableDiffusionXL: StarterModelBundle(name="SDXL", models=sdxl_bundle), BaseModelType.Flux: StarterModelBundle(name="FLUX.1 dev", models=flux_bundle), BaseModelType.Flux2: StarterModelBundle(name="FLUX.2 Klein", models=flux2_klein_bundle), BaseModelType.ZImage: StarterModelBundle(name="Z-Image Turbo", models=zimage_bundle), + BaseModelType.Anima: StarterModelBundle(name="Anima", models=anima_bundle), } assert len(STARTER_MODELS) == len({m.source for m in STARTER_MODELS}), "Duplicate starter models" diff --git a/invokeai/backend/model_manager/taxonomy.py b/invokeai/backend/model_manager/taxonomy.py index c002418a6bd..2f3afe0136f 100644 --- a/invokeai/backend/model_manager/taxonomy.py +++ b/invokeai/backend/model_manager/taxonomy.py @@ -52,6 +52,8 @@ class BaseModelType(str, Enum): """Indicates the model is associated with CogView 4 model architecture.""" ZImage = "z-image" """Indicates the model is associated with Z-Image model architecture, including Z-Image-Turbo.""" + Anima = "anima" + """Indicates the model is associated with Anima model architecture (Cosmos Predict2 DiT + LLM Adapter).""" Unknown = "unknown" """Indicates the model's base architecture is unknown.""" @@ -152,6 +154,9 @@ class Qwen3VariantType(str, Enum): Qwen3_8B = "qwen3_8b" """Qwen3 8B text encoder (hidden_size=4096). 
Used by FLUX.2 Klein 9B.""" + Qwen3_06B = "qwen3_06b" + """Qwen3 0.6B text encoder (hidden_size=1024). Used by Anima.""" + class ModelFormat(str, Enum): """Storage format of model.""" diff --git a/invokeai/backend/stable_diffusion/diffusion/conditioning_data.py b/invokeai/backend/stable_diffusion/diffusion/conditioning_data.py index 9d1bd676174..e6ca9aa18e7 100644 --- a/invokeai/backend/stable_diffusion/diffusion/conditioning_data.py +++ b/invokeai/backend/stable_diffusion/diffusion/conditioning_data.py @@ -88,6 +88,31 @@ def to(self, device: torch.device | None = None, dtype: torch.dtype | None = Non return self +@dataclass +class AnimaConditioningInfo: + """Anima text conditioning information from Qwen3 0.6B encoder + T5-XXL tokenizer. + + Anima uses a dual-conditioning scheme where Qwen3 hidden states are combined + with T5-XXL token IDs inside the LLM Adapter (part of the transformer). + """ + + qwen3_embeds: torch.Tensor + """Qwen3 0.6B hidden states. Shape: (seq_len, hidden_size) where hidden_size=1024.""" + + t5xxl_ids: torch.Tensor + """T5-XXL token IDs. Shape: (seq_len,).""" + + t5xxl_weights: Optional[torch.Tensor] = None + """Per-token weights for prompt weighting. Shape: (seq_len,). 
None means uniform weight.""" + + def to(self, device: torch.device | None = None, dtype: torch.dtype | None = None): + self.qwen3_embeds = self.qwen3_embeds.to(device=device, dtype=dtype) + self.t5xxl_ids = self.t5xxl_ids.to(device=device) + if self.t5xxl_weights is not None: + self.t5xxl_weights = self.t5xxl_weights.to(device=device, dtype=dtype) + return self + + @dataclass class ConditioningFieldData: # If you change this class, adding more types, you _must_ update the instantiation of ObjectSerializerDisk in @@ -100,6 +125,7 @@ class ConditioningFieldData: | List[SD3ConditioningInfo] | List[CogView4ConditioningInfo] | List[ZImageConditioningInfo] + | List[AnimaConditioningInfo] ) diff --git a/invokeai/frontend/web/src/features/controlLayers/store/paramsSlice.ts b/invokeai/frontend/web/src/features/controlLayers/store/paramsSlice.ts index 8dcd93cc5de..e82cf4de9c8 100644 --- a/invokeai/frontend/web/src/features/controlLayers/store/paramsSlice.ts +++ b/invokeai/frontend/web/src/features/controlLayers/store/paramsSlice.ts @@ -211,6 +211,26 @@ const slice = createSlice({ } state.zImageQwen3SourceModel = result.data; }, + animaVaeModelSelected: (state, action: PayloadAction) => { + const result = zParamsState.shape.animaVaeModel.safeParse(action.payload); + if (!result.success) { + return; + } + state.animaVaeModel = result.data; + }, + animaQwen3EncoderModelSelected: ( + state, + action: PayloadAction<{ key: string; name: string; base: string } | null> + ) => { + const result = zParamsState.shape.animaQwen3EncoderModel.safeParse(action.payload); + if (!result.success) { + return; + } + state.animaQwen3EncoderModel = result.data; + }, + setAnimaScheduler: (state, action: PayloadAction<'euler' | 'heun' | 'lcm'>) => { + state.animaScheduler = action.payload; + }, kleinVaeModelSelected: (state, action: PayloadAction) => { const result = zParamsState.shape.kleinVaeModel.safeParse(action.payload); if (!result.success) { @@ -558,6 +578,13 @@ export const { paramsReset, } 
= slice.actions; +/** @knipignore */ +export const animaVaeModelSelected = slice.actions.animaVaeModelSelected; +/** @knipignore */ +export const animaQwen3EncoderModelSelected = slice.actions.animaQwen3EncoderModelSelected; +/** @knipignore */ +export const setAnimaScheduler = slice.actions.setAnimaScheduler; + export const paramsSliceConfig: SliceConfig = { slice, schema: zParamsState, @@ -593,6 +620,8 @@ export const selectIsFLUX = createParamsSelector((params) => params.model?.base export const selectIsSD3 = createParamsSelector((params) => params.model?.base === 'sd-3'); export const selectIsCogView4 = createParamsSelector((params) => params.model?.base === 'cogview4'); export const selectIsZImage = createParamsSelector((params) => params.model?.base === 'z-image'); +/** @knipignore */ +export const selectIsAnima = createParamsSelector((params) => params.model?.base === 'anima'); export const selectIsFlux2 = createParamsSelector((params) => params.model?.base === 'flux2'); export const selectIsFluxKontext = createParamsSelector((params) => { if (params.model?.base === 'flux' && params.model?.name.toLowerCase().includes('kontext')) { @@ -614,6 +643,9 @@ export const selectCLIPGEmbedModel = createParamsSelector((params) => params.cli export const selectZImageVaeModel = createParamsSelector((params) => params.zImageVaeModel); export const selectZImageQwen3EncoderModel = createParamsSelector((params) => params.zImageQwen3EncoderModel); export const selectZImageQwen3SourceModel = createParamsSelector((params) => params.zImageQwen3SourceModel); +export const selectAnimaVaeModel = createParamsSelector((params) => params.animaVaeModel); +export const selectAnimaQwen3EncoderModel = createParamsSelector((params) => params.animaQwen3EncoderModel); +export const selectAnimaScheduler = createParamsSelector((params) => params.animaScheduler); export const selectKleinVaeModel = createParamsSelector((params) => params.kleinVaeModel); export const selectKleinQwen3EncoderModel 
= createParamsSelector((params) => params.kleinQwen3EncoderModel); diff --git a/invokeai/frontend/web/src/features/controlLayers/store/types.ts b/invokeai/frontend/web/src/features/controlLayers/store/types.ts index 40babc7bc85..80ac3f978e8 100644 --- a/invokeai/frontend/web/src/features/controlLayers/store/types.ts +++ b/invokeai/frontend/web/src/features/controlLayers/store/types.ts @@ -750,6 +750,10 @@ export const zParamsState = z.object({ zImageVaeModel: zParameterVAEModel.nullable(), // Optional: Separate FLUX VAE zImageQwen3EncoderModel: zModelIdentifierField.nullable(), // Optional: Separate Qwen3 Encoder zImageQwen3SourceModel: zParameterModel.nullable(), // Diffusers Z-Image model (fallback for VAE/Encoder) + // Anima model components - uses Qwen3 0.6B + T5-XXL tokenizer + QwenImage VAE + animaVaeModel: zParameterVAEModel.nullable(), // Optional: Separate QwenImage/FLUX VAE for Anima + animaQwen3EncoderModel: zModelIdentifierField.nullable(), // Optional: Separate Qwen3 0.6B Encoder for Anima + animaScheduler: z.enum(['euler', 'heun', 'lcm']).default('euler'), // Flux2 Klein model components - uses Qwen3 instead of CLIP+T5 kleinVaeModel: zParameterVAEModel.nullable(), // Optional: Separate FLUX.2 VAE for Klein kleinQwen3EncoderModel: zModelIdentifierField.nullable(), // Optional: Separate Qwen3 Encoder for Klein @@ -815,6 +819,9 @@ export const getInitialParamsState = (): ParamsState => ({ zImageVaeModel: null, zImageQwen3EncoderModel: null, zImageQwen3SourceModel: null, + animaVaeModel: null, + animaQwen3EncoderModel: null, + animaScheduler: 'euler', kleinVaeModel: null, kleinQwen3EncoderModel: null, zImageSeedVarianceEnabled: false, diff --git a/invokeai/frontend/web/src/features/modelManagerV2/models.ts b/invokeai/frontend/web/src/features/modelManagerV2/models.ts index 7b5a08adfe2..e1784bc965e 100644 --- a/invokeai/frontend/web/src/features/modelManagerV2/models.ts +++ b/invokeai/frontend/web/src/features/modelManagerV2/models.ts @@ -143,6 +143,7 @@ 
export const MODEL_BASE_TO_COLOR: Record = { flux2: 'gold', cogview4: 'red', 'z-image': 'cyan', + anima: 'pink', unknown: 'red', }; @@ -184,6 +185,7 @@ export const MODEL_BASE_TO_LONG_NAME: Record = { flux2: 'FLUX.2', cogview4: 'CogView4', 'z-image': 'Z-Image', + anima: 'Anima', unknown: 'Unknown', }; @@ -201,6 +203,7 @@ export const MODEL_BASE_TO_SHORT_NAME: Record = { flux2: 'FLUX.2', cogview4: 'CogView4', 'z-image': 'Z-Image', + anima: 'Anima', unknown: 'Unknown', }; @@ -220,6 +223,7 @@ export const MODEL_VARIANT_TO_LONG_NAME: Record = { gigantic: 'CLIP G', qwen3_4b: 'Qwen3 4B', qwen3_8b: 'Qwen3 8B', + qwen3_06b: 'Qwen3 0.6B', }; export const MODEL_FORMAT_TO_LONG_NAME: Record = { @@ -251,4 +255,5 @@ export const SUPPORTS_NEGATIVE_PROMPT_BASE_MODELS: BaseModelType[] = [ 'cogview4', 'sd-3', 'z-image', + 'anima', ]; diff --git a/invokeai/frontend/web/src/features/nodes/types/common.ts b/invokeai/frontend/web/src/features/nodes/types/common.ts index 36805c022d8..570b3026319 100644 --- a/invokeai/frontend/web/src/features/nodes/types/common.ts +++ b/invokeai/frontend/web/src/features/nodes/types/common.ts @@ -94,10 +94,11 @@ export const zBaseModelType = z.enum([ 'flux2', 'cogview4', 'z-image', + 'anima', 'unknown', ]); export type BaseModelType = z.infer; -export const zMainModelBase = z.enum(['sd-1', 'sd-2', 'sd-3', 'sdxl', 'flux', 'flux2', 'cogview4', 'z-image']); +export const zMainModelBase = z.enum(['sd-1', 'sd-2', 'sd-3', 'sdxl', 'flux', 'flux2', 'cogview4', 'z-image', 'anima']); type MainModelBase = z.infer; export const isMainModelBase = (base: unknown): base is MainModelBase => zMainModelBase.safeParse(base).success; export const zModelType = z.enum([ @@ -142,7 +143,7 @@ export const zModelVariantType = z.enum(['normal', 'inpaint', 'depth']); export const zFluxVariantType = z.enum(['dev', 'dev_fill', 'schnell']); export const zFlux2VariantType = z.enum(['klein_4b', 'klein_9b', 'klein_9b_base']); export const zZImageVariantType = z.enum(['turbo', 'zbase']); 
-export const zQwen3VariantType = z.enum(['qwen3_4b', 'qwen3_8b']); +export const zQwen3VariantType = z.enum(['qwen3_4b', 'qwen3_8b', 'qwen3_06b']); export const zAnyModelVariant = z.union([ zModelVariantType, zClipVariantType, diff --git a/invokeai/frontend/web/src/features/nodes/types/constants.ts b/invokeai/frontend/web/src/features/nodes/types/constants.ts index 656d323b65a..9da499ab91c 100644 --- a/invokeai/frontend/web/src/features/nodes/types/constants.ts +++ b/invokeai/frontend/web/src/features/nodes/types/constants.ts @@ -56,6 +56,7 @@ export const FIELD_COLORS: { [key: string]: string } = { SD3MainModelField: 'teal.500', CogView4MainModelField: 'teal.500', ZImageMainModelField: 'teal.500', + AnimaMainModelField: 'teal.500', SDXLMainModelField: 'teal.500', SDXLRefinerModelField: 'teal.500', SpandrelImageToImageModelField: 'teal.500', diff --git a/invokeai/frontend/web/src/features/nodes/util/graph/generation/buildAnimaGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graph/generation/buildAnimaGraph.ts new file mode 100644 index 00000000000..6dcc139fa4a --- /dev/null +++ b/invokeai/frontend/web/src/features/nodes/util/graph/generation/buildAnimaGraph.ts @@ -0,0 +1,184 @@ +import { objectEquals } from '@observ33r/object-equals'; +import { logger } from 'app/logging/logger'; +import { getPrefixedId } from 'features/controlLayers/konva/util'; +import { + selectAnimaQwen3EncoderModel, + selectAnimaScheduler, + selectAnimaVaeModel, + selectMainModelConfig, + selectParamsSlice, +} from 'features/controlLayers/store/paramsSlice'; +import { selectCanvasMetadata } from 'features/controlLayers/store/selectors'; +import { fetchModelConfigWithTypeGuard } from 'features/metadata/util/modelFetchingHelpers'; +import { addNSFWChecker } from 'features/nodes/util/graph/generation/addNSFWChecker'; +import { addWatermarker } from 'features/nodes/util/graph/generation/addWatermarker'; +import { Graph } from 'features/nodes/util/graph/generation/Graph'; +import { + 
getOriginalAndScaledSizesForTextToImage, + selectCanvasOutputFields, + selectPresetModifiedPrompts, +} from 'features/nodes/util/graph/graphBuilderUtils'; +import type { GraphBuilderArg, GraphBuilderReturn, ImageOutputNodes } from 'features/nodes/util/graph/types'; +import { UnsupportedGenerationModeError } from 'features/nodes/util/graph/types'; +import { selectActiveTab } from 'features/ui/store/uiSelectors'; +import type { Invocation } from 'services/api/types'; +import { isNonRefinerMainModelConfig } from 'services/api/types'; +import { assert } from 'tsafe'; + +const log = logger('system'); + +export const buildAnimaGraph = async (arg: GraphBuilderArg): Promise => { + const { generationMode, state } = arg; + + log.debug({ generationMode }, 'Building Anima graph'); + + const model = selectMainModelConfig(state); + assert(model, 'No model selected'); + assert(model.base === 'anima', 'Selected model is not an Anima model'); + + // Get Anima component models + const animaVaeModel = selectAnimaVaeModel(state); + const animaQwen3EncoderModel = selectAnimaQwen3EncoderModel(state); + const animaScheduler = selectAnimaScheduler(state); + + // Validate required component models + assert( + animaVaeModel !== null, + 'No VAE model selected for Anima. Set a compatible VAE (Wan 2.1 QwenImage or FLUX VAE).' + ); + assert(animaQwen3EncoderModel !== null, 'No Qwen3 Encoder model selected for Anima. Set a Qwen3 0.6B encoder model.'); + + const params = selectParamsSlice(state); + const { cfgScale: guidance_scale, steps } = params; + + const prompts = selectPresetModifiedPrompts(state); + + // Anima currently only supports txt2img + if (generationMode !== 'txt2img') { + throw new UnsupportedGenerationModeError( + `Anima does not yet support ${generationMode}. 
Only txt2img is currently available.` + ); + } + + const { originalSize, scaledSize } = getOriginalAndScaledSizesForTextToImage(state); + + const g = new Graph(getPrefixedId('anima_graph')); + + const modelLoader = g.addNode({ + type: 'anima_model_loader', + id: getPrefixedId('anima_model_loader'), + model, + vae_model: animaVaeModel ?? undefined, + qwen3_encoder_model: animaQwen3EncoderModel ?? undefined, + }); + + const positivePrompt = g.addNode({ + id: getPrefixedId('positive_prompt'), + type: 'string', + }); + const posCond = g.addNode({ + type: 'anima_text_encoder', + id: getPrefixedId('pos_prompt'), + }); + + // Anima supports negative conditioning when guidance_scale > 1 + let negCond: Invocation<'anima_text_encoder'> | null = null; + if (guidance_scale > 1) { + negCond = g.addNode({ + type: 'anima_text_encoder', + id: getPrefixedId('neg_prompt'), + prompt: prompts.negative, + }); + } + + const seed = g.addNode({ + id: getPrefixedId('seed'), + type: 'integer', + }); + const denoise = g.addNode({ + type: 'anima_denoise', + id: getPrefixedId('denoise_latents'), + guidance_scale, + steps, + width: scaledSize.width, + height: scaledSize.height, + scheduler: animaScheduler, + }); + const l2i = g.addNode({ + type: 'anima_l2i', + id: getPrefixedId('l2i'), + }); + + // Connect model loader outputs + g.addEdge(modelLoader, 'transformer', denoise, 'transformer'); + g.addEdge(modelLoader, 'qwen3_encoder', posCond, 'qwen3_encoder'); + g.addEdge(modelLoader, 'vae', l2i, 'vae'); + + // Connect positive prompt + g.addEdge(positivePrompt, 'value', posCond, 'prompt'); + g.addEdge(posCond, 'conditioning', denoise, 'positive_conditioning'); + + // Connect negative conditioning if guidance_scale > 1 + if (negCond !== null) { + g.addEdge(modelLoader, 'qwen3_encoder', negCond, 'qwen3_encoder'); + g.addEdge(negCond, 'conditioning', denoise, 'negative_conditioning'); + } + + // Connect seed and denoiser to L2I + g.addEdge(seed, 'value', denoise, 'seed'); + g.addEdge(denoise, 
'latents', l2i, 'latents'); + + const modelConfig = await fetchModelConfigWithTypeGuard(model.key, isNonRefinerMainModelConfig); + assert(modelConfig.base === 'anima'); + + g.upsertMetadata({ + cfg_scale: guidance_scale, + negative_prompt: prompts.negative, + model: Graph.getModelMetadataField(modelConfig), + steps, + scheduler: animaScheduler, + width: originalSize.width, + height: originalSize.height, + vae: animaVaeModel ?? undefined, + qwen3_encoder: animaQwen3EncoderModel ?? undefined, + generation_mode: 'txt2img', + }); + g.addEdgeToMetadata(seed, 'value', 'seed'); + g.addEdgeToMetadata(positivePrompt, 'value', 'positive_prompt'); + + // Handle output resize if scaled size differs from original + let canvasOutput: Invocation; + if (!objectEquals(scaledSize, originalSize)) { + const resizeImageToOriginalSize = g.addNode({ + id: getPrefixedId('resize_image_to_original_size'), + type: 'img_resize', + ...originalSize, + }); + g.addEdge(l2i, 'image', resizeImageToOriginalSize, 'image'); + canvasOutput = resizeImageToOriginalSize; + } else { + canvasOutput = l2i; + } + + if (state.system.shouldUseNSFWChecker) { + canvasOutput = addNSFWChecker(g, canvasOutput); + } + + if (state.system.shouldUseWatermarker) { + canvasOutput = addWatermarker(g, canvasOutput); + } + + g.updateNode(canvasOutput, selectCanvasOutputFields(state)); + + if (selectActiveTab(state) === 'canvas') { + g.upsertMetadata(selectCanvasMetadata(state)); + } + + g.setMetadataReceivingNode(canvasOutput); + + return { + g, + seed, + positivePrompt, + }; +}; diff --git a/invokeai/frontend/web/src/features/nodes/util/graph/graphBuilderUtils.ts b/invokeai/frontend/web/src/features/nodes/util/graph/graphBuilderUtils.ts index 0424f175066..e48a6ee4503 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graph/graphBuilderUtils.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graph/graphBuilderUtils.ts @@ -213,7 +213,8 @@ export const isMainModelWithoutUnet = (modelLoader: Invocation { case 
'sd-3': case 'cogview4': case 'z-image': + case 'anima': default: return 1024; } @@ -60,7 +61,7 @@ export const isInSDXLTrainingDimensions = (width: number, height: number): boole /** * Gets the grid size for a given base model. For Flux, the grid size is 16, otherwise it is 8. - * - sd-1, sd-2, sdxl: 8 + * - sd-1, sd-2, sdxl, anima: 8 * - flux, sd-3, z-image: 16 * - cogview4: 32 * - default: 8 @@ -79,6 +80,7 @@ export const getGridSize = (base?: BaseModelType | null): number => { case 'sd-1': case 'sd-2': case 'sdxl': + case 'anima': default: return 8; } diff --git a/invokeai/frontend/web/src/features/queue/hooks/useEnqueueGenerate.ts b/invokeai/frontend/web/src/features/queue/hooks/useEnqueueGenerate.ts index cf00a12ee5f..cbae643ee92 100644 --- a/invokeai/frontend/web/src/features/queue/hooks/useEnqueueGenerate.ts +++ b/invokeai/frontend/web/src/features/queue/hooks/useEnqueueGenerate.ts @@ -6,6 +6,7 @@ import { extractMessageFromAssertionError } from 'common/util/extractMessageFrom import { withResult, withResultAsync } from 'common/util/result'; import { positivePromptAddedToHistory, selectPositivePrompt } from 'features/controlLayers/store/paramsSlice'; import { prepareLinearUIBatch } from 'features/nodes/util/graph/buildLinearBatchConfig'; +import { buildAnimaGraph } from 'features/nodes/util/graph/generation/buildAnimaGraph'; import { buildCogView4Graph } from 'features/nodes/util/graph/generation/buildCogView4Graph'; import { buildFLUXGraph } from 'features/nodes/util/graph/generation/buildFLUXGraph'; import { buildSD1Graph } from 'features/nodes/util/graph/generation/buildSD1Graph'; @@ -52,6 +53,8 @@ const enqueueGenerate = async (store: AppStore, prepend: boolean) => { return await buildCogView4Graph(graphBuilderArg); case 'z-image': return await buildZImageGraph(graphBuilderArg); + case 'anima': + return await buildAnimaGraph(graphBuilderArg); default: assert(false, `No graph builders for base ${base}`); } diff --git 
a/invokeai/frontend/web/src/services/api/schema.ts b/invokeai/frontend/web/src/services/api/schema.ts index fc6506ce22b..83e8faa23ff 100644 --- a/invokeai/frontend/web/src/services/api/schema.ts +++ b/invokeai/frontend/web/src/services/api/schema.ts @@ -1,290 +1,4 @@ export type paths = { - "/api/v1/auth/status": { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - /** - * Get Setup Status - * @description Check if initial administrator setup is required. - * - * Returns: - * SetupStatusResponse indicating whether setup is needed and multiuser mode status - */ - get: operations["get_setup_status_api_v1_auth_status_get"]; - put?: never; - post?: never; - delete?: never; - options?: never; - head?: never; - patch?: never; - trace?: never; - }; - "/api/v1/auth/login": { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - get?: never; - put?: never; - /** - * Login - * @description Authenticate user and return access token. - * - * Args: - * request: Login credentials (email and password) - * - * Returns: - * LoginResponse containing JWT token and user information - * - * Raises: - * HTTPException: 401 if credentials are invalid or user is inactive - * HTTPException: 403 if multiuser mode is disabled - */ - post: operations["login_api_v1_auth_login_post"]; - delete?: never; - options?: never; - head?: never; - patch?: never; - trace?: never; - }; - "/api/v1/auth/logout": { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - get?: never; - put?: never; - /** - * Logout - * @description Logout current user. - * - * Currently a no-op since we use stateless JWT tokens. 
For token invalidation in - * future implementations, consider: - * - Token blacklist: Store invalidated tokens in Redis/database with expiration - * - Token versioning: Add version field to user record, increment on logout - * - Short-lived tokens: Use refresh token pattern with token rotation - * - Session storage: Track active sessions server-side for revocation - * - * Args: - * current_user: The authenticated user (validates token) - * - * Returns: - * LogoutResponse indicating success - */ - post: operations["logout_api_v1_auth_logout_post"]; - delete?: never; - options?: never; - head?: never; - patch?: never; - trace?: never; - }; - "/api/v1/auth/me": { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - /** - * Get Current User Info - * @description Get current authenticated user's information. - * - * Args: - * current_user: The authenticated user's token data - * - * Returns: - * UserDTO containing user information - * - * Raises: - * HTTPException: 404 if user is not found (should not happen normally) - */ - get: operations["get_current_user_info_api_v1_auth_me_get"]; - put?: never; - post?: never; - delete?: never; - options?: never; - head?: never; - /** - * Update Current User - * @description Update the current user's own profile. - * - * To change the password, both ``current_password`` and ``new_password`` must - * be provided. The current password is verified before the change is applied. 
- * - * Args: - * request: Profile fields to update - * current_user: The authenticated user - * - * Returns: - * The updated user - * - * Raises: - * HTTPException: 400 if current password is incorrect or new password is weak - * HTTPException: 404 if user not found - */ - patch: operations["update_current_user_api_v1_auth_me_patch"]; - trace?: never; - }; - "/api/v1/auth/setup": { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - get?: never; - put?: never; - /** - * Setup Admin - * @description Set up initial administrator account. - * - * This endpoint can only be called once, when no admin user exists. It creates - * the first admin user for the system. - * - * Args: - * request: Admin account details (email, display_name, password) - * - * Returns: - * SetupResponse containing the created admin user - * - * Raises: - * HTTPException: 400 if admin already exists or password is weak - * HTTPException: 403 if multiuser mode is disabled - */ - post: operations["setup_admin_api_v1_auth_setup_post"]; - delete?: never; - options?: never; - head?: never; - patch?: never; - trace?: never; - }; - "/api/v1/auth/generate-password": { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - /** - * Generate Password - * @description Generate a strong random password. - * - * Returns a cryptographically secure random password of 16 characters - * containing uppercase, lowercase, digits, and punctuation. - */ - get: operations["generate_password_api_v1_auth_generate_password_get"]; - put?: never; - post?: never; - delete?: never; - options?: never; - head?: never; - patch?: never; - trace?: never; - }; - "/api/v1/auth/users": { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - /** - * List Users - * @description List all users. Requires admin privileges. 
- * - * The internal 'system' user (created for backward compatibility) is excluded - * from the results since it cannot be managed through this interface. - * - * Returns: - * List of all real users (system user excluded) - */ - get: operations["list_users_api_v1_auth_users_get"]; - put?: never; - /** - * Create User - * @description Create a new user. Requires admin privileges. - * - * Args: - * request: New user details - * - * Returns: - * The created user - * - * Raises: - * HTTPException: 400 if email already exists or password is weak - */ - post: operations["create_user_api_v1_auth_users_post"]; - delete?: never; - options?: never; - head?: never; - patch?: never; - trace?: never; - }; - "/api/v1/auth/users/{user_id}": { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - /** - * Get User - * @description Get a user by ID. Requires admin privileges. - * - * Args: - * user_id: The user ID - * - * Returns: - * The user - * - * Raises: - * HTTPException: 404 if user not found - */ - get: operations["get_user_api_v1_auth_users__user_id__get"]; - put?: never; - post?: never; - /** - * Delete User - * @description Delete a user. Requires admin privileges. - * - * Admins can delete any user including other admins, but cannot delete the last - * remaining admin. - * - * Args: - * user_id: The user ID - * - * Raises: - * HTTPException: 400 if attempting to delete the last admin - * HTTPException: 404 if user not found - */ - delete: operations["delete_user_api_v1_auth_users__user_id__delete"]; - options?: never; - head?: never; - /** - * Update User - * @description Update a user. Requires admin privileges. 
- * - * Args: - * user_id: The user ID - * request: Fields to update - * - * Returns: - * The updated user - * - * Raises: - * HTTPException: 400 if password is weak - * HTTPException: 404 if user not found - */ - patch: operations["update_user_api_v1_auth_users__user_id__patch"]; - trace?: never; - }; "/api/v1/utilities/dynamicprompts": { parameters: { query?: never; @@ -369,27 +83,6 @@ export type paths = { patch?: never; trace?: never; }; - "/api/v2/models/get_by_hash": { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - /** - * Get Model Records By Hash - * @description Gets a model by its hash. This is useful for recalling models that were deleted and reinstalled, - * as the hash remains stable across reinstallations while the key (UUID) changes. - */ - get: operations["get_model_records_by_hash"]; - put?: never; - post?: never; - delete?: never; - options?: never; - head?: never; - patch?: never; - trace?: never; - }; "/api/v2/models/i/{key}": { parameters: { query?: never; @@ -538,7 +231,6 @@ export type paths = { * * "waiting" -- Job is waiting in the queue to run * * "downloading" -- Model file(s) are downloading * * "running" -- Model has downloaded and the model probing and registration process is running - * * "paused" -- Job is paused and can be resumed * * "completed" -- Installation completed successfully * * "error" -- An error occurred. Details will be in the "error_type" and "error" fields. * * "cancelled" -- Job was cancelled before completion. @@ -636,7 +328,7 @@ export type paths = { patch?: never; trace?: never; }; - "/api/v2/models/install/{id}/pause": { + "/api/v2/models/convert/{key}": { parameters: { query?: never; header?: never; @@ -644,59 +336,58 @@ export type paths = { cookie?: never; }; get?: never; - put?: never; /** - * Pause Model Install Job - * @description Pause the model install job corresponding to the given job ID. 
+ * Convert Model + * @description Permanently convert a model into diffusers format, replacing the safetensors version. + * Note that during the conversion process the key and model hash will change. + * The return value is the model configuration for the converted model. */ - post: operations["pause_model_install_job"]; + put: operations["convert_model"]; + post?: never; delete?: never; options?: never; head?: never; patch?: never; trace?: never; }; - "/api/v2/models/install/{id}/resume": { + "/api/v2/models/starter_models": { parameters: { query?: never; header?: never; path?: never; cookie?: never; }; - get?: never; + /** Get Starter Models */ + get: operations["get_starter_models"]; put?: never; - /** - * Resume Model Install Job - * @description Resume a paused model install job corresponding to the given job ID. - */ - post: operations["resume_model_install_job"]; + post?: never; delete?: never; options?: never; head?: never; patch?: never; trace?: never; }; - "/api/v2/models/install/{id}/restart_failed": { + "/api/v2/models/stats": { parameters: { query?: never; header?: never; path?: never; cookie?: never; }; - get?: never; - put?: never; /** - * Restart Failed Model Install Job - * @description Restart failed or non-resumable file downloads for the given job. + * Get model manager RAM cache performance statistics. + * @description Return performance statistics on the model manager's RAM cache. Will return null if no models have been loaded. */ - post: operations["restart_failed_model_install_job"]; + get: operations["get_stats"]; + put?: never; + post?: never; delete?: never; options?: never; head?: never; patch?: never; trace?: never; }; - "/api/v2/models/install/{id}/restart_file": { + "/api/v2/models/empty_model_cache": { parameters: { query?: never; header?: never; @@ -706,115 +397,36 @@ export type paths = { get?: never; put?: never; /** - * Restart Model Install File - * @description Restart a specific file download for the given job. 
+ * Empty Model Cache + * @description Drop all models from the model cache to free RAM/VRAM. 'Locked' models that are in active use will not be dropped. */ - post: operations["restart_model_install_file"]; + post: operations["empty_model_cache"]; delete?: never; options?: never; head?: never; patch?: never; trace?: never; }; - "/api/v2/models/convert/{key}": { + "/api/v2/models/hf_login": { parameters: { query?: never; header?: never; path?: never; cookie?: never; }; - get?: never; - /** - * Convert Model - * @description Permanently convert a model into diffusers format, replacing the safetensors version. - * Note that during the conversion process the key and model hash will change. - * The return value is the model configuration for the converted model. - */ - put: operations["convert_model"]; - post?: never; - delete?: never; + /** Get Hf Login Status */ + get: operations["get_hf_login_status"]; + put?: never; + /** Do Hf Login */ + post: operations["do_hf_login"]; + /** Reset Hf Token */ + delete: operations["reset_hf_token"]; options?: never; head?: never; patch?: never; trace?: never; }; - "/api/v2/models/starter_models": { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - /** Get Starter Models */ - get: operations["get_starter_models"]; - put?: never; - post?: never; - delete?: never; - options?: never; - head?: never; - patch?: never; - trace?: never; - }; - "/api/v2/models/stats": { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - /** - * Get model manager RAM cache performance statistics. - * @description Return performance statistics on the model manager's RAM cache. Will return null if no models have been loaded. 
- */ - get: operations["get_stats"]; - put?: never; - post?: never; - delete?: never; - options?: never; - head?: never; - patch?: never; - trace?: never; - }; - "/api/v2/models/empty_model_cache": { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - get?: never; - put?: never; - /** - * Empty Model Cache - * @description Drop all models from the model cache to free RAM/VRAM. 'Locked' models that are in active use will not be dropped. - */ - post: operations["empty_model_cache"]; - delete?: never; - options?: never; - head?: never; - patch?: never; - trace?: never; - }; - "/api/v2/models/hf_login": { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - /** Get Hf Login Status */ - get: operations["get_hf_login_status"]; - put?: never; - /** Do Hf Login */ - post: operations["do_hf_login"]; - /** Reset Hf Token */ - delete: operations["reset_hf_token"]; - options?: never; - head?: never; - patch?: never; - trace?: never; - }; - "/api/v2/models/sync/orphaned": { + "/api/v2/models/sync/orphaned": { parameters: { query?: never; header?: never; @@ -950,7 +562,7 @@ export type paths = { put?: never; /** * Upload Image - * @description Uploads an image for the current user + * @description Uploads an image */ post: operations["upload_image"]; delete?: never; @@ -968,7 +580,7 @@ export type paths = { }; /** * List Image Dtos - * @description Gets a list of image DTOs for the current user + * @description Gets a list of image DTOs */ get: operations["list_image_dtos"]; put?: never; @@ -1293,13 +905,13 @@ export type paths = { }; /** * List Boards - * @description Gets a list of boards for the current user, including shared boards. Admin users see all boards. 
+ * @description Gets a list of boards */ get: operations["list_boards"]; put?: never; /** * Create Board - * @description Creates a board for the current user + * @description Creates a board */ post: operations["create_board"]; delete?: never; @@ -1317,21 +929,21 @@ export type paths = { }; /** * Get Board - * @description Gets a board (user must have access to it) + * @description Gets a board */ get: operations["get_board"]; put?: never; post?: never; /** * Delete Board - * @description Deletes a board (user must have access to it) + * @description Deletes a board */ delete: operations["delete_board"]; options?: never; head?: never; /** * Update Board - * @description Updates a board (user must have access to it) + * @description Updates a board */ patch: operations["update_board"]; trace?: never; @@ -1667,7 +1279,7 @@ export type paths = { put?: never; /** * Enqueue Batch - * @description Processes a batch and enqueues the output graphs for execution for the current user. + * @description Processes a batch and enqueues the output graphs for execution. */ post: operations["enqueue_batch"]; delete?: never; @@ -1746,7 +1358,7 @@ export type paths = { get?: never; /** * Resume - * @description Resumes session processor. Admin only. + * @description Resumes session processor */ put: operations["resume"]; post?: never; @@ -1766,7 +1378,7 @@ export type paths = { get?: never; /** * Pause - * @description Pauses session processor. Admin only. + * @description Pauses session processor */ put: operations["pause"]; post?: never; @@ -1786,7 +1398,7 @@ export type paths = { get?: never; /** * Cancel All Except Current - * @description Immediately cancels all queue items except in-processing items. Non-admin users can only cancel their own items. 
+ * @description Immediately cancels all queue items except in-processing items */ put: operations["cancel_all_except_current"]; post?: never; @@ -1806,7 +1418,7 @@ export type paths = { get?: never; /** * Delete All Except Current - * @description Immediately deletes all queue items except in-processing items. Non-admin users can only delete their own items. + * @description Immediately deletes all queue items except in-processing items */ put: operations["delete_all_except_current"]; post?: never; @@ -1826,7 +1438,7 @@ export type paths = { get?: never; /** * Cancel By Batch Ids - * @description Immediately cancels all queue items from the given batch ids. Non-admin users can only cancel their own items. + * @description Immediately cancels all queue items from the given batch ids */ put: operations["cancel_by_batch_ids"]; post?: never; @@ -1846,7 +1458,7 @@ export type paths = { get?: never; /** * Cancel By Destination - * @description Immediately cancels all queue items with the given destination. Non-admin users can only cancel their own items. + * @description Immediately cancels all queue items with the given origin */ put: operations["cancel_by_destination"]; post?: never; @@ -1866,7 +1478,7 @@ export type paths = { get?: never; /** * Retry Items By Id - * @description Retries the given queue items. Users can only retry their own items unless they are an admin. + * @description Immediately cancels all queue items with the given origin */ put: operations["retry_items_by_id"]; post?: never; @@ -1886,7 +1498,7 @@ export type paths = { get?: never; /** * Clear - * @description Clears the queue entirely. Admin users clear all items; non-admin users only clear their own items. If there's a currently-executing item, users can only cancel it if they own it or are an admin. 
+ * @description Clears the queue entirely, immediately canceling the currently-executing session */ put: operations["clear"]; post?: never; @@ -1906,7 +1518,7 @@ export type paths = { get?: never; /** * Prune - * @description Prunes all completed or errored queue items. Non-admin users can only prune their own items. + * @description Prunes all completed or errored queue items */ put: operations["prune"]; post?: never; @@ -2012,7 +1624,7 @@ export type paths = { post?: never; /** * Delete Queue Item - * @description Deletes a queue item. Users can only delete their own items unless they are an admin. + * @description Deletes a queue item */ delete: operations["delete_queue_item"]; options?: never; @@ -2030,7 +1642,7 @@ export type paths = { get?: never; /** * Cancel Queue Item - * @description Cancels a queue item. Users can only cancel their own items unless they are an admin. + * @description Deletes a queue item */ put: operations["cancel_queue_item"]; post?: never; @@ -2072,7 +1684,7 @@ export type paths = { post?: never; /** * Delete By Destination - * @description Deletes all items with the given destination. Non-admin users can only delete their own items. 
+ * @description Deletes all items with the given destination */ delete: operations["delete_by_destination"]; options?: never; @@ -2355,7 +1967,7 @@ export type paths = { }; /** * Get Client State By Key - * @description Gets the client state for the current user (or system user if not authenticated) + * @description Gets the client state */ get: operations["get_client_state_by_key"]; put?: never; @@ -2377,7 +1989,7 @@ export type paths = { put?: never; /** * Set Client State - * @description Sets the client state for the current user (or system user if not authenticated) + * @description Sets the client state */ post: operations["set_client_state"]; delete?: never; @@ -2397,7 +2009,7 @@ export type paths = { put?: never; /** * Delete Client State - * @description Deletes the client state for the current user (or system user if not authenticated) + * @description Deletes the client state */ post: operations["delete_client_state"]; delete?: never; @@ -2406,61 +2018,6 @@ export type paths = { patch?: never; trace?: never; }; - "/api/v1/recall/{queue_id}": { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - /** - * Get Recall Parameters - * @description Retrieve all stored recall parameters for a given queue. - * - * Returns a dictionary of all recall parameters that have been set for the queue. - * - * Args: - * queue_id: The queue ID to retrieve parameters for - * - * Returns: - * A dictionary containing all stored recall parameters - */ - get: operations["get_recall_parameters"]; - put?: never; - /** - * Update Recall Parameters - * @description Update recallable parameters that can be recalled on the frontend. - * - * This endpoint allows updating parameters such as prompt, model, steps, and other - * generation settings. These parameters are stored in client state and can be - * accessed by the frontend to populate UI elements. 
- * - * Args: - * queue_id: The queue ID to associate these parameters with - * parameters: The RecallParameter object containing the parameters to update - * - * Returns: - * A dictionary containing the updated parameters and status - * - * Example: - * POST /api/v1/recall/{queue_id} - * { - * "positive_prompt": "a beautiful landscape", - * "model": "sd-1.5", - * "steps": 20, - * "cfg_scale": 7.5, - * "width": 512, - * "height": 512, - * "seed": 12345 - * } - */ - post: operations["update_recall_parameters"]; - delete?: never; - options?: never; - head?: never; - patch?: never; - trace?: never; - }; }; export type webhooks = Record; export type components = { @@ -2519,59 +2076,6 @@ export type components = { */ type: "add"; }; - /** - * AdminUserCreateRequest - * @description Request body for admin to create a new user. - */ - AdminUserCreateRequest: { - /** - * Email - * @description User email address - */ - email: string; - /** - * Display Name - * @description Display name - */ - display_name?: string | null; - /** - * Password - * @description User password - */ - password: string; - /** - * Is Admin - * @description Whether user should have admin privileges - * @default false - */ - is_admin?: boolean; - }; - /** - * AdminUserUpdateRequest - * @description Request body for admin to update any user. - */ - AdminUserUpdateRequest: { - /** - * Display Name - * @description Display name - */ - display_name?: string | null; - /** - * Password - * @description New password - */ - password?: string | null; - /** - * Is Admin - * @description Whether user should have admin privileges - */ - is_admin?: boolean | null; - /** - * Is Active - * @description Whether user account should be active - */ - is_active?: boolean | null; - }; /** * Alpha Mask to Tensor * @description Convert a mask image to a tensor. Opaque regions are 1 and transparent regions are 0. 
@@ -2612,35 +2116,42 @@ export type components = { */ type: "alpha_mask_to_tensor"; }; - AnyModelConfig: components["schemas"]["Main_Diffusers_SD1_Config"] | components["schemas"]["Main_Diffusers_SD2_Config"] | components["schemas"]["Main_Diffusers_SDXL_Config"] | components["schemas"]["Main_Diffusers_SDXLRefiner_Config"] | components["schemas"]["Main_Diffusers_SD3_Config"] | components["schemas"]["Main_Diffusers_FLUX_Config"] | components["schemas"]["Main_Diffusers_Flux2_Config"] | components["schemas"]["Main_Diffusers_CogView4_Config"] | components["schemas"]["Main_Diffusers_ZImage_Config"] | components["schemas"]["Main_Checkpoint_SD1_Config"] | components["schemas"]["Main_Checkpoint_SD2_Config"] | components["schemas"]["Main_Checkpoint_SDXL_Config"] | components["schemas"]["Main_Checkpoint_SDXLRefiner_Config"] | components["schemas"]["Main_Checkpoint_Flux2_Config"] | components["schemas"]["Main_Checkpoint_FLUX_Config"] | components["schemas"]["Main_Checkpoint_ZImage_Config"] | components["schemas"]["Main_BnBNF4_FLUX_Config"] | components["schemas"]["Main_GGUF_Flux2_Config"] | components["schemas"]["Main_GGUF_FLUX_Config"] | components["schemas"]["Main_GGUF_ZImage_Config"] | components["schemas"]["VAE_Checkpoint_SD1_Config"] | components["schemas"]["VAE_Checkpoint_SD2_Config"] | components["schemas"]["VAE_Checkpoint_SDXL_Config"] | components["schemas"]["VAE_Checkpoint_FLUX_Config"] | components["schemas"]["VAE_Checkpoint_Flux2_Config"] | components["schemas"]["VAE_Diffusers_SD1_Config"] | components["schemas"]["VAE_Diffusers_SDXL_Config"] | components["schemas"]["VAE_Diffusers_Flux2_Config"] | components["schemas"]["ControlNet_Checkpoint_SD1_Config"] | components["schemas"]["ControlNet_Checkpoint_SD2_Config"] | components["schemas"]["ControlNet_Checkpoint_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_FLUX_Config"] | components["schemas"]["ControlNet_Checkpoint_ZImage_Config"] | components["schemas"]["ControlNet_Diffusers_SD1_Config"] | 
components["schemas"]["ControlNet_Diffusers_SD2_Config"] | components["schemas"]["ControlNet_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_SD1_Config"] | components["schemas"]["LoRA_LyCORIS_SD2_Config"] | components["schemas"]["LoRA_LyCORIS_SDXL_Config"] | components["schemas"]["LoRA_LyCORIS_Flux2_Config"] | components["schemas"]["LoRA_LyCORIS_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_ZImage_Config"] | components["schemas"]["LoRA_OMI_SDXL_Config"] | components["schemas"]["LoRA_OMI_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_SD1_Config"] | components["schemas"]["LoRA_Diffusers_SD2_Config"] | components["schemas"]["LoRA_Diffusers_SDXL_Config"] | components["schemas"]["LoRA_Diffusers_Flux2_Config"] | components["schemas"]["LoRA_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_ZImage_Config"] | components["schemas"]["ControlLoRA_LyCORIS_FLUX_Config"] | components["schemas"]["T5Encoder_T5Encoder_Config"] | components["schemas"]["T5Encoder_BnBLLMint8_Config"] | components["schemas"]["Qwen3Encoder_Qwen3Encoder_Config"] | components["schemas"]["Qwen3Encoder_Checkpoint_Config"] | components["schemas"]["Qwen3Encoder_GGUF_Config"] | components["schemas"]["TI_File_SD1_Config"] | components["schemas"]["TI_File_SD2_Config"] | components["schemas"]["TI_File_SDXL_Config"] | components["schemas"]["TI_Folder_SD1_Config"] | components["schemas"]["TI_Folder_SD2_Config"] | components["schemas"]["TI_Folder_SDXL_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD1_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD2_Config"] | components["schemas"]["IPAdapter_InvokeAI_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD1_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD2_Config"] | components["schemas"]["IPAdapter_Checkpoint_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_FLUX_Config"] | 
components["schemas"]["T2IAdapter_Diffusers_SD1_Config"] | components["schemas"]["T2IAdapter_Diffusers_SDXL_Config"] | components["schemas"]["Spandrel_Checkpoint_Config"] | components["schemas"]["CLIPEmbed_Diffusers_G_Config"] | components["schemas"]["CLIPEmbed_Diffusers_L_Config"] | components["schemas"]["CLIPVision_Diffusers_Config"] | components["schemas"]["SigLIP_Diffusers_Config"] | components["schemas"]["FLUXRedux_Checkpoint_Config"] | components["schemas"]["LlavaOnevision_Diffusers_Config"] | components["schemas"]["Unknown_Config"]; /** - * AppVersion - * @description App Version Response + * AnimaConditioningField + * @description An Anima conditioning tensor primitive value. + * + * Anima conditioning contains Qwen3 0.6B hidden states and T5-XXL token IDs, + * which are combined by the LLM Adapter inside the transformer. */ - AppVersion: { + AnimaConditioningField: { /** - * Version - * @description App version + * Conditioning Name + * @description The name of conditioning tensor */ - version: string; + conditioning_name: string; }; /** - * Apply Tensor Mask to Image - * @description Applies a tensor mask to an image. - * - * The image is converted to RGBA and the mask is applied to the alpha channel. + * AnimaConditioningOutput + * @description Base class for nodes that output an Anima text conditioning tensor. */ - ApplyMaskTensorToImageInvocation: { - /** - * @description The board to save the image to - * @default null - */ - board?: components["schemas"]["BoardField"] | null; + AnimaConditioningOutput: { + /** @description Conditioning tensor */ + conditioning: components["schemas"]["AnimaConditioningField"]; /** - * @description Optional metadata to be saved with the image - * @default null + * type + * @default anima_conditioning_output + * @constant */ - metadata?: components["schemas"]["MetadataField"] | null; + type: "anima_conditioning_output"; + }; + /** + * Denoise - Anima + * @description Run the denoising process with an Anima model. 
+ * + * Uses rectified flow sampling with shift=3.0 and the Cosmos Predict2 DiT + * backbone with integrated LLM Adapter for text conditioning. + */ + AnimaDenoiseInvocation: { /** * Id * @description The id of this instance of an invocation. Must be unique among all instances of invocations. @@ -2659,34 +2170,73 @@ export type components = { */ use_cache?: boolean; /** - * @description The mask tensor to apply. + * Transformer + * @description Anima transformer model. * @default null */ - mask?: components["schemas"]["TensorField"] | null; + transformer?: components["schemas"]["TransformerField"] | null; /** - * @description The image to apply the mask to. + * @description Positive conditioning tensor * @default null */ - image?: components["schemas"]["ImageField"] | null; + positive_conditioning?: components["schemas"]["AnimaConditioningField"] | null; /** - * Invert - * @description Whether to invert the mask. - * @default false + * @description Negative conditioning tensor + * @default null */ - invert?: boolean; + negative_conditioning?: components["schemas"]["AnimaConditioningField"] | null; + /** + * Guidance Scale + * @description Guidance scale for classifier-free guidance. Recommended: 4.0-5.0 for Anima. + * @default 4.5 + */ + guidance_scale?: number; + /** + * Width + * @description Width of the generated image. + * @default 1024 + */ + width?: number; + /** + * Height + * @description Height of the generated image. + * @default 1024 + */ + height?: number; + /** + * Steps + * @description Number of denoising steps. 30 recommended for Anima. + * @default 30 + */ + steps?: number; + /** + * Seed + * @description Randomness seed for reproducibility. + * @default 0 + */ + seed?: number; + /** + * Scheduler + * @description Scheduler (sampler) for the denoising process. 
+ * @default euler + * @enum {string} + */ + scheduler?: "euler" | "heun" | "lcm"; /** * type - * @default apply_tensor_mask_to_image + * @default anima_denoise * @constant */ - type: "apply_tensor_mask_to_image"; + type: "anima_denoise"; }; /** - * Apply Mask to Image - * @description Extracts a region from a generated image using a mask and blends it seamlessly onto a source image. - * The mask uses black to indicate areas to keep from the generated image and white for areas to discard. + * Latents to Image - Anima + * @description Generates an image from latents using the Anima VAE. + * + * Supports both the Wan 2.1 QwenImage VAE (AutoencoderKL with mean/std normalization) + * and FLUX VAE (FluxAutoEncoder) as fallback. */ - ApplyMaskToImageInvocation: { + AnimaLatentsToImageInvocation: { /** * @description The board to save the image to * @default null @@ -2715,65 +2265,307 @@ export type components = { */ use_cache?: boolean; /** - * @description The image from which to extract the masked region + * @description Latents tensor * @default null */ - image?: components["schemas"]["ImageField"] | null; + latents?: components["schemas"]["LatentsField"] | null; /** - * @description The mask defining the region (black=keep, white=discard) + * @description VAE * @default null */ - mask?: components["schemas"]["ImageField"] | null; - /** - * Invert Mask - * @description Whether to invert the mask before applying it - * @default false - */ - invert_mask?: boolean; + vae?: components["schemas"]["VAEField"] | null; /** * type - * @default apply_mask_to_image + * @default anima_l2i * @constant */ - type: "apply_mask_to_image"; + type: "anima_l2i"; }; /** - * BaseMetadata - * @description Adds typing data for discriminated union. + * Main Model - Anima + * @description Loads an Anima model, outputting its submodels. 
+ * + * Anima uses: + * - Transformer: Cosmos Predict2 DiT + LLM Adapter (from single-file checkpoint) + * - Qwen3 Encoder: Qwen3 0.6B (standalone single-file) + * - VAE: AutoencoderKLQwenImage / Wan 2.1 VAE (standalone single-file or FLUX VAE) */ - BaseMetadata: { + AnimaModelLoaderInvocation: { /** - * Name - * @description model's name + * Id + * @description The id of this instance of an invocation. Must be unique among all instances of invocations. */ - name: string; + id: string; /** - * @description discriminator enum property added by openapi-typescript - * @enum {string} + * Is Intermediate + * @description Whether or not this is an intermediate invocation. + * @default false */ - type: "basemetadata"; - }; - /** - * BaseModelType - * @description An enumeration of base model architectures. For example, Stable Diffusion 1.x, Stable Diffusion 2.x, FLUX, etc. - * - * Every model config must have a base architecture type. - * - * Not all models are associated with a base architecture. For example, CLIP models are their own thing, not related - * to any particular model architecture. To simplify internal APIs and make it easier to work with models, we use a - * fallback/null value `BaseModelType.Any` for these models, instead of making the model base optional. - * @enum {string} - */ - BaseModelType: "any" | "sd-1" | "sd-2" | "sd-3" | "sdxl" | "sdxl-refiner" | "flux" | "flux2" | "cogview4" | "z-image" | "unknown"; - /** Batch */ - Batch: { + is_intermediate?: boolean; /** - * Batch Id - * @description The ID of the batch + * Use Cache + * @description Whether or not to use the cache + * @default true */ - batch_id?: string; + use_cache?: boolean; /** - * Origin + * Transformer + * @description Anima main model (transformer + LLM adapter). + */ + model: components["schemas"]["ModelIdentifierField"]; + /** + * VAE + * @description Standalone VAE model. Anima uses a Wan 2.1 / QwenImage VAE (16-channel). If not provided, a FLUX VAE can be used as a fallback. 
+ * @default null + */ + vae_model?: components["schemas"]["ModelIdentifierField"] | null; + /** + * Qwen3 Encoder + * @description Standalone Qwen3 0.6B Encoder model. + * @default null + */ + qwen3_encoder_model?: components["schemas"]["ModelIdentifierField"] | null; + /** + * type + * @default anima_model_loader + * @constant + */ + type: "anima_model_loader"; + }; + /** + * AnimaModelLoaderOutput + * @description Anima model loader output. + */ + AnimaModelLoaderOutput: { + /** + * Transformer + * @description Transformer + */ + transformer: components["schemas"]["TransformerField"]; + /** + * Qwen3 Encoder + * @description Qwen3 tokenizer and text encoder + */ + qwen3_encoder: components["schemas"]["Qwen3EncoderField"]; + /** + * VAE + * @description VAE + */ + vae: components["schemas"]["VAEField"]; + /** + * type + * @default anima_model_loader_output + * @constant + */ + type: "anima_model_loader_output"; + }; + /** + * Prompt - Anima + * @description Encodes and preps a prompt for an Anima image. + * + * Uses Qwen3 0.6B for hidden state extraction and T5-XXL tokenizer for + * token IDs (no T5 model weights needed). Both are combined by the + * LLM Adapter inside the Anima transformer during denoising. + */ + AnimaTextEncoderInvocation: { + /** + * Id + * @description The id of this instance of an invocation. Must be unique among all instances of invocations. + */ + id: string; + /** + * Is Intermediate + * @description Whether or not this is an intermediate invocation. + * @default false + */ + is_intermediate?: boolean; + /** + * Use Cache + * @description Whether or not to use the cache + * @default true + */ + use_cache?: boolean; + /** + * Prompt + * @description Text prompt to encode. 
+ * @default null + */ + prompt?: string | null; + /** + * Qwen3 Encoder + * @description Qwen3 tokenizer and text encoder + * @default null + */ + qwen3_encoder?: components["schemas"]["Qwen3EncoderField"] | null; + /** + * type + * @default anima_text_encoder + * @constant + */ + type: "anima_text_encoder"; + }; + AnyModelConfig: components["schemas"]["Main_Diffusers_SD1_Config"] | components["schemas"]["Main_Diffusers_SD2_Config"] | components["schemas"]["Main_Diffusers_SDXL_Config"] | components["schemas"]["Main_Diffusers_SDXLRefiner_Config"] | components["schemas"]["Main_Diffusers_SD3_Config"] | components["schemas"]["Main_Diffusers_FLUX_Config"] | components["schemas"]["Main_Diffusers_Flux2_Config"] | components["schemas"]["Main_Diffusers_CogView4_Config"] | components["schemas"]["Main_Diffusers_ZImage_Config"] | components["schemas"]["Main_Checkpoint_SD1_Config"] | components["schemas"]["Main_Checkpoint_SD2_Config"] | components["schemas"]["Main_Checkpoint_SDXL_Config"] | components["schemas"]["Main_Checkpoint_SDXLRefiner_Config"] | components["schemas"]["Main_Checkpoint_Flux2_Config"] | components["schemas"]["Main_Checkpoint_FLUX_Config"] | components["schemas"]["Main_Checkpoint_ZImage_Config"] | components["schemas"]["Main_Checkpoint_Anima_Config"] | components["schemas"]["Main_BnBNF4_FLUX_Config"] | components["schemas"]["Main_GGUF_Flux2_Config"] | components["schemas"]["Main_GGUF_FLUX_Config"] | components["schemas"]["Main_GGUF_ZImage_Config"] | components["schemas"]["VAE_Checkpoint_SD1_Config"] | components["schemas"]["VAE_Checkpoint_SD2_Config"] | components["schemas"]["VAE_Checkpoint_SDXL_Config"] | components["schemas"]["VAE_Checkpoint_FLUX_Config"] | components["schemas"]["VAE_Checkpoint_Flux2_Config"] | components["schemas"]["VAE_Checkpoint_Anima_Config"] | components["schemas"]["VAE_Diffusers_SD1_Config"] | components["schemas"]["VAE_Diffusers_SDXL_Config"] | components["schemas"]["VAE_Diffusers_Flux2_Config"] | 
components["schemas"]["ControlNet_Checkpoint_SD1_Config"] | components["schemas"]["ControlNet_Checkpoint_SD2_Config"] | components["schemas"]["ControlNet_Checkpoint_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_FLUX_Config"] | components["schemas"]["ControlNet_Checkpoint_ZImage_Config"] | components["schemas"]["ControlNet_Diffusers_SD1_Config"] | components["schemas"]["ControlNet_Diffusers_SD2_Config"] | components["schemas"]["ControlNet_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_SD1_Config"] | components["schemas"]["LoRA_LyCORIS_SD2_Config"] | components["schemas"]["LoRA_LyCORIS_SDXL_Config"] | components["schemas"]["LoRA_LyCORIS_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_ZImage_Config"] | components["schemas"]["LoRA_OMI_SDXL_Config"] | components["schemas"]["LoRA_OMI_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_SD1_Config"] | components["schemas"]["LoRA_Diffusers_SD2_Config"] | components["schemas"]["LoRA_Diffusers_SDXL_Config"] | components["schemas"]["LoRA_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_ZImage_Config"] | components["schemas"]["ControlLoRA_LyCORIS_FLUX_Config"] | components["schemas"]["T5Encoder_T5Encoder_Config"] | components["schemas"]["T5Encoder_BnBLLMint8_Config"] | components["schemas"]["Qwen3Encoder_Qwen3Encoder_Config"] | components["schemas"]["Qwen3Encoder_Checkpoint_Config"] | components["schemas"]["Qwen3Encoder_GGUF_Config"] | components["schemas"]["TI_File_SD1_Config"] | components["schemas"]["TI_File_SD2_Config"] | components["schemas"]["TI_File_SDXL_Config"] | components["schemas"]["TI_Folder_SD1_Config"] | components["schemas"]["TI_Folder_SD2_Config"] | components["schemas"]["TI_Folder_SDXL_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD1_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD2_Config"] | components["schemas"]["IPAdapter_InvokeAI_SDXL_Config"] | 
components["schemas"]["IPAdapter_Checkpoint_SD1_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD2_Config"] | components["schemas"]["IPAdapter_Checkpoint_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_FLUX_Config"] | components["schemas"]["T2IAdapter_Diffusers_SD1_Config"] | components["schemas"]["T2IAdapter_Diffusers_SDXL_Config"] | components["schemas"]["Spandrel_Checkpoint_Config"] | components["schemas"]["CLIPEmbed_Diffusers_G_Config"] | components["schemas"]["CLIPEmbed_Diffusers_L_Config"] | components["schemas"]["CLIPVision_Diffusers_Config"] | components["schemas"]["SigLIP_Diffusers_Config"] | components["schemas"]["FLUXRedux_Checkpoint_Config"] | components["schemas"]["LlavaOnevision_Diffusers_Config"] | components["schemas"]["Unknown_Config"]; + /** + * AppVersion + * @description App Version Response + */ + AppVersion: { + /** + * Version + * @description App version + */ + version: string; + }; + /** + * Apply Tensor Mask to Image + * @description Applies a tensor mask to an image. + * + * The image is converted to RGBA and the mask is applied to the alpha channel. + */ + ApplyMaskTensorToImageInvocation: { + /** + * @description The board to save the image to + * @default null + */ + board?: components["schemas"]["BoardField"] | null; + /** + * @description Optional metadata to be saved with the image + * @default null + */ + metadata?: components["schemas"]["MetadataField"] | null; + /** + * Id + * @description The id of this instance of an invocation. Must be unique among all instances of invocations. + */ + id: string; + /** + * Is Intermediate + * @description Whether or not this is an intermediate invocation. + * @default false + */ + is_intermediate?: boolean; + /** + * Use Cache + * @description Whether or not to use the cache + * @default true + */ + use_cache?: boolean; + /** + * @description The mask tensor to apply. 
+ * @default null + */ + mask?: components["schemas"]["TensorField"] | null; + /** + * @description The image to apply the mask to. + * @default null + */ + image?: components["schemas"]["ImageField"] | null; + /** + * Invert + * @description Whether to invert the mask. + * @default false + */ + invert?: boolean; + /** + * type + * @default apply_tensor_mask_to_image + * @constant + */ + type: "apply_tensor_mask_to_image"; + }; + /** + * Apply Mask to Image + * @description Extracts a region from a generated image using a mask and blends it seamlessly onto a source image. + * The mask uses black to indicate areas to keep from the generated image and white for areas to discard. + */ + ApplyMaskToImageInvocation: { + /** + * @description The board to save the image to + * @default null + */ + board?: components["schemas"]["BoardField"] | null; + /** + * @description Optional metadata to be saved with the image + * @default null + */ + metadata?: components["schemas"]["MetadataField"] | null; + /** + * Id + * @description The id of this instance of an invocation. Must be unique among all instances of invocations. + */ + id: string; + /** + * Is Intermediate + * @description Whether or not this is an intermediate invocation. 
+ * @default false + */ + is_intermediate?: boolean; + /** + * Use Cache + * @description Whether or not to use the cache + * @default true + */ + use_cache?: boolean; + /** + * @description The image from which to extract the masked region + * @default null + */ + image?: components["schemas"]["ImageField"] | null; + /** + * @description The mask defining the region (black=keep, white=discard) + * @default null + */ + mask?: components["schemas"]["ImageField"] | null; + /** + * Invert Mask + * @description Whether to invert the mask before applying it + * @default false + */ + invert_mask?: boolean; + /** + * type + * @default apply_mask_to_image + * @constant + */ + type: "apply_mask_to_image"; + }; + /** + * BaseMetadata + * @description Adds typing data for discriminated union. + */ + BaseMetadata: { + /** + * Name + * @description model's name + */ + name: string; + /** + * @description discriminator enum property added by openapi-typescript + * @enum {string} + */ + type: "basemetadata"; + }; + /** + * BaseModelType + * @description An enumeration of base model architectures. For example, Stable Diffusion 1.x, Stable Diffusion 2.x, FLUX, etc. + * + * Every model config must have a base architecture type. + * + * Not all models are associated with a base architecture. For example, CLIP models are their own thing, not related + * to any particular model architecture. To simplify internal APIs and make it easier to work with models, we use a + * fallback/null value `BaseModelType.Any` for these models, instead of making the model base optional. + * @enum {string} + */ + BaseModelType: "any" | "sd-1" | "sd-2" | "sd-3" | "sdxl" | "sdxl-refiner" | "flux" | "flux2" | "cogview4" | "z-image" | "anima" | "unknown"; + /** Batch */ + Batch: { + /** + * Batch Id + * @description The ID of the batch + */ + batch_id?: string; + /** + * Origin * @description The origin of this queue item. This data is used by the frontend to determine how to handle results. 
*/ origin?: string | null; @@ -3063,11 +2855,6 @@ export type components = { * @description The name of the board. */ board_name: string; - /** - * User Id - * @description The user ID of the board owner. - */ - user_id: string; /** * Created At * @description The created timestamp of the board. @@ -3103,11 +2890,6 @@ export type components = { * @description The number of assets in the board. */ asset_count: number; - /** - * Owner Username - * @description The username of the board owner (for admin view). - */ - owner_username?: string | null; }; /** * BoardField @@ -5678,55 +5460,18 @@ export type components = { */ resize_mode?: "just_resize" | "crop_resize" | "fill_resize" | "just_resize_simple"; }; - /** - * ControlNetRecallParameter - * @description ControlNet configuration for recall - */ - ControlNetRecallParameter: { + /** ControlNet_Checkpoint_FLUX_Config */ + ControlNet_Checkpoint_FLUX_Config: { /** - * Model Name - * @description The name of the ControlNet/T2I Adapter/Control LoRA model + * Key + * @description A unique key for this model. */ - model_name: string; + key: string; /** - * Image Name - * @description The filename of the control image in outputs/images + * Hash + * @description The hash of the model file(s). */ - image_name?: string | null; - /** - * Weight - * @description The weight for the control adapter - * @default 1 - */ - weight?: number; - /** - * Begin Step Percent - * @description When the control adapter is first applied (% of total steps) - */ - begin_step_percent?: number | null; - /** - * End Step Percent - * @description When the control adapter is last applied (% of total steps) - */ - end_step_percent?: number | null; - /** - * Control Mode - * @description The control mode (ControlNet only) - */ - control_mode?: ("balanced" | "more_prompt" | "more_control") | null; - }; - /** ControlNet_Checkpoint_FLUX_Config */ - ControlNet_Checkpoint_FLUX_Config: { - /** - * Key - * @description A unique key for this model. 
- */ - key: string; - /** - * Hash - * @description The hash of the model file(s). - */ - hash: string; + hash: string; /** * Path * @description Path to the model on the filesystem. Relative paths are relative to the Invoke root directory. @@ -7578,71 +7323,13 @@ export type components = { * @description Content type of downloaded file */ content_type?: string | null; - /** - * Canonical Url - * @description Canonical URL to request on resume - */ - canonical_url?: string | null; - /** - * Etag - * @description ETag from the remote server, if available - */ - etag?: string | null; - /** - * Last Modified - * @description Last-Modified from the remote server, if available - */ - last_modified?: string | null; - /** - * Final Url - * @description Final resolved URL after redirects, if available - */ - final_url?: string | null; - /** - * Expected Total Bytes - * @description Expected total size of the download - */ - expected_total_bytes?: number | null; - /** - * Resume Required - * @description True if server refused resume; restart required - * @default false - */ - resume_required?: boolean; - /** - * Resume Message - * @description Message explaining why resume is required - */ - resume_message?: string | null; - /** - * Resume From Scratch - * @description True if resume metadata existed but the partial file was missing and the download restarted from the beginning - * @default false - */ - resume_from_scratch?: boolean; }; /** * DownloadJobStatus * @description State of a download job. 
* @enum {string} */ - DownloadJobStatus: "waiting" | "running" | "paused" | "completed" | "cancelled" | "error"; - /** - * DownloadPausedEvent - * @description Event model for download_paused - */ - DownloadPausedEvent: { - /** - * Timestamp - * @description The timestamp of the event - */ - timestamp: number; - /** - * Source - * @description The source of the download - */ - source: string; - }; + DownloadJobStatus: "waiting" | "running" | "completed" | "cancelled" | "error"; /** * DownloadProgressEvent * @description Event model for download_progress @@ -8798,130 +8485,6 @@ export type components = { */ type: "flux2_denoise"; }; - /** - * Apply LoRA Collection - Flux2 Klein - * @description Applies a collection of LoRAs to a FLUX.2 Klein transformer and/or Qwen3 text encoder. - */ - Flux2KleinLoRACollectionLoader: { - /** - * Id - * @description The id of this instance of an invocation. Must be unique among all instances of invocations. - */ - id: string; - /** - * Is Intermediate - * @description Whether or not this is an intermediate invocation. - * @default false - */ - is_intermediate?: boolean; - /** - * Use Cache - * @description Whether or not to use the cache - * @default true - */ - use_cache?: boolean; - /** - * LoRAs - * @description LoRA models and weights. May be a single LoRA or collection. 
- * @default null - */ - loras?: components["schemas"]["LoRAField"] | components["schemas"]["LoRAField"][] | null; - /** - * Transformer - * @description Transformer - * @default null - */ - transformer?: components["schemas"]["TransformerField"] | null; - /** - * Qwen3 Encoder - * @description Qwen3 tokenizer and text encoder - * @default null - */ - qwen3_encoder?: components["schemas"]["Qwen3EncoderField"] | null; - /** - * type - * @default flux2_klein_lora_collection_loader - * @constant - */ - type: "flux2_klein_lora_collection_loader"; - }; - /** - * Apply LoRA - Flux2 Klein - * @description Apply a LoRA model to a FLUX.2 Klein transformer and/or Qwen3 text encoder. - */ - Flux2KleinLoRALoaderInvocation: { - /** - * Id - * @description The id of this instance of an invocation. Must be unique among all instances of invocations. - */ - id: string; - /** - * Is Intermediate - * @description Whether or not this is an intermediate invocation. - * @default false - */ - is_intermediate?: boolean; - /** - * Use Cache - * @description Whether or not to use the cache - * @default true - */ - use_cache?: boolean; - /** - * LoRA - * @description LoRA model to load - * @default null - */ - lora?: components["schemas"]["ModelIdentifierField"] | null; - /** - * Weight - * @description The weight at which the LoRA is applied to each model - * @default 0.75 - */ - weight?: number; - /** - * Transformer - * @description Transformer - * @default null - */ - transformer?: components["schemas"]["TransformerField"] | null; - /** - * Qwen3 Encoder - * @description Qwen3 tokenizer and text encoder - * @default null - */ - qwen3_encoder?: components["schemas"]["Qwen3EncoderField"] | null; - /** - * type - * @default flux2_klein_lora_loader - * @constant - */ - type: "flux2_klein_lora_loader"; - }; - /** - * Flux2KleinLoRALoaderOutput - * @description FLUX.2 Klein LoRA Loader Output - */ - Flux2KleinLoRALoaderOutput: { - /** - * Transformer - * @description Transformer - * @default 
null - */ - transformer: components["schemas"]["TransformerField"] | null; - /** - * Qwen3 Encoder - * @description Qwen3 tokenizer and text encoder - * @default null - */ - qwen3_encoder: components["schemas"]["Qwen3EncoderField"] | null; - /** - * type - * @default flux2_klein_lora_loader_output - * @constant - */ - type: "flux2_klein_lora_loader_output"; - }; /** * Main Model - Flux2 Klein * @description Loads a Flux2 Klein model, outputting its submodels. @@ -10561,17 +10124,6 @@ export type components = { */ type: "freeu"; }; - /** - * GeneratePasswordResponse - * @description Response containing a generated password. - */ - GeneratePasswordResponse: { - /** - * Password - * @description Generated strong password - */ - password: string; - }; /** * Get Image Mask Bounding Box * @description Gets the bounding box of the given mask image. @@ -10657,7 +10209,7 @@ export type components = { * @description The nodes in this graph */ nodes?: { - [key: string]: components["schemas"]["AddInvocation"] | components["schemas"]["AlphaMaskToTensorInvocation"] | components["schemas"]["ApplyMaskTensorToImageInvocation"] | components["schemas"]["ApplyMaskToImageInvocation"] | components["schemas"]["BlankImageInvocation"] | components["schemas"]["BlendLatentsInvocation"] | components["schemas"]["BooleanCollectionInvocation"] | components["schemas"]["BooleanInvocation"] | components["schemas"]["BoundingBoxInvocation"] | components["schemas"]["CLIPSkipInvocation"] | components["schemas"]["CV2InfillInvocation"] | components["schemas"]["CalculateImageTilesEvenSplitInvocation"] | components["schemas"]["CalculateImageTilesInvocation"] | components["schemas"]["CalculateImageTilesMinimumOverlapInvocation"] | components["schemas"]["CannyEdgeDetectionInvocation"] | components["schemas"]["CanvasPasteBackInvocation"] | components["schemas"]["CanvasV2MaskAndCropInvocation"] | components["schemas"]["CenterPadCropInvocation"] | components["schemas"]["CogView4DenoiseInvocation"] | 
components["schemas"]["CogView4ImageToLatentsInvocation"] | components["schemas"]["CogView4LatentsToImageInvocation"] | components["schemas"]["CogView4ModelLoaderInvocation"] | components["schemas"]["CogView4TextEncoderInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["ColorInvocation"] | components["schemas"]["ColorMapInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["ConditioningCollectionInvocation"] | components["schemas"]["ConditioningInvocation"] | components["schemas"]["ContentShuffleInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["CoreMetadataInvocation"] | components["schemas"]["CreateDenoiseMaskInvocation"] | components["schemas"]["CreateGradientMaskInvocation"] | components["schemas"]["CropImageToBoundingBoxInvocation"] | components["schemas"]["CropLatentsCoreInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["DWOpenposeDetectionInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["DenoiseLatentsMetaInvocation"] | components["schemas"]["DepthAnythingDepthEstimationInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["ExpandMaskWithFadeInvocation"] | components["schemas"]["FLUXLoRACollectionLoader"] | components["schemas"]["FaceIdentifierInvocation"] | components["schemas"]["FaceMaskInvocation"] | components["schemas"]["FaceOffInvocation"] | components["schemas"]["FloatBatchInvocation"] | components["schemas"]["FloatCollectionInvocation"] | components["schemas"]["FloatGenerator"] | components["schemas"]["FloatInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["FloatMathInvocation"] | components["schemas"]["FloatToIntegerInvocation"] | 
components["schemas"]["Flux2DenoiseInvocation"] | components["schemas"]["Flux2KleinLoRACollectionLoader"] | components["schemas"]["Flux2KleinLoRALoaderInvocation"] | components["schemas"]["Flux2KleinModelLoaderInvocation"] | components["schemas"]["Flux2KleinTextEncoderInvocation"] | components["schemas"]["Flux2VaeDecodeInvocation"] | components["schemas"]["Flux2VaeEncodeInvocation"] | components["schemas"]["FluxControlLoRALoaderInvocation"] | components["schemas"]["FluxControlNetInvocation"] | components["schemas"]["FluxDenoiseInvocation"] | components["schemas"]["FluxDenoiseLatentsMetaInvocation"] | components["schemas"]["FluxFillInvocation"] | components["schemas"]["FluxIPAdapterInvocation"] | components["schemas"]["FluxKontextConcatenateImagesInvocation"] | components["schemas"]["FluxKontextInvocation"] | components["schemas"]["FluxLoRALoaderInvocation"] | components["schemas"]["FluxModelLoaderInvocation"] | components["schemas"]["FluxReduxInvocation"] | components["schemas"]["FluxTextEncoderInvocation"] | components["schemas"]["FluxVaeDecodeInvocation"] | components["schemas"]["FluxVaeEncodeInvocation"] | components["schemas"]["FreeUInvocation"] | components["schemas"]["GetMaskBoundingBoxInvocation"] | components["schemas"]["GroundingDinoInvocation"] | components["schemas"]["HEDEdgeDetectionInvocation"] | components["schemas"]["HeuristicResizeInvocation"] | components["schemas"]["IPAdapterInvocation"] | components["schemas"]["IdealSizeInvocation"] | components["schemas"]["ImageBatchInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["ImageChannelMultiplyInvocation"] | components["schemas"]["ImageChannelOffsetInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImageGenerator"] | components["schemas"]["ImageHueAdjustmentInvocation"] | 
components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ImageInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ImageMaskToTensorInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["ImageNoiseInvocation"] | components["schemas"]["ImagePanelLayoutInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["IntegerBatchInvocation"] | components["schemas"]["IntegerCollectionInvocation"] | components["schemas"]["IntegerGenerator"] | components["schemas"]["IntegerInvocation"] | components["schemas"]["IntegerMathInvocation"] | components["schemas"]["InvertTensorMaskInvocation"] | components["schemas"]["InvokeAdjustImageHuePlusInvocation"] | components["schemas"]["InvokeEquivalentAchromaticLightnessInvocation"] | components["schemas"]["InvokeImageBlendInvocation"] | components["schemas"]["InvokeImageCompositorInvocation"] | components["schemas"]["InvokeImageDilateOrErodeInvocation"] | components["schemas"]["InvokeImageEnhanceInvocation"] | components["schemas"]["InvokeImageValueThresholdsInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["LaMaInfillInvocation"] | components["schemas"]["LatentsCollectionInvocation"] | components["schemas"]["LatentsInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["LineartAnimeEdgeDetectionInvocation"] | components["schemas"]["LineartEdgeDetectionInvocation"] | components["schemas"]["LlavaOnevisionVllmInvocation"] | components["schemas"]["LoRACollectionLoader"] | 
components["schemas"]["LoRALoaderInvocation"] | components["schemas"]["LoRASelectorInvocation"] | components["schemas"]["MLSDDetectionInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["MaskFromIDInvocation"] | components["schemas"]["MaskTensorToImageInvocation"] | components["schemas"]["MediaPipeFaceDetectionInvocation"] | components["schemas"]["MergeMetadataInvocation"] | components["schemas"]["MergeTilesToImageInvocation"] | components["schemas"]["MetadataFieldExtractorInvocation"] | components["schemas"]["MetadataFromImageInvocation"] | components["schemas"]["MetadataInvocation"] | components["schemas"]["MetadataItemInvocation"] | components["schemas"]["MetadataItemLinkedInvocation"] | components["schemas"]["MetadataToBoolCollectionInvocation"] | components["schemas"]["MetadataToBoolInvocation"] | components["schemas"]["MetadataToControlnetsInvocation"] | components["schemas"]["MetadataToFloatCollectionInvocation"] | components["schemas"]["MetadataToFloatInvocation"] | components["schemas"]["MetadataToIPAdaptersInvocation"] | components["schemas"]["MetadataToIntegerCollectionInvocation"] | components["schemas"]["MetadataToIntegerInvocation"] | components["schemas"]["MetadataToLorasCollectionInvocation"] | components["schemas"]["MetadataToLorasInvocation"] | components["schemas"]["MetadataToModelInvocation"] | components["schemas"]["MetadataToSDXLLorasInvocation"] | components["schemas"]["MetadataToSDXLModelInvocation"] | components["schemas"]["MetadataToSchedulerInvocation"] | components["schemas"]["MetadataToStringCollectionInvocation"] | components["schemas"]["MetadataToStringInvocation"] | components["schemas"]["MetadataToT2IAdaptersInvocation"] | components["schemas"]["MetadataToVAEInvocation"] | components["schemas"]["ModelIdentifierInvocation"] | 
components["schemas"]["MultiplyInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["NormalMapInvocation"] | components["schemas"]["PBRMapsInvocation"] | components["schemas"]["PairTileImageInvocation"] | components["schemas"]["PasteImageIntoBoundingBoxInvocation"] | components["schemas"]["PiDiNetEdgeDetectionInvocation"] | components["schemas"]["PromptTemplateInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["RandomFloatInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RectangleMaskInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["RoundInvocation"] | components["schemas"]["SD3DenoiseInvocation"] | components["schemas"]["SD3ImageToLatentsInvocation"] | components["schemas"]["SD3LatentsToImageInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["SDXLLoRACollectionLoader"] | components["schemas"]["SDXLLoRALoaderInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["SaveImageInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["SchedulerInvocation"] | components["schemas"]["Sd3ModelLoaderInvocation"] | components["schemas"]["Sd3TextEncoderInvocation"] | components["schemas"]["SeamlessModeInvocation"] | components["schemas"]["SegmentAnythingInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["SpandrelImageToImageAutoscaleInvocation"] | components["schemas"]["SpandrelImageToImageInvocation"] | components["schemas"]["StringBatchInvocation"] | components["schemas"]["StringCollectionInvocation"] | components["schemas"]["StringGenerator"] | 
components["schemas"]["StringInvocation"] | components["schemas"]["StringJoinInvocation"] | components["schemas"]["StringJoinThreeInvocation"] | components["schemas"]["StringReplaceInvocation"] | components["schemas"]["StringSplitInvocation"] | components["schemas"]["StringSplitNegInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["T2IAdapterInvocation"] | components["schemas"]["TileToPropertiesInvocation"] | components["schemas"]["TiledMultiDiffusionDenoiseLatents"] | components["schemas"]["UnsharpMaskInvocation"] | components["schemas"]["VAELoaderInvocation"] | components["schemas"]["ZImageControlInvocation"] | components["schemas"]["ZImageDenoiseInvocation"] | components["schemas"]["ZImageDenoiseMetaInvocation"] | components["schemas"]["ZImageImageToLatentsInvocation"] | components["schemas"]["ZImageLatentsToImageInvocation"] | components["schemas"]["ZImageLoRACollectionLoader"] | components["schemas"]["ZImageLoRALoaderInvocation"] | components["schemas"]["ZImageModelLoaderInvocation"] | components["schemas"]["ZImageSeedVarianceEnhancerInvocation"] | components["schemas"]["ZImageTextEncoderInvocation"]; + [key: string]: components["schemas"]["AddInvocation"] | components["schemas"]["AlphaMaskToTensorInvocation"] | components["schemas"]["AnimaDenoiseInvocation"] | components["schemas"]["AnimaLatentsToImageInvocation"] | components["schemas"]["AnimaModelLoaderInvocation"] | components["schemas"]["AnimaTextEncoderInvocation"] | components["schemas"]["ApplyMaskTensorToImageInvocation"] | components["schemas"]["ApplyMaskToImageInvocation"] | components["schemas"]["BlankImageInvocation"] | components["schemas"]["BlendLatentsInvocation"] | components["schemas"]["BooleanCollectionInvocation"] | components["schemas"]["BooleanInvocation"] | components["schemas"]["BoundingBoxInvocation"] | components["schemas"]["CLIPSkipInvocation"] | components["schemas"]["CV2InfillInvocation"] | 
components["schemas"]["CalculateImageTilesEvenSplitInvocation"] | components["schemas"]["CalculateImageTilesInvocation"] | components["schemas"]["CalculateImageTilesMinimumOverlapInvocation"] | components["schemas"]["CannyEdgeDetectionInvocation"] | components["schemas"]["CanvasPasteBackInvocation"] | components["schemas"]["CanvasV2MaskAndCropInvocation"] | components["schemas"]["CenterPadCropInvocation"] | components["schemas"]["CogView4DenoiseInvocation"] | components["schemas"]["CogView4ImageToLatentsInvocation"] | components["schemas"]["CogView4LatentsToImageInvocation"] | components["schemas"]["CogView4ModelLoaderInvocation"] | components["schemas"]["CogView4TextEncoderInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["ColorInvocation"] | components["schemas"]["ColorMapInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["ConditioningCollectionInvocation"] | components["schemas"]["ConditioningInvocation"] | components["schemas"]["ContentShuffleInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["CoreMetadataInvocation"] | components["schemas"]["CreateDenoiseMaskInvocation"] | components["schemas"]["CreateGradientMaskInvocation"] | components["schemas"]["CropImageToBoundingBoxInvocation"] | components["schemas"]["CropLatentsCoreInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["DWOpenposeDetectionInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["DenoiseLatentsMetaInvocation"] | components["schemas"]["DepthAnythingDepthEstimationInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["ExpandMaskWithFadeInvocation"] | components["schemas"]["FLUXLoRACollectionLoader"] | components["schemas"]["FaceIdentifierInvocation"] | 
components["schemas"]["FaceMaskInvocation"] | components["schemas"]["FaceOffInvocation"] | components["schemas"]["FloatBatchInvocation"] | components["schemas"]["FloatCollectionInvocation"] | components["schemas"]["FloatGenerator"] | components["schemas"]["FloatInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["FloatMathInvocation"] | components["schemas"]["FloatToIntegerInvocation"] | components["schemas"]["Flux2DenoiseInvocation"] | components["schemas"]["Flux2KleinModelLoaderInvocation"] | components["schemas"]["Flux2KleinTextEncoderInvocation"] | components["schemas"]["Flux2VaeDecodeInvocation"] | components["schemas"]["Flux2VaeEncodeInvocation"] | components["schemas"]["FluxControlLoRALoaderInvocation"] | components["schemas"]["FluxControlNetInvocation"] | components["schemas"]["FluxDenoiseInvocation"] | components["schemas"]["FluxDenoiseLatentsMetaInvocation"] | components["schemas"]["FluxFillInvocation"] | components["schemas"]["FluxIPAdapterInvocation"] | components["schemas"]["FluxKontextConcatenateImagesInvocation"] | components["schemas"]["FluxKontextInvocation"] | components["schemas"]["FluxLoRALoaderInvocation"] | components["schemas"]["FluxModelLoaderInvocation"] | components["schemas"]["FluxReduxInvocation"] | components["schemas"]["FluxTextEncoderInvocation"] | components["schemas"]["FluxVaeDecodeInvocation"] | components["schemas"]["FluxVaeEncodeInvocation"] | components["schemas"]["FreeUInvocation"] | components["schemas"]["GetMaskBoundingBoxInvocation"] | components["schemas"]["GroundingDinoInvocation"] | components["schemas"]["HEDEdgeDetectionInvocation"] | components["schemas"]["HeuristicResizeInvocation"] | components["schemas"]["IPAdapterInvocation"] | components["schemas"]["IdealSizeInvocation"] | components["schemas"]["ImageBatchInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["ImageChannelMultiplyInvocation"] | 
components["schemas"]["ImageChannelOffsetInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImageGenerator"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ImageInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ImageMaskToTensorInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["ImageNoiseInvocation"] | components["schemas"]["ImagePanelLayoutInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["IntegerBatchInvocation"] | components["schemas"]["IntegerCollectionInvocation"] | components["schemas"]["IntegerGenerator"] | components["schemas"]["IntegerInvocation"] | components["schemas"]["IntegerMathInvocation"] | components["schemas"]["InvertTensorMaskInvocation"] | components["schemas"]["InvokeAdjustImageHuePlusInvocation"] | components["schemas"]["InvokeEquivalentAchromaticLightnessInvocation"] | components["schemas"]["InvokeImageBlendInvocation"] | components["schemas"]["InvokeImageCompositorInvocation"] | components["schemas"]["InvokeImageDilateOrErodeInvocation"] | components["schemas"]["InvokeImageEnhanceInvocation"] | components["schemas"]["InvokeImageValueThresholdsInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["LaMaInfillInvocation"] | components["schemas"]["LatentsCollectionInvocation"] | 
components["schemas"]["LatentsInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["LineartAnimeEdgeDetectionInvocation"] | components["schemas"]["LineartEdgeDetectionInvocation"] | components["schemas"]["LlavaOnevisionVllmInvocation"] | components["schemas"]["LoRACollectionLoader"] | components["schemas"]["LoRALoaderInvocation"] | components["schemas"]["LoRASelectorInvocation"] | components["schemas"]["MLSDDetectionInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["MaskFromIDInvocation"] | components["schemas"]["MaskTensorToImageInvocation"] | components["schemas"]["MediaPipeFaceDetectionInvocation"] | components["schemas"]["MergeMetadataInvocation"] | components["schemas"]["MergeTilesToImageInvocation"] | components["schemas"]["MetadataFieldExtractorInvocation"] | components["schemas"]["MetadataFromImageInvocation"] | components["schemas"]["MetadataInvocation"] | components["schemas"]["MetadataItemInvocation"] | components["schemas"]["MetadataItemLinkedInvocation"] | components["schemas"]["MetadataToBoolCollectionInvocation"] | components["schemas"]["MetadataToBoolInvocation"] | components["schemas"]["MetadataToControlnetsInvocation"] | components["schemas"]["MetadataToFloatCollectionInvocation"] | components["schemas"]["MetadataToFloatInvocation"] | components["schemas"]["MetadataToIPAdaptersInvocation"] | components["schemas"]["MetadataToIntegerCollectionInvocation"] | components["schemas"]["MetadataToIntegerInvocation"] | components["schemas"]["MetadataToLorasCollectionInvocation"] | components["schemas"]["MetadataToLorasInvocation"] | components["schemas"]["MetadataToModelInvocation"] | components["schemas"]["MetadataToSDXLLorasInvocation"] | components["schemas"]["MetadataToSDXLModelInvocation"] | 
components["schemas"]["MetadataToSchedulerInvocation"] | components["schemas"]["MetadataToStringCollectionInvocation"] | components["schemas"]["MetadataToStringInvocation"] | components["schemas"]["MetadataToT2IAdaptersInvocation"] | components["schemas"]["MetadataToVAEInvocation"] | components["schemas"]["ModelIdentifierInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["NormalMapInvocation"] | components["schemas"]["PBRMapsInvocation"] | components["schemas"]["PairTileImageInvocation"] | components["schemas"]["PasteImageIntoBoundingBoxInvocation"] | components["schemas"]["PiDiNetEdgeDetectionInvocation"] | components["schemas"]["PromptTemplateInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["RandomFloatInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RectangleMaskInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["RoundInvocation"] | components["schemas"]["SD3DenoiseInvocation"] | components["schemas"]["SD3ImageToLatentsInvocation"] | components["schemas"]["SD3LatentsToImageInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["SDXLLoRACollectionLoader"] | components["schemas"]["SDXLLoRALoaderInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["SaveImageInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["SchedulerInvocation"] | components["schemas"]["Sd3ModelLoaderInvocation"] | components["schemas"]["Sd3TextEncoderInvocation"] | components["schemas"]["SeamlessModeInvocation"] | 
components["schemas"]["SegmentAnythingInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["SpandrelImageToImageAutoscaleInvocation"] | components["schemas"]["SpandrelImageToImageInvocation"] | components["schemas"]["StringBatchInvocation"] | components["schemas"]["StringCollectionInvocation"] | components["schemas"]["StringGenerator"] | components["schemas"]["StringInvocation"] | components["schemas"]["StringJoinInvocation"] | components["schemas"]["StringJoinThreeInvocation"] | components["schemas"]["StringReplaceInvocation"] | components["schemas"]["StringSplitInvocation"] | components["schemas"]["StringSplitNegInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["T2IAdapterInvocation"] | components["schemas"]["TileToPropertiesInvocation"] | components["schemas"]["TiledMultiDiffusionDenoiseLatents"] | components["schemas"]["UnsharpMaskInvocation"] | components["schemas"]["VAELoaderInvocation"] | components["schemas"]["ZImageControlInvocation"] | components["schemas"]["ZImageDenoiseInvocation"] | components["schemas"]["ZImageDenoiseMetaInvocation"] | components["schemas"]["ZImageImageToLatentsInvocation"] | components["schemas"]["ZImageLatentsToImageInvocation"] | components["schemas"]["ZImageLoRACollectionLoader"] | components["schemas"]["ZImageLoRALoaderInvocation"] | components["schemas"]["ZImageModelLoaderInvocation"] | components["schemas"]["ZImageSeedVarianceEnhancerInvocation"] | components["schemas"]["ZImageTextEncoderInvocation"]; }; /** * Edges @@ -10694,7 +10246,7 @@ export type components = { * @description The results of node executions */ results: { - [key: string]: components["schemas"]["BooleanCollectionOutput"] | components["schemas"]["BooleanOutput"] | components["schemas"]["BoundingBoxCollectionOutput"] | components["schemas"]["BoundingBoxOutput"] | components["schemas"]["CLIPOutput"] | components["schemas"]["CLIPSkipInvocationOutput"] | 
components["schemas"]["CalculateImageTilesOutput"] | components["schemas"]["CogView4ConditioningOutput"] | components["schemas"]["CogView4ModelLoaderOutput"] | components["schemas"]["CollectInvocationOutput"] | components["schemas"]["ColorCollectionOutput"] | components["schemas"]["ColorOutput"] | components["schemas"]["ConditioningCollectionOutput"] | components["schemas"]["ConditioningOutput"] | components["schemas"]["ControlOutput"] | components["schemas"]["DenoiseMaskOutput"] | components["schemas"]["FaceMaskOutput"] | components["schemas"]["FaceOffOutput"] | components["schemas"]["FloatCollectionOutput"] | components["schemas"]["FloatGeneratorOutput"] | components["schemas"]["FloatOutput"] | components["schemas"]["Flux2KleinLoRALoaderOutput"] | components["schemas"]["Flux2KleinModelLoaderOutput"] | components["schemas"]["FluxConditioningCollectionOutput"] | components["schemas"]["FluxConditioningOutput"] | components["schemas"]["FluxControlLoRALoaderOutput"] | components["schemas"]["FluxControlNetOutput"] | components["schemas"]["FluxFillOutput"] | components["schemas"]["FluxKontextOutput"] | components["schemas"]["FluxLoRALoaderOutput"] | components["schemas"]["FluxModelLoaderOutput"] | components["schemas"]["FluxReduxOutput"] | components["schemas"]["GradientMaskOutput"] | components["schemas"]["IPAdapterOutput"] | components["schemas"]["IdealSizeOutput"] | components["schemas"]["ImageCollectionOutput"] | components["schemas"]["ImageGeneratorOutput"] | components["schemas"]["ImageOutput"] | components["schemas"]["ImagePanelCoordinateOutput"] | components["schemas"]["IntegerCollectionOutput"] | components["schemas"]["IntegerGeneratorOutput"] | components["schemas"]["IntegerOutput"] | components["schemas"]["IterateInvocationOutput"] | components["schemas"]["LatentsCollectionOutput"] | components["schemas"]["LatentsMetaOutput"] | components["schemas"]["LatentsOutput"] | components["schemas"]["LoRALoaderOutput"] | components["schemas"]["LoRASelectorOutput"] | 
components["schemas"]["MDControlListOutput"] | components["schemas"]["MDIPAdapterListOutput"] | components["schemas"]["MDT2IAdapterListOutput"] | components["schemas"]["MaskOutput"] | components["schemas"]["MetadataItemOutput"] | components["schemas"]["MetadataOutput"] | components["schemas"]["MetadataToLorasCollectionOutput"] | components["schemas"]["MetadataToModelOutput"] | components["schemas"]["MetadataToSDXLModelOutput"] | components["schemas"]["ModelIdentifierOutput"] | components["schemas"]["ModelLoaderOutput"] | components["schemas"]["NoiseOutput"] | components["schemas"]["PBRMapsOutput"] | components["schemas"]["PairTileImageOutput"] | components["schemas"]["PromptTemplateOutput"] | components["schemas"]["SD3ConditioningOutput"] | components["schemas"]["SDXLLoRALoaderOutput"] | components["schemas"]["SDXLModelLoaderOutput"] | components["schemas"]["SDXLRefinerModelLoaderOutput"] | components["schemas"]["SchedulerOutput"] | components["schemas"]["Sd3ModelLoaderOutput"] | components["schemas"]["SeamlessModeOutput"] | components["schemas"]["String2Output"] | components["schemas"]["StringCollectionOutput"] | components["schemas"]["StringGeneratorOutput"] | components["schemas"]["StringOutput"] | components["schemas"]["StringPosNegOutput"] | components["schemas"]["T2IAdapterOutput"] | components["schemas"]["TileToPropertiesOutput"] | components["schemas"]["UNetOutput"] | components["schemas"]["VAEOutput"] | components["schemas"]["ZImageConditioningOutput"] | components["schemas"]["ZImageControlOutput"] | components["schemas"]["ZImageLoRALoaderOutput"] | components["schemas"]["ZImageModelLoaderOutput"]; + [key: string]: components["schemas"]["AnimaConditioningOutput"] | components["schemas"]["AnimaModelLoaderOutput"] | components["schemas"]["BooleanCollectionOutput"] | components["schemas"]["BooleanOutput"] | components["schemas"]["BoundingBoxCollectionOutput"] | components["schemas"]["BoundingBoxOutput"] | components["schemas"]["CLIPOutput"] | 
components["schemas"]["CLIPSkipInvocationOutput"] | components["schemas"]["CalculateImageTilesOutput"] | components["schemas"]["CogView4ConditioningOutput"] | components["schemas"]["CogView4ModelLoaderOutput"] | components["schemas"]["CollectInvocationOutput"] | components["schemas"]["ColorCollectionOutput"] | components["schemas"]["ColorOutput"] | components["schemas"]["ConditioningCollectionOutput"] | components["schemas"]["ConditioningOutput"] | components["schemas"]["ControlOutput"] | components["schemas"]["DenoiseMaskOutput"] | components["schemas"]["FaceMaskOutput"] | components["schemas"]["FaceOffOutput"] | components["schemas"]["FloatCollectionOutput"] | components["schemas"]["FloatGeneratorOutput"] | components["schemas"]["FloatOutput"] | components["schemas"]["Flux2KleinModelLoaderOutput"] | components["schemas"]["FluxConditioningCollectionOutput"] | components["schemas"]["FluxConditioningOutput"] | components["schemas"]["FluxControlLoRALoaderOutput"] | components["schemas"]["FluxControlNetOutput"] | components["schemas"]["FluxFillOutput"] | components["schemas"]["FluxKontextOutput"] | components["schemas"]["FluxLoRALoaderOutput"] | components["schemas"]["FluxModelLoaderOutput"] | components["schemas"]["FluxReduxOutput"] | components["schemas"]["GradientMaskOutput"] | components["schemas"]["IPAdapterOutput"] | components["schemas"]["IdealSizeOutput"] | components["schemas"]["ImageCollectionOutput"] | components["schemas"]["ImageGeneratorOutput"] | components["schemas"]["ImageOutput"] | components["schemas"]["ImagePanelCoordinateOutput"] | components["schemas"]["IntegerCollectionOutput"] | components["schemas"]["IntegerGeneratorOutput"] | components["schemas"]["IntegerOutput"] | components["schemas"]["IterateInvocationOutput"] | components["schemas"]["LatentsCollectionOutput"] | components["schemas"]["LatentsMetaOutput"] | components["schemas"]["LatentsOutput"] | components["schemas"]["LoRALoaderOutput"] | components["schemas"]["LoRASelectorOutput"] | 
components["schemas"]["MDControlListOutput"] | components["schemas"]["MDIPAdapterListOutput"] | components["schemas"]["MDT2IAdapterListOutput"] | components["schemas"]["MaskOutput"] | components["schemas"]["MetadataItemOutput"] | components["schemas"]["MetadataOutput"] | components["schemas"]["MetadataToLorasCollectionOutput"] | components["schemas"]["MetadataToModelOutput"] | components["schemas"]["MetadataToSDXLModelOutput"] | components["schemas"]["ModelIdentifierOutput"] | components["schemas"]["ModelLoaderOutput"] | components["schemas"]["NoiseOutput"] | components["schemas"]["PBRMapsOutput"] | components["schemas"]["PairTileImageOutput"] | components["schemas"]["PromptTemplateOutput"] | components["schemas"]["SD3ConditioningOutput"] | components["schemas"]["SDXLLoRALoaderOutput"] | components["schemas"]["SDXLModelLoaderOutput"] | components["schemas"]["SDXLRefinerModelLoaderOutput"] | components["schemas"]["SchedulerOutput"] | components["schemas"]["Sd3ModelLoaderOutput"] | components["schemas"]["SeamlessModeOutput"] | components["schemas"]["String2Output"] | components["schemas"]["StringCollectionOutput"] | components["schemas"]["StringGeneratorOutput"] | components["schemas"]["StringOutput"] | components["schemas"]["StringPosNegOutput"] | components["schemas"]["T2IAdapterOutput"] | components["schemas"]["TileToPropertiesOutput"] | components["schemas"]["UNetOutput"] | components["schemas"]["VAEOutput"] | components["schemas"]["ZImageConditioningOutput"] | components["schemas"]["ZImageControlOutput"] | components["schemas"]["ZImageLoRALoaderOutput"] | components["schemas"]["ZImageModelLoaderOutput"]; }; /** * Errors @@ -11140,48 +10692,6 @@ export type components = { */ type: "ip_adapter_output"; }; - /** - * IPAdapterRecallParameter - * @description IP Adapter configuration for recall - */ - IPAdapterRecallParameter: { - /** - * Model Name - * @description The name of the IP Adapter model - */ - model_name: string; - /** - * Image Name - * @description The 
filename of the reference image in outputs/images - */ - image_name?: string | null; - /** - * Weight - * @description The weight for the IP Adapter - * @default 1 - */ - weight?: number; - /** - * Begin Step Percent - * @description When the IP Adapter is first applied (% of total steps) - */ - begin_step_percent?: number | null; - /** - * End Step Percent - * @description When the IP Adapter is last applied (% of total steps) - */ - end_step_percent?: number | null; - /** - * Method - * @description The IP Adapter method - */ - method?: ("full" | "style" | "composition") | null; - /** - * Image Influence - * @description FLUX Redux image influence (if model is flux_redux) - */ - image_influence?: ("lowest" | "low" | "medium" | "high" | "highest") | null; - }; /** IPAdapter_Checkpoint_FLUX_Config */ IPAdapter_Checkpoint_FLUX_Config: { /** @@ -13520,7 +13030,7 @@ export type components = { * @description State of an install job running in the background. * @enum {string} */ - InstallStatus: "waiting" | "downloading" | "downloads_done" | "running" | "paused" | "completed" | "error" | "cancelled"; + InstallStatus: "waiting" | "downloading" | "downloads_done" | "running" | "completed" | "error" | "cancelled"; /** * Integer Batch * @description Create a batched generation, where the workflow is executed once for each integer in the batch. 
@@ -13864,12 +13374,6 @@ export type components = { * @default null */ destination: string | null; - /** - * User Id - * @description The ID of the user who created the queue item - * @default system - */ - user_id: string; /** * Session Id * @description The ID of the session (aka graph execution state) @@ -13879,7 +13383,7 @@ export type components = { * Invocation * @description The ID of the invocation */ - invocation: components["schemas"]["AddInvocation"] | components["schemas"]["AlphaMaskToTensorInvocation"] | components["schemas"]["ApplyMaskTensorToImageInvocation"] | components["schemas"]["ApplyMaskToImageInvocation"] | components["schemas"]["BlankImageInvocation"] | components["schemas"]["BlendLatentsInvocation"] | components["schemas"]["BooleanCollectionInvocation"] | components["schemas"]["BooleanInvocation"] | components["schemas"]["BoundingBoxInvocation"] | components["schemas"]["CLIPSkipInvocation"] | components["schemas"]["CV2InfillInvocation"] | components["schemas"]["CalculateImageTilesEvenSplitInvocation"] | components["schemas"]["CalculateImageTilesInvocation"] | components["schemas"]["CalculateImageTilesMinimumOverlapInvocation"] | components["schemas"]["CannyEdgeDetectionInvocation"] | components["schemas"]["CanvasPasteBackInvocation"] | components["schemas"]["CanvasV2MaskAndCropInvocation"] | components["schemas"]["CenterPadCropInvocation"] | components["schemas"]["CogView4DenoiseInvocation"] | components["schemas"]["CogView4ImageToLatentsInvocation"] | components["schemas"]["CogView4LatentsToImageInvocation"] | components["schemas"]["CogView4ModelLoaderInvocation"] | components["schemas"]["CogView4TextEncoderInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["ColorInvocation"] | components["schemas"]["ColorMapInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["ConditioningCollectionInvocation"] | 
components["schemas"]["ConditioningInvocation"] | components["schemas"]["ContentShuffleInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["CoreMetadataInvocation"] | components["schemas"]["CreateDenoiseMaskInvocation"] | components["schemas"]["CreateGradientMaskInvocation"] | components["schemas"]["CropImageToBoundingBoxInvocation"] | components["schemas"]["CropLatentsCoreInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["DWOpenposeDetectionInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["DenoiseLatentsMetaInvocation"] | components["schemas"]["DepthAnythingDepthEstimationInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["ExpandMaskWithFadeInvocation"] | components["schemas"]["FLUXLoRACollectionLoader"] | components["schemas"]["FaceIdentifierInvocation"] | components["schemas"]["FaceMaskInvocation"] | components["schemas"]["FaceOffInvocation"] | components["schemas"]["FloatBatchInvocation"] | components["schemas"]["FloatCollectionInvocation"] | components["schemas"]["FloatGenerator"] | components["schemas"]["FloatInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["FloatMathInvocation"] | components["schemas"]["FloatToIntegerInvocation"] | components["schemas"]["Flux2DenoiseInvocation"] | components["schemas"]["Flux2KleinLoRACollectionLoader"] | components["schemas"]["Flux2KleinLoRALoaderInvocation"] | components["schemas"]["Flux2KleinModelLoaderInvocation"] | components["schemas"]["Flux2KleinTextEncoderInvocation"] | components["schemas"]["Flux2VaeDecodeInvocation"] | components["schemas"]["Flux2VaeEncodeInvocation"] | components["schemas"]["FluxControlLoRALoaderInvocation"] | components["schemas"]["FluxControlNetInvocation"] | components["schemas"]["FluxDenoiseInvocation"] | 
components["schemas"]["FluxDenoiseLatentsMetaInvocation"] | components["schemas"]["FluxFillInvocation"] | components["schemas"]["FluxIPAdapterInvocation"] | components["schemas"]["FluxKontextConcatenateImagesInvocation"] | components["schemas"]["FluxKontextInvocation"] | components["schemas"]["FluxLoRALoaderInvocation"] | components["schemas"]["FluxModelLoaderInvocation"] | components["schemas"]["FluxReduxInvocation"] | components["schemas"]["FluxTextEncoderInvocation"] | components["schemas"]["FluxVaeDecodeInvocation"] | components["schemas"]["FluxVaeEncodeInvocation"] | components["schemas"]["FreeUInvocation"] | components["schemas"]["GetMaskBoundingBoxInvocation"] | components["schemas"]["GroundingDinoInvocation"] | components["schemas"]["HEDEdgeDetectionInvocation"] | components["schemas"]["HeuristicResizeInvocation"] | components["schemas"]["IPAdapterInvocation"] | components["schemas"]["IdealSizeInvocation"] | components["schemas"]["ImageBatchInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["ImageChannelMultiplyInvocation"] | components["schemas"]["ImageChannelOffsetInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImageGenerator"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ImageInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ImageMaskToTensorInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["ImageNoiseInvocation"] | components["schemas"]["ImagePanelLayoutInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageScaleInvocation"] | 
components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["IntegerBatchInvocation"] | components["schemas"]["IntegerCollectionInvocation"] | components["schemas"]["IntegerGenerator"] | components["schemas"]["IntegerInvocation"] | components["schemas"]["IntegerMathInvocation"] | components["schemas"]["InvertTensorMaskInvocation"] | components["schemas"]["InvokeAdjustImageHuePlusInvocation"] | components["schemas"]["InvokeEquivalentAchromaticLightnessInvocation"] | components["schemas"]["InvokeImageBlendInvocation"] | components["schemas"]["InvokeImageCompositorInvocation"] | components["schemas"]["InvokeImageDilateOrErodeInvocation"] | components["schemas"]["InvokeImageEnhanceInvocation"] | components["schemas"]["InvokeImageValueThresholdsInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["LaMaInfillInvocation"] | components["schemas"]["LatentsCollectionInvocation"] | components["schemas"]["LatentsInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["LineartAnimeEdgeDetectionInvocation"] | components["schemas"]["LineartEdgeDetectionInvocation"] | components["schemas"]["LlavaOnevisionVllmInvocation"] | components["schemas"]["LoRACollectionLoader"] | components["schemas"]["LoRALoaderInvocation"] | components["schemas"]["LoRASelectorInvocation"] | components["schemas"]["MLSDDetectionInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["MaskFromIDInvocation"] | components["schemas"]["MaskTensorToImageInvocation"] | components["schemas"]["MediaPipeFaceDetectionInvocation"] | 
components["schemas"]["MergeMetadataInvocation"] | components["schemas"]["MergeTilesToImageInvocation"] | components["schemas"]["MetadataFieldExtractorInvocation"] | components["schemas"]["MetadataFromImageInvocation"] | components["schemas"]["MetadataInvocation"] | components["schemas"]["MetadataItemInvocation"] | components["schemas"]["MetadataItemLinkedInvocation"] | components["schemas"]["MetadataToBoolCollectionInvocation"] | components["schemas"]["MetadataToBoolInvocation"] | components["schemas"]["MetadataToControlnetsInvocation"] | components["schemas"]["MetadataToFloatCollectionInvocation"] | components["schemas"]["MetadataToFloatInvocation"] | components["schemas"]["MetadataToIPAdaptersInvocation"] | components["schemas"]["MetadataToIntegerCollectionInvocation"] | components["schemas"]["MetadataToIntegerInvocation"] | components["schemas"]["MetadataToLorasCollectionInvocation"] | components["schemas"]["MetadataToLorasInvocation"] | components["schemas"]["MetadataToModelInvocation"] | components["schemas"]["MetadataToSDXLLorasInvocation"] | components["schemas"]["MetadataToSDXLModelInvocation"] | components["schemas"]["MetadataToSchedulerInvocation"] | components["schemas"]["MetadataToStringCollectionInvocation"] | components["schemas"]["MetadataToStringInvocation"] | components["schemas"]["MetadataToT2IAdaptersInvocation"] | components["schemas"]["MetadataToVAEInvocation"] | components["schemas"]["ModelIdentifierInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["NormalMapInvocation"] | components["schemas"]["PBRMapsInvocation"] | components["schemas"]["PairTileImageInvocation"] | components["schemas"]["PasteImageIntoBoundingBoxInvocation"] | components["schemas"]["PiDiNetEdgeDetectionInvocation"] | components["schemas"]["PromptTemplateInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["RandomFloatInvocation"] | 
components["schemas"]["RandomIntInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RectangleMaskInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["RoundInvocation"] | components["schemas"]["SD3DenoiseInvocation"] | components["schemas"]["SD3ImageToLatentsInvocation"] | components["schemas"]["SD3LatentsToImageInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["SDXLLoRACollectionLoader"] | components["schemas"]["SDXLLoRALoaderInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["SaveImageInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["SchedulerInvocation"] | components["schemas"]["Sd3ModelLoaderInvocation"] | components["schemas"]["Sd3TextEncoderInvocation"] | components["schemas"]["SeamlessModeInvocation"] | components["schemas"]["SegmentAnythingInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["SpandrelImageToImageAutoscaleInvocation"] | components["schemas"]["SpandrelImageToImageInvocation"] | components["schemas"]["StringBatchInvocation"] | components["schemas"]["StringCollectionInvocation"] | components["schemas"]["StringGenerator"] | components["schemas"]["StringInvocation"] | components["schemas"]["StringJoinInvocation"] | components["schemas"]["StringJoinThreeInvocation"] | components["schemas"]["StringReplaceInvocation"] | components["schemas"]["StringSplitInvocation"] | components["schemas"]["StringSplitNegInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["T2IAdapterInvocation"] | components["schemas"]["TileToPropertiesInvocation"] | components["schemas"]["TiledMultiDiffusionDenoiseLatents"] | 
components["schemas"]["UnsharpMaskInvocation"] | components["schemas"]["VAELoaderInvocation"] | components["schemas"]["ZImageControlInvocation"] | components["schemas"]["ZImageDenoiseInvocation"] | components["schemas"]["ZImageDenoiseMetaInvocation"] | components["schemas"]["ZImageImageToLatentsInvocation"] | components["schemas"]["ZImageLatentsToImageInvocation"] | components["schemas"]["ZImageLoRACollectionLoader"] | components["schemas"]["ZImageLoRALoaderInvocation"] | components["schemas"]["ZImageModelLoaderInvocation"] | components["schemas"]["ZImageSeedVarianceEnhancerInvocation"] | components["schemas"]["ZImageTextEncoderInvocation"]; + invocation: components["schemas"]["AddInvocation"] | components["schemas"]["AlphaMaskToTensorInvocation"] | components["schemas"]["AnimaDenoiseInvocation"] | components["schemas"]["AnimaLatentsToImageInvocation"] | components["schemas"]["AnimaModelLoaderInvocation"] | components["schemas"]["AnimaTextEncoderInvocation"] | components["schemas"]["ApplyMaskTensorToImageInvocation"] | components["schemas"]["ApplyMaskToImageInvocation"] | components["schemas"]["BlankImageInvocation"] | components["schemas"]["BlendLatentsInvocation"] | components["schemas"]["BooleanCollectionInvocation"] | components["schemas"]["BooleanInvocation"] | components["schemas"]["BoundingBoxInvocation"] | components["schemas"]["CLIPSkipInvocation"] | components["schemas"]["CV2InfillInvocation"] | components["schemas"]["CalculateImageTilesEvenSplitInvocation"] | components["schemas"]["CalculateImageTilesInvocation"] | components["schemas"]["CalculateImageTilesMinimumOverlapInvocation"] | components["schemas"]["CannyEdgeDetectionInvocation"] | components["schemas"]["CanvasPasteBackInvocation"] | components["schemas"]["CanvasV2MaskAndCropInvocation"] | components["schemas"]["CenterPadCropInvocation"] | components["schemas"]["CogView4DenoiseInvocation"] | components["schemas"]["CogView4ImageToLatentsInvocation"] | 
components["schemas"]["CogView4LatentsToImageInvocation"] | components["schemas"]["CogView4ModelLoaderInvocation"] | components["schemas"]["CogView4TextEncoderInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["ColorInvocation"] | components["schemas"]["ColorMapInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["ConditioningCollectionInvocation"] | components["schemas"]["ConditioningInvocation"] | components["schemas"]["ContentShuffleInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["CoreMetadataInvocation"] | components["schemas"]["CreateDenoiseMaskInvocation"] | components["schemas"]["CreateGradientMaskInvocation"] | components["schemas"]["CropImageToBoundingBoxInvocation"] | components["schemas"]["CropLatentsCoreInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["DWOpenposeDetectionInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["DenoiseLatentsMetaInvocation"] | components["schemas"]["DepthAnythingDepthEstimationInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["ExpandMaskWithFadeInvocation"] | components["schemas"]["FLUXLoRACollectionLoader"] | components["schemas"]["FaceIdentifierInvocation"] | components["schemas"]["FaceMaskInvocation"] | components["schemas"]["FaceOffInvocation"] | components["schemas"]["FloatBatchInvocation"] | components["schemas"]["FloatCollectionInvocation"] | components["schemas"]["FloatGenerator"] | components["schemas"]["FloatInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["FloatMathInvocation"] | components["schemas"]["FloatToIntegerInvocation"] | components["schemas"]["Flux2DenoiseInvocation"] | components["schemas"]["Flux2KleinModelLoaderInvocation"] 
| components["schemas"]["Flux2KleinTextEncoderInvocation"] | components["schemas"]["Flux2VaeDecodeInvocation"] | components["schemas"]["Flux2VaeEncodeInvocation"] | components["schemas"]["FluxControlLoRALoaderInvocation"] | components["schemas"]["FluxControlNetInvocation"] | components["schemas"]["FluxDenoiseInvocation"] | components["schemas"]["FluxDenoiseLatentsMetaInvocation"] | components["schemas"]["FluxFillInvocation"] | components["schemas"]["FluxIPAdapterInvocation"] | components["schemas"]["FluxKontextConcatenateImagesInvocation"] | components["schemas"]["FluxKontextInvocation"] | components["schemas"]["FluxLoRALoaderInvocation"] | components["schemas"]["FluxModelLoaderInvocation"] | components["schemas"]["FluxReduxInvocation"] | components["schemas"]["FluxTextEncoderInvocation"] | components["schemas"]["FluxVaeDecodeInvocation"] | components["schemas"]["FluxVaeEncodeInvocation"] | components["schemas"]["FreeUInvocation"] | components["schemas"]["GetMaskBoundingBoxInvocation"] | components["schemas"]["GroundingDinoInvocation"] | components["schemas"]["HEDEdgeDetectionInvocation"] | components["schemas"]["HeuristicResizeInvocation"] | components["schemas"]["IPAdapterInvocation"] | components["schemas"]["IdealSizeInvocation"] | components["schemas"]["ImageBatchInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["ImageChannelMultiplyInvocation"] | components["schemas"]["ImageChannelOffsetInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImageGenerator"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ImageInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ImageMaskToTensorInvocation"] | 
components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["ImageNoiseInvocation"] | components["schemas"]["ImagePanelLayoutInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["IntegerBatchInvocation"] | components["schemas"]["IntegerCollectionInvocation"] | components["schemas"]["IntegerGenerator"] | components["schemas"]["IntegerInvocation"] | components["schemas"]["IntegerMathInvocation"] | components["schemas"]["InvertTensorMaskInvocation"] | components["schemas"]["InvokeAdjustImageHuePlusInvocation"] | components["schemas"]["InvokeEquivalentAchromaticLightnessInvocation"] | components["schemas"]["InvokeImageBlendInvocation"] | components["schemas"]["InvokeImageCompositorInvocation"] | components["schemas"]["InvokeImageDilateOrErodeInvocation"] | components["schemas"]["InvokeImageEnhanceInvocation"] | components["schemas"]["InvokeImageValueThresholdsInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["LaMaInfillInvocation"] | components["schemas"]["LatentsCollectionInvocation"] | components["schemas"]["LatentsInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["LineartAnimeEdgeDetectionInvocation"] | components["schemas"]["LineartEdgeDetectionInvocation"] | components["schemas"]["LlavaOnevisionVllmInvocation"] | components["schemas"]["LoRACollectionLoader"] | components["schemas"]["LoRALoaderInvocation"] | components["schemas"]["LoRASelectorInvocation"] | components["schemas"]["MLSDDetectionInvocation"] | 
components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["MaskFromIDInvocation"] | components["schemas"]["MaskTensorToImageInvocation"] | components["schemas"]["MediaPipeFaceDetectionInvocation"] | components["schemas"]["MergeMetadataInvocation"] | components["schemas"]["MergeTilesToImageInvocation"] | components["schemas"]["MetadataFieldExtractorInvocation"] | components["schemas"]["MetadataFromImageInvocation"] | components["schemas"]["MetadataInvocation"] | components["schemas"]["MetadataItemInvocation"] | components["schemas"]["MetadataItemLinkedInvocation"] | components["schemas"]["MetadataToBoolCollectionInvocation"] | components["schemas"]["MetadataToBoolInvocation"] | components["schemas"]["MetadataToControlnetsInvocation"] | components["schemas"]["MetadataToFloatCollectionInvocation"] | components["schemas"]["MetadataToFloatInvocation"] | components["schemas"]["MetadataToIPAdaptersInvocation"] | components["schemas"]["MetadataToIntegerCollectionInvocation"] | components["schemas"]["MetadataToIntegerInvocation"] | components["schemas"]["MetadataToLorasCollectionInvocation"] | components["schemas"]["MetadataToLorasInvocation"] | components["schemas"]["MetadataToModelInvocation"] | components["schemas"]["MetadataToSDXLLorasInvocation"] | components["schemas"]["MetadataToSDXLModelInvocation"] | components["schemas"]["MetadataToSchedulerInvocation"] | components["schemas"]["MetadataToStringCollectionInvocation"] | components["schemas"]["MetadataToStringInvocation"] | components["schemas"]["MetadataToT2IAdaptersInvocation"] | components["schemas"]["MetadataToVAEInvocation"] | components["schemas"]["ModelIdentifierInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["NormalMapInvocation"] | 
components["schemas"]["PBRMapsInvocation"] | components["schemas"]["PairTileImageInvocation"] | components["schemas"]["PasteImageIntoBoundingBoxInvocation"] | components["schemas"]["PiDiNetEdgeDetectionInvocation"] | components["schemas"]["PromptTemplateInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["RandomFloatInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RectangleMaskInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["RoundInvocation"] | components["schemas"]["SD3DenoiseInvocation"] | components["schemas"]["SD3ImageToLatentsInvocation"] | components["schemas"]["SD3LatentsToImageInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["SDXLLoRACollectionLoader"] | components["schemas"]["SDXLLoRALoaderInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["SaveImageInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["SchedulerInvocation"] | components["schemas"]["Sd3ModelLoaderInvocation"] | components["schemas"]["Sd3TextEncoderInvocation"] | components["schemas"]["SeamlessModeInvocation"] | components["schemas"]["SegmentAnythingInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["SpandrelImageToImageAutoscaleInvocation"] | components["schemas"]["SpandrelImageToImageInvocation"] | components["schemas"]["StringBatchInvocation"] | components["schemas"]["StringCollectionInvocation"] | components["schemas"]["StringGenerator"] | components["schemas"]["StringInvocation"] | components["schemas"]["StringJoinInvocation"] | 
components["schemas"]["StringJoinThreeInvocation"] | components["schemas"]["StringReplaceInvocation"] | components["schemas"]["StringSplitInvocation"] | components["schemas"]["StringSplitNegInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["T2IAdapterInvocation"] | components["schemas"]["TileToPropertiesInvocation"] | components["schemas"]["TiledMultiDiffusionDenoiseLatents"] | components["schemas"]["UnsharpMaskInvocation"] | components["schemas"]["VAELoaderInvocation"] | components["schemas"]["ZImageControlInvocation"] | components["schemas"]["ZImageDenoiseInvocation"] | components["schemas"]["ZImageDenoiseMetaInvocation"] | components["schemas"]["ZImageImageToLatentsInvocation"] | components["schemas"]["ZImageLatentsToImageInvocation"] | components["schemas"]["ZImageLoRACollectionLoader"] | components["schemas"]["ZImageLoRALoaderInvocation"] | components["schemas"]["ZImageModelLoaderInvocation"] | components["schemas"]["ZImageSeedVarianceEnhancerInvocation"] | components["schemas"]["ZImageTextEncoderInvocation"]; /** * Invocation Source Id * @description The ID of the prepared invocation's source node @@ -13889,7 +13393,7 @@ export type components = { * Result * @description The result of the invocation */ - result: components["schemas"]["BooleanCollectionOutput"] | components["schemas"]["BooleanOutput"] | components["schemas"]["BoundingBoxCollectionOutput"] | components["schemas"]["BoundingBoxOutput"] | components["schemas"]["CLIPOutput"] | components["schemas"]["CLIPSkipInvocationOutput"] | components["schemas"]["CalculateImageTilesOutput"] | components["schemas"]["CogView4ConditioningOutput"] | components["schemas"]["CogView4ModelLoaderOutput"] | components["schemas"]["CollectInvocationOutput"] | components["schemas"]["ColorCollectionOutput"] | components["schemas"]["ColorOutput"] | components["schemas"]["ConditioningCollectionOutput"] | components["schemas"]["ConditioningOutput"] | components["schemas"]["ControlOutput"] | 
components["schemas"]["DenoiseMaskOutput"] | components["schemas"]["FaceMaskOutput"] | components["schemas"]["FaceOffOutput"] | components["schemas"]["FloatCollectionOutput"] | components["schemas"]["FloatGeneratorOutput"] | components["schemas"]["FloatOutput"] | components["schemas"]["Flux2KleinLoRALoaderOutput"] | components["schemas"]["Flux2KleinModelLoaderOutput"] | components["schemas"]["FluxConditioningCollectionOutput"] | components["schemas"]["FluxConditioningOutput"] | components["schemas"]["FluxControlLoRALoaderOutput"] | components["schemas"]["FluxControlNetOutput"] | components["schemas"]["FluxFillOutput"] | components["schemas"]["FluxKontextOutput"] | components["schemas"]["FluxLoRALoaderOutput"] | components["schemas"]["FluxModelLoaderOutput"] | components["schemas"]["FluxReduxOutput"] | components["schemas"]["GradientMaskOutput"] | components["schemas"]["IPAdapterOutput"] | components["schemas"]["IdealSizeOutput"] | components["schemas"]["ImageCollectionOutput"] | components["schemas"]["ImageGeneratorOutput"] | components["schemas"]["ImageOutput"] | components["schemas"]["ImagePanelCoordinateOutput"] | components["schemas"]["IntegerCollectionOutput"] | components["schemas"]["IntegerGeneratorOutput"] | components["schemas"]["IntegerOutput"] | components["schemas"]["IterateInvocationOutput"] | components["schemas"]["LatentsCollectionOutput"] | components["schemas"]["LatentsMetaOutput"] | components["schemas"]["LatentsOutput"] | components["schemas"]["LoRALoaderOutput"] | components["schemas"]["LoRASelectorOutput"] | components["schemas"]["MDControlListOutput"] | components["schemas"]["MDIPAdapterListOutput"] | components["schemas"]["MDT2IAdapterListOutput"] | components["schemas"]["MaskOutput"] | components["schemas"]["MetadataItemOutput"] | components["schemas"]["MetadataOutput"] | components["schemas"]["MetadataToLorasCollectionOutput"] | components["schemas"]["MetadataToModelOutput"] | components["schemas"]["MetadataToSDXLModelOutput"] | 
components["schemas"]["ModelIdentifierOutput"] | components["schemas"]["ModelLoaderOutput"] | components["schemas"]["NoiseOutput"] | components["schemas"]["PBRMapsOutput"] | components["schemas"]["PairTileImageOutput"] | components["schemas"]["PromptTemplateOutput"] | components["schemas"]["SD3ConditioningOutput"] | components["schemas"]["SDXLLoRALoaderOutput"] | components["schemas"]["SDXLModelLoaderOutput"] | components["schemas"]["SDXLRefinerModelLoaderOutput"] | components["schemas"]["SchedulerOutput"] | components["schemas"]["Sd3ModelLoaderOutput"] | components["schemas"]["SeamlessModeOutput"] | components["schemas"]["String2Output"] | components["schemas"]["StringCollectionOutput"] | components["schemas"]["StringGeneratorOutput"] | components["schemas"]["StringOutput"] | components["schemas"]["StringPosNegOutput"] | components["schemas"]["T2IAdapterOutput"] | components["schemas"]["TileToPropertiesOutput"] | components["schemas"]["UNetOutput"] | components["schemas"]["VAEOutput"] | components["schemas"]["ZImageConditioningOutput"] | components["schemas"]["ZImageControlOutput"] | components["schemas"]["ZImageLoRALoaderOutput"] | components["schemas"]["ZImageModelLoaderOutput"]; + result: components["schemas"]["AnimaConditioningOutput"] | components["schemas"]["AnimaModelLoaderOutput"] | components["schemas"]["BooleanCollectionOutput"] | components["schemas"]["BooleanOutput"] | components["schemas"]["BoundingBoxCollectionOutput"] | components["schemas"]["BoundingBoxOutput"] | components["schemas"]["CLIPOutput"] | components["schemas"]["CLIPSkipInvocationOutput"] | components["schemas"]["CalculateImageTilesOutput"] | components["schemas"]["CogView4ConditioningOutput"] | components["schemas"]["CogView4ModelLoaderOutput"] | components["schemas"]["CollectInvocationOutput"] | components["schemas"]["ColorCollectionOutput"] | components["schemas"]["ColorOutput"] | components["schemas"]["ConditioningCollectionOutput"] | components["schemas"]["ConditioningOutput"] | 
components["schemas"]["ControlOutput"] | components["schemas"]["DenoiseMaskOutput"] | components["schemas"]["FaceMaskOutput"] | components["schemas"]["FaceOffOutput"] | components["schemas"]["FloatCollectionOutput"] | components["schemas"]["FloatGeneratorOutput"] | components["schemas"]["FloatOutput"] | components["schemas"]["Flux2KleinModelLoaderOutput"] | components["schemas"]["FluxConditioningCollectionOutput"] | components["schemas"]["FluxConditioningOutput"] | components["schemas"]["FluxControlLoRALoaderOutput"] | components["schemas"]["FluxControlNetOutput"] | components["schemas"]["FluxFillOutput"] | components["schemas"]["FluxKontextOutput"] | components["schemas"]["FluxLoRALoaderOutput"] | components["schemas"]["FluxModelLoaderOutput"] | components["schemas"]["FluxReduxOutput"] | components["schemas"]["GradientMaskOutput"] | components["schemas"]["IPAdapterOutput"] | components["schemas"]["IdealSizeOutput"] | components["schemas"]["ImageCollectionOutput"] | components["schemas"]["ImageGeneratorOutput"] | components["schemas"]["ImageOutput"] | components["schemas"]["ImagePanelCoordinateOutput"] | components["schemas"]["IntegerCollectionOutput"] | components["schemas"]["IntegerGeneratorOutput"] | components["schemas"]["IntegerOutput"] | components["schemas"]["IterateInvocationOutput"] | components["schemas"]["LatentsCollectionOutput"] | components["schemas"]["LatentsMetaOutput"] | components["schemas"]["LatentsOutput"] | components["schemas"]["LoRALoaderOutput"] | components["schemas"]["LoRASelectorOutput"] | components["schemas"]["MDControlListOutput"] | components["schemas"]["MDIPAdapterListOutput"] | components["schemas"]["MDT2IAdapterListOutput"] | components["schemas"]["MaskOutput"] | components["schemas"]["MetadataItemOutput"] | components["schemas"]["MetadataOutput"] | components["schemas"]["MetadataToLorasCollectionOutput"] | components["schemas"]["MetadataToModelOutput"] | components["schemas"]["MetadataToSDXLModelOutput"] | 
components["schemas"]["ModelIdentifierOutput"] | components["schemas"]["ModelLoaderOutput"] | components["schemas"]["NoiseOutput"] | components["schemas"]["PBRMapsOutput"] | components["schemas"]["PairTileImageOutput"] | components["schemas"]["PromptTemplateOutput"] | components["schemas"]["SD3ConditioningOutput"] | components["schemas"]["SDXLLoRALoaderOutput"] | components["schemas"]["SDXLModelLoaderOutput"] | components["schemas"]["SDXLRefinerModelLoaderOutput"] | components["schemas"]["SchedulerOutput"] | components["schemas"]["Sd3ModelLoaderOutput"] | components["schemas"]["SeamlessModeOutput"] | components["schemas"]["String2Output"] | components["schemas"]["StringCollectionOutput"] | components["schemas"]["StringGeneratorOutput"] | components["schemas"]["StringOutput"] | components["schemas"]["StringPosNegOutput"] | components["schemas"]["T2IAdapterOutput"] | components["schemas"]["TileToPropertiesOutput"] | components["schemas"]["UNetOutput"] | components["schemas"]["VAEOutput"] | components["schemas"]["ZImageConditioningOutput"] | components["schemas"]["ZImageControlOutput"] | components["schemas"]["ZImageLoRALoaderOutput"] | components["schemas"]["ZImageModelLoaderOutput"]; }; /** * InvocationErrorEvent @@ -13928,12 +13432,6 @@ export type components = { * @default null */ destination: string | null; - /** - * User Id - * @description The ID of the user who created the queue item - * @default system - */ - user_id: string; /** * Session Id * @description The ID of the session (aka graph execution state) @@ -13943,7 +13441,7 @@ export type components = { * Invocation * @description The ID of the invocation */ - invocation: components["schemas"]["AddInvocation"] | components["schemas"]["AlphaMaskToTensorInvocation"] | components["schemas"]["ApplyMaskTensorToImageInvocation"] | components["schemas"]["ApplyMaskToImageInvocation"] | components["schemas"]["BlankImageInvocation"] | components["schemas"]["BlendLatentsInvocation"] | 
components["schemas"]["BooleanCollectionInvocation"] | components["schemas"]["BooleanInvocation"] | components["schemas"]["BoundingBoxInvocation"] | components["schemas"]["CLIPSkipInvocation"] | components["schemas"]["CV2InfillInvocation"] | components["schemas"]["CalculateImageTilesEvenSplitInvocation"] | components["schemas"]["CalculateImageTilesInvocation"] | components["schemas"]["CalculateImageTilesMinimumOverlapInvocation"] | components["schemas"]["CannyEdgeDetectionInvocation"] | components["schemas"]["CanvasPasteBackInvocation"] | components["schemas"]["CanvasV2MaskAndCropInvocation"] | components["schemas"]["CenterPadCropInvocation"] | components["schemas"]["CogView4DenoiseInvocation"] | components["schemas"]["CogView4ImageToLatentsInvocation"] | components["schemas"]["CogView4LatentsToImageInvocation"] | components["schemas"]["CogView4ModelLoaderInvocation"] | components["schemas"]["CogView4TextEncoderInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["ColorInvocation"] | components["schemas"]["ColorMapInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["ConditioningCollectionInvocation"] | components["schemas"]["ConditioningInvocation"] | components["schemas"]["ContentShuffleInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["CoreMetadataInvocation"] | components["schemas"]["CreateDenoiseMaskInvocation"] | components["schemas"]["CreateGradientMaskInvocation"] | components["schemas"]["CropImageToBoundingBoxInvocation"] | components["schemas"]["CropLatentsCoreInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["DWOpenposeDetectionInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["DenoiseLatentsMetaInvocation"] | components["schemas"]["DepthAnythingDepthEstimationInvocation"] | components["schemas"]["DivideInvocation"] | 
components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["ExpandMaskWithFadeInvocation"] | components["schemas"]["FLUXLoRACollectionLoader"] | components["schemas"]["FaceIdentifierInvocation"] | components["schemas"]["FaceMaskInvocation"] | components["schemas"]["FaceOffInvocation"] | components["schemas"]["FloatBatchInvocation"] | components["schemas"]["FloatCollectionInvocation"] | components["schemas"]["FloatGenerator"] | components["schemas"]["FloatInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["FloatMathInvocation"] | components["schemas"]["FloatToIntegerInvocation"] | components["schemas"]["Flux2DenoiseInvocation"] | components["schemas"]["Flux2KleinLoRACollectionLoader"] | components["schemas"]["Flux2KleinLoRALoaderInvocation"] | components["schemas"]["Flux2KleinModelLoaderInvocation"] | components["schemas"]["Flux2KleinTextEncoderInvocation"] | components["schemas"]["Flux2VaeDecodeInvocation"] | components["schemas"]["Flux2VaeEncodeInvocation"] | components["schemas"]["FluxControlLoRALoaderInvocation"] | components["schemas"]["FluxControlNetInvocation"] | components["schemas"]["FluxDenoiseInvocation"] | components["schemas"]["FluxDenoiseLatentsMetaInvocation"] | components["schemas"]["FluxFillInvocation"] | components["schemas"]["FluxIPAdapterInvocation"] | components["schemas"]["FluxKontextConcatenateImagesInvocation"] | components["schemas"]["FluxKontextInvocation"] | components["schemas"]["FluxLoRALoaderInvocation"] | components["schemas"]["FluxModelLoaderInvocation"] | components["schemas"]["FluxReduxInvocation"] | components["schemas"]["FluxTextEncoderInvocation"] | components["schemas"]["FluxVaeDecodeInvocation"] | components["schemas"]["FluxVaeEncodeInvocation"] | components["schemas"]["FreeUInvocation"] | components["schemas"]["GetMaskBoundingBoxInvocation"] | components["schemas"]["GroundingDinoInvocation"] | 
components["schemas"]["HEDEdgeDetectionInvocation"] | components["schemas"]["HeuristicResizeInvocation"] | components["schemas"]["IPAdapterInvocation"] | components["schemas"]["IdealSizeInvocation"] | components["schemas"]["ImageBatchInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["ImageChannelMultiplyInvocation"] | components["schemas"]["ImageChannelOffsetInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImageGenerator"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ImageInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ImageMaskToTensorInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["ImageNoiseInvocation"] | components["schemas"]["ImagePanelLayoutInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["IntegerBatchInvocation"] | components["schemas"]["IntegerCollectionInvocation"] | components["schemas"]["IntegerGenerator"] | components["schemas"]["IntegerInvocation"] | components["schemas"]["IntegerMathInvocation"] | components["schemas"]["InvertTensorMaskInvocation"] | components["schemas"]["InvokeAdjustImageHuePlusInvocation"] | components["schemas"]["InvokeEquivalentAchromaticLightnessInvocation"] | components["schemas"]["InvokeImageBlendInvocation"] | 
components["schemas"]["InvokeImageCompositorInvocation"] | components["schemas"]["InvokeImageDilateOrErodeInvocation"] | components["schemas"]["InvokeImageEnhanceInvocation"] | components["schemas"]["InvokeImageValueThresholdsInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["LaMaInfillInvocation"] | components["schemas"]["LatentsCollectionInvocation"] | components["schemas"]["LatentsInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["LineartAnimeEdgeDetectionInvocation"] | components["schemas"]["LineartEdgeDetectionInvocation"] | components["schemas"]["LlavaOnevisionVllmInvocation"] | components["schemas"]["LoRACollectionLoader"] | components["schemas"]["LoRALoaderInvocation"] | components["schemas"]["LoRASelectorInvocation"] | components["schemas"]["MLSDDetectionInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["MaskFromIDInvocation"] | components["schemas"]["MaskTensorToImageInvocation"] | components["schemas"]["MediaPipeFaceDetectionInvocation"] | components["schemas"]["MergeMetadataInvocation"] | components["schemas"]["MergeTilesToImageInvocation"] | components["schemas"]["MetadataFieldExtractorInvocation"] | components["schemas"]["MetadataFromImageInvocation"] | components["schemas"]["MetadataInvocation"] | components["schemas"]["MetadataItemInvocation"] | components["schemas"]["MetadataItemLinkedInvocation"] | components["schemas"]["MetadataToBoolCollectionInvocation"] | components["schemas"]["MetadataToBoolInvocation"] | components["schemas"]["MetadataToControlnetsInvocation"] | components["schemas"]["MetadataToFloatCollectionInvocation"] | components["schemas"]["MetadataToFloatInvocation"] | components["schemas"]["MetadataToIPAdaptersInvocation"] | 
components["schemas"]["MetadataToIntegerCollectionInvocation"] | components["schemas"]["MetadataToIntegerInvocation"] | components["schemas"]["MetadataToLorasCollectionInvocation"] | components["schemas"]["MetadataToLorasInvocation"] | components["schemas"]["MetadataToModelInvocation"] | components["schemas"]["MetadataToSDXLLorasInvocation"] | components["schemas"]["MetadataToSDXLModelInvocation"] | components["schemas"]["MetadataToSchedulerInvocation"] | components["schemas"]["MetadataToStringCollectionInvocation"] | components["schemas"]["MetadataToStringInvocation"] | components["schemas"]["MetadataToT2IAdaptersInvocation"] | components["schemas"]["MetadataToVAEInvocation"] | components["schemas"]["ModelIdentifierInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["NormalMapInvocation"] | components["schemas"]["PBRMapsInvocation"] | components["schemas"]["PairTileImageInvocation"] | components["schemas"]["PasteImageIntoBoundingBoxInvocation"] | components["schemas"]["PiDiNetEdgeDetectionInvocation"] | components["schemas"]["PromptTemplateInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["RandomFloatInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RectangleMaskInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["RoundInvocation"] | components["schemas"]["SD3DenoiseInvocation"] | components["schemas"]["SD3ImageToLatentsInvocation"] | components["schemas"]["SD3LatentsToImageInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["SDXLLoRACollectionLoader"] | components["schemas"]["SDXLLoRALoaderInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | 
components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["SaveImageInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["SchedulerInvocation"] | components["schemas"]["Sd3ModelLoaderInvocation"] | components["schemas"]["Sd3TextEncoderInvocation"] | components["schemas"]["SeamlessModeInvocation"] | components["schemas"]["SegmentAnythingInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["SpandrelImageToImageAutoscaleInvocation"] | components["schemas"]["SpandrelImageToImageInvocation"] | components["schemas"]["StringBatchInvocation"] | components["schemas"]["StringCollectionInvocation"] | components["schemas"]["StringGenerator"] | components["schemas"]["StringInvocation"] | components["schemas"]["StringJoinInvocation"] | components["schemas"]["StringJoinThreeInvocation"] | components["schemas"]["StringReplaceInvocation"] | components["schemas"]["StringSplitInvocation"] | components["schemas"]["StringSplitNegInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["T2IAdapterInvocation"] | components["schemas"]["TileToPropertiesInvocation"] | components["schemas"]["TiledMultiDiffusionDenoiseLatents"] | components["schemas"]["UnsharpMaskInvocation"] | components["schemas"]["VAELoaderInvocation"] | components["schemas"]["ZImageControlInvocation"] | components["schemas"]["ZImageDenoiseInvocation"] | components["schemas"]["ZImageDenoiseMetaInvocation"] | components["schemas"]["ZImageImageToLatentsInvocation"] | components["schemas"]["ZImageLatentsToImageInvocation"] | components["schemas"]["ZImageLoRACollectionLoader"] | components["schemas"]["ZImageLoRALoaderInvocation"] | components["schemas"]["ZImageModelLoaderInvocation"] | components["schemas"]["ZImageSeedVarianceEnhancerInvocation"] | components["schemas"]["ZImageTextEncoderInvocation"]; + invocation: components["schemas"]["AddInvocation"] 
| components["schemas"]["AlphaMaskToTensorInvocation"] | components["schemas"]["AnimaDenoiseInvocation"] | components["schemas"]["AnimaLatentsToImageInvocation"] | components["schemas"]["AnimaModelLoaderInvocation"] | components["schemas"]["AnimaTextEncoderInvocation"] | components["schemas"]["ApplyMaskTensorToImageInvocation"] | components["schemas"]["ApplyMaskToImageInvocation"] | components["schemas"]["BlankImageInvocation"] | components["schemas"]["BlendLatentsInvocation"] | components["schemas"]["BooleanCollectionInvocation"] | components["schemas"]["BooleanInvocation"] | components["schemas"]["BoundingBoxInvocation"] | components["schemas"]["CLIPSkipInvocation"] | components["schemas"]["CV2InfillInvocation"] | components["schemas"]["CalculateImageTilesEvenSplitInvocation"] | components["schemas"]["CalculateImageTilesInvocation"] | components["schemas"]["CalculateImageTilesMinimumOverlapInvocation"] | components["schemas"]["CannyEdgeDetectionInvocation"] | components["schemas"]["CanvasPasteBackInvocation"] | components["schemas"]["CanvasV2MaskAndCropInvocation"] | components["schemas"]["CenterPadCropInvocation"] | components["schemas"]["CogView4DenoiseInvocation"] | components["schemas"]["CogView4ImageToLatentsInvocation"] | components["schemas"]["CogView4LatentsToImageInvocation"] | components["schemas"]["CogView4ModelLoaderInvocation"] | components["schemas"]["CogView4TextEncoderInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["ColorInvocation"] | components["schemas"]["ColorMapInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["ConditioningCollectionInvocation"] | components["schemas"]["ConditioningInvocation"] | components["schemas"]["ContentShuffleInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["CoreMetadataInvocation"] | components["schemas"]["CreateDenoiseMaskInvocation"] | 
components["schemas"]["CreateGradientMaskInvocation"] | components["schemas"]["CropImageToBoundingBoxInvocation"] | components["schemas"]["CropLatentsCoreInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["DWOpenposeDetectionInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["DenoiseLatentsMetaInvocation"] | components["schemas"]["DepthAnythingDepthEstimationInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["ExpandMaskWithFadeInvocation"] | components["schemas"]["FLUXLoRACollectionLoader"] | components["schemas"]["FaceIdentifierInvocation"] | components["schemas"]["FaceMaskInvocation"] | components["schemas"]["FaceOffInvocation"] | components["schemas"]["FloatBatchInvocation"] | components["schemas"]["FloatCollectionInvocation"] | components["schemas"]["FloatGenerator"] | components["schemas"]["FloatInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["FloatMathInvocation"] | components["schemas"]["FloatToIntegerInvocation"] | components["schemas"]["Flux2DenoiseInvocation"] | components["schemas"]["Flux2KleinModelLoaderInvocation"] | components["schemas"]["Flux2KleinTextEncoderInvocation"] | components["schemas"]["Flux2VaeDecodeInvocation"] | components["schemas"]["Flux2VaeEncodeInvocation"] | components["schemas"]["FluxControlLoRALoaderInvocation"] | components["schemas"]["FluxControlNetInvocation"] | components["schemas"]["FluxDenoiseInvocation"] | components["schemas"]["FluxDenoiseLatentsMetaInvocation"] | components["schemas"]["FluxFillInvocation"] | components["schemas"]["FluxIPAdapterInvocation"] | components["schemas"]["FluxKontextConcatenateImagesInvocation"] | components["schemas"]["FluxKontextInvocation"] | components["schemas"]["FluxLoRALoaderInvocation"] | components["schemas"]["FluxModelLoaderInvocation"] | 
components["schemas"]["FluxReduxInvocation"] | components["schemas"]["FluxTextEncoderInvocation"] | components["schemas"]["FluxVaeDecodeInvocation"] | components["schemas"]["FluxVaeEncodeInvocation"] | components["schemas"]["FreeUInvocation"] | components["schemas"]["GetMaskBoundingBoxInvocation"] | components["schemas"]["GroundingDinoInvocation"] | components["schemas"]["HEDEdgeDetectionInvocation"] | components["schemas"]["HeuristicResizeInvocation"] | components["schemas"]["IPAdapterInvocation"] | components["schemas"]["IdealSizeInvocation"] | components["schemas"]["ImageBatchInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["ImageChannelMultiplyInvocation"] | components["schemas"]["ImageChannelOffsetInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImageGenerator"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ImageInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ImageMaskToTensorInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["ImageNoiseInvocation"] | components["schemas"]["ImagePanelLayoutInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["IntegerBatchInvocation"] | components["schemas"]["IntegerCollectionInvocation"] | 
components["schemas"]["IntegerGenerator"] | components["schemas"]["IntegerInvocation"] | components["schemas"]["IntegerMathInvocation"] | components["schemas"]["InvertTensorMaskInvocation"] | components["schemas"]["InvokeAdjustImageHuePlusInvocation"] | components["schemas"]["InvokeEquivalentAchromaticLightnessInvocation"] | components["schemas"]["InvokeImageBlendInvocation"] | components["schemas"]["InvokeImageCompositorInvocation"] | components["schemas"]["InvokeImageDilateOrErodeInvocation"] | components["schemas"]["InvokeImageEnhanceInvocation"] | components["schemas"]["InvokeImageValueThresholdsInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["LaMaInfillInvocation"] | components["schemas"]["LatentsCollectionInvocation"] | components["schemas"]["LatentsInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["LineartAnimeEdgeDetectionInvocation"] | components["schemas"]["LineartEdgeDetectionInvocation"] | components["schemas"]["LlavaOnevisionVllmInvocation"] | components["schemas"]["LoRACollectionLoader"] | components["schemas"]["LoRALoaderInvocation"] | components["schemas"]["LoRASelectorInvocation"] | components["schemas"]["MLSDDetectionInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["MaskFromIDInvocation"] | components["schemas"]["MaskTensorToImageInvocation"] | components["schemas"]["MediaPipeFaceDetectionInvocation"] | components["schemas"]["MergeMetadataInvocation"] | components["schemas"]["MergeTilesToImageInvocation"] | components["schemas"]["MetadataFieldExtractorInvocation"] | components["schemas"]["MetadataFromImageInvocation"] | components["schemas"]["MetadataInvocation"] | components["schemas"]["MetadataItemInvocation"] | components["schemas"]["MetadataItemLinkedInvocation"] | 
components["schemas"]["MetadataToBoolCollectionInvocation"] | components["schemas"]["MetadataToBoolInvocation"] | components["schemas"]["MetadataToControlnetsInvocation"] | components["schemas"]["MetadataToFloatCollectionInvocation"] | components["schemas"]["MetadataToFloatInvocation"] | components["schemas"]["MetadataToIPAdaptersInvocation"] | components["schemas"]["MetadataToIntegerCollectionInvocation"] | components["schemas"]["MetadataToIntegerInvocation"] | components["schemas"]["MetadataToLorasCollectionInvocation"] | components["schemas"]["MetadataToLorasInvocation"] | components["schemas"]["MetadataToModelInvocation"] | components["schemas"]["MetadataToSDXLLorasInvocation"] | components["schemas"]["MetadataToSDXLModelInvocation"] | components["schemas"]["MetadataToSchedulerInvocation"] | components["schemas"]["MetadataToStringCollectionInvocation"] | components["schemas"]["MetadataToStringInvocation"] | components["schemas"]["MetadataToT2IAdaptersInvocation"] | components["schemas"]["MetadataToVAEInvocation"] | components["schemas"]["ModelIdentifierInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["NormalMapInvocation"] | components["schemas"]["PBRMapsInvocation"] | components["schemas"]["PairTileImageInvocation"] | components["schemas"]["PasteImageIntoBoundingBoxInvocation"] | components["schemas"]["PiDiNetEdgeDetectionInvocation"] | components["schemas"]["PromptTemplateInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["RandomFloatInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RectangleMaskInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["RoundInvocation"] | components["schemas"]["SD3DenoiseInvocation"] | 
components["schemas"]["SD3ImageToLatentsInvocation"] | components["schemas"]["SD3LatentsToImageInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["SDXLLoRACollectionLoader"] | components["schemas"]["SDXLLoRALoaderInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["SaveImageInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["SchedulerInvocation"] | components["schemas"]["Sd3ModelLoaderInvocation"] | components["schemas"]["Sd3TextEncoderInvocation"] | components["schemas"]["SeamlessModeInvocation"] | components["schemas"]["SegmentAnythingInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["SpandrelImageToImageAutoscaleInvocation"] | components["schemas"]["SpandrelImageToImageInvocation"] | components["schemas"]["StringBatchInvocation"] | components["schemas"]["StringCollectionInvocation"] | components["schemas"]["StringGenerator"] | components["schemas"]["StringInvocation"] | components["schemas"]["StringJoinInvocation"] | components["schemas"]["StringJoinThreeInvocation"] | components["schemas"]["StringReplaceInvocation"] | components["schemas"]["StringSplitInvocation"] | components["schemas"]["StringSplitNegInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["T2IAdapterInvocation"] | components["schemas"]["TileToPropertiesInvocation"] | components["schemas"]["TiledMultiDiffusionDenoiseLatents"] | components["schemas"]["UnsharpMaskInvocation"] | components["schemas"]["VAELoaderInvocation"] | components["schemas"]["ZImageControlInvocation"] | components["schemas"]["ZImageDenoiseInvocation"] | components["schemas"]["ZImageDenoiseMetaInvocation"] | components["schemas"]["ZImageImageToLatentsInvocation"] | components["schemas"]["ZImageLatentsToImageInvocation"] | 
components["schemas"]["ZImageLoRACollectionLoader"] | components["schemas"]["ZImageLoRALoaderInvocation"] | components["schemas"]["ZImageModelLoaderInvocation"] | components["schemas"]["ZImageSeedVarianceEnhancerInvocation"] | components["schemas"]["ZImageTextEncoderInvocation"]; /** * Invocation Source Id * @description The ID of the prepared invocation's source node @@ -13968,6 +13466,10 @@ export type components = { InvocationOutputMap: { add: components["schemas"]["IntegerOutput"]; alpha_mask_to_tensor: components["schemas"]["MaskOutput"]; + anima_denoise: components["schemas"]["LatentsOutput"]; + anima_l2i: components["schemas"]["ImageOutput"]; + anima_model_loader: components["schemas"]["AnimaModelLoaderOutput"]; + anima_text_encoder: components["schemas"]["AnimaConditioningOutput"]; apply_mask_to_image: components["schemas"]["ImageOutput"]; apply_tensor_mask_to_image: components["schemas"]["ImageOutput"]; blank_image: components["schemas"]["ImageOutput"]; @@ -14020,8 +13522,6 @@ export type components = { float_range: components["schemas"]["FloatCollectionOutput"]; float_to_int: components["schemas"]["IntegerOutput"]; flux2_denoise: components["schemas"]["LatentsOutput"]; - flux2_klein_lora_collection_loader: components["schemas"]["Flux2KleinLoRALoaderOutput"]; - flux2_klein_lora_loader: components["schemas"]["Flux2KleinLoRALoaderOutput"]; flux2_klein_model_loader: components["schemas"]["Flux2KleinModelLoaderOutput"]; flux2_klein_text_encoder: components["schemas"]["FluxConditioningOutput"]; flux2_vae_decode: components["schemas"]["ImageOutput"]; @@ -14234,12 +13734,6 @@ export type components = { * @default null */ destination: string | null; - /** - * User Id - * @description The ID of the user who created the queue item - * @default system - */ - user_id: string; /** * Session Id * @description The ID of the session (aka graph execution state) @@ -14249,7 +13743,7 @@ export type components = { * Invocation * @description The ID of the invocation */ - 
invocation: components["schemas"]["AddInvocation"] | components["schemas"]["AlphaMaskToTensorInvocation"] | components["schemas"]["ApplyMaskTensorToImageInvocation"] | components["schemas"]["ApplyMaskToImageInvocation"] | components["schemas"]["BlankImageInvocation"] | components["schemas"]["BlendLatentsInvocation"] | components["schemas"]["BooleanCollectionInvocation"] | components["schemas"]["BooleanInvocation"] | components["schemas"]["BoundingBoxInvocation"] | components["schemas"]["CLIPSkipInvocation"] | components["schemas"]["CV2InfillInvocation"] | components["schemas"]["CalculateImageTilesEvenSplitInvocation"] | components["schemas"]["CalculateImageTilesInvocation"] | components["schemas"]["CalculateImageTilesMinimumOverlapInvocation"] | components["schemas"]["CannyEdgeDetectionInvocation"] | components["schemas"]["CanvasPasteBackInvocation"] | components["schemas"]["CanvasV2MaskAndCropInvocation"] | components["schemas"]["CenterPadCropInvocation"] | components["schemas"]["CogView4DenoiseInvocation"] | components["schemas"]["CogView4ImageToLatentsInvocation"] | components["schemas"]["CogView4LatentsToImageInvocation"] | components["schemas"]["CogView4ModelLoaderInvocation"] | components["schemas"]["CogView4TextEncoderInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["ColorInvocation"] | components["schemas"]["ColorMapInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["ConditioningCollectionInvocation"] | components["schemas"]["ConditioningInvocation"] | components["schemas"]["ContentShuffleInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["CoreMetadataInvocation"] | components["schemas"]["CreateDenoiseMaskInvocation"] | components["schemas"]["CreateGradientMaskInvocation"] | components["schemas"]["CropImageToBoundingBoxInvocation"] | components["schemas"]["CropLatentsCoreInvocation"] | 
components["schemas"]["CvInpaintInvocation"] | components["schemas"]["DWOpenposeDetectionInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["DenoiseLatentsMetaInvocation"] | components["schemas"]["DepthAnythingDepthEstimationInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["ExpandMaskWithFadeInvocation"] | components["schemas"]["FLUXLoRACollectionLoader"] | components["schemas"]["FaceIdentifierInvocation"] | components["schemas"]["FaceMaskInvocation"] | components["schemas"]["FaceOffInvocation"] | components["schemas"]["FloatBatchInvocation"] | components["schemas"]["FloatCollectionInvocation"] | components["schemas"]["FloatGenerator"] | components["schemas"]["FloatInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["FloatMathInvocation"] | components["schemas"]["FloatToIntegerInvocation"] | components["schemas"]["Flux2DenoiseInvocation"] | components["schemas"]["Flux2KleinLoRACollectionLoader"] | components["schemas"]["Flux2KleinLoRALoaderInvocation"] | components["schemas"]["Flux2KleinModelLoaderInvocation"] | components["schemas"]["Flux2KleinTextEncoderInvocation"] | components["schemas"]["Flux2VaeDecodeInvocation"] | components["schemas"]["Flux2VaeEncodeInvocation"] | components["schemas"]["FluxControlLoRALoaderInvocation"] | components["schemas"]["FluxControlNetInvocation"] | components["schemas"]["FluxDenoiseInvocation"] | components["schemas"]["FluxDenoiseLatentsMetaInvocation"] | components["schemas"]["FluxFillInvocation"] | components["schemas"]["FluxIPAdapterInvocation"] | components["schemas"]["FluxKontextConcatenateImagesInvocation"] | components["schemas"]["FluxKontextInvocation"] | components["schemas"]["FluxLoRALoaderInvocation"] | components["schemas"]["FluxModelLoaderInvocation"] | components["schemas"]["FluxReduxInvocation"] | 
components["schemas"]["FluxTextEncoderInvocation"] | components["schemas"]["FluxVaeDecodeInvocation"] | components["schemas"]["FluxVaeEncodeInvocation"] | components["schemas"]["FreeUInvocation"] | components["schemas"]["GetMaskBoundingBoxInvocation"] | components["schemas"]["GroundingDinoInvocation"] | components["schemas"]["HEDEdgeDetectionInvocation"] | components["schemas"]["HeuristicResizeInvocation"] | components["schemas"]["IPAdapterInvocation"] | components["schemas"]["IdealSizeInvocation"] | components["schemas"]["ImageBatchInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["ImageChannelMultiplyInvocation"] | components["schemas"]["ImageChannelOffsetInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImageGenerator"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ImageInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ImageMaskToTensorInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["ImageNoiseInvocation"] | components["schemas"]["ImagePanelLayoutInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["IntegerBatchInvocation"] | components["schemas"]["IntegerCollectionInvocation"] | components["schemas"]["IntegerGenerator"] | 
components["schemas"]["IntegerInvocation"] | components["schemas"]["IntegerMathInvocation"] | components["schemas"]["InvertTensorMaskInvocation"] | components["schemas"]["InvokeAdjustImageHuePlusInvocation"] | components["schemas"]["InvokeEquivalentAchromaticLightnessInvocation"] | components["schemas"]["InvokeImageBlendInvocation"] | components["schemas"]["InvokeImageCompositorInvocation"] | components["schemas"]["InvokeImageDilateOrErodeInvocation"] | components["schemas"]["InvokeImageEnhanceInvocation"] | components["schemas"]["InvokeImageValueThresholdsInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["LaMaInfillInvocation"] | components["schemas"]["LatentsCollectionInvocation"] | components["schemas"]["LatentsInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["LineartAnimeEdgeDetectionInvocation"] | components["schemas"]["LineartEdgeDetectionInvocation"] | components["schemas"]["LlavaOnevisionVllmInvocation"] | components["schemas"]["LoRACollectionLoader"] | components["schemas"]["LoRALoaderInvocation"] | components["schemas"]["LoRASelectorInvocation"] | components["schemas"]["MLSDDetectionInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["MaskFromIDInvocation"] | components["schemas"]["MaskTensorToImageInvocation"] | components["schemas"]["MediaPipeFaceDetectionInvocation"] | components["schemas"]["MergeMetadataInvocation"] | components["schemas"]["MergeTilesToImageInvocation"] | components["schemas"]["MetadataFieldExtractorInvocation"] | components["schemas"]["MetadataFromImageInvocation"] | components["schemas"]["MetadataInvocation"] | components["schemas"]["MetadataItemInvocation"] | components["schemas"]["MetadataItemLinkedInvocation"] | components["schemas"]["MetadataToBoolCollectionInvocation"] | 
components["schemas"]["MetadataToBoolInvocation"] | components["schemas"]["MetadataToControlnetsInvocation"] | components["schemas"]["MetadataToFloatCollectionInvocation"] | components["schemas"]["MetadataToFloatInvocation"] | components["schemas"]["MetadataToIPAdaptersInvocation"] | components["schemas"]["MetadataToIntegerCollectionInvocation"] | components["schemas"]["MetadataToIntegerInvocation"] | components["schemas"]["MetadataToLorasCollectionInvocation"] | components["schemas"]["MetadataToLorasInvocation"] | components["schemas"]["MetadataToModelInvocation"] | components["schemas"]["MetadataToSDXLLorasInvocation"] | components["schemas"]["MetadataToSDXLModelInvocation"] | components["schemas"]["MetadataToSchedulerInvocation"] | components["schemas"]["MetadataToStringCollectionInvocation"] | components["schemas"]["MetadataToStringInvocation"] | components["schemas"]["MetadataToT2IAdaptersInvocation"] | components["schemas"]["MetadataToVAEInvocation"] | components["schemas"]["ModelIdentifierInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["NormalMapInvocation"] | components["schemas"]["PBRMapsInvocation"] | components["schemas"]["PairTileImageInvocation"] | components["schemas"]["PasteImageIntoBoundingBoxInvocation"] | components["schemas"]["PiDiNetEdgeDetectionInvocation"] | components["schemas"]["PromptTemplateInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["RandomFloatInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RectangleMaskInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["RoundInvocation"] | components["schemas"]["SD3DenoiseInvocation"] | components["schemas"]["SD3ImageToLatentsInvocation"] | 
components["schemas"]["SD3LatentsToImageInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["SDXLLoRACollectionLoader"] | components["schemas"]["SDXLLoRALoaderInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["SaveImageInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["SchedulerInvocation"] | components["schemas"]["Sd3ModelLoaderInvocation"] | components["schemas"]["Sd3TextEncoderInvocation"] | components["schemas"]["SeamlessModeInvocation"] | components["schemas"]["SegmentAnythingInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["SpandrelImageToImageAutoscaleInvocation"] | components["schemas"]["SpandrelImageToImageInvocation"] | components["schemas"]["StringBatchInvocation"] | components["schemas"]["StringCollectionInvocation"] | components["schemas"]["StringGenerator"] | components["schemas"]["StringInvocation"] | components["schemas"]["StringJoinInvocation"] | components["schemas"]["StringJoinThreeInvocation"] | components["schemas"]["StringReplaceInvocation"] | components["schemas"]["StringSplitInvocation"] | components["schemas"]["StringSplitNegInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["T2IAdapterInvocation"] | components["schemas"]["TileToPropertiesInvocation"] | components["schemas"]["TiledMultiDiffusionDenoiseLatents"] | components["schemas"]["UnsharpMaskInvocation"] | components["schemas"]["VAELoaderInvocation"] | components["schemas"]["ZImageControlInvocation"] | components["schemas"]["ZImageDenoiseInvocation"] | components["schemas"]["ZImageDenoiseMetaInvocation"] | components["schemas"]["ZImageImageToLatentsInvocation"] | components["schemas"]["ZImageLatentsToImageInvocation"] | components["schemas"]["ZImageLoRACollectionLoader"] | 
components["schemas"]["ZImageLoRALoaderInvocation"] | components["schemas"]["ZImageModelLoaderInvocation"] | components["schemas"]["ZImageSeedVarianceEnhancerInvocation"] | components["schemas"]["ZImageTextEncoderInvocation"]; + invocation: components["schemas"]["AddInvocation"] | components["schemas"]["AlphaMaskToTensorInvocation"] | components["schemas"]["AnimaDenoiseInvocation"] | components["schemas"]["AnimaLatentsToImageInvocation"] | components["schemas"]["AnimaModelLoaderInvocation"] | components["schemas"]["AnimaTextEncoderInvocation"] | components["schemas"]["ApplyMaskTensorToImageInvocation"] | components["schemas"]["ApplyMaskToImageInvocation"] | components["schemas"]["BlankImageInvocation"] | components["schemas"]["BlendLatentsInvocation"] | components["schemas"]["BooleanCollectionInvocation"] | components["schemas"]["BooleanInvocation"] | components["schemas"]["BoundingBoxInvocation"] | components["schemas"]["CLIPSkipInvocation"] | components["schemas"]["CV2InfillInvocation"] | components["schemas"]["CalculateImageTilesEvenSplitInvocation"] | components["schemas"]["CalculateImageTilesInvocation"] | components["schemas"]["CalculateImageTilesMinimumOverlapInvocation"] | components["schemas"]["CannyEdgeDetectionInvocation"] | components["schemas"]["CanvasPasteBackInvocation"] | components["schemas"]["CanvasV2MaskAndCropInvocation"] | components["schemas"]["CenterPadCropInvocation"] | components["schemas"]["CogView4DenoiseInvocation"] | components["schemas"]["CogView4ImageToLatentsInvocation"] | components["schemas"]["CogView4LatentsToImageInvocation"] | components["schemas"]["CogView4ModelLoaderInvocation"] | components["schemas"]["CogView4TextEncoderInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["ColorInvocation"] | components["schemas"]["ColorMapInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["ConditioningCollectionInvocation"] | 
components["schemas"]["ConditioningInvocation"] | components["schemas"]["ContentShuffleInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["CoreMetadataInvocation"] | components["schemas"]["CreateDenoiseMaskInvocation"] | components["schemas"]["CreateGradientMaskInvocation"] | components["schemas"]["CropImageToBoundingBoxInvocation"] | components["schemas"]["CropLatentsCoreInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["DWOpenposeDetectionInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["DenoiseLatentsMetaInvocation"] | components["schemas"]["DepthAnythingDepthEstimationInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["ExpandMaskWithFadeInvocation"] | components["schemas"]["FLUXLoRACollectionLoader"] | components["schemas"]["FaceIdentifierInvocation"] | components["schemas"]["FaceMaskInvocation"] | components["schemas"]["FaceOffInvocation"] | components["schemas"]["FloatBatchInvocation"] | components["schemas"]["FloatCollectionInvocation"] | components["schemas"]["FloatGenerator"] | components["schemas"]["FloatInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["FloatMathInvocation"] | components["schemas"]["FloatToIntegerInvocation"] | components["schemas"]["Flux2DenoiseInvocation"] | components["schemas"]["Flux2KleinModelLoaderInvocation"] | components["schemas"]["Flux2KleinTextEncoderInvocation"] | components["schemas"]["Flux2VaeDecodeInvocation"] | components["schemas"]["Flux2VaeEncodeInvocation"] | components["schemas"]["FluxControlLoRALoaderInvocation"] | components["schemas"]["FluxControlNetInvocation"] | components["schemas"]["FluxDenoiseInvocation"] | components["schemas"]["FluxDenoiseLatentsMetaInvocation"] | components["schemas"]["FluxFillInvocation"] | 
components["schemas"]["FluxIPAdapterInvocation"] | components["schemas"]["FluxKontextConcatenateImagesInvocation"] | components["schemas"]["FluxKontextInvocation"] | components["schemas"]["FluxLoRALoaderInvocation"] | components["schemas"]["FluxModelLoaderInvocation"] | components["schemas"]["FluxReduxInvocation"] | components["schemas"]["FluxTextEncoderInvocation"] | components["schemas"]["FluxVaeDecodeInvocation"] | components["schemas"]["FluxVaeEncodeInvocation"] | components["schemas"]["FreeUInvocation"] | components["schemas"]["GetMaskBoundingBoxInvocation"] | components["schemas"]["GroundingDinoInvocation"] | components["schemas"]["HEDEdgeDetectionInvocation"] | components["schemas"]["HeuristicResizeInvocation"] | components["schemas"]["IPAdapterInvocation"] | components["schemas"]["IdealSizeInvocation"] | components["schemas"]["ImageBatchInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["ImageChannelMultiplyInvocation"] | components["schemas"]["ImageChannelOffsetInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImageGenerator"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ImageInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ImageMaskToTensorInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["ImageNoiseInvocation"] | components["schemas"]["ImagePanelLayoutInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | 
components["schemas"]["InfillColorInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["IntegerBatchInvocation"] | components["schemas"]["IntegerCollectionInvocation"] | components["schemas"]["IntegerGenerator"] | components["schemas"]["IntegerInvocation"] | components["schemas"]["IntegerMathInvocation"] | components["schemas"]["InvertTensorMaskInvocation"] | components["schemas"]["InvokeAdjustImageHuePlusInvocation"] | components["schemas"]["InvokeEquivalentAchromaticLightnessInvocation"] | components["schemas"]["InvokeImageBlendInvocation"] | components["schemas"]["InvokeImageCompositorInvocation"] | components["schemas"]["InvokeImageDilateOrErodeInvocation"] | components["schemas"]["InvokeImageEnhanceInvocation"] | components["schemas"]["InvokeImageValueThresholdsInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["LaMaInfillInvocation"] | components["schemas"]["LatentsCollectionInvocation"] | components["schemas"]["LatentsInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["LineartAnimeEdgeDetectionInvocation"] | components["schemas"]["LineartEdgeDetectionInvocation"] | components["schemas"]["LlavaOnevisionVllmInvocation"] | components["schemas"]["LoRACollectionLoader"] | components["schemas"]["LoRALoaderInvocation"] | components["schemas"]["LoRASelectorInvocation"] | components["schemas"]["MLSDDetectionInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["MaskFromIDInvocation"] | components["schemas"]["MaskTensorToImageInvocation"] | components["schemas"]["MediaPipeFaceDetectionInvocation"] | components["schemas"]["MergeMetadataInvocation"] | components["schemas"]["MergeTilesToImageInvocation"] | 
components["schemas"]["MetadataFieldExtractorInvocation"] | components["schemas"]["MetadataFromImageInvocation"] | components["schemas"]["MetadataInvocation"] | components["schemas"]["MetadataItemInvocation"] | components["schemas"]["MetadataItemLinkedInvocation"] | components["schemas"]["MetadataToBoolCollectionInvocation"] | components["schemas"]["MetadataToBoolInvocation"] | components["schemas"]["MetadataToControlnetsInvocation"] | components["schemas"]["MetadataToFloatCollectionInvocation"] | components["schemas"]["MetadataToFloatInvocation"] | components["schemas"]["MetadataToIPAdaptersInvocation"] | components["schemas"]["MetadataToIntegerCollectionInvocation"] | components["schemas"]["MetadataToIntegerInvocation"] | components["schemas"]["MetadataToLorasCollectionInvocation"] | components["schemas"]["MetadataToLorasInvocation"] | components["schemas"]["MetadataToModelInvocation"] | components["schemas"]["MetadataToSDXLLorasInvocation"] | components["schemas"]["MetadataToSDXLModelInvocation"] | components["schemas"]["MetadataToSchedulerInvocation"] | components["schemas"]["MetadataToStringCollectionInvocation"] | components["schemas"]["MetadataToStringInvocation"] | components["schemas"]["MetadataToT2IAdaptersInvocation"] | components["schemas"]["MetadataToVAEInvocation"] | components["schemas"]["ModelIdentifierInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["NormalMapInvocation"] | components["schemas"]["PBRMapsInvocation"] | components["schemas"]["PairTileImageInvocation"] | components["schemas"]["PasteImageIntoBoundingBoxInvocation"] | components["schemas"]["PiDiNetEdgeDetectionInvocation"] | components["schemas"]["PromptTemplateInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["RandomFloatInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["RangeInvocation"] 
| components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RectangleMaskInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["RoundInvocation"] | components["schemas"]["SD3DenoiseInvocation"] | components["schemas"]["SD3ImageToLatentsInvocation"] | components["schemas"]["SD3LatentsToImageInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["SDXLLoRACollectionLoader"] | components["schemas"]["SDXLLoRALoaderInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["SaveImageInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["SchedulerInvocation"] | components["schemas"]["Sd3ModelLoaderInvocation"] | components["schemas"]["Sd3TextEncoderInvocation"] | components["schemas"]["SeamlessModeInvocation"] | components["schemas"]["SegmentAnythingInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["SpandrelImageToImageAutoscaleInvocation"] | components["schemas"]["SpandrelImageToImageInvocation"] | components["schemas"]["StringBatchInvocation"] | components["schemas"]["StringCollectionInvocation"] | components["schemas"]["StringGenerator"] | components["schemas"]["StringInvocation"] | components["schemas"]["StringJoinInvocation"] | components["schemas"]["StringJoinThreeInvocation"] | components["schemas"]["StringReplaceInvocation"] | components["schemas"]["StringSplitInvocation"] | components["schemas"]["StringSplitNegInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["T2IAdapterInvocation"] | components["schemas"]["TileToPropertiesInvocation"] | components["schemas"]["TiledMultiDiffusionDenoiseLatents"] | components["schemas"]["UnsharpMaskInvocation"] | components["schemas"]["VAELoaderInvocation"] | 
components["schemas"]["ZImageControlInvocation"] | components["schemas"]["ZImageDenoiseInvocation"] | components["schemas"]["ZImageDenoiseMetaInvocation"] | components["schemas"]["ZImageImageToLatentsInvocation"] | components["schemas"]["ZImageLatentsToImageInvocation"] | components["schemas"]["ZImageLoRACollectionLoader"] | components["schemas"]["ZImageLoRALoaderInvocation"] | components["schemas"]["ZImageModelLoaderInvocation"] | components["schemas"]["ZImageSeedVarianceEnhancerInvocation"] | components["schemas"]["ZImageTextEncoderInvocation"]; /** * Invocation Source Id * @description The ID of the prepared invocation's source node @@ -14309,12 +13803,6 @@ export type components = { * @default null */ destination: string | null; - /** - * User Id - * @description The ID of the user who created the queue item - * @default system - */ - user_id: string; /** * Session Id * @description The ID of the session (aka graph execution state) @@ -14324,7 +13812,7 @@ export type components = { * Invocation * @description The ID of the invocation */ - invocation: components["schemas"]["AddInvocation"] | components["schemas"]["AlphaMaskToTensorInvocation"] | components["schemas"]["ApplyMaskTensorToImageInvocation"] | components["schemas"]["ApplyMaskToImageInvocation"] | components["schemas"]["BlankImageInvocation"] | components["schemas"]["BlendLatentsInvocation"] | components["schemas"]["BooleanCollectionInvocation"] | components["schemas"]["BooleanInvocation"] | components["schemas"]["BoundingBoxInvocation"] | components["schemas"]["CLIPSkipInvocation"] | components["schemas"]["CV2InfillInvocation"] | components["schemas"]["CalculateImageTilesEvenSplitInvocation"] | components["schemas"]["CalculateImageTilesInvocation"] | components["schemas"]["CalculateImageTilesMinimumOverlapInvocation"] | components["schemas"]["CannyEdgeDetectionInvocation"] | components["schemas"]["CanvasPasteBackInvocation"] | components["schemas"]["CanvasV2MaskAndCropInvocation"] | 
components["schemas"]["CenterPadCropInvocation"] | components["schemas"]["CogView4DenoiseInvocation"] | components["schemas"]["CogView4ImageToLatentsInvocation"] | components["schemas"]["CogView4LatentsToImageInvocation"] | components["schemas"]["CogView4ModelLoaderInvocation"] | components["schemas"]["CogView4TextEncoderInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["ColorInvocation"] | components["schemas"]["ColorMapInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["ConditioningCollectionInvocation"] | components["schemas"]["ConditioningInvocation"] | components["schemas"]["ContentShuffleInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["CoreMetadataInvocation"] | components["schemas"]["CreateDenoiseMaskInvocation"] | components["schemas"]["CreateGradientMaskInvocation"] | components["schemas"]["CropImageToBoundingBoxInvocation"] | components["schemas"]["CropLatentsCoreInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["DWOpenposeDetectionInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["DenoiseLatentsMetaInvocation"] | components["schemas"]["DepthAnythingDepthEstimationInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["ExpandMaskWithFadeInvocation"] | components["schemas"]["FLUXLoRACollectionLoader"] | components["schemas"]["FaceIdentifierInvocation"] | components["schemas"]["FaceMaskInvocation"] | components["schemas"]["FaceOffInvocation"] | components["schemas"]["FloatBatchInvocation"] | components["schemas"]["FloatCollectionInvocation"] | components["schemas"]["FloatGenerator"] | components["schemas"]["FloatInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | 
components["schemas"]["FloatMathInvocation"] | components["schemas"]["FloatToIntegerInvocation"] | components["schemas"]["Flux2DenoiseInvocation"] | components["schemas"]["Flux2KleinLoRACollectionLoader"] | components["schemas"]["Flux2KleinLoRALoaderInvocation"] | components["schemas"]["Flux2KleinModelLoaderInvocation"] | components["schemas"]["Flux2KleinTextEncoderInvocation"] | components["schemas"]["Flux2VaeDecodeInvocation"] | components["schemas"]["Flux2VaeEncodeInvocation"] | components["schemas"]["FluxControlLoRALoaderInvocation"] | components["schemas"]["FluxControlNetInvocation"] | components["schemas"]["FluxDenoiseInvocation"] | components["schemas"]["FluxDenoiseLatentsMetaInvocation"] | components["schemas"]["FluxFillInvocation"] | components["schemas"]["FluxIPAdapterInvocation"] | components["schemas"]["FluxKontextConcatenateImagesInvocation"] | components["schemas"]["FluxKontextInvocation"] | components["schemas"]["FluxLoRALoaderInvocation"] | components["schemas"]["FluxModelLoaderInvocation"] | components["schemas"]["FluxReduxInvocation"] | components["schemas"]["FluxTextEncoderInvocation"] | components["schemas"]["FluxVaeDecodeInvocation"] | components["schemas"]["FluxVaeEncodeInvocation"] | components["schemas"]["FreeUInvocation"] | components["schemas"]["GetMaskBoundingBoxInvocation"] | components["schemas"]["GroundingDinoInvocation"] | components["schemas"]["HEDEdgeDetectionInvocation"] | components["schemas"]["HeuristicResizeInvocation"] | components["schemas"]["IPAdapterInvocation"] | components["schemas"]["IdealSizeInvocation"] | components["schemas"]["ImageBatchInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["ImageChannelMultiplyInvocation"] | components["schemas"]["ImageChannelOffsetInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageCropInvocation"] | 
components["schemas"]["ImageGenerator"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ImageInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ImageMaskToTensorInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["ImageNoiseInvocation"] | components["schemas"]["ImagePanelLayoutInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["IntegerBatchInvocation"] | components["schemas"]["IntegerCollectionInvocation"] | components["schemas"]["IntegerGenerator"] | components["schemas"]["IntegerInvocation"] | components["schemas"]["IntegerMathInvocation"] | components["schemas"]["InvertTensorMaskInvocation"] | components["schemas"]["InvokeAdjustImageHuePlusInvocation"] | components["schemas"]["InvokeEquivalentAchromaticLightnessInvocation"] | components["schemas"]["InvokeImageBlendInvocation"] | components["schemas"]["InvokeImageCompositorInvocation"] | components["schemas"]["InvokeImageDilateOrErodeInvocation"] | components["schemas"]["InvokeImageEnhanceInvocation"] | components["schemas"]["InvokeImageValueThresholdsInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["LaMaInfillInvocation"] | components["schemas"]["LatentsCollectionInvocation"] | components["schemas"]["LatentsInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["LineartAnimeEdgeDetectionInvocation"] | components["schemas"]["LineartEdgeDetectionInvocation"] | 
components["schemas"]["LlavaOnevisionVllmInvocation"] | components["schemas"]["LoRACollectionLoader"] | components["schemas"]["LoRALoaderInvocation"] | components["schemas"]["LoRASelectorInvocation"] | components["schemas"]["MLSDDetectionInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["MaskFromIDInvocation"] | components["schemas"]["MaskTensorToImageInvocation"] | components["schemas"]["MediaPipeFaceDetectionInvocation"] | components["schemas"]["MergeMetadataInvocation"] | components["schemas"]["MergeTilesToImageInvocation"] | components["schemas"]["MetadataFieldExtractorInvocation"] | components["schemas"]["MetadataFromImageInvocation"] | components["schemas"]["MetadataInvocation"] | components["schemas"]["MetadataItemInvocation"] | components["schemas"]["MetadataItemLinkedInvocation"] | components["schemas"]["MetadataToBoolCollectionInvocation"] | components["schemas"]["MetadataToBoolInvocation"] | components["schemas"]["MetadataToControlnetsInvocation"] | components["schemas"]["MetadataToFloatCollectionInvocation"] | components["schemas"]["MetadataToFloatInvocation"] | components["schemas"]["MetadataToIPAdaptersInvocation"] | components["schemas"]["MetadataToIntegerCollectionInvocation"] | components["schemas"]["MetadataToIntegerInvocation"] | components["schemas"]["MetadataToLorasCollectionInvocation"] | components["schemas"]["MetadataToLorasInvocation"] | components["schemas"]["MetadataToModelInvocation"] | components["schemas"]["MetadataToSDXLLorasInvocation"] | components["schemas"]["MetadataToSDXLModelInvocation"] | components["schemas"]["MetadataToSchedulerInvocation"] | components["schemas"]["MetadataToStringCollectionInvocation"] | components["schemas"]["MetadataToStringInvocation"] | components["schemas"]["MetadataToT2IAdaptersInvocation"] | 
components["schemas"]["MetadataToVAEInvocation"] | components["schemas"]["ModelIdentifierInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["NormalMapInvocation"] | components["schemas"]["PBRMapsInvocation"] | components["schemas"]["PairTileImageInvocation"] | components["schemas"]["PasteImageIntoBoundingBoxInvocation"] | components["schemas"]["PiDiNetEdgeDetectionInvocation"] | components["schemas"]["PromptTemplateInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["RandomFloatInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RectangleMaskInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["RoundInvocation"] | components["schemas"]["SD3DenoiseInvocation"] | components["schemas"]["SD3ImageToLatentsInvocation"] | components["schemas"]["SD3LatentsToImageInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["SDXLLoRACollectionLoader"] | components["schemas"]["SDXLLoRALoaderInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["SaveImageInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["SchedulerInvocation"] | components["schemas"]["Sd3ModelLoaderInvocation"] | components["schemas"]["Sd3TextEncoderInvocation"] | components["schemas"]["SeamlessModeInvocation"] | components["schemas"]["SegmentAnythingInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["SpandrelImageToImageAutoscaleInvocation"] | components["schemas"]["SpandrelImageToImageInvocation"] | 
components["schemas"]["StringBatchInvocation"] | components["schemas"]["StringCollectionInvocation"] | components["schemas"]["StringGenerator"] | components["schemas"]["StringInvocation"] | components["schemas"]["StringJoinInvocation"] | components["schemas"]["StringJoinThreeInvocation"] | components["schemas"]["StringReplaceInvocation"] | components["schemas"]["StringSplitInvocation"] | components["schemas"]["StringSplitNegInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["T2IAdapterInvocation"] | components["schemas"]["TileToPropertiesInvocation"] | components["schemas"]["TiledMultiDiffusionDenoiseLatents"] | components["schemas"]["UnsharpMaskInvocation"] | components["schemas"]["VAELoaderInvocation"] | components["schemas"]["ZImageControlInvocation"] | components["schemas"]["ZImageDenoiseInvocation"] | components["schemas"]["ZImageDenoiseMetaInvocation"] | components["schemas"]["ZImageImageToLatentsInvocation"] | components["schemas"]["ZImageLatentsToImageInvocation"] | components["schemas"]["ZImageLoRACollectionLoader"] | components["schemas"]["ZImageLoRALoaderInvocation"] | components["schemas"]["ZImageModelLoaderInvocation"] | components["schemas"]["ZImageSeedVarianceEnhancerInvocation"] | components["schemas"]["ZImageTextEncoderInvocation"]; + invocation: components["schemas"]["AddInvocation"] | components["schemas"]["AlphaMaskToTensorInvocation"] | components["schemas"]["AnimaDenoiseInvocation"] | components["schemas"]["AnimaLatentsToImageInvocation"] | components["schemas"]["AnimaModelLoaderInvocation"] | components["schemas"]["AnimaTextEncoderInvocation"] | components["schemas"]["ApplyMaskTensorToImageInvocation"] | components["schemas"]["ApplyMaskToImageInvocation"] | components["schemas"]["BlankImageInvocation"] | components["schemas"]["BlendLatentsInvocation"] | components["schemas"]["BooleanCollectionInvocation"] | components["schemas"]["BooleanInvocation"] | components["schemas"]["BoundingBoxInvocation"] | 
components["schemas"]["CLIPSkipInvocation"] | components["schemas"]["CV2InfillInvocation"] | components["schemas"]["CalculateImageTilesEvenSplitInvocation"] | components["schemas"]["CalculateImageTilesInvocation"] | components["schemas"]["CalculateImageTilesMinimumOverlapInvocation"] | components["schemas"]["CannyEdgeDetectionInvocation"] | components["schemas"]["CanvasPasteBackInvocation"] | components["schemas"]["CanvasV2MaskAndCropInvocation"] | components["schemas"]["CenterPadCropInvocation"] | components["schemas"]["CogView4DenoiseInvocation"] | components["schemas"]["CogView4ImageToLatentsInvocation"] | components["schemas"]["CogView4LatentsToImageInvocation"] | components["schemas"]["CogView4ModelLoaderInvocation"] | components["schemas"]["CogView4TextEncoderInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["ColorInvocation"] | components["schemas"]["ColorMapInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["ConditioningCollectionInvocation"] | components["schemas"]["ConditioningInvocation"] | components["schemas"]["ContentShuffleInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["CoreMetadataInvocation"] | components["schemas"]["CreateDenoiseMaskInvocation"] | components["schemas"]["CreateGradientMaskInvocation"] | components["schemas"]["CropImageToBoundingBoxInvocation"] | components["schemas"]["CropLatentsCoreInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["DWOpenposeDetectionInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["DenoiseLatentsMetaInvocation"] | components["schemas"]["DepthAnythingDepthEstimationInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["ExpandMaskWithFadeInvocation"] | 
components["schemas"]["FLUXLoRACollectionLoader"] | components["schemas"]["FaceIdentifierInvocation"] | components["schemas"]["FaceMaskInvocation"] | components["schemas"]["FaceOffInvocation"] | components["schemas"]["FloatBatchInvocation"] | components["schemas"]["FloatCollectionInvocation"] | components["schemas"]["FloatGenerator"] | components["schemas"]["FloatInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["FloatMathInvocation"] | components["schemas"]["FloatToIntegerInvocation"] | components["schemas"]["Flux2DenoiseInvocation"] | components["schemas"]["Flux2KleinModelLoaderInvocation"] | components["schemas"]["Flux2KleinTextEncoderInvocation"] | components["schemas"]["Flux2VaeDecodeInvocation"] | components["schemas"]["Flux2VaeEncodeInvocation"] | components["schemas"]["FluxControlLoRALoaderInvocation"] | components["schemas"]["FluxControlNetInvocation"] | components["schemas"]["FluxDenoiseInvocation"] | components["schemas"]["FluxDenoiseLatentsMetaInvocation"] | components["schemas"]["FluxFillInvocation"] | components["schemas"]["FluxIPAdapterInvocation"] | components["schemas"]["FluxKontextConcatenateImagesInvocation"] | components["schemas"]["FluxKontextInvocation"] | components["schemas"]["FluxLoRALoaderInvocation"] | components["schemas"]["FluxModelLoaderInvocation"] | components["schemas"]["FluxReduxInvocation"] | components["schemas"]["FluxTextEncoderInvocation"] | components["schemas"]["FluxVaeDecodeInvocation"] | components["schemas"]["FluxVaeEncodeInvocation"] | components["schemas"]["FreeUInvocation"] | components["schemas"]["GetMaskBoundingBoxInvocation"] | components["schemas"]["GroundingDinoInvocation"] | components["schemas"]["HEDEdgeDetectionInvocation"] | components["schemas"]["HeuristicResizeInvocation"] | components["schemas"]["IPAdapterInvocation"] | components["schemas"]["IdealSizeInvocation"] | components["schemas"]["ImageBatchInvocation"] | components["schemas"]["ImageBlurInvocation"] | 
components["schemas"]["ImageChannelInvocation"] | components["schemas"]["ImageChannelMultiplyInvocation"] | components["schemas"]["ImageChannelOffsetInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImageGenerator"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ImageInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ImageMaskToTensorInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["ImageNoiseInvocation"] | components["schemas"]["ImagePanelLayoutInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["IntegerBatchInvocation"] | components["schemas"]["IntegerCollectionInvocation"] | components["schemas"]["IntegerGenerator"] | components["schemas"]["IntegerInvocation"] | components["schemas"]["IntegerMathInvocation"] | components["schemas"]["InvertTensorMaskInvocation"] | components["schemas"]["InvokeAdjustImageHuePlusInvocation"] | components["schemas"]["InvokeEquivalentAchromaticLightnessInvocation"] | components["schemas"]["InvokeImageBlendInvocation"] | components["schemas"]["InvokeImageCompositorInvocation"] | components["schemas"]["InvokeImageDilateOrErodeInvocation"] | components["schemas"]["InvokeImageEnhanceInvocation"] | components["schemas"]["InvokeImageValueThresholdsInvocation"] | components["schemas"]["IterateInvocation"] | 
components["schemas"]["LaMaInfillInvocation"] | components["schemas"]["LatentsCollectionInvocation"] | components["schemas"]["LatentsInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["LineartAnimeEdgeDetectionInvocation"] | components["schemas"]["LineartEdgeDetectionInvocation"] | components["schemas"]["LlavaOnevisionVllmInvocation"] | components["schemas"]["LoRACollectionLoader"] | components["schemas"]["LoRALoaderInvocation"] | components["schemas"]["LoRASelectorInvocation"] | components["schemas"]["MLSDDetectionInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["MaskFromIDInvocation"] | components["schemas"]["MaskTensorToImageInvocation"] | components["schemas"]["MediaPipeFaceDetectionInvocation"] | components["schemas"]["MergeMetadataInvocation"] | components["schemas"]["MergeTilesToImageInvocation"] | components["schemas"]["MetadataFieldExtractorInvocation"] | components["schemas"]["MetadataFromImageInvocation"] | components["schemas"]["MetadataInvocation"] | components["schemas"]["MetadataItemInvocation"] | components["schemas"]["MetadataItemLinkedInvocation"] | components["schemas"]["MetadataToBoolCollectionInvocation"] | components["schemas"]["MetadataToBoolInvocation"] | components["schemas"]["MetadataToControlnetsInvocation"] | components["schemas"]["MetadataToFloatCollectionInvocation"] | components["schemas"]["MetadataToFloatInvocation"] | components["schemas"]["MetadataToIPAdaptersInvocation"] | components["schemas"]["MetadataToIntegerCollectionInvocation"] | components["schemas"]["MetadataToIntegerInvocation"] | components["schemas"]["MetadataToLorasCollectionInvocation"] | components["schemas"]["MetadataToLorasInvocation"] | components["schemas"]["MetadataToModelInvocation"] | components["schemas"]["MetadataToSDXLLorasInvocation"] 
| components["schemas"]["MetadataToSDXLModelInvocation"] | components["schemas"]["MetadataToSchedulerInvocation"] | components["schemas"]["MetadataToStringCollectionInvocation"] | components["schemas"]["MetadataToStringInvocation"] | components["schemas"]["MetadataToT2IAdaptersInvocation"] | components["schemas"]["MetadataToVAEInvocation"] | components["schemas"]["ModelIdentifierInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["NormalMapInvocation"] | components["schemas"]["PBRMapsInvocation"] | components["schemas"]["PairTileImageInvocation"] | components["schemas"]["PasteImageIntoBoundingBoxInvocation"] | components["schemas"]["PiDiNetEdgeDetectionInvocation"] | components["schemas"]["PromptTemplateInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["RandomFloatInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RectangleMaskInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["RoundInvocation"] | components["schemas"]["SD3DenoiseInvocation"] | components["schemas"]["SD3ImageToLatentsInvocation"] | components["schemas"]["SD3LatentsToImageInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["SDXLLoRACollectionLoader"] | components["schemas"]["SDXLLoRALoaderInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["SaveImageInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["SchedulerInvocation"] | components["schemas"]["Sd3ModelLoaderInvocation"] | components["schemas"]["Sd3TextEncoderInvocation"] | 
components["schemas"]["SeamlessModeInvocation"] | components["schemas"]["SegmentAnythingInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["SpandrelImageToImageAutoscaleInvocation"] | components["schemas"]["SpandrelImageToImageInvocation"] | components["schemas"]["StringBatchInvocation"] | components["schemas"]["StringCollectionInvocation"] | components["schemas"]["StringGenerator"] | components["schemas"]["StringInvocation"] | components["schemas"]["StringJoinInvocation"] | components["schemas"]["StringJoinThreeInvocation"] | components["schemas"]["StringReplaceInvocation"] | components["schemas"]["StringSplitInvocation"] | components["schemas"]["StringSplitNegInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["T2IAdapterInvocation"] | components["schemas"]["TileToPropertiesInvocation"] | components["schemas"]["TiledMultiDiffusionDenoiseLatents"] | components["schemas"]["UnsharpMaskInvocation"] | components["schemas"]["VAELoaderInvocation"] | components["schemas"]["ZImageControlInvocation"] | components["schemas"]["ZImageDenoiseInvocation"] | components["schemas"]["ZImageDenoiseMetaInvocation"] | components["schemas"]["ZImageImageToLatentsInvocation"] | components["schemas"]["ZImageLatentsToImageInvocation"] | components["schemas"]["ZImageLoRACollectionLoader"] | components["schemas"]["ZImageLoRALoaderInvocation"] | components["schemas"]["ZImageModelLoaderInvocation"] | components["schemas"]["ZImageSeedVarianceEnhancerInvocation"] | components["schemas"]["ZImageTextEncoderInvocation"]; /** * Invocation Source Id * @description The ID of the prepared invocation's source node @@ -14395,8 +13883,6 @@ export type components = { * scan_models_on_startup: Scan the models directory on startup, registering orphaned models. This is typically only used in conjunction with `use_memory_db` for testing purposes. * unsafe_disable_picklescan: UNSAFE. Disable the picklescan security check during model installation. 
Recommended only for development and testing purposes. This will allow arbitrary code execution during model installation, so should never be used in production. * allow_unknown_models: Allow installation of models that we are unable to identify. If enabled, models will be marked as `unknown` in the database, and will not have any metadata associated with them. If disabled, unknown models will be rejected during installation. - * multiuser: Enable multiuser support. When disabled, the application runs in single-user mode using a default system account with administrator privileges. When enabled, requires user authentication and authorization. - * strict_password_checking: Enforce strict password requirements. When True, passwords must contain uppercase, lowercase, and numbers. When False (default), any password is accepted but its strength (weak/moderate/strong) is reported to the user. */ InvokeAIAppConfig: { /** @@ -14483,14 +13969,14 @@ export type components = { * Convert Cache Dir * Format: path * @description Path to the converted models cache directory (DEPRECATED, but do not delete because it is needed for migration from previous versions). - * @default models/.convert_cache + * @default models\.convert_cache */ convert_cache_dir?: string; /** * Download Cache Dir * Format: path * @description Path to the directory that contains dynamically downloaded models. - * @default models/.download_cache + * @default models\.download_cache */ download_cache_dir?: string; /** @@ -14764,18 +14250,6 @@ export type components = { * @default true */ allow_unknown_models?: boolean; - /** - * Multiuser - * @description Enable multiuser support. When disabled, the application runs in single-user mode using a default system account with administrator privileges. When enabled, requires user authentication and authorization. - * @default false - */ - multiuser?: boolean; - /** - * Strict Password Checking - * @description Enforce strict password requirements. 
When True, passwords must contain uppercase, lowercase, and numbers. When False (default), any password is accepted but its strength (weak/moderate/strong) is reported to the user. - * @default false - */ - strict_password_checking?: boolean; }; /** * InvokeAIAppConfigWithSetFields @@ -16019,29 +15493,6 @@ export type components = { */ weight: number; }; - /** - * LoRARecallParameter - * @description LoRA configuration for recall - */ - LoRARecallParameter: { - /** - * Model Name - * @description The name of the LoRA model - */ - model_name: string; - /** - * Weight - * @description The weight for the LoRA - * @default 0.75 - */ - weight?: number; - /** - * Is Enabled - * @description Whether the LoRA is enabled - * @default true - */ - is_enabled?: boolean; - }; /** * Select LoRA * @description Selects a LoRA model and weight. @@ -16175,11 +15626,8 @@ export type components = { */ base: "flux"; }; - /** - * LoRA_Diffusers_Flux2_Config - * @description Model config for FLUX.2 (Klein) LoRA models in Diffusers format. - */ - LoRA_Diffusers_Flux2_Config: { + /** LoRA_Diffusers_SD1_Config */ + LoRA_Diffusers_SD1_Config: { /** * Key * @description A unique key for this model. @@ -16248,14 +15696,13 @@ export type components = { format: "diffusers"; /** * Base - * @default flux2 + * @default sd-1 * @constant */ - base: "flux2"; - variant: components["schemas"]["Flux2VariantType"] | null; + base: "sd-1"; }; - /** LoRA_Diffusers_SD1_Config */ - LoRA_Diffusers_SD1_Config: { + /** LoRA_Diffusers_SD2_Config */ + LoRA_Diffusers_SD2_Config: { /** * Key * @description A unique key for this model. @@ -16324,13 +15771,13 @@ export type components = { format: "diffusers"; /** * Base - * @default sd-1 + * @default sd-2 * @constant */ - base: "sd-1"; + base: "sd-2"; }; - /** LoRA_Diffusers_SD2_Config */ - LoRA_Diffusers_SD2_Config: { + /** LoRA_Diffusers_SDXL_Config */ + LoRA_Diffusers_SDXL_Config: { /** * Key * @description A unique key for this model. 
@@ -16399,13 +15846,16 @@ export type components = { format: "diffusers"; /** * Base - * @default sd-2 + * @default sdxl * @constant */ - base: "sd-2"; + base: "sdxl"; }; - /** LoRA_Diffusers_SDXL_Config */ - LoRA_Diffusers_SDXL_Config: { + /** + * LoRA_Diffusers_ZImage_Config + * @description Model config for Z-Image LoRA models in Diffusers format. + */ + LoRA_Diffusers_ZImage_Config: { /** * Key * @description A unique key for this model. @@ -16474,89 +15924,10 @@ export type components = { format: "diffusers"; /** * Base - * @default sdxl - * @constant - */ - base: "sdxl"; - }; - /** - * LoRA_Diffusers_ZImage_Config - * @description Model config for Z-Image LoRA models in Diffusers format. - */ - LoRA_Diffusers_ZImage_Config: { - /** - * Key - * @description A unique key for this model. - */ - key: string; - /** - * Hash - * @description The hash of the model file(s). - */ - hash: string; - /** - * Path - * @description Path to the model on the filesystem. Relative paths are relative to the Invoke root directory. - */ - path: string; - /** - * File Size - * @description The size of the model in bytes. - */ - file_size: number; - /** - * Name - * @description Name of the model. - */ - name: string; - /** - * Description - * @description Model description - */ - description: string | null; - /** - * Source - * @description The original source of the model (path, URL or repo_id). - */ - source: string; - /** @description The type of source */ - source_type: components["schemas"]["ModelSourceType"]; - /** - * Source Api Response - * @description The original API response from the source, as stringified JSON. 
- */ - source_api_response: string | null; - /** - * Cover Image - * @description Url for image to preview model - */ - cover_image: string | null; - /** - * Type - * @default lora - * @constant - */ - type: "lora"; - /** - * Trigger Phrases - * @description Set of trigger phrases for this model - */ - trigger_phrases: string[] | null; - /** @description Default settings for this model */ - default_settings: components["schemas"]["LoraModelDefaultSettings"] | null; - /** - * Format - * @default diffusers - * @constant - */ - format: "diffusers"; - /** - * Base - * @default z-image + * @default z-image * @constant */ base: "z-image"; - variant: components["schemas"]["ZImageVariantType"] | null; }; /** LoRA_LyCORIS_FLUX_Config */ LoRA_LyCORIS_FLUX_Config: { @@ -16633,85 +16004,6 @@ export type components = { */ base: "flux"; }; - /** - * LoRA_LyCORIS_Flux2_Config - * @description Model config for FLUX.2 (Klein) LoRA models in LyCORIS format. - */ - LoRA_LyCORIS_Flux2_Config: { - /** - * Key - * @description A unique key for this model. - */ - key: string; - /** - * Hash - * @description The hash of the model file(s). - */ - hash: string; - /** - * Path - * @description Path to the model on the filesystem. Relative paths are relative to the Invoke root directory. - */ - path: string; - /** - * File Size - * @description The size of the model in bytes. - */ - file_size: number; - /** - * Name - * @description Name of the model. - */ - name: string; - /** - * Description - * @description Model description - */ - description: string | null; - /** - * Source - * @description The original source of the model (path, URL or repo_id). - */ - source: string; - /** @description The type of source */ - source_type: components["schemas"]["ModelSourceType"]; - /** - * Source Api Response - * @description The original API response from the source, as stringified JSON. 
- */ - source_api_response: string | null; - /** - * Cover Image - * @description Url for image to preview model - */ - cover_image: string | null; - /** - * Type - * @default lora - * @constant - */ - type: "lora"; - /** - * Trigger Phrases - * @description Set of trigger phrases for this model - */ - trigger_phrases: string[] | null; - /** @description Default settings for this model */ - default_settings: components["schemas"]["LoraModelDefaultSettings"] | null; - /** - * Format - * @default lycoris - * @constant - */ - format: "lycoris"; - /** - * Base - * @default flux2 - * @constant - */ - base: "flux2"; - variant: components["schemas"]["Flux2VariantType"] | null; - }; /** LoRA_LyCORIS_SD1_Config */ LoRA_LyCORIS_SD1_Config: { /** @@ -17014,7 +16306,6 @@ export type components = { * @constant */ base: "z-image"; - variant: components["schemas"]["ZImageVariantType"] | null; }; /** LoRA_OMI_FLUX_Config */ LoRA_OMI_FLUX_Config: { @@ -17189,57 +16480,6 @@ export type components = { * @enum {integer} */ LogLevel: 0 | 10 | 20 | 30 | 40 | 50; - /** - * LoginRequest - * @description Request body for user login. - */ - LoginRequest: { - /** - * Email - * @description User email address - */ - email: string; - /** - * Password - * @description User password - */ - password: string; - /** - * Remember Me - * @description Whether to extend session duration - * @default false - */ - remember_me?: boolean; - }; - /** - * LoginResponse - * @description Response from successful login. - */ - LoginResponse: { - /** - * Token - * @description JWT access token - */ - token: string; - /** @description User information */ - user: components["schemas"]["UserDTO"]; - /** - * Expires In - * @description Token expiration time in seconds - */ - expires_in: number; - }; - /** - * LogoutResponse - * @description Response from logout. 
- */ - LogoutResponse: { - /** - * Success - * @description Whether logout was successful - */ - success: boolean; - }; /** LoraModelDefaultSettings */ LoraModelDefaultSettings: { /** @@ -17517,6 +16757,92 @@ export type components = { format: "bnb_quantized_nf4b"; variant: components["schemas"]["FluxVariantType"]; }; + /** + * Main_Checkpoint_Anima_Config + * @description Model config for Anima single-file checkpoint models (safetensors). + * + * Anima is built on NVIDIA Cosmos Predict2 DiT with a custom LLM Adapter + * that bridges Qwen3 0.6B text encoder outputs to the DiT. + */ + Main_Checkpoint_Anima_Config: { + /** + * Key + * @description A unique key for this model. + */ + key: string; + /** + * Hash + * @description The hash of the model file(s). + */ + hash: string; + /** + * Path + * @description Path to the model on the filesystem. Relative paths are relative to the Invoke root directory. + */ + path: string; + /** + * File Size + * @description The size of the model in bytes. + */ + file_size: number; + /** + * Name + * @description Name of the model. + */ + name: string; + /** + * Description + * @description Model description + */ + description: string | null; + /** + * Source + * @description The original source of the model (path, URL or repo_id). + */ + source: string; + /** @description The type of source */ + source_type: components["schemas"]["ModelSourceType"]; + /** + * Source Api Response + * @description The original API response from the source, as stringified JSON. 
+ */ + source_api_response: string | null; + /** + * Cover Image + * @description Url for image to preview model + */ + cover_image: string | null; + /** + * Type + * @default main + * @constant + */ + type: "main"; + /** + * Trigger Phrases + * @description Set of trigger phrases for this model + */ + trigger_phrases: string[] | null; + /** @description Default settings for this model */ + default_settings: components["schemas"]["MainModelDefaultSettings"] | null; + /** + * Config Path + * @description Path to the config for this model, if any. + */ + config_path: string | null; + /** + * Base + * @default anima + * @constant + */ + base: "anima"; + /** + * Format + * @default checkpoint + * @constant + */ + format: "checkpoint"; + }; /** * Main_Checkpoint_FLUX_Config * @description Model config for main checkpoint models. @@ -18095,7 +17421,6 @@ export type components = { * @constant */ format: "checkpoint"; - variant: components["schemas"]["ZImageVariantType"]; }; /** Main_Diffusers_CogView4_Config */ Main_Diffusers_CogView4_Config: { @@ -18738,7 +18063,7 @@ export type components = { }; /** * Main_Diffusers_ZImage_Config - * @description Model config for Z-Image diffusers models (Z-Image-Turbo, Z-Image-Base). + * @description Model config for Z-Image diffusers models (Z-Image-Turbo, Z-Image-Base, Z-Image-Edit). 
*/ Main_Diffusers_ZImage_Config: { /** @@ -18815,7 +18140,6 @@ export type components = { * @constant */ base: "z-image"; - variant: components["schemas"]["ZImageVariantType"]; }; /** * Main_GGUF_FLUX_Config @@ -19067,7 +18391,6 @@ export type components = { * @constant */ format: "gguf_quantized"; - variant: components["schemas"]["ZImageVariantType"]; }; /** * Combine Masks @@ -20897,7 +20220,7 @@ export type components = { * Config * @description The installed model's config */ - config: components["schemas"]["Main_Diffusers_SD1_Config"] | components["schemas"]["Main_Diffusers_SD2_Config"] | components["schemas"]["Main_Diffusers_SDXL_Config"] | components["schemas"]["Main_Diffusers_SDXLRefiner_Config"] | components["schemas"]["Main_Diffusers_SD3_Config"] | components["schemas"]["Main_Diffusers_FLUX_Config"] | components["schemas"]["Main_Diffusers_Flux2_Config"] | components["schemas"]["Main_Diffusers_CogView4_Config"] | components["schemas"]["Main_Diffusers_ZImage_Config"] | components["schemas"]["Main_Checkpoint_SD1_Config"] | components["schemas"]["Main_Checkpoint_SD2_Config"] | components["schemas"]["Main_Checkpoint_SDXL_Config"] | components["schemas"]["Main_Checkpoint_SDXLRefiner_Config"] | components["schemas"]["Main_Checkpoint_Flux2_Config"] | components["schemas"]["Main_Checkpoint_FLUX_Config"] | components["schemas"]["Main_Checkpoint_ZImage_Config"] | components["schemas"]["Main_BnBNF4_FLUX_Config"] | components["schemas"]["Main_GGUF_Flux2_Config"] | components["schemas"]["Main_GGUF_FLUX_Config"] | components["schemas"]["Main_GGUF_ZImage_Config"] | components["schemas"]["VAE_Checkpoint_SD1_Config"] | components["schemas"]["VAE_Checkpoint_SD2_Config"] | components["schemas"]["VAE_Checkpoint_SDXL_Config"] | components["schemas"]["VAE_Checkpoint_FLUX_Config"] | components["schemas"]["VAE_Checkpoint_Flux2_Config"] | components["schemas"]["VAE_Diffusers_SD1_Config"] | components["schemas"]["VAE_Diffusers_SDXL_Config"] | 
components["schemas"]["VAE_Diffusers_Flux2_Config"] | components["schemas"]["ControlNet_Checkpoint_SD1_Config"] | components["schemas"]["ControlNet_Checkpoint_SD2_Config"] | components["schemas"]["ControlNet_Checkpoint_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_FLUX_Config"] | components["schemas"]["ControlNet_Checkpoint_ZImage_Config"] | components["schemas"]["ControlNet_Diffusers_SD1_Config"] | components["schemas"]["ControlNet_Diffusers_SD2_Config"] | components["schemas"]["ControlNet_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_SD1_Config"] | components["schemas"]["LoRA_LyCORIS_SD2_Config"] | components["schemas"]["LoRA_LyCORIS_SDXL_Config"] | components["schemas"]["LoRA_LyCORIS_Flux2_Config"] | components["schemas"]["LoRA_LyCORIS_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_ZImage_Config"] | components["schemas"]["LoRA_OMI_SDXL_Config"] | components["schemas"]["LoRA_OMI_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_SD1_Config"] | components["schemas"]["LoRA_Diffusers_SD2_Config"] | components["schemas"]["LoRA_Diffusers_SDXL_Config"] | components["schemas"]["LoRA_Diffusers_Flux2_Config"] | components["schemas"]["LoRA_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_ZImage_Config"] | components["schemas"]["ControlLoRA_LyCORIS_FLUX_Config"] | components["schemas"]["T5Encoder_T5Encoder_Config"] | components["schemas"]["T5Encoder_BnBLLMint8_Config"] | components["schemas"]["Qwen3Encoder_Qwen3Encoder_Config"] | components["schemas"]["Qwen3Encoder_Checkpoint_Config"] | components["schemas"]["Qwen3Encoder_GGUF_Config"] | components["schemas"]["TI_File_SD1_Config"] | components["schemas"]["TI_File_SD2_Config"] | components["schemas"]["TI_File_SDXL_Config"] | components["schemas"]["TI_Folder_SD1_Config"] | components["schemas"]["TI_Folder_SD2_Config"] | components["schemas"]["TI_Folder_SDXL_Config"] | 
components["schemas"]["IPAdapter_InvokeAI_SD1_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD2_Config"] | components["schemas"]["IPAdapter_InvokeAI_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD1_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD2_Config"] | components["schemas"]["IPAdapter_Checkpoint_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_FLUX_Config"] | components["schemas"]["T2IAdapter_Diffusers_SD1_Config"] | components["schemas"]["T2IAdapter_Diffusers_SDXL_Config"] | components["schemas"]["Spandrel_Checkpoint_Config"] | components["schemas"]["CLIPEmbed_Diffusers_G_Config"] | components["schemas"]["CLIPEmbed_Diffusers_L_Config"] | components["schemas"]["CLIPVision_Diffusers_Config"] | components["schemas"]["SigLIP_Diffusers_Config"] | components["schemas"]["FLUXRedux_Checkpoint_Config"] | components["schemas"]["LlavaOnevision_Diffusers_Config"] | components["schemas"]["Unknown_Config"]; + config: components["schemas"]["Main_Diffusers_SD1_Config"] | components["schemas"]["Main_Diffusers_SD2_Config"] | components["schemas"]["Main_Diffusers_SDXL_Config"] | components["schemas"]["Main_Diffusers_SDXLRefiner_Config"] | components["schemas"]["Main_Diffusers_SD3_Config"] | components["schemas"]["Main_Diffusers_FLUX_Config"] | components["schemas"]["Main_Diffusers_Flux2_Config"] | components["schemas"]["Main_Diffusers_CogView4_Config"] | components["schemas"]["Main_Diffusers_ZImage_Config"] | components["schemas"]["Main_Checkpoint_SD1_Config"] | components["schemas"]["Main_Checkpoint_SD2_Config"] | components["schemas"]["Main_Checkpoint_SDXL_Config"] | components["schemas"]["Main_Checkpoint_SDXLRefiner_Config"] | components["schemas"]["Main_Checkpoint_Flux2_Config"] | components["schemas"]["Main_Checkpoint_FLUX_Config"] | components["schemas"]["Main_Checkpoint_ZImage_Config"] | components["schemas"]["Main_Checkpoint_Anima_Config"] | components["schemas"]["Main_BnBNF4_FLUX_Config"] | 
components["schemas"]["Main_GGUF_Flux2_Config"] | components["schemas"]["Main_GGUF_FLUX_Config"] | components["schemas"]["Main_GGUF_ZImage_Config"] | components["schemas"]["VAE_Checkpoint_SD1_Config"] | components["schemas"]["VAE_Checkpoint_SD2_Config"] | components["schemas"]["VAE_Checkpoint_SDXL_Config"] | components["schemas"]["VAE_Checkpoint_FLUX_Config"] | components["schemas"]["VAE_Checkpoint_Flux2_Config"] | components["schemas"]["VAE_Checkpoint_Anima_Config"] | components["schemas"]["VAE_Diffusers_SD1_Config"] | components["schemas"]["VAE_Diffusers_SDXL_Config"] | components["schemas"]["VAE_Diffusers_Flux2_Config"] | components["schemas"]["ControlNet_Checkpoint_SD1_Config"] | components["schemas"]["ControlNet_Checkpoint_SD2_Config"] | components["schemas"]["ControlNet_Checkpoint_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_FLUX_Config"] | components["schemas"]["ControlNet_Checkpoint_ZImage_Config"] | components["schemas"]["ControlNet_Diffusers_SD1_Config"] | components["schemas"]["ControlNet_Diffusers_SD2_Config"] | components["schemas"]["ControlNet_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_SD1_Config"] | components["schemas"]["LoRA_LyCORIS_SD2_Config"] | components["schemas"]["LoRA_LyCORIS_SDXL_Config"] | components["schemas"]["LoRA_LyCORIS_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_ZImage_Config"] | components["schemas"]["LoRA_OMI_SDXL_Config"] | components["schemas"]["LoRA_OMI_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_SD1_Config"] | components["schemas"]["LoRA_Diffusers_SD2_Config"] | components["schemas"]["LoRA_Diffusers_SDXL_Config"] | components["schemas"]["LoRA_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_ZImage_Config"] | components["schemas"]["ControlLoRA_LyCORIS_FLUX_Config"] | components["schemas"]["T5Encoder_T5Encoder_Config"] | components["schemas"]["T5Encoder_BnBLLMint8_Config"] | 
components["schemas"]["Qwen3Encoder_Qwen3Encoder_Config"] | components["schemas"]["Qwen3Encoder_Checkpoint_Config"] | components["schemas"]["Qwen3Encoder_GGUF_Config"] | components["schemas"]["TI_File_SD1_Config"] | components["schemas"]["TI_File_SD2_Config"] | components["schemas"]["TI_File_SDXL_Config"] | components["schemas"]["TI_Folder_SD1_Config"] | components["schemas"]["TI_Folder_SD2_Config"] | components["schemas"]["TI_Folder_SDXL_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD1_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD2_Config"] | components["schemas"]["IPAdapter_InvokeAI_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD1_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD2_Config"] | components["schemas"]["IPAdapter_Checkpoint_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_FLUX_Config"] | components["schemas"]["T2IAdapter_Diffusers_SD1_Config"] | components["schemas"]["T2IAdapter_Diffusers_SDXL_Config"] | components["schemas"]["Spandrel_Checkpoint_Config"] | components["schemas"]["CLIPEmbed_Diffusers_G_Config"] | components["schemas"]["CLIPEmbed_Diffusers_L_Config"] | components["schemas"]["CLIPVision_Diffusers_Config"] | components["schemas"]["SigLIP_Diffusers_Config"] | components["schemas"]["FLUXRedux_Checkpoint_Config"] | components["schemas"]["LlavaOnevision_Diffusers_Config"] | components["schemas"]["Unknown_Config"]; }; /** * ModelInstallDownloadProgressEvent @@ -21063,7 +20386,7 @@ export type components = { * Config Out * @description After successful installation, this will hold the configuration object. 
*/ - config_out?: (components["schemas"]["Main_Diffusers_SD1_Config"] | components["schemas"]["Main_Diffusers_SD2_Config"] | components["schemas"]["Main_Diffusers_SDXL_Config"] | components["schemas"]["Main_Diffusers_SDXLRefiner_Config"] | components["schemas"]["Main_Diffusers_SD3_Config"] | components["schemas"]["Main_Diffusers_FLUX_Config"] | components["schemas"]["Main_Diffusers_Flux2_Config"] | components["schemas"]["Main_Diffusers_CogView4_Config"] | components["schemas"]["Main_Diffusers_ZImage_Config"] | components["schemas"]["Main_Checkpoint_SD1_Config"] | components["schemas"]["Main_Checkpoint_SD2_Config"] | components["schemas"]["Main_Checkpoint_SDXL_Config"] | components["schemas"]["Main_Checkpoint_SDXLRefiner_Config"] | components["schemas"]["Main_Checkpoint_Flux2_Config"] | components["schemas"]["Main_Checkpoint_FLUX_Config"] | components["schemas"]["Main_Checkpoint_ZImage_Config"] | components["schemas"]["Main_BnBNF4_FLUX_Config"] | components["schemas"]["Main_GGUF_Flux2_Config"] | components["schemas"]["Main_GGUF_FLUX_Config"] | components["schemas"]["Main_GGUF_ZImage_Config"] | components["schemas"]["VAE_Checkpoint_SD1_Config"] | components["schemas"]["VAE_Checkpoint_SD2_Config"] | components["schemas"]["VAE_Checkpoint_SDXL_Config"] | components["schemas"]["VAE_Checkpoint_FLUX_Config"] | components["schemas"]["VAE_Checkpoint_Flux2_Config"] | components["schemas"]["VAE_Diffusers_SD1_Config"] | components["schemas"]["VAE_Diffusers_SDXL_Config"] | components["schemas"]["VAE_Diffusers_Flux2_Config"] | components["schemas"]["ControlNet_Checkpoint_SD1_Config"] | components["schemas"]["ControlNet_Checkpoint_SD2_Config"] | components["schemas"]["ControlNet_Checkpoint_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_FLUX_Config"] | components["schemas"]["ControlNet_Checkpoint_ZImage_Config"] | components["schemas"]["ControlNet_Diffusers_SD1_Config"] | components["schemas"]["ControlNet_Diffusers_SD2_Config"] | 
components["schemas"]["ControlNet_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_SD1_Config"] | components["schemas"]["LoRA_LyCORIS_SD2_Config"] | components["schemas"]["LoRA_LyCORIS_SDXL_Config"] | components["schemas"]["LoRA_LyCORIS_Flux2_Config"] | components["schemas"]["LoRA_LyCORIS_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_ZImage_Config"] | components["schemas"]["LoRA_OMI_SDXL_Config"] | components["schemas"]["LoRA_OMI_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_SD1_Config"] | components["schemas"]["LoRA_Diffusers_SD2_Config"] | components["schemas"]["LoRA_Diffusers_SDXL_Config"] | components["schemas"]["LoRA_Diffusers_Flux2_Config"] | components["schemas"]["LoRA_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_ZImage_Config"] | components["schemas"]["ControlLoRA_LyCORIS_FLUX_Config"] | components["schemas"]["T5Encoder_T5Encoder_Config"] | components["schemas"]["T5Encoder_BnBLLMint8_Config"] | components["schemas"]["Qwen3Encoder_Qwen3Encoder_Config"] | components["schemas"]["Qwen3Encoder_Checkpoint_Config"] | components["schemas"]["Qwen3Encoder_GGUF_Config"] | components["schemas"]["TI_File_SD1_Config"] | components["schemas"]["TI_File_SD2_Config"] | components["schemas"]["TI_File_SDXL_Config"] | components["schemas"]["TI_Folder_SD1_Config"] | components["schemas"]["TI_Folder_SD2_Config"] | components["schemas"]["TI_Folder_SDXL_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD1_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD2_Config"] | components["schemas"]["IPAdapter_InvokeAI_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD1_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD2_Config"] | components["schemas"]["IPAdapter_Checkpoint_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_FLUX_Config"] | components["schemas"]["T2IAdapter_Diffusers_SD1_Config"] | 
components["schemas"]["T2IAdapter_Diffusers_SDXL_Config"] | components["schemas"]["Spandrel_Checkpoint_Config"] | components["schemas"]["CLIPEmbed_Diffusers_G_Config"] | components["schemas"]["CLIPEmbed_Diffusers_L_Config"] | components["schemas"]["CLIPVision_Diffusers_Config"] | components["schemas"]["SigLIP_Diffusers_Config"] | components["schemas"]["FLUXRedux_Checkpoint_Config"] | components["schemas"]["LlavaOnevision_Diffusers_Config"] | components["schemas"]["Unknown_Config"]) | null; + config_out?: (components["schemas"]["Main_Diffusers_SD1_Config"] | components["schemas"]["Main_Diffusers_SD2_Config"] | components["schemas"]["Main_Diffusers_SDXL_Config"] | components["schemas"]["Main_Diffusers_SDXLRefiner_Config"] | components["schemas"]["Main_Diffusers_SD3_Config"] | components["schemas"]["Main_Diffusers_FLUX_Config"] | components["schemas"]["Main_Diffusers_Flux2_Config"] | components["schemas"]["Main_Diffusers_CogView4_Config"] | components["schemas"]["Main_Diffusers_ZImage_Config"] | components["schemas"]["Main_Checkpoint_SD1_Config"] | components["schemas"]["Main_Checkpoint_SD2_Config"] | components["schemas"]["Main_Checkpoint_SDXL_Config"] | components["schemas"]["Main_Checkpoint_SDXLRefiner_Config"] | components["schemas"]["Main_Checkpoint_Flux2_Config"] | components["schemas"]["Main_Checkpoint_FLUX_Config"] | components["schemas"]["Main_Checkpoint_ZImage_Config"] | components["schemas"]["Main_Checkpoint_Anima_Config"] | components["schemas"]["Main_BnBNF4_FLUX_Config"] | components["schemas"]["Main_GGUF_Flux2_Config"] | components["schemas"]["Main_GGUF_FLUX_Config"] | components["schemas"]["Main_GGUF_ZImage_Config"] | components["schemas"]["VAE_Checkpoint_SD1_Config"] | components["schemas"]["VAE_Checkpoint_SD2_Config"] | components["schemas"]["VAE_Checkpoint_SDXL_Config"] | components["schemas"]["VAE_Checkpoint_FLUX_Config"] | components["schemas"]["VAE_Checkpoint_Flux2_Config"] | components["schemas"]["VAE_Checkpoint_Anima_Config"] | 
components["schemas"]["VAE_Diffusers_SD1_Config"] | components["schemas"]["VAE_Diffusers_SDXL_Config"] | components["schemas"]["VAE_Diffusers_Flux2_Config"] | components["schemas"]["ControlNet_Checkpoint_SD1_Config"] | components["schemas"]["ControlNet_Checkpoint_SD2_Config"] | components["schemas"]["ControlNet_Checkpoint_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_FLUX_Config"] | components["schemas"]["ControlNet_Checkpoint_ZImage_Config"] | components["schemas"]["ControlNet_Diffusers_SD1_Config"] | components["schemas"]["ControlNet_Diffusers_SD2_Config"] | components["schemas"]["ControlNet_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_SD1_Config"] | components["schemas"]["LoRA_LyCORIS_SD2_Config"] | components["schemas"]["LoRA_LyCORIS_SDXL_Config"] | components["schemas"]["LoRA_LyCORIS_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_ZImage_Config"] | components["schemas"]["LoRA_OMI_SDXL_Config"] | components["schemas"]["LoRA_OMI_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_SD1_Config"] | components["schemas"]["LoRA_Diffusers_SD2_Config"] | components["schemas"]["LoRA_Diffusers_SDXL_Config"] | components["schemas"]["LoRA_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_ZImage_Config"] | components["schemas"]["ControlLoRA_LyCORIS_FLUX_Config"] | components["schemas"]["T5Encoder_T5Encoder_Config"] | components["schemas"]["T5Encoder_BnBLLMint8_Config"] | components["schemas"]["Qwen3Encoder_Qwen3Encoder_Config"] | components["schemas"]["Qwen3Encoder_Checkpoint_Config"] | components["schemas"]["Qwen3Encoder_GGUF_Config"] | components["schemas"]["TI_File_SD1_Config"] | components["schemas"]["TI_File_SD2_Config"] | components["schemas"]["TI_File_SDXL_Config"] | components["schemas"]["TI_Folder_SD1_Config"] | components["schemas"]["TI_Folder_SD2_Config"] | components["schemas"]["TI_Folder_SDXL_Config"] | 
components["schemas"]["IPAdapter_InvokeAI_SD1_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD2_Config"] | components["schemas"]["IPAdapter_InvokeAI_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD1_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD2_Config"] | components["schemas"]["IPAdapter_Checkpoint_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_FLUX_Config"] | components["schemas"]["T2IAdapter_Diffusers_SD1_Config"] | components["schemas"]["T2IAdapter_Diffusers_SDXL_Config"] | components["schemas"]["Spandrel_Checkpoint_Config"] | components["schemas"]["CLIPEmbed_Diffusers_G_Config"] | components["schemas"]["CLIPEmbed_Diffusers_L_Config"] | components["schemas"]["CLIPVision_Diffusers_Config"] | components["schemas"]["SigLIP_Diffusers_Config"] | components["schemas"]["FLUXRedux_Checkpoint_Config"] | components["schemas"]["LlavaOnevision_Diffusers_Config"] | components["schemas"]["Unknown_Config"]) | null; /** * Inplace * @description Leave model in its current location; otherwise install under models directory @@ -21149,7 +20472,7 @@ export type components = { * Config * @description The model's config */ - config: components["schemas"]["Main_Diffusers_SD1_Config"] | components["schemas"]["Main_Diffusers_SD2_Config"] | components["schemas"]["Main_Diffusers_SDXL_Config"] | components["schemas"]["Main_Diffusers_SDXLRefiner_Config"] | components["schemas"]["Main_Diffusers_SD3_Config"] | components["schemas"]["Main_Diffusers_FLUX_Config"] | components["schemas"]["Main_Diffusers_Flux2_Config"] | components["schemas"]["Main_Diffusers_CogView4_Config"] | components["schemas"]["Main_Diffusers_ZImage_Config"] | components["schemas"]["Main_Checkpoint_SD1_Config"] | components["schemas"]["Main_Checkpoint_SD2_Config"] | components["schemas"]["Main_Checkpoint_SDXL_Config"] | components["schemas"]["Main_Checkpoint_SDXLRefiner_Config"] | components["schemas"]["Main_Checkpoint_Flux2_Config"] | 
components["schemas"]["Main_Checkpoint_FLUX_Config"] | components["schemas"]["Main_Checkpoint_ZImage_Config"] | components["schemas"]["Main_BnBNF4_FLUX_Config"] | components["schemas"]["Main_GGUF_Flux2_Config"] | components["schemas"]["Main_GGUF_FLUX_Config"] | components["schemas"]["Main_GGUF_ZImage_Config"] | components["schemas"]["VAE_Checkpoint_SD1_Config"] | components["schemas"]["VAE_Checkpoint_SD2_Config"] | components["schemas"]["VAE_Checkpoint_SDXL_Config"] | components["schemas"]["VAE_Checkpoint_FLUX_Config"] | components["schemas"]["VAE_Checkpoint_Flux2_Config"] | components["schemas"]["VAE_Diffusers_SD1_Config"] | components["schemas"]["VAE_Diffusers_SDXL_Config"] | components["schemas"]["VAE_Diffusers_Flux2_Config"] | components["schemas"]["ControlNet_Checkpoint_SD1_Config"] | components["schemas"]["ControlNet_Checkpoint_SD2_Config"] | components["schemas"]["ControlNet_Checkpoint_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_FLUX_Config"] | components["schemas"]["ControlNet_Checkpoint_ZImage_Config"] | components["schemas"]["ControlNet_Diffusers_SD1_Config"] | components["schemas"]["ControlNet_Diffusers_SD2_Config"] | components["schemas"]["ControlNet_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_SD1_Config"] | components["schemas"]["LoRA_LyCORIS_SD2_Config"] | components["schemas"]["LoRA_LyCORIS_SDXL_Config"] | components["schemas"]["LoRA_LyCORIS_Flux2_Config"] | components["schemas"]["LoRA_LyCORIS_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_ZImage_Config"] | components["schemas"]["LoRA_OMI_SDXL_Config"] | components["schemas"]["LoRA_OMI_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_SD1_Config"] | components["schemas"]["LoRA_Diffusers_SD2_Config"] | components["schemas"]["LoRA_Diffusers_SDXL_Config"] | components["schemas"]["LoRA_Diffusers_Flux2_Config"] | components["schemas"]["LoRA_Diffusers_FLUX_Config"] | 
components["schemas"]["LoRA_Diffusers_ZImage_Config"] | components["schemas"]["ControlLoRA_LyCORIS_FLUX_Config"] | components["schemas"]["T5Encoder_T5Encoder_Config"] | components["schemas"]["T5Encoder_BnBLLMint8_Config"] | components["schemas"]["Qwen3Encoder_Qwen3Encoder_Config"] | components["schemas"]["Qwen3Encoder_Checkpoint_Config"] | components["schemas"]["Qwen3Encoder_GGUF_Config"] | components["schemas"]["TI_File_SD1_Config"] | components["schemas"]["TI_File_SD2_Config"] | components["schemas"]["TI_File_SDXL_Config"] | components["schemas"]["TI_Folder_SD1_Config"] | components["schemas"]["TI_Folder_SD2_Config"] | components["schemas"]["TI_Folder_SDXL_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD1_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD2_Config"] | components["schemas"]["IPAdapter_InvokeAI_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD1_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD2_Config"] | components["schemas"]["IPAdapter_Checkpoint_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_FLUX_Config"] | components["schemas"]["T2IAdapter_Diffusers_SD1_Config"] | components["schemas"]["T2IAdapter_Diffusers_SDXL_Config"] | components["schemas"]["Spandrel_Checkpoint_Config"] | components["schemas"]["CLIPEmbed_Diffusers_G_Config"] | components["schemas"]["CLIPEmbed_Diffusers_L_Config"] | components["schemas"]["CLIPVision_Diffusers_Config"] | components["schemas"]["SigLIP_Diffusers_Config"] | components["schemas"]["FLUXRedux_Checkpoint_Config"] | components["schemas"]["LlavaOnevision_Diffusers_Config"] | components["schemas"]["Unknown_Config"]; + config: components["schemas"]["Main_Diffusers_SD1_Config"] | components["schemas"]["Main_Diffusers_SD2_Config"] | components["schemas"]["Main_Diffusers_SDXL_Config"] | components["schemas"]["Main_Diffusers_SDXLRefiner_Config"] | components["schemas"]["Main_Diffusers_SD3_Config"] | components["schemas"]["Main_Diffusers_FLUX_Config"] | 
components["schemas"]["Main_Diffusers_Flux2_Config"] | components["schemas"]["Main_Diffusers_CogView4_Config"] | components["schemas"]["Main_Diffusers_ZImage_Config"] | components["schemas"]["Main_Checkpoint_SD1_Config"] | components["schemas"]["Main_Checkpoint_SD2_Config"] | components["schemas"]["Main_Checkpoint_SDXL_Config"] | components["schemas"]["Main_Checkpoint_SDXLRefiner_Config"] | components["schemas"]["Main_Checkpoint_Flux2_Config"] | components["schemas"]["Main_Checkpoint_FLUX_Config"] | components["schemas"]["Main_Checkpoint_ZImage_Config"] | components["schemas"]["Main_Checkpoint_Anima_Config"] | components["schemas"]["Main_BnBNF4_FLUX_Config"] | components["schemas"]["Main_GGUF_Flux2_Config"] | components["schemas"]["Main_GGUF_FLUX_Config"] | components["schemas"]["Main_GGUF_ZImage_Config"] | components["schemas"]["VAE_Checkpoint_SD1_Config"] | components["schemas"]["VAE_Checkpoint_SD2_Config"] | components["schemas"]["VAE_Checkpoint_SDXL_Config"] | components["schemas"]["VAE_Checkpoint_FLUX_Config"] | components["schemas"]["VAE_Checkpoint_Flux2_Config"] | components["schemas"]["VAE_Checkpoint_Anima_Config"] | components["schemas"]["VAE_Diffusers_SD1_Config"] | components["schemas"]["VAE_Diffusers_SDXL_Config"] | components["schemas"]["VAE_Diffusers_Flux2_Config"] | components["schemas"]["ControlNet_Checkpoint_SD1_Config"] | components["schemas"]["ControlNet_Checkpoint_SD2_Config"] | components["schemas"]["ControlNet_Checkpoint_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_FLUX_Config"] | components["schemas"]["ControlNet_Checkpoint_ZImage_Config"] | components["schemas"]["ControlNet_Diffusers_SD1_Config"] | components["schemas"]["ControlNet_Diffusers_SD2_Config"] | components["schemas"]["ControlNet_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_SD1_Config"] | components["schemas"]["LoRA_LyCORIS_SD2_Config"] | components["schemas"]["LoRA_LyCORIS_SDXL_Config"] | 
components["schemas"]["LoRA_LyCORIS_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_ZImage_Config"] | components["schemas"]["LoRA_OMI_SDXL_Config"] | components["schemas"]["LoRA_OMI_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_SD1_Config"] | components["schemas"]["LoRA_Diffusers_SD2_Config"] | components["schemas"]["LoRA_Diffusers_SDXL_Config"] | components["schemas"]["LoRA_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_ZImage_Config"] | components["schemas"]["ControlLoRA_LyCORIS_FLUX_Config"] | components["schemas"]["T5Encoder_T5Encoder_Config"] | components["schemas"]["T5Encoder_BnBLLMint8_Config"] | components["schemas"]["Qwen3Encoder_Qwen3Encoder_Config"] | components["schemas"]["Qwen3Encoder_Checkpoint_Config"] | components["schemas"]["Qwen3Encoder_GGUF_Config"] | components["schemas"]["TI_File_SD1_Config"] | components["schemas"]["TI_File_SD2_Config"] | components["schemas"]["TI_File_SDXL_Config"] | components["schemas"]["TI_Folder_SD1_Config"] | components["schemas"]["TI_Folder_SD2_Config"] | components["schemas"]["TI_Folder_SDXL_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD1_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD2_Config"] | components["schemas"]["IPAdapter_InvokeAI_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD1_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD2_Config"] | components["schemas"]["IPAdapter_Checkpoint_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_FLUX_Config"] | components["schemas"]["T2IAdapter_Diffusers_SD1_Config"] | components["schemas"]["T2IAdapter_Diffusers_SDXL_Config"] | components["schemas"]["Spandrel_Checkpoint_Config"] | components["schemas"]["CLIPEmbed_Diffusers_G_Config"] | components["schemas"]["CLIPEmbed_Diffusers_L_Config"] | components["schemas"]["CLIPVision_Diffusers_Config"] | components["schemas"]["SigLIP_Diffusers_Config"] | components["schemas"]["FLUXRedux_Checkpoint_Config"] | 
components["schemas"]["LlavaOnevision_Diffusers_Config"] | components["schemas"]["Unknown_Config"]; /** * @description The submodel type, if any * @default null @@ -21170,7 +20493,7 @@ export type components = { * Config * @description The model's config */ - config: components["schemas"]["Main_Diffusers_SD1_Config"] | components["schemas"]["Main_Diffusers_SD2_Config"] | components["schemas"]["Main_Diffusers_SDXL_Config"] | components["schemas"]["Main_Diffusers_SDXLRefiner_Config"] | components["schemas"]["Main_Diffusers_SD3_Config"] | components["schemas"]["Main_Diffusers_FLUX_Config"] | components["schemas"]["Main_Diffusers_Flux2_Config"] | components["schemas"]["Main_Diffusers_CogView4_Config"] | components["schemas"]["Main_Diffusers_ZImage_Config"] | components["schemas"]["Main_Checkpoint_SD1_Config"] | components["schemas"]["Main_Checkpoint_SD2_Config"] | components["schemas"]["Main_Checkpoint_SDXL_Config"] | components["schemas"]["Main_Checkpoint_SDXLRefiner_Config"] | components["schemas"]["Main_Checkpoint_Flux2_Config"] | components["schemas"]["Main_Checkpoint_FLUX_Config"] | components["schemas"]["Main_Checkpoint_ZImage_Config"] | components["schemas"]["Main_BnBNF4_FLUX_Config"] | components["schemas"]["Main_GGUF_Flux2_Config"] | components["schemas"]["Main_GGUF_FLUX_Config"] | components["schemas"]["Main_GGUF_ZImage_Config"] | components["schemas"]["VAE_Checkpoint_SD1_Config"] | components["schemas"]["VAE_Checkpoint_SD2_Config"] | components["schemas"]["VAE_Checkpoint_SDXL_Config"] | components["schemas"]["VAE_Checkpoint_FLUX_Config"] | components["schemas"]["VAE_Checkpoint_Flux2_Config"] | components["schemas"]["VAE_Diffusers_SD1_Config"] | components["schemas"]["VAE_Diffusers_SDXL_Config"] | components["schemas"]["VAE_Diffusers_Flux2_Config"] | components["schemas"]["ControlNet_Checkpoint_SD1_Config"] | components["schemas"]["ControlNet_Checkpoint_SD2_Config"] | components["schemas"]["ControlNet_Checkpoint_SDXL_Config"] | 
components["schemas"]["ControlNet_Checkpoint_FLUX_Config"] | components["schemas"]["ControlNet_Checkpoint_ZImage_Config"] | components["schemas"]["ControlNet_Diffusers_SD1_Config"] | components["schemas"]["ControlNet_Diffusers_SD2_Config"] | components["schemas"]["ControlNet_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_SD1_Config"] | components["schemas"]["LoRA_LyCORIS_SD2_Config"] | components["schemas"]["LoRA_LyCORIS_SDXL_Config"] | components["schemas"]["LoRA_LyCORIS_Flux2_Config"] | components["schemas"]["LoRA_LyCORIS_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_ZImage_Config"] | components["schemas"]["LoRA_OMI_SDXL_Config"] | components["schemas"]["LoRA_OMI_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_SD1_Config"] | components["schemas"]["LoRA_Diffusers_SD2_Config"] | components["schemas"]["LoRA_Diffusers_SDXL_Config"] | components["schemas"]["LoRA_Diffusers_Flux2_Config"] | components["schemas"]["LoRA_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_ZImage_Config"] | components["schemas"]["ControlLoRA_LyCORIS_FLUX_Config"] | components["schemas"]["T5Encoder_T5Encoder_Config"] | components["schemas"]["T5Encoder_BnBLLMint8_Config"] | components["schemas"]["Qwen3Encoder_Qwen3Encoder_Config"] | components["schemas"]["Qwen3Encoder_Checkpoint_Config"] | components["schemas"]["Qwen3Encoder_GGUF_Config"] | components["schemas"]["TI_File_SD1_Config"] | components["schemas"]["TI_File_SD2_Config"] | components["schemas"]["TI_File_SDXL_Config"] | components["schemas"]["TI_Folder_SD1_Config"] | components["schemas"]["TI_Folder_SD2_Config"] | components["schemas"]["TI_Folder_SDXL_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD1_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD2_Config"] | components["schemas"]["IPAdapter_InvokeAI_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD1_Config"] | 
components["schemas"]["IPAdapter_Checkpoint_SD2_Config"] | components["schemas"]["IPAdapter_Checkpoint_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_FLUX_Config"] | components["schemas"]["T2IAdapter_Diffusers_SD1_Config"] | components["schemas"]["T2IAdapter_Diffusers_SDXL_Config"] | components["schemas"]["Spandrel_Checkpoint_Config"] | components["schemas"]["CLIPEmbed_Diffusers_G_Config"] | components["schemas"]["CLIPEmbed_Diffusers_L_Config"] | components["schemas"]["CLIPVision_Diffusers_Config"] | components["schemas"]["SigLIP_Diffusers_Config"] | components["schemas"]["FLUXRedux_Checkpoint_Config"] | components["schemas"]["LlavaOnevision_Diffusers_Config"] | components["schemas"]["Unknown_Config"]; + config: components["schemas"]["Main_Diffusers_SD1_Config"] | components["schemas"]["Main_Diffusers_SD2_Config"] | components["schemas"]["Main_Diffusers_SDXL_Config"] | components["schemas"]["Main_Diffusers_SDXLRefiner_Config"] | components["schemas"]["Main_Diffusers_SD3_Config"] | components["schemas"]["Main_Diffusers_FLUX_Config"] | components["schemas"]["Main_Diffusers_Flux2_Config"] | components["schemas"]["Main_Diffusers_CogView4_Config"] | components["schemas"]["Main_Diffusers_ZImage_Config"] | components["schemas"]["Main_Checkpoint_SD1_Config"] | components["schemas"]["Main_Checkpoint_SD2_Config"] | components["schemas"]["Main_Checkpoint_SDXL_Config"] | components["schemas"]["Main_Checkpoint_SDXLRefiner_Config"] | components["schemas"]["Main_Checkpoint_Flux2_Config"] | components["schemas"]["Main_Checkpoint_FLUX_Config"] | components["schemas"]["Main_Checkpoint_ZImage_Config"] | components["schemas"]["Main_Checkpoint_Anima_Config"] | components["schemas"]["Main_BnBNF4_FLUX_Config"] | components["schemas"]["Main_GGUF_Flux2_Config"] | components["schemas"]["Main_GGUF_FLUX_Config"] | components["schemas"]["Main_GGUF_ZImage_Config"] | components["schemas"]["VAE_Checkpoint_SD1_Config"] | components["schemas"]["VAE_Checkpoint_SD2_Config"] | 
components["schemas"]["VAE_Checkpoint_SDXL_Config"] | components["schemas"]["VAE_Checkpoint_FLUX_Config"] | components["schemas"]["VAE_Checkpoint_Flux2_Config"] | components["schemas"]["VAE_Checkpoint_Anima_Config"] | components["schemas"]["VAE_Diffusers_SD1_Config"] | components["schemas"]["VAE_Diffusers_SDXL_Config"] | components["schemas"]["VAE_Diffusers_Flux2_Config"] | components["schemas"]["ControlNet_Checkpoint_SD1_Config"] | components["schemas"]["ControlNet_Checkpoint_SD2_Config"] | components["schemas"]["ControlNet_Checkpoint_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_FLUX_Config"] | components["schemas"]["ControlNet_Checkpoint_ZImage_Config"] | components["schemas"]["ControlNet_Diffusers_SD1_Config"] | components["schemas"]["ControlNet_Diffusers_SD2_Config"] | components["schemas"]["ControlNet_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_SD1_Config"] | components["schemas"]["LoRA_LyCORIS_SD2_Config"] | components["schemas"]["LoRA_LyCORIS_SDXL_Config"] | components["schemas"]["LoRA_LyCORIS_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_ZImage_Config"] | components["schemas"]["LoRA_OMI_SDXL_Config"] | components["schemas"]["LoRA_OMI_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_SD1_Config"] | components["schemas"]["LoRA_Diffusers_SD2_Config"] | components["schemas"]["LoRA_Diffusers_SDXL_Config"] | components["schemas"]["LoRA_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_ZImage_Config"] | components["schemas"]["ControlLoRA_LyCORIS_FLUX_Config"] | components["schemas"]["T5Encoder_T5Encoder_Config"] | components["schemas"]["T5Encoder_BnBLLMint8_Config"] | components["schemas"]["Qwen3Encoder_Qwen3Encoder_Config"] | components["schemas"]["Qwen3Encoder_Checkpoint_Config"] | components["schemas"]["Qwen3Encoder_GGUF_Config"] | components["schemas"]["TI_File_SD1_Config"] | components["schemas"]["TI_File_SD2_Config"] | 
components["schemas"]["TI_File_SDXL_Config"] | components["schemas"]["TI_Folder_SD1_Config"] | components["schemas"]["TI_Folder_SD2_Config"] | components["schemas"]["TI_Folder_SDXL_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD1_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD2_Config"] | components["schemas"]["IPAdapter_InvokeAI_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD1_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD2_Config"] | components["schemas"]["IPAdapter_Checkpoint_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_FLUX_Config"] | components["schemas"]["T2IAdapter_Diffusers_SD1_Config"] | components["schemas"]["T2IAdapter_Diffusers_SDXL_Config"] | components["schemas"]["Spandrel_Checkpoint_Config"] | components["schemas"]["CLIPEmbed_Diffusers_G_Config"] | components["schemas"]["CLIPEmbed_Diffusers_L_Config"] | components["schemas"]["CLIPVision_Diffusers_Config"] | components["schemas"]["SigLIP_Diffusers_Config"] | components["schemas"]["FLUXRedux_Checkpoint_Config"] | components["schemas"]["LlavaOnevision_Diffusers_Config"] | components["schemas"]["Unknown_Config"]; /** * @description The submodel type, if any * @default null @@ -21279,7 +20602,7 @@ export type components = { * Variant * @description The variant of the model. */ - variant?: components["schemas"]["ModelVariantType"] | components["schemas"]["ClipVariantType"] | components["schemas"]["FluxVariantType"] | components["schemas"]["Flux2VariantType"] | components["schemas"]["ZImageVariantType"] | components["schemas"]["Qwen3VariantType"] | null; + variant?: components["schemas"]["ModelVariantType"] | components["schemas"]["ClipVariantType"] | components["schemas"]["FluxVariantType"] | components["schemas"]["Flux2VariantType"] | components["schemas"]["Qwen3VariantType"] | null; /** @description The prediction type of the model. 
*/ prediction_type?: components["schemas"]["SchedulerPredictionType"] | null; /** @@ -21344,7 +20667,7 @@ export type components = { */ ModelsList: { /** Models */ - models: (components["schemas"]["Main_Diffusers_SD1_Config"] | components["schemas"]["Main_Diffusers_SD2_Config"] | components["schemas"]["Main_Diffusers_SDXL_Config"] | components["schemas"]["Main_Diffusers_SDXLRefiner_Config"] | components["schemas"]["Main_Diffusers_SD3_Config"] | components["schemas"]["Main_Diffusers_FLUX_Config"] | components["schemas"]["Main_Diffusers_Flux2_Config"] | components["schemas"]["Main_Diffusers_CogView4_Config"] | components["schemas"]["Main_Diffusers_ZImage_Config"] | components["schemas"]["Main_Checkpoint_SD1_Config"] | components["schemas"]["Main_Checkpoint_SD2_Config"] | components["schemas"]["Main_Checkpoint_SDXL_Config"] | components["schemas"]["Main_Checkpoint_SDXLRefiner_Config"] | components["schemas"]["Main_Checkpoint_Flux2_Config"] | components["schemas"]["Main_Checkpoint_FLUX_Config"] | components["schemas"]["Main_Checkpoint_ZImage_Config"] | components["schemas"]["Main_BnBNF4_FLUX_Config"] | components["schemas"]["Main_GGUF_Flux2_Config"] | components["schemas"]["Main_GGUF_FLUX_Config"] | components["schemas"]["Main_GGUF_ZImage_Config"] | components["schemas"]["VAE_Checkpoint_SD1_Config"] | components["schemas"]["VAE_Checkpoint_SD2_Config"] | components["schemas"]["VAE_Checkpoint_SDXL_Config"] | components["schemas"]["VAE_Checkpoint_FLUX_Config"] | components["schemas"]["VAE_Checkpoint_Flux2_Config"] | components["schemas"]["VAE_Diffusers_SD1_Config"] | components["schemas"]["VAE_Diffusers_SDXL_Config"] | components["schemas"]["VAE_Diffusers_Flux2_Config"] | components["schemas"]["ControlNet_Checkpoint_SD1_Config"] | components["schemas"]["ControlNet_Checkpoint_SD2_Config"] | components["schemas"]["ControlNet_Checkpoint_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_FLUX_Config"] | components["schemas"]["ControlNet_Checkpoint_ZImage_Config"] | 
components["schemas"]["ControlNet_Diffusers_SD1_Config"] | components["schemas"]["ControlNet_Diffusers_SD2_Config"] | components["schemas"]["ControlNet_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_SD1_Config"] | components["schemas"]["LoRA_LyCORIS_SD2_Config"] | components["schemas"]["LoRA_LyCORIS_SDXL_Config"] | components["schemas"]["LoRA_LyCORIS_Flux2_Config"] | components["schemas"]["LoRA_LyCORIS_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_ZImage_Config"] | components["schemas"]["LoRA_OMI_SDXL_Config"] | components["schemas"]["LoRA_OMI_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_SD1_Config"] | components["schemas"]["LoRA_Diffusers_SD2_Config"] | components["schemas"]["LoRA_Diffusers_SDXL_Config"] | components["schemas"]["LoRA_Diffusers_Flux2_Config"] | components["schemas"]["LoRA_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_ZImage_Config"] | components["schemas"]["ControlLoRA_LyCORIS_FLUX_Config"] | components["schemas"]["T5Encoder_T5Encoder_Config"] | components["schemas"]["T5Encoder_BnBLLMint8_Config"] | components["schemas"]["Qwen3Encoder_Qwen3Encoder_Config"] | components["schemas"]["Qwen3Encoder_Checkpoint_Config"] | components["schemas"]["Qwen3Encoder_GGUF_Config"] | components["schemas"]["TI_File_SD1_Config"] | components["schemas"]["TI_File_SD2_Config"] | components["schemas"]["TI_File_SDXL_Config"] | components["schemas"]["TI_Folder_SD1_Config"] | components["schemas"]["TI_Folder_SD2_Config"] | components["schemas"]["TI_Folder_SDXL_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD1_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD2_Config"] | components["schemas"]["IPAdapter_InvokeAI_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD1_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD2_Config"] | components["schemas"]["IPAdapter_Checkpoint_SDXL_Config"] | 
components["schemas"]["IPAdapter_Checkpoint_FLUX_Config"] | components["schemas"]["T2IAdapter_Diffusers_SD1_Config"] | components["schemas"]["T2IAdapter_Diffusers_SDXL_Config"] | components["schemas"]["Spandrel_Checkpoint_Config"] | components["schemas"]["CLIPEmbed_Diffusers_G_Config"] | components["schemas"]["CLIPEmbed_Diffusers_L_Config"] | components["schemas"]["CLIPVision_Diffusers_Config"] | components["schemas"]["SigLIP_Diffusers_Config"] | components["schemas"]["FLUXRedux_Checkpoint_Config"] | components["schemas"]["LlavaOnevision_Diffusers_Config"] | components["schemas"]["Unknown_Config"])[]; + models: (components["schemas"]["Main_Diffusers_SD1_Config"] | components["schemas"]["Main_Diffusers_SD2_Config"] | components["schemas"]["Main_Diffusers_SDXL_Config"] | components["schemas"]["Main_Diffusers_SDXLRefiner_Config"] | components["schemas"]["Main_Diffusers_SD3_Config"] | components["schemas"]["Main_Diffusers_FLUX_Config"] | components["schemas"]["Main_Diffusers_Flux2_Config"] | components["schemas"]["Main_Diffusers_CogView4_Config"] | components["schemas"]["Main_Diffusers_ZImage_Config"] | components["schemas"]["Main_Checkpoint_SD1_Config"] | components["schemas"]["Main_Checkpoint_SD2_Config"] | components["schemas"]["Main_Checkpoint_SDXL_Config"] | components["schemas"]["Main_Checkpoint_SDXLRefiner_Config"] | components["schemas"]["Main_Checkpoint_Flux2_Config"] | components["schemas"]["Main_Checkpoint_FLUX_Config"] | components["schemas"]["Main_Checkpoint_ZImage_Config"] | components["schemas"]["Main_Checkpoint_Anima_Config"] | components["schemas"]["Main_BnBNF4_FLUX_Config"] | components["schemas"]["Main_GGUF_Flux2_Config"] | components["schemas"]["Main_GGUF_FLUX_Config"] | components["schemas"]["Main_GGUF_ZImage_Config"] | components["schemas"]["VAE_Checkpoint_SD1_Config"] | components["schemas"]["VAE_Checkpoint_SD2_Config"] | components["schemas"]["VAE_Checkpoint_SDXL_Config"] | components["schemas"]["VAE_Checkpoint_FLUX_Config"] | 
components["schemas"]["VAE_Checkpoint_Flux2_Config"] | components["schemas"]["VAE_Checkpoint_Anima_Config"] | components["schemas"]["VAE_Diffusers_SD1_Config"] | components["schemas"]["VAE_Diffusers_SDXL_Config"] | components["schemas"]["VAE_Diffusers_Flux2_Config"] | components["schemas"]["ControlNet_Checkpoint_SD1_Config"] | components["schemas"]["ControlNet_Checkpoint_SD2_Config"] | components["schemas"]["ControlNet_Checkpoint_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_FLUX_Config"] | components["schemas"]["ControlNet_Checkpoint_ZImage_Config"] | components["schemas"]["ControlNet_Diffusers_SD1_Config"] | components["schemas"]["ControlNet_Diffusers_SD2_Config"] | components["schemas"]["ControlNet_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_SD1_Config"] | components["schemas"]["LoRA_LyCORIS_SD2_Config"] | components["schemas"]["LoRA_LyCORIS_SDXL_Config"] | components["schemas"]["LoRA_LyCORIS_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_ZImage_Config"] | components["schemas"]["LoRA_OMI_SDXL_Config"] | components["schemas"]["LoRA_OMI_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_SD1_Config"] | components["schemas"]["LoRA_Diffusers_SD2_Config"] | components["schemas"]["LoRA_Diffusers_SDXL_Config"] | components["schemas"]["LoRA_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_ZImage_Config"] | components["schemas"]["ControlLoRA_LyCORIS_FLUX_Config"] | components["schemas"]["T5Encoder_T5Encoder_Config"] | components["schemas"]["T5Encoder_BnBLLMint8_Config"] | components["schemas"]["Qwen3Encoder_Qwen3Encoder_Config"] | components["schemas"]["Qwen3Encoder_Checkpoint_Config"] | components["schemas"]["Qwen3Encoder_GGUF_Config"] | components["schemas"]["TI_File_SD1_Config"] | components["schemas"]["TI_File_SD2_Config"] | components["schemas"]["TI_File_SDXL_Config"] | components["schemas"]["TI_Folder_SD1_Config"] | 
components["schemas"]["TI_Folder_SD2_Config"] | components["schemas"]["TI_Folder_SDXL_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD1_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD2_Config"] | components["schemas"]["IPAdapter_InvokeAI_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD1_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD2_Config"] | components["schemas"]["IPAdapter_Checkpoint_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_FLUX_Config"] | components["schemas"]["T2IAdapter_Diffusers_SD1_Config"] | components["schemas"]["T2IAdapter_Diffusers_SDXL_Config"] | components["schemas"]["Spandrel_Checkpoint_Config"] | components["schemas"]["CLIPEmbed_Diffusers_G_Config"] | components["schemas"]["CLIPEmbed_Diffusers_L_Config"] | components["schemas"]["CLIPVision_Diffusers_Config"] | components["schemas"]["SigLIP_Diffusers_Config"] | components["schemas"]["FLUXRedux_Checkpoint_Config"] | components["schemas"]["LlavaOnevision_Diffusers_Config"] | components["schemas"]["Unknown_Config"])[]; }; /** * Multiply Integers @@ -22122,12 +21445,6 @@ export type components = { * @default null */ destination: string | null; - /** - * User Id - * @description The ID of the user who created the queue item - * @default system - */ - user_id: string; /** * Status * @description The new status of the queue item @@ -22472,7 +21789,7 @@ export type components = { * @description Qwen3 text encoder variants based on model size. * @enum {string} */ - Qwen3VariantType: "qwen3_4b" | "qwen3_8b"; + Qwen3VariantType: "qwen3_4b" | "qwen3_8b" | "qwen3_06b"; /** * Random Float * @description Outputs a single random float @@ -22708,160 +22025,6 @@ export type components = { */ type: "range_of_size"; }; - /** - * RecallParameter - * @description Request model for updating recallable parameters. 
- */ - RecallParameter: { - /** - * Positive Prompt - * @description Positive prompt text - */ - positive_prompt?: string | null; - /** - * Negative Prompt - * @description Negative prompt text - */ - negative_prompt?: string | null; - /** - * Model - * @description Main model name/identifier - */ - model?: string | null; - /** - * Refiner Model - * @description Refiner model name/identifier - */ - refiner_model?: string | null; - /** - * Vae Model - * @description VAE model name/identifier - */ - vae_model?: string | null; - /** - * Scheduler - * @description Scheduler name - */ - scheduler?: string | null; - /** - * Steps - * @description Number of generation steps - */ - steps?: number | null; - /** - * Refiner Steps - * @description Number of refiner steps - */ - refiner_steps?: number | null; - /** - * Cfg Scale - * @description CFG scale for guidance - */ - cfg_scale?: number | null; - /** - * Cfg Rescale Multiplier - * @description CFG rescale multiplier - */ - cfg_rescale_multiplier?: number | null; - /** - * Refiner Cfg Scale - * @description Refiner CFG scale - */ - refiner_cfg_scale?: number | null; - /** - * Guidance - * @description Guidance scale - */ - guidance?: number | null; - /** - * Width - * @description Image width in pixels - */ - width?: number | null; - /** - * Height - * @description Image height in pixels - */ - height?: number | null; - /** - * Seed - * @description Random seed - */ - seed?: number | null; - /** - * Denoise Strength - * @description Denoising strength - */ - denoise_strength?: number | null; - /** - * Refiner Denoise Start - * @description Refiner denoising start - */ - refiner_denoise_start?: number | null; - /** - * Clip Skip - * @description CLIP skip layers - */ - clip_skip?: number | null; - /** - * Seamless X - * @description Enable seamless X tiling - */ - seamless_x?: boolean | null; - /** - * Seamless Y - * @description Enable seamless Y tiling - */ - seamless_y?: boolean | null; - /** - * Refiner Positive 
Aesthetic Score - * @description Refiner positive aesthetic score - */ - refiner_positive_aesthetic_score?: number | null; - /** - * Refiner Negative Aesthetic Score - * @description Refiner negative aesthetic score - */ - refiner_negative_aesthetic_score?: number | null; - /** - * Loras - * @description List of LoRAs with their weights - */ - loras?: components["schemas"]["LoRARecallParameter"][] | null; - /** - * Control Layers - * @description List of control adapters (ControlNet, T2I Adapter, Control LoRA) with their settings - */ - control_layers?: components["schemas"]["ControlNetRecallParameter"][] | null; - /** - * Ip Adapters - * @description List of IP Adapters with their settings - */ - ip_adapters?: components["schemas"]["IPAdapterRecallParameter"][] | null; - }; - /** - * RecallParametersUpdatedEvent - * @description Event model for recall_parameters_updated - */ - RecallParametersUpdatedEvent: { - /** - * Timestamp - * @description The timestamp of the event - */ - timestamp: number; - /** - * Queue Id - * @description The ID of the queue - */ - queue_id: string; - /** - * Parameters - * @description The recall parameters that were updated - */ - parameters: { - [key: string]: unknown; - }; - }; /** * Create Rectangle Mask * @description Create a rectangular mask. 
@@ -24371,22 +23534,6 @@ export type components = { * @description The id of the queue with which this item is associated */ queue_id: string; - /** - * User Id - * @description The id of the user who created this queue item - * @default system - */ - user_id?: string; - /** - * User Display Name - * @description The display name of the user who created this queue item, if available - */ - user_display_name?: string | null; - /** - * User Email - * @description The email of the user who created this queue item, if available - */ - user_email?: string | null; /** * Field Values * @description The field values that were used for this queue item @@ -24454,71 +23601,6 @@ export type components = { * @description Total number of queue items */ total: number; - /** - * User Pending - * @description Number of queue items with status 'pending' for the current user - */ - user_pending?: number | null; - /** - * User In Progress - * @description Number of queue items with status 'in_progress' for the current user - */ - user_in_progress?: number | null; - }; - /** - * SetupRequest - * @description Request body for initial admin setup. - */ - SetupRequest: { - /** - * Email - * @description Admin email address - */ - email: string; - /** - * Display Name - * @description Admin display name - */ - display_name?: string | null; - /** - * Password - * @description Admin password - */ - password: string; - }; - /** - * SetupResponse - * @description Response from successful admin setup. - */ - SetupResponse: { - /** - * Success - * @description Whether setup was successful - */ - success: boolean; - /** @description Created admin user information */ - user: components["schemas"]["UserDTO"]; - }; - /** - * SetupStatusResponse - * @description Response for setup status check. 
- */ - SetupStatusResponse: { - /** - * Setup Required - * @description Whether initial setup is required - */ - setup_required: boolean; - /** - * Multiuser Enabled - * @description Whether multiuser mode is enabled - */ - multiuser_enabled: boolean; - /** - * Strict Password Checking - * @description Whether strict password requirements are enforced - */ - strict_password_checking: boolean; }; /** * Show Image @@ -25407,7 +24489,7 @@ export type components = { path_or_prefix: string; model_type: components["schemas"]["ModelType"]; /** Variant */ - variant?: components["schemas"]["ModelVariantType"] | components["schemas"]["ClipVariantType"] | components["schemas"]["FluxVariantType"] | components["schemas"]["Flux2VariantType"] | components["schemas"]["ZImageVariantType"] | components["schemas"]["Qwen3VariantType"] | null; + variant?: components["schemas"]["ModelVariantType"] | components["schemas"]["ClipVariantType"] | components["schemas"]["FluxVariantType"] | components["schemas"]["Flux2VariantType"] | components["schemas"]["Qwen3VariantType"] | null; }; /** * Subtract Integers @@ -26857,77 +25939,6 @@ export type components = { */ unstarred_images: string[]; }; - /** - * UserDTO - * @description User data transfer object. 
- */ - UserDTO: { - /** - * User Id - * @description Unique user identifier - */ - user_id: string; - /** - * Email - * @description User email address - */ - email: string; - /** - * Display Name - * @description Display name - */ - display_name?: string | null; - /** - * Is Admin - * @description Whether user has admin privileges - * @default false - */ - is_admin?: boolean; - /** - * Is Active - * @description Whether user account is active - * @default true - */ - is_active?: boolean; - /** - * Created At - * Format: date-time - * @description When the user was created - */ - created_at: string; - /** - * Updated At - * Format: date-time - * @description When the user was last updated - */ - updated_at: string; - /** - * Last Login At - * @description When user last logged in - */ - last_login_at?: string | null; - }; - /** - * UserProfileUpdateRequest - * @description Request body for a user to update their own profile. - */ - UserProfileUpdateRequest: { - /** - * Display Name - * @description New display name - */ - display_name?: string | null; - /** - * Current Password - * @description Current password (required when changing password) - */ - current_password?: string | null; - /** - * New Password - * @description New password - */ - new_password?: string | null; - }; /** VAEField */ VAEField: { /** @description Info to load vae submodel */ @@ -26990,6 +26001,82 @@ export type components = { */ type: "vae_output"; }; + /** + * VAE_Checkpoint_Anima_Config + * @description Model config for Anima QwenImage VAE checkpoint models (AutoencoderKLQwenImage). + */ + VAE_Checkpoint_Anima_Config: { + /** + * Key + * @description A unique key for this model. + */ + key: string; + /** + * Hash + * @description The hash of the model file(s). + */ + hash: string; + /** + * Path + * @description Path to the model on the filesystem. Relative paths are relative to the Invoke root directory. 
+ */ + path: string; + /** + * File Size + * @description The size of the model in bytes. + */ + file_size: number; + /** + * Name + * @description Name of the model. + */ + name: string; + /** + * Description + * @description Model description + */ + description: string | null; + /** + * Source + * @description The original source of the model (path, URL or repo_id). + */ + source: string; + /** @description The type of source */ + source_type: components["schemas"]["ModelSourceType"]; + /** + * Source Api Response + * @description The original API response from the source, as stringified JSON. + */ + source_api_response: string | null; + /** + * Cover Image + * @description Url for image to preview model + */ + cover_image: string | null; + /** + * Config Path + * @description Path to the config for this model, if any. + */ + config_path: string | null; + /** + * Type + * @default vae + * @constant + */ + type: "vae"; + /** + * Format + * @default checkpoint + * @constant + */ + format: "checkpoint"; + /** + * Base + * @default anima + * @constant + */ + base: "anima"; + }; /** VAE_Checkpoint_FLUX_Config */ VAE_Checkpoint_FLUX_Config: { /** @@ -28112,7 +27199,7 @@ export type components = { vae?: components["schemas"]["VAEField"] | null; /** * Scheduler - * @description Scheduler (sampler) for the denoising process. Euler is the default and recommended. Heun is 2nd-order (better quality, 2x slower). LCM works with Turbo only (not Base). + * @description Scheduler (sampler) for the denoising process. Euler is the default and recommended for Z-Image-Turbo. Heun is 2nd-order (better quality, 2x slower). LCM is optimized for few steps. * @default euler * @enum {string} */ @@ -28239,7 +27326,7 @@ export type components = { vae?: components["schemas"]["VAEField"] | null; /** * Scheduler - * @description Scheduler (sampler) for the denoising process. Euler is the default and recommended. Heun is 2nd-order (better quality, 2x slower). 
LCM works with Turbo only (not Base). + * @description Scheduler (sampler) for the denoising process. Euler is the default and recommended for Z-Image-Turbo. Heun is 2nd-order (better quality, 2x slower). LCM is optimized for few steps. * @default euler * @enum {string} */ @@ -28666,351 +27753,15 @@ export type components = { */ type: "z_image_text_encoder"; }; - /** - * ZImageVariantType - * @description Z-Image model variants. - * @enum {string} - */ - ZImageVariantType: "turbo" | "zbase"; }; responses: never; - parameters: never; - requestBodies: never; - headers: never; - pathItems: never; -}; -export type $defs = Record; -export interface operations { - get_setup_status_api_v1_auth_status_get: { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - requestBody?: never; - responses: { - /** @description Successful Response */ - 200: { - headers: { - [name: string]: unknown; - }; - content: { - "application/json": components["schemas"]["SetupStatusResponse"]; - }; - }; - }; - }; - login_api_v1_auth_login_post: { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - requestBody: { - content: { - "application/json": components["schemas"]["LoginRequest"]; - }; - }; - responses: { - /** @description Successful Response */ - 200: { - headers: { - [name: string]: unknown; - }; - content: { - "application/json": components["schemas"]["LoginResponse"]; - }; - }; - /** @description Validation Error */ - 422: { - headers: { - [name: string]: unknown; - }; - content: { - "application/json": components["schemas"]["HTTPValidationError"]; - }; - }; - }; - }; - logout_api_v1_auth_logout_post: { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - requestBody?: never; - responses: { - /** @description Successful Response */ - 200: { - headers: { - [name: string]: unknown; - }; - content: { - "application/json": components["schemas"]["LogoutResponse"]; - }; - }; - 
}; - }; - get_current_user_info_api_v1_auth_me_get: { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - requestBody?: never; - responses: { - /** @description Successful Response */ - 200: { - headers: { - [name: string]: unknown; - }; - content: { - "application/json": components["schemas"]["UserDTO"]; - }; - }; - }; - }; - update_current_user_api_v1_auth_me_patch: { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - requestBody: { - content: { - "application/json": components["schemas"]["UserProfileUpdateRequest"]; - }; - }; - responses: { - /** @description Successful Response */ - 200: { - headers: { - [name: string]: unknown; - }; - content: { - "application/json": components["schemas"]["UserDTO"]; - }; - }; - /** @description Validation Error */ - 422: { - headers: { - [name: string]: unknown; - }; - content: { - "application/json": components["schemas"]["HTTPValidationError"]; - }; - }; - }; - }; - setup_admin_api_v1_auth_setup_post: { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - requestBody: { - content: { - "application/json": components["schemas"]["SetupRequest"]; - }; - }; - responses: { - /** @description Successful Response */ - 200: { - headers: { - [name: string]: unknown; - }; - content: { - "application/json": components["schemas"]["SetupResponse"]; - }; - }; - /** @description Validation Error */ - 422: { - headers: { - [name: string]: unknown; - }; - content: { - "application/json": components["schemas"]["HTTPValidationError"]; - }; - }; - }; - }; - generate_password_api_v1_auth_generate_password_get: { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - requestBody?: never; - responses: { - /** @description Successful Response */ - 200: { - headers: { - [name: string]: unknown; - }; - content: { - "application/json": components["schemas"]["GeneratePasswordResponse"]; - }; - }; - 
}; - }; - list_users_api_v1_auth_users_get: { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - requestBody?: never; - responses: { - /** @description Successful Response */ - 200: { - headers: { - [name: string]: unknown; - }; - content: { - "application/json": components["schemas"]["UserDTO"][]; - }; - }; - }; - }; - create_user_api_v1_auth_users_post: { - parameters: { - query?: never; - header?: never; - path?: never; - cookie?: never; - }; - requestBody: { - content: { - "application/json": components["schemas"]["AdminUserCreateRequest"]; - }; - }; - responses: { - /** @description Successful Response */ - 201: { - headers: { - [name: string]: unknown; - }; - content: { - "application/json": components["schemas"]["UserDTO"]; - }; - }; - /** @description Validation Error */ - 422: { - headers: { - [name: string]: unknown; - }; - content: { - "application/json": components["schemas"]["HTTPValidationError"]; - }; - }; - }; - }; - get_user_api_v1_auth_users__user_id__get: { - parameters: { - query?: never; - header?: never; - path: { - /** @description User ID */ - user_id: string; - }; - cookie?: never; - }; - requestBody?: never; - responses: { - /** @description Successful Response */ - 200: { - headers: { - [name: string]: unknown; - }; - content: { - "application/json": components["schemas"]["UserDTO"]; - }; - }; - /** @description Validation Error */ - 422: { - headers: { - [name: string]: unknown; - }; - content: { - "application/json": components["schemas"]["HTTPValidationError"]; - }; - }; - }; - }; - delete_user_api_v1_auth_users__user_id__delete: { - parameters: { - query?: never; - header?: never; - path: { - /** @description User ID */ - user_id: string; - }; - cookie?: never; - }; - requestBody?: never; - responses: { - /** @description Successful Response */ - 204: { - headers: { - [name: string]: unknown; - }; - content?: never; - }; - /** @description Validation Error */ - 422: { - headers: { - [name: 
string]: unknown; - }; - content: { - "application/json": components["schemas"]["HTTPValidationError"]; - }; - }; - }; - }; - update_user_api_v1_auth_users__user_id__patch: { - parameters: { - query?: never; - header?: never; - path: { - /** @description User ID */ - user_id: string; - }; - cookie?: never; - }; - requestBody: { - content: { - "application/json": components["schemas"]["AdminUserUpdateRequest"]; - }; - }; - responses: { - /** @description Successful Response */ - 200: { - headers: { - [name: string]: unknown; - }; - content: { - "application/json": components["schemas"]["UserDTO"]; - }; - }; - /** @description Validation Error */ - 422: { - headers: { - [name: string]: unknown; - }; - content: { - "application/json": components["schemas"]["HTTPValidationError"]; - }; - }; - }; - }; + parameters: never; + requestBodies: never; + headers: never; + pathItems: never; +}; +export type $defs = Record; +export interface operations { parse_dynamicprompts: { parameters: { query?: never; @@ -29124,39 +27875,7 @@ export interface operations { [name: string]: unknown; }; content: { - "application/json": components["schemas"]["Main_Diffusers_SD1_Config"] | components["schemas"]["Main_Diffusers_SD2_Config"] | components["schemas"]["Main_Diffusers_SDXL_Config"] | components["schemas"]["Main_Diffusers_SDXLRefiner_Config"] | components["schemas"]["Main_Diffusers_SD3_Config"] | components["schemas"]["Main_Diffusers_FLUX_Config"] | components["schemas"]["Main_Diffusers_Flux2_Config"] | components["schemas"]["Main_Diffusers_CogView4_Config"] | components["schemas"]["Main_Diffusers_ZImage_Config"] | components["schemas"]["Main_Checkpoint_SD1_Config"] | components["schemas"]["Main_Checkpoint_SD2_Config"] | components["schemas"]["Main_Checkpoint_SDXL_Config"] | components["schemas"]["Main_Checkpoint_SDXLRefiner_Config"] | components["schemas"]["Main_Checkpoint_Flux2_Config"] | components["schemas"]["Main_Checkpoint_FLUX_Config"] | 
components["schemas"]["Main_Checkpoint_ZImage_Config"] | components["schemas"]["Main_BnBNF4_FLUX_Config"] | components["schemas"]["Main_GGUF_Flux2_Config"] | components["schemas"]["Main_GGUF_FLUX_Config"] | components["schemas"]["Main_GGUF_ZImage_Config"] | components["schemas"]["VAE_Checkpoint_SD1_Config"] | components["schemas"]["VAE_Checkpoint_SD2_Config"] | components["schemas"]["VAE_Checkpoint_SDXL_Config"] | components["schemas"]["VAE_Checkpoint_FLUX_Config"] | components["schemas"]["VAE_Checkpoint_Flux2_Config"] | components["schemas"]["VAE_Diffusers_SD1_Config"] | components["schemas"]["VAE_Diffusers_SDXL_Config"] | components["schemas"]["VAE_Diffusers_Flux2_Config"] | components["schemas"]["ControlNet_Checkpoint_SD1_Config"] | components["schemas"]["ControlNet_Checkpoint_SD2_Config"] | components["schemas"]["ControlNet_Checkpoint_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_FLUX_Config"] | components["schemas"]["ControlNet_Checkpoint_ZImage_Config"] | components["schemas"]["ControlNet_Diffusers_SD1_Config"] | components["schemas"]["ControlNet_Diffusers_SD2_Config"] | components["schemas"]["ControlNet_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_SD1_Config"] | components["schemas"]["LoRA_LyCORIS_SD2_Config"] | components["schemas"]["LoRA_LyCORIS_SDXL_Config"] | components["schemas"]["LoRA_LyCORIS_Flux2_Config"] | components["schemas"]["LoRA_LyCORIS_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_ZImage_Config"] | components["schemas"]["LoRA_OMI_SDXL_Config"] | components["schemas"]["LoRA_OMI_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_SD1_Config"] | components["schemas"]["LoRA_Diffusers_SD2_Config"] | components["schemas"]["LoRA_Diffusers_SDXL_Config"] | components["schemas"]["LoRA_Diffusers_Flux2_Config"] | components["schemas"]["LoRA_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_ZImage_Config"] | 
components["schemas"]["ControlLoRA_LyCORIS_FLUX_Config"] | components["schemas"]["T5Encoder_T5Encoder_Config"] | components["schemas"]["T5Encoder_BnBLLMint8_Config"] | components["schemas"]["Qwen3Encoder_Qwen3Encoder_Config"] | components["schemas"]["Qwen3Encoder_Checkpoint_Config"] | components["schemas"]["Qwen3Encoder_GGUF_Config"] | components["schemas"]["TI_File_SD1_Config"] | components["schemas"]["TI_File_SD2_Config"] | components["schemas"]["TI_File_SDXL_Config"] | components["schemas"]["TI_Folder_SD1_Config"] | components["schemas"]["TI_Folder_SD2_Config"] | components["schemas"]["TI_Folder_SDXL_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD1_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD2_Config"] | components["schemas"]["IPAdapter_InvokeAI_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD1_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD2_Config"] | components["schemas"]["IPAdapter_Checkpoint_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_FLUX_Config"] | components["schemas"]["T2IAdapter_Diffusers_SD1_Config"] | components["schemas"]["T2IAdapter_Diffusers_SDXL_Config"] | components["schemas"]["Spandrel_Checkpoint_Config"] | components["schemas"]["CLIPEmbed_Diffusers_G_Config"] | components["schemas"]["CLIPEmbed_Diffusers_L_Config"] | components["schemas"]["CLIPVision_Diffusers_Config"] | components["schemas"]["SigLIP_Diffusers_Config"] | components["schemas"]["FLUXRedux_Checkpoint_Config"] | components["schemas"]["LlavaOnevision_Diffusers_Config"] | components["schemas"]["Unknown_Config"]; - }; - }; - /** @description Validation Error */ - 422: { - headers: { - [name: string]: unknown; - }; - content: { - "application/json": components["schemas"]["HTTPValidationError"]; - }; - }; - }; - }; - get_model_records_by_hash: { - parameters: { - query: { - /** @description The hash of the model */ - hash: string; - }; - header?: never; - path?: never; - cookie?: never; - }; - requestBody?: never; - responses: { 
- /** @description Successful Response */ - 200: { - headers: { - [name: string]: unknown; - }; - content: { - "application/json": components["schemas"]["Main_Diffusers_SD1_Config"] | components["schemas"]["Main_Diffusers_SD2_Config"] | components["schemas"]["Main_Diffusers_SDXL_Config"] | components["schemas"]["Main_Diffusers_SDXLRefiner_Config"] | components["schemas"]["Main_Diffusers_SD3_Config"] | components["schemas"]["Main_Diffusers_FLUX_Config"] | components["schemas"]["Main_Diffusers_Flux2_Config"] | components["schemas"]["Main_Diffusers_CogView4_Config"] | components["schemas"]["Main_Diffusers_ZImage_Config"] | components["schemas"]["Main_Checkpoint_SD1_Config"] | components["schemas"]["Main_Checkpoint_SD2_Config"] | components["schemas"]["Main_Checkpoint_SDXL_Config"] | components["schemas"]["Main_Checkpoint_SDXLRefiner_Config"] | components["schemas"]["Main_Checkpoint_Flux2_Config"] | components["schemas"]["Main_Checkpoint_FLUX_Config"] | components["schemas"]["Main_Checkpoint_ZImage_Config"] | components["schemas"]["Main_BnBNF4_FLUX_Config"] | components["schemas"]["Main_GGUF_Flux2_Config"] | components["schemas"]["Main_GGUF_FLUX_Config"] | components["schemas"]["Main_GGUF_ZImage_Config"] | components["schemas"]["VAE_Checkpoint_SD1_Config"] | components["schemas"]["VAE_Checkpoint_SD2_Config"] | components["schemas"]["VAE_Checkpoint_SDXL_Config"] | components["schemas"]["VAE_Checkpoint_FLUX_Config"] | components["schemas"]["VAE_Checkpoint_Flux2_Config"] | components["schemas"]["VAE_Diffusers_SD1_Config"] | components["schemas"]["VAE_Diffusers_SDXL_Config"] | components["schemas"]["VAE_Diffusers_Flux2_Config"] | components["schemas"]["ControlNet_Checkpoint_SD1_Config"] | components["schemas"]["ControlNet_Checkpoint_SD2_Config"] | components["schemas"]["ControlNet_Checkpoint_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_FLUX_Config"] | components["schemas"]["ControlNet_Checkpoint_ZImage_Config"] | 
components["schemas"]["ControlNet_Diffusers_SD1_Config"] | components["schemas"]["ControlNet_Diffusers_SD2_Config"] | components["schemas"]["ControlNet_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_SD1_Config"] | components["schemas"]["LoRA_LyCORIS_SD2_Config"] | components["schemas"]["LoRA_LyCORIS_SDXL_Config"] | components["schemas"]["LoRA_LyCORIS_Flux2_Config"] | components["schemas"]["LoRA_LyCORIS_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_ZImage_Config"] | components["schemas"]["LoRA_OMI_SDXL_Config"] | components["schemas"]["LoRA_OMI_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_SD1_Config"] | components["schemas"]["LoRA_Diffusers_SD2_Config"] | components["schemas"]["LoRA_Diffusers_SDXL_Config"] | components["schemas"]["LoRA_Diffusers_Flux2_Config"] | components["schemas"]["LoRA_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_ZImage_Config"] | components["schemas"]["ControlLoRA_LyCORIS_FLUX_Config"] | components["schemas"]["T5Encoder_T5Encoder_Config"] | components["schemas"]["T5Encoder_BnBLLMint8_Config"] | components["schemas"]["Qwen3Encoder_Qwen3Encoder_Config"] | components["schemas"]["Qwen3Encoder_Checkpoint_Config"] | components["schemas"]["Qwen3Encoder_GGUF_Config"] | components["schemas"]["TI_File_SD1_Config"] | components["schemas"]["TI_File_SD2_Config"] | components["schemas"]["TI_File_SDXL_Config"] | components["schemas"]["TI_Folder_SD1_Config"] | components["schemas"]["TI_Folder_SD2_Config"] | components["schemas"]["TI_Folder_SDXL_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD1_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD2_Config"] | components["schemas"]["IPAdapter_InvokeAI_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD1_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD2_Config"] | components["schemas"]["IPAdapter_Checkpoint_SDXL_Config"] | 
components["schemas"]["IPAdapter_Checkpoint_FLUX_Config"] | components["schemas"]["T2IAdapter_Diffusers_SD1_Config"] | components["schemas"]["T2IAdapter_Diffusers_SDXL_Config"] | components["schemas"]["Spandrel_Checkpoint_Config"] | components["schemas"]["CLIPEmbed_Diffusers_G_Config"] | components["schemas"]["CLIPEmbed_Diffusers_L_Config"] | components["schemas"]["CLIPVision_Diffusers_Config"] | components["schemas"]["SigLIP_Diffusers_Config"] | components["schemas"]["FLUXRedux_Checkpoint_Config"] | components["schemas"]["LlavaOnevision_Diffusers_Config"] | components["schemas"]["Unknown_Config"]; + "application/json": components["schemas"]["Main_Diffusers_SD1_Config"] | components["schemas"]["Main_Diffusers_SD2_Config"] | components["schemas"]["Main_Diffusers_SDXL_Config"] | components["schemas"]["Main_Diffusers_SDXLRefiner_Config"] | components["schemas"]["Main_Diffusers_SD3_Config"] | components["schemas"]["Main_Diffusers_FLUX_Config"] | components["schemas"]["Main_Diffusers_Flux2_Config"] | components["schemas"]["Main_Diffusers_CogView4_Config"] | components["schemas"]["Main_Diffusers_ZImage_Config"] | components["schemas"]["Main_Checkpoint_SD1_Config"] | components["schemas"]["Main_Checkpoint_SD2_Config"] | components["schemas"]["Main_Checkpoint_SDXL_Config"] | components["schemas"]["Main_Checkpoint_SDXLRefiner_Config"] | components["schemas"]["Main_Checkpoint_Flux2_Config"] | components["schemas"]["Main_Checkpoint_FLUX_Config"] | components["schemas"]["Main_Checkpoint_ZImage_Config"] | components["schemas"]["Main_Checkpoint_Anima_Config"] | components["schemas"]["Main_BnBNF4_FLUX_Config"] | components["schemas"]["Main_GGUF_Flux2_Config"] | components["schemas"]["Main_GGUF_FLUX_Config"] | components["schemas"]["Main_GGUF_ZImage_Config"] | components["schemas"]["VAE_Checkpoint_SD1_Config"] | components["schemas"]["VAE_Checkpoint_SD2_Config"] | components["schemas"]["VAE_Checkpoint_SDXL_Config"] | components["schemas"]["VAE_Checkpoint_FLUX_Config"] | 
components["schemas"]["VAE_Checkpoint_Flux2_Config"] | components["schemas"]["VAE_Checkpoint_Anima_Config"] | components["schemas"]["VAE_Diffusers_SD1_Config"] | components["schemas"]["VAE_Diffusers_SDXL_Config"] | components["schemas"]["VAE_Diffusers_Flux2_Config"] | components["schemas"]["ControlNet_Checkpoint_SD1_Config"] | components["schemas"]["ControlNet_Checkpoint_SD2_Config"] | components["schemas"]["ControlNet_Checkpoint_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_FLUX_Config"] | components["schemas"]["ControlNet_Checkpoint_ZImage_Config"] | components["schemas"]["ControlNet_Diffusers_SD1_Config"] | components["schemas"]["ControlNet_Diffusers_SD2_Config"] | components["schemas"]["ControlNet_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_SD1_Config"] | components["schemas"]["LoRA_LyCORIS_SD2_Config"] | components["schemas"]["LoRA_LyCORIS_SDXL_Config"] | components["schemas"]["LoRA_LyCORIS_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_ZImage_Config"] | components["schemas"]["LoRA_OMI_SDXL_Config"] | components["schemas"]["LoRA_OMI_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_SD1_Config"] | components["schemas"]["LoRA_Diffusers_SD2_Config"] | components["schemas"]["LoRA_Diffusers_SDXL_Config"] | components["schemas"]["LoRA_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_ZImage_Config"] | components["schemas"]["ControlLoRA_LyCORIS_FLUX_Config"] | components["schemas"]["T5Encoder_T5Encoder_Config"] | components["schemas"]["T5Encoder_BnBLLMint8_Config"] | components["schemas"]["Qwen3Encoder_Qwen3Encoder_Config"] | components["schemas"]["Qwen3Encoder_Checkpoint_Config"] | components["schemas"]["Qwen3Encoder_GGUF_Config"] | components["schemas"]["TI_File_SD1_Config"] | components["schemas"]["TI_File_SD2_Config"] | components["schemas"]["TI_File_SDXL_Config"] | components["schemas"]["TI_Folder_SD1_Config"] | 
components["schemas"]["TI_Folder_SD2_Config"] | components["schemas"]["TI_Folder_SDXL_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD1_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD2_Config"] | components["schemas"]["IPAdapter_InvokeAI_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD1_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD2_Config"] | components["schemas"]["IPAdapter_Checkpoint_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_FLUX_Config"] | components["schemas"]["T2IAdapter_Diffusers_SD1_Config"] | components["schemas"]["T2IAdapter_Diffusers_SDXL_Config"] | components["schemas"]["Spandrel_Checkpoint_Config"] | components["schemas"]["CLIPEmbed_Diffusers_G_Config"] | components["schemas"]["CLIPEmbed_Diffusers_L_Config"] | components["schemas"]["CLIPVision_Diffusers_Config"] | components["schemas"]["SigLIP_Diffusers_Config"] | components["schemas"]["FLUXRedux_Checkpoint_Config"] | components["schemas"]["LlavaOnevision_Diffusers_Config"] | components["schemas"]["Unknown_Config"]; }; }; /** @description Validation Error */ @@ -29206,7 +27925,7 @@ export interface operations { * "repo_variant": "fp16", * "upcast_attention": false * } */ - "application/json": components["schemas"]["Main_Diffusers_SD1_Config"] | components["schemas"]["Main_Diffusers_SD2_Config"] | components["schemas"]["Main_Diffusers_SDXL_Config"] | components["schemas"]["Main_Diffusers_SDXLRefiner_Config"] | components["schemas"]["Main_Diffusers_SD3_Config"] | components["schemas"]["Main_Diffusers_FLUX_Config"] | components["schemas"]["Main_Diffusers_Flux2_Config"] | components["schemas"]["Main_Diffusers_CogView4_Config"] | components["schemas"]["Main_Diffusers_ZImage_Config"] | components["schemas"]["Main_Checkpoint_SD1_Config"] | components["schemas"]["Main_Checkpoint_SD2_Config"] | components["schemas"]["Main_Checkpoint_SDXL_Config"] | components["schemas"]["Main_Checkpoint_SDXLRefiner_Config"] | 
components["schemas"]["Main_Checkpoint_Flux2_Config"] | components["schemas"]["Main_Checkpoint_FLUX_Config"] | components["schemas"]["Main_Checkpoint_ZImage_Config"] | components["schemas"]["Main_BnBNF4_FLUX_Config"] | components["schemas"]["Main_GGUF_Flux2_Config"] | components["schemas"]["Main_GGUF_FLUX_Config"] | components["schemas"]["Main_GGUF_ZImage_Config"] | components["schemas"]["VAE_Checkpoint_SD1_Config"] | components["schemas"]["VAE_Checkpoint_SD2_Config"] | components["schemas"]["VAE_Checkpoint_SDXL_Config"] | components["schemas"]["VAE_Checkpoint_FLUX_Config"] | components["schemas"]["VAE_Checkpoint_Flux2_Config"] | components["schemas"]["VAE_Diffusers_SD1_Config"] | components["schemas"]["VAE_Diffusers_SDXL_Config"] | components["schemas"]["VAE_Diffusers_Flux2_Config"] | components["schemas"]["ControlNet_Checkpoint_SD1_Config"] | components["schemas"]["ControlNet_Checkpoint_SD2_Config"] | components["schemas"]["ControlNet_Checkpoint_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_FLUX_Config"] | components["schemas"]["ControlNet_Checkpoint_ZImage_Config"] | components["schemas"]["ControlNet_Diffusers_SD1_Config"] | components["schemas"]["ControlNet_Diffusers_SD2_Config"] | components["schemas"]["ControlNet_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_SD1_Config"] | components["schemas"]["LoRA_LyCORIS_SD2_Config"] | components["schemas"]["LoRA_LyCORIS_SDXL_Config"] | components["schemas"]["LoRA_LyCORIS_Flux2_Config"] | components["schemas"]["LoRA_LyCORIS_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_ZImage_Config"] | components["schemas"]["LoRA_OMI_SDXL_Config"] | components["schemas"]["LoRA_OMI_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_SD1_Config"] | components["schemas"]["LoRA_Diffusers_SD2_Config"] | components["schemas"]["LoRA_Diffusers_SDXL_Config"] | components["schemas"]["LoRA_Diffusers_Flux2_Config"] | 
components["schemas"]["LoRA_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_ZImage_Config"] | components["schemas"]["ControlLoRA_LyCORIS_FLUX_Config"] | components["schemas"]["T5Encoder_T5Encoder_Config"] | components["schemas"]["T5Encoder_BnBLLMint8_Config"] | components["schemas"]["Qwen3Encoder_Qwen3Encoder_Config"] | components["schemas"]["Qwen3Encoder_Checkpoint_Config"] | components["schemas"]["Qwen3Encoder_GGUF_Config"] | components["schemas"]["TI_File_SD1_Config"] | components["schemas"]["TI_File_SD2_Config"] | components["schemas"]["TI_File_SDXL_Config"] | components["schemas"]["TI_Folder_SD1_Config"] | components["schemas"]["TI_Folder_SD2_Config"] | components["schemas"]["TI_Folder_SDXL_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD1_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD2_Config"] | components["schemas"]["IPAdapter_InvokeAI_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD1_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD2_Config"] | components["schemas"]["IPAdapter_Checkpoint_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_FLUX_Config"] | components["schemas"]["T2IAdapter_Diffusers_SD1_Config"] | components["schemas"]["T2IAdapter_Diffusers_SDXL_Config"] | components["schemas"]["Spandrel_Checkpoint_Config"] | components["schemas"]["CLIPEmbed_Diffusers_G_Config"] | components["schemas"]["CLIPEmbed_Diffusers_L_Config"] | components["schemas"]["CLIPVision_Diffusers_Config"] | components["schemas"]["SigLIP_Diffusers_Config"] | components["schemas"]["FLUXRedux_Checkpoint_Config"] | components["schemas"]["LlavaOnevision_Diffusers_Config"] | components["schemas"]["Unknown_Config"]; + "application/json": components["schemas"]["Main_Diffusers_SD1_Config"] | components["schemas"]["Main_Diffusers_SD2_Config"] | components["schemas"]["Main_Diffusers_SDXL_Config"] | components["schemas"]["Main_Diffusers_SDXLRefiner_Config"] | components["schemas"]["Main_Diffusers_SD3_Config"] | 
components["schemas"]["Main_Diffusers_FLUX_Config"] | components["schemas"]["Main_Diffusers_Flux2_Config"] | components["schemas"]["Main_Diffusers_CogView4_Config"] | components["schemas"]["Main_Diffusers_ZImage_Config"] | components["schemas"]["Main_Checkpoint_SD1_Config"] | components["schemas"]["Main_Checkpoint_SD2_Config"] | components["schemas"]["Main_Checkpoint_SDXL_Config"] | components["schemas"]["Main_Checkpoint_SDXLRefiner_Config"] | components["schemas"]["Main_Checkpoint_Flux2_Config"] | components["schemas"]["Main_Checkpoint_FLUX_Config"] | components["schemas"]["Main_Checkpoint_ZImage_Config"] | components["schemas"]["Main_Checkpoint_Anima_Config"] | components["schemas"]["Main_BnBNF4_FLUX_Config"] | components["schemas"]["Main_GGUF_Flux2_Config"] | components["schemas"]["Main_GGUF_FLUX_Config"] | components["schemas"]["Main_GGUF_ZImage_Config"] | components["schemas"]["VAE_Checkpoint_SD1_Config"] | components["schemas"]["VAE_Checkpoint_SD2_Config"] | components["schemas"]["VAE_Checkpoint_SDXL_Config"] | components["schemas"]["VAE_Checkpoint_FLUX_Config"] | components["schemas"]["VAE_Checkpoint_Flux2_Config"] | components["schemas"]["VAE_Checkpoint_Anima_Config"] | components["schemas"]["VAE_Diffusers_SD1_Config"] | components["schemas"]["VAE_Diffusers_SDXL_Config"] | components["schemas"]["VAE_Diffusers_Flux2_Config"] | components["schemas"]["ControlNet_Checkpoint_SD1_Config"] | components["schemas"]["ControlNet_Checkpoint_SD2_Config"] | components["schemas"]["ControlNet_Checkpoint_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_FLUX_Config"] | components["schemas"]["ControlNet_Checkpoint_ZImage_Config"] | components["schemas"]["ControlNet_Diffusers_SD1_Config"] | components["schemas"]["ControlNet_Diffusers_SD2_Config"] | components["schemas"]["ControlNet_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_SD1_Config"] | components["schemas"]["LoRA_LyCORIS_SD2_Config"] 
| components["schemas"]["LoRA_LyCORIS_SDXL_Config"] | components["schemas"]["LoRA_LyCORIS_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_ZImage_Config"] | components["schemas"]["LoRA_OMI_SDXL_Config"] | components["schemas"]["LoRA_OMI_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_SD1_Config"] | components["schemas"]["LoRA_Diffusers_SD2_Config"] | components["schemas"]["LoRA_Diffusers_SDXL_Config"] | components["schemas"]["LoRA_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_ZImage_Config"] | components["schemas"]["ControlLoRA_LyCORIS_FLUX_Config"] | components["schemas"]["T5Encoder_T5Encoder_Config"] | components["schemas"]["T5Encoder_BnBLLMint8_Config"] | components["schemas"]["Qwen3Encoder_Qwen3Encoder_Config"] | components["schemas"]["Qwen3Encoder_Checkpoint_Config"] | components["schemas"]["Qwen3Encoder_GGUF_Config"] | components["schemas"]["TI_File_SD1_Config"] | components["schemas"]["TI_File_SD2_Config"] | components["schemas"]["TI_File_SDXL_Config"] | components["schemas"]["TI_Folder_SD1_Config"] | components["schemas"]["TI_Folder_SD2_Config"] | components["schemas"]["TI_Folder_SDXL_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD1_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD2_Config"] | components["schemas"]["IPAdapter_InvokeAI_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD1_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD2_Config"] | components["schemas"]["IPAdapter_Checkpoint_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_FLUX_Config"] | components["schemas"]["T2IAdapter_Diffusers_SD1_Config"] | components["schemas"]["T2IAdapter_Diffusers_SDXL_Config"] | components["schemas"]["Spandrel_Checkpoint_Config"] | components["schemas"]["CLIPEmbed_Diffusers_G_Config"] | components["schemas"]["CLIPEmbed_Diffusers_L_Config"] | components["schemas"]["CLIPVision_Diffusers_Config"] | components["schemas"]["SigLIP_Diffusers_Config"] | 
components["schemas"]["FLUXRedux_Checkpoint_Config"] | components["schemas"]["LlavaOnevision_Diffusers_Config"] | components["schemas"]["Unknown_Config"]; }; }; /** @description Bad request */ @@ -29311,7 +28030,7 @@ export interface operations { * "repo_variant": "fp16", * "upcast_attention": false * } */ - "application/json": components["schemas"]["Main_Diffusers_SD1_Config"] | components["schemas"]["Main_Diffusers_SD2_Config"] | components["schemas"]["Main_Diffusers_SDXL_Config"] | components["schemas"]["Main_Diffusers_SDXLRefiner_Config"] | components["schemas"]["Main_Diffusers_SD3_Config"] | components["schemas"]["Main_Diffusers_FLUX_Config"] | components["schemas"]["Main_Diffusers_Flux2_Config"] | components["schemas"]["Main_Diffusers_CogView4_Config"] | components["schemas"]["Main_Diffusers_ZImage_Config"] | components["schemas"]["Main_Checkpoint_SD1_Config"] | components["schemas"]["Main_Checkpoint_SD2_Config"] | components["schemas"]["Main_Checkpoint_SDXL_Config"] | components["schemas"]["Main_Checkpoint_SDXLRefiner_Config"] | components["schemas"]["Main_Checkpoint_Flux2_Config"] | components["schemas"]["Main_Checkpoint_FLUX_Config"] | components["schemas"]["Main_Checkpoint_ZImage_Config"] | components["schemas"]["Main_BnBNF4_FLUX_Config"] | components["schemas"]["Main_GGUF_Flux2_Config"] | components["schemas"]["Main_GGUF_FLUX_Config"] | components["schemas"]["Main_GGUF_ZImage_Config"] | components["schemas"]["VAE_Checkpoint_SD1_Config"] | components["schemas"]["VAE_Checkpoint_SD2_Config"] | components["schemas"]["VAE_Checkpoint_SDXL_Config"] | components["schemas"]["VAE_Checkpoint_FLUX_Config"] | components["schemas"]["VAE_Checkpoint_Flux2_Config"] | components["schemas"]["VAE_Diffusers_SD1_Config"] | components["schemas"]["VAE_Diffusers_SDXL_Config"] | components["schemas"]["VAE_Diffusers_Flux2_Config"] | components["schemas"]["ControlNet_Checkpoint_SD1_Config"] | components["schemas"]["ControlNet_Checkpoint_SD2_Config"] | 
components["schemas"]["ControlNet_Checkpoint_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_FLUX_Config"] | components["schemas"]["ControlNet_Checkpoint_ZImage_Config"] | components["schemas"]["ControlNet_Diffusers_SD1_Config"] | components["schemas"]["ControlNet_Diffusers_SD2_Config"] | components["schemas"]["ControlNet_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_SD1_Config"] | components["schemas"]["LoRA_LyCORIS_SD2_Config"] | components["schemas"]["LoRA_LyCORIS_SDXL_Config"] | components["schemas"]["LoRA_LyCORIS_Flux2_Config"] | components["schemas"]["LoRA_LyCORIS_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_ZImage_Config"] | components["schemas"]["LoRA_OMI_SDXL_Config"] | components["schemas"]["LoRA_OMI_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_SD1_Config"] | components["schemas"]["LoRA_Diffusers_SD2_Config"] | components["schemas"]["LoRA_Diffusers_SDXL_Config"] | components["schemas"]["LoRA_Diffusers_Flux2_Config"] | components["schemas"]["LoRA_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_ZImage_Config"] | components["schemas"]["ControlLoRA_LyCORIS_FLUX_Config"] | components["schemas"]["T5Encoder_T5Encoder_Config"] | components["schemas"]["T5Encoder_BnBLLMint8_Config"] | components["schemas"]["Qwen3Encoder_Qwen3Encoder_Config"] | components["schemas"]["Qwen3Encoder_Checkpoint_Config"] | components["schemas"]["Qwen3Encoder_GGUF_Config"] | components["schemas"]["TI_File_SD1_Config"] | components["schemas"]["TI_File_SD2_Config"] | components["schemas"]["TI_File_SDXL_Config"] | components["schemas"]["TI_Folder_SD1_Config"] | components["schemas"]["TI_Folder_SD2_Config"] | components["schemas"]["TI_Folder_SDXL_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD1_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD2_Config"] | components["schemas"]["IPAdapter_InvokeAI_SDXL_Config"] | 
components["schemas"]["IPAdapter_Checkpoint_SD1_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD2_Config"] | components["schemas"]["IPAdapter_Checkpoint_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_FLUX_Config"] | components["schemas"]["T2IAdapter_Diffusers_SD1_Config"] | components["schemas"]["T2IAdapter_Diffusers_SDXL_Config"] | components["schemas"]["Spandrel_Checkpoint_Config"] | components["schemas"]["CLIPEmbed_Diffusers_G_Config"] | components["schemas"]["CLIPEmbed_Diffusers_L_Config"] | components["schemas"]["CLIPVision_Diffusers_Config"] | components["schemas"]["SigLIP_Diffusers_Config"] | components["schemas"]["FLUXRedux_Checkpoint_Config"] | components["schemas"]["LlavaOnevision_Diffusers_Config"] | components["schemas"]["Unknown_Config"]; + "application/json": components["schemas"]["Main_Diffusers_SD1_Config"] | components["schemas"]["Main_Diffusers_SD2_Config"] | components["schemas"]["Main_Diffusers_SDXL_Config"] | components["schemas"]["Main_Diffusers_SDXLRefiner_Config"] | components["schemas"]["Main_Diffusers_SD3_Config"] | components["schemas"]["Main_Diffusers_FLUX_Config"] | components["schemas"]["Main_Diffusers_Flux2_Config"] | components["schemas"]["Main_Diffusers_CogView4_Config"] | components["schemas"]["Main_Diffusers_ZImage_Config"] | components["schemas"]["Main_Checkpoint_SD1_Config"] | components["schemas"]["Main_Checkpoint_SD2_Config"] | components["schemas"]["Main_Checkpoint_SDXL_Config"] | components["schemas"]["Main_Checkpoint_SDXLRefiner_Config"] | components["schemas"]["Main_Checkpoint_Flux2_Config"] | components["schemas"]["Main_Checkpoint_FLUX_Config"] | components["schemas"]["Main_Checkpoint_ZImage_Config"] | components["schemas"]["Main_Checkpoint_Anima_Config"] | components["schemas"]["Main_BnBNF4_FLUX_Config"] | components["schemas"]["Main_GGUF_Flux2_Config"] | components["schemas"]["Main_GGUF_FLUX_Config"] | components["schemas"]["Main_GGUF_ZImage_Config"] | 
components["schemas"]["VAE_Checkpoint_SD1_Config"] | components["schemas"]["VAE_Checkpoint_SD2_Config"] | components["schemas"]["VAE_Checkpoint_SDXL_Config"] | components["schemas"]["VAE_Checkpoint_FLUX_Config"] | components["schemas"]["VAE_Checkpoint_Flux2_Config"] | components["schemas"]["VAE_Checkpoint_Anima_Config"] | components["schemas"]["VAE_Diffusers_SD1_Config"] | components["schemas"]["VAE_Diffusers_SDXL_Config"] | components["schemas"]["VAE_Diffusers_Flux2_Config"] | components["schemas"]["ControlNet_Checkpoint_SD1_Config"] | components["schemas"]["ControlNet_Checkpoint_SD2_Config"] | components["schemas"]["ControlNet_Checkpoint_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_FLUX_Config"] | components["schemas"]["ControlNet_Checkpoint_ZImage_Config"] | components["schemas"]["ControlNet_Diffusers_SD1_Config"] | components["schemas"]["ControlNet_Diffusers_SD2_Config"] | components["schemas"]["ControlNet_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_SD1_Config"] | components["schemas"]["LoRA_LyCORIS_SD2_Config"] | components["schemas"]["LoRA_LyCORIS_SDXL_Config"] | components["schemas"]["LoRA_LyCORIS_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_ZImage_Config"] | components["schemas"]["LoRA_OMI_SDXL_Config"] | components["schemas"]["LoRA_OMI_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_SD1_Config"] | components["schemas"]["LoRA_Diffusers_SD2_Config"] | components["schemas"]["LoRA_Diffusers_SDXL_Config"] | components["schemas"]["LoRA_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_ZImage_Config"] | components["schemas"]["ControlLoRA_LyCORIS_FLUX_Config"] | components["schemas"]["T5Encoder_T5Encoder_Config"] | components["schemas"]["T5Encoder_BnBLLMint8_Config"] | components["schemas"]["Qwen3Encoder_Qwen3Encoder_Config"] | components["schemas"]["Qwen3Encoder_Checkpoint_Config"] | components["schemas"]["Qwen3Encoder_GGUF_Config"] | 
components["schemas"]["TI_File_SD1_Config"] | components["schemas"]["TI_File_SD2_Config"] | components["schemas"]["TI_File_SDXL_Config"] | components["schemas"]["TI_Folder_SD1_Config"] | components["schemas"]["TI_Folder_SD2_Config"] | components["schemas"]["TI_Folder_SDXL_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD1_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD2_Config"] | components["schemas"]["IPAdapter_InvokeAI_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD1_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD2_Config"] | components["schemas"]["IPAdapter_Checkpoint_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_FLUX_Config"] | components["schemas"]["T2IAdapter_Diffusers_SD1_Config"] | components["schemas"]["T2IAdapter_Diffusers_SDXL_Config"] | components["schemas"]["Spandrel_Checkpoint_Config"] | components["schemas"]["CLIPEmbed_Diffusers_G_Config"] | components["schemas"]["CLIPEmbed_Diffusers_L_Config"] | components["schemas"]["CLIPVision_Diffusers_Config"] | components["schemas"]["SigLIP_Diffusers_Config"] | components["schemas"]["FLUXRedux_Checkpoint_Config"] | components["schemas"]["LlavaOnevision_Diffusers_Config"] | components["schemas"]["Unknown_Config"]; }; }; /** @description Bad request */ @@ -29382,7 +28101,7 @@ export interface operations { * "repo_variant": "fp16", * "upcast_attention": false * } */ - "application/json": components["schemas"]["Main_Diffusers_SD1_Config"] | components["schemas"]["Main_Diffusers_SD2_Config"] | components["schemas"]["Main_Diffusers_SDXL_Config"] | components["schemas"]["Main_Diffusers_SDXLRefiner_Config"] | components["schemas"]["Main_Diffusers_SD3_Config"] | components["schemas"]["Main_Diffusers_FLUX_Config"] | components["schemas"]["Main_Diffusers_Flux2_Config"] | components["schemas"]["Main_Diffusers_CogView4_Config"] | components["schemas"]["Main_Diffusers_ZImage_Config"] | components["schemas"]["Main_Checkpoint_SD1_Config"] | 
components["schemas"]["Main_Checkpoint_SD2_Config"] | components["schemas"]["Main_Checkpoint_SDXL_Config"] | components["schemas"]["Main_Checkpoint_SDXLRefiner_Config"] | components["schemas"]["Main_Checkpoint_Flux2_Config"] | components["schemas"]["Main_Checkpoint_FLUX_Config"] | components["schemas"]["Main_Checkpoint_ZImage_Config"] | components["schemas"]["Main_BnBNF4_FLUX_Config"] | components["schemas"]["Main_GGUF_Flux2_Config"] | components["schemas"]["Main_GGUF_FLUX_Config"] | components["schemas"]["Main_GGUF_ZImage_Config"] | components["schemas"]["VAE_Checkpoint_SD1_Config"] | components["schemas"]["VAE_Checkpoint_SD2_Config"] | components["schemas"]["VAE_Checkpoint_SDXL_Config"] | components["schemas"]["VAE_Checkpoint_FLUX_Config"] | components["schemas"]["VAE_Checkpoint_Flux2_Config"] | components["schemas"]["VAE_Diffusers_SD1_Config"] | components["schemas"]["VAE_Diffusers_SDXL_Config"] | components["schemas"]["VAE_Diffusers_Flux2_Config"] | components["schemas"]["ControlNet_Checkpoint_SD1_Config"] | components["schemas"]["ControlNet_Checkpoint_SD2_Config"] | components["schemas"]["ControlNet_Checkpoint_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_FLUX_Config"] | components["schemas"]["ControlNet_Checkpoint_ZImage_Config"] | components["schemas"]["ControlNet_Diffusers_SD1_Config"] | components["schemas"]["ControlNet_Diffusers_SD2_Config"] | components["schemas"]["ControlNet_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_SD1_Config"] | components["schemas"]["LoRA_LyCORIS_SD2_Config"] | components["schemas"]["LoRA_LyCORIS_SDXL_Config"] | components["schemas"]["LoRA_LyCORIS_Flux2_Config"] | components["schemas"]["LoRA_LyCORIS_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_ZImage_Config"] | components["schemas"]["LoRA_OMI_SDXL_Config"] | components["schemas"]["LoRA_OMI_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_SD1_Config"] | 
components["schemas"]["LoRA_Diffusers_SD2_Config"] | components["schemas"]["LoRA_Diffusers_SDXL_Config"] | components["schemas"]["LoRA_Diffusers_Flux2_Config"] | components["schemas"]["LoRA_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_ZImage_Config"] | components["schemas"]["ControlLoRA_LyCORIS_FLUX_Config"] | components["schemas"]["T5Encoder_T5Encoder_Config"] | components["schemas"]["T5Encoder_BnBLLMint8_Config"] | components["schemas"]["Qwen3Encoder_Qwen3Encoder_Config"] | components["schemas"]["Qwen3Encoder_Checkpoint_Config"] | components["schemas"]["Qwen3Encoder_GGUF_Config"] | components["schemas"]["TI_File_SD1_Config"] | components["schemas"]["TI_File_SD2_Config"] | components["schemas"]["TI_File_SDXL_Config"] | components["schemas"]["TI_Folder_SD1_Config"] | components["schemas"]["TI_Folder_SD2_Config"] | components["schemas"]["TI_Folder_SDXL_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD1_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD2_Config"] | components["schemas"]["IPAdapter_InvokeAI_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD1_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD2_Config"] | components["schemas"]["IPAdapter_Checkpoint_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_FLUX_Config"] | components["schemas"]["T2IAdapter_Diffusers_SD1_Config"] | components["schemas"]["T2IAdapter_Diffusers_SDXL_Config"] | components["schemas"]["Spandrel_Checkpoint_Config"] | components["schemas"]["CLIPEmbed_Diffusers_G_Config"] | components["schemas"]["CLIPEmbed_Diffusers_L_Config"] | components["schemas"]["CLIPVision_Diffusers_Config"] | components["schemas"]["SigLIP_Diffusers_Config"] | components["schemas"]["FLUXRedux_Checkpoint_Config"] | components["schemas"]["LlavaOnevision_Diffusers_Config"] | components["schemas"]["Unknown_Config"]; + "application/json": components["schemas"]["Main_Diffusers_SD1_Config"] | components["schemas"]["Main_Diffusers_SD2_Config"] | 
components["schemas"]["Main_Diffusers_SDXL_Config"] | components["schemas"]["Main_Diffusers_SDXLRefiner_Config"] | components["schemas"]["Main_Diffusers_SD3_Config"] | components["schemas"]["Main_Diffusers_FLUX_Config"] | components["schemas"]["Main_Diffusers_Flux2_Config"] | components["schemas"]["Main_Diffusers_CogView4_Config"] | components["schemas"]["Main_Diffusers_ZImage_Config"] | components["schemas"]["Main_Checkpoint_SD1_Config"] | components["schemas"]["Main_Checkpoint_SD2_Config"] | components["schemas"]["Main_Checkpoint_SDXL_Config"] | components["schemas"]["Main_Checkpoint_SDXLRefiner_Config"] | components["schemas"]["Main_Checkpoint_Flux2_Config"] | components["schemas"]["Main_Checkpoint_FLUX_Config"] | components["schemas"]["Main_Checkpoint_ZImage_Config"] | components["schemas"]["Main_Checkpoint_Anima_Config"] | components["schemas"]["Main_BnBNF4_FLUX_Config"] | components["schemas"]["Main_GGUF_Flux2_Config"] | components["schemas"]["Main_GGUF_FLUX_Config"] | components["schemas"]["Main_GGUF_ZImage_Config"] | components["schemas"]["VAE_Checkpoint_SD1_Config"] | components["schemas"]["VAE_Checkpoint_SD2_Config"] | components["schemas"]["VAE_Checkpoint_SDXL_Config"] | components["schemas"]["VAE_Checkpoint_FLUX_Config"] | components["schemas"]["VAE_Checkpoint_Flux2_Config"] | components["schemas"]["VAE_Checkpoint_Anima_Config"] | components["schemas"]["VAE_Diffusers_SD1_Config"] | components["schemas"]["VAE_Diffusers_SDXL_Config"] | components["schemas"]["VAE_Diffusers_Flux2_Config"] | components["schemas"]["ControlNet_Checkpoint_SD1_Config"] | components["schemas"]["ControlNet_Checkpoint_SD2_Config"] | components["schemas"]["ControlNet_Checkpoint_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_FLUX_Config"] | components["schemas"]["ControlNet_Checkpoint_ZImage_Config"] | components["schemas"]["ControlNet_Diffusers_SD1_Config"] | components["schemas"]["ControlNet_Diffusers_SD2_Config"] | 
components["schemas"]["ControlNet_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_SD1_Config"] | components["schemas"]["LoRA_LyCORIS_SD2_Config"] | components["schemas"]["LoRA_LyCORIS_SDXL_Config"] | components["schemas"]["LoRA_LyCORIS_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_ZImage_Config"] | components["schemas"]["LoRA_OMI_SDXL_Config"] | components["schemas"]["LoRA_OMI_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_SD1_Config"] | components["schemas"]["LoRA_Diffusers_SD2_Config"] | components["schemas"]["LoRA_Diffusers_SDXL_Config"] | components["schemas"]["LoRA_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_ZImage_Config"] | components["schemas"]["ControlLoRA_LyCORIS_FLUX_Config"] | components["schemas"]["T5Encoder_T5Encoder_Config"] | components["schemas"]["T5Encoder_BnBLLMint8_Config"] | components["schemas"]["Qwen3Encoder_Qwen3Encoder_Config"] | components["schemas"]["Qwen3Encoder_Checkpoint_Config"] | components["schemas"]["Qwen3Encoder_GGUF_Config"] | components["schemas"]["TI_File_SD1_Config"] | components["schemas"]["TI_File_SD2_Config"] | components["schemas"]["TI_File_SDXL_Config"] | components["schemas"]["TI_Folder_SD1_Config"] | components["schemas"]["TI_Folder_SD2_Config"] | components["schemas"]["TI_Folder_SDXL_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD1_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD2_Config"] | components["schemas"]["IPAdapter_InvokeAI_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD1_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD2_Config"] | components["schemas"]["IPAdapter_Checkpoint_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_FLUX_Config"] | components["schemas"]["T2IAdapter_Diffusers_SD1_Config"] | components["schemas"]["T2IAdapter_Diffusers_SDXL_Config"] | components["schemas"]["Spandrel_Checkpoint_Config"] | 
components["schemas"]["CLIPEmbed_Diffusers_G_Config"] | components["schemas"]["CLIPEmbed_Diffusers_L_Config"] | components["schemas"]["CLIPVision_Diffusers_Config"] | components["schemas"]["SigLIP_Diffusers_Config"] | components["schemas"]["FLUXRedux_Checkpoint_Config"] | components["schemas"]["LlavaOnevision_Diffusers_Config"] | components["schemas"]["Unknown_Config"]; }; }; /** @description Bad request */ @@ -29886,166 +28605,6 @@ export interface operations { }; }; }; - pause_model_install_job: { - parameters: { - query?: never; - header?: never; - path: { - /** @description Model install job ID */ - id: number; - }; - cookie?: never; - }; - requestBody?: never; - responses: { - /** @description The job was paused successfully */ - 201: { - headers: { - [name: string]: unknown; - }; - content: { - "application/json": components["schemas"]["ModelInstallJob"]; - }; - }; - /** @description No such job */ - 415: { - headers: { - [name: string]: unknown; - }; - content?: never; - }; - /** @description Validation Error */ - 422: { - headers: { - [name: string]: unknown; - }; - content: { - "application/json": components["schemas"]["HTTPValidationError"]; - }; - }; - }; - }; - resume_model_install_job: { - parameters: { - query?: never; - header?: never; - path: { - /** @description Model install job ID */ - id: number; - }; - cookie?: never; - }; - requestBody?: never; - responses: { - /** @description The job was resumed successfully */ - 201: { - headers: { - [name: string]: unknown; - }; - content: { - "application/json": components["schemas"]["ModelInstallJob"]; - }; - }; - /** @description No such job */ - 415: { - headers: { - [name: string]: unknown; - }; - content?: never; - }; - /** @description Validation Error */ - 422: { - headers: { - [name: string]: unknown; - }; - content: { - "application/json": components["schemas"]["HTTPValidationError"]; - }; - }; - }; - }; - restart_failed_model_install_job: { - parameters: { - query?: never; - header?: never; - 
path: { - /** @description Model install job ID */ - id: number; - }; - cookie?: never; - }; - requestBody?: never; - responses: { - /** @description Failed files restarted successfully */ - 201: { - headers: { - [name: string]: unknown; - }; - content: { - "application/json": components["schemas"]["ModelInstallJob"]; - }; - }; - /** @description No such job */ - 415: { - headers: { - [name: string]: unknown; - }; - content?: never; - }; - /** @description Validation Error */ - 422: { - headers: { - [name: string]: unknown; - }; - content: { - "application/json": components["schemas"]["HTTPValidationError"]; - }; - }; - }; - }; - restart_model_install_file: { - parameters: { - query?: never; - header?: never; - path: { - /** @description Model install job ID */ - id: number; - }; - cookie?: never; - }; - requestBody: { - content: { - "application/json": string; - }; - }; - responses: { - /** @description File restarted successfully */ - 201: { - headers: { - [name: string]: unknown; - }; - content: { - "application/json": components["schemas"]["ModelInstallJob"]; - }; - }; - /** @description No such job */ - 415: { - headers: { - [name: string]: unknown; - }; - content?: never; - }; - /** @description Validation Error */ - 422: { - headers: { - [name: string]: unknown; - }; - content: { - "application/json": components["schemas"]["HTTPValidationError"]; - }; - }; - }; - }; convert_model: { parameters: { query?: never; @@ -30082,7 +28641,7 @@ export interface operations { * "repo_variant": "fp16", * "upcast_attention": false * } */ - "application/json": components["schemas"]["Main_Diffusers_SD1_Config"] | components["schemas"]["Main_Diffusers_SD2_Config"] | components["schemas"]["Main_Diffusers_SDXL_Config"] | components["schemas"]["Main_Diffusers_SDXLRefiner_Config"] | components["schemas"]["Main_Diffusers_SD3_Config"] | components["schemas"]["Main_Diffusers_FLUX_Config"] | components["schemas"]["Main_Diffusers_Flux2_Config"] | 
components["schemas"]["Main_Diffusers_CogView4_Config"] | components["schemas"]["Main_Diffusers_ZImage_Config"] | components["schemas"]["Main_Checkpoint_SD1_Config"] | components["schemas"]["Main_Checkpoint_SD2_Config"] | components["schemas"]["Main_Checkpoint_SDXL_Config"] | components["schemas"]["Main_Checkpoint_SDXLRefiner_Config"] | components["schemas"]["Main_Checkpoint_Flux2_Config"] | components["schemas"]["Main_Checkpoint_FLUX_Config"] | components["schemas"]["Main_Checkpoint_ZImage_Config"] | components["schemas"]["Main_BnBNF4_FLUX_Config"] | components["schemas"]["Main_GGUF_Flux2_Config"] | components["schemas"]["Main_GGUF_FLUX_Config"] | components["schemas"]["Main_GGUF_ZImage_Config"] | components["schemas"]["VAE_Checkpoint_SD1_Config"] | components["schemas"]["VAE_Checkpoint_SD2_Config"] | components["schemas"]["VAE_Checkpoint_SDXL_Config"] | components["schemas"]["VAE_Checkpoint_FLUX_Config"] | components["schemas"]["VAE_Checkpoint_Flux2_Config"] | components["schemas"]["VAE_Diffusers_SD1_Config"] | components["schemas"]["VAE_Diffusers_SDXL_Config"] | components["schemas"]["VAE_Diffusers_Flux2_Config"] | components["schemas"]["ControlNet_Checkpoint_SD1_Config"] | components["schemas"]["ControlNet_Checkpoint_SD2_Config"] | components["schemas"]["ControlNet_Checkpoint_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_FLUX_Config"] | components["schemas"]["ControlNet_Checkpoint_ZImage_Config"] | components["schemas"]["ControlNet_Diffusers_SD1_Config"] | components["schemas"]["ControlNet_Diffusers_SD2_Config"] | components["schemas"]["ControlNet_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_SD1_Config"] | components["schemas"]["LoRA_LyCORIS_SD2_Config"] | components["schemas"]["LoRA_LyCORIS_SDXL_Config"] | components["schemas"]["LoRA_LyCORIS_Flux2_Config"] | components["schemas"]["LoRA_LyCORIS_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_ZImage_Config"] | 
components["schemas"]["LoRA_OMI_SDXL_Config"] | components["schemas"]["LoRA_OMI_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_SD1_Config"] | components["schemas"]["LoRA_Diffusers_SD2_Config"] | components["schemas"]["LoRA_Diffusers_SDXL_Config"] | components["schemas"]["LoRA_Diffusers_Flux2_Config"] | components["schemas"]["LoRA_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_ZImage_Config"] | components["schemas"]["ControlLoRA_LyCORIS_FLUX_Config"] | components["schemas"]["T5Encoder_T5Encoder_Config"] | components["schemas"]["T5Encoder_BnBLLMint8_Config"] | components["schemas"]["Qwen3Encoder_Qwen3Encoder_Config"] | components["schemas"]["Qwen3Encoder_Checkpoint_Config"] | components["schemas"]["Qwen3Encoder_GGUF_Config"] | components["schemas"]["TI_File_SD1_Config"] | components["schemas"]["TI_File_SD2_Config"] | components["schemas"]["TI_File_SDXL_Config"] | components["schemas"]["TI_Folder_SD1_Config"] | components["schemas"]["TI_Folder_SD2_Config"] | components["schemas"]["TI_Folder_SDXL_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD1_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD2_Config"] | components["schemas"]["IPAdapter_InvokeAI_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD1_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD2_Config"] | components["schemas"]["IPAdapter_Checkpoint_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_FLUX_Config"] | components["schemas"]["T2IAdapter_Diffusers_SD1_Config"] | components["schemas"]["T2IAdapter_Diffusers_SDXL_Config"] | components["schemas"]["Spandrel_Checkpoint_Config"] | components["schemas"]["CLIPEmbed_Diffusers_G_Config"] | components["schemas"]["CLIPEmbed_Diffusers_L_Config"] | components["schemas"]["CLIPVision_Diffusers_Config"] | components["schemas"]["SigLIP_Diffusers_Config"] | components["schemas"]["FLUXRedux_Checkpoint_Config"] | components["schemas"]["LlavaOnevision_Diffusers_Config"] | components["schemas"]["Unknown_Config"]; 
+ "application/json": components["schemas"]["Main_Diffusers_SD1_Config"] | components["schemas"]["Main_Diffusers_SD2_Config"] | components["schemas"]["Main_Diffusers_SDXL_Config"] | components["schemas"]["Main_Diffusers_SDXLRefiner_Config"] | components["schemas"]["Main_Diffusers_SD3_Config"] | components["schemas"]["Main_Diffusers_FLUX_Config"] | components["schemas"]["Main_Diffusers_Flux2_Config"] | components["schemas"]["Main_Diffusers_CogView4_Config"] | components["schemas"]["Main_Diffusers_ZImage_Config"] | components["schemas"]["Main_Checkpoint_SD1_Config"] | components["schemas"]["Main_Checkpoint_SD2_Config"] | components["schemas"]["Main_Checkpoint_SDXL_Config"] | components["schemas"]["Main_Checkpoint_SDXLRefiner_Config"] | components["schemas"]["Main_Checkpoint_Flux2_Config"] | components["schemas"]["Main_Checkpoint_FLUX_Config"] | components["schemas"]["Main_Checkpoint_ZImage_Config"] | components["schemas"]["Main_Checkpoint_Anima_Config"] | components["schemas"]["Main_BnBNF4_FLUX_Config"] | components["schemas"]["Main_GGUF_Flux2_Config"] | components["schemas"]["Main_GGUF_FLUX_Config"] | components["schemas"]["Main_GGUF_ZImage_Config"] | components["schemas"]["VAE_Checkpoint_SD1_Config"] | components["schemas"]["VAE_Checkpoint_SD2_Config"] | components["schemas"]["VAE_Checkpoint_SDXL_Config"] | components["schemas"]["VAE_Checkpoint_FLUX_Config"] | components["schemas"]["VAE_Checkpoint_Flux2_Config"] | components["schemas"]["VAE_Checkpoint_Anima_Config"] | components["schemas"]["VAE_Diffusers_SD1_Config"] | components["schemas"]["VAE_Diffusers_SDXL_Config"] | components["schemas"]["VAE_Diffusers_Flux2_Config"] | components["schemas"]["ControlNet_Checkpoint_SD1_Config"] | components["schemas"]["ControlNet_Checkpoint_SD2_Config"] | components["schemas"]["ControlNet_Checkpoint_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_FLUX_Config"] | components["schemas"]["ControlNet_Checkpoint_ZImage_Config"] | 
components["schemas"]["ControlNet_Diffusers_SD1_Config"] | components["schemas"]["ControlNet_Diffusers_SD2_Config"] | components["schemas"]["ControlNet_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_SD1_Config"] | components["schemas"]["LoRA_LyCORIS_SD2_Config"] | components["schemas"]["LoRA_LyCORIS_SDXL_Config"] | components["schemas"]["LoRA_LyCORIS_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_ZImage_Config"] | components["schemas"]["LoRA_OMI_SDXL_Config"] | components["schemas"]["LoRA_OMI_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_SD1_Config"] | components["schemas"]["LoRA_Diffusers_SD2_Config"] | components["schemas"]["LoRA_Diffusers_SDXL_Config"] | components["schemas"]["LoRA_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_ZImage_Config"] | components["schemas"]["ControlLoRA_LyCORIS_FLUX_Config"] | components["schemas"]["T5Encoder_T5Encoder_Config"] | components["schemas"]["T5Encoder_BnBLLMint8_Config"] | components["schemas"]["Qwen3Encoder_Qwen3Encoder_Config"] | components["schemas"]["Qwen3Encoder_Checkpoint_Config"] | components["schemas"]["Qwen3Encoder_GGUF_Config"] | components["schemas"]["TI_File_SD1_Config"] | components["schemas"]["TI_File_SD2_Config"] | components["schemas"]["TI_File_SDXL_Config"] | components["schemas"]["TI_Folder_SD1_Config"] | components["schemas"]["TI_Folder_SD2_Config"] | components["schemas"]["TI_Folder_SDXL_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD1_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD2_Config"] | components["schemas"]["IPAdapter_InvokeAI_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD1_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD2_Config"] | components["schemas"]["IPAdapter_Checkpoint_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_FLUX_Config"] | components["schemas"]["T2IAdapter_Diffusers_SD1_Config"] | 
components["schemas"]["T2IAdapter_Diffusers_SDXL_Config"] | components["schemas"]["Spandrel_Checkpoint_Config"] | components["schemas"]["CLIPEmbed_Diffusers_G_Config"] | components["schemas"]["CLIPEmbed_Diffusers_L_Config"] | components["schemas"]["CLIPVision_Diffusers_Config"] | components["schemas"]["SigLIP_Diffusers_Config"] | components["schemas"]["FLUXRedux_Checkpoint_Config"] | components["schemas"]["LlavaOnevision_Diffusers_Config"] | components["schemas"]["Unknown_Config"]; }; }; /** @description Bad request */ @@ -33445,7 +32004,7 @@ export interface operations { }; header?: never; path: { - /** @description The queue id (ignored, kept for backwards compatibility) */ + /** @description The queue id to perform this operation on */ queue_id: string; }; cookie?: never; @@ -33480,7 +32039,7 @@ export interface operations { }; header?: never; path: { - /** @description The queue id (ignored, kept for backwards compatibility) */ + /** @description The queue id to perform this operation on */ queue_id: string; }; cookie?: never; @@ -33516,7 +32075,7 @@ export interface operations { query?: never; header?: never; path: { - /** @description The queue id (ignored, kept for backwards compatibility) */ + /** @description The queue id to perform this operation on */ queue_id: string; }; cookie?: never; @@ -33550,76 +32109,4 @@ export interface operations { }; }; }; - get_recall_parameters: { - parameters: { - query?: never; - header?: never; - path: { - /** @description The queue id to retrieve parameters for */ - queue_id: string; - }; - cookie?: never; - }; - requestBody?: never; - responses: { - /** @description Successful Response */ - 200: { - headers: { - [name: string]: unknown; - }; - content: { - "application/json": { - [key: string]: unknown; - }; - }; - }; - /** @description Validation Error */ - 422: { - headers: { - [name: string]: unknown; - }; - content: { - "application/json": components["schemas"]["HTTPValidationError"]; - }; - }; - }; - }; - 
update_recall_parameters: { - parameters: { - query?: never; - header?: never; - path: { - /** @description The queue id to perform this operation on */ - queue_id: string; - }; - cookie?: never; - }; - requestBody: { - content: { - "application/json": components["schemas"]["RecallParameter"]; - }; - }; - responses: { - /** @description Successful Response */ - 200: { - headers: { - [name: string]: unknown; - }; - content: { - "application/json": { - [key: string]: unknown; - }; - }; - }; - /** @description Validation Error */ - 422: { - headers: { - [name: string]: unknown; - }; - content: { - "application/json": components["schemas"]["HTTPValidationError"]; - }; - }; - }; - }; } From 0f840ab56067d561a959c05490bf254d9932a24e Mon Sep 17 00:00:00 2001 From: Your Name Date: Wed, 11 Mar 2026 22:25:44 -0400 Subject: [PATCH 02/14] schema --- .../frontend/web/src/services/api/schema.ts | 2421 +++++++++++++++-- 1 file changed, 2194 insertions(+), 227 deletions(-) diff --git a/invokeai/frontend/web/src/services/api/schema.ts b/invokeai/frontend/web/src/services/api/schema.ts index 83e8faa23ff..469a5ab334d 100644 --- a/invokeai/frontend/web/src/services/api/schema.ts +++ b/invokeai/frontend/web/src/services/api/schema.ts @@ -1,4 +1,290 @@ export type paths = { + "/api/v1/auth/status": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + /** + * Get Setup Status + * @description Check if initial administrator setup is required. + * + * Returns: + * SetupStatusResponse indicating whether setup is needed and multiuser mode status + */ + get: operations["get_setup_status_api_v1_auth_status_get"]; + put?: never; + post?: never; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/api/v1/auth/login": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + get?: never; + put?: never; + /** + * Login + * @description Authenticate user and return access token. 
+ * + * Args: + * request: Login credentials (email and password) + * + * Returns: + * LoginResponse containing JWT token and user information + * + * Raises: + * HTTPException: 401 if credentials are invalid or user is inactive + * HTTPException: 403 if multiuser mode is disabled + */ + post: operations["login_api_v1_auth_login_post"]; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/api/v1/auth/logout": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + get?: never; + put?: never; + /** + * Logout + * @description Logout current user. + * + * Currently a no-op since we use stateless JWT tokens. For token invalidation in + * future implementations, consider: + * - Token blacklist: Store invalidated tokens in Redis/database with expiration + * - Token versioning: Add version field to user record, increment on logout + * - Short-lived tokens: Use refresh token pattern with token rotation + * - Session storage: Track active sessions server-side for revocation + * + * Args: + * current_user: The authenticated user (validates token) + * + * Returns: + * LogoutResponse indicating success + */ + post: operations["logout_api_v1_auth_logout_post"]; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/api/v1/auth/me": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + /** + * Get Current User Info + * @description Get current authenticated user's information. 
+ * + * Args: + * current_user: The authenticated user's token data + * + * Returns: + * UserDTO containing user information + * + * Raises: + * HTTPException: 404 if user is not found (should not happen normally) + */ + get: operations["get_current_user_info_api_v1_auth_me_get"]; + put?: never; + post?: never; + delete?: never; + options?: never; + head?: never; + /** + * Update Current User + * @description Update the current user's own profile. + * + * To change the password, both ``current_password`` and ``new_password`` must + * be provided. The current password is verified before the change is applied. + * + * Args: + * request: Profile fields to update + * current_user: The authenticated user + * + * Returns: + * The updated user + * + * Raises: + * HTTPException: 400 if current password is incorrect or new password is weak + * HTTPException: 404 if user not found + */ + patch: operations["update_current_user_api_v1_auth_me_patch"]; + trace?: never; + }; + "/api/v1/auth/setup": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + get?: never; + put?: never; + /** + * Setup Admin + * @description Set up initial administrator account. + * + * This endpoint can only be called once, when no admin user exists. It creates + * the first admin user for the system. + * + * Args: + * request: Admin account details (email, display_name, password) + * + * Returns: + * SetupResponse containing the created admin user + * + * Raises: + * HTTPException: 400 if admin already exists or password is weak + * HTTPException: 403 if multiuser mode is disabled + */ + post: operations["setup_admin_api_v1_auth_setup_post"]; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/api/v1/auth/generate-password": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + /** + * Generate Password + * @description Generate a strong random password. 
+ * + * Returns a cryptographically secure random password of 16 characters + * containing uppercase, lowercase, digits, and punctuation. + */ + get: operations["generate_password_api_v1_auth_generate_password_get"]; + put?: never; + post?: never; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/api/v1/auth/users": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + /** + * List Users + * @description List all users. Requires admin privileges. + * + * The internal 'system' user (created for backward compatibility) is excluded + * from the results since it cannot be managed through this interface. + * + * Returns: + * List of all real users (system user excluded) + */ + get: operations["list_users_api_v1_auth_users_get"]; + put?: never; + /** + * Create User + * @description Create a new user. Requires admin privileges. + * + * Args: + * request: New user details + * + * Returns: + * The created user + * + * Raises: + * HTTPException: 400 if email already exists or password is weak + */ + post: operations["create_user_api_v1_auth_users_post"]; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/api/v1/auth/users/{user_id}": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + /** + * Get User + * @description Get a user by ID. Requires admin privileges. + * + * Args: + * user_id: The user ID + * + * Returns: + * The user + * + * Raises: + * HTTPException: 404 if user not found + */ + get: operations["get_user_api_v1_auth_users__user_id__get"]; + put?: never; + post?: never; + /** + * Delete User + * @description Delete a user. Requires admin privileges. + * + * Admins can delete any user including other admins, but cannot delete the last + * remaining admin. 
+ * + * Args: + * user_id: The user ID + * + * Raises: + * HTTPException: 400 if attempting to delete the last admin + * HTTPException: 404 if user not found + */ + delete: operations["delete_user_api_v1_auth_users__user_id__delete"]; + options?: never; + head?: never; + /** + * Update User + * @description Update a user. Requires admin privileges. + * + * Args: + * user_id: The user ID + * request: Fields to update + * + * Returns: + * The updated user + * + * Raises: + * HTTPException: 400 if password is weak + * HTTPException: 404 if user not found + */ + patch: operations["update_user_api_v1_auth_users__user_id__patch"]; + trace?: never; + }; "/api/v1/utilities/dynamicprompts": { parameters: { query?: never; @@ -83,6 +369,27 @@ export type paths = { patch?: never; trace?: never; }; + "/api/v2/models/get_by_hash": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + /** + * Get Model Records By Hash + * @description Gets a model by its hash. This is useful for recalling models that were deleted and reinstalled, + * as the hash remains stable across reinstallations while the key (UUID) changes. + */ + get: operations["get_model_records_by_hash"]; + put?: never; + post?: never; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; "/api/v2/models/i/{key}": { parameters: { query?: never; @@ -231,6 +538,7 @@ export type paths = { * * "waiting" -- Job is waiting in the queue to run * * "downloading" -- Model file(s) are downloading * * "running" -- Model has downloaded and the model probing and registration process is running + * * "paused" -- Job is paused and can be resumed * * "completed" -- Installation completed successfully * * "error" -- An error occurred. Details will be in the "error_type" and "error" fields. * * "cancelled" -- Job was cancelled before completion. 
@@ -328,7 +636,7 @@ export type paths = { patch?: never; trace?: never; }; - "/api/v2/models/convert/{key}": { + "/api/v2/models/install/{id}/pause": { parameters: { query?: never; header?: never; @@ -336,58 +644,59 @@ export type paths = { cookie?: never; }; get?: never; + put?: never; /** - * Convert Model - * @description Permanently convert a model into diffusers format, replacing the safetensors version. - * Note that during the conversion process the key and model hash will change. - * The return value is the model configuration for the converted model. + * Pause Model Install Job + * @description Pause the model install job corresponding to the given job ID. */ - put: operations["convert_model"]; - post?: never; + post: operations["pause_model_install_job"]; delete?: never; options?: never; head?: never; patch?: never; trace?: never; }; - "/api/v2/models/starter_models": { + "/api/v2/models/install/{id}/resume": { parameters: { query?: never; header?: never; path?: never; cookie?: never; }; - /** Get Starter Models */ - get: operations["get_starter_models"]; + get?: never; put?: never; - post?: never; + /** + * Resume Model Install Job + * @description Resume a paused model install job corresponding to the given job ID. + */ + post: operations["resume_model_install_job"]; delete?: never; options?: never; head?: never; patch?: never; trace?: never; }; - "/api/v2/models/stats": { + "/api/v2/models/install/{id}/restart_failed": { parameters: { query?: never; header?: never; path?: never; cookie?: never; }; + get?: never; + put?: never; /** - * Get model manager RAM cache performance statistics. - * @description Return performance statistics on the model manager's RAM cache. Will return null if no models have been loaded. + * Restart Failed Model Install Job + * @description Restart failed or non-resumable file downloads for the given job. 
*/ - get: operations["get_stats"]; - put?: never; - post?: never; + post: operations["restart_failed_model_install_job"]; delete?: never; options?: never; head?: never; patch?: never; trace?: never; }; - "/api/v2/models/empty_model_cache": { + "/api/v2/models/install/{id}/restart_file": { parameters: { query?: never; header?: never; @@ -397,50 +706,129 @@ export type paths = { get?: never; put?: never; /** - * Empty Model Cache - * @description Drop all models from the model cache to free RAM/VRAM. 'Locked' models that are in active use will not be dropped. + * Restart Model Install File + * @description Restart a specific file download for the given job. */ - post: operations["empty_model_cache"]; + post: operations["restart_model_install_file"]; delete?: never; options?: never; head?: never; patch?: never; trace?: never; }; - "/api/v2/models/hf_login": { + "/api/v2/models/convert/{key}": { parameters: { query?: never; header?: never; path?: never; cookie?: never; }; - /** Get Hf Login Status */ - get: operations["get_hf_login_status"]; - put?: never; - /** Do Hf Login */ - post: operations["do_hf_login"]; - /** Reset Hf Token */ - delete: operations["reset_hf_token"]; + get?: never; + /** + * Convert Model + * @description Permanently convert a model into diffusers format, replacing the safetensors version. + * Note that during the conversion process the key and model hash will change. + * The return value is the model configuration for the converted model. + */ + put: operations["convert_model"]; + post?: never; + delete?: never; options?: never; head?: never; patch?: never; trace?: never; }; - "/api/v2/models/sync/orphaned": { + "/api/v2/models/starter_models": { parameters: { query?: never; header?: never; path?: never; cookie?: never; }; - /** - * Get Orphaned Models - * @description Find orphaned model directories. - * - * Orphaned models are directories in the models folder that contain model files - * but are not referenced in the database. 
This can happen when models are deleted - * from the database but the files remain on disk. - * + /** Get Starter Models */ + get: operations["get_starter_models"]; + put?: never; + post?: never; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/api/v2/models/stats": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + /** + * Get model manager RAM cache performance statistics. + * @description Return performance statistics on the model manager's RAM cache. Will return null if no models have been loaded. + */ + get: operations["get_stats"]; + put?: never; + post?: never; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/api/v2/models/empty_model_cache": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + get?: never; + put?: never; + /** + * Empty Model Cache + * @description Drop all models from the model cache to free RAM/VRAM. 'Locked' models that are in active use will not be dropped. + */ + post: operations["empty_model_cache"]; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/api/v2/models/hf_login": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + /** Get Hf Login Status */ + get: operations["get_hf_login_status"]; + put?: never; + /** Do Hf Login */ + post: operations["do_hf_login"]; + /** Reset Hf Token */ + delete: operations["reset_hf_token"]; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/api/v2/models/sync/orphaned": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + /** + * Get Orphaned Models + * @description Find orphaned model directories. + * + * Orphaned models are directories in the models folder that contain model files + * but are not referenced in the database. 
This can happen when models are deleted + * from the database but the files remain on disk. + * * Returns: * List of orphaned model directory information */ @@ -562,7 +950,7 @@ export type paths = { put?: never; /** * Upload Image - * @description Uploads an image + * @description Uploads an image for the current user */ post: operations["upload_image"]; delete?: never; @@ -580,7 +968,7 @@ export type paths = { }; /** * List Image Dtos - * @description Gets a list of image DTOs + * @description Gets a list of image DTOs for the current user */ get: operations["list_image_dtos"]; put?: never; @@ -905,13 +1293,13 @@ export type paths = { }; /** * List Boards - * @description Gets a list of boards + * @description Gets a list of boards for the current user, including shared boards. Admin users see all boards. */ get: operations["list_boards"]; put?: never; /** * Create Board - * @description Creates a board + * @description Creates a board for the current user */ post: operations["create_board"]; delete?: never; @@ -929,21 +1317,21 @@ export type paths = { }; /** * Get Board - * @description Gets a board + * @description Gets a board (user must have access to it) */ get: operations["get_board"]; put?: never; post?: never; /** * Delete Board - * @description Deletes a board + * @description Deletes a board (user must have access to it) */ delete: operations["delete_board"]; options?: never; head?: never; /** * Update Board - * @description Updates a board + * @description Updates a board (user must have access to it) */ patch: operations["update_board"]; trace?: never; @@ -1279,7 +1667,7 @@ export type paths = { put?: never; /** * Enqueue Batch - * @description Processes a batch and enqueues the output graphs for execution. + * @description Processes a batch and enqueues the output graphs for execution for the current user. 
*/ post: operations["enqueue_batch"]; delete?: never; @@ -1358,7 +1746,7 @@ export type paths = { get?: never; /** * Resume - * @description Resumes session processor + * @description Resumes session processor. Admin only. */ put: operations["resume"]; post?: never; @@ -1378,7 +1766,7 @@ export type paths = { get?: never; /** * Pause - * @description Pauses session processor + * @description Pauses session processor. Admin only. */ put: operations["pause"]; post?: never; @@ -1398,7 +1786,7 @@ export type paths = { get?: never; /** * Cancel All Except Current - * @description Immediately cancels all queue items except in-processing items + * @description Immediately cancels all queue items except in-processing items. Non-admin users can only cancel their own items. */ put: operations["cancel_all_except_current"]; post?: never; @@ -1418,7 +1806,7 @@ export type paths = { get?: never; /** * Delete All Except Current - * @description Immediately deletes all queue items except in-processing items + * @description Immediately deletes all queue items except in-processing items. Non-admin users can only delete their own items. */ put: operations["delete_all_except_current"]; post?: never; @@ -1438,7 +1826,7 @@ export type paths = { get?: never; /** * Cancel By Batch Ids - * @description Immediately cancels all queue items from the given batch ids + * @description Immediately cancels all queue items from the given batch ids. Non-admin users can only cancel their own items. */ put: operations["cancel_by_batch_ids"]; post?: never; @@ -1458,7 +1846,7 @@ export type paths = { get?: never; /** * Cancel By Destination - * @description Immediately cancels all queue items with the given origin + * @description Immediately cancels all queue items with the given destination. Non-admin users can only cancel their own items. 
*/ put: operations["cancel_by_destination"]; post?: never; @@ -1478,7 +1866,7 @@ export type paths = { get?: never; /** * Retry Items By Id - * @description Immediately cancels all queue items with the given origin + * @description Retries the given queue items. Users can only retry their own items unless they are an admin. */ put: operations["retry_items_by_id"]; post?: never; @@ -1498,7 +1886,7 @@ export type paths = { get?: never; /** * Clear - * @description Clears the queue entirely, immediately canceling the currently-executing session + * @description Clears the queue entirely. Admin users clear all items; non-admin users only clear their own items. If there's a currently-executing item, users can only cancel it if they own it or are an admin. */ put: operations["clear"]; post?: never; @@ -1518,7 +1906,7 @@ export type paths = { get?: never; /** * Prune - * @description Prunes all completed or errored queue items + * @description Prunes all completed or errored queue items. Non-admin users can only prune their own items. */ put: operations["prune"]; post?: never; @@ -1624,7 +2012,7 @@ export type paths = { post?: never; /** * Delete Queue Item - * @description Deletes a queue item + * @description Deletes a queue item. Users can only delete their own items unless they are an admin. */ delete: operations["delete_queue_item"]; options?: never; @@ -1642,7 +2030,7 @@ export type paths = { get?: never; /** * Cancel Queue Item - * @description Deletes a queue item + * @description Cancels a queue item. Users can only cancel their own items unless they are an admin. */ put: operations["cancel_queue_item"]; post?: never; @@ -1684,7 +2072,7 @@ export type paths = { post?: never; /** * Delete By Destination - * @description Deletes all items with the given destination + * @description Deletes all items with the given destination. Non-admin users can only delete their own items. 
*/ delete: operations["delete_by_destination"]; options?: never; @@ -1967,7 +2355,7 @@ export type paths = { }; /** * Get Client State By Key - * @description Gets the client state + * @description Gets the client state for the current user (or system user if not authenticated) */ get: operations["get_client_state_by_key"]; put?: never; @@ -1989,7 +2377,7 @@ export type paths = { put?: never; /** * Set Client State - * @description Sets the client state + * @description Sets the client state for the current user (or system user if not authenticated) */ post: operations["set_client_state"]; delete?: never; @@ -2009,7 +2397,7 @@ export type paths = { put?: never; /** * Delete Client State - * @description Deletes the client state + * @description Deletes the client state for the current user (or system user if not authenticated) */ post: operations["delete_client_state"]; delete?: never; @@ -2018,6 +2406,61 @@ export type paths = { patch?: never; trace?: never; }; + "/api/v1/recall/{queue_id}": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + /** + * Get Recall Parameters + * @description Retrieve all stored recall parameters for a given queue. + * + * Returns a dictionary of all recall parameters that have been set for the queue. + * + * Args: + * queue_id: The queue ID to retrieve parameters for + * + * Returns: + * A dictionary containing all stored recall parameters + */ + get: operations["get_recall_parameters"]; + put?: never; + /** + * Update Recall Parameters + * @description Update recallable parameters that can be recalled on the frontend. + * + * This endpoint allows updating parameters such as prompt, model, steps, and other + * generation settings. These parameters are stored in client state and can be + * accessed by the frontend to populate UI elements. 
+ * + * Args: + * queue_id: The queue ID to associate these parameters with + * parameters: The RecallParameter object containing the parameters to update + * + * Returns: + * A dictionary containing the updated parameters and status + * + * Example: + * POST /api/v1/recall/{queue_id} + * { + * "positive_prompt": "a beautiful landscape", + * "model": "sd-1.5", + * "steps": 20, + * "cfg_scale": 7.5, + * "width": 512, + * "height": 512, + * "seed": 12345 + * } + */ + post: operations["update_recall_parameters"]; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; }; export type webhooks = Record; export type components = { @@ -2076,6 +2519,59 @@ export type components = { */ type: "add"; }; + /** + * AdminUserCreateRequest + * @description Request body for admin to create a new user. + */ + AdminUserCreateRequest: { + /** + * Email + * @description User email address + */ + email: string; + /** + * Display Name + * @description Display name + */ + display_name?: string | null; + /** + * Password + * @description User password + */ + password: string; + /** + * Is Admin + * @description Whether user should have admin privileges + * @default false + */ + is_admin?: boolean; + }; + /** + * AdminUserUpdateRequest + * @description Request body for admin to update any user. + */ + AdminUserUpdateRequest: { + /** + * Display Name + * @description Display name + */ + display_name?: string | null; + /** + * Password + * @description New password + */ + password?: string | null; + /** + * Is Admin + * @description Whether user should have admin privileges + */ + is_admin?: boolean | null; + /** + * Is Active + * @description Whether user account should be active + */ + is_active?: boolean | null; + }; /** * Alpha Mask to Tensor * @description Convert a mask image to a tensor. Opaque regions are 1 and transparent regions are 0. 
@@ -2233,8 +2729,8 @@ export type components = { * Latents to Image - Anima * @description Generates an image from latents using the Anima VAE. * - * Supports both the Wan 2.1 QwenImage VAE (AutoencoderKL with mean/std normalization) - * and FLUX VAE (FluxAutoEncoder) as fallback. + * Supports the Wan 2.1 QwenImage VAE (AutoencoderKLWan) with explicit + * latent denormalization, and FLUX VAE as fallback. */ AnimaLatentsToImageInvocation: { /** @@ -2404,7 +2900,7 @@ export type components = { */ type: "anima_text_encoder"; }; - AnyModelConfig: components["schemas"]["Main_Diffusers_SD1_Config"] | components["schemas"]["Main_Diffusers_SD2_Config"] | components["schemas"]["Main_Diffusers_SDXL_Config"] | components["schemas"]["Main_Diffusers_SDXLRefiner_Config"] | components["schemas"]["Main_Diffusers_SD3_Config"] | components["schemas"]["Main_Diffusers_FLUX_Config"] | components["schemas"]["Main_Diffusers_Flux2_Config"] | components["schemas"]["Main_Diffusers_CogView4_Config"] | components["schemas"]["Main_Diffusers_ZImage_Config"] | components["schemas"]["Main_Checkpoint_SD1_Config"] | components["schemas"]["Main_Checkpoint_SD2_Config"] | components["schemas"]["Main_Checkpoint_SDXL_Config"] | components["schemas"]["Main_Checkpoint_SDXLRefiner_Config"] | components["schemas"]["Main_Checkpoint_Flux2_Config"] | components["schemas"]["Main_Checkpoint_FLUX_Config"] | components["schemas"]["Main_Checkpoint_ZImage_Config"] | components["schemas"]["Main_Checkpoint_Anima_Config"] | components["schemas"]["Main_BnBNF4_FLUX_Config"] | components["schemas"]["Main_GGUF_Flux2_Config"] | components["schemas"]["Main_GGUF_FLUX_Config"] | components["schemas"]["Main_GGUF_ZImage_Config"] | components["schemas"]["VAE_Checkpoint_SD1_Config"] | components["schemas"]["VAE_Checkpoint_SD2_Config"] | components["schemas"]["VAE_Checkpoint_SDXL_Config"] | components["schemas"]["VAE_Checkpoint_FLUX_Config"] | components["schemas"]["VAE_Checkpoint_Flux2_Config"] | 
components["schemas"]["VAE_Checkpoint_Anima_Config"] | components["schemas"]["VAE_Diffusers_SD1_Config"] | components["schemas"]["VAE_Diffusers_SDXL_Config"] | components["schemas"]["VAE_Diffusers_Flux2_Config"] | components["schemas"]["ControlNet_Checkpoint_SD1_Config"] | components["schemas"]["ControlNet_Checkpoint_SD2_Config"] | components["schemas"]["ControlNet_Checkpoint_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_FLUX_Config"] | components["schemas"]["ControlNet_Checkpoint_ZImage_Config"] | components["schemas"]["ControlNet_Diffusers_SD1_Config"] | components["schemas"]["ControlNet_Diffusers_SD2_Config"] | components["schemas"]["ControlNet_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_SD1_Config"] | components["schemas"]["LoRA_LyCORIS_SD2_Config"] | components["schemas"]["LoRA_LyCORIS_SDXL_Config"] | components["schemas"]["LoRA_LyCORIS_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_ZImage_Config"] | components["schemas"]["LoRA_OMI_SDXL_Config"] | components["schemas"]["LoRA_OMI_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_SD1_Config"] | components["schemas"]["LoRA_Diffusers_SD2_Config"] | components["schemas"]["LoRA_Diffusers_SDXL_Config"] | components["schemas"]["LoRA_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_ZImage_Config"] | components["schemas"]["ControlLoRA_LyCORIS_FLUX_Config"] | components["schemas"]["T5Encoder_T5Encoder_Config"] | components["schemas"]["T5Encoder_BnBLLMint8_Config"] | components["schemas"]["Qwen3Encoder_Qwen3Encoder_Config"] | components["schemas"]["Qwen3Encoder_Checkpoint_Config"] | components["schemas"]["Qwen3Encoder_GGUF_Config"] | components["schemas"]["TI_File_SD1_Config"] | components["schemas"]["TI_File_SD2_Config"] | components["schemas"]["TI_File_SDXL_Config"] | components["schemas"]["TI_Folder_SD1_Config"] | components["schemas"]["TI_Folder_SD2_Config"] | 
components["schemas"]["TI_Folder_SDXL_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD1_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD2_Config"] | components["schemas"]["IPAdapter_InvokeAI_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD1_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD2_Config"] | components["schemas"]["IPAdapter_Checkpoint_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_FLUX_Config"] | components["schemas"]["T2IAdapter_Diffusers_SD1_Config"] | components["schemas"]["T2IAdapter_Diffusers_SDXL_Config"] | components["schemas"]["Spandrel_Checkpoint_Config"] | components["schemas"]["CLIPEmbed_Diffusers_G_Config"] | components["schemas"]["CLIPEmbed_Diffusers_L_Config"] | components["schemas"]["CLIPVision_Diffusers_Config"] | components["schemas"]["SigLIP_Diffusers_Config"] | components["schemas"]["FLUXRedux_Checkpoint_Config"] | components["schemas"]["LlavaOnevision_Diffusers_Config"] | components["schemas"]["Unknown_Config"]; + AnyModelConfig: components["schemas"]["Main_Diffusers_SD1_Config"] | components["schemas"]["Main_Diffusers_SD2_Config"] | components["schemas"]["Main_Diffusers_SDXL_Config"] | components["schemas"]["Main_Diffusers_SDXLRefiner_Config"] | components["schemas"]["Main_Diffusers_SD3_Config"] | components["schemas"]["Main_Diffusers_FLUX_Config"] | components["schemas"]["Main_Diffusers_Flux2_Config"] | components["schemas"]["Main_Diffusers_CogView4_Config"] | components["schemas"]["Main_Diffusers_ZImage_Config"] | components["schemas"]["Main_Checkpoint_SD1_Config"] | components["schemas"]["Main_Checkpoint_SD2_Config"] | components["schemas"]["Main_Checkpoint_SDXL_Config"] | components["schemas"]["Main_Checkpoint_SDXLRefiner_Config"] | components["schemas"]["Main_Checkpoint_Flux2_Config"] | components["schemas"]["Main_Checkpoint_FLUX_Config"] | components["schemas"]["Main_Checkpoint_ZImage_Config"] | components["schemas"]["Main_Checkpoint_Anima_Config"] | 
components["schemas"]["Main_BnBNF4_FLUX_Config"] | components["schemas"]["Main_GGUF_Flux2_Config"] | components["schemas"]["Main_GGUF_FLUX_Config"] | components["schemas"]["Main_GGUF_ZImage_Config"] | components["schemas"]["VAE_Checkpoint_SD1_Config"] | components["schemas"]["VAE_Checkpoint_SD2_Config"] | components["schemas"]["VAE_Checkpoint_SDXL_Config"] | components["schemas"]["VAE_Checkpoint_FLUX_Config"] | components["schemas"]["VAE_Checkpoint_Flux2_Config"] | components["schemas"]["VAE_Checkpoint_Anima_Config"] | components["schemas"]["VAE_Diffusers_SD1_Config"] | components["schemas"]["VAE_Diffusers_SDXL_Config"] | components["schemas"]["VAE_Diffusers_Flux2_Config"] | components["schemas"]["ControlNet_Checkpoint_SD1_Config"] | components["schemas"]["ControlNet_Checkpoint_SD2_Config"] | components["schemas"]["ControlNet_Checkpoint_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_FLUX_Config"] | components["schemas"]["ControlNet_Checkpoint_ZImage_Config"] | components["schemas"]["ControlNet_Diffusers_SD1_Config"] | components["schemas"]["ControlNet_Diffusers_SD2_Config"] | components["schemas"]["ControlNet_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_SD1_Config"] | components["schemas"]["LoRA_LyCORIS_SD2_Config"] | components["schemas"]["LoRA_LyCORIS_SDXL_Config"] | components["schemas"]["LoRA_LyCORIS_Flux2_Config"] | components["schemas"]["LoRA_LyCORIS_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_ZImage_Config"] | components["schemas"]["LoRA_OMI_SDXL_Config"] | components["schemas"]["LoRA_OMI_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_SD1_Config"] | components["schemas"]["LoRA_Diffusers_SD2_Config"] | components["schemas"]["LoRA_Diffusers_SDXL_Config"] | components["schemas"]["LoRA_Diffusers_Flux2_Config"] | components["schemas"]["LoRA_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_ZImage_Config"] | 
components["schemas"]["ControlLoRA_LyCORIS_FLUX_Config"] | components["schemas"]["T5Encoder_T5Encoder_Config"] | components["schemas"]["T5Encoder_BnBLLMint8_Config"] | components["schemas"]["Qwen3Encoder_Qwen3Encoder_Config"] | components["schemas"]["Qwen3Encoder_Checkpoint_Config"] | components["schemas"]["Qwen3Encoder_GGUF_Config"] | components["schemas"]["TI_File_SD1_Config"] | components["schemas"]["TI_File_SD2_Config"] | components["schemas"]["TI_File_SDXL_Config"] | components["schemas"]["TI_Folder_SD1_Config"] | components["schemas"]["TI_Folder_SD2_Config"] | components["schemas"]["TI_Folder_SDXL_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD1_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD2_Config"] | components["schemas"]["IPAdapter_InvokeAI_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD1_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD2_Config"] | components["schemas"]["IPAdapter_Checkpoint_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_FLUX_Config"] | components["schemas"]["T2IAdapter_Diffusers_SD1_Config"] | components["schemas"]["T2IAdapter_Diffusers_SDXL_Config"] | components["schemas"]["Spandrel_Checkpoint_Config"] | components["schemas"]["CLIPEmbed_Diffusers_G_Config"] | components["schemas"]["CLIPEmbed_Diffusers_L_Config"] | components["schemas"]["CLIPVision_Diffusers_Config"] | components["schemas"]["SigLIP_Diffusers_Config"] | components["schemas"]["FLUXRedux_Checkpoint_Config"] | components["schemas"]["LlavaOnevision_Diffusers_Config"] | components["schemas"]["Unknown_Config"]; /** * AppVersion * @description App Version Response @@ -2855,6 +3351,11 @@ export type components = { * @description The name of the board. */ board_name: string; + /** + * User Id + * @description The user ID of the board owner. + */ + user_id: string; /** * Created At * @description The created timestamp of the board. 
@@ -2890,6 +3391,11 @@ export type components = { * @description The number of assets in the board. */ asset_count: number; + /** + * Owner Username + * @description The username of the board owner (for admin view). + */ + owner_username?: string | null; }; /** * BoardField @@ -5460,6 +5966,43 @@ export type components = { */ resize_mode?: "just_resize" | "crop_resize" | "fill_resize" | "just_resize_simple"; }; + /** + * ControlNetRecallParameter + * @description ControlNet configuration for recall + */ + ControlNetRecallParameter: { + /** + * Model Name + * @description The name of the ControlNet/T2I Adapter/Control LoRA model + */ + model_name: string; + /** + * Image Name + * @description The filename of the control image in outputs/images + */ + image_name?: string | null; + /** + * Weight + * @description The weight for the control adapter + * @default 1 + */ + weight?: number; + /** + * Begin Step Percent + * @description When the control adapter is first applied (% of total steps) + */ + begin_step_percent?: number | null; + /** + * End Step Percent + * @description When the control adapter is last applied (% of total steps) + */ + end_step_percent?: number | null; + /** + * Control Mode + * @description The control mode (ControlNet only) + */ + control_mode?: ("balanced" | "more_prompt" | "more_control") | null; + }; /** ControlNet_Checkpoint_FLUX_Config */ ControlNet_Checkpoint_FLUX_Config: { /** @@ -7323,13 +7866,71 @@ export type components = { * @description Content type of downloaded file */ content_type?: string | null; + /** + * Canonical Url + * @description Canonical URL to request on resume + */ + canonical_url?: string | null; + /** + * Etag + * @description ETag from the remote server, if available + */ + etag?: string | null; + /** + * Last Modified + * @description Last-Modified from the remote server, if available + */ + last_modified?: string | null; + /** + * Final Url + * @description Final resolved URL after redirects, if available + */ + 
final_url?: string | null; + /** + * Expected Total Bytes + * @description Expected total size of the download + */ + expected_total_bytes?: number | null; + /** + * Resume Required + * @description True if server refused resume; restart required + * @default false + */ + resume_required?: boolean; + /** + * Resume Message + * @description Message explaining why resume is required + */ + resume_message?: string | null; + /** + * Resume From Scratch + * @description True if resume metadata existed but the partial file was missing and the download restarted from the beginning + * @default false + */ + resume_from_scratch?: boolean; }; /** * DownloadJobStatus * @description State of a download job. * @enum {string} */ - DownloadJobStatus: "waiting" | "running" | "completed" | "cancelled" | "error"; + DownloadJobStatus: "waiting" | "running" | "paused" | "completed" | "cancelled" | "error"; + /** + * DownloadPausedEvent + * @description Event model for download_paused + */ + DownloadPausedEvent: { + /** + * Timestamp + * @description The timestamp of the event + */ + timestamp: number; + /** + * Source + * @description The source of the download + */ + source: string; + }; /** * DownloadProgressEvent * @description Event model for download_progress @@ -8486,19 +9087,10 @@ export type components = { type: "flux2_denoise"; }; /** - * Main Model - Flux2 Klein - * @description Loads a Flux2 Klein model, outputting its submodels. - * - * Flux2 Klein uses Qwen3 as the text encoder instead of CLIP+T5. - * It uses a 32-channel VAE (AutoencoderKLFlux2) instead of the 16-channel FLUX.1 VAE. - * - * When using a Diffusers format model, both VAE and Qwen3 encoder are extracted - * automatically from the main model. 
You can override with standalone models: - * - Transformer: Always from Flux2 Klein main model - * - VAE: From main model (Diffusers) or standalone VAE - * - Qwen3 Encoder: From main model (Diffusers) or standalone Qwen3 model + * Apply LoRA Collection - Flux2 Klein + * @description Applies a collection of LoRAs to a FLUX.2 Klein transformer and/or Qwen3 text encoder. */ - Flux2KleinModelLoaderInvocation: { + Flux2KleinLoRACollectionLoader: { /** * Id * @description The id of this instance of an invocation. Must be unique among all instances of invocations. @@ -8517,62 +9109,195 @@ export type components = { */ use_cache?: boolean; /** - * Transformer - * @description Flux model (Transformer) to load - */ - model: components["schemas"]["ModelIdentifierField"]; - /** - * VAE - * @description Standalone VAE model. Flux2 Klein uses the same VAE as FLUX (16-channel). If not provided, VAE will be loaded from the Qwen3 Source model. + * LoRAs + * @description LoRA models and weights. May be a single LoRA or collection. * @default null */ - vae_model?: components["schemas"]["ModelIdentifierField"] | null; + loras?: components["schemas"]["LoRAField"] | components["schemas"]["LoRAField"][] | null; /** - * Qwen3 Encoder - * @description Standalone Qwen3 Encoder model. If not provided, encoder will be loaded from the Qwen3 Source model. + * Transformer + * @description Transformer * @default null */ - qwen3_encoder_model?: components["schemas"]["ModelIdentifierField"] | null; + transformer?: components["schemas"]["TransformerField"] | null; /** - * Qwen3 Source (Diffusers) - * @description Diffusers Flux2 Klein model to extract VAE and/or Qwen3 encoder from. Use this if you don't have separate VAE/Qwen3 models. Ignored if both VAE and Qwen3 Encoder are provided separately. 
+ * Qwen3 Encoder + * @description Qwen3 tokenizer and text encoder * @default null */ - qwen3_source_model?: components["schemas"]["ModelIdentifierField"] | null; - /** - * Max Seq Length - * @description Max sequence length for the Qwen3 encoder. - * @default 512 - * @enum {integer} - */ - max_seq_len?: 256 | 512; + qwen3_encoder?: components["schemas"]["Qwen3EncoderField"] | null; /** * type - * @default flux2_klein_model_loader + * @default flux2_klein_lora_collection_loader * @constant */ - type: "flux2_klein_model_loader"; + type: "flux2_klein_lora_collection_loader"; }; /** - * Flux2KleinModelLoaderOutput - * @description Flux2 Klein model loader output. + * Apply LoRA - Flux2 Klein + * @description Apply a LoRA model to a FLUX.2 Klein transformer and/or Qwen3 text encoder. */ - Flux2KleinModelLoaderOutput: { + Flux2KleinLoRALoaderInvocation: { /** - * Transformer - * @description Transformer + * Id + * @description The id of this instance of an invocation. Must be unique among all instances of invocations. */ - transformer: components["schemas"]["TransformerField"]; + id: string; /** - * Qwen3 Encoder - * @description Qwen3 tokenizer and text encoder + * Is Intermediate + * @description Whether or not this is an intermediate invocation. 
+ * @default false */ - qwen3_encoder: components["schemas"]["Qwen3EncoderField"]; + is_intermediate?: boolean; /** - * VAE - * @description VAE - */ - vae: components["schemas"]["VAEField"]; + * Use Cache + * @description Whether or not to use the cache + * @default true + */ + use_cache?: boolean; + /** + * LoRA + * @description LoRA model to load + * @default null + */ + lora?: components["schemas"]["ModelIdentifierField"] | null; + /** + * Weight + * @description The weight at which the LoRA is applied to each model + * @default 0.75 + */ + weight?: number; + /** + * Transformer + * @description Transformer + * @default null + */ + transformer?: components["schemas"]["TransformerField"] | null; + /** + * Qwen3 Encoder + * @description Qwen3 tokenizer and text encoder + * @default null + */ + qwen3_encoder?: components["schemas"]["Qwen3EncoderField"] | null; + /** + * type + * @default flux2_klein_lora_loader + * @constant + */ + type: "flux2_klein_lora_loader"; + }; + /** + * Flux2KleinLoRALoaderOutput + * @description FLUX.2 Klein LoRA Loader Output + */ + Flux2KleinLoRALoaderOutput: { + /** + * Transformer + * @description Transformer + * @default null + */ + transformer: components["schemas"]["TransformerField"] | null; + /** + * Qwen3 Encoder + * @description Qwen3 tokenizer and text encoder + * @default null + */ + qwen3_encoder: components["schemas"]["Qwen3EncoderField"] | null; + /** + * type + * @default flux2_klein_lora_loader_output + * @constant + */ + type: "flux2_klein_lora_loader_output"; + }; + /** + * Main Model - Flux2 Klein + * @description Loads a Flux2 Klein model, outputting its submodels. + * + * Flux2 Klein uses Qwen3 as the text encoder instead of CLIP+T5. + * It uses a 32-channel VAE (AutoencoderKLFlux2) instead of the 16-channel FLUX.1 VAE. + * + * When using a Diffusers format model, both VAE and Qwen3 encoder are extracted + * automatically from the main model. 
You can override with standalone models: + * - Transformer: Always from Flux2 Klein main model + * - VAE: From main model (Diffusers) or standalone VAE + * - Qwen3 Encoder: From main model (Diffusers) or standalone Qwen3 model + */ + Flux2KleinModelLoaderInvocation: { + /** + * Id + * @description The id of this instance of an invocation. Must be unique among all instances of invocations. + */ + id: string; + /** + * Is Intermediate + * @description Whether or not this is an intermediate invocation. + * @default false + */ + is_intermediate?: boolean; + /** + * Use Cache + * @description Whether or not to use the cache + * @default true + */ + use_cache?: boolean; + /** + * Transformer + * @description Flux model (Transformer) to load + */ + model: components["schemas"]["ModelIdentifierField"]; + /** + * VAE + * @description Standalone VAE model. Flux2 Klein uses the same VAE as FLUX (16-channel). If not provided, VAE will be loaded from the Qwen3 Source model. + * @default null + */ + vae_model?: components["schemas"]["ModelIdentifierField"] | null; + /** + * Qwen3 Encoder + * @description Standalone Qwen3 Encoder model. If not provided, encoder will be loaded from the Qwen3 Source model. + * @default null + */ + qwen3_encoder_model?: components["schemas"]["ModelIdentifierField"] | null; + /** + * Qwen3 Source (Diffusers) + * @description Diffusers Flux2 Klein model to extract VAE and/or Qwen3 encoder from. Use this if you don't have separate VAE/Qwen3 models. Ignored if both VAE and Qwen3 Encoder are provided separately. + * @default null + */ + qwen3_source_model?: components["schemas"]["ModelIdentifierField"] | null; + /** + * Max Seq Length + * @description Max sequence length for the Qwen3 encoder. 
+ * @default 512 + * @enum {integer} + */ + max_seq_len?: 256 | 512; + /** + * type + * @default flux2_klein_model_loader + * @constant + */ + type: "flux2_klein_model_loader"; + }; + /** + * Flux2KleinModelLoaderOutput + * @description Flux2 Klein model loader output. + */ + Flux2KleinModelLoaderOutput: { + /** + * Transformer + * @description Transformer + */ + transformer: components["schemas"]["TransformerField"]; + /** + * Qwen3 Encoder + * @description Qwen3 tokenizer and text encoder + */ + qwen3_encoder: components["schemas"]["Qwen3EncoderField"]; + /** + * VAE + * @description VAE + */ + vae: components["schemas"]["VAEField"]; /** * Max Seq Length * @description The max sequence length for the Qwen3 encoder. @@ -10124,6 +10849,17 @@ export type components = { */ type: "freeu"; }; + /** + * GeneratePasswordResponse + * @description Response containing a generated password. + */ + GeneratePasswordResponse: { + /** + * Password + * @description Generated strong password + */ + password: string; + }; /** * Get Image Mask Bounding Box * @description Gets the bounding box of the given mask image. 
@@ -10209,7 +10945,7 @@ export type components = { * @description The nodes in this graph */ nodes?: { - [key: string]: components["schemas"]["AddInvocation"] | components["schemas"]["AlphaMaskToTensorInvocation"] | components["schemas"]["AnimaDenoiseInvocation"] | components["schemas"]["AnimaLatentsToImageInvocation"] | components["schemas"]["AnimaModelLoaderInvocation"] | components["schemas"]["AnimaTextEncoderInvocation"] | components["schemas"]["ApplyMaskTensorToImageInvocation"] | components["schemas"]["ApplyMaskToImageInvocation"] | components["schemas"]["BlankImageInvocation"] | components["schemas"]["BlendLatentsInvocation"] | components["schemas"]["BooleanCollectionInvocation"] | components["schemas"]["BooleanInvocation"] | components["schemas"]["BoundingBoxInvocation"] | components["schemas"]["CLIPSkipInvocation"] | components["schemas"]["CV2InfillInvocation"] | components["schemas"]["CalculateImageTilesEvenSplitInvocation"] | components["schemas"]["CalculateImageTilesInvocation"] | components["schemas"]["CalculateImageTilesMinimumOverlapInvocation"] | components["schemas"]["CannyEdgeDetectionInvocation"] | components["schemas"]["CanvasPasteBackInvocation"] | components["schemas"]["CanvasV2MaskAndCropInvocation"] | components["schemas"]["CenterPadCropInvocation"] | components["schemas"]["CogView4DenoiseInvocation"] | components["schemas"]["CogView4ImageToLatentsInvocation"] | components["schemas"]["CogView4LatentsToImageInvocation"] | components["schemas"]["CogView4ModelLoaderInvocation"] | components["schemas"]["CogView4TextEncoderInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["ColorInvocation"] | components["schemas"]["ColorMapInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["ConditioningCollectionInvocation"] | components["schemas"]["ConditioningInvocation"] | components["schemas"]["ContentShuffleInvocation"] | 
components["schemas"]["ControlNetInvocation"] | components["schemas"]["CoreMetadataInvocation"] | components["schemas"]["CreateDenoiseMaskInvocation"] | components["schemas"]["CreateGradientMaskInvocation"] | components["schemas"]["CropImageToBoundingBoxInvocation"] | components["schemas"]["CropLatentsCoreInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["DWOpenposeDetectionInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["DenoiseLatentsMetaInvocation"] | components["schemas"]["DepthAnythingDepthEstimationInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["ExpandMaskWithFadeInvocation"] | components["schemas"]["FLUXLoRACollectionLoader"] | components["schemas"]["FaceIdentifierInvocation"] | components["schemas"]["FaceMaskInvocation"] | components["schemas"]["FaceOffInvocation"] | components["schemas"]["FloatBatchInvocation"] | components["schemas"]["FloatCollectionInvocation"] | components["schemas"]["FloatGenerator"] | components["schemas"]["FloatInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["FloatMathInvocation"] | components["schemas"]["FloatToIntegerInvocation"] | components["schemas"]["Flux2DenoiseInvocation"] | components["schemas"]["Flux2KleinModelLoaderInvocation"] | components["schemas"]["Flux2KleinTextEncoderInvocation"] | components["schemas"]["Flux2VaeDecodeInvocation"] | components["schemas"]["Flux2VaeEncodeInvocation"] | components["schemas"]["FluxControlLoRALoaderInvocation"] | components["schemas"]["FluxControlNetInvocation"] | components["schemas"]["FluxDenoiseInvocation"] | components["schemas"]["FluxDenoiseLatentsMetaInvocation"] | components["schemas"]["FluxFillInvocation"] | components["schemas"]["FluxIPAdapterInvocation"] | components["schemas"]["FluxKontextConcatenateImagesInvocation"] | 
components["schemas"]["FluxKontextInvocation"] | components["schemas"]["FluxLoRALoaderInvocation"] | components["schemas"]["FluxModelLoaderInvocation"] | components["schemas"]["FluxReduxInvocation"] | components["schemas"]["FluxTextEncoderInvocation"] | components["schemas"]["FluxVaeDecodeInvocation"] | components["schemas"]["FluxVaeEncodeInvocation"] | components["schemas"]["FreeUInvocation"] | components["schemas"]["GetMaskBoundingBoxInvocation"] | components["schemas"]["GroundingDinoInvocation"] | components["schemas"]["HEDEdgeDetectionInvocation"] | components["schemas"]["HeuristicResizeInvocation"] | components["schemas"]["IPAdapterInvocation"] | components["schemas"]["IdealSizeInvocation"] | components["schemas"]["ImageBatchInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["ImageChannelMultiplyInvocation"] | components["schemas"]["ImageChannelOffsetInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImageGenerator"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ImageInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ImageMaskToTensorInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["ImageNoiseInvocation"] | components["schemas"]["ImagePanelLayoutInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | 
components["schemas"]["InfillTileInvocation"] | components["schemas"]["IntegerBatchInvocation"] | components["schemas"]["IntegerCollectionInvocation"] | components["schemas"]["IntegerGenerator"] | components["schemas"]["IntegerInvocation"] | components["schemas"]["IntegerMathInvocation"] | components["schemas"]["InvertTensorMaskInvocation"] | components["schemas"]["InvokeAdjustImageHuePlusInvocation"] | components["schemas"]["InvokeEquivalentAchromaticLightnessInvocation"] | components["schemas"]["InvokeImageBlendInvocation"] | components["schemas"]["InvokeImageCompositorInvocation"] | components["schemas"]["InvokeImageDilateOrErodeInvocation"] | components["schemas"]["InvokeImageEnhanceInvocation"] | components["schemas"]["InvokeImageValueThresholdsInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["LaMaInfillInvocation"] | components["schemas"]["LatentsCollectionInvocation"] | components["schemas"]["LatentsInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["LineartAnimeEdgeDetectionInvocation"] | components["schemas"]["LineartEdgeDetectionInvocation"] | components["schemas"]["LlavaOnevisionVllmInvocation"] | components["schemas"]["LoRACollectionLoader"] | components["schemas"]["LoRALoaderInvocation"] | components["schemas"]["LoRASelectorInvocation"] | components["schemas"]["MLSDDetectionInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["MaskFromIDInvocation"] | components["schemas"]["MaskTensorToImageInvocation"] | components["schemas"]["MediaPipeFaceDetectionInvocation"] | components["schemas"]["MergeMetadataInvocation"] | components["schemas"]["MergeTilesToImageInvocation"] | components["schemas"]["MetadataFieldExtractorInvocation"] | components["schemas"]["MetadataFromImageInvocation"] | 
components["schemas"]["MetadataInvocation"] | components["schemas"]["MetadataItemInvocation"] | components["schemas"]["MetadataItemLinkedInvocation"] | components["schemas"]["MetadataToBoolCollectionInvocation"] | components["schemas"]["MetadataToBoolInvocation"] | components["schemas"]["MetadataToControlnetsInvocation"] | components["schemas"]["MetadataToFloatCollectionInvocation"] | components["schemas"]["MetadataToFloatInvocation"] | components["schemas"]["MetadataToIPAdaptersInvocation"] | components["schemas"]["MetadataToIntegerCollectionInvocation"] | components["schemas"]["MetadataToIntegerInvocation"] | components["schemas"]["MetadataToLorasCollectionInvocation"] | components["schemas"]["MetadataToLorasInvocation"] | components["schemas"]["MetadataToModelInvocation"] | components["schemas"]["MetadataToSDXLLorasInvocation"] | components["schemas"]["MetadataToSDXLModelInvocation"] | components["schemas"]["MetadataToSchedulerInvocation"] | components["schemas"]["MetadataToStringCollectionInvocation"] | components["schemas"]["MetadataToStringInvocation"] | components["schemas"]["MetadataToT2IAdaptersInvocation"] | components["schemas"]["MetadataToVAEInvocation"] | components["schemas"]["ModelIdentifierInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["NormalMapInvocation"] | components["schemas"]["PBRMapsInvocation"] | components["schemas"]["PairTileImageInvocation"] | components["schemas"]["PasteImageIntoBoundingBoxInvocation"] | components["schemas"]["PiDiNetEdgeDetectionInvocation"] | components["schemas"]["PromptTemplateInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["RandomFloatInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RectangleMaskInvocation"] | 
components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["RoundInvocation"] | components["schemas"]["SD3DenoiseInvocation"] | components["schemas"]["SD3ImageToLatentsInvocation"] | components["schemas"]["SD3LatentsToImageInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["SDXLLoRACollectionLoader"] | components["schemas"]["SDXLLoRALoaderInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["SaveImageInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["SchedulerInvocation"] | components["schemas"]["Sd3ModelLoaderInvocation"] | components["schemas"]["Sd3TextEncoderInvocation"] | components["schemas"]["SeamlessModeInvocation"] | components["schemas"]["SegmentAnythingInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["SpandrelImageToImageAutoscaleInvocation"] | components["schemas"]["SpandrelImageToImageInvocation"] | components["schemas"]["StringBatchInvocation"] | components["schemas"]["StringCollectionInvocation"] | components["schemas"]["StringGenerator"] | components["schemas"]["StringInvocation"] | components["schemas"]["StringJoinInvocation"] | components["schemas"]["StringJoinThreeInvocation"] | components["schemas"]["StringReplaceInvocation"] | components["schemas"]["StringSplitInvocation"] | components["schemas"]["StringSplitNegInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["T2IAdapterInvocation"] | components["schemas"]["TileToPropertiesInvocation"] | components["schemas"]["TiledMultiDiffusionDenoiseLatents"] | components["schemas"]["UnsharpMaskInvocation"] | components["schemas"]["VAELoaderInvocation"] | components["schemas"]["ZImageControlInvocation"] | components["schemas"]["ZImageDenoiseInvocation"] | 
components["schemas"]["ZImageDenoiseMetaInvocation"] | components["schemas"]["ZImageImageToLatentsInvocation"] | components["schemas"]["ZImageLatentsToImageInvocation"] | components["schemas"]["ZImageLoRACollectionLoader"] | components["schemas"]["ZImageLoRALoaderInvocation"] | components["schemas"]["ZImageModelLoaderInvocation"] | components["schemas"]["ZImageSeedVarianceEnhancerInvocation"] | components["schemas"]["ZImageTextEncoderInvocation"]; + [key: string]: components["schemas"]["AddInvocation"] | components["schemas"]["AlphaMaskToTensorInvocation"] | components["schemas"]["AnimaDenoiseInvocation"] | components["schemas"]["AnimaLatentsToImageInvocation"] | components["schemas"]["AnimaModelLoaderInvocation"] | components["schemas"]["AnimaTextEncoderInvocation"] | components["schemas"]["ApplyMaskTensorToImageInvocation"] | components["schemas"]["ApplyMaskToImageInvocation"] | components["schemas"]["BlankImageInvocation"] | components["schemas"]["BlendLatentsInvocation"] | components["schemas"]["BooleanCollectionInvocation"] | components["schemas"]["BooleanInvocation"] | components["schemas"]["BoundingBoxInvocation"] | components["schemas"]["CLIPSkipInvocation"] | components["schemas"]["CV2InfillInvocation"] | components["schemas"]["CalculateImageTilesEvenSplitInvocation"] | components["schemas"]["CalculateImageTilesInvocation"] | components["schemas"]["CalculateImageTilesMinimumOverlapInvocation"] | components["schemas"]["CannyEdgeDetectionInvocation"] | components["schemas"]["CanvasPasteBackInvocation"] | components["schemas"]["CanvasV2MaskAndCropInvocation"] | components["schemas"]["CenterPadCropInvocation"] | components["schemas"]["CogView4DenoiseInvocation"] | components["schemas"]["CogView4ImageToLatentsInvocation"] | components["schemas"]["CogView4LatentsToImageInvocation"] | components["schemas"]["CogView4ModelLoaderInvocation"] | components["schemas"]["CogView4TextEncoderInvocation"] | components["schemas"]["CollectInvocation"] | 
components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["ColorInvocation"] | components["schemas"]["ColorMapInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["ConditioningCollectionInvocation"] | components["schemas"]["ConditioningInvocation"] | components["schemas"]["ContentShuffleInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["CoreMetadataInvocation"] | components["schemas"]["CreateDenoiseMaskInvocation"] | components["schemas"]["CreateGradientMaskInvocation"] | components["schemas"]["CropImageToBoundingBoxInvocation"] | components["schemas"]["CropLatentsCoreInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["DWOpenposeDetectionInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["DenoiseLatentsMetaInvocation"] | components["schemas"]["DepthAnythingDepthEstimationInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["ExpandMaskWithFadeInvocation"] | components["schemas"]["FLUXLoRACollectionLoader"] | components["schemas"]["FaceIdentifierInvocation"] | components["schemas"]["FaceMaskInvocation"] | components["schemas"]["FaceOffInvocation"] | components["schemas"]["FloatBatchInvocation"] | components["schemas"]["FloatCollectionInvocation"] | components["schemas"]["FloatGenerator"] | components["schemas"]["FloatInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["FloatMathInvocation"] | components["schemas"]["FloatToIntegerInvocation"] | components["schemas"]["Flux2DenoiseInvocation"] | components["schemas"]["Flux2KleinLoRACollectionLoader"] | components["schemas"]["Flux2KleinLoRALoaderInvocation"] | components["schemas"]["Flux2KleinModelLoaderInvocation"] | components["schemas"]["Flux2KleinTextEncoderInvocation"] | 
components["schemas"]["Flux2VaeDecodeInvocation"] | components["schemas"]["Flux2VaeEncodeInvocation"] | components["schemas"]["FluxControlLoRALoaderInvocation"] | components["schemas"]["FluxControlNetInvocation"] | components["schemas"]["FluxDenoiseInvocation"] | components["schemas"]["FluxDenoiseLatentsMetaInvocation"] | components["schemas"]["FluxFillInvocation"] | components["schemas"]["FluxIPAdapterInvocation"] | components["schemas"]["FluxKontextConcatenateImagesInvocation"] | components["schemas"]["FluxKontextInvocation"] | components["schemas"]["FluxLoRALoaderInvocation"] | components["schemas"]["FluxModelLoaderInvocation"] | components["schemas"]["FluxReduxInvocation"] | components["schemas"]["FluxTextEncoderInvocation"] | components["schemas"]["FluxVaeDecodeInvocation"] | components["schemas"]["FluxVaeEncodeInvocation"] | components["schemas"]["FreeUInvocation"] | components["schemas"]["GetMaskBoundingBoxInvocation"] | components["schemas"]["GroundingDinoInvocation"] | components["schemas"]["HEDEdgeDetectionInvocation"] | components["schemas"]["HeuristicResizeInvocation"] | components["schemas"]["IPAdapterInvocation"] | components["schemas"]["IdealSizeInvocation"] | components["schemas"]["ImageBatchInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["ImageChannelMultiplyInvocation"] | components["schemas"]["ImageChannelOffsetInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImageGenerator"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ImageInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ImageMaskToTensorInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] 
| components["schemas"]["ImageNoiseInvocation"] | components["schemas"]["ImagePanelLayoutInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["IntegerBatchInvocation"] | components["schemas"]["IntegerCollectionInvocation"] | components["schemas"]["IntegerGenerator"] | components["schemas"]["IntegerInvocation"] | components["schemas"]["IntegerMathInvocation"] | components["schemas"]["InvertTensorMaskInvocation"] | components["schemas"]["InvokeAdjustImageHuePlusInvocation"] | components["schemas"]["InvokeEquivalentAchromaticLightnessInvocation"] | components["schemas"]["InvokeImageBlendInvocation"] | components["schemas"]["InvokeImageCompositorInvocation"] | components["schemas"]["InvokeImageDilateOrErodeInvocation"] | components["schemas"]["InvokeImageEnhanceInvocation"] | components["schemas"]["InvokeImageValueThresholdsInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["LaMaInfillInvocation"] | components["schemas"]["LatentsCollectionInvocation"] | components["schemas"]["LatentsInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["LineartAnimeEdgeDetectionInvocation"] | components["schemas"]["LineartEdgeDetectionInvocation"] | components["schemas"]["LlavaOnevisionVllmInvocation"] | components["schemas"]["LoRACollectionLoader"] | components["schemas"]["LoRALoaderInvocation"] | components["schemas"]["LoRASelectorInvocation"] | components["schemas"]["MLSDDetectionInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["MaskEdgeInvocation"] | 
components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["MaskFromIDInvocation"] | components["schemas"]["MaskTensorToImageInvocation"] | components["schemas"]["MediaPipeFaceDetectionInvocation"] | components["schemas"]["MergeMetadataInvocation"] | components["schemas"]["MergeTilesToImageInvocation"] | components["schemas"]["MetadataFieldExtractorInvocation"] | components["schemas"]["MetadataFromImageInvocation"] | components["schemas"]["MetadataInvocation"] | components["schemas"]["MetadataItemInvocation"] | components["schemas"]["MetadataItemLinkedInvocation"] | components["schemas"]["MetadataToBoolCollectionInvocation"] | components["schemas"]["MetadataToBoolInvocation"] | components["schemas"]["MetadataToControlnetsInvocation"] | components["schemas"]["MetadataToFloatCollectionInvocation"] | components["schemas"]["MetadataToFloatInvocation"] | components["schemas"]["MetadataToIPAdaptersInvocation"] | components["schemas"]["MetadataToIntegerCollectionInvocation"] | components["schemas"]["MetadataToIntegerInvocation"] | components["schemas"]["MetadataToLorasCollectionInvocation"] | components["schemas"]["MetadataToLorasInvocation"] | components["schemas"]["MetadataToModelInvocation"] | components["schemas"]["MetadataToSDXLLorasInvocation"] | components["schemas"]["MetadataToSDXLModelInvocation"] | components["schemas"]["MetadataToSchedulerInvocation"] | components["schemas"]["MetadataToStringCollectionInvocation"] | components["schemas"]["MetadataToStringInvocation"] | components["schemas"]["MetadataToT2IAdaptersInvocation"] | components["schemas"]["MetadataToVAEInvocation"] | components["schemas"]["ModelIdentifierInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["NormalMapInvocation"] | components["schemas"]["PBRMapsInvocation"] | components["schemas"]["PairTileImageInvocation"] | components["schemas"]["PasteImageIntoBoundingBoxInvocation"] | 
components["schemas"]["PiDiNetEdgeDetectionInvocation"] | components["schemas"]["PromptTemplateInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["RandomFloatInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RectangleMaskInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["RoundInvocation"] | components["schemas"]["SD3DenoiseInvocation"] | components["schemas"]["SD3ImageToLatentsInvocation"] | components["schemas"]["SD3LatentsToImageInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["SDXLLoRACollectionLoader"] | components["schemas"]["SDXLLoRALoaderInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["SaveImageInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["SchedulerInvocation"] | components["schemas"]["Sd3ModelLoaderInvocation"] | components["schemas"]["Sd3TextEncoderInvocation"] | components["schemas"]["SeamlessModeInvocation"] | components["schemas"]["SegmentAnythingInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["SpandrelImageToImageAutoscaleInvocation"] | components["schemas"]["SpandrelImageToImageInvocation"] | components["schemas"]["StringBatchInvocation"] | components["schemas"]["StringCollectionInvocation"] | components["schemas"]["StringGenerator"] | components["schemas"]["StringInvocation"] | components["schemas"]["StringJoinInvocation"] | components["schemas"]["StringJoinThreeInvocation"] | components["schemas"]["StringReplaceInvocation"] | components["schemas"]["StringSplitInvocation"] | components["schemas"]["StringSplitNegInvocation"] 
| components["schemas"]["SubtractInvocation"] | components["schemas"]["T2IAdapterInvocation"] | components["schemas"]["TileToPropertiesInvocation"] | components["schemas"]["TiledMultiDiffusionDenoiseLatents"] | components["schemas"]["UnsharpMaskInvocation"] | components["schemas"]["VAELoaderInvocation"] | components["schemas"]["ZImageControlInvocation"] | components["schemas"]["ZImageDenoiseInvocation"] | components["schemas"]["ZImageDenoiseMetaInvocation"] | components["schemas"]["ZImageImageToLatentsInvocation"] | components["schemas"]["ZImageLatentsToImageInvocation"] | components["schemas"]["ZImageLoRACollectionLoader"] | components["schemas"]["ZImageLoRALoaderInvocation"] | components["schemas"]["ZImageModelLoaderInvocation"] | components["schemas"]["ZImageSeedVarianceEnhancerInvocation"] | components["schemas"]["ZImageTextEncoderInvocation"]; }; /** * Edges @@ -10246,7 +10982,7 @@ export type components = { * @description The results of node executions */ results: { - [key: string]: components["schemas"]["AnimaConditioningOutput"] | components["schemas"]["AnimaModelLoaderOutput"] | components["schemas"]["BooleanCollectionOutput"] | components["schemas"]["BooleanOutput"] | components["schemas"]["BoundingBoxCollectionOutput"] | components["schemas"]["BoundingBoxOutput"] | components["schemas"]["CLIPOutput"] | components["schemas"]["CLIPSkipInvocationOutput"] | components["schemas"]["CalculateImageTilesOutput"] | components["schemas"]["CogView4ConditioningOutput"] | components["schemas"]["CogView4ModelLoaderOutput"] | components["schemas"]["CollectInvocationOutput"] | components["schemas"]["ColorCollectionOutput"] | components["schemas"]["ColorOutput"] | components["schemas"]["ConditioningCollectionOutput"] | components["schemas"]["ConditioningOutput"] | components["schemas"]["ControlOutput"] | components["schemas"]["DenoiseMaskOutput"] | components["schemas"]["FaceMaskOutput"] | components["schemas"]["FaceOffOutput"] | 
components["schemas"]["FloatCollectionOutput"] | components["schemas"]["FloatGeneratorOutput"] | components["schemas"]["FloatOutput"] | components["schemas"]["Flux2KleinModelLoaderOutput"] | components["schemas"]["FluxConditioningCollectionOutput"] | components["schemas"]["FluxConditioningOutput"] | components["schemas"]["FluxControlLoRALoaderOutput"] | components["schemas"]["FluxControlNetOutput"] | components["schemas"]["FluxFillOutput"] | components["schemas"]["FluxKontextOutput"] | components["schemas"]["FluxLoRALoaderOutput"] | components["schemas"]["FluxModelLoaderOutput"] | components["schemas"]["FluxReduxOutput"] | components["schemas"]["GradientMaskOutput"] | components["schemas"]["IPAdapterOutput"] | components["schemas"]["IdealSizeOutput"] | components["schemas"]["ImageCollectionOutput"] | components["schemas"]["ImageGeneratorOutput"] | components["schemas"]["ImageOutput"] | components["schemas"]["ImagePanelCoordinateOutput"] | components["schemas"]["IntegerCollectionOutput"] | components["schemas"]["IntegerGeneratorOutput"] | components["schemas"]["IntegerOutput"] | components["schemas"]["IterateInvocationOutput"] | components["schemas"]["LatentsCollectionOutput"] | components["schemas"]["LatentsMetaOutput"] | components["schemas"]["LatentsOutput"] | components["schemas"]["LoRALoaderOutput"] | components["schemas"]["LoRASelectorOutput"] | components["schemas"]["MDControlListOutput"] | components["schemas"]["MDIPAdapterListOutput"] | components["schemas"]["MDT2IAdapterListOutput"] | components["schemas"]["MaskOutput"] | components["schemas"]["MetadataItemOutput"] | components["schemas"]["MetadataOutput"] | components["schemas"]["MetadataToLorasCollectionOutput"] | components["schemas"]["MetadataToModelOutput"] | components["schemas"]["MetadataToSDXLModelOutput"] | components["schemas"]["ModelIdentifierOutput"] | components["schemas"]["ModelLoaderOutput"] | components["schemas"]["NoiseOutput"] | components["schemas"]["PBRMapsOutput"] | 
components["schemas"]["PairTileImageOutput"] | components["schemas"]["PromptTemplateOutput"] | components["schemas"]["SD3ConditioningOutput"] | components["schemas"]["SDXLLoRALoaderOutput"] | components["schemas"]["SDXLModelLoaderOutput"] | components["schemas"]["SDXLRefinerModelLoaderOutput"] | components["schemas"]["SchedulerOutput"] | components["schemas"]["Sd3ModelLoaderOutput"] | components["schemas"]["SeamlessModeOutput"] | components["schemas"]["String2Output"] | components["schemas"]["StringCollectionOutput"] | components["schemas"]["StringGeneratorOutput"] | components["schemas"]["StringOutput"] | components["schemas"]["StringPosNegOutput"] | components["schemas"]["T2IAdapterOutput"] | components["schemas"]["TileToPropertiesOutput"] | components["schemas"]["UNetOutput"] | components["schemas"]["VAEOutput"] | components["schemas"]["ZImageConditioningOutput"] | components["schemas"]["ZImageControlOutput"] | components["schemas"]["ZImageLoRALoaderOutput"] | components["schemas"]["ZImageModelLoaderOutput"]; + [key: string]: components["schemas"]["AnimaConditioningOutput"] | components["schemas"]["AnimaModelLoaderOutput"] | components["schemas"]["BooleanCollectionOutput"] | components["schemas"]["BooleanOutput"] | components["schemas"]["BoundingBoxCollectionOutput"] | components["schemas"]["BoundingBoxOutput"] | components["schemas"]["CLIPOutput"] | components["schemas"]["CLIPSkipInvocationOutput"] | components["schemas"]["CalculateImageTilesOutput"] | components["schemas"]["CogView4ConditioningOutput"] | components["schemas"]["CogView4ModelLoaderOutput"] | components["schemas"]["CollectInvocationOutput"] | components["schemas"]["ColorCollectionOutput"] | components["schemas"]["ColorOutput"] | components["schemas"]["ConditioningCollectionOutput"] | components["schemas"]["ConditioningOutput"] | components["schemas"]["ControlOutput"] | components["schemas"]["DenoiseMaskOutput"] | components["schemas"]["FaceMaskOutput"] | components["schemas"]["FaceOffOutput"] | 
components["schemas"]["FloatCollectionOutput"] | components["schemas"]["FloatGeneratorOutput"] | components["schemas"]["FloatOutput"] | components["schemas"]["Flux2KleinLoRALoaderOutput"] | components["schemas"]["Flux2KleinModelLoaderOutput"] | components["schemas"]["FluxConditioningCollectionOutput"] | components["schemas"]["FluxConditioningOutput"] | components["schemas"]["FluxControlLoRALoaderOutput"] | components["schemas"]["FluxControlNetOutput"] | components["schemas"]["FluxFillOutput"] | components["schemas"]["FluxKontextOutput"] | components["schemas"]["FluxLoRALoaderOutput"] | components["schemas"]["FluxModelLoaderOutput"] | components["schemas"]["FluxReduxOutput"] | components["schemas"]["GradientMaskOutput"] | components["schemas"]["IPAdapterOutput"] | components["schemas"]["IdealSizeOutput"] | components["schemas"]["ImageCollectionOutput"] | components["schemas"]["ImageGeneratorOutput"] | components["schemas"]["ImageOutput"] | components["schemas"]["ImagePanelCoordinateOutput"] | components["schemas"]["IntegerCollectionOutput"] | components["schemas"]["IntegerGeneratorOutput"] | components["schemas"]["IntegerOutput"] | components["schemas"]["IterateInvocationOutput"] | components["schemas"]["LatentsCollectionOutput"] | components["schemas"]["LatentsMetaOutput"] | components["schemas"]["LatentsOutput"] | components["schemas"]["LoRALoaderOutput"] | components["schemas"]["LoRASelectorOutput"] | components["schemas"]["MDControlListOutput"] | components["schemas"]["MDIPAdapterListOutput"] | components["schemas"]["MDT2IAdapterListOutput"] | components["schemas"]["MaskOutput"] | components["schemas"]["MetadataItemOutput"] | components["schemas"]["MetadataOutput"] | components["schemas"]["MetadataToLorasCollectionOutput"] | components["schemas"]["MetadataToModelOutput"] | components["schemas"]["MetadataToSDXLModelOutput"] | components["schemas"]["ModelIdentifierOutput"] | components["schemas"]["ModelLoaderOutput"] | components["schemas"]["NoiseOutput"] | 
components["schemas"]["PBRMapsOutput"] | components["schemas"]["PairTileImageOutput"] | components["schemas"]["PromptTemplateOutput"] | components["schemas"]["SD3ConditioningOutput"] | components["schemas"]["SDXLLoRALoaderOutput"] | components["schemas"]["SDXLModelLoaderOutput"] | components["schemas"]["SDXLRefinerModelLoaderOutput"] | components["schemas"]["SchedulerOutput"] | components["schemas"]["Sd3ModelLoaderOutput"] | components["schemas"]["SeamlessModeOutput"] | components["schemas"]["String2Output"] | components["schemas"]["StringCollectionOutput"] | components["schemas"]["StringGeneratorOutput"] | components["schemas"]["StringOutput"] | components["schemas"]["StringPosNegOutput"] | components["schemas"]["T2IAdapterOutput"] | components["schemas"]["TileToPropertiesOutput"] | components["schemas"]["UNetOutput"] | components["schemas"]["VAEOutput"] | components["schemas"]["ZImageConditioningOutput"] | components["schemas"]["ZImageControlOutput"] | components["schemas"]["ZImageLoRALoaderOutput"] | components["schemas"]["ZImageModelLoaderOutput"]; }; /** * Errors @@ -10692,6 +11428,48 @@ export type components = { */ type: "ip_adapter_output"; }; + /** + * IPAdapterRecallParameter + * @description IP Adapter configuration for recall + */ + IPAdapterRecallParameter: { + /** + * Model Name + * @description The name of the IP Adapter model + */ + model_name: string; + /** + * Image Name + * @description The filename of the reference image in outputs/images + */ + image_name?: string | null; + /** + * Weight + * @description The weight for the IP Adapter + * @default 1 + */ + weight?: number; + /** + * Begin Step Percent + * @description When the IP Adapter is first applied (% of total steps) + */ + begin_step_percent?: number | null; + /** + * End Step Percent + * @description When the IP Adapter is last applied (% of total steps) + */ + end_step_percent?: number | null; + /** + * Method + * @description The IP Adapter method + */ + method?: ("full" | "style" | 
"composition") | null; + /** + * Image Influence + * @description FLUX Redux image influence (if model is flux_redux) + */ + image_influence?: ("lowest" | "low" | "medium" | "high" | "highest") | null; + }; /** IPAdapter_Checkpoint_FLUX_Config */ IPAdapter_Checkpoint_FLUX_Config: { /** @@ -13030,7 +13808,7 @@ export type components = { * @description State of an install job running in the background. * @enum {string} */ - InstallStatus: "waiting" | "downloading" | "downloads_done" | "running" | "completed" | "error" | "cancelled"; + InstallStatus: "waiting" | "downloading" | "downloads_done" | "running" | "paused" | "completed" | "error" | "cancelled"; /** * Integer Batch * @description Create a batched generation, where the workflow is executed once for each integer in the batch. @@ -13374,6 +14152,12 @@ export type components = { * @default null */ destination: string | null; + /** + * User Id + * @description The ID of the user who created the queue item + * @default system + */ + user_id: string; /** * Session Id * @description The ID of the session (aka graph execution state) @@ -13383,7 +14167,7 @@ export type components = { * Invocation * @description The ID of the invocation */ - invocation: components["schemas"]["AddInvocation"] | components["schemas"]["AlphaMaskToTensorInvocation"] | components["schemas"]["AnimaDenoiseInvocation"] | components["schemas"]["AnimaLatentsToImageInvocation"] | components["schemas"]["AnimaModelLoaderInvocation"] | components["schemas"]["AnimaTextEncoderInvocation"] | components["schemas"]["ApplyMaskTensorToImageInvocation"] | components["schemas"]["ApplyMaskToImageInvocation"] | components["schemas"]["BlankImageInvocation"] | components["schemas"]["BlendLatentsInvocation"] | components["schemas"]["BooleanCollectionInvocation"] | components["schemas"]["BooleanInvocation"] | components["schemas"]["BoundingBoxInvocation"] | components["schemas"]["CLIPSkipInvocation"] | components["schemas"]["CV2InfillInvocation"] | 
components["schemas"]["CalculateImageTilesEvenSplitInvocation"] | components["schemas"]["CalculateImageTilesInvocation"] | components["schemas"]["CalculateImageTilesMinimumOverlapInvocation"] | components["schemas"]["CannyEdgeDetectionInvocation"] | components["schemas"]["CanvasPasteBackInvocation"] | components["schemas"]["CanvasV2MaskAndCropInvocation"] | components["schemas"]["CenterPadCropInvocation"] | components["schemas"]["CogView4DenoiseInvocation"] | components["schemas"]["CogView4ImageToLatentsInvocation"] | components["schemas"]["CogView4LatentsToImageInvocation"] | components["schemas"]["CogView4ModelLoaderInvocation"] | components["schemas"]["CogView4TextEncoderInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["ColorInvocation"] | components["schemas"]["ColorMapInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["ConditioningCollectionInvocation"] | components["schemas"]["ConditioningInvocation"] | components["schemas"]["ContentShuffleInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["CoreMetadataInvocation"] | components["schemas"]["CreateDenoiseMaskInvocation"] | components["schemas"]["CreateGradientMaskInvocation"] | components["schemas"]["CropImageToBoundingBoxInvocation"] | components["schemas"]["CropLatentsCoreInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["DWOpenposeDetectionInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["DenoiseLatentsMetaInvocation"] | components["schemas"]["DepthAnythingDepthEstimationInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["ExpandMaskWithFadeInvocation"] | components["schemas"]["FLUXLoRACollectionLoader"] | components["schemas"]["FaceIdentifierInvocation"] | 
components["schemas"]["FaceMaskInvocation"] | components["schemas"]["FaceOffInvocation"] | components["schemas"]["FloatBatchInvocation"] | components["schemas"]["FloatCollectionInvocation"] | components["schemas"]["FloatGenerator"] | components["schemas"]["FloatInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["FloatMathInvocation"] | components["schemas"]["FloatToIntegerInvocation"] | components["schemas"]["Flux2DenoiseInvocation"] | components["schemas"]["Flux2KleinModelLoaderInvocation"] | components["schemas"]["Flux2KleinTextEncoderInvocation"] | components["schemas"]["Flux2VaeDecodeInvocation"] | components["schemas"]["Flux2VaeEncodeInvocation"] | components["schemas"]["FluxControlLoRALoaderInvocation"] | components["schemas"]["FluxControlNetInvocation"] | components["schemas"]["FluxDenoiseInvocation"] | components["schemas"]["FluxDenoiseLatentsMetaInvocation"] | components["schemas"]["FluxFillInvocation"] | components["schemas"]["FluxIPAdapterInvocation"] | components["schemas"]["FluxKontextConcatenateImagesInvocation"] | components["schemas"]["FluxKontextInvocation"] | components["schemas"]["FluxLoRALoaderInvocation"] | components["schemas"]["FluxModelLoaderInvocation"] | components["schemas"]["FluxReduxInvocation"] | components["schemas"]["FluxTextEncoderInvocation"] | components["schemas"]["FluxVaeDecodeInvocation"] | components["schemas"]["FluxVaeEncodeInvocation"] | components["schemas"]["FreeUInvocation"] | components["schemas"]["GetMaskBoundingBoxInvocation"] | components["schemas"]["GroundingDinoInvocation"] | components["schemas"]["HEDEdgeDetectionInvocation"] | components["schemas"]["HeuristicResizeInvocation"] | components["schemas"]["IPAdapterInvocation"] | components["schemas"]["IdealSizeInvocation"] | components["schemas"]["ImageBatchInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["ImageChannelMultiplyInvocation"] | 
components["schemas"]["ImageChannelOffsetInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImageGenerator"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ImageInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ImageMaskToTensorInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["ImageNoiseInvocation"] | components["schemas"]["ImagePanelLayoutInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["IntegerBatchInvocation"] | components["schemas"]["IntegerCollectionInvocation"] | components["schemas"]["IntegerGenerator"] | components["schemas"]["IntegerInvocation"] | components["schemas"]["IntegerMathInvocation"] | components["schemas"]["InvertTensorMaskInvocation"] | components["schemas"]["InvokeAdjustImageHuePlusInvocation"] | components["schemas"]["InvokeEquivalentAchromaticLightnessInvocation"] | components["schemas"]["InvokeImageBlendInvocation"] | components["schemas"]["InvokeImageCompositorInvocation"] | components["schemas"]["InvokeImageDilateOrErodeInvocation"] | components["schemas"]["InvokeImageEnhanceInvocation"] | components["schemas"]["InvokeImageValueThresholdsInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["LaMaInfillInvocation"] | components["schemas"]["LatentsCollectionInvocation"] | 
components["schemas"]["LatentsInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["LineartAnimeEdgeDetectionInvocation"] | components["schemas"]["LineartEdgeDetectionInvocation"] | components["schemas"]["LlavaOnevisionVllmInvocation"] | components["schemas"]["LoRACollectionLoader"] | components["schemas"]["LoRALoaderInvocation"] | components["schemas"]["LoRASelectorInvocation"] | components["schemas"]["MLSDDetectionInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["MaskFromIDInvocation"] | components["schemas"]["MaskTensorToImageInvocation"] | components["schemas"]["MediaPipeFaceDetectionInvocation"] | components["schemas"]["MergeMetadataInvocation"] | components["schemas"]["MergeTilesToImageInvocation"] | components["schemas"]["MetadataFieldExtractorInvocation"] | components["schemas"]["MetadataFromImageInvocation"] | components["schemas"]["MetadataInvocation"] | components["schemas"]["MetadataItemInvocation"] | components["schemas"]["MetadataItemLinkedInvocation"] | components["schemas"]["MetadataToBoolCollectionInvocation"] | components["schemas"]["MetadataToBoolInvocation"] | components["schemas"]["MetadataToControlnetsInvocation"] | components["schemas"]["MetadataToFloatCollectionInvocation"] | components["schemas"]["MetadataToFloatInvocation"] | components["schemas"]["MetadataToIPAdaptersInvocation"] | components["schemas"]["MetadataToIntegerCollectionInvocation"] | components["schemas"]["MetadataToIntegerInvocation"] | components["schemas"]["MetadataToLorasCollectionInvocation"] | components["schemas"]["MetadataToLorasInvocation"] | components["schemas"]["MetadataToModelInvocation"] | components["schemas"]["MetadataToSDXLLorasInvocation"] | components["schemas"]["MetadataToSDXLModelInvocation"] | 
components["schemas"]["MetadataToSchedulerInvocation"] | components["schemas"]["MetadataToStringCollectionInvocation"] | components["schemas"]["MetadataToStringInvocation"] | components["schemas"]["MetadataToT2IAdaptersInvocation"] | components["schemas"]["MetadataToVAEInvocation"] | components["schemas"]["ModelIdentifierInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["NormalMapInvocation"] | components["schemas"]["PBRMapsInvocation"] | components["schemas"]["PairTileImageInvocation"] | components["schemas"]["PasteImageIntoBoundingBoxInvocation"] | components["schemas"]["PiDiNetEdgeDetectionInvocation"] | components["schemas"]["PromptTemplateInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["RandomFloatInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RectangleMaskInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["RoundInvocation"] | components["schemas"]["SD3DenoiseInvocation"] | components["schemas"]["SD3ImageToLatentsInvocation"] | components["schemas"]["SD3LatentsToImageInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["SDXLLoRACollectionLoader"] | components["schemas"]["SDXLLoRALoaderInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["SaveImageInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["SchedulerInvocation"] | components["schemas"]["Sd3ModelLoaderInvocation"] | components["schemas"]["Sd3TextEncoderInvocation"] | components["schemas"]["SeamlessModeInvocation"] | 
components["schemas"]["SegmentAnythingInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["SpandrelImageToImageAutoscaleInvocation"] | components["schemas"]["SpandrelImageToImageInvocation"] | components["schemas"]["StringBatchInvocation"] | components["schemas"]["StringCollectionInvocation"] | components["schemas"]["StringGenerator"] | components["schemas"]["StringInvocation"] | components["schemas"]["StringJoinInvocation"] | components["schemas"]["StringJoinThreeInvocation"] | components["schemas"]["StringReplaceInvocation"] | components["schemas"]["StringSplitInvocation"] | components["schemas"]["StringSplitNegInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["T2IAdapterInvocation"] | components["schemas"]["TileToPropertiesInvocation"] | components["schemas"]["TiledMultiDiffusionDenoiseLatents"] | components["schemas"]["UnsharpMaskInvocation"] | components["schemas"]["VAELoaderInvocation"] | components["schemas"]["ZImageControlInvocation"] | components["schemas"]["ZImageDenoiseInvocation"] | components["schemas"]["ZImageDenoiseMetaInvocation"] | components["schemas"]["ZImageImageToLatentsInvocation"] | components["schemas"]["ZImageLatentsToImageInvocation"] | components["schemas"]["ZImageLoRACollectionLoader"] | components["schemas"]["ZImageLoRALoaderInvocation"] | components["schemas"]["ZImageModelLoaderInvocation"] | components["schemas"]["ZImageSeedVarianceEnhancerInvocation"] | components["schemas"]["ZImageTextEncoderInvocation"]; + invocation: components["schemas"]["AddInvocation"] | components["schemas"]["AlphaMaskToTensorInvocation"] | components["schemas"]["AnimaDenoiseInvocation"] | components["schemas"]["AnimaLatentsToImageInvocation"] | components["schemas"]["AnimaModelLoaderInvocation"] | components["schemas"]["AnimaTextEncoderInvocation"] | components["schemas"]["ApplyMaskTensorToImageInvocation"] | components["schemas"]["ApplyMaskToImageInvocation"] | 
components["schemas"]["BlankImageInvocation"] | components["schemas"]["BlendLatentsInvocation"] | components["schemas"]["BooleanCollectionInvocation"] | components["schemas"]["BooleanInvocation"] | components["schemas"]["BoundingBoxInvocation"] | components["schemas"]["CLIPSkipInvocation"] | components["schemas"]["CV2InfillInvocation"] | components["schemas"]["CalculateImageTilesEvenSplitInvocation"] | components["schemas"]["CalculateImageTilesInvocation"] | components["schemas"]["CalculateImageTilesMinimumOverlapInvocation"] | components["schemas"]["CannyEdgeDetectionInvocation"] | components["schemas"]["CanvasPasteBackInvocation"] | components["schemas"]["CanvasV2MaskAndCropInvocation"] | components["schemas"]["CenterPadCropInvocation"] | components["schemas"]["CogView4DenoiseInvocation"] | components["schemas"]["CogView4ImageToLatentsInvocation"] | components["schemas"]["CogView4LatentsToImageInvocation"] | components["schemas"]["CogView4ModelLoaderInvocation"] | components["schemas"]["CogView4TextEncoderInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["ColorInvocation"] | components["schemas"]["ColorMapInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["ConditioningCollectionInvocation"] | components["schemas"]["ConditioningInvocation"] | components["schemas"]["ContentShuffleInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["CoreMetadataInvocation"] | components["schemas"]["CreateDenoiseMaskInvocation"] | components["schemas"]["CreateGradientMaskInvocation"] | components["schemas"]["CropImageToBoundingBoxInvocation"] | components["schemas"]["CropLatentsCoreInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["DWOpenposeDetectionInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["DenoiseLatentsMetaInvocation"] | 
components["schemas"]["DepthAnythingDepthEstimationInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["ExpandMaskWithFadeInvocation"] | components["schemas"]["FLUXLoRACollectionLoader"] | components["schemas"]["FaceIdentifierInvocation"] | components["schemas"]["FaceMaskInvocation"] | components["schemas"]["FaceOffInvocation"] | components["schemas"]["FloatBatchInvocation"] | components["schemas"]["FloatCollectionInvocation"] | components["schemas"]["FloatGenerator"] | components["schemas"]["FloatInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["FloatMathInvocation"] | components["schemas"]["FloatToIntegerInvocation"] | components["schemas"]["Flux2DenoiseInvocation"] | components["schemas"]["Flux2KleinLoRACollectionLoader"] | components["schemas"]["Flux2KleinLoRALoaderInvocation"] | components["schemas"]["Flux2KleinModelLoaderInvocation"] | components["schemas"]["Flux2KleinTextEncoderInvocation"] | components["schemas"]["Flux2VaeDecodeInvocation"] | components["schemas"]["Flux2VaeEncodeInvocation"] | components["schemas"]["FluxControlLoRALoaderInvocation"] | components["schemas"]["FluxControlNetInvocation"] | components["schemas"]["FluxDenoiseInvocation"] | components["schemas"]["FluxDenoiseLatentsMetaInvocation"] | components["schemas"]["FluxFillInvocation"] | components["schemas"]["FluxIPAdapterInvocation"] | components["schemas"]["FluxKontextConcatenateImagesInvocation"] | components["schemas"]["FluxKontextInvocation"] | components["schemas"]["FluxLoRALoaderInvocation"] | components["schemas"]["FluxModelLoaderInvocation"] | components["schemas"]["FluxReduxInvocation"] | components["schemas"]["FluxTextEncoderInvocation"] | components["schemas"]["FluxVaeDecodeInvocation"] | components["schemas"]["FluxVaeEncodeInvocation"] | components["schemas"]["FreeUInvocation"] | 
components["schemas"]["GetMaskBoundingBoxInvocation"] | components["schemas"]["GroundingDinoInvocation"] | components["schemas"]["HEDEdgeDetectionInvocation"] | components["schemas"]["HeuristicResizeInvocation"] | components["schemas"]["IPAdapterInvocation"] | components["schemas"]["IdealSizeInvocation"] | components["schemas"]["ImageBatchInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["ImageChannelMultiplyInvocation"] | components["schemas"]["ImageChannelOffsetInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImageGenerator"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ImageInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ImageMaskToTensorInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["ImageNoiseInvocation"] | components["schemas"]["ImagePanelLayoutInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["IntegerBatchInvocation"] | components["schemas"]["IntegerCollectionInvocation"] | components["schemas"]["IntegerGenerator"] | components["schemas"]["IntegerInvocation"] | components["schemas"]["IntegerMathInvocation"] | components["schemas"]["InvertTensorMaskInvocation"] | components["schemas"]["InvokeAdjustImageHuePlusInvocation"] | 
components["schemas"]["InvokeEquivalentAchromaticLightnessInvocation"] | components["schemas"]["InvokeImageBlendInvocation"] | components["schemas"]["InvokeImageCompositorInvocation"] | components["schemas"]["InvokeImageDilateOrErodeInvocation"] | components["schemas"]["InvokeImageEnhanceInvocation"] | components["schemas"]["InvokeImageValueThresholdsInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["LaMaInfillInvocation"] | components["schemas"]["LatentsCollectionInvocation"] | components["schemas"]["LatentsInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["LineartAnimeEdgeDetectionInvocation"] | components["schemas"]["LineartEdgeDetectionInvocation"] | components["schemas"]["LlavaOnevisionVllmInvocation"] | components["schemas"]["LoRACollectionLoader"] | components["schemas"]["LoRALoaderInvocation"] | components["schemas"]["LoRASelectorInvocation"] | components["schemas"]["MLSDDetectionInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["MaskFromIDInvocation"] | components["schemas"]["MaskTensorToImageInvocation"] | components["schemas"]["MediaPipeFaceDetectionInvocation"] | components["schemas"]["MergeMetadataInvocation"] | components["schemas"]["MergeTilesToImageInvocation"] | components["schemas"]["MetadataFieldExtractorInvocation"] | components["schemas"]["MetadataFromImageInvocation"] | components["schemas"]["MetadataInvocation"] | components["schemas"]["MetadataItemInvocation"] | components["schemas"]["MetadataItemLinkedInvocation"] | components["schemas"]["MetadataToBoolCollectionInvocation"] | components["schemas"]["MetadataToBoolInvocation"] | components["schemas"]["MetadataToControlnetsInvocation"] | components["schemas"]["MetadataToFloatCollectionInvocation"] | 
components["schemas"]["MetadataToFloatInvocation"] | components["schemas"]["MetadataToIPAdaptersInvocation"] | components["schemas"]["MetadataToIntegerCollectionInvocation"] | components["schemas"]["MetadataToIntegerInvocation"] | components["schemas"]["MetadataToLorasCollectionInvocation"] | components["schemas"]["MetadataToLorasInvocation"] | components["schemas"]["MetadataToModelInvocation"] | components["schemas"]["MetadataToSDXLLorasInvocation"] | components["schemas"]["MetadataToSDXLModelInvocation"] | components["schemas"]["MetadataToSchedulerInvocation"] | components["schemas"]["MetadataToStringCollectionInvocation"] | components["schemas"]["MetadataToStringInvocation"] | components["schemas"]["MetadataToT2IAdaptersInvocation"] | components["schemas"]["MetadataToVAEInvocation"] | components["schemas"]["ModelIdentifierInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["NormalMapInvocation"] | components["schemas"]["PBRMapsInvocation"] | components["schemas"]["PairTileImageInvocation"] | components["schemas"]["PasteImageIntoBoundingBoxInvocation"] | components["schemas"]["PiDiNetEdgeDetectionInvocation"] | components["schemas"]["PromptTemplateInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["RandomFloatInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RectangleMaskInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["RoundInvocation"] | components["schemas"]["SD3DenoiseInvocation"] | components["schemas"]["SD3ImageToLatentsInvocation"] | components["schemas"]["SD3LatentsToImageInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["SDXLLoRACollectionLoader"] | 
components["schemas"]["SDXLLoRALoaderInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["SaveImageInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["SchedulerInvocation"] | components["schemas"]["Sd3ModelLoaderInvocation"] | components["schemas"]["Sd3TextEncoderInvocation"] | components["schemas"]["SeamlessModeInvocation"] | components["schemas"]["SegmentAnythingInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["SpandrelImageToImageAutoscaleInvocation"] | components["schemas"]["SpandrelImageToImageInvocation"] | components["schemas"]["StringBatchInvocation"] | components["schemas"]["StringCollectionInvocation"] | components["schemas"]["StringGenerator"] | components["schemas"]["StringInvocation"] | components["schemas"]["StringJoinInvocation"] | components["schemas"]["StringJoinThreeInvocation"] | components["schemas"]["StringReplaceInvocation"] | components["schemas"]["StringSplitInvocation"] | components["schemas"]["StringSplitNegInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["T2IAdapterInvocation"] | components["schemas"]["TileToPropertiesInvocation"] | components["schemas"]["TiledMultiDiffusionDenoiseLatents"] | components["schemas"]["UnsharpMaskInvocation"] | components["schemas"]["VAELoaderInvocation"] | components["schemas"]["ZImageControlInvocation"] | components["schemas"]["ZImageDenoiseInvocation"] | components["schemas"]["ZImageDenoiseMetaInvocation"] | components["schemas"]["ZImageImageToLatentsInvocation"] | components["schemas"]["ZImageLatentsToImageInvocation"] | components["schemas"]["ZImageLoRACollectionLoader"] | components["schemas"]["ZImageLoRALoaderInvocation"] | components["schemas"]["ZImageModelLoaderInvocation"] | components["schemas"]["ZImageSeedVarianceEnhancerInvocation"] | 
components["schemas"]["ZImageTextEncoderInvocation"]; /** * Invocation Source Id * @description The ID of the prepared invocation's source node @@ -13393,7 +14177,7 @@ export type components = { * Result * @description The result of the invocation */ - result: components["schemas"]["AnimaConditioningOutput"] | components["schemas"]["AnimaModelLoaderOutput"] | components["schemas"]["BooleanCollectionOutput"] | components["schemas"]["BooleanOutput"] | components["schemas"]["BoundingBoxCollectionOutput"] | components["schemas"]["BoundingBoxOutput"] | components["schemas"]["CLIPOutput"] | components["schemas"]["CLIPSkipInvocationOutput"] | components["schemas"]["CalculateImageTilesOutput"] | components["schemas"]["CogView4ConditioningOutput"] | components["schemas"]["CogView4ModelLoaderOutput"] | components["schemas"]["CollectInvocationOutput"] | components["schemas"]["ColorCollectionOutput"] | components["schemas"]["ColorOutput"] | components["schemas"]["ConditioningCollectionOutput"] | components["schemas"]["ConditioningOutput"] | components["schemas"]["ControlOutput"] | components["schemas"]["DenoiseMaskOutput"] | components["schemas"]["FaceMaskOutput"] | components["schemas"]["FaceOffOutput"] | components["schemas"]["FloatCollectionOutput"] | components["schemas"]["FloatGeneratorOutput"] | components["schemas"]["FloatOutput"] | components["schemas"]["Flux2KleinModelLoaderOutput"] | components["schemas"]["FluxConditioningCollectionOutput"] | components["schemas"]["FluxConditioningOutput"] | components["schemas"]["FluxControlLoRALoaderOutput"] | components["schemas"]["FluxControlNetOutput"] | components["schemas"]["FluxFillOutput"] | components["schemas"]["FluxKontextOutput"] | components["schemas"]["FluxLoRALoaderOutput"] | components["schemas"]["FluxModelLoaderOutput"] | components["schemas"]["FluxReduxOutput"] | components["schemas"]["GradientMaskOutput"] | components["schemas"]["IPAdapterOutput"] | components["schemas"]["IdealSizeOutput"] | 
components["schemas"]["ImageCollectionOutput"] | components["schemas"]["ImageGeneratorOutput"] | components["schemas"]["ImageOutput"] | components["schemas"]["ImagePanelCoordinateOutput"] | components["schemas"]["IntegerCollectionOutput"] | components["schemas"]["IntegerGeneratorOutput"] | components["schemas"]["IntegerOutput"] | components["schemas"]["IterateInvocationOutput"] | components["schemas"]["LatentsCollectionOutput"] | components["schemas"]["LatentsMetaOutput"] | components["schemas"]["LatentsOutput"] | components["schemas"]["LoRALoaderOutput"] | components["schemas"]["LoRASelectorOutput"] | components["schemas"]["MDControlListOutput"] | components["schemas"]["MDIPAdapterListOutput"] | components["schemas"]["MDT2IAdapterListOutput"] | components["schemas"]["MaskOutput"] | components["schemas"]["MetadataItemOutput"] | components["schemas"]["MetadataOutput"] | components["schemas"]["MetadataToLorasCollectionOutput"] | components["schemas"]["MetadataToModelOutput"] | components["schemas"]["MetadataToSDXLModelOutput"] | components["schemas"]["ModelIdentifierOutput"] | components["schemas"]["ModelLoaderOutput"] | components["schemas"]["NoiseOutput"] | components["schemas"]["PBRMapsOutput"] | components["schemas"]["PairTileImageOutput"] | components["schemas"]["PromptTemplateOutput"] | components["schemas"]["SD3ConditioningOutput"] | components["schemas"]["SDXLLoRALoaderOutput"] | components["schemas"]["SDXLModelLoaderOutput"] | components["schemas"]["SDXLRefinerModelLoaderOutput"] | components["schemas"]["SchedulerOutput"] | components["schemas"]["Sd3ModelLoaderOutput"] | components["schemas"]["SeamlessModeOutput"] | components["schemas"]["String2Output"] | components["schemas"]["StringCollectionOutput"] | components["schemas"]["StringGeneratorOutput"] | components["schemas"]["StringOutput"] | components["schemas"]["StringPosNegOutput"] | components["schemas"]["T2IAdapterOutput"] | components["schemas"]["TileToPropertiesOutput"] | 
components["schemas"]["UNetOutput"] | components["schemas"]["VAEOutput"] | components["schemas"]["ZImageConditioningOutput"] | components["schemas"]["ZImageControlOutput"] | components["schemas"]["ZImageLoRALoaderOutput"] | components["schemas"]["ZImageModelLoaderOutput"]; + result: components["schemas"]["AnimaConditioningOutput"] | components["schemas"]["AnimaModelLoaderOutput"] | components["schemas"]["BooleanCollectionOutput"] | components["schemas"]["BooleanOutput"] | components["schemas"]["BoundingBoxCollectionOutput"] | components["schemas"]["BoundingBoxOutput"] | components["schemas"]["CLIPOutput"] | components["schemas"]["CLIPSkipInvocationOutput"] | components["schemas"]["CalculateImageTilesOutput"] | components["schemas"]["CogView4ConditioningOutput"] | components["schemas"]["CogView4ModelLoaderOutput"] | components["schemas"]["CollectInvocationOutput"] | components["schemas"]["ColorCollectionOutput"] | components["schemas"]["ColorOutput"] | components["schemas"]["ConditioningCollectionOutput"] | components["schemas"]["ConditioningOutput"] | components["schemas"]["ControlOutput"] | components["schemas"]["DenoiseMaskOutput"] | components["schemas"]["FaceMaskOutput"] | components["schemas"]["FaceOffOutput"] | components["schemas"]["FloatCollectionOutput"] | components["schemas"]["FloatGeneratorOutput"] | components["schemas"]["FloatOutput"] | components["schemas"]["Flux2KleinLoRALoaderOutput"] | components["schemas"]["Flux2KleinModelLoaderOutput"] | components["schemas"]["FluxConditioningCollectionOutput"] | components["schemas"]["FluxConditioningOutput"] | components["schemas"]["FluxControlLoRALoaderOutput"] | components["schemas"]["FluxControlNetOutput"] | components["schemas"]["FluxFillOutput"] | components["schemas"]["FluxKontextOutput"] | components["schemas"]["FluxLoRALoaderOutput"] | components["schemas"]["FluxModelLoaderOutput"] | components["schemas"]["FluxReduxOutput"] | components["schemas"]["GradientMaskOutput"] | 
components["schemas"]["IPAdapterOutput"] | components["schemas"]["IdealSizeOutput"] | components["schemas"]["ImageCollectionOutput"] | components["schemas"]["ImageGeneratorOutput"] | components["schemas"]["ImageOutput"] | components["schemas"]["ImagePanelCoordinateOutput"] | components["schemas"]["IntegerCollectionOutput"] | components["schemas"]["IntegerGeneratorOutput"] | components["schemas"]["IntegerOutput"] | components["schemas"]["IterateInvocationOutput"] | components["schemas"]["LatentsCollectionOutput"] | components["schemas"]["LatentsMetaOutput"] | components["schemas"]["LatentsOutput"] | components["schemas"]["LoRALoaderOutput"] | components["schemas"]["LoRASelectorOutput"] | components["schemas"]["MDControlListOutput"] | components["schemas"]["MDIPAdapterListOutput"] | components["schemas"]["MDT2IAdapterListOutput"] | components["schemas"]["MaskOutput"] | components["schemas"]["MetadataItemOutput"] | components["schemas"]["MetadataOutput"] | components["schemas"]["MetadataToLorasCollectionOutput"] | components["schemas"]["MetadataToModelOutput"] | components["schemas"]["MetadataToSDXLModelOutput"] | components["schemas"]["ModelIdentifierOutput"] | components["schemas"]["ModelLoaderOutput"] | components["schemas"]["NoiseOutput"] | components["schemas"]["PBRMapsOutput"] | components["schemas"]["PairTileImageOutput"] | components["schemas"]["PromptTemplateOutput"] | components["schemas"]["SD3ConditioningOutput"] | components["schemas"]["SDXLLoRALoaderOutput"] | components["schemas"]["SDXLModelLoaderOutput"] | components["schemas"]["SDXLRefinerModelLoaderOutput"] | components["schemas"]["SchedulerOutput"] | components["schemas"]["Sd3ModelLoaderOutput"] | components["schemas"]["SeamlessModeOutput"] | components["schemas"]["String2Output"] | components["schemas"]["StringCollectionOutput"] | components["schemas"]["StringGeneratorOutput"] | components["schemas"]["StringOutput"] | components["schemas"]["StringPosNegOutput"] | 
components["schemas"]["T2IAdapterOutput"] | components["schemas"]["TileToPropertiesOutput"] | components["schemas"]["UNetOutput"] | components["schemas"]["VAEOutput"] | components["schemas"]["ZImageConditioningOutput"] | components["schemas"]["ZImageControlOutput"] | components["schemas"]["ZImageLoRALoaderOutput"] | components["schemas"]["ZImageModelLoaderOutput"]; }; /** * InvocationErrorEvent @@ -13432,6 +14216,12 @@ export type components = { * @default null */ destination: string | null; + /** + * User Id + * @description The ID of the user who created the queue item + * @default system + */ + user_id: string; /** * Session Id * @description The ID of the session (aka graph execution state) @@ -13441,7 +14231,7 @@ export type components = { * Invocation * @description The ID of the invocation */ - invocation: components["schemas"]["AddInvocation"] | components["schemas"]["AlphaMaskToTensorInvocation"] | components["schemas"]["AnimaDenoiseInvocation"] | components["schemas"]["AnimaLatentsToImageInvocation"] | components["schemas"]["AnimaModelLoaderInvocation"] | components["schemas"]["AnimaTextEncoderInvocation"] | components["schemas"]["ApplyMaskTensorToImageInvocation"] | components["schemas"]["ApplyMaskToImageInvocation"] | components["schemas"]["BlankImageInvocation"] | components["schemas"]["BlendLatentsInvocation"] | components["schemas"]["BooleanCollectionInvocation"] | components["schemas"]["BooleanInvocation"] | components["schemas"]["BoundingBoxInvocation"] | components["schemas"]["CLIPSkipInvocation"] | components["schemas"]["CV2InfillInvocation"] | components["schemas"]["CalculateImageTilesEvenSplitInvocation"] | components["schemas"]["CalculateImageTilesInvocation"] | components["schemas"]["CalculateImageTilesMinimumOverlapInvocation"] | components["schemas"]["CannyEdgeDetectionInvocation"] | components["schemas"]["CanvasPasteBackInvocation"] | components["schemas"]["CanvasV2MaskAndCropInvocation"] | components["schemas"]["CenterPadCropInvocation"] 
| components["schemas"]["CogView4DenoiseInvocation"] | components["schemas"]["CogView4ImageToLatentsInvocation"] | components["schemas"]["CogView4LatentsToImageInvocation"] | components["schemas"]["CogView4ModelLoaderInvocation"] | components["schemas"]["CogView4TextEncoderInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["ColorInvocation"] | components["schemas"]["ColorMapInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["ConditioningCollectionInvocation"] | components["schemas"]["ConditioningInvocation"] | components["schemas"]["ContentShuffleInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["CoreMetadataInvocation"] | components["schemas"]["CreateDenoiseMaskInvocation"] | components["schemas"]["CreateGradientMaskInvocation"] | components["schemas"]["CropImageToBoundingBoxInvocation"] | components["schemas"]["CropLatentsCoreInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["DWOpenposeDetectionInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["DenoiseLatentsMetaInvocation"] | components["schemas"]["DepthAnythingDepthEstimationInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["ExpandMaskWithFadeInvocation"] | components["schemas"]["FLUXLoRACollectionLoader"] | components["schemas"]["FaceIdentifierInvocation"] | components["schemas"]["FaceMaskInvocation"] | components["schemas"]["FaceOffInvocation"] | components["schemas"]["FloatBatchInvocation"] | components["schemas"]["FloatCollectionInvocation"] | components["schemas"]["FloatGenerator"] | components["schemas"]["FloatInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["FloatMathInvocation"] | 
components["schemas"]["FloatToIntegerInvocation"] | components["schemas"]["Flux2DenoiseInvocation"] | components["schemas"]["Flux2KleinModelLoaderInvocation"] | components["schemas"]["Flux2KleinTextEncoderInvocation"] | components["schemas"]["Flux2VaeDecodeInvocation"] | components["schemas"]["Flux2VaeEncodeInvocation"] | components["schemas"]["FluxControlLoRALoaderInvocation"] | components["schemas"]["FluxControlNetInvocation"] | components["schemas"]["FluxDenoiseInvocation"] | components["schemas"]["FluxDenoiseLatentsMetaInvocation"] | components["schemas"]["FluxFillInvocation"] | components["schemas"]["FluxIPAdapterInvocation"] | components["schemas"]["FluxKontextConcatenateImagesInvocation"] | components["schemas"]["FluxKontextInvocation"] | components["schemas"]["FluxLoRALoaderInvocation"] | components["schemas"]["FluxModelLoaderInvocation"] | components["schemas"]["FluxReduxInvocation"] | components["schemas"]["FluxTextEncoderInvocation"] | components["schemas"]["FluxVaeDecodeInvocation"] | components["schemas"]["FluxVaeEncodeInvocation"] | components["schemas"]["FreeUInvocation"] | components["schemas"]["GetMaskBoundingBoxInvocation"] | components["schemas"]["GroundingDinoInvocation"] | components["schemas"]["HEDEdgeDetectionInvocation"] | components["schemas"]["HeuristicResizeInvocation"] | components["schemas"]["IPAdapterInvocation"] | components["schemas"]["IdealSizeInvocation"] | components["schemas"]["ImageBatchInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["ImageChannelMultiplyInvocation"] | components["schemas"]["ImageChannelOffsetInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImageGenerator"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | 
components["schemas"]["ImageInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ImageMaskToTensorInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["ImageNoiseInvocation"] | components["schemas"]["ImagePanelLayoutInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["IntegerBatchInvocation"] | components["schemas"]["IntegerCollectionInvocation"] | components["schemas"]["IntegerGenerator"] | components["schemas"]["IntegerInvocation"] | components["schemas"]["IntegerMathInvocation"] | components["schemas"]["InvertTensorMaskInvocation"] | components["schemas"]["InvokeAdjustImageHuePlusInvocation"] | components["schemas"]["InvokeEquivalentAchromaticLightnessInvocation"] | components["schemas"]["InvokeImageBlendInvocation"] | components["schemas"]["InvokeImageCompositorInvocation"] | components["schemas"]["InvokeImageDilateOrErodeInvocation"] | components["schemas"]["InvokeImageEnhanceInvocation"] | components["schemas"]["InvokeImageValueThresholdsInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["LaMaInfillInvocation"] | components["schemas"]["LatentsCollectionInvocation"] | components["schemas"]["LatentsInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["LineartAnimeEdgeDetectionInvocation"] | components["schemas"]["LineartEdgeDetectionInvocation"] | components["schemas"]["LlavaOnevisionVllmInvocation"] | components["schemas"]["LoRACollectionLoader"] | components["schemas"]["LoRALoaderInvocation"] | 
components["schemas"]["LoRASelectorInvocation"] | components["schemas"]["MLSDDetectionInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["MaskFromIDInvocation"] | components["schemas"]["MaskTensorToImageInvocation"] | components["schemas"]["MediaPipeFaceDetectionInvocation"] | components["schemas"]["MergeMetadataInvocation"] | components["schemas"]["MergeTilesToImageInvocation"] | components["schemas"]["MetadataFieldExtractorInvocation"] | components["schemas"]["MetadataFromImageInvocation"] | components["schemas"]["MetadataInvocation"] | components["schemas"]["MetadataItemInvocation"] | components["schemas"]["MetadataItemLinkedInvocation"] | components["schemas"]["MetadataToBoolCollectionInvocation"] | components["schemas"]["MetadataToBoolInvocation"] | components["schemas"]["MetadataToControlnetsInvocation"] | components["schemas"]["MetadataToFloatCollectionInvocation"] | components["schemas"]["MetadataToFloatInvocation"] | components["schemas"]["MetadataToIPAdaptersInvocation"] | components["schemas"]["MetadataToIntegerCollectionInvocation"] | components["schemas"]["MetadataToIntegerInvocation"] | components["schemas"]["MetadataToLorasCollectionInvocation"] | components["schemas"]["MetadataToLorasInvocation"] | components["schemas"]["MetadataToModelInvocation"] | components["schemas"]["MetadataToSDXLLorasInvocation"] | components["schemas"]["MetadataToSDXLModelInvocation"] | components["schemas"]["MetadataToSchedulerInvocation"] | components["schemas"]["MetadataToStringCollectionInvocation"] | components["schemas"]["MetadataToStringInvocation"] | components["schemas"]["MetadataToT2IAdaptersInvocation"] | components["schemas"]["MetadataToVAEInvocation"] | components["schemas"]["ModelIdentifierInvocation"] | components["schemas"]["MultiplyInvocation"] | 
components["schemas"]["NoiseInvocation"] | components["schemas"]["NormalMapInvocation"] | components["schemas"]["PBRMapsInvocation"] | components["schemas"]["PairTileImageInvocation"] | components["schemas"]["PasteImageIntoBoundingBoxInvocation"] | components["schemas"]["PiDiNetEdgeDetectionInvocation"] | components["schemas"]["PromptTemplateInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["RandomFloatInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RectangleMaskInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["RoundInvocation"] | components["schemas"]["SD3DenoiseInvocation"] | components["schemas"]["SD3ImageToLatentsInvocation"] | components["schemas"]["SD3LatentsToImageInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["SDXLLoRACollectionLoader"] | components["schemas"]["SDXLLoRALoaderInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["SaveImageInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["SchedulerInvocation"] | components["schemas"]["Sd3ModelLoaderInvocation"] | components["schemas"]["Sd3TextEncoderInvocation"] | components["schemas"]["SeamlessModeInvocation"] | components["schemas"]["SegmentAnythingInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["SpandrelImageToImageAutoscaleInvocation"] | components["schemas"]["SpandrelImageToImageInvocation"] | components["schemas"]["StringBatchInvocation"] | components["schemas"]["StringCollectionInvocation"] | components["schemas"]["StringGenerator"] | components["schemas"]["StringInvocation"] | 
components["schemas"]["StringJoinInvocation"] | components["schemas"]["StringJoinThreeInvocation"] | components["schemas"]["StringReplaceInvocation"] | components["schemas"]["StringSplitInvocation"] | components["schemas"]["StringSplitNegInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["T2IAdapterInvocation"] | components["schemas"]["TileToPropertiesInvocation"] | components["schemas"]["TiledMultiDiffusionDenoiseLatents"] | components["schemas"]["UnsharpMaskInvocation"] | components["schemas"]["VAELoaderInvocation"] | components["schemas"]["ZImageControlInvocation"] | components["schemas"]["ZImageDenoiseInvocation"] | components["schemas"]["ZImageDenoiseMetaInvocation"] | components["schemas"]["ZImageImageToLatentsInvocation"] | components["schemas"]["ZImageLatentsToImageInvocation"] | components["schemas"]["ZImageLoRACollectionLoader"] | components["schemas"]["ZImageLoRALoaderInvocation"] | components["schemas"]["ZImageModelLoaderInvocation"] | components["schemas"]["ZImageSeedVarianceEnhancerInvocation"] | components["schemas"]["ZImageTextEncoderInvocation"]; + invocation: components["schemas"]["AddInvocation"] | components["schemas"]["AlphaMaskToTensorInvocation"] | components["schemas"]["AnimaDenoiseInvocation"] | components["schemas"]["AnimaLatentsToImageInvocation"] | components["schemas"]["AnimaModelLoaderInvocation"] | components["schemas"]["AnimaTextEncoderInvocation"] | components["schemas"]["ApplyMaskTensorToImageInvocation"] | components["schemas"]["ApplyMaskToImageInvocation"] | components["schemas"]["BlankImageInvocation"] | components["schemas"]["BlendLatentsInvocation"] | components["schemas"]["BooleanCollectionInvocation"] | components["schemas"]["BooleanInvocation"] | components["schemas"]["BoundingBoxInvocation"] | components["schemas"]["CLIPSkipInvocation"] | components["schemas"]["CV2InfillInvocation"] | components["schemas"]["CalculateImageTilesEvenSplitInvocation"] | 
components["schemas"]["CalculateImageTilesInvocation"] | components["schemas"]["CalculateImageTilesMinimumOverlapInvocation"] | components["schemas"]["CannyEdgeDetectionInvocation"] | components["schemas"]["CanvasPasteBackInvocation"] | components["schemas"]["CanvasV2MaskAndCropInvocation"] | components["schemas"]["CenterPadCropInvocation"] | components["schemas"]["CogView4DenoiseInvocation"] | components["schemas"]["CogView4ImageToLatentsInvocation"] | components["schemas"]["CogView4LatentsToImageInvocation"] | components["schemas"]["CogView4ModelLoaderInvocation"] | components["schemas"]["CogView4TextEncoderInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["ColorInvocation"] | components["schemas"]["ColorMapInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["ConditioningCollectionInvocation"] | components["schemas"]["ConditioningInvocation"] | components["schemas"]["ContentShuffleInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["CoreMetadataInvocation"] | components["schemas"]["CreateDenoiseMaskInvocation"] | components["schemas"]["CreateGradientMaskInvocation"] | components["schemas"]["CropImageToBoundingBoxInvocation"] | components["schemas"]["CropLatentsCoreInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["DWOpenposeDetectionInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["DenoiseLatentsMetaInvocation"] | components["schemas"]["DepthAnythingDepthEstimationInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["ExpandMaskWithFadeInvocation"] | components["schemas"]["FLUXLoRACollectionLoader"] | components["schemas"]["FaceIdentifierInvocation"] | components["schemas"]["FaceMaskInvocation"] | 
components["schemas"]["FaceOffInvocation"] | components["schemas"]["FloatBatchInvocation"] | components["schemas"]["FloatCollectionInvocation"] | components["schemas"]["FloatGenerator"] | components["schemas"]["FloatInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["FloatMathInvocation"] | components["schemas"]["FloatToIntegerInvocation"] | components["schemas"]["Flux2DenoiseInvocation"] | components["schemas"]["Flux2KleinLoRACollectionLoader"] | components["schemas"]["Flux2KleinLoRALoaderInvocation"] | components["schemas"]["Flux2KleinModelLoaderInvocation"] | components["schemas"]["Flux2KleinTextEncoderInvocation"] | components["schemas"]["Flux2VaeDecodeInvocation"] | components["schemas"]["Flux2VaeEncodeInvocation"] | components["schemas"]["FluxControlLoRALoaderInvocation"] | components["schemas"]["FluxControlNetInvocation"] | components["schemas"]["FluxDenoiseInvocation"] | components["schemas"]["FluxDenoiseLatentsMetaInvocation"] | components["schemas"]["FluxFillInvocation"] | components["schemas"]["FluxIPAdapterInvocation"] | components["schemas"]["FluxKontextConcatenateImagesInvocation"] | components["schemas"]["FluxKontextInvocation"] | components["schemas"]["FluxLoRALoaderInvocation"] | components["schemas"]["FluxModelLoaderInvocation"] | components["schemas"]["FluxReduxInvocation"] | components["schemas"]["FluxTextEncoderInvocation"] | components["schemas"]["FluxVaeDecodeInvocation"] | components["schemas"]["FluxVaeEncodeInvocation"] | components["schemas"]["FreeUInvocation"] | components["schemas"]["GetMaskBoundingBoxInvocation"] | components["schemas"]["GroundingDinoInvocation"] | components["schemas"]["HEDEdgeDetectionInvocation"] | components["schemas"]["HeuristicResizeInvocation"] | components["schemas"]["IPAdapterInvocation"] | components["schemas"]["IdealSizeInvocation"] | components["schemas"]["ImageBatchInvocation"] | components["schemas"]["ImageBlurInvocation"] | 
components["schemas"]["ImageChannelInvocation"] | components["schemas"]["ImageChannelMultiplyInvocation"] | components["schemas"]["ImageChannelOffsetInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImageGenerator"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ImageInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ImageMaskToTensorInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["ImageNoiseInvocation"] | components["schemas"]["ImagePanelLayoutInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["IntegerBatchInvocation"] | components["schemas"]["IntegerCollectionInvocation"] | components["schemas"]["IntegerGenerator"] | components["schemas"]["IntegerInvocation"] | components["schemas"]["IntegerMathInvocation"] | components["schemas"]["InvertTensorMaskInvocation"] | components["schemas"]["InvokeAdjustImageHuePlusInvocation"] | components["schemas"]["InvokeEquivalentAchromaticLightnessInvocation"] | components["schemas"]["InvokeImageBlendInvocation"] | components["schemas"]["InvokeImageCompositorInvocation"] | components["schemas"]["InvokeImageDilateOrErodeInvocation"] | components["schemas"]["InvokeImageEnhanceInvocation"] | components["schemas"]["InvokeImageValueThresholdsInvocation"] | components["schemas"]["IterateInvocation"] | 
components["schemas"]["LaMaInfillInvocation"] | components["schemas"]["LatentsCollectionInvocation"] | components["schemas"]["LatentsInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["LineartAnimeEdgeDetectionInvocation"] | components["schemas"]["LineartEdgeDetectionInvocation"] | components["schemas"]["LlavaOnevisionVllmInvocation"] | components["schemas"]["LoRACollectionLoader"] | components["schemas"]["LoRALoaderInvocation"] | components["schemas"]["LoRASelectorInvocation"] | components["schemas"]["MLSDDetectionInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["MaskFromIDInvocation"] | components["schemas"]["MaskTensorToImageInvocation"] | components["schemas"]["MediaPipeFaceDetectionInvocation"] | components["schemas"]["MergeMetadataInvocation"] | components["schemas"]["MergeTilesToImageInvocation"] | components["schemas"]["MetadataFieldExtractorInvocation"] | components["schemas"]["MetadataFromImageInvocation"] | components["schemas"]["MetadataInvocation"] | components["schemas"]["MetadataItemInvocation"] | components["schemas"]["MetadataItemLinkedInvocation"] | components["schemas"]["MetadataToBoolCollectionInvocation"] | components["schemas"]["MetadataToBoolInvocation"] | components["schemas"]["MetadataToControlnetsInvocation"] | components["schemas"]["MetadataToFloatCollectionInvocation"] | components["schemas"]["MetadataToFloatInvocation"] | components["schemas"]["MetadataToIPAdaptersInvocation"] | components["schemas"]["MetadataToIntegerCollectionInvocation"] | components["schemas"]["MetadataToIntegerInvocation"] | components["schemas"]["MetadataToLorasCollectionInvocation"] | components["schemas"]["MetadataToLorasInvocation"] | components["schemas"]["MetadataToModelInvocation"] | components["schemas"]["MetadataToSDXLLorasInvocation"] 
| components["schemas"]["MetadataToSDXLModelInvocation"] | components["schemas"]["MetadataToSchedulerInvocation"] | components["schemas"]["MetadataToStringCollectionInvocation"] | components["schemas"]["MetadataToStringInvocation"] | components["schemas"]["MetadataToT2IAdaptersInvocation"] | components["schemas"]["MetadataToVAEInvocation"] | components["schemas"]["ModelIdentifierInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["NormalMapInvocation"] | components["schemas"]["PBRMapsInvocation"] | components["schemas"]["PairTileImageInvocation"] | components["schemas"]["PasteImageIntoBoundingBoxInvocation"] | components["schemas"]["PiDiNetEdgeDetectionInvocation"] | components["schemas"]["PromptTemplateInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["RandomFloatInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RectangleMaskInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["RoundInvocation"] | components["schemas"]["SD3DenoiseInvocation"] | components["schemas"]["SD3ImageToLatentsInvocation"] | components["schemas"]["SD3LatentsToImageInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["SDXLLoRACollectionLoader"] | components["schemas"]["SDXLLoRALoaderInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["SaveImageInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["SchedulerInvocation"] | components["schemas"]["Sd3ModelLoaderInvocation"] | components["schemas"]["Sd3TextEncoderInvocation"] | 
components["schemas"]["SeamlessModeInvocation"] | components["schemas"]["SegmentAnythingInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["SpandrelImageToImageAutoscaleInvocation"] | components["schemas"]["SpandrelImageToImageInvocation"] | components["schemas"]["StringBatchInvocation"] | components["schemas"]["StringCollectionInvocation"] | components["schemas"]["StringGenerator"] | components["schemas"]["StringInvocation"] | components["schemas"]["StringJoinInvocation"] | components["schemas"]["StringJoinThreeInvocation"] | components["schemas"]["StringReplaceInvocation"] | components["schemas"]["StringSplitInvocation"] | components["schemas"]["StringSplitNegInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["T2IAdapterInvocation"] | components["schemas"]["TileToPropertiesInvocation"] | components["schemas"]["TiledMultiDiffusionDenoiseLatents"] | components["schemas"]["UnsharpMaskInvocation"] | components["schemas"]["VAELoaderInvocation"] | components["schemas"]["ZImageControlInvocation"] | components["schemas"]["ZImageDenoiseInvocation"] | components["schemas"]["ZImageDenoiseMetaInvocation"] | components["schemas"]["ZImageImageToLatentsInvocation"] | components["schemas"]["ZImageLatentsToImageInvocation"] | components["schemas"]["ZImageLoRACollectionLoader"] | components["schemas"]["ZImageLoRALoaderInvocation"] | components["schemas"]["ZImageModelLoaderInvocation"] | components["schemas"]["ZImageSeedVarianceEnhancerInvocation"] | components["schemas"]["ZImageTextEncoderInvocation"]; /** * Invocation Source Id * @description The ID of the prepared invocation's source node @@ -13522,6 +14312,8 @@ export type components = { float_range: components["schemas"]["FloatCollectionOutput"]; float_to_int: components["schemas"]["IntegerOutput"]; flux2_denoise: components["schemas"]["LatentsOutput"]; + flux2_klein_lora_collection_loader: components["schemas"]["Flux2KleinLoRALoaderOutput"]; + 
flux2_klein_lora_loader: components["schemas"]["Flux2KleinLoRALoaderOutput"]; flux2_klein_model_loader: components["schemas"]["Flux2KleinModelLoaderOutput"]; flux2_klein_text_encoder: components["schemas"]["FluxConditioningOutput"]; flux2_vae_decode: components["schemas"]["ImageOutput"]; @@ -13734,6 +14526,12 @@ export type components = { * @default null */ destination: string | null; + /** + * User Id + * @description The ID of the user who created the queue item + * @default system + */ + user_id: string; /** * Session Id * @description The ID of the session (aka graph execution state) @@ -13743,7 +14541,7 @@ export type components = { * Invocation * @description The ID of the invocation */ - invocation: components["schemas"]["AddInvocation"] | components["schemas"]["AlphaMaskToTensorInvocation"] | components["schemas"]["AnimaDenoiseInvocation"] | components["schemas"]["AnimaLatentsToImageInvocation"] | components["schemas"]["AnimaModelLoaderInvocation"] | components["schemas"]["AnimaTextEncoderInvocation"] | components["schemas"]["ApplyMaskTensorToImageInvocation"] | components["schemas"]["ApplyMaskToImageInvocation"] | components["schemas"]["BlankImageInvocation"] | components["schemas"]["BlendLatentsInvocation"] | components["schemas"]["BooleanCollectionInvocation"] | components["schemas"]["BooleanInvocation"] | components["schemas"]["BoundingBoxInvocation"] | components["schemas"]["CLIPSkipInvocation"] | components["schemas"]["CV2InfillInvocation"] | components["schemas"]["CalculateImageTilesEvenSplitInvocation"] | components["schemas"]["CalculateImageTilesInvocation"] | components["schemas"]["CalculateImageTilesMinimumOverlapInvocation"] | components["schemas"]["CannyEdgeDetectionInvocation"] | components["schemas"]["CanvasPasteBackInvocation"] | components["schemas"]["CanvasV2MaskAndCropInvocation"] | components["schemas"]["CenterPadCropInvocation"] | components["schemas"]["CogView4DenoiseInvocation"] | 
components["schemas"]["CogView4ImageToLatentsInvocation"] | components["schemas"]["CogView4LatentsToImageInvocation"] | components["schemas"]["CogView4ModelLoaderInvocation"] | components["schemas"]["CogView4TextEncoderInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["ColorInvocation"] | components["schemas"]["ColorMapInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["ConditioningCollectionInvocation"] | components["schemas"]["ConditioningInvocation"] | components["schemas"]["ContentShuffleInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["CoreMetadataInvocation"] | components["schemas"]["CreateDenoiseMaskInvocation"] | components["schemas"]["CreateGradientMaskInvocation"] | components["schemas"]["CropImageToBoundingBoxInvocation"] | components["schemas"]["CropLatentsCoreInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["DWOpenposeDetectionInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["DenoiseLatentsMetaInvocation"] | components["schemas"]["DepthAnythingDepthEstimationInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["ExpandMaskWithFadeInvocation"] | components["schemas"]["FLUXLoRACollectionLoader"] | components["schemas"]["FaceIdentifierInvocation"] | components["schemas"]["FaceMaskInvocation"] | components["schemas"]["FaceOffInvocation"] | components["schemas"]["FloatBatchInvocation"] | components["schemas"]["FloatCollectionInvocation"] | components["schemas"]["FloatGenerator"] | components["schemas"]["FloatInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["FloatMathInvocation"] | components["schemas"]["FloatToIntegerInvocation"] | 
components["schemas"]["Flux2DenoiseInvocation"] | components["schemas"]["Flux2KleinModelLoaderInvocation"] | components["schemas"]["Flux2KleinTextEncoderInvocation"] | components["schemas"]["Flux2VaeDecodeInvocation"] | components["schemas"]["Flux2VaeEncodeInvocation"] | components["schemas"]["FluxControlLoRALoaderInvocation"] | components["schemas"]["FluxControlNetInvocation"] | components["schemas"]["FluxDenoiseInvocation"] | components["schemas"]["FluxDenoiseLatentsMetaInvocation"] | components["schemas"]["FluxFillInvocation"] | components["schemas"]["FluxIPAdapterInvocation"] | components["schemas"]["FluxKontextConcatenateImagesInvocation"] | components["schemas"]["FluxKontextInvocation"] | components["schemas"]["FluxLoRALoaderInvocation"] | components["schemas"]["FluxModelLoaderInvocation"] | components["schemas"]["FluxReduxInvocation"] | components["schemas"]["FluxTextEncoderInvocation"] | components["schemas"]["FluxVaeDecodeInvocation"] | components["schemas"]["FluxVaeEncodeInvocation"] | components["schemas"]["FreeUInvocation"] | components["schemas"]["GetMaskBoundingBoxInvocation"] | components["schemas"]["GroundingDinoInvocation"] | components["schemas"]["HEDEdgeDetectionInvocation"] | components["schemas"]["HeuristicResizeInvocation"] | components["schemas"]["IPAdapterInvocation"] | components["schemas"]["IdealSizeInvocation"] | components["schemas"]["ImageBatchInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["ImageChannelMultiplyInvocation"] | components["schemas"]["ImageChannelOffsetInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImageGenerator"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ImageInvocation"] | 
components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ImageMaskToTensorInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["ImageNoiseInvocation"] | components["schemas"]["ImagePanelLayoutInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["IntegerBatchInvocation"] | components["schemas"]["IntegerCollectionInvocation"] | components["schemas"]["IntegerGenerator"] | components["schemas"]["IntegerInvocation"] | components["schemas"]["IntegerMathInvocation"] | components["schemas"]["InvertTensorMaskInvocation"] | components["schemas"]["InvokeAdjustImageHuePlusInvocation"] | components["schemas"]["InvokeEquivalentAchromaticLightnessInvocation"] | components["schemas"]["InvokeImageBlendInvocation"] | components["schemas"]["InvokeImageCompositorInvocation"] | components["schemas"]["InvokeImageDilateOrErodeInvocation"] | components["schemas"]["InvokeImageEnhanceInvocation"] | components["schemas"]["InvokeImageValueThresholdsInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["LaMaInfillInvocation"] | components["schemas"]["LatentsCollectionInvocation"] | components["schemas"]["LatentsInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["LineartAnimeEdgeDetectionInvocation"] | components["schemas"]["LineartEdgeDetectionInvocation"] | components["schemas"]["LlavaOnevisionVllmInvocation"] | components["schemas"]["LoRACollectionLoader"] | components["schemas"]["LoRALoaderInvocation"] | components["schemas"]["LoRASelectorInvocation"] 
| components["schemas"]["MLSDDetectionInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["MaskFromIDInvocation"] | components["schemas"]["MaskTensorToImageInvocation"] | components["schemas"]["MediaPipeFaceDetectionInvocation"] | components["schemas"]["MergeMetadataInvocation"] | components["schemas"]["MergeTilesToImageInvocation"] | components["schemas"]["MetadataFieldExtractorInvocation"] | components["schemas"]["MetadataFromImageInvocation"] | components["schemas"]["MetadataInvocation"] | components["schemas"]["MetadataItemInvocation"] | components["schemas"]["MetadataItemLinkedInvocation"] | components["schemas"]["MetadataToBoolCollectionInvocation"] | components["schemas"]["MetadataToBoolInvocation"] | components["schemas"]["MetadataToControlnetsInvocation"] | components["schemas"]["MetadataToFloatCollectionInvocation"] | components["schemas"]["MetadataToFloatInvocation"] | components["schemas"]["MetadataToIPAdaptersInvocation"] | components["schemas"]["MetadataToIntegerCollectionInvocation"] | components["schemas"]["MetadataToIntegerInvocation"] | components["schemas"]["MetadataToLorasCollectionInvocation"] | components["schemas"]["MetadataToLorasInvocation"] | components["schemas"]["MetadataToModelInvocation"] | components["schemas"]["MetadataToSDXLLorasInvocation"] | components["schemas"]["MetadataToSDXLModelInvocation"] | components["schemas"]["MetadataToSchedulerInvocation"] | components["schemas"]["MetadataToStringCollectionInvocation"] | components["schemas"]["MetadataToStringInvocation"] | components["schemas"]["MetadataToT2IAdaptersInvocation"] | components["schemas"]["MetadataToVAEInvocation"] | components["schemas"]["ModelIdentifierInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["NoiseInvocation"] | 
components["schemas"]["NormalMapInvocation"] | components["schemas"]["PBRMapsInvocation"] | components["schemas"]["PairTileImageInvocation"] | components["schemas"]["PasteImageIntoBoundingBoxInvocation"] | components["schemas"]["PiDiNetEdgeDetectionInvocation"] | components["schemas"]["PromptTemplateInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["RandomFloatInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RectangleMaskInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["RoundInvocation"] | components["schemas"]["SD3DenoiseInvocation"] | components["schemas"]["SD3ImageToLatentsInvocation"] | components["schemas"]["SD3LatentsToImageInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["SDXLLoRACollectionLoader"] | components["schemas"]["SDXLLoRALoaderInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["SaveImageInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["SchedulerInvocation"] | components["schemas"]["Sd3ModelLoaderInvocation"] | components["schemas"]["Sd3TextEncoderInvocation"] | components["schemas"]["SeamlessModeInvocation"] | components["schemas"]["SegmentAnythingInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["SpandrelImageToImageAutoscaleInvocation"] | components["schemas"]["SpandrelImageToImageInvocation"] | components["schemas"]["StringBatchInvocation"] | components["schemas"]["StringCollectionInvocation"] | components["schemas"]["StringGenerator"] | components["schemas"]["StringInvocation"] | components["schemas"]["StringJoinInvocation"] 
| components["schemas"]["StringJoinThreeInvocation"] | components["schemas"]["StringReplaceInvocation"] | components["schemas"]["StringSplitInvocation"] | components["schemas"]["StringSplitNegInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["T2IAdapterInvocation"] | components["schemas"]["TileToPropertiesInvocation"] | components["schemas"]["TiledMultiDiffusionDenoiseLatents"] | components["schemas"]["UnsharpMaskInvocation"] | components["schemas"]["VAELoaderInvocation"] | components["schemas"]["ZImageControlInvocation"] | components["schemas"]["ZImageDenoiseInvocation"] | components["schemas"]["ZImageDenoiseMetaInvocation"] | components["schemas"]["ZImageImageToLatentsInvocation"] | components["schemas"]["ZImageLatentsToImageInvocation"] | components["schemas"]["ZImageLoRACollectionLoader"] | components["schemas"]["ZImageLoRALoaderInvocation"] | components["schemas"]["ZImageModelLoaderInvocation"] | components["schemas"]["ZImageSeedVarianceEnhancerInvocation"] | components["schemas"]["ZImageTextEncoderInvocation"]; + invocation: components["schemas"]["AddInvocation"] | components["schemas"]["AlphaMaskToTensorInvocation"] | components["schemas"]["AnimaDenoiseInvocation"] | components["schemas"]["AnimaLatentsToImageInvocation"] | components["schemas"]["AnimaModelLoaderInvocation"] | components["schemas"]["AnimaTextEncoderInvocation"] | components["schemas"]["ApplyMaskTensorToImageInvocation"] | components["schemas"]["ApplyMaskToImageInvocation"] | components["schemas"]["BlankImageInvocation"] | components["schemas"]["BlendLatentsInvocation"] | components["schemas"]["BooleanCollectionInvocation"] | components["schemas"]["BooleanInvocation"] | components["schemas"]["BoundingBoxInvocation"] | components["schemas"]["CLIPSkipInvocation"] | components["schemas"]["CV2InfillInvocation"] | components["schemas"]["CalculateImageTilesEvenSplitInvocation"] | components["schemas"]["CalculateImageTilesInvocation"] | 
components["schemas"]["CalculateImageTilesMinimumOverlapInvocation"] | components["schemas"]["CannyEdgeDetectionInvocation"] | components["schemas"]["CanvasPasteBackInvocation"] | components["schemas"]["CanvasV2MaskAndCropInvocation"] | components["schemas"]["CenterPadCropInvocation"] | components["schemas"]["CogView4DenoiseInvocation"] | components["schemas"]["CogView4ImageToLatentsInvocation"] | components["schemas"]["CogView4LatentsToImageInvocation"] | components["schemas"]["CogView4ModelLoaderInvocation"] | components["schemas"]["CogView4TextEncoderInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["ColorInvocation"] | components["schemas"]["ColorMapInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["ConditioningCollectionInvocation"] | components["schemas"]["ConditioningInvocation"] | components["schemas"]["ContentShuffleInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["CoreMetadataInvocation"] | components["schemas"]["CreateDenoiseMaskInvocation"] | components["schemas"]["CreateGradientMaskInvocation"] | components["schemas"]["CropImageToBoundingBoxInvocation"] | components["schemas"]["CropLatentsCoreInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["DWOpenposeDetectionInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["DenoiseLatentsMetaInvocation"] | components["schemas"]["DepthAnythingDepthEstimationInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["ExpandMaskWithFadeInvocation"] | components["schemas"]["FLUXLoRACollectionLoader"] | components["schemas"]["FaceIdentifierInvocation"] | components["schemas"]["FaceMaskInvocation"] | components["schemas"]["FaceOffInvocation"] | 
components["schemas"]["FloatBatchInvocation"] | components["schemas"]["FloatCollectionInvocation"] | components["schemas"]["FloatGenerator"] | components["schemas"]["FloatInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["FloatMathInvocation"] | components["schemas"]["FloatToIntegerInvocation"] | components["schemas"]["Flux2DenoiseInvocation"] | components["schemas"]["Flux2KleinLoRACollectionLoader"] | components["schemas"]["Flux2KleinLoRALoaderInvocation"] | components["schemas"]["Flux2KleinModelLoaderInvocation"] | components["schemas"]["Flux2KleinTextEncoderInvocation"] | components["schemas"]["Flux2VaeDecodeInvocation"] | components["schemas"]["Flux2VaeEncodeInvocation"] | components["schemas"]["FluxControlLoRALoaderInvocation"] | components["schemas"]["FluxControlNetInvocation"] | components["schemas"]["FluxDenoiseInvocation"] | components["schemas"]["FluxDenoiseLatentsMetaInvocation"] | components["schemas"]["FluxFillInvocation"] | components["schemas"]["FluxIPAdapterInvocation"] | components["schemas"]["FluxKontextConcatenateImagesInvocation"] | components["schemas"]["FluxKontextInvocation"] | components["schemas"]["FluxLoRALoaderInvocation"] | components["schemas"]["FluxModelLoaderInvocation"] | components["schemas"]["FluxReduxInvocation"] | components["schemas"]["FluxTextEncoderInvocation"] | components["schemas"]["FluxVaeDecodeInvocation"] | components["schemas"]["FluxVaeEncodeInvocation"] | components["schemas"]["FreeUInvocation"] | components["schemas"]["GetMaskBoundingBoxInvocation"] | components["schemas"]["GroundingDinoInvocation"] | components["schemas"]["HEDEdgeDetectionInvocation"] | components["schemas"]["HeuristicResizeInvocation"] | components["schemas"]["IPAdapterInvocation"] | components["schemas"]["IdealSizeInvocation"] | components["schemas"]["ImageBatchInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageChannelInvocation"] | 
components["schemas"]["ImageChannelMultiplyInvocation"] | components["schemas"]["ImageChannelOffsetInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImageGenerator"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ImageInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ImageMaskToTensorInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["ImageNoiseInvocation"] | components["schemas"]["ImagePanelLayoutInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["IntegerBatchInvocation"] | components["schemas"]["IntegerCollectionInvocation"] | components["schemas"]["IntegerGenerator"] | components["schemas"]["IntegerInvocation"] | components["schemas"]["IntegerMathInvocation"] | components["schemas"]["InvertTensorMaskInvocation"] | components["schemas"]["InvokeAdjustImageHuePlusInvocation"] | components["schemas"]["InvokeEquivalentAchromaticLightnessInvocation"] | components["schemas"]["InvokeImageBlendInvocation"] | components["schemas"]["InvokeImageCompositorInvocation"] | components["schemas"]["InvokeImageDilateOrErodeInvocation"] | components["schemas"]["InvokeImageEnhanceInvocation"] | components["schemas"]["InvokeImageValueThresholdsInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["LaMaInfillInvocation"] | 
components["schemas"]["LatentsCollectionInvocation"] | components["schemas"]["LatentsInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["LineartAnimeEdgeDetectionInvocation"] | components["schemas"]["LineartEdgeDetectionInvocation"] | components["schemas"]["LlavaOnevisionVllmInvocation"] | components["schemas"]["LoRACollectionLoader"] | components["schemas"]["LoRALoaderInvocation"] | components["schemas"]["LoRASelectorInvocation"] | components["schemas"]["MLSDDetectionInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["MaskFromIDInvocation"] | components["schemas"]["MaskTensorToImageInvocation"] | components["schemas"]["MediaPipeFaceDetectionInvocation"] | components["schemas"]["MergeMetadataInvocation"] | components["schemas"]["MergeTilesToImageInvocation"] | components["schemas"]["MetadataFieldExtractorInvocation"] | components["schemas"]["MetadataFromImageInvocation"] | components["schemas"]["MetadataInvocation"] | components["schemas"]["MetadataItemInvocation"] | components["schemas"]["MetadataItemLinkedInvocation"] | components["schemas"]["MetadataToBoolCollectionInvocation"] | components["schemas"]["MetadataToBoolInvocation"] | components["schemas"]["MetadataToControlnetsInvocation"] | components["schemas"]["MetadataToFloatCollectionInvocation"] | components["schemas"]["MetadataToFloatInvocation"] | components["schemas"]["MetadataToIPAdaptersInvocation"] | components["schemas"]["MetadataToIntegerCollectionInvocation"] | components["schemas"]["MetadataToIntegerInvocation"] | components["schemas"]["MetadataToLorasCollectionInvocation"] | components["schemas"]["MetadataToLorasInvocation"] | components["schemas"]["MetadataToModelInvocation"] | components["schemas"]["MetadataToSDXLLorasInvocation"] | 
components["schemas"]["MetadataToSDXLModelInvocation"] | components["schemas"]["MetadataToSchedulerInvocation"] | components["schemas"]["MetadataToStringCollectionInvocation"] | components["schemas"]["MetadataToStringInvocation"] | components["schemas"]["MetadataToT2IAdaptersInvocation"] | components["schemas"]["MetadataToVAEInvocation"] | components["schemas"]["ModelIdentifierInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["NormalMapInvocation"] | components["schemas"]["PBRMapsInvocation"] | components["schemas"]["PairTileImageInvocation"] | components["schemas"]["PasteImageIntoBoundingBoxInvocation"] | components["schemas"]["PiDiNetEdgeDetectionInvocation"] | components["schemas"]["PromptTemplateInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["RandomFloatInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RectangleMaskInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["RoundInvocation"] | components["schemas"]["SD3DenoiseInvocation"] | components["schemas"]["SD3ImageToLatentsInvocation"] | components["schemas"]["SD3LatentsToImageInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["SDXLLoRACollectionLoader"] | components["schemas"]["SDXLLoRALoaderInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["SaveImageInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["SchedulerInvocation"] | components["schemas"]["Sd3ModelLoaderInvocation"] | components["schemas"]["Sd3TextEncoderInvocation"] | 
components["schemas"]["SeamlessModeInvocation"] | components["schemas"]["SegmentAnythingInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["SpandrelImageToImageAutoscaleInvocation"] | components["schemas"]["SpandrelImageToImageInvocation"] | components["schemas"]["StringBatchInvocation"] | components["schemas"]["StringCollectionInvocation"] | components["schemas"]["StringGenerator"] | components["schemas"]["StringInvocation"] | components["schemas"]["StringJoinInvocation"] | components["schemas"]["StringJoinThreeInvocation"] | components["schemas"]["StringReplaceInvocation"] | components["schemas"]["StringSplitInvocation"] | components["schemas"]["StringSplitNegInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["T2IAdapterInvocation"] | components["schemas"]["TileToPropertiesInvocation"] | components["schemas"]["TiledMultiDiffusionDenoiseLatents"] | components["schemas"]["UnsharpMaskInvocation"] | components["schemas"]["VAELoaderInvocation"] | components["schemas"]["ZImageControlInvocation"] | components["schemas"]["ZImageDenoiseInvocation"] | components["schemas"]["ZImageDenoiseMetaInvocation"] | components["schemas"]["ZImageImageToLatentsInvocation"] | components["schemas"]["ZImageLatentsToImageInvocation"] | components["schemas"]["ZImageLoRACollectionLoader"] | components["schemas"]["ZImageLoRALoaderInvocation"] | components["schemas"]["ZImageModelLoaderInvocation"] | components["schemas"]["ZImageSeedVarianceEnhancerInvocation"] | components["schemas"]["ZImageTextEncoderInvocation"]; /** * Invocation Source Id * @description The ID of the prepared invocation's source node @@ -13803,6 +14601,12 @@ export type components = { * @default null */ destination: string | null; + /** + * User Id + * @description The ID of the user who created the queue item + * @default system + */ + user_id: string; /** * Session Id * @description The ID of the session (aka graph execution state) @@ -13812,7 +14616,7 @@ 
export type components = { * Invocation * @description The ID of the invocation */ - invocation: components["schemas"]["AddInvocation"] | components["schemas"]["AlphaMaskToTensorInvocation"] | components["schemas"]["AnimaDenoiseInvocation"] | components["schemas"]["AnimaLatentsToImageInvocation"] | components["schemas"]["AnimaModelLoaderInvocation"] | components["schemas"]["AnimaTextEncoderInvocation"] | components["schemas"]["ApplyMaskTensorToImageInvocation"] | components["schemas"]["ApplyMaskToImageInvocation"] | components["schemas"]["BlankImageInvocation"] | components["schemas"]["BlendLatentsInvocation"] | components["schemas"]["BooleanCollectionInvocation"] | components["schemas"]["BooleanInvocation"] | components["schemas"]["BoundingBoxInvocation"] | components["schemas"]["CLIPSkipInvocation"] | components["schemas"]["CV2InfillInvocation"] | components["schemas"]["CalculateImageTilesEvenSplitInvocation"] | components["schemas"]["CalculateImageTilesInvocation"] | components["schemas"]["CalculateImageTilesMinimumOverlapInvocation"] | components["schemas"]["CannyEdgeDetectionInvocation"] | components["schemas"]["CanvasPasteBackInvocation"] | components["schemas"]["CanvasV2MaskAndCropInvocation"] | components["schemas"]["CenterPadCropInvocation"] | components["schemas"]["CogView4DenoiseInvocation"] | components["schemas"]["CogView4ImageToLatentsInvocation"] | components["schemas"]["CogView4LatentsToImageInvocation"] | components["schemas"]["CogView4ModelLoaderInvocation"] | components["schemas"]["CogView4TextEncoderInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["ColorInvocation"] | components["schemas"]["ColorMapInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["ConditioningCollectionInvocation"] | components["schemas"]["ConditioningInvocation"] | components["schemas"]["ContentShuffleInvocation"] | components["schemas"]["ControlNetInvocation"] | 
components["schemas"]["CoreMetadataInvocation"] | components["schemas"]["CreateDenoiseMaskInvocation"] | components["schemas"]["CreateGradientMaskInvocation"] | components["schemas"]["CropImageToBoundingBoxInvocation"] | components["schemas"]["CropLatentsCoreInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["DWOpenposeDetectionInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["DenoiseLatentsMetaInvocation"] | components["schemas"]["DepthAnythingDepthEstimationInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["ExpandMaskWithFadeInvocation"] | components["schemas"]["FLUXLoRACollectionLoader"] | components["schemas"]["FaceIdentifierInvocation"] | components["schemas"]["FaceMaskInvocation"] | components["schemas"]["FaceOffInvocation"] | components["schemas"]["FloatBatchInvocation"] | components["schemas"]["FloatCollectionInvocation"] | components["schemas"]["FloatGenerator"] | components["schemas"]["FloatInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["FloatMathInvocation"] | components["schemas"]["FloatToIntegerInvocation"] | components["schemas"]["Flux2DenoiseInvocation"] | components["schemas"]["Flux2KleinModelLoaderInvocation"] | components["schemas"]["Flux2KleinTextEncoderInvocation"] | components["schemas"]["Flux2VaeDecodeInvocation"] | components["schemas"]["Flux2VaeEncodeInvocation"] | components["schemas"]["FluxControlLoRALoaderInvocation"] | components["schemas"]["FluxControlNetInvocation"] | components["schemas"]["FluxDenoiseInvocation"] | components["schemas"]["FluxDenoiseLatentsMetaInvocation"] | components["schemas"]["FluxFillInvocation"] | components["schemas"]["FluxIPAdapterInvocation"] | components["schemas"]["FluxKontextConcatenateImagesInvocation"] | components["schemas"]["FluxKontextInvocation"] | 
components["schemas"]["FluxLoRALoaderInvocation"] | components["schemas"]["FluxModelLoaderInvocation"] | components["schemas"]["FluxReduxInvocation"] | components["schemas"]["FluxTextEncoderInvocation"] | components["schemas"]["FluxVaeDecodeInvocation"] | components["schemas"]["FluxVaeEncodeInvocation"] | components["schemas"]["FreeUInvocation"] | components["schemas"]["GetMaskBoundingBoxInvocation"] | components["schemas"]["GroundingDinoInvocation"] | components["schemas"]["HEDEdgeDetectionInvocation"] | components["schemas"]["HeuristicResizeInvocation"] | components["schemas"]["IPAdapterInvocation"] | components["schemas"]["IdealSizeInvocation"] | components["schemas"]["ImageBatchInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["ImageChannelMultiplyInvocation"] | components["schemas"]["ImageChannelOffsetInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImageGenerator"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ImageInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ImageMaskToTensorInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["ImageNoiseInvocation"] | components["schemas"]["ImagePanelLayoutInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["InfillTileInvocation"] | 
components["schemas"]["IntegerBatchInvocation"] | components["schemas"]["IntegerCollectionInvocation"] | components["schemas"]["IntegerGenerator"] | components["schemas"]["IntegerInvocation"] | components["schemas"]["IntegerMathInvocation"] | components["schemas"]["InvertTensorMaskInvocation"] | components["schemas"]["InvokeAdjustImageHuePlusInvocation"] | components["schemas"]["InvokeEquivalentAchromaticLightnessInvocation"] | components["schemas"]["InvokeImageBlendInvocation"] | components["schemas"]["InvokeImageCompositorInvocation"] | components["schemas"]["InvokeImageDilateOrErodeInvocation"] | components["schemas"]["InvokeImageEnhanceInvocation"] | components["schemas"]["InvokeImageValueThresholdsInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["LaMaInfillInvocation"] | components["schemas"]["LatentsCollectionInvocation"] | components["schemas"]["LatentsInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["LineartAnimeEdgeDetectionInvocation"] | components["schemas"]["LineartEdgeDetectionInvocation"] | components["schemas"]["LlavaOnevisionVllmInvocation"] | components["schemas"]["LoRACollectionLoader"] | components["schemas"]["LoRALoaderInvocation"] | components["schemas"]["LoRASelectorInvocation"] | components["schemas"]["MLSDDetectionInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["MaskFromIDInvocation"] | components["schemas"]["MaskTensorToImageInvocation"] | components["schemas"]["MediaPipeFaceDetectionInvocation"] | components["schemas"]["MergeMetadataInvocation"] | components["schemas"]["MergeTilesToImageInvocation"] | components["schemas"]["MetadataFieldExtractorInvocation"] | components["schemas"]["MetadataFromImageInvocation"] | components["schemas"]["MetadataInvocation"] | 
components["schemas"]["MetadataItemInvocation"] | components["schemas"]["MetadataItemLinkedInvocation"] | components["schemas"]["MetadataToBoolCollectionInvocation"] | components["schemas"]["MetadataToBoolInvocation"] | components["schemas"]["MetadataToControlnetsInvocation"] | components["schemas"]["MetadataToFloatCollectionInvocation"] | components["schemas"]["MetadataToFloatInvocation"] | components["schemas"]["MetadataToIPAdaptersInvocation"] | components["schemas"]["MetadataToIntegerCollectionInvocation"] | components["schemas"]["MetadataToIntegerInvocation"] | components["schemas"]["MetadataToLorasCollectionInvocation"] | components["schemas"]["MetadataToLorasInvocation"] | components["schemas"]["MetadataToModelInvocation"] | components["schemas"]["MetadataToSDXLLorasInvocation"] | components["schemas"]["MetadataToSDXLModelInvocation"] | components["schemas"]["MetadataToSchedulerInvocation"] | components["schemas"]["MetadataToStringCollectionInvocation"] | components["schemas"]["MetadataToStringInvocation"] | components["schemas"]["MetadataToT2IAdaptersInvocation"] | components["schemas"]["MetadataToVAEInvocation"] | components["schemas"]["ModelIdentifierInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["NormalMapInvocation"] | components["schemas"]["PBRMapsInvocation"] | components["schemas"]["PairTileImageInvocation"] | components["schemas"]["PasteImageIntoBoundingBoxInvocation"] | components["schemas"]["PiDiNetEdgeDetectionInvocation"] | components["schemas"]["PromptTemplateInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["RandomFloatInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RectangleMaskInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | 
components["schemas"]["RoundInvocation"] | components["schemas"]["SD3DenoiseInvocation"] | components["schemas"]["SD3ImageToLatentsInvocation"] | components["schemas"]["SD3LatentsToImageInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["SDXLLoRACollectionLoader"] | components["schemas"]["SDXLLoRALoaderInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["SaveImageInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["SchedulerInvocation"] | components["schemas"]["Sd3ModelLoaderInvocation"] | components["schemas"]["Sd3TextEncoderInvocation"] | components["schemas"]["SeamlessModeInvocation"] | components["schemas"]["SegmentAnythingInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["SpandrelImageToImageAutoscaleInvocation"] | components["schemas"]["SpandrelImageToImageInvocation"] | components["schemas"]["StringBatchInvocation"] | components["schemas"]["StringCollectionInvocation"] | components["schemas"]["StringGenerator"] | components["schemas"]["StringInvocation"] | components["schemas"]["StringJoinInvocation"] | components["schemas"]["StringJoinThreeInvocation"] | components["schemas"]["StringReplaceInvocation"] | components["schemas"]["StringSplitInvocation"] | components["schemas"]["StringSplitNegInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["T2IAdapterInvocation"] | components["schemas"]["TileToPropertiesInvocation"] | components["schemas"]["TiledMultiDiffusionDenoiseLatents"] | components["schemas"]["UnsharpMaskInvocation"] | components["schemas"]["VAELoaderInvocation"] | components["schemas"]["ZImageControlInvocation"] | components["schemas"]["ZImageDenoiseInvocation"] | components["schemas"]["ZImageDenoiseMetaInvocation"] | 
components["schemas"]["ZImageImageToLatentsInvocation"] | components["schemas"]["ZImageLatentsToImageInvocation"] | components["schemas"]["ZImageLoRACollectionLoader"] | components["schemas"]["ZImageLoRALoaderInvocation"] | components["schemas"]["ZImageModelLoaderInvocation"] | components["schemas"]["ZImageSeedVarianceEnhancerInvocation"] | components["schemas"]["ZImageTextEncoderInvocation"]; + invocation: components["schemas"]["AddInvocation"] | components["schemas"]["AlphaMaskToTensorInvocation"] | components["schemas"]["AnimaDenoiseInvocation"] | components["schemas"]["AnimaLatentsToImageInvocation"] | components["schemas"]["AnimaModelLoaderInvocation"] | components["schemas"]["AnimaTextEncoderInvocation"] | components["schemas"]["ApplyMaskTensorToImageInvocation"] | components["schemas"]["ApplyMaskToImageInvocation"] | components["schemas"]["BlankImageInvocation"] | components["schemas"]["BlendLatentsInvocation"] | components["schemas"]["BooleanCollectionInvocation"] | components["schemas"]["BooleanInvocation"] | components["schemas"]["BoundingBoxInvocation"] | components["schemas"]["CLIPSkipInvocation"] | components["schemas"]["CV2InfillInvocation"] | components["schemas"]["CalculateImageTilesEvenSplitInvocation"] | components["schemas"]["CalculateImageTilesInvocation"] | components["schemas"]["CalculateImageTilesMinimumOverlapInvocation"] | components["schemas"]["CannyEdgeDetectionInvocation"] | components["schemas"]["CanvasPasteBackInvocation"] | components["schemas"]["CanvasV2MaskAndCropInvocation"] | components["schemas"]["CenterPadCropInvocation"] | components["schemas"]["CogView4DenoiseInvocation"] | components["schemas"]["CogView4ImageToLatentsInvocation"] | components["schemas"]["CogView4LatentsToImageInvocation"] | components["schemas"]["CogView4ModelLoaderInvocation"] | components["schemas"]["CogView4TextEncoderInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["ColorCorrectInvocation"] | 
components["schemas"]["ColorInvocation"] | components["schemas"]["ColorMapInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["ConditioningCollectionInvocation"] | components["schemas"]["ConditioningInvocation"] | components["schemas"]["ContentShuffleInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["CoreMetadataInvocation"] | components["schemas"]["CreateDenoiseMaskInvocation"] | components["schemas"]["CreateGradientMaskInvocation"] | components["schemas"]["CropImageToBoundingBoxInvocation"] | components["schemas"]["CropLatentsCoreInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["DWOpenposeDetectionInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["DenoiseLatentsMetaInvocation"] | components["schemas"]["DepthAnythingDepthEstimationInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["ExpandMaskWithFadeInvocation"] | components["schemas"]["FLUXLoRACollectionLoader"] | components["schemas"]["FaceIdentifierInvocation"] | components["schemas"]["FaceMaskInvocation"] | components["schemas"]["FaceOffInvocation"] | components["schemas"]["FloatBatchInvocation"] | components["schemas"]["FloatCollectionInvocation"] | components["schemas"]["FloatGenerator"] | components["schemas"]["FloatInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["FloatMathInvocation"] | components["schemas"]["FloatToIntegerInvocation"] | components["schemas"]["Flux2DenoiseInvocation"] | components["schemas"]["Flux2KleinLoRACollectionLoader"] | components["schemas"]["Flux2KleinLoRALoaderInvocation"] | components["schemas"]["Flux2KleinModelLoaderInvocation"] | components["schemas"]["Flux2KleinTextEncoderInvocation"] | components["schemas"]["Flux2VaeDecodeInvocation"] | 
components["schemas"]["Flux2VaeEncodeInvocation"] | components["schemas"]["FluxControlLoRALoaderInvocation"] | components["schemas"]["FluxControlNetInvocation"] | components["schemas"]["FluxDenoiseInvocation"] | components["schemas"]["FluxDenoiseLatentsMetaInvocation"] | components["schemas"]["FluxFillInvocation"] | components["schemas"]["FluxIPAdapterInvocation"] | components["schemas"]["FluxKontextConcatenateImagesInvocation"] | components["schemas"]["FluxKontextInvocation"] | components["schemas"]["FluxLoRALoaderInvocation"] | components["schemas"]["FluxModelLoaderInvocation"] | components["schemas"]["FluxReduxInvocation"] | components["schemas"]["FluxTextEncoderInvocation"] | components["schemas"]["FluxVaeDecodeInvocation"] | components["schemas"]["FluxVaeEncodeInvocation"] | components["schemas"]["FreeUInvocation"] | components["schemas"]["GetMaskBoundingBoxInvocation"] | components["schemas"]["GroundingDinoInvocation"] | components["schemas"]["HEDEdgeDetectionInvocation"] | components["schemas"]["HeuristicResizeInvocation"] | components["schemas"]["IPAdapterInvocation"] | components["schemas"]["IdealSizeInvocation"] | components["schemas"]["ImageBatchInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["ImageChannelMultiplyInvocation"] | components["schemas"]["ImageChannelOffsetInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImageGenerator"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ImageInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ImageMaskToTensorInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["ImageNoiseInvocation"] | 
components["schemas"]["ImagePanelLayoutInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["IntegerBatchInvocation"] | components["schemas"]["IntegerCollectionInvocation"] | components["schemas"]["IntegerGenerator"] | components["schemas"]["IntegerInvocation"] | components["schemas"]["IntegerMathInvocation"] | components["schemas"]["InvertTensorMaskInvocation"] | components["schemas"]["InvokeAdjustImageHuePlusInvocation"] | components["schemas"]["InvokeEquivalentAchromaticLightnessInvocation"] | components["schemas"]["InvokeImageBlendInvocation"] | components["schemas"]["InvokeImageCompositorInvocation"] | components["schemas"]["InvokeImageDilateOrErodeInvocation"] | components["schemas"]["InvokeImageEnhanceInvocation"] | components["schemas"]["InvokeImageValueThresholdsInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["LaMaInfillInvocation"] | components["schemas"]["LatentsCollectionInvocation"] | components["schemas"]["LatentsInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["LineartAnimeEdgeDetectionInvocation"] | components["schemas"]["LineartEdgeDetectionInvocation"] | components["schemas"]["LlavaOnevisionVllmInvocation"] | components["schemas"]["LoRACollectionLoader"] | components["schemas"]["LoRALoaderInvocation"] | components["schemas"]["LoRASelectorInvocation"] | components["schemas"]["MLSDDetectionInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | 
components["schemas"]["MaskFromIDInvocation"] | components["schemas"]["MaskTensorToImageInvocation"] | components["schemas"]["MediaPipeFaceDetectionInvocation"] | components["schemas"]["MergeMetadataInvocation"] | components["schemas"]["MergeTilesToImageInvocation"] | components["schemas"]["MetadataFieldExtractorInvocation"] | components["schemas"]["MetadataFromImageInvocation"] | components["schemas"]["MetadataInvocation"] | components["schemas"]["MetadataItemInvocation"] | components["schemas"]["MetadataItemLinkedInvocation"] | components["schemas"]["MetadataToBoolCollectionInvocation"] | components["schemas"]["MetadataToBoolInvocation"] | components["schemas"]["MetadataToControlnetsInvocation"] | components["schemas"]["MetadataToFloatCollectionInvocation"] | components["schemas"]["MetadataToFloatInvocation"] | components["schemas"]["MetadataToIPAdaptersInvocation"] | components["schemas"]["MetadataToIntegerCollectionInvocation"] | components["schemas"]["MetadataToIntegerInvocation"] | components["schemas"]["MetadataToLorasCollectionInvocation"] | components["schemas"]["MetadataToLorasInvocation"] | components["schemas"]["MetadataToModelInvocation"] | components["schemas"]["MetadataToSDXLLorasInvocation"] | components["schemas"]["MetadataToSDXLModelInvocation"] | components["schemas"]["MetadataToSchedulerInvocation"] | components["schemas"]["MetadataToStringCollectionInvocation"] | components["schemas"]["MetadataToStringInvocation"] | components["schemas"]["MetadataToT2IAdaptersInvocation"] | components["schemas"]["MetadataToVAEInvocation"] | components["schemas"]["ModelIdentifierInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["NormalMapInvocation"] | components["schemas"]["PBRMapsInvocation"] | components["schemas"]["PairTileImageInvocation"] | components["schemas"]["PasteImageIntoBoundingBoxInvocation"] | components["schemas"]["PiDiNetEdgeDetectionInvocation"] | 
components["schemas"]["PromptTemplateInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["RandomFloatInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RectangleMaskInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["RoundInvocation"] | components["schemas"]["SD3DenoiseInvocation"] | components["schemas"]["SD3ImageToLatentsInvocation"] | components["schemas"]["SD3LatentsToImageInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["SDXLLoRACollectionLoader"] | components["schemas"]["SDXLLoRALoaderInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["SaveImageInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["SchedulerInvocation"] | components["schemas"]["Sd3ModelLoaderInvocation"] | components["schemas"]["Sd3TextEncoderInvocation"] | components["schemas"]["SeamlessModeInvocation"] | components["schemas"]["SegmentAnythingInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["SpandrelImageToImageAutoscaleInvocation"] | components["schemas"]["SpandrelImageToImageInvocation"] | components["schemas"]["StringBatchInvocation"] | components["schemas"]["StringCollectionInvocation"] | components["schemas"]["StringGenerator"] | components["schemas"]["StringInvocation"] | components["schemas"]["StringJoinInvocation"] | components["schemas"]["StringJoinThreeInvocation"] | components["schemas"]["StringReplaceInvocation"] | components["schemas"]["StringSplitInvocation"] | components["schemas"]["StringSplitNegInvocation"] | components["schemas"]["SubtractInvocation"] | 
components["schemas"]["T2IAdapterInvocation"] | components["schemas"]["TileToPropertiesInvocation"] | components["schemas"]["TiledMultiDiffusionDenoiseLatents"] | components["schemas"]["UnsharpMaskInvocation"] | components["schemas"]["VAELoaderInvocation"] | components["schemas"]["ZImageControlInvocation"] | components["schemas"]["ZImageDenoiseInvocation"] | components["schemas"]["ZImageDenoiseMetaInvocation"] | components["schemas"]["ZImageImageToLatentsInvocation"] | components["schemas"]["ZImageLatentsToImageInvocation"] | components["schemas"]["ZImageLoRACollectionLoader"] | components["schemas"]["ZImageLoRALoaderInvocation"] | components["schemas"]["ZImageModelLoaderInvocation"] | components["schemas"]["ZImageSeedVarianceEnhancerInvocation"] | components["schemas"]["ZImageTextEncoderInvocation"]; /** * Invocation Source Id * @description The ID of the prepared invocation's source node @@ -13883,6 +14687,8 @@ export type components = { * scan_models_on_startup: Scan the models directory on startup, registering orphaned models. This is typically only used in conjunction with `use_memory_db` for testing purposes. * unsafe_disable_picklescan: UNSAFE. Disable the picklescan security check during model installation. Recommended only for development and testing purposes. This will allow arbitrary code execution during model installation, so should never be used in production. * allow_unknown_models: Allow installation of models that we are unable to identify. If enabled, models will be marked as `unknown` in the database, and will not have any metadata associated with them. If disabled, unknown models will be rejected during installation. + * multiuser: Enable multiuser support. When disabled, the application runs in single-user mode using a default system account with administrator privileges. When enabled, requires user authentication and authorization. + * strict_password_checking: Enforce strict password requirements. 
When True, passwords must contain uppercase, lowercase, and numbers. When False (default), any password is accepted but its strength (weak/moderate/strong) is reported to the user. */ InvokeAIAppConfig: { /** @@ -14250,6 +15056,18 @@ export type components = { * @default true */ allow_unknown_models?: boolean; + /** + * Multiuser + * @description Enable multiuser support. When disabled, the application runs in single-user mode using a default system account with administrator privileges. When enabled, requires user authentication and authorization. + * @default false + */ + multiuser?: boolean; + /** + * Strict Password Checking + * @description Enforce strict password requirements. When True, passwords must contain uppercase, lowercase, and numbers. When False (default), any password is accepted but its strength (weak/moderate/strong) is reported to the user. + * @default false + */ + strict_password_checking?: boolean; }; /** * InvokeAIAppConfigWithSetFields @@ -15493,6 +16311,29 @@ export type components = { */ weight: number; }; + /** + * LoRARecallParameter + * @description LoRA configuration for recall + */ + LoRARecallParameter: { + /** + * Model Name + * @description The name of the LoRA model + */ + model_name: string; + /** + * Weight + * @description The weight for the LoRA + * @default 0.75 + */ + weight?: number; + /** + * Is Enabled + * @description Whether the LoRA is enabled + * @default true + */ + is_enabled?: boolean; + }; /** * Select LoRA * @description Selects a LoRA model and weight. @@ -15626,8 +16467,11 @@ export type components = { */ base: "flux"; }; - /** LoRA_Diffusers_SD1_Config */ - LoRA_Diffusers_SD1_Config: { + /** + * LoRA_Diffusers_Flux2_Config + * @description Model config for FLUX.2 (Klein) LoRA models in Diffusers format. + */ + LoRA_Diffusers_Flux2_Config: { /** * Key * @description A unique key for this model. 
@@ -15696,13 +16540,14 @@ export type components = { format: "diffusers"; /** * Base - * @default sd-1 + * @default flux2 * @constant */ - base: "sd-1"; + base: "flux2"; + variant: components["schemas"]["Flux2VariantType"] | null; }; - /** LoRA_Diffusers_SD2_Config */ - LoRA_Diffusers_SD2_Config: { + /** LoRA_Diffusers_SD1_Config */ + LoRA_Diffusers_SD1_Config: { /** * Key * @description A unique key for this model. @@ -15771,13 +16616,13 @@ export type components = { format: "diffusers"; /** * Base - * @default sd-2 + * @default sd-1 * @constant */ - base: "sd-2"; + base: "sd-1"; }; - /** LoRA_Diffusers_SDXL_Config */ - LoRA_Diffusers_SDXL_Config: { + /** LoRA_Diffusers_SD2_Config */ + LoRA_Diffusers_SD2_Config: { /** * Key * @description A unique key for this model. @@ -15846,16 +16691,13 @@ export type components = { format: "diffusers"; /** * Base - * @default sdxl + * @default sd-2 * @constant */ - base: "sdxl"; + base: "sd-2"; }; - /** - * LoRA_Diffusers_ZImage_Config - * @description Model config for Z-Image LoRA models in Diffusers format. - */ - LoRA_Diffusers_ZImage_Config: { + /** LoRA_Diffusers_SDXL_Config */ + LoRA_Diffusers_SDXL_Config: { /** * Key * @description A unique key for this model. @@ -15924,13 +16766,16 @@ export type components = { format: "diffusers"; /** * Base - * @default z-image + * @default sdxl * @constant */ - base: "z-image"; + base: "sdxl"; }; - /** LoRA_LyCORIS_FLUX_Config */ - LoRA_LyCORIS_FLUX_Config: { + /** + * LoRA_Diffusers_ZImage_Config + * @description Model config for Z-Image LoRA models in Diffusers format. + */ + LoRA_Diffusers_ZImage_Config: { /** * Key * @description A unique key for this model. 
@@ -15993,19 +16838,20 @@ export type components = { default_settings: components["schemas"]["LoraModelDefaultSettings"] | null; /** * Format - * @default lycoris + * @default diffusers * @constant */ - format: "lycoris"; + format: "diffusers"; /** * Base - * @default flux + * @default z-image * @constant */ - base: "flux"; + base: "z-image"; + variant: components["schemas"]["ZImageVariantType"] | null; }; - /** LoRA_LyCORIS_SD1_Config */ - LoRA_LyCORIS_SD1_Config: { + /** LoRA_LyCORIS_FLUX_Config */ + LoRA_LyCORIS_FLUX_Config: { /** * Key * @description A unique key for this model. @@ -16074,10 +16920,164 @@ export type components = { format: "lycoris"; /** * Base - * @default sd-1 + * @default flux * @constant */ - base: "sd-1"; + base: "flux"; + }; + /** + * LoRA_LyCORIS_Flux2_Config + * @description Model config for FLUX.2 (Klein) LoRA models in LyCORIS format. + */ + LoRA_LyCORIS_Flux2_Config: { + /** + * Key + * @description A unique key for this model. + */ + key: string; + /** + * Hash + * @description The hash of the model file(s). + */ + hash: string; + /** + * Path + * @description Path to the model on the filesystem. Relative paths are relative to the Invoke root directory. + */ + path: string; + /** + * File Size + * @description The size of the model in bytes. + */ + file_size: number; + /** + * Name + * @description Name of the model. + */ + name: string; + /** + * Description + * @description Model description + */ + description: string | null; + /** + * Source + * @description The original source of the model (path, URL or repo_id). + */ + source: string; + /** @description The type of source */ + source_type: components["schemas"]["ModelSourceType"]; + /** + * Source Api Response + * @description The original API response from the source, as stringified JSON. 
+ */ + source_api_response: string | null; + /** + * Cover Image + * @description Url for image to preview model + */ + cover_image: string | null; + /** + * Type + * @default lora + * @constant + */ + type: "lora"; + /** + * Trigger Phrases + * @description Set of trigger phrases for this model + */ + trigger_phrases: string[] | null; + /** @description Default settings for this model */ + default_settings: components["schemas"]["LoraModelDefaultSettings"] | null; + /** + * Format + * @default lycoris + * @constant + */ + format: "lycoris"; + /** + * Base + * @default flux2 + * @constant + */ + base: "flux2"; + variant: components["schemas"]["Flux2VariantType"] | null; + }; + /** LoRA_LyCORIS_SD1_Config */ + LoRA_LyCORIS_SD1_Config: { + /** + * Key + * @description A unique key for this model. + */ + key: string; + /** + * Hash + * @description The hash of the model file(s). + */ + hash: string; + /** + * Path + * @description Path to the model on the filesystem. Relative paths are relative to the Invoke root directory. + */ + path: string; + /** + * File Size + * @description The size of the model in bytes. + */ + file_size: number; + /** + * Name + * @description Name of the model. + */ + name: string; + /** + * Description + * @description Model description + */ + description: string | null; + /** + * Source + * @description The original source of the model (path, URL or repo_id). + */ + source: string; + /** @description The type of source */ + source_type: components["schemas"]["ModelSourceType"]; + /** + * Source Api Response + * @description The original API response from the source, as stringified JSON. 
+ */ + source_api_response: string | null; + /** + * Cover Image + * @description Url for image to preview model + */ + cover_image: string | null; + /** + * Type + * @default lora + * @constant + */ + type: "lora"; + /** + * Trigger Phrases + * @description Set of trigger phrases for this model + */ + trigger_phrases: string[] | null; + /** @description Default settings for this model */ + default_settings: components["schemas"]["LoraModelDefaultSettings"] | null; + /** + * Format + * @default lycoris + * @constant + */ + format: "lycoris"; + /** + * Base + * @default sd-1 + * @constant + */ + base: "sd-1"; }; /** LoRA_LyCORIS_SD2_Config */ LoRA_LyCORIS_SD2_Config: { @@ -16306,6 +17306,7 @@ export type components = { * @constant */ base: "z-image"; + variant: components["schemas"]["ZImageVariantType"] | null; }; /** LoRA_OMI_FLUX_Config */ LoRA_OMI_FLUX_Config: { @@ -16480,6 +17481,57 @@ export type components = { * @enum {integer} */ LogLevel: 0 | 10 | 20 | 30 | 40 | 50; + /** + * LoginRequest + * @description Request body for user login. + */ + LoginRequest: { + /** + * Email + * @description User email address + */ + email: string; + /** + * Password + * @description User password + */ + password: string; + /** + * Remember Me + * @description Whether to extend session duration + * @default false + */ + remember_me?: boolean; + }; + /** + * LoginResponse + * @description Response from successful login. + */ + LoginResponse: { + /** + * Token + * @description JWT access token + */ + token: string; + /** @description User information */ + user: components["schemas"]["UserDTO"]; + /** + * Expires In + * @description Token expiration time in seconds + */ + expires_in: number; + }; + /** + * LogoutResponse + * @description Response from logout. 
+ */ + LogoutResponse: { + /** + * Success + * @description Whether logout was successful + */ + success: boolean; + }; /** LoraModelDefaultSettings */ LoraModelDefaultSettings: { /** @@ -17421,6 +18473,7 @@ export type components = { * @constant */ format: "checkpoint"; + variant: components["schemas"]["ZImageVariantType"]; }; /** Main_Diffusers_CogView4_Config */ Main_Diffusers_CogView4_Config: { @@ -18063,7 +19116,7 @@ export type components = { }; /** * Main_Diffusers_ZImage_Config - * @description Model config for Z-Image diffusers models (Z-Image-Turbo, Z-Image-Base, Z-Image-Edit). + * @description Model config for Z-Image diffusers models (Z-Image-Turbo, Z-Image-Base). */ Main_Diffusers_ZImage_Config: { /** @@ -18140,6 +19193,7 @@ export type components = { * @constant */ base: "z-image"; + variant: components["schemas"]["ZImageVariantType"]; }; /** * Main_GGUF_FLUX_Config @@ -18391,6 +19445,7 @@ export type components = { * @constant */ format: "gguf_quantized"; + variant: components["schemas"]["ZImageVariantType"]; }; /** * Combine Masks @@ -20220,7 +21275,7 @@ export type components = { * Config * @description The installed model's config */ - config: components["schemas"]["Main_Diffusers_SD1_Config"] | components["schemas"]["Main_Diffusers_SD2_Config"] | components["schemas"]["Main_Diffusers_SDXL_Config"] | components["schemas"]["Main_Diffusers_SDXLRefiner_Config"] | components["schemas"]["Main_Diffusers_SD3_Config"] | components["schemas"]["Main_Diffusers_FLUX_Config"] | components["schemas"]["Main_Diffusers_Flux2_Config"] | components["schemas"]["Main_Diffusers_CogView4_Config"] | components["schemas"]["Main_Diffusers_ZImage_Config"] | components["schemas"]["Main_Checkpoint_SD1_Config"] | components["schemas"]["Main_Checkpoint_SD2_Config"] | components["schemas"]["Main_Checkpoint_SDXL_Config"] | components["schemas"]["Main_Checkpoint_SDXLRefiner_Config"] | components["schemas"]["Main_Checkpoint_Flux2_Config"] | 
components["schemas"]["Main_Checkpoint_FLUX_Config"] | components["schemas"]["Main_Checkpoint_ZImage_Config"] | components["schemas"]["Main_Checkpoint_Anima_Config"] | components["schemas"]["Main_BnBNF4_FLUX_Config"] | components["schemas"]["Main_GGUF_Flux2_Config"] | components["schemas"]["Main_GGUF_FLUX_Config"] | components["schemas"]["Main_GGUF_ZImage_Config"] | components["schemas"]["VAE_Checkpoint_SD1_Config"] | components["schemas"]["VAE_Checkpoint_SD2_Config"] | components["schemas"]["VAE_Checkpoint_SDXL_Config"] | components["schemas"]["VAE_Checkpoint_FLUX_Config"] | components["schemas"]["VAE_Checkpoint_Flux2_Config"] | components["schemas"]["VAE_Checkpoint_Anima_Config"] | components["schemas"]["VAE_Diffusers_SD1_Config"] | components["schemas"]["VAE_Diffusers_SDXL_Config"] | components["schemas"]["VAE_Diffusers_Flux2_Config"] | components["schemas"]["ControlNet_Checkpoint_SD1_Config"] | components["schemas"]["ControlNet_Checkpoint_SD2_Config"] | components["schemas"]["ControlNet_Checkpoint_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_FLUX_Config"] | components["schemas"]["ControlNet_Checkpoint_ZImage_Config"] | components["schemas"]["ControlNet_Diffusers_SD1_Config"] | components["schemas"]["ControlNet_Diffusers_SD2_Config"] | components["schemas"]["ControlNet_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_SD1_Config"] | components["schemas"]["LoRA_LyCORIS_SD2_Config"] | components["schemas"]["LoRA_LyCORIS_SDXL_Config"] | components["schemas"]["LoRA_LyCORIS_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_ZImage_Config"] | components["schemas"]["LoRA_OMI_SDXL_Config"] | components["schemas"]["LoRA_OMI_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_SD1_Config"] | components["schemas"]["LoRA_Diffusers_SD2_Config"] | components["schemas"]["LoRA_Diffusers_SDXL_Config"] | components["schemas"]["LoRA_Diffusers_FLUX_Config"] | 
components["schemas"]["LoRA_Diffusers_ZImage_Config"] | components["schemas"]["ControlLoRA_LyCORIS_FLUX_Config"] | components["schemas"]["T5Encoder_T5Encoder_Config"] | components["schemas"]["T5Encoder_BnBLLMint8_Config"] | components["schemas"]["Qwen3Encoder_Qwen3Encoder_Config"] | components["schemas"]["Qwen3Encoder_Checkpoint_Config"] | components["schemas"]["Qwen3Encoder_GGUF_Config"] | components["schemas"]["TI_File_SD1_Config"] | components["schemas"]["TI_File_SD2_Config"] | components["schemas"]["TI_File_SDXL_Config"] | components["schemas"]["TI_Folder_SD1_Config"] | components["schemas"]["TI_Folder_SD2_Config"] | components["schemas"]["TI_Folder_SDXL_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD1_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD2_Config"] | components["schemas"]["IPAdapter_InvokeAI_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD1_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD2_Config"] | components["schemas"]["IPAdapter_Checkpoint_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_FLUX_Config"] | components["schemas"]["T2IAdapter_Diffusers_SD1_Config"] | components["schemas"]["T2IAdapter_Diffusers_SDXL_Config"] | components["schemas"]["Spandrel_Checkpoint_Config"] | components["schemas"]["CLIPEmbed_Diffusers_G_Config"] | components["schemas"]["CLIPEmbed_Diffusers_L_Config"] | components["schemas"]["CLIPVision_Diffusers_Config"] | components["schemas"]["SigLIP_Diffusers_Config"] | components["schemas"]["FLUXRedux_Checkpoint_Config"] | components["schemas"]["LlavaOnevision_Diffusers_Config"] | components["schemas"]["Unknown_Config"]; + config: components["schemas"]["Main_Diffusers_SD1_Config"] | components["schemas"]["Main_Diffusers_SD2_Config"] | components["schemas"]["Main_Diffusers_SDXL_Config"] | components["schemas"]["Main_Diffusers_SDXLRefiner_Config"] | components["schemas"]["Main_Diffusers_SD3_Config"] | components["schemas"]["Main_Diffusers_FLUX_Config"] | 
components["schemas"]["Main_Diffusers_Flux2_Config"] | components["schemas"]["Main_Diffusers_CogView4_Config"] | components["schemas"]["Main_Diffusers_ZImage_Config"] | components["schemas"]["Main_Checkpoint_SD1_Config"] | components["schemas"]["Main_Checkpoint_SD2_Config"] | components["schemas"]["Main_Checkpoint_SDXL_Config"] | components["schemas"]["Main_Checkpoint_SDXLRefiner_Config"] | components["schemas"]["Main_Checkpoint_Flux2_Config"] | components["schemas"]["Main_Checkpoint_FLUX_Config"] | components["schemas"]["Main_Checkpoint_ZImage_Config"] | components["schemas"]["Main_Checkpoint_Anima_Config"] | components["schemas"]["Main_BnBNF4_FLUX_Config"] | components["schemas"]["Main_GGUF_Flux2_Config"] | components["schemas"]["Main_GGUF_FLUX_Config"] | components["schemas"]["Main_GGUF_ZImage_Config"] | components["schemas"]["VAE_Checkpoint_SD1_Config"] | components["schemas"]["VAE_Checkpoint_SD2_Config"] | components["schemas"]["VAE_Checkpoint_SDXL_Config"] | components["schemas"]["VAE_Checkpoint_FLUX_Config"] | components["schemas"]["VAE_Checkpoint_Flux2_Config"] | components["schemas"]["VAE_Checkpoint_Anima_Config"] | components["schemas"]["VAE_Diffusers_SD1_Config"] | components["schemas"]["VAE_Diffusers_SDXL_Config"] | components["schemas"]["VAE_Diffusers_Flux2_Config"] | components["schemas"]["ControlNet_Checkpoint_SD1_Config"] | components["schemas"]["ControlNet_Checkpoint_SD2_Config"] | components["schemas"]["ControlNet_Checkpoint_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_FLUX_Config"] | components["schemas"]["ControlNet_Checkpoint_ZImage_Config"] | components["schemas"]["ControlNet_Diffusers_SD1_Config"] | components["schemas"]["ControlNet_Diffusers_SD2_Config"] | components["schemas"]["ControlNet_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_SD1_Config"] | components["schemas"]["LoRA_LyCORIS_SD2_Config"] | components["schemas"]["LoRA_LyCORIS_SDXL_Config"] | 
components["schemas"]["LoRA_LyCORIS_Flux2_Config"] | components["schemas"]["LoRA_LyCORIS_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_ZImage_Config"] | components["schemas"]["LoRA_OMI_SDXL_Config"] | components["schemas"]["LoRA_OMI_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_SD1_Config"] | components["schemas"]["LoRA_Diffusers_SD2_Config"] | components["schemas"]["LoRA_Diffusers_SDXL_Config"] | components["schemas"]["LoRA_Diffusers_Flux2_Config"] | components["schemas"]["LoRA_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_ZImage_Config"] | components["schemas"]["ControlLoRA_LyCORIS_FLUX_Config"] | components["schemas"]["T5Encoder_T5Encoder_Config"] | components["schemas"]["T5Encoder_BnBLLMint8_Config"] | components["schemas"]["Qwen3Encoder_Qwen3Encoder_Config"] | components["schemas"]["Qwen3Encoder_Checkpoint_Config"] | components["schemas"]["Qwen3Encoder_GGUF_Config"] | components["schemas"]["TI_File_SD1_Config"] | components["schemas"]["TI_File_SD2_Config"] | components["schemas"]["TI_File_SDXL_Config"] | components["schemas"]["TI_Folder_SD1_Config"] | components["schemas"]["TI_Folder_SD2_Config"] | components["schemas"]["TI_Folder_SDXL_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD1_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD2_Config"] | components["schemas"]["IPAdapter_InvokeAI_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD1_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD2_Config"] | components["schemas"]["IPAdapter_Checkpoint_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_FLUX_Config"] | components["schemas"]["T2IAdapter_Diffusers_SD1_Config"] | components["schemas"]["T2IAdapter_Diffusers_SDXL_Config"] | components["schemas"]["Spandrel_Checkpoint_Config"] | components["schemas"]["CLIPEmbed_Diffusers_G_Config"] | components["schemas"]["CLIPEmbed_Diffusers_L_Config"] | components["schemas"]["CLIPVision_Diffusers_Config"] | 
components["schemas"]["SigLIP_Diffusers_Config"] | components["schemas"]["FLUXRedux_Checkpoint_Config"] | components["schemas"]["LlavaOnevision_Diffusers_Config"] | components["schemas"]["Unknown_Config"]; }; /** * ModelInstallDownloadProgressEvent @@ -20386,7 +21441,7 @@ export type components = { * Config Out * @description After successful installation, this will hold the configuration object. */ - config_out?: (components["schemas"]["Main_Diffusers_SD1_Config"] | components["schemas"]["Main_Diffusers_SD2_Config"] | components["schemas"]["Main_Diffusers_SDXL_Config"] | components["schemas"]["Main_Diffusers_SDXLRefiner_Config"] | components["schemas"]["Main_Diffusers_SD3_Config"] | components["schemas"]["Main_Diffusers_FLUX_Config"] | components["schemas"]["Main_Diffusers_Flux2_Config"] | components["schemas"]["Main_Diffusers_CogView4_Config"] | components["schemas"]["Main_Diffusers_ZImage_Config"] | components["schemas"]["Main_Checkpoint_SD1_Config"] | components["schemas"]["Main_Checkpoint_SD2_Config"] | components["schemas"]["Main_Checkpoint_SDXL_Config"] | components["schemas"]["Main_Checkpoint_SDXLRefiner_Config"] | components["schemas"]["Main_Checkpoint_Flux2_Config"] | components["schemas"]["Main_Checkpoint_FLUX_Config"] | components["schemas"]["Main_Checkpoint_ZImage_Config"] | components["schemas"]["Main_Checkpoint_Anima_Config"] | components["schemas"]["Main_BnBNF4_FLUX_Config"] | components["schemas"]["Main_GGUF_Flux2_Config"] | components["schemas"]["Main_GGUF_FLUX_Config"] | components["schemas"]["Main_GGUF_ZImage_Config"] | components["schemas"]["VAE_Checkpoint_SD1_Config"] | components["schemas"]["VAE_Checkpoint_SD2_Config"] | components["schemas"]["VAE_Checkpoint_SDXL_Config"] | components["schemas"]["VAE_Checkpoint_FLUX_Config"] | components["schemas"]["VAE_Checkpoint_Flux2_Config"] | components["schemas"]["VAE_Checkpoint_Anima_Config"] | components["schemas"]["VAE_Diffusers_SD1_Config"] | components["schemas"]["VAE_Diffusers_SDXL_Config"] | 
components["schemas"]["VAE_Diffusers_Flux2_Config"] | components["schemas"]["ControlNet_Checkpoint_SD1_Config"] | components["schemas"]["ControlNet_Checkpoint_SD2_Config"] | components["schemas"]["ControlNet_Checkpoint_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_FLUX_Config"] | components["schemas"]["ControlNet_Checkpoint_ZImage_Config"] | components["schemas"]["ControlNet_Diffusers_SD1_Config"] | components["schemas"]["ControlNet_Diffusers_SD2_Config"] | components["schemas"]["ControlNet_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_SD1_Config"] | components["schemas"]["LoRA_LyCORIS_SD2_Config"] | components["schemas"]["LoRA_LyCORIS_SDXL_Config"] | components["schemas"]["LoRA_LyCORIS_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_ZImage_Config"] | components["schemas"]["LoRA_OMI_SDXL_Config"] | components["schemas"]["LoRA_OMI_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_SD1_Config"] | components["schemas"]["LoRA_Diffusers_SD2_Config"] | components["schemas"]["LoRA_Diffusers_SDXL_Config"] | components["schemas"]["LoRA_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_ZImage_Config"] | components["schemas"]["ControlLoRA_LyCORIS_FLUX_Config"] | components["schemas"]["T5Encoder_T5Encoder_Config"] | components["schemas"]["T5Encoder_BnBLLMint8_Config"] | components["schemas"]["Qwen3Encoder_Qwen3Encoder_Config"] | components["schemas"]["Qwen3Encoder_Checkpoint_Config"] | components["schemas"]["Qwen3Encoder_GGUF_Config"] | components["schemas"]["TI_File_SD1_Config"] | components["schemas"]["TI_File_SD2_Config"] | components["schemas"]["TI_File_SDXL_Config"] | components["schemas"]["TI_Folder_SD1_Config"] | components["schemas"]["TI_Folder_SD2_Config"] | components["schemas"]["TI_Folder_SDXL_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD1_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD2_Config"] | 
components["schemas"]["IPAdapter_InvokeAI_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD1_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD2_Config"] | components["schemas"]["IPAdapter_Checkpoint_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_FLUX_Config"] | components["schemas"]["T2IAdapter_Diffusers_SD1_Config"] | components["schemas"]["T2IAdapter_Diffusers_SDXL_Config"] | components["schemas"]["Spandrel_Checkpoint_Config"] | components["schemas"]["CLIPEmbed_Diffusers_G_Config"] | components["schemas"]["CLIPEmbed_Diffusers_L_Config"] | components["schemas"]["CLIPVision_Diffusers_Config"] | components["schemas"]["SigLIP_Diffusers_Config"] | components["schemas"]["FLUXRedux_Checkpoint_Config"] | components["schemas"]["LlavaOnevision_Diffusers_Config"] | components["schemas"]["Unknown_Config"]) | null; + config_out?: (components["schemas"]["Main_Diffusers_SD1_Config"] | components["schemas"]["Main_Diffusers_SD2_Config"] | components["schemas"]["Main_Diffusers_SDXL_Config"] | components["schemas"]["Main_Diffusers_SDXLRefiner_Config"] | components["schemas"]["Main_Diffusers_SD3_Config"] | components["schemas"]["Main_Diffusers_FLUX_Config"] | components["schemas"]["Main_Diffusers_Flux2_Config"] | components["schemas"]["Main_Diffusers_CogView4_Config"] | components["schemas"]["Main_Diffusers_ZImage_Config"] | components["schemas"]["Main_Checkpoint_SD1_Config"] | components["schemas"]["Main_Checkpoint_SD2_Config"] | components["schemas"]["Main_Checkpoint_SDXL_Config"] | components["schemas"]["Main_Checkpoint_SDXLRefiner_Config"] | components["schemas"]["Main_Checkpoint_Flux2_Config"] | components["schemas"]["Main_Checkpoint_FLUX_Config"] | components["schemas"]["Main_Checkpoint_ZImage_Config"] | components["schemas"]["Main_Checkpoint_Anima_Config"] | components["schemas"]["Main_BnBNF4_FLUX_Config"] | components["schemas"]["Main_GGUF_Flux2_Config"] | components["schemas"]["Main_GGUF_FLUX_Config"] | 
components["schemas"]["Main_GGUF_ZImage_Config"] | components["schemas"]["VAE_Checkpoint_SD1_Config"] | components["schemas"]["VAE_Checkpoint_SD2_Config"] | components["schemas"]["VAE_Checkpoint_SDXL_Config"] | components["schemas"]["VAE_Checkpoint_FLUX_Config"] | components["schemas"]["VAE_Checkpoint_Flux2_Config"] | components["schemas"]["VAE_Checkpoint_Anima_Config"] | components["schemas"]["VAE_Diffusers_SD1_Config"] | components["schemas"]["VAE_Diffusers_SDXL_Config"] | components["schemas"]["VAE_Diffusers_Flux2_Config"] | components["schemas"]["ControlNet_Checkpoint_SD1_Config"] | components["schemas"]["ControlNet_Checkpoint_SD2_Config"] | components["schemas"]["ControlNet_Checkpoint_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_FLUX_Config"] | components["schemas"]["ControlNet_Checkpoint_ZImage_Config"] | components["schemas"]["ControlNet_Diffusers_SD1_Config"] | components["schemas"]["ControlNet_Diffusers_SD2_Config"] | components["schemas"]["ControlNet_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_SD1_Config"] | components["schemas"]["LoRA_LyCORIS_SD2_Config"] | components["schemas"]["LoRA_LyCORIS_SDXL_Config"] | components["schemas"]["LoRA_LyCORIS_Flux2_Config"] | components["schemas"]["LoRA_LyCORIS_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_ZImage_Config"] | components["schemas"]["LoRA_OMI_SDXL_Config"] | components["schemas"]["LoRA_OMI_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_SD1_Config"] | components["schemas"]["LoRA_Diffusers_SD2_Config"] | components["schemas"]["LoRA_Diffusers_SDXL_Config"] | components["schemas"]["LoRA_Diffusers_Flux2_Config"] | components["schemas"]["LoRA_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_ZImage_Config"] | components["schemas"]["ControlLoRA_LyCORIS_FLUX_Config"] | components["schemas"]["T5Encoder_T5Encoder_Config"] | components["schemas"]["T5Encoder_BnBLLMint8_Config"] | 
components["schemas"]["Qwen3Encoder_Qwen3Encoder_Config"] | components["schemas"]["Qwen3Encoder_Checkpoint_Config"] | components["schemas"]["Qwen3Encoder_GGUF_Config"] | components["schemas"]["TI_File_SD1_Config"] | components["schemas"]["TI_File_SD2_Config"] | components["schemas"]["TI_File_SDXL_Config"] | components["schemas"]["TI_Folder_SD1_Config"] | components["schemas"]["TI_Folder_SD2_Config"] | components["schemas"]["TI_Folder_SDXL_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD1_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD2_Config"] | components["schemas"]["IPAdapter_InvokeAI_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD1_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD2_Config"] | components["schemas"]["IPAdapter_Checkpoint_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_FLUX_Config"] | components["schemas"]["T2IAdapter_Diffusers_SD1_Config"] | components["schemas"]["T2IAdapter_Diffusers_SDXL_Config"] | components["schemas"]["Spandrel_Checkpoint_Config"] | components["schemas"]["CLIPEmbed_Diffusers_G_Config"] | components["schemas"]["CLIPEmbed_Diffusers_L_Config"] | components["schemas"]["CLIPVision_Diffusers_Config"] | components["schemas"]["SigLIP_Diffusers_Config"] | components["schemas"]["FLUXRedux_Checkpoint_Config"] | components["schemas"]["LlavaOnevision_Diffusers_Config"] | components["schemas"]["Unknown_Config"]) | null; /** * Inplace * @description Leave model in its current location; otherwise install under models directory @@ -20472,7 +21527,7 @@ export type components = { * Config * @description The model's config */ - config: components["schemas"]["Main_Diffusers_SD1_Config"] | components["schemas"]["Main_Diffusers_SD2_Config"] | components["schemas"]["Main_Diffusers_SDXL_Config"] | components["schemas"]["Main_Diffusers_SDXLRefiner_Config"] | components["schemas"]["Main_Diffusers_SD3_Config"] | components["schemas"]["Main_Diffusers_FLUX_Config"] | 
components["schemas"]["Main_Diffusers_Flux2_Config"] | components["schemas"]["Main_Diffusers_CogView4_Config"] | components["schemas"]["Main_Diffusers_ZImage_Config"] | components["schemas"]["Main_Checkpoint_SD1_Config"] | components["schemas"]["Main_Checkpoint_SD2_Config"] | components["schemas"]["Main_Checkpoint_SDXL_Config"] | components["schemas"]["Main_Checkpoint_SDXLRefiner_Config"] | components["schemas"]["Main_Checkpoint_Flux2_Config"] | components["schemas"]["Main_Checkpoint_FLUX_Config"] | components["schemas"]["Main_Checkpoint_ZImage_Config"] | components["schemas"]["Main_Checkpoint_Anima_Config"] | components["schemas"]["Main_BnBNF4_FLUX_Config"] | components["schemas"]["Main_GGUF_Flux2_Config"] | components["schemas"]["Main_GGUF_FLUX_Config"] | components["schemas"]["Main_GGUF_ZImage_Config"] | components["schemas"]["VAE_Checkpoint_SD1_Config"] | components["schemas"]["VAE_Checkpoint_SD2_Config"] | components["schemas"]["VAE_Checkpoint_SDXL_Config"] | components["schemas"]["VAE_Checkpoint_FLUX_Config"] | components["schemas"]["VAE_Checkpoint_Flux2_Config"] | components["schemas"]["VAE_Checkpoint_Anima_Config"] | components["schemas"]["VAE_Diffusers_SD1_Config"] | components["schemas"]["VAE_Diffusers_SDXL_Config"] | components["schemas"]["VAE_Diffusers_Flux2_Config"] | components["schemas"]["ControlNet_Checkpoint_SD1_Config"] | components["schemas"]["ControlNet_Checkpoint_SD2_Config"] | components["schemas"]["ControlNet_Checkpoint_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_FLUX_Config"] | components["schemas"]["ControlNet_Checkpoint_ZImage_Config"] | components["schemas"]["ControlNet_Diffusers_SD1_Config"] | components["schemas"]["ControlNet_Diffusers_SD2_Config"] | components["schemas"]["ControlNet_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_SD1_Config"] | components["schemas"]["LoRA_LyCORIS_SD2_Config"] | components["schemas"]["LoRA_LyCORIS_SDXL_Config"] | 
components["schemas"]["LoRA_LyCORIS_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_ZImage_Config"] | components["schemas"]["LoRA_OMI_SDXL_Config"] | components["schemas"]["LoRA_OMI_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_SD1_Config"] | components["schemas"]["LoRA_Diffusers_SD2_Config"] | components["schemas"]["LoRA_Diffusers_SDXL_Config"] | components["schemas"]["LoRA_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_ZImage_Config"] | components["schemas"]["ControlLoRA_LyCORIS_FLUX_Config"] | components["schemas"]["T5Encoder_T5Encoder_Config"] | components["schemas"]["T5Encoder_BnBLLMint8_Config"] | components["schemas"]["Qwen3Encoder_Qwen3Encoder_Config"] | components["schemas"]["Qwen3Encoder_Checkpoint_Config"] | components["schemas"]["Qwen3Encoder_GGUF_Config"] | components["schemas"]["TI_File_SD1_Config"] | components["schemas"]["TI_File_SD2_Config"] | components["schemas"]["TI_File_SDXL_Config"] | components["schemas"]["TI_Folder_SD1_Config"] | components["schemas"]["TI_Folder_SD2_Config"] | components["schemas"]["TI_Folder_SDXL_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD1_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD2_Config"] | components["schemas"]["IPAdapter_InvokeAI_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD1_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD2_Config"] | components["schemas"]["IPAdapter_Checkpoint_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_FLUX_Config"] | components["schemas"]["T2IAdapter_Diffusers_SD1_Config"] | components["schemas"]["T2IAdapter_Diffusers_SDXL_Config"] | components["schemas"]["Spandrel_Checkpoint_Config"] | components["schemas"]["CLIPEmbed_Diffusers_G_Config"] | components["schemas"]["CLIPEmbed_Diffusers_L_Config"] | components["schemas"]["CLIPVision_Diffusers_Config"] | components["schemas"]["SigLIP_Diffusers_Config"] | components["schemas"]["FLUXRedux_Checkpoint_Config"] | 
components["schemas"]["LlavaOnevision_Diffusers_Config"] | components["schemas"]["Unknown_Config"]; + config: components["schemas"]["Main_Diffusers_SD1_Config"] | components["schemas"]["Main_Diffusers_SD2_Config"] | components["schemas"]["Main_Diffusers_SDXL_Config"] | components["schemas"]["Main_Diffusers_SDXLRefiner_Config"] | components["schemas"]["Main_Diffusers_SD3_Config"] | components["schemas"]["Main_Diffusers_FLUX_Config"] | components["schemas"]["Main_Diffusers_Flux2_Config"] | components["schemas"]["Main_Diffusers_CogView4_Config"] | components["schemas"]["Main_Diffusers_ZImage_Config"] | components["schemas"]["Main_Checkpoint_SD1_Config"] | components["schemas"]["Main_Checkpoint_SD2_Config"] | components["schemas"]["Main_Checkpoint_SDXL_Config"] | components["schemas"]["Main_Checkpoint_SDXLRefiner_Config"] | components["schemas"]["Main_Checkpoint_Flux2_Config"] | components["schemas"]["Main_Checkpoint_FLUX_Config"] | components["schemas"]["Main_Checkpoint_ZImage_Config"] | components["schemas"]["Main_Checkpoint_Anima_Config"] | components["schemas"]["Main_BnBNF4_FLUX_Config"] | components["schemas"]["Main_GGUF_Flux2_Config"] | components["schemas"]["Main_GGUF_FLUX_Config"] | components["schemas"]["Main_GGUF_ZImage_Config"] | components["schemas"]["VAE_Checkpoint_SD1_Config"] | components["schemas"]["VAE_Checkpoint_SD2_Config"] | components["schemas"]["VAE_Checkpoint_SDXL_Config"] | components["schemas"]["VAE_Checkpoint_FLUX_Config"] | components["schemas"]["VAE_Checkpoint_Flux2_Config"] | components["schemas"]["VAE_Checkpoint_Anima_Config"] | components["schemas"]["VAE_Diffusers_SD1_Config"] | components["schemas"]["VAE_Diffusers_SDXL_Config"] | components["schemas"]["VAE_Diffusers_Flux2_Config"] | components["schemas"]["ControlNet_Checkpoint_SD1_Config"] | components["schemas"]["ControlNet_Checkpoint_SD2_Config"] | components["schemas"]["ControlNet_Checkpoint_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_FLUX_Config"] | 
components["schemas"]["ControlNet_Checkpoint_ZImage_Config"] | components["schemas"]["ControlNet_Diffusers_SD1_Config"] | components["schemas"]["ControlNet_Diffusers_SD2_Config"] | components["schemas"]["ControlNet_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_SD1_Config"] | components["schemas"]["LoRA_LyCORIS_SD2_Config"] | components["schemas"]["LoRA_LyCORIS_SDXL_Config"] | components["schemas"]["LoRA_LyCORIS_Flux2_Config"] | components["schemas"]["LoRA_LyCORIS_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_ZImage_Config"] | components["schemas"]["LoRA_OMI_SDXL_Config"] | components["schemas"]["LoRA_OMI_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_SD1_Config"] | components["schemas"]["LoRA_Diffusers_SD2_Config"] | components["schemas"]["LoRA_Diffusers_SDXL_Config"] | components["schemas"]["LoRA_Diffusers_Flux2_Config"] | components["schemas"]["LoRA_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_ZImage_Config"] | components["schemas"]["ControlLoRA_LyCORIS_FLUX_Config"] | components["schemas"]["T5Encoder_T5Encoder_Config"] | components["schemas"]["T5Encoder_BnBLLMint8_Config"] | components["schemas"]["Qwen3Encoder_Qwen3Encoder_Config"] | components["schemas"]["Qwen3Encoder_Checkpoint_Config"] | components["schemas"]["Qwen3Encoder_GGUF_Config"] | components["schemas"]["TI_File_SD1_Config"] | components["schemas"]["TI_File_SD2_Config"] | components["schemas"]["TI_File_SDXL_Config"] | components["schemas"]["TI_Folder_SD1_Config"] | components["schemas"]["TI_Folder_SD2_Config"] | components["schemas"]["TI_Folder_SDXL_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD1_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD2_Config"] | components["schemas"]["IPAdapter_InvokeAI_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD1_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD2_Config"] | 
components["schemas"]["IPAdapter_Checkpoint_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_FLUX_Config"] | components["schemas"]["T2IAdapter_Diffusers_SD1_Config"] | components["schemas"]["T2IAdapter_Diffusers_SDXL_Config"] | components["schemas"]["Spandrel_Checkpoint_Config"] | components["schemas"]["CLIPEmbed_Diffusers_G_Config"] | components["schemas"]["CLIPEmbed_Diffusers_L_Config"] | components["schemas"]["CLIPVision_Diffusers_Config"] | components["schemas"]["SigLIP_Diffusers_Config"] | components["schemas"]["FLUXRedux_Checkpoint_Config"] | components["schemas"]["LlavaOnevision_Diffusers_Config"] | components["schemas"]["Unknown_Config"]; /** * @description The submodel type, if any * @default null @@ -20493,7 +21548,7 @@ export type components = { * Config * @description The model's config */ - config: components["schemas"]["Main_Diffusers_SD1_Config"] | components["schemas"]["Main_Diffusers_SD2_Config"] | components["schemas"]["Main_Diffusers_SDXL_Config"] | components["schemas"]["Main_Diffusers_SDXLRefiner_Config"] | components["schemas"]["Main_Diffusers_SD3_Config"] | components["schemas"]["Main_Diffusers_FLUX_Config"] | components["schemas"]["Main_Diffusers_Flux2_Config"] | components["schemas"]["Main_Diffusers_CogView4_Config"] | components["schemas"]["Main_Diffusers_ZImage_Config"] | components["schemas"]["Main_Checkpoint_SD1_Config"] | components["schemas"]["Main_Checkpoint_SD2_Config"] | components["schemas"]["Main_Checkpoint_SDXL_Config"] | components["schemas"]["Main_Checkpoint_SDXLRefiner_Config"] | components["schemas"]["Main_Checkpoint_Flux2_Config"] | components["schemas"]["Main_Checkpoint_FLUX_Config"] | components["schemas"]["Main_Checkpoint_ZImage_Config"] | components["schemas"]["Main_Checkpoint_Anima_Config"] | components["schemas"]["Main_BnBNF4_FLUX_Config"] | components["schemas"]["Main_GGUF_Flux2_Config"] | components["schemas"]["Main_GGUF_FLUX_Config"] | components["schemas"]["Main_GGUF_ZImage_Config"] | 
components["schemas"]["VAE_Checkpoint_SD1_Config"] | components["schemas"]["VAE_Checkpoint_SD2_Config"] | components["schemas"]["VAE_Checkpoint_SDXL_Config"] | components["schemas"]["VAE_Checkpoint_FLUX_Config"] | components["schemas"]["VAE_Checkpoint_Flux2_Config"] | components["schemas"]["VAE_Checkpoint_Anima_Config"] | components["schemas"]["VAE_Diffusers_SD1_Config"] | components["schemas"]["VAE_Diffusers_SDXL_Config"] | components["schemas"]["VAE_Diffusers_Flux2_Config"] | components["schemas"]["ControlNet_Checkpoint_SD1_Config"] | components["schemas"]["ControlNet_Checkpoint_SD2_Config"] | components["schemas"]["ControlNet_Checkpoint_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_FLUX_Config"] | components["schemas"]["ControlNet_Checkpoint_ZImage_Config"] | components["schemas"]["ControlNet_Diffusers_SD1_Config"] | components["schemas"]["ControlNet_Diffusers_SD2_Config"] | components["schemas"]["ControlNet_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_SD1_Config"] | components["schemas"]["LoRA_LyCORIS_SD2_Config"] | components["schemas"]["LoRA_LyCORIS_SDXL_Config"] | components["schemas"]["LoRA_LyCORIS_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_ZImage_Config"] | components["schemas"]["LoRA_OMI_SDXL_Config"] | components["schemas"]["LoRA_OMI_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_SD1_Config"] | components["schemas"]["LoRA_Diffusers_SD2_Config"] | components["schemas"]["LoRA_Diffusers_SDXL_Config"] | components["schemas"]["LoRA_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_ZImage_Config"] | components["schemas"]["ControlLoRA_LyCORIS_FLUX_Config"] | components["schemas"]["T5Encoder_T5Encoder_Config"] | components["schemas"]["T5Encoder_BnBLLMint8_Config"] | components["schemas"]["Qwen3Encoder_Qwen3Encoder_Config"] | components["schemas"]["Qwen3Encoder_Checkpoint_Config"] | components["schemas"]["Qwen3Encoder_GGUF_Config"] | 
components["schemas"]["TI_File_SD1_Config"] | components["schemas"]["TI_File_SD2_Config"] | components["schemas"]["TI_File_SDXL_Config"] | components["schemas"]["TI_Folder_SD1_Config"] | components["schemas"]["TI_Folder_SD2_Config"] | components["schemas"]["TI_Folder_SDXL_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD1_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD2_Config"] | components["schemas"]["IPAdapter_InvokeAI_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD1_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD2_Config"] | components["schemas"]["IPAdapter_Checkpoint_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_FLUX_Config"] | components["schemas"]["T2IAdapter_Diffusers_SD1_Config"] | components["schemas"]["T2IAdapter_Diffusers_SDXL_Config"] | components["schemas"]["Spandrel_Checkpoint_Config"] | components["schemas"]["CLIPEmbed_Diffusers_G_Config"] | components["schemas"]["CLIPEmbed_Diffusers_L_Config"] | components["schemas"]["CLIPVision_Diffusers_Config"] | components["schemas"]["SigLIP_Diffusers_Config"] | components["schemas"]["FLUXRedux_Checkpoint_Config"] | components["schemas"]["LlavaOnevision_Diffusers_Config"] | components["schemas"]["Unknown_Config"]; + config: components["schemas"]["Main_Diffusers_SD1_Config"] | components["schemas"]["Main_Diffusers_SD2_Config"] | components["schemas"]["Main_Diffusers_SDXL_Config"] | components["schemas"]["Main_Diffusers_SDXLRefiner_Config"] | components["schemas"]["Main_Diffusers_SD3_Config"] | components["schemas"]["Main_Diffusers_FLUX_Config"] | components["schemas"]["Main_Diffusers_Flux2_Config"] | components["schemas"]["Main_Diffusers_CogView4_Config"] | components["schemas"]["Main_Diffusers_ZImage_Config"] | components["schemas"]["Main_Checkpoint_SD1_Config"] | components["schemas"]["Main_Checkpoint_SD2_Config"] | components["schemas"]["Main_Checkpoint_SDXL_Config"] | components["schemas"]["Main_Checkpoint_SDXLRefiner_Config"] | 
components["schemas"]["Main_Checkpoint_Flux2_Config"] | components["schemas"]["Main_Checkpoint_FLUX_Config"] | components["schemas"]["Main_Checkpoint_ZImage_Config"] | components["schemas"]["Main_Checkpoint_Anima_Config"] | components["schemas"]["Main_BnBNF4_FLUX_Config"] | components["schemas"]["Main_GGUF_Flux2_Config"] | components["schemas"]["Main_GGUF_FLUX_Config"] | components["schemas"]["Main_GGUF_ZImage_Config"] | components["schemas"]["VAE_Checkpoint_SD1_Config"] | components["schemas"]["VAE_Checkpoint_SD2_Config"] | components["schemas"]["VAE_Checkpoint_SDXL_Config"] | components["schemas"]["VAE_Checkpoint_FLUX_Config"] | components["schemas"]["VAE_Checkpoint_Flux2_Config"] | components["schemas"]["VAE_Checkpoint_Anima_Config"] | components["schemas"]["VAE_Diffusers_SD1_Config"] | components["schemas"]["VAE_Diffusers_SDXL_Config"] | components["schemas"]["VAE_Diffusers_Flux2_Config"] | components["schemas"]["ControlNet_Checkpoint_SD1_Config"] | components["schemas"]["ControlNet_Checkpoint_SD2_Config"] | components["schemas"]["ControlNet_Checkpoint_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_FLUX_Config"] | components["schemas"]["ControlNet_Checkpoint_ZImage_Config"] | components["schemas"]["ControlNet_Diffusers_SD1_Config"] | components["schemas"]["ControlNet_Diffusers_SD2_Config"] | components["schemas"]["ControlNet_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_SD1_Config"] | components["schemas"]["LoRA_LyCORIS_SD2_Config"] | components["schemas"]["LoRA_LyCORIS_SDXL_Config"] | components["schemas"]["LoRA_LyCORIS_Flux2_Config"] | components["schemas"]["LoRA_LyCORIS_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_ZImage_Config"] | components["schemas"]["LoRA_OMI_SDXL_Config"] | components["schemas"]["LoRA_OMI_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_SD1_Config"] | components["schemas"]["LoRA_Diffusers_SD2_Config"] | 
components["schemas"]["LoRA_Diffusers_SDXL_Config"] | components["schemas"]["LoRA_Diffusers_Flux2_Config"] | components["schemas"]["LoRA_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_ZImage_Config"] | components["schemas"]["ControlLoRA_LyCORIS_FLUX_Config"] | components["schemas"]["T5Encoder_T5Encoder_Config"] | components["schemas"]["T5Encoder_BnBLLMint8_Config"] | components["schemas"]["Qwen3Encoder_Qwen3Encoder_Config"] | components["schemas"]["Qwen3Encoder_Checkpoint_Config"] | components["schemas"]["Qwen3Encoder_GGUF_Config"] | components["schemas"]["TI_File_SD1_Config"] | components["schemas"]["TI_File_SD2_Config"] | components["schemas"]["TI_File_SDXL_Config"] | components["schemas"]["TI_Folder_SD1_Config"] | components["schemas"]["TI_Folder_SD2_Config"] | components["schemas"]["TI_Folder_SDXL_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD1_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD2_Config"] | components["schemas"]["IPAdapter_InvokeAI_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD1_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD2_Config"] | components["schemas"]["IPAdapter_Checkpoint_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_FLUX_Config"] | components["schemas"]["T2IAdapter_Diffusers_SD1_Config"] | components["schemas"]["T2IAdapter_Diffusers_SDXL_Config"] | components["schemas"]["Spandrel_Checkpoint_Config"] | components["schemas"]["CLIPEmbed_Diffusers_G_Config"] | components["schemas"]["CLIPEmbed_Diffusers_L_Config"] | components["schemas"]["CLIPVision_Diffusers_Config"] | components["schemas"]["SigLIP_Diffusers_Config"] | components["schemas"]["FLUXRedux_Checkpoint_Config"] | components["schemas"]["LlavaOnevision_Diffusers_Config"] | components["schemas"]["Unknown_Config"]; /** * @description The submodel type, if any * @default null @@ -20602,7 +21657,7 @@ export type components = { * Variant * @description The variant of the model. 
*/ - variant?: components["schemas"]["ModelVariantType"] | components["schemas"]["ClipVariantType"] | components["schemas"]["FluxVariantType"] | components["schemas"]["Flux2VariantType"] | components["schemas"]["Qwen3VariantType"] | null; + variant?: components["schemas"]["ModelVariantType"] | components["schemas"]["ClipVariantType"] | components["schemas"]["FluxVariantType"] | components["schemas"]["Flux2VariantType"] | components["schemas"]["ZImageVariantType"] | components["schemas"]["Qwen3VariantType"] | null; /** @description The prediction type of the model. */ prediction_type?: components["schemas"]["SchedulerPredictionType"] | null; /** @@ -20667,7 +21722,7 @@ export type components = { */ ModelsList: { /** Models */ - models: (components["schemas"]["Main_Diffusers_SD1_Config"] | components["schemas"]["Main_Diffusers_SD2_Config"] | components["schemas"]["Main_Diffusers_SDXL_Config"] | components["schemas"]["Main_Diffusers_SDXLRefiner_Config"] | components["schemas"]["Main_Diffusers_SD3_Config"] | components["schemas"]["Main_Diffusers_FLUX_Config"] | components["schemas"]["Main_Diffusers_Flux2_Config"] | components["schemas"]["Main_Diffusers_CogView4_Config"] | components["schemas"]["Main_Diffusers_ZImage_Config"] | components["schemas"]["Main_Checkpoint_SD1_Config"] | components["schemas"]["Main_Checkpoint_SD2_Config"] | components["schemas"]["Main_Checkpoint_SDXL_Config"] | components["schemas"]["Main_Checkpoint_SDXLRefiner_Config"] | components["schemas"]["Main_Checkpoint_Flux2_Config"] | components["schemas"]["Main_Checkpoint_FLUX_Config"] | components["schemas"]["Main_Checkpoint_ZImage_Config"] | components["schemas"]["Main_Checkpoint_Anima_Config"] | components["schemas"]["Main_BnBNF4_FLUX_Config"] | components["schemas"]["Main_GGUF_Flux2_Config"] | components["schemas"]["Main_GGUF_FLUX_Config"] | components["schemas"]["Main_GGUF_ZImage_Config"] | components["schemas"]["VAE_Checkpoint_SD1_Config"] | components["schemas"]["VAE_Checkpoint_SD2_Config"] | 
components["schemas"]["VAE_Checkpoint_SDXL_Config"] | components["schemas"]["VAE_Checkpoint_FLUX_Config"] | components["schemas"]["VAE_Checkpoint_Flux2_Config"] | components["schemas"]["VAE_Checkpoint_Anima_Config"] | components["schemas"]["VAE_Diffusers_SD1_Config"] | components["schemas"]["VAE_Diffusers_SDXL_Config"] | components["schemas"]["VAE_Diffusers_Flux2_Config"] | components["schemas"]["ControlNet_Checkpoint_SD1_Config"] | components["schemas"]["ControlNet_Checkpoint_SD2_Config"] | components["schemas"]["ControlNet_Checkpoint_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_FLUX_Config"] | components["schemas"]["ControlNet_Checkpoint_ZImage_Config"] | components["schemas"]["ControlNet_Diffusers_SD1_Config"] | components["schemas"]["ControlNet_Diffusers_SD2_Config"] | components["schemas"]["ControlNet_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_SD1_Config"] | components["schemas"]["LoRA_LyCORIS_SD2_Config"] | components["schemas"]["LoRA_LyCORIS_SDXL_Config"] | components["schemas"]["LoRA_LyCORIS_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_ZImage_Config"] | components["schemas"]["LoRA_OMI_SDXL_Config"] | components["schemas"]["LoRA_OMI_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_SD1_Config"] | components["schemas"]["LoRA_Diffusers_SD2_Config"] | components["schemas"]["LoRA_Diffusers_SDXL_Config"] | components["schemas"]["LoRA_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_ZImage_Config"] | components["schemas"]["ControlLoRA_LyCORIS_FLUX_Config"] | components["schemas"]["T5Encoder_T5Encoder_Config"] | components["schemas"]["T5Encoder_BnBLLMint8_Config"] | components["schemas"]["Qwen3Encoder_Qwen3Encoder_Config"] | components["schemas"]["Qwen3Encoder_Checkpoint_Config"] | components["schemas"]["Qwen3Encoder_GGUF_Config"] | components["schemas"]["TI_File_SD1_Config"] | components["schemas"]["TI_File_SD2_Config"] | 
components["schemas"]["TI_File_SDXL_Config"] | components["schemas"]["TI_Folder_SD1_Config"] | components["schemas"]["TI_Folder_SD2_Config"] | components["schemas"]["TI_Folder_SDXL_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD1_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD2_Config"] | components["schemas"]["IPAdapter_InvokeAI_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD1_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD2_Config"] | components["schemas"]["IPAdapter_Checkpoint_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_FLUX_Config"] | components["schemas"]["T2IAdapter_Diffusers_SD1_Config"] | components["schemas"]["T2IAdapter_Diffusers_SDXL_Config"] | components["schemas"]["Spandrel_Checkpoint_Config"] | components["schemas"]["CLIPEmbed_Diffusers_G_Config"] | components["schemas"]["CLIPEmbed_Diffusers_L_Config"] | components["schemas"]["CLIPVision_Diffusers_Config"] | components["schemas"]["SigLIP_Diffusers_Config"] | components["schemas"]["FLUXRedux_Checkpoint_Config"] | components["schemas"]["LlavaOnevision_Diffusers_Config"] | components["schemas"]["Unknown_Config"])[]; + models: (components["schemas"]["Main_Diffusers_SD1_Config"] | components["schemas"]["Main_Diffusers_SD2_Config"] | components["schemas"]["Main_Diffusers_SDXL_Config"] | components["schemas"]["Main_Diffusers_SDXLRefiner_Config"] | components["schemas"]["Main_Diffusers_SD3_Config"] | components["schemas"]["Main_Diffusers_FLUX_Config"] | components["schemas"]["Main_Diffusers_Flux2_Config"] | components["schemas"]["Main_Diffusers_CogView4_Config"] | components["schemas"]["Main_Diffusers_ZImage_Config"] | components["schemas"]["Main_Checkpoint_SD1_Config"] | components["schemas"]["Main_Checkpoint_SD2_Config"] | components["schemas"]["Main_Checkpoint_SDXL_Config"] | components["schemas"]["Main_Checkpoint_SDXLRefiner_Config"] | components["schemas"]["Main_Checkpoint_Flux2_Config"] | components["schemas"]["Main_Checkpoint_FLUX_Config"] | 
components["schemas"]["Main_Checkpoint_ZImage_Config"] | components["schemas"]["Main_Checkpoint_Anima_Config"] | components["schemas"]["Main_BnBNF4_FLUX_Config"] | components["schemas"]["Main_GGUF_Flux2_Config"] | components["schemas"]["Main_GGUF_FLUX_Config"] | components["schemas"]["Main_GGUF_ZImage_Config"] | components["schemas"]["VAE_Checkpoint_SD1_Config"] | components["schemas"]["VAE_Checkpoint_SD2_Config"] | components["schemas"]["VAE_Checkpoint_SDXL_Config"] | components["schemas"]["VAE_Checkpoint_FLUX_Config"] | components["schemas"]["VAE_Checkpoint_Flux2_Config"] | components["schemas"]["VAE_Checkpoint_Anima_Config"] | components["schemas"]["VAE_Diffusers_SD1_Config"] | components["schemas"]["VAE_Diffusers_SDXL_Config"] | components["schemas"]["VAE_Diffusers_Flux2_Config"] | components["schemas"]["ControlNet_Checkpoint_SD1_Config"] | components["schemas"]["ControlNet_Checkpoint_SD2_Config"] | components["schemas"]["ControlNet_Checkpoint_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_FLUX_Config"] | components["schemas"]["ControlNet_Checkpoint_ZImage_Config"] | components["schemas"]["ControlNet_Diffusers_SD1_Config"] | components["schemas"]["ControlNet_Diffusers_SD2_Config"] | components["schemas"]["ControlNet_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_SD1_Config"] | components["schemas"]["LoRA_LyCORIS_SD2_Config"] | components["schemas"]["LoRA_LyCORIS_SDXL_Config"] | components["schemas"]["LoRA_LyCORIS_Flux2_Config"] | components["schemas"]["LoRA_LyCORIS_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_ZImage_Config"] | components["schemas"]["LoRA_OMI_SDXL_Config"] | components["schemas"]["LoRA_OMI_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_SD1_Config"] | components["schemas"]["LoRA_Diffusers_SD2_Config"] | components["schemas"]["LoRA_Diffusers_SDXL_Config"] | components["schemas"]["LoRA_Diffusers_Flux2_Config"] | 
components["schemas"]["LoRA_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_ZImage_Config"] | components["schemas"]["ControlLoRA_LyCORIS_FLUX_Config"] | components["schemas"]["T5Encoder_T5Encoder_Config"] | components["schemas"]["T5Encoder_BnBLLMint8_Config"] | components["schemas"]["Qwen3Encoder_Qwen3Encoder_Config"] | components["schemas"]["Qwen3Encoder_Checkpoint_Config"] | components["schemas"]["Qwen3Encoder_GGUF_Config"] | components["schemas"]["TI_File_SD1_Config"] | components["schemas"]["TI_File_SD2_Config"] | components["schemas"]["TI_File_SDXL_Config"] | components["schemas"]["TI_Folder_SD1_Config"] | components["schemas"]["TI_Folder_SD2_Config"] | components["schemas"]["TI_Folder_SDXL_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD1_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD2_Config"] | components["schemas"]["IPAdapter_InvokeAI_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD1_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD2_Config"] | components["schemas"]["IPAdapter_Checkpoint_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_FLUX_Config"] | components["schemas"]["T2IAdapter_Diffusers_SD1_Config"] | components["schemas"]["T2IAdapter_Diffusers_SDXL_Config"] | components["schemas"]["Spandrel_Checkpoint_Config"] | components["schemas"]["CLIPEmbed_Diffusers_G_Config"] | components["schemas"]["CLIPEmbed_Diffusers_L_Config"] | components["schemas"]["CLIPVision_Diffusers_Config"] | components["schemas"]["SigLIP_Diffusers_Config"] | components["schemas"]["FLUXRedux_Checkpoint_Config"] | components["schemas"]["LlavaOnevision_Diffusers_Config"] | components["schemas"]["Unknown_Config"])[]; }; /** * Multiply Integers @@ -21445,6 +22500,12 @@ export type components = { * @default null */ destination: string | null; + /** + * User Id + * @description The ID of the user who created the queue item + * @default system + */ + user_id: string; /** * Status * @description The new status of the queue 
item @@ -22026,92 +23087,246 @@ export type components = { type: "range_of_size"; }; /** - * Create Rectangle Mask - * @description Create a rectangular mask. + * RecallParameter + * @description Request model for updating recallable parameters. */ - RectangleMaskInvocation: { + RecallParameter: { /** - * @description Optional metadata to be saved with the image - * @default null + * Positive Prompt + * @description Positive prompt text */ - metadata?: components["schemas"]["MetadataField"] | null; + positive_prompt?: string | null; /** - * Id - * @description The id of this instance of an invocation. Must be unique among all instances of invocations. + * Negative Prompt + * @description Negative prompt text */ - id: string; + negative_prompt?: string | null; /** - * Is Intermediate - * @description Whether or not this is an intermediate invocation. - * @default false + * Model + * @description Main model name/identifier */ - is_intermediate?: boolean; + model?: string | null; /** - * Use Cache - * @description Whether or not to use the cache - * @default true + * Refiner Model + * @description Refiner model name/identifier */ - use_cache?: boolean; + refiner_model?: string | null; /** - * Width - * @description The width of the entire mask. - * @default null + * Vae Model + * @description VAE model name/identifier */ - width?: number | null; + vae_model?: string | null; /** - * Height - * @description The height of the entire mask. - * @default null + * Scheduler + * @description Scheduler name */ - height?: number | null; + scheduler?: string | null; /** - * X Left - * @description The left x-coordinate of the rectangular masked region (inclusive). - * @default null + * Steps + * @description Number of generation steps */ - x_left?: number | null; + steps?: number | null; /** - * Y Top - * @description The top y-coordinate of the rectangular masked region (inclusive). 
- * @default null + * Refiner Steps + * @description Number of refiner steps */ - y_top?: number | null; + refiner_steps?: number | null; /** - * Rectangle Width - * @description The width of the rectangular masked region. - * @default null + * Cfg Scale + * @description CFG scale for guidance */ - rectangle_width?: number | null; + cfg_scale?: number | null; /** - * Rectangle Height - * @description The height of the rectangular masked region. - * @default null + * Cfg Rescale Multiplier + * @description CFG rescale multiplier */ - rectangle_height?: number | null; + cfg_rescale_multiplier?: number | null; /** - * type - * @default rectangle_mask - * @constant + * Refiner Cfg Scale + * @description Refiner CFG scale */ - type: "rectangle_mask"; - }; - /** - * RemoteModelFile - * @description Information about a downloadable file that forms part of a model. - */ - RemoteModelFile: { + refiner_cfg_scale?: number | null; /** - * Url - * Format: uri - * @description The url to download this model file + * Guidance + * @description Guidance scale */ - url: string; + guidance?: number | null; /** - * Path - * Format: path - * @description The path to the file, relative to the model root + * Width + * @description Image width in pixels */ - path: string; + width?: number | null; + /** + * Height + * @description Image height in pixels + */ + height?: number | null; + /** + * Seed + * @description Random seed + */ + seed?: number | null; + /** + * Denoise Strength + * @description Denoising strength + */ + denoise_strength?: number | null; + /** + * Refiner Denoise Start + * @description Refiner denoising start + */ + refiner_denoise_start?: number | null; + /** + * Clip Skip + * @description CLIP skip layers + */ + clip_skip?: number | null; + /** + * Seamless X + * @description Enable seamless X tiling + */ + seamless_x?: boolean | null; + /** + * Seamless Y + * @description Enable seamless Y tiling + */ + seamless_y?: boolean | null; + /** + * Refiner Positive 
Aesthetic Score + * @description Refiner positive aesthetic score + */ + refiner_positive_aesthetic_score?: number | null; + /** + * Refiner Negative Aesthetic Score + * @description Refiner negative aesthetic score + */ + refiner_negative_aesthetic_score?: number | null; + /** + * Loras + * @description List of LoRAs with their weights + */ + loras?: components["schemas"]["LoRARecallParameter"][] | null; + /** + * Control Layers + * @description List of control adapters (ControlNet, T2I Adapter, Control LoRA) with their settings + */ + control_layers?: components["schemas"]["ControlNetRecallParameter"][] | null; + /** + * Ip Adapters + * @description List of IP Adapters with their settings + */ + ip_adapters?: components["schemas"]["IPAdapterRecallParameter"][] | null; + }; + /** + * RecallParametersUpdatedEvent + * @description Event model for recall_parameters_updated + */ + RecallParametersUpdatedEvent: { + /** + * Timestamp + * @description The timestamp of the event + */ + timestamp: number; + /** + * Queue Id + * @description The ID of the queue + */ + queue_id: string; + /** + * Parameters + * @description The recall parameters that were updated + */ + parameters: { + [key: string]: unknown; + }; + }; + /** + * Create Rectangle Mask + * @description Create a rectangular mask. + */ + RectangleMaskInvocation: { + /** + * @description Optional metadata to be saved with the image + * @default null + */ + metadata?: components["schemas"]["MetadataField"] | null; + /** + * Id + * @description The id of this instance of an invocation. Must be unique among all instances of invocations. + */ + id: string; + /** + * Is Intermediate + * @description Whether or not this is an intermediate invocation. + * @default false + */ + is_intermediate?: boolean; + /** + * Use Cache + * @description Whether or not to use the cache + * @default true + */ + use_cache?: boolean; + /** + * Width + * @description The width of the entire mask. 
+ * @default null + */ + width?: number | null; + /** + * Height + * @description The height of the entire mask. + * @default null + */ + height?: number | null; + /** + * X Left + * @description The left x-coordinate of the rectangular masked region (inclusive). + * @default null + */ + x_left?: number | null; + /** + * Y Top + * @description The top y-coordinate of the rectangular masked region (inclusive). + * @default null + */ + y_top?: number | null; + /** + * Rectangle Width + * @description The width of the rectangular masked region. + * @default null + */ + rectangle_width?: number | null; + /** + * Rectangle Height + * @description The height of the rectangular masked region. + * @default null + */ + rectangle_height?: number | null; + /** + * type + * @default rectangle_mask + * @constant + */ + type: "rectangle_mask"; + }; + /** + * RemoteModelFile + * @description Information about a downloadable file that forms part of a model. + */ + RemoteModelFile: { + /** + * Url + * Format: uri + * @description The url to download this model file + */ + url: string; + /** + * Path + * Format: path + * @description The path to the file, relative to the model root + */ + path: string; /** * Size * @description The size of this file, in bytes @@ -23534,6 +24749,22 @@ export type components = { * @description The id of the queue with which this item is associated */ queue_id: string; + /** + * User Id + * @description The id of the user who created this queue item + * @default system + */ + user_id?: string; + /** + * User Display Name + * @description The display name of the user who created this queue item, if available + */ + user_display_name?: string | null; + /** + * User Email + * @description The email of the user who created this queue item, if available + */ + user_email?: string | null; /** * Field Values * @description The field values that were used for this queue item @@ -23601,6 +24832,71 @@ export type components = { * @description Total number of 
queue items */ total: number; + /** + * User Pending + * @description Number of queue items with status 'pending' for the current user + */ + user_pending?: number | null; + /** + * User In Progress + * @description Number of queue items with status 'in_progress' for the current user + */ + user_in_progress?: number | null; + }; + /** + * SetupRequest + * @description Request body for initial admin setup. + */ + SetupRequest: { + /** + * Email + * @description Admin email address + */ + email: string; + /** + * Display Name + * @description Admin display name + */ + display_name?: string | null; + /** + * Password + * @description Admin password + */ + password: string; + }; + /** + * SetupResponse + * @description Response from successful admin setup. + */ + SetupResponse: { + /** + * Success + * @description Whether setup was successful + */ + success: boolean; + /** @description Created admin user information */ + user: components["schemas"]["UserDTO"]; + }; + /** + * SetupStatusResponse + * @description Response for setup status check. 
+ */ + SetupStatusResponse: { + /** + * Setup Required + * @description Whether initial setup is required + */ + setup_required: boolean; + /** + * Multiuser Enabled + * @description Whether multiuser mode is enabled + */ + multiuser_enabled: boolean; + /** + * Strict Password Checking + * @description Whether strict password requirements are enforced + */ + strict_password_checking: boolean; }; /** * Show Image @@ -24489,7 +25785,7 @@ export type components = { path_or_prefix: string; model_type: components["schemas"]["ModelType"]; /** Variant */ - variant?: components["schemas"]["ModelVariantType"] | components["schemas"]["ClipVariantType"] | components["schemas"]["FluxVariantType"] | components["schemas"]["Flux2VariantType"] | components["schemas"]["Qwen3VariantType"] | null; + variant?: components["schemas"]["ModelVariantType"] | components["schemas"]["ClipVariantType"] | components["schemas"]["FluxVariantType"] | components["schemas"]["Flux2VariantType"] | components["schemas"]["ZImageVariantType"] | components["schemas"]["Qwen3VariantType"] | null; }; /** * Subtract Integers @@ -25939,6 +27235,77 @@ export type components = { */ unstarred_images: string[]; }; + /** + * UserDTO + * @description User data transfer object. 
+ */ + UserDTO: { + /** + * User Id + * @description Unique user identifier + */ + user_id: string; + /** + * Email + * @description User email address + */ + email: string; + /** + * Display Name + * @description Display name + */ + display_name?: string | null; + /** + * Is Admin + * @description Whether user has admin privileges + * @default false + */ + is_admin?: boolean; + /** + * Is Active + * @description Whether user account is active + * @default true + */ + is_active?: boolean; + /** + * Created At + * Format: date-time + * @description When the user was created + */ + created_at: string; + /** + * Updated At + * Format: date-time + * @description When the user was last updated + */ + updated_at: string; + /** + * Last Login At + * @description When user last logged in + */ + last_login_at?: string | null; + }; + /** + * UserProfileUpdateRequest + * @description Request body for a user to update their own profile. + */ + UserProfileUpdateRequest: { + /** + * Display Name + * @description New display name + */ + display_name?: string | null; + /** + * Current Password + * @description Current password (required when changing password) + */ + current_password?: string | null; + /** + * New Password + * @description New password + */ + new_password?: string | null; + }; /** VAEField */ VAEField: { /** @description Info to load vae submodel */ @@ -27199,7 +28566,7 @@ export type components = { vae?: components["schemas"]["VAEField"] | null; /** * Scheduler - * @description Scheduler (sampler) for the denoising process. Euler is the default and recommended for Z-Image-Turbo. Heun is 2nd-order (better quality, 2x slower). LCM is optimized for few steps. + * @description Scheduler (sampler) for the denoising process. Euler is the default and recommended. Heun is 2nd-order (better quality, 2x slower). LCM works with Turbo only (not Base). 
* @default euler * @enum {string} */ @@ -27326,7 +28693,7 @@ export type components = { vae?: components["schemas"]["VAEField"] | null; /** * Scheduler - * @description Scheduler (sampler) for the denoising process. Euler is the default and recommended for Z-Image-Turbo. Heun is 2nd-order (better quality, 2x slower). LCM is optimized for few steps. + * @description Scheduler (sampler) for the denoising process. Euler is the default and recommended. Heun is 2nd-order (better quality, 2x slower). LCM works with Turbo only (not Base). * @default euler * @enum {string} */ @@ -27753,6 +29120,12 @@ export type components = { */ type: "z_image_text_encoder"; }; + /** + * ZImageVariantType + * @description Z-Image model variants. + * @enum {string} + */ + ZImageVariantType: "turbo" | "zbase"; }; responses: never; parameters: never; @@ -27762,6 +29135,336 @@ export type components = { }; export type $defs = Record; export interface operations { + get_setup_status_api_v1_auth_status_get: { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + requestBody?: never; + responses: { + /** @description Successful Response */ + 200: { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["SetupStatusResponse"]; + }; + }; + }; + }; + login_api_v1_auth_login_post: { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + requestBody: { + content: { + "application/json": components["schemas"]["LoginRequest"]; + }; + }; + responses: { + /** @description Successful Response */ + 200: { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["LoginResponse"]; + }; + }; + /** @description Validation Error */ + 422: { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["HTTPValidationError"]; + }; + }; + }; + }; + logout_api_v1_auth_logout_post: { + parameters: { + query?: 
never; + header?: never; + path?: never; + cookie?: never; + }; + requestBody?: never; + responses: { + /** @description Successful Response */ + 200: { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["LogoutResponse"]; + }; + }; + }; + }; + get_current_user_info_api_v1_auth_me_get: { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + requestBody?: never; + responses: { + /** @description Successful Response */ + 200: { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["UserDTO"]; + }; + }; + }; + }; + update_current_user_api_v1_auth_me_patch: { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + requestBody: { + content: { + "application/json": components["schemas"]["UserProfileUpdateRequest"]; + }; + }; + responses: { + /** @description Successful Response */ + 200: { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["UserDTO"]; + }; + }; + /** @description Validation Error */ + 422: { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["HTTPValidationError"]; + }; + }; + }; + }; + setup_admin_api_v1_auth_setup_post: { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + requestBody: { + content: { + "application/json": components["schemas"]["SetupRequest"]; + }; + }; + responses: { + /** @description Successful Response */ + 200: { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["SetupResponse"]; + }; + }; + /** @description Validation Error */ + 422: { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["HTTPValidationError"]; + }; + }; + }; + }; + generate_password_api_v1_auth_generate_password_get: { + parameters: { + query?: never; + 
header?: never; + path?: never; + cookie?: never; + }; + requestBody?: never; + responses: { + /** @description Successful Response */ + 200: { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["GeneratePasswordResponse"]; + }; + }; + }; + }; + list_users_api_v1_auth_users_get: { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + requestBody?: never; + responses: { + /** @description Successful Response */ + 200: { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["UserDTO"][]; + }; + }; + }; + }; + create_user_api_v1_auth_users_post: { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + requestBody: { + content: { + "application/json": components["schemas"]["AdminUserCreateRequest"]; + }; + }; + responses: { + /** @description Successful Response */ + 201: { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["UserDTO"]; + }; + }; + /** @description Validation Error */ + 422: { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["HTTPValidationError"]; + }; + }; + }; + }; + get_user_api_v1_auth_users__user_id__get: { + parameters: { + query?: never; + header?: never; + path: { + /** @description User ID */ + user_id: string; + }; + cookie?: never; + }; + requestBody?: never; + responses: { + /** @description Successful Response */ + 200: { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["UserDTO"]; + }; + }; + /** @description Validation Error */ + 422: { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["HTTPValidationError"]; + }; + }; + }; + }; + delete_user_api_v1_auth_users__user_id__delete: { + parameters: { + query?: never; + header?: never; + path: { + /** @description 
User ID */ + user_id: string; + }; + cookie?: never; + }; + requestBody?: never; + responses: { + /** @description Successful Response */ + 204: { + headers: { + [name: string]: unknown; + }; + content?: never; + }; + /** @description Validation Error */ + 422: { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["HTTPValidationError"]; + }; + }; + }; + }; + update_user_api_v1_auth_users__user_id__patch: { + parameters: { + query?: never; + header?: never; + path: { + /** @description User ID */ + user_id: string; + }; + cookie?: never; + }; + requestBody: { + content: { + "application/json": components["schemas"]["AdminUserUpdateRequest"]; + }; + }; + responses: { + /** @description Successful Response */ + 200: { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["UserDTO"]; + }; + }; + /** @description Validation Error */ + 422: { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["HTTPValidationError"]; + }; + }; + }; + }; parse_dynamicprompts: { parameters: { query?: never; @@ -27875,7 +29578,39 @@ export interface operations { [name: string]: unknown; }; content: { - "application/json": components["schemas"]["Main_Diffusers_SD1_Config"] | components["schemas"]["Main_Diffusers_SD2_Config"] | components["schemas"]["Main_Diffusers_SDXL_Config"] | components["schemas"]["Main_Diffusers_SDXLRefiner_Config"] | components["schemas"]["Main_Diffusers_SD3_Config"] | components["schemas"]["Main_Diffusers_FLUX_Config"] | components["schemas"]["Main_Diffusers_Flux2_Config"] | components["schemas"]["Main_Diffusers_CogView4_Config"] | components["schemas"]["Main_Diffusers_ZImage_Config"] | components["schemas"]["Main_Checkpoint_SD1_Config"] | components["schemas"]["Main_Checkpoint_SD2_Config"] | components["schemas"]["Main_Checkpoint_SDXL_Config"] | components["schemas"]["Main_Checkpoint_SDXLRefiner_Config"] | 
components["schemas"]["Main_Checkpoint_Flux2_Config"] | components["schemas"]["Main_Checkpoint_FLUX_Config"] | components["schemas"]["Main_Checkpoint_ZImage_Config"] | components["schemas"]["Main_Checkpoint_Anima_Config"] | components["schemas"]["Main_BnBNF4_FLUX_Config"] | components["schemas"]["Main_GGUF_Flux2_Config"] | components["schemas"]["Main_GGUF_FLUX_Config"] | components["schemas"]["Main_GGUF_ZImage_Config"] | components["schemas"]["VAE_Checkpoint_SD1_Config"] | components["schemas"]["VAE_Checkpoint_SD2_Config"] | components["schemas"]["VAE_Checkpoint_SDXL_Config"] | components["schemas"]["VAE_Checkpoint_FLUX_Config"] | components["schemas"]["VAE_Checkpoint_Flux2_Config"] | components["schemas"]["VAE_Checkpoint_Anima_Config"] | components["schemas"]["VAE_Diffusers_SD1_Config"] | components["schemas"]["VAE_Diffusers_SDXL_Config"] | components["schemas"]["VAE_Diffusers_Flux2_Config"] | components["schemas"]["ControlNet_Checkpoint_SD1_Config"] | components["schemas"]["ControlNet_Checkpoint_SD2_Config"] | components["schemas"]["ControlNet_Checkpoint_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_FLUX_Config"] | components["schemas"]["ControlNet_Checkpoint_ZImage_Config"] | components["schemas"]["ControlNet_Diffusers_SD1_Config"] | components["schemas"]["ControlNet_Diffusers_SD2_Config"] | components["schemas"]["ControlNet_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_SD1_Config"] | components["schemas"]["LoRA_LyCORIS_SD2_Config"] | components["schemas"]["LoRA_LyCORIS_SDXL_Config"] | components["schemas"]["LoRA_LyCORIS_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_ZImage_Config"] | components["schemas"]["LoRA_OMI_SDXL_Config"] | components["schemas"]["LoRA_OMI_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_SD1_Config"] | components["schemas"]["LoRA_Diffusers_SD2_Config"] | components["schemas"]["LoRA_Diffusers_SDXL_Config"] | 
components["schemas"]["LoRA_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_ZImage_Config"] | components["schemas"]["ControlLoRA_LyCORIS_FLUX_Config"] | components["schemas"]["T5Encoder_T5Encoder_Config"] | components["schemas"]["T5Encoder_BnBLLMint8_Config"] | components["schemas"]["Qwen3Encoder_Qwen3Encoder_Config"] | components["schemas"]["Qwen3Encoder_Checkpoint_Config"] | components["schemas"]["Qwen3Encoder_GGUF_Config"] | components["schemas"]["TI_File_SD1_Config"] | components["schemas"]["TI_File_SD2_Config"] | components["schemas"]["TI_File_SDXL_Config"] | components["schemas"]["TI_Folder_SD1_Config"] | components["schemas"]["TI_Folder_SD2_Config"] | components["schemas"]["TI_Folder_SDXL_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD1_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD2_Config"] | components["schemas"]["IPAdapter_InvokeAI_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD1_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD2_Config"] | components["schemas"]["IPAdapter_Checkpoint_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_FLUX_Config"] | components["schemas"]["T2IAdapter_Diffusers_SD1_Config"] | components["schemas"]["T2IAdapter_Diffusers_SDXL_Config"] | components["schemas"]["Spandrel_Checkpoint_Config"] | components["schemas"]["CLIPEmbed_Diffusers_G_Config"] | components["schemas"]["CLIPEmbed_Diffusers_L_Config"] | components["schemas"]["CLIPVision_Diffusers_Config"] | components["schemas"]["SigLIP_Diffusers_Config"] | components["schemas"]["FLUXRedux_Checkpoint_Config"] | components["schemas"]["LlavaOnevision_Diffusers_Config"] | components["schemas"]["Unknown_Config"]; + "application/json": components["schemas"]["Main_Diffusers_SD1_Config"] | components["schemas"]["Main_Diffusers_SD2_Config"] | components["schemas"]["Main_Diffusers_SDXL_Config"] | components["schemas"]["Main_Diffusers_SDXLRefiner_Config"] | components["schemas"]["Main_Diffusers_SD3_Config"] | 
components["schemas"]["Main_Diffusers_FLUX_Config"] | components["schemas"]["Main_Diffusers_Flux2_Config"] | components["schemas"]["Main_Diffusers_CogView4_Config"] | components["schemas"]["Main_Diffusers_ZImage_Config"] | components["schemas"]["Main_Checkpoint_SD1_Config"] | components["schemas"]["Main_Checkpoint_SD2_Config"] | components["schemas"]["Main_Checkpoint_SDXL_Config"] | components["schemas"]["Main_Checkpoint_SDXLRefiner_Config"] | components["schemas"]["Main_Checkpoint_Flux2_Config"] | components["schemas"]["Main_Checkpoint_FLUX_Config"] | components["schemas"]["Main_Checkpoint_ZImage_Config"] | components["schemas"]["Main_Checkpoint_Anima_Config"] | components["schemas"]["Main_BnBNF4_FLUX_Config"] | components["schemas"]["Main_GGUF_Flux2_Config"] | components["schemas"]["Main_GGUF_FLUX_Config"] | components["schemas"]["Main_GGUF_ZImage_Config"] | components["schemas"]["VAE_Checkpoint_SD1_Config"] | components["schemas"]["VAE_Checkpoint_SD2_Config"] | components["schemas"]["VAE_Checkpoint_SDXL_Config"] | components["schemas"]["VAE_Checkpoint_FLUX_Config"] | components["schemas"]["VAE_Checkpoint_Flux2_Config"] | components["schemas"]["VAE_Checkpoint_Anima_Config"] | components["schemas"]["VAE_Diffusers_SD1_Config"] | components["schemas"]["VAE_Diffusers_SDXL_Config"] | components["schemas"]["VAE_Diffusers_Flux2_Config"] | components["schemas"]["ControlNet_Checkpoint_SD1_Config"] | components["schemas"]["ControlNet_Checkpoint_SD2_Config"] | components["schemas"]["ControlNet_Checkpoint_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_FLUX_Config"] | components["schemas"]["ControlNet_Checkpoint_ZImage_Config"] | components["schemas"]["ControlNet_Diffusers_SD1_Config"] | components["schemas"]["ControlNet_Diffusers_SD2_Config"] | components["schemas"]["ControlNet_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_SD1_Config"] | components["schemas"]["LoRA_LyCORIS_SD2_Config"] 
| components["schemas"]["LoRA_LyCORIS_SDXL_Config"] | components["schemas"]["LoRA_LyCORIS_Flux2_Config"] | components["schemas"]["LoRA_LyCORIS_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_ZImage_Config"] | components["schemas"]["LoRA_OMI_SDXL_Config"] | components["schemas"]["LoRA_OMI_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_SD1_Config"] | components["schemas"]["LoRA_Diffusers_SD2_Config"] | components["schemas"]["LoRA_Diffusers_SDXL_Config"] | components["schemas"]["LoRA_Diffusers_Flux2_Config"] | components["schemas"]["LoRA_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_ZImage_Config"] | components["schemas"]["ControlLoRA_LyCORIS_FLUX_Config"] | components["schemas"]["T5Encoder_T5Encoder_Config"] | components["schemas"]["T5Encoder_BnBLLMint8_Config"] | components["schemas"]["Qwen3Encoder_Qwen3Encoder_Config"] | components["schemas"]["Qwen3Encoder_Checkpoint_Config"] | components["schemas"]["Qwen3Encoder_GGUF_Config"] | components["schemas"]["TI_File_SD1_Config"] | components["schemas"]["TI_File_SD2_Config"] | components["schemas"]["TI_File_SDXL_Config"] | components["schemas"]["TI_Folder_SD1_Config"] | components["schemas"]["TI_Folder_SD2_Config"] | components["schemas"]["TI_Folder_SDXL_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD1_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD2_Config"] | components["schemas"]["IPAdapter_InvokeAI_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD1_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD2_Config"] | components["schemas"]["IPAdapter_Checkpoint_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_FLUX_Config"] | components["schemas"]["T2IAdapter_Diffusers_SD1_Config"] | components["schemas"]["T2IAdapter_Diffusers_SDXL_Config"] | components["schemas"]["Spandrel_Checkpoint_Config"] | components["schemas"]["CLIPEmbed_Diffusers_G_Config"] | components["schemas"]["CLIPEmbed_Diffusers_L_Config"] | 
components["schemas"]["CLIPVision_Diffusers_Config"] | components["schemas"]["SigLIP_Diffusers_Config"] | components["schemas"]["FLUXRedux_Checkpoint_Config"] | components["schemas"]["LlavaOnevision_Diffusers_Config"] | components["schemas"]["Unknown_Config"]; + }; + }; + /** @description Validation Error */ + 422: { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["HTTPValidationError"]; + }; + }; + }; + }; + get_model_records_by_hash: { + parameters: { + query: { + /** @description The hash of the model */ + hash: string; + }; + header?: never; + path?: never; + cookie?: never; + }; + requestBody?: never; + responses: { + /** @description Successful Response */ + 200: { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["Main_Diffusers_SD1_Config"] | components["schemas"]["Main_Diffusers_SD2_Config"] | components["schemas"]["Main_Diffusers_SDXL_Config"] | components["schemas"]["Main_Diffusers_SDXLRefiner_Config"] | components["schemas"]["Main_Diffusers_SD3_Config"] | components["schemas"]["Main_Diffusers_FLUX_Config"] | components["schemas"]["Main_Diffusers_Flux2_Config"] | components["schemas"]["Main_Diffusers_CogView4_Config"] | components["schemas"]["Main_Diffusers_ZImage_Config"] | components["schemas"]["Main_Checkpoint_SD1_Config"] | components["schemas"]["Main_Checkpoint_SD2_Config"] | components["schemas"]["Main_Checkpoint_SDXL_Config"] | components["schemas"]["Main_Checkpoint_SDXLRefiner_Config"] | components["schemas"]["Main_Checkpoint_Flux2_Config"] | components["schemas"]["Main_Checkpoint_FLUX_Config"] | components["schemas"]["Main_Checkpoint_ZImage_Config"] | components["schemas"]["Main_Checkpoint_Anima_Config"] | components["schemas"]["Main_BnBNF4_FLUX_Config"] | components["schemas"]["Main_GGUF_Flux2_Config"] | components["schemas"]["Main_GGUF_FLUX_Config"] | components["schemas"]["Main_GGUF_ZImage_Config"] | 
components["schemas"]["VAE_Checkpoint_SD1_Config"] | components["schemas"]["VAE_Checkpoint_SD2_Config"] | components["schemas"]["VAE_Checkpoint_SDXL_Config"] | components["schemas"]["VAE_Checkpoint_FLUX_Config"] | components["schemas"]["VAE_Checkpoint_Flux2_Config"] | components["schemas"]["VAE_Checkpoint_Anima_Config"] | components["schemas"]["VAE_Diffusers_SD1_Config"] | components["schemas"]["VAE_Diffusers_SDXL_Config"] | components["schemas"]["VAE_Diffusers_Flux2_Config"] | components["schemas"]["ControlNet_Checkpoint_SD1_Config"] | components["schemas"]["ControlNet_Checkpoint_SD2_Config"] | components["schemas"]["ControlNet_Checkpoint_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_FLUX_Config"] | components["schemas"]["ControlNet_Checkpoint_ZImage_Config"] | components["schemas"]["ControlNet_Diffusers_SD1_Config"] | components["schemas"]["ControlNet_Diffusers_SD2_Config"] | components["schemas"]["ControlNet_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_SD1_Config"] | components["schemas"]["LoRA_LyCORIS_SD2_Config"] | components["schemas"]["LoRA_LyCORIS_SDXL_Config"] | components["schemas"]["LoRA_LyCORIS_Flux2_Config"] | components["schemas"]["LoRA_LyCORIS_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_ZImage_Config"] | components["schemas"]["LoRA_OMI_SDXL_Config"] | components["schemas"]["LoRA_OMI_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_SD1_Config"] | components["schemas"]["LoRA_Diffusers_SD2_Config"] | components["schemas"]["LoRA_Diffusers_SDXL_Config"] | components["schemas"]["LoRA_Diffusers_Flux2_Config"] | components["schemas"]["LoRA_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_ZImage_Config"] | components["schemas"]["ControlLoRA_LyCORIS_FLUX_Config"] | components["schemas"]["T5Encoder_T5Encoder_Config"] | components["schemas"]["T5Encoder_BnBLLMint8_Config"] | components["schemas"]["Qwen3Encoder_Qwen3Encoder_Config"] | 
components["schemas"]["Qwen3Encoder_Checkpoint_Config"] | components["schemas"]["Qwen3Encoder_GGUF_Config"] | components["schemas"]["TI_File_SD1_Config"] | components["schemas"]["TI_File_SD2_Config"] | components["schemas"]["TI_File_SDXL_Config"] | components["schemas"]["TI_Folder_SD1_Config"] | components["schemas"]["TI_Folder_SD2_Config"] | components["schemas"]["TI_Folder_SDXL_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD1_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD2_Config"] | components["schemas"]["IPAdapter_InvokeAI_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD1_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD2_Config"] | components["schemas"]["IPAdapter_Checkpoint_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_FLUX_Config"] | components["schemas"]["T2IAdapter_Diffusers_SD1_Config"] | components["schemas"]["T2IAdapter_Diffusers_SDXL_Config"] | components["schemas"]["Spandrel_Checkpoint_Config"] | components["schemas"]["CLIPEmbed_Diffusers_G_Config"] | components["schemas"]["CLIPEmbed_Diffusers_L_Config"] | components["schemas"]["CLIPVision_Diffusers_Config"] | components["schemas"]["SigLIP_Diffusers_Config"] | components["schemas"]["FLUXRedux_Checkpoint_Config"] | components["schemas"]["LlavaOnevision_Diffusers_Config"] | components["schemas"]["Unknown_Config"]; }; }; /** @description Validation Error */ @@ -27925,7 +29660,7 @@ export interface operations { * "repo_variant": "fp16", * "upcast_attention": false * } */ - "application/json": components["schemas"]["Main_Diffusers_SD1_Config"] | components["schemas"]["Main_Diffusers_SD2_Config"] | components["schemas"]["Main_Diffusers_SDXL_Config"] | components["schemas"]["Main_Diffusers_SDXLRefiner_Config"] | components["schemas"]["Main_Diffusers_SD3_Config"] | components["schemas"]["Main_Diffusers_FLUX_Config"] | components["schemas"]["Main_Diffusers_Flux2_Config"] | components["schemas"]["Main_Diffusers_CogView4_Config"] | 
components["schemas"]["Main_Diffusers_ZImage_Config"] | components["schemas"]["Main_Checkpoint_SD1_Config"] | components["schemas"]["Main_Checkpoint_SD2_Config"] | components["schemas"]["Main_Checkpoint_SDXL_Config"] | components["schemas"]["Main_Checkpoint_SDXLRefiner_Config"] | components["schemas"]["Main_Checkpoint_Flux2_Config"] | components["schemas"]["Main_Checkpoint_FLUX_Config"] | components["schemas"]["Main_Checkpoint_ZImage_Config"] | components["schemas"]["Main_Checkpoint_Anima_Config"] | components["schemas"]["Main_BnBNF4_FLUX_Config"] | components["schemas"]["Main_GGUF_Flux2_Config"] | components["schemas"]["Main_GGUF_FLUX_Config"] | components["schemas"]["Main_GGUF_ZImage_Config"] | components["schemas"]["VAE_Checkpoint_SD1_Config"] | components["schemas"]["VAE_Checkpoint_SD2_Config"] | components["schemas"]["VAE_Checkpoint_SDXL_Config"] | components["schemas"]["VAE_Checkpoint_FLUX_Config"] | components["schemas"]["VAE_Checkpoint_Flux2_Config"] | components["schemas"]["VAE_Checkpoint_Anima_Config"] | components["schemas"]["VAE_Diffusers_SD1_Config"] | components["schemas"]["VAE_Diffusers_SDXL_Config"] | components["schemas"]["VAE_Diffusers_Flux2_Config"] | components["schemas"]["ControlNet_Checkpoint_SD1_Config"] | components["schemas"]["ControlNet_Checkpoint_SD2_Config"] | components["schemas"]["ControlNet_Checkpoint_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_FLUX_Config"] | components["schemas"]["ControlNet_Checkpoint_ZImage_Config"] | components["schemas"]["ControlNet_Diffusers_SD1_Config"] | components["schemas"]["ControlNet_Diffusers_SD2_Config"] | components["schemas"]["ControlNet_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_SD1_Config"] | components["schemas"]["LoRA_LyCORIS_SD2_Config"] | components["schemas"]["LoRA_LyCORIS_SDXL_Config"] | components["schemas"]["LoRA_LyCORIS_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_ZImage_Config"] | 
components["schemas"]["LoRA_OMI_SDXL_Config"] | components["schemas"]["LoRA_OMI_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_SD1_Config"] | components["schemas"]["LoRA_Diffusers_SD2_Config"] | components["schemas"]["LoRA_Diffusers_SDXL_Config"] | components["schemas"]["LoRA_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_ZImage_Config"] | components["schemas"]["ControlLoRA_LyCORIS_FLUX_Config"] | components["schemas"]["T5Encoder_T5Encoder_Config"] | components["schemas"]["T5Encoder_BnBLLMint8_Config"] | components["schemas"]["Qwen3Encoder_Qwen3Encoder_Config"] | components["schemas"]["Qwen3Encoder_Checkpoint_Config"] | components["schemas"]["Qwen3Encoder_GGUF_Config"] | components["schemas"]["TI_File_SD1_Config"] | components["schemas"]["TI_File_SD2_Config"] | components["schemas"]["TI_File_SDXL_Config"] | components["schemas"]["TI_Folder_SD1_Config"] | components["schemas"]["TI_Folder_SD2_Config"] | components["schemas"]["TI_Folder_SDXL_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD1_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD2_Config"] | components["schemas"]["IPAdapter_InvokeAI_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD1_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD2_Config"] | components["schemas"]["IPAdapter_Checkpoint_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_FLUX_Config"] | components["schemas"]["T2IAdapter_Diffusers_SD1_Config"] | components["schemas"]["T2IAdapter_Diffusers_SDXL_Config"] | components["schemas"]["Spandrel_Checkpoint_Config"] | components["schemas"]["CLIPEmbed_Diffusers_G_Config"] | components["schemas"]["CLIPEmbed_Diffusers_L_Config"] | components["schemas"]["CLIPVision_Diffusers_Config"] | components["schemas"]["SigLIP_Diffusers_Config"] | components["schemas"]["FLUXRedux_Checkpoint_Config"] | components["schemas"]["LlavaOnevision_Diffusers_Config"] | components["schemas"]["Unknown_Config"]; + "application/json": 
components["schemas"]["Main_Diffusers_SD1_Config"] | components["schemas"]["Main_Diffusers_SD2_Config"] | components["schemas"]["Main_Diffusers_SDXL_Config"] | components["schemas"]["Main_Diffusers_SDXLRefiner_Config"] | components["schemas"]["Main_Diffusers_SD3_Config"] | components["schemas"]["Main_Diffusers_FLUX_Config"] | components["schemas"]["Main_Diffusers_Flux2_Config"] | components["schemas"]["Main_Diffusers_CogView4_Config"] | components["schemas"]["Main_Diffusers_ZImage_Config"] | components["schemas"]["Main_Checkpoint_SD1_Config"] | components["schemas"]["Main_Checkpoint_SD2_Config"] | components["schemas"]["Main_Checkpoint_SDXL_Config"] | components["schemas"]["Main_Checkpoint_SDXLRefiner_Config"] | components["schemas"]["Main_Checkpoint_Flux2_Config"] | components["schemas"]["Main_Checkpoint_FLUX_Config"] | components["schemas"]["Main_Checkpoint_ZImage_Config"] | components["schemas"]["Main_Checkpoint_Anima_Config"] | components["schemas"]["Main_BnBNF4_FLUX_Config"] | components["schemas"]["Main_GGUF_Flux2_Config"] | components["schemas"]["Main_GGUF_FLUX_Config"] | components["schemas"]["Main_GGUF_ZImage_Config"] | components["schemas"]["VAE_Checkpoint_SD1_Config"] | components["schemas"]["VAE_Checkpoint_SD2_Config"] | components["schemas"]["VAE_Checkpoint_SDXL_Config"] | components["schemas"]["VAE_Checkpoint_FLUX_Config"] | components["schemas"]["VAE_Checkpoint_Flux2_Config"] | components["schemas"]["VAE_Checkpoint_Anima_Config"] | components["schemas"]["VAE_Diffusers_SD1_Config"] | components["schemas"]["VAE_Diffusers_SDXL_Config"] | components["schemas"]["VAE_Diffusers_Flux2_Config"] | components["schemas"]["ControlNet_Checkpoint_SD1_Config"] | components["schemas"]["ControlNet_Checkpoint_SD2_Config"] | components["schemas"]["ControlNet_Checkpoint_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_FLUX_Config"] | components["schemas"]["ControlNet_Checkpoint_ZImage_Config"] | components["schemas"]["ControlNet_Diffusers_SD1_Config"] | 
components["schemas"]["ControlNet_Diffusers_SD2_Config"] | components["schemas"]["ControlNet_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_SD1_Config"] | components["schemas"]["LoRA_LyCORIS_SD2_Config"] | components["schemas"]["LoRA_LyCORIS_SDXL_Config"] | components["schemas"]["LoRA_LyCORIS_Flux2_Config"] | components["schemas"]["LoRA_LyCORIS_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_ZImage_Config"] | components["schemas"]["LoRA_OMI_SDXL_Config"] | components["schemas"]["LoRA_OMI_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_SD1_Config"] | components["schemas"]["LoRA_Diffusers_SD2_Config"] | components["schemas"]["LoRA_Diffusers_SDXL_Config"] | components["schemas"]["LoRA_Diffusers_Flux2_Config"] | components["schemas"]["LoRA_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_ZImage_Config"] | components["schemas"]["ControlLoRA_LyCORIS_FLUX_Config"] | components["schemas"]["T5Encoder_T5Encoder_Config"] | components["schemas"]["T5Encoder_BnBLLMint8_Config"] | components["schemas"]["Qwen3Encoder_Qwen3Encoder_Config"] | components["schemas"]["Qwen3Encoder_Checkpoint_Config"] | components["schemas"]["Qwen3Encoder_GGUF_Config"] | components["schemas"]["TI_File_SD1_Config"] | components["schemas"]["TI_File_SD2_Config"] | components["schemas"]["TI_File_SDXL_Config"] | components["schemas"]["TI_Folder_SD1_Config"] | components["schemas"]["TI_Folder_SD2_Config"] | components["schemas"]["TI_Folder_SDXL_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD1_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD2_Config"] | components["schemas"]["IPAdapter_InvokeAI_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD1_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD2_Config"] | components["schemas"]["IPAdapter_Checkpoint_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_FLUX_Config"] | 
components["schemas"]["T2IAdapter_Diffusers_SD1_Config"] | components["schemas"]["T2IAdapter_Diffusers_SDXL_Config"] | components["schemas"]["Spandrel_Checkpoint_Config"] | components["schemas"]["CLIPEmbed_Diffusers_G_Config"] | components["schemas"]["CLIPEmbed_Diffusers_L_Config"] | components["schemas"]["CLIPVision_Diffusers_Config"] | components["schemas"]["SigLIP_Diffusers_Config"] | components["schemas"]["FLUXRedux_Checkpoint_Config"] | components["schemas"]["LlavaOnevision_Diffusers_Config"] | components["schemas"]["Unknown_Config"]; }; }; /** @description Bad request */ @@ -28030,7 +29765,7 @@ export interface operations { * "repo_variant": "fp16", * "upcast_attention": false * } */ - "application/json": components["schemas"]["Main_Diffusers_SD1_Config"] | components["schemas"]["Main_Diffusers_SD2_Config"] | components["schemas"]["Main_Diffusers_SDXL_Config"] | components["schemas"]["Main_Diffusers_SDXLRefiner_Config"] | components["schemas"]["Main_Diffusers_SD3_Config"] | components["schemas"]["Main_Diffusers_FLUX_Config"] | components["schemas"]["Main_Diffusers_Flux2_Config"] | components["schemas"]["Main_Diffusers_CogView4_Config"] | components["schemas"]["Main_Diffusers_ZImage_Config"] | components["schemas"]["Main_Checkpoint_SD1_Config"] | components["schemas"]["Main_Checkpoint_SD2_Config"] | components["schemas"]["Main_Checkpoint_SDXL_Config"] | components["schemas"]["Main_Checkpoint_SDXLRefiner_Config"] | components["schemas"]["Main_Checkpoint_Flux2_Config"] | components["schemas"]["Main_Checkpoint_FLUX_Config"] | components["schemas"]["Main_Checkpoint_ZImage_Config"] | components["schemas"]["Main_Checkpoint_Anima_Config"] | components["schemas"]["Main_BnBNF4_FLUX_Config"] | components["schemas"]["Main_GGUF_Flux2_Config"] | components["schemas"]["Main_GGUF_FLUX_Config"] | components["schemas"]["Main_GGUF_ZImage_Config"] | components["schemas"]["VAE_Checkpoint_SD1_Config"] | components["schemas"]["VAE_Checkpoint_SD2_Config"] | 
components["schemas"]["VAE_Checkpoint_SDXL_Config"] | components["schemas"]["VAE_Checkpoint_FLUX_Config"] | components["schemas"]["VAE_Checkpoint_Flux2_Config"] | components["schemas"]["VAE_Checkpoint_Anima_Config"] | components["schemas"]["VAE_Diffusers_SD1_Config"] | components["schemas"]["VAE_Diffusers_SDXL_Config"] | components["schemas"]["VAE_Diffusers_Flux2_Config"] | components["schemas"]["ControlNet_Checkpoint_SD1_Config"] | components["schemas"]["ControlNet_Checkpoint_SD2_Config"] | components["schemas"]["ControlNet_Checkpoint_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_FLUX_Config"] | components["schemas"]["ControlNet_Checkpoint_ZImage_Config"] | components["schemas"]["ControlNet_Diffusers_SD1_Config"] | components["schemas"]["ControlNet_Diffusers_SD2_Config"] | components["schemas"]["ControlNet_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_SD1_Config"] | components["schemas"]["LoRA_LyCORIS_SD2_Config"] | components["schemas"]["LoRA_LyCORIS_SDXL_Config"] | components["schemas"]["LoRA_LyCORIS_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_ZImage_Config"] | components["schemas"]["LoRA_OMI_SDXL_Config"] | components["schemas"]["LoRA_OMI_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_SD1_Config"] | components["schemas"]["LoRA_Diffusers_SD2_Config"] | components["schemas"]["LoRA_Diffusers_SDXL_Config"] | components["schemas"]["LoRA_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_ZImage_Config"] | components["schemas"]["ControlLoRA_LyCORIS_FLUX_Config"] | components["schemas"]["T5Encoder_T5Encoder_Config"] | components["schemas"]["T5Encoder_BnBLLMint8_Config"] | components["schemas"]["Qwen3Encoder_Qwen3Encoder_Config"] | components["schemas"]["Qwen3Encoder_Checkpoint_Config"] | components["schemas"]["Qwen3Encoder_GGUF_Config"] | components["schemas"]["TI_File_SD1_Config"] | components["schemas"]["TI_File_SD2_Config"] | 
components["schemas"]["TI_File_SDXL_Config"] | components["schemas"]["TI_Folder_SD1_Config"] | components["schemas"]["TI_Folder_SD2_Config"] | components["schemas"]["TI_Folder_SDXL_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD1_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD2_Config"] | components["schemas"]["IPAdapter_InvokeAI_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD1_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD2_Config"] | components["schemas"]["IPAdapter_Checkpoint_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_FLUX_Config"] | components["schemas"]["T2IAdapter_Diffusers_SD1_Config"] | components["schemas"]["T2IAdapter_Diffusers_SDXL_Config"] | components["schemas"]["Spandrel_Checkpoint_Config"] | components["schemas"]["CLIPEmbed_Diffusers_G_Config"] | components["schemas"]["CLIPEmbed_Diffusers_L_Config"] | components["schemas"]["CLIPVision_Diffusers_Config"] | components["schemas"]["SigLIP_Diffusers_Config"] | components["schemas"]["FLUXRedux_Checkpoint_Config"] | components["schemas"]["LlavaOnevision_Diffusers_Config"] | components["schemas"]["Unknown_Config"]; + "application/json": components["schemas"]["Main_Diffusers_SD1_Config"] | components["schemas"]["Main_Diffusers_SD2_Config"] | components["schemas"]["Main_Diffusers_SDXL_Config"] | components["schemas"]["Main_Diffusers_SDXLRefiner_Config"] | components["schemas"]["Main_Diffusers_SD3_Config"] | components["schemas"]["Main_Diffusers_FLUX_Config"] | components["schemas"]["Main_Diffusers_Flux2_Config"] | components["schemas"]["Main_Diffusers_CogView4_Config"] | components["schemas"]["Main_Diffusers_ZImage_Config"] | components["schemas"]["Main_Checkpoint_SD1_Config"] | components["schemas"]["Main_Checkpoint_SD2_Config"] | components["schemas"]["Main_Checkpoint_SDXL_Config"] | components["schemas"]["Main_Checkpoint_SDXLRefiner_Config"] | components["schemas"]["Main_Checkpoint_Flux2_Config"] | 
components["schemas"]["Main_Checkpoint_FLUX_Config"] | components["schemas"]["Main_Checkpoint_ZImage_Config"] | components["schemas"]["Main_Checkpoint_Anima_Config"] | components["schemas"]["Main_BnBNF4_FLUX_Config"] | components["schemas"]["Main_GGUF_Flux2_Config"] | components["schemas"]["Main_GGUF_FLUX_Config"] | components["schemas"]["Main_GGUF_ZImage_Config"] | components["schemas"]["VAE_Checkpoint_SD1_Config"] | components["schemas"]["VAE_Checkpoint_SD2_Config"] | components["schemas"]["VAE_Checkpoint_SDXL_Config"] | components["schemas"]["VAE_Checkpoint_FLUX_Config"] | components["schemas"]["VAE_Checkpoint_Flux2_Config"] | components["schemas"]["VAE_Checkpoint_Anima_Config"] | components["schemas"]["VAE_Diffusers_SD1_Config"] | components["schemas"]["VAE_Diffusers_SDXL_Config"] | components["schemas"]["VAE_Diffusers_Flux2_Config"] | components["schemas"]["ControlNet_Checkpoint_SD1_Config"] | components["schemas"]["ControlNet_Checkpoint_SD2_Config"] | components["schemas"]["ControlNet_Checkpoint_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_FLUX_Config"] | components["schemas"]["ControlNet_Checkpoint_ZImage_Config"] | components["schemas"]["ControlNet_Diffusers_SD1_Config"] | components["schemas"]["ControlNet_Diffusers_SD2_Config"] | components["schemas"]["ControlNet_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_SD1_Config"] | components["schemas"]["LoRA_LyCORIS_SD2_Config"] | components["schemas"]["LoRA_LyCORIS_SDXL_Config"] | components["schemas"]["LoRA_LyCORIS_Flux2_Config"] | components["schemas"]["LoRA_LyCORIS_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_ZImage_Config"] | components["schemas"]["LoRA_OMI_SDXL_Config"] | components["schemas"]["LoRA_OMI_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_SD1_Config"] | components["schemas"]["LoRA_Diffusers_SD2_Config"] | components["schemas"]["LoRA_Diffusers_SDXL_Config"] | 
components["schemas"]["LoRA_Diffusers_Flux2_Config"] | components["schemas"]["LoRA_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_ZImage_Config"] | components["schemas"]["ControlLoRA_LyCORIS_FLUX_Config"] | components["schemas"]["T5Encoder_T5Encoder_Config"] | components["schemas"]["T5Encoder_BnBLLMint8_Config"] | components["schemas"]["Qwen3Encoder_Qwen3Encoder_Config"] | components["schemas"]["Qwen3Encoder_Checkpoint_Config"] | components["schemas"]["Qwen3Encoder_GGUF_Config"] | components["schemas"]["TI_File_SD1_Config"] | components["schemas"]["TI_File_SD2_Config"] | components["schemas"]["TI_File_SDXL_Config"] | components["schemas"]["TI_Folder_SD1_Config"] | components["schemas"]["TI_Folder_SD2_Config"] | components["schemas"]["TI_Folder_SDXL_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD1_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD2_Config"] | components["schemas"]["IPAdapter_InvokeAI_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD1_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD2_Config"] | components["schemas"]["IPAdapter_Checkpoint_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_FLUX_Config"] | components["schemas"]["T2IAdapter_Diffusers_SD1_Config"] | components["schemas"]["T2IAdapter_Diffusers_SDXL_Config"] | components["schemas"]["Spandrel_Checkpoint_Config"] | components["schemas"]["CLIPEmbed_Diffusers_G_Config"] | components["schemas"]["CLIPEmbed_Diffusers_L_Config"] | components["schemas"]["CLIPVision_Diffusers_Config"] | components["schemas"]["SigLIP_Diffusers_Config"] | components["schemas"]["FLUXRedux_Checkpoint_Config"] | components["schemas"]["LlavaOnevision_Diffusers_Config"] | components["schemas"]["Unknown_Config"]; }; }; /** @description Bad request */ @@ -28101,7 +29836,7 @@ export interface operations { * "repo_variant": "fp16", * "upcast_attention": false * } */ - "application/json": components["schemas"]["Main_Diffusers_SD1_Config"] | 
components["schemas"]["Main_Diffusers_SD2_Config"] | components["schemas"]["Main_Diffusers_SDXL_Config"] | components["schemas"]["Main_Diffusers_SDXLRefiner_Config"] | components["schemas"]["Main_Diffusers_SD3_Config"] | components["schemas"]["Main_Diffusers_FLUX_Config"] | components["schemas"]["Main_Diffusers_Flux2_Config"] | components["schemas"]["Main_Diffusers_CogView4_Config"] | components["schemas"]["Main_Diffusers_ZImage_Config"] | components["schemas"]["Main_Checkpoint_SD1_Config"] | components["schemas"]["Main_Checkpoint_SD2_Config"] | components["schemas"]["Main_Checkpoint_SDXL_Config"] | components["schemas"]["Main_Checkpoint_SDXLRefiner_Config"] | components["schemas"]["Main_Checkpoint_Flux2_Config"] | components["schemas"]["Main_Checkpoint_FLUX_Config"] | components["schemas"]["Main_Checkpoint_ZImage_Config"] | components["schemas"]["Main_Checkpoint_Anima_Config"] | components["schemas"]["Main_BnBNF4_FLUX_Config"] | components["schemas"]["Main_GGUF_Flux2_Config"] | components["schemas"]["Main_GGUF_FLUX_Config"] | components["schemas"]["Main_GGUF_ZImage_Config"] | components["schemas"]["VAE_Checkpoint_SD1_Config"] | components["schemas"]["VAE_Checkpoint_SD2_Config"] | components["schemas"]["VAE_Checkpoint_SDXL_Config"] | components["schemas"]["VAE_Checkpoint_FLUX_Config"] | components["schemas"]["VAE_Checkpoint_Flux2_Config"] | components["schemas"]["VAE_Checkpoint_Anima_Config"] | components["schemas"]["VAE_Diffusers_SD1_Config"] | components["schemas"]["VAE_Diffusers_SDXL_Config"] | components["schemas"]["VAE_Diffusers_Flux2_Config"] | components["schemas"]["ControlNet_Checkpoint_SD1_Config"] | components["schemas"]["ControlNet_Checkpoint_SD2_Config"] | components["schemas"]["ControlNet_Checkpoint_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_FLUX_Config"] | components["schemas"]["ControlNet_Checkpoint_ZImage_Config"] | components["schemas"]["ControlNet_Diffusers_SD1_Config"] | components["schemas"]["ControlNet_Diffusers_SD2_Config"] | 
components["schemas"]["ControlNet_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_SD1_Config"] | components["schemas"]["LoRA_LyCORIS_SD2_Config"] | components["schemas"]["LoRA_LyCORIS_SDXL_Config"] | components["schemas"]["LoRA_LyCORIS_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_ZImage_Config"] | components["schemas"]["LoRA_OMI_SDXL_Config"] | components["schemas"]["LoRA_OMI_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_SD1_Config"] | components["schemas"]["LoRA_Diffusers_SD2_Config"] | components["schemas"]["LoRA_Diffusers_SDXL_Config"] | components["schemas"]["LoRA_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_ZImage_Config"] | components["schemas"]["ControlLoRA_LyCORIS_FLUX_Config"] | components["schemas"]["T5Encoder_T5Encoder_Config"] | components["schemas"]["T5Encoder_BnBLLMint8_Config"] | components["schemas"]["Qwen3Encoder_Qwen3Encoder_Config"] | components["schemas"]["Qwen3Encoder_Checkpoint_Config"] | components["schemas"]["Qwen3Encoder_GGUF_Config"] | components["schemas"]["TI_File_SD1_Config"] | components["schemas"]["TI_File_SD2_Config"] | components["schemas"]["TI_File_SDXL_Config"] | components["schemas"]["TI_Folder_SD1_Config"] | components["schemas"]["TI_Folder_SD2_Config"] | components["schemas"]["TI_Folder_SDXL_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD1_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD2_Config"] | components["schemas"]["IPAdapter_InvokeAI_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD1_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD2_Config"] | components["schemas"]["IPAdapter_Checkpoint_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_FLUX_Config"] | components["schemas"]["T2IAdapter_Diffusers_SD1_Config"] | components["schemas"]["T2IAdapter_Diffusers_SDXL_Config"] | components["schemas"]["Spandrel_Checkpoint_Config"] | 
components["schemas"]["CLIPEmbed_Diffusers_G_Config"] | components["schemas"]["CLIPEmbed_Diffusers_L_Config"] | components["schemas"]["CLIPVision_Diffusers_Config"] | components["schemas"]["SigLIP_Diffusers_Config"] | components["schemas"]["FLUXRedux_Checkpoint_Config"] | components["schemas"]["LlavaOnevision_Diffusers_Config"] | components["schemas"]["Unknown_Config"]; + "application/json": components["schemas"]["Main_Diffusers_SD1_Config"] | components["schemas"]["Main_Diffusers_SD2_Config"] | components["schemas"]["Main_Diffusers_SDXL_Config"] | components["schemas"]["Main_Diffusers_SDXLRefiner_Config"] | components["schemas"]["Main_Diffusers_SD3_Config"] | components["schemas"]["Main_Diffusers_FLUX_Config"] | components["schemas"]["Main_Diffusers_Flux2_Config"] | components["schemas"]["Main_Diffusers_CogView4_Config"] | components["schemas"]["Main_Diffusers_ZImage_Config"] | components["schemas"]["Main_Checkpoint_SD1_Config"] | components["schemas"]["Main_Checkpoint_SD2_Config"] | components["schemas"]["Main_Checkpoint_SDXL_Config"] | components["schemas"]["Main_Checkpoint_SDXLRefiner_Config"] | components["schemas"]["Main_Checkpoint_Flux2_Config"] | components["schemas"]["Main_Checkpoint_FLUX_Config"] | components["schemas"]["Main_Checkpoint_ZImage_Config"] | components["schemas"]["Main_Checkpoint_Anima_Config"] | components["schemas"]["Main_BnBNF4_FLUX_Config"] | components["schemas"]["Main_GGUF_Flux2_Config"] | components["schemas"]["Main_GGUF_FLUX_Config"] | components["schemas"]["Main_GGUF_ZImage_Config"] | components["schemas"]["VAE_Checkpoint_SD1_Config"] | components["schemas"]["VAE_Checkpoint_SD2_Config"] | components["schemas"]["VAE_Checkpoint_SDXL_Config"] | components["schemas"]["VAE_Checkpoint_FLUX_Config"] | components["schemas"]["VAE_Checkpoint_Flux2_Config"] | components["schemas"]["VAE_Checkpoint_Anima_Config"] | components["schemas"]["VAE_Diffusers_SD1_Config"] | components["schemas"]["VAE_Diffusers_SDXL_Config"] | 
components["schemas"]["VAE_Diffusers_Flux2_Config"] | components["schemas"]["ControlNet_Checkpoint_SD1_Config"] | components["schemas"]["ControlNet_Checkpoint_SD2_Config"] | components["schemas"]["ControlNet_Checkpoint_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_FLUX_Config"] | components["schemas"]["ControlNet_Checkpoint_ZImage_Config"] | components["schemas"]["ControlNet_Diffusers_SD1_Config"] | components["schemas"]["ControlNet_Diffusers_SD2_Config"] | components["schemas"]["ControlNet_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_SD1_Config"] | components["schemas"]["LoRA_LyCORIS_SD2_Config"] | components["schemas"]["LoRA_LyCORIS_SDXL_Config"] | components["schemas"]["LoRA_LyCORIS_Flux2_Config"] | components["schemas"]["LoRA_LyCORIS_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_ZImage_Config"] | components["schemas"]["LoRA_OMI_SDXL_Config"] | components["schemas"]["LoRA_OMI_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_SD1_Config"] | components["schemas"]["LoRA_Diffusers_SD2_Config"] | components["schemas"]["LoRA_Diffusers_SDXL_Config"] | components["schemas"]["LoRA_Diffusers_Flux2_Config"] | components["schemas"]["LoRA_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_ZImage_Config"] | components["schemas"]["ControlLoRA_LyCORIS_FLUX_Config"] | components["schemas"]["T5Encoder_T5Encoder_Config"] | components["schemas"]["T5Encoder_BnBLLMint8_Config"] | components["schemas"]["Qwen3Encoder_Qwen3Encoder_Config"] | components["schemas"]["Qwen3Encoder_Checkpoint_Config"] | components["schemas"]["Qwen3Encoder_GGUF_Config"] | components["schemas"]["TI_File_SD1_Config"] | components["schemas"]["TI_File_SD2_Config"] | components["schemas"]["TI_File_SDXL_Config"] | components["schemas"]["TI_Folder_SD1_Config"] | components["schemas"]["TI_Folder_SD2_Config"] | components["schemas"]["TI_Folder_SDXL_Config"] | 
components["schemas"]["IPAdapter_InvokeAI_SD1_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD2_Config"] | components["schemas"]["IPAdapter_InvokeAI_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD1_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD2_Config"] | components["schemas"]["IPAdapter_Checkpoint_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_FLUX_Config"] | components["schemas"]["T2IAdapter_Diffusers_SD1_Config"] | components["schemas"]["T2IAdapter_Diffusers_SDXL_Config"] | components["schemas"]["Spandrel_Checkpoint_Config"] | components["schemas"]["CLIPEmbed_Diffusers_G_Config"] | components["schemas"]["CLIPEmbed_Diffusers_L_Config"] | components["schemas"]["CLIPVision_Diffusers_Config"] | components["schemas"]["SigLIP_Diffusers_Config"] | components["schemas"]["FLUXRedux_Checkpoint_Config"] | components["schemas"]["LlavaOnevision_Diffusers_Config"] | components["schemas"]["Unknown_Config"]; }; }; /** @description Bad request */ @@ -28605,6 +30340,166 @@ export interface operations { }; }; }; + pause_model_install_job: { + parameters: { + query?: never; + header?: never; + path: { + /** @description Model install job ID */ + id: number; + }; + cookie?: never; + }; + requestBody?: never; + responses: { + /** @description The job was paused successfully */ + 201: { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["ModelInstallJob"]; + }; + }; + /** @description No such job */ + 415: { + headers: { + [name: string]: unknown; + }; + content?: never; + }; + /** @description Validation Error */ + 422: { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["HTTPValidationError"]; + }; + }; + }; + }; + resume_model_install_job: { + parameters: { + query?: never; + header?: never; + path: { + /** @description Model install job ID */ + id: number; + }; + cookie?: never; + }; + requestBody?: never; + responses: { + /** 
@description The job was resumed successfully */ + 201: { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["ModelInstallJob"]; + }; + }; + /** @description No such job */ + 415: { + headers: { + [name: string]: unknown; + }; + content?: never; + }; + /** @description Validation Error */ + 422: { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["HTTPValidationError"]; + }; + }; + }; + }; + restart_failed_model_install_job: { + parameters: { + query?: never; + header?: never; + path: { + /** @description Model install job ID */ + id: number; + }; + cookie?: never; + }; + requestBody?: never; + responses: { + /** @description Failed files restarted successfully */ + 201: { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["ModelInstallJob"]; + }; + }; + /** @description No such job */ + 415: { + headers: { + [name: string]: unknown; + }; + content?: never; + }; + /** @description Validation Error */ + 422: { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["HTTPValidationError"]; + }; + }; + }; + }; + restart_model_install_file: { + parameters: { + query?: never; + header?: never; + path: { + /** @description Model install job ID */ + id: number; + }; + cookie?: never; + }; + requestBody: { + content: { + "application/json": string; + }; + }; + responses: { + /** @description File restarted successfully */ + 201: { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["ModelInstallJob"]; + }; + }; + /** @description No such job */ + 415: { + headers: { + [name: string]: unknown; + }; + content?: never; + }; + /** @description Validation Error */ + 422: { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["HTTPValidationError"]; + }; + }; + }; + }; convert_model: { 
parameters: { query?: never; @@ -28641,7 +30536,7 @@ export interface operations { * "repo_variant": "fp16", * "upcast_attention": false * } */ - "application/json": components["schemas"]["Main_Diffusers_SD1_Config"] | components["schemas"]["Main_Diffusers_SD2_Config"] | components["schemas"]["Main_Diffusers_SDXL_Config"] | components["schemas"]["Main_Diffusers_SDXLRefiner_Config"] | components["schemas"]["Main_Diffusers_SD3_Config"] | components["schemas"]["Main_Diffusers_FLUX_Config"] | components["schemas"]["Main_Diffusers_Flux2_Config"] | components["schemas"]["Main_Diffusers_CogView4_Config"] | components["schemas"]["Main_Diffusers_ZImage_Config"] | components["schemas"]["Main_Checkpoint_SD1_Config"] | components["schemas"]["Main_Checkpoint_SD2_Config"] | components["schemas"]["Main_Checkpoint_SDXL_Config"] | components["schemas"]["Main_Checkpoint_SDXLRefiner_Config"] | components["schemas"]["Main_Checkpoint_Flux2_Config"] | components["schemas"]["Main_Checkpoint_FLUX_Config"] | components["schemas"]["Main_Checkpoint_ZImage_Config"] | components["schemas"]["Main_Checkpoint_Anima_Config"] | components["schemas"]["Main_BnBNF4_FLUX_Config"] | components["schemas"]["Main_GGUF_Flux2_Config"] | components["schemas"]["Main_GGUF_FLUX_Config"] | components["schemas"]["Main_GGUF_ZImage_Config"] | components["schemas"]["VAE_Checkpoint_SD1_Config"] | components["schemas"]["VAE_Checkpoint_SD2_Config"] | components["schemas"]["VAE_Checkpoint_SDXL_Config"] | components["schemas"]["VAE_Checkpoint_FLUX_Config"] | components["schemas"]["VAE_Checkpoint_Flux2_Config"] | components["schemas"]["VAE_Checkpoint_Anima_Config"] | components["schemas"]["VAE_Diffusers_SD1_Config"] | components["schemas"]["VAE_Diffusers_SDXL_Config"] | components["schemas"]["VAE_Diffusers_Flux2_Config"] | components["schemas"]["ControlNet_Checkpoint_SD1_Config"] | components["schemas"]["ControlNet_Checkpoint_SD2_Config"] | components["schemas"]["ControlNet_Checkpoint_SDXL_Config"] | 
components["schemas"]["ControlNet_Checkpoint_FLUX_Config"] | components["schemas"]["ControlNet_Checkpoint_ZImage_Config"] | components["schemas"]["ControlNet_Diffusers_SD1_Config"] | components["schemas"]["ControlNet_Diffusers_SD2_Config"] | components["schemas"]["ControlNet_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_SD1_Config"] | components["schemas"]["LoRA_LyCORIS_SD2_Config"] | components["schemas"]["LoRA_LyCORIS_SDXL_Config"] | components["schemas"]["LoRA_LyCORIS_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_ZImage_Config"] | components["schemas"]["LoRA_OMI_SDXL_Config"] | components["schemas"]["LoRA_OMI_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_SD1_Config"] | components["schemas"]["LoRA_Diffusers_SD2_Config"] | components["schemas"]["LoRA_Diffusers_SDXL_Config"] | components["schemas"]["LoRA_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_ZImage_Config"] | components["schemas"]["ControlLoRA_LyCORIS_FLUX_Config"] | components["schemas"]["T5Encoder_T5Encoder_Config"] | components["schemas"]["T5Encoder_BnBLLMint8_Config"] | components["schemas"]["Qwen3Encoder_Qwen3Encoder_Config"] | components["schemas"]["Qwen3Encoder_Checkpoint_Config"] | components["schemas"]["Qwen3Encoder_GGUF_Config"] | components["schemas"]["TI_File_SD1_Config"] | components["schemas"]["TI_File_SD2_Config"] | components["schemas"]["TI_File_SDXL_Config"] | components["schemas"]["TI_Folder_SD1_Config"] | components["schemas"]["TI_Folder_SD2_Config"] | components["schemas"]["TI_Folder_SDXL_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD1_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD2_Config"] | components["schemas"]["IPAdapter_InvokeAI_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD1_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD2_Config"] | components["schemas"]["IPAdapter_Checkpoint_SDXL_Config"] | 
components["schemas"]["IPAdapter_Checkpoint_FLUX_Config"] | components["schemas"]["T2IAdapter_Diffusers_SD1_Config"] | components["schemas"]["T2IAdapter_Diffusers_SDXL_Config"] | components["schemas"]["Spandrel_Checkpoint_Config"] | components["schemas"]["CLIPEmbed_Diffusers_G_Config"] | components["schemas"]["CLIPEmbed_Diffusers_L_Config"] | components["schemas"]["CLIPVision_Diffusers_Config"] | components["schemas"]["SigLIP_Diffusers_Config"] | components["schemas"]["FLUXRedux_Checkpoint_Config"] | components["schemas"]["LlavaOnevision_Diffusers_Config"] | components["schemas"]["Unknown_Config"]; + "application/json": components["schemas"]["Main_Diffusers_SD1_Config"] | components["schemas"]["Main_Diffusers_SD2_Config"] | components["schemas"]["Main_Diffusers_SDXL_Config"] | components["schemas"]["Main_Diffusers_SDXLRefiner_Config"] | components["schemas"]["Main_Diffusers_SD3_Config"] | components["schemas"]["Main_Diffusers_FLUX_Config"] | components["schemas"]["Main_Diffusers_Flux2_Config"] | components["schemas"]["Main_Diffusers_CogView4_Config"] | components["schemas"]["Main_Diffusers_ZImage_Config"] | components["schemas"]["Main_Checkpoint_SD1_Config"] | components["schemas"]["Main_Checkpoint_SD2_Config"] | components["schemas"]["Main_Checkpoint_SDXL_Config"] | components["schemas"]["Main_Checkpoint_SDXLRefiner_Config"] | components["schemas"]["Main_Checkpoint_Flux2_Config"] | components["schemas"]["Main_Checkpoint_FLUX_Config"] | components["schemas"]["Main_Checkpoint_ZImage_Config"] | components["schemas"]["Main_Checkpoint_Anima_Config"] | components["schemas"]["Main_BnBNF4_FLUX_Config"] | components["schemas"]["Main_GGUF_Flux2_Config"] | components["schemas"]["Main_GGUF_FLUX_Config"] | components["schemas"]["Main_GGUF_ZImage_Config"] | components["schemas"]["VAE_Checkpoint_SD1_Config"] | components["schemas"]["VAE_Checkpoint_SD2_Config"] | components["schemas"]["VAE_Checkpoint_SDXL_Config"] | components["schemas"]["VAE_Checkpoint_FLUX_Config"] | 
components["schemas"]["VAE_Checkpoint_Flux2_Config"] | components["schemas"]["VAE_Checkpoint_Anima_Config"] | components["schemas"]["VAE_Diffusers_SD1_Config"] | components["schemas"]["VAE_Diffusers_SDXL_Config"] | components["schemas"]["VAE_Diffusers_Flux2_Config"] | components["schemas"]["ControlNet_Checkpoint_SD1_Config"] | components["schemas"]["ControlNet_Checkpoint_SD2_Config"] | components["schemas"]["ControlNet_Checkpoint_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_FLUX_Config"] | components["schemas"]["ControlNet_Checkpoint_ZImage_Config"] | components["schemas"]["ControlNet_Diffusers_SD1_Config"] | components["schemas"]["ControlNet_Diffusers_SD2_Config"] | components["schemas"]["ControlNet_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_SD1_Config"] | components["schemas"]["LoRA_LyCORIS_SD2_Config"] | components["schemas"]["LoRA_LyCORIS_SDXL_Config"] | components["schemas"]["LoRA_LyCORIS_Flux2_Config"] | components["schemas"]["LoRA_LyCORIS_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_ZImage_Config"] | components["schemas"]["LoRA_OMI_SDXL_Config"] | components["schemas"]["LoRA_OMI_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_SD1_Config"] | components["schemas"]["LoRA_Diffusers_SD2_Config"] | components["schemas"]["LoRA_Diffusers_SDXL_Config"] | components["schemas"]["LoRA_Diffusers_Flux2_Config"] | components["schemas"]["LoRA_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_ZImage_Config"] | components["schemas"]["ControlLoRA_LyCORIS_FLUX_Config"] | components["schemas"]["T5Encoder_T5Encoder_Config"] | components["schemas"]["T5Encoder_BnBLLMint8_Config"] | components["schemas"]["Qwen3Encoder_Qwen3Encoder_Config"] | components["schemas"]["Qwen3Encoder_Checkpoint_Config"] | components["schemas"]["Qwen3Encoder_GGUF_Config"] | components["schemas"]["TI_File_SD1_Config"] | components["schemas"]["TI_File_SD2_Config"] | 
components["schemas"]["TI_File_SDXL_Config"] | components["schemas"]["TI_Folder_SD1_Config"] | components["schemas"]["TI_Folder_SD2_Config"] | components["schemas"]["TI_Folder_SDXL_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD1_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD2_Config"] | components["schemas"]["IPAdapter_InvokeAI_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD1_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD2_Config"] | components["schemas"]["IPAdapter_Checkpoint_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_FLUX_Config"] | components["schemas"]["T2IAdapter_Diffusers_SD1_Config"] | components["schemas"]["T2IAdapter_Diffusers_SDXL_Config"] | components["schemas"]["Spandrel_Checkpoint_Config"] | components["schemas"]["CLIPEmbed_Diffusers_G_Config"] | components["schemas"]["CLIPEmbed_Diffusers_L_Config"] | components["schemas"]["CLIPVision_Diffusers_Config"] | components["schemas"]["SigLIP_Diffusers_Config"] | components["schemas"]["FLUXRedux_Checkpoint_Config"] | components["schemas"]["LlavaOnevision_Diffusers_Config"] | components["schemas"]["Unknown_Config"]; }; }; /** @description Bad request */ @@ -32004,7 +33899,7 @@ export interface operations { }; header?: never; path: { - /** @description The queue id to perform this operation on */ + /** @description The queue id (ignored, kept for backwards compatibility) */ queue_id: string; }; cookie?: never; @@ -32039,7 +33934,7 @@ export interface operations { }; header?: never; path: { - /** @description The queue id to perform this operation on */ + /** @description The queue id (ignored, kept for backwards compatibility) */ queue_id: string; }; cookie?: never; @@ -32075,7 +33970,7 @@ export interface operations { query?: never; header?: never; path: { - /** @description The queue id to perform this operation on */ + /** @description The queue id (ignored, kept for backwards compatibility) */ queue_id: string; }; cookie?: never; @@ -32109,4 
+34004,76 @@ export interface operations { }; }; }; + get_recall_parameters: { + parameters: { + query?: never; + header?: never; + path: { + /** @description The queue id to retrieve parameters for */ + queue_id: string; + }; + cookie?: never; + }; + requestBody?: never; + responses: { + /** @description Successful Response */ + 200: { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": { + [key: string]: unknown; + }; + }; + }; + /** @description Validation Error */ + 422: { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["HTTPValidationError"]; + }; + }; + }; + }; + update_recall_parameters: { + parameters: { + query?: never; + header?: never; + path: { + /** @description The queue id to perform this operation on */ + queue_id: string; + }; + cookie?: never; + }; + requestBody: { + content: { + "application/json": components["schemas"]["RecallParameter"]; + }; + }; + responses: { + /** @description Successful Response */ + 200: { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": { + [key: string]: unknown; + }; + }; + }; + /** @description Validation Error */ + 422: { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["HTTPValidationError"]; + }; + }; + }; + }; } From 686a39715777f4cac26f7f3cc908d0286959ad62 Mon Sep 17 00:00:00 2001 From: Your Name Date: Thu, 12 Mar 2026 03:06:28 -0400 Subject: [PATCH 03/14] image to image --- invokeai/app/invocations/anima_denoise.py | 189 +++++++++++++++++- .../app/invocations/anima_image_to_latents.py | 121 +++++++++++ invokeai/app/invocations/metadata.py | 4 + invokeai/frontend/web/public/locales/en.json | 4 + .../listeners/modelSelected.ts | 50 +++++ .../controlLayers/store/paramsSlice.ts | 13 +- .../web/src/features/metadata/parsing.tsx | 56 ++++++ .../util/graph/generation/addImageToImage.ts | 18 +- .../nodes/util/graph/generation/addInpaint.ts | 7 +- 
.../util/graph/generation/addOutpaint.ts | 3 +- .../util/graph/generation/addTextToImage.ts | 12 +- .../util/graph/generation/buildAnimaGraph.ts | 106 +++++++--- .../nodes/util/graph/graphBuilderUtils.ts | 11 + .../src/features/nodes/util/graph/types.ts | 9 +- .../Advanced/ParamAnimaModelSelect.tsx | 116 +++++++++++ .../components/Core/ParamAnimaScheduler.tsx | 45 +++++ .../features/queue/hooks/useEnqueueCanvas.ts | 3 + .../AdvancedSettingsAccordion.tsx | 12 +- .../GenerationSettingsAccordion.tsx | 6 +- .../src/services/api/hooks/modelsByType.ts | 3 + .../frontend/web/src/services/api/schema.ts | 92 ++++++++- .../frontend/web/src/services/api/types.ts | 7 + 22 files changed, 823 insertions(+), 64 deletions(-) create mode 100644 invokeai/app/invocations/anima_image_to_latents.py create mode 100644 invokeai/frontend/web/src/features/parameters/components/Advanced/ParamAnimaModelSelect.tsx create mode 100644 invokeai/frontend/web/src/features/parameters/components/Core/ParamAnimaScheduler.tsx diff --git a/invokeai/app/invocations/anima_denoise.py b/invokeai/app/invocations/anima_denoise.py index b16ad97b03d..bef3249cc4b 100644 --- a/invokeai/app/invocations/anima_denoise.py +++ b/invokeai/app/invocations/anima_denoise.py @@ -19,15 +19,18 @@ import inspect import math from contextlib import ExitStack -from typing import Callable +from typing import Callable, Optional import torch +import torchvision.transforms as tv_transforms from diffusers.schedulers.scheduling_utils import SchedulerMixin +from torchvision.transforms.functional import resize as tv_resize from tqdm import tqdm from invokeai.app.invocations.baseinvocation import BaseInvocation, Classification, invocation from invokeai.app.invocations.fields import ( AnimaConditioningField, + DenoiseMaskField, FieldDescriptions, Input, InputField, @@ -38,6 +41,10 @@ from invokeai.app.services.shared.invocation_context import InvocationContext from invokeai.backend.flux.schedulers import ANIMA_SCHEDULER_LABELS, 
ANIMA_SCHEDULER_MAP, ANIMA_SCHEDULER_NAME_VALUES from invokeai.backend.model_manager.taxonomy import BaseModelType +from invokeai.backend.rectified_flow.rectified_flow_inpaint_extension import ( + RectifiedFlowInpaintExtension, + assert_broadcastable, +) from invokeai.backend.stable_diffusion.diffusion.conditioning_data import AnimaConditioningInfo from invokeai.backend.stable_diffusion.diffusers_pipeline import PipelineIntermediateState from invokeai.backend.util.devices import TorchDevice @@ -70,12 +77,86 @@ def time_snr_shift(alpha: float, t: float) -> float: return alpha * t / (1 + (alpha - 1) * t) +def inverse_time_snr_shift(alpha: float, sigma: float) -> float: + """Recover linear t from a shifted sigma value. + + Inverse of time_snr_shift: given sigma = alpha * t / (1 + (alpha-1) * t), + solve for t = sigma / (alpha - (alpha-1) * sigma). + + This is needed for the inpainting extension, which expects linear t values + for gradient mask thresholding. With Anima's shift=3.0, the difference + between shifted sigma and linear t is large (e.g. at t=0.5, sigma=0.75), + causing overly aggressive mask thresholding if sigma is used directly. + + Args: + alpha: Shift factor (3.0 for Anima). + sigma: Shifted sigma value in [0, 1]. + + Returns: + Linear t value in [0, 1]. + """ + if alpha == 1.0: + return sigma + denominator = alpha - (alpha - 1) * sigma + if abs(denominator) < 1e-8: + return 1.0 + return sigma / denominator + + +class AnimaInpaintExtension(RectifiedFlowInpaintExtension): + """Inpaint extension for Anima that accounts for the time-SNR shift. + + Anima uses a fixed shift=3.0 which makes sigma values significantly larger + than the corresponding linear t values. The base RectifiedFlowInpaintExtension + uses t_prev for both gradient mask thresholding and noise mixing, which assumes + linear t values. 
+ + This subclass: + - Uses the LINEAR t for gradient mask thresholding (correct progressive reveal) + - Uses the SHIFTED sigma for noise mixing (matches the denoiser's noise level) + """ + + def __init__( + self, + init_latents: torch.Tensor, + inpaint_mask: torch.Tensor, + noise: torch.Tensor, + shift: float = ANIMA_SHIFT, + ): + assert_broadcastable(init_latents.shape, inpaint_mask.shape, noise.shape) + self._init_latents = init_latents + self._inpaint_mask = inpaint_mask + self._noise = noise + self._shift = shift + + def merge_intermediate_latents_with_init_latents( + self, intermediate_latents: torch.Tensor, sigma_prev: float + ) -> torch.Tensor: + """Merge intermediate latents with init latents, correcting for Anima's shift. + + Args: + intermediate_latents: The denoised latents at the current step. + sigma_prev: The SHIFTED sigma value for the next step. + """ + # Recover linear t from shifted sigma for gradient mask thresholding. + # This ensures the gradient mask is revealed at the correct pace. + t_prev = inverse_time_snr_shift(self._shift, sigma_prev) + mask = self._apply_mask_gradient_adjustment(t_prev) + + # Use shifted sigma for noise mixing to match the denoiser's noise level. + # The Euler step produces latents at noise level sigma_prev, so the + # preserved regions must also be at sigma_prev noise level. + noised_init_latents = self._noise * sigma_prev + (1.0 - sigma_prev) * self._init_latents + + return intermediate_latents * mask + noised_init_latents * (1.0 - mask) + + @invocation( "anima_denoise", title="Denoise - Anima", tags=["image", "anima"], category="image", - version="1.0.0", + version="1.1.0", classification=Classification.Prototype, ) class AnimaDenoiseInvocation(BaseInvocation): @@ -83,8 +164,21 @@ class AnimaDenoiseInvocation(BaseInvocation): Uses rectified flow sampling with shift=3.0 and the Cosmos Predict2 DiT backbone with integrated LLM Adapter for text conditioning. 
+ + Supports txt2img, img2img (via latents input), and inpainting (via denoise_mask). """ + # If latents is provided, this means we are doing image-to-image. + latents: Optional[LatentsField] = InputField( + default=None, description=FieldDescriptions.latents, input=Input.Connection + ) + # denoise_mask is used for inpainting. Only the masked region is modified. + denoise_mask: Optional[DenoiseMaskField] = InputField( + default=None, description=FieldDescriptions.denoise_mask, input=Input.Connection + ) + denoising_start: float = InputField(default=0.0, ge=0, le=1, description=FieldDescriptions.denoising_start) + denoising_end: float = InputField(default=1.0, ge=0, le=1, description=FieldDescriptions.denoising_end) + add_noise: bool = InputField(default=True, description="Add noise based on denoising start.") transformer: TransformerField = InputField( description="Anima transformer model.", input=Input.Connection, title="Transformer" ) @@ -117,6 +211,30 @@ def invoke(self, context: InvocationContext) -> LatentsOutput: name = context.tensors.save(tensor=latents) return LatentsOutput.build(latents_name=name, latents=latents, seed=None) + def _prep_inpaint_mask(self, context: InvocationContext, latents: torch.Tensor) -> torch.Tensor | None: + """Prepare the inpaint mask for Anima. + + Anima uses 3D latents [B, C, T, H, W] internally but the mask operates + on the spatial dimensions [B, C, H, W] which match the squeezed output. 
+ """ + if self.denoise_mask is None: + return None + mask = context.tensors.load(self.denoise_mask.mask_name) + + # Invert mask: 0.0 = regions to denoise, 1.0 = regions to preserve + mask = 1.0 - mask + + _, _, latent_height, latent_width = latents.shape + mask = tv_resize( + img=mask, + size=[latent_height, latent_width], + interpolation=tv_transforms.InterpolationMode.BILINEAR, + antialias=False, + ) + + mask = mask.to(device=latents.device, dtype=latents.dtype) + return mask + def _get_noise( self, height: int, @@ -197,14 +315,59 @@ def _run_diffusion(self, context: InvocationContext) -> torch.Tensor: # Generate sigma schedule sigmas = self._get_sigmas(self.steps) + + # Apply denoising_start and denoising_end clipping (for img2img/inpaint) + if self.denoising_start > 0 or self.denoising_end < 1: + total_sigmas = len(sigmas) + start_idx = int(self.denoising_start * (total_sigmas - 1)) + end_idx = int(self.denoising_end * (total_sigmas - 1)) + 1 + sigmas = sigmas[start_idx:end_idx] + total_steps = len(sigmas) - 1 + # Load input latents if provided (image-to-image) + init_latents = context.tensors.load(self.latents.latents_name) if self.latents else None + if init_latents is not None: + init_latents = init_latents.to(device=device, dtype=inference_dtype) + # Anima denoiser works in 3D: add temporal dim if needed + if init_latents.ndim == 4: + init_latents = init_latents.unsqueeze(2) # [B, C, H, W] -> [B, C, 1, H, W] + # Generate initial noise (3D latent: [B, C, T, H, W]) - latents = self._get_noise(self.height, self.width, inference_dtype, device, self.seed) + noise = self._get_noise(self.height, self.width, inference_dtype, device, self.seed) + + # Prepare input latents + if init_latents is not None: + if self.add_noise: + # Noise the init_latents for img2img: latents = s_0 * noise + (1 - s_0) * init_latents + s_0 = sigmas[0] + latents = s_0 * noise + (1.0 - s_0) * init_latents + else: + latents = init_latents + else: + if self.denoising_start > 1e-5: + raise 
ValueError("denoising_start should be 0 when initial latents are not provided.") + latents = noise if total_steps <= 0: return latents.squeeze(2) # Remove temporal dim for output + # Prepare inpaint extension (operates on squeezed 4D latents) + # Uses AnimaInpaintExtension which corrects for the time-SNR shift: + # - Linear t for gradient mask thresholding (correct progressive reveal) + # - Shifted sigma for noise mixing (matches denoiser's noise level) + inpaint_mask = self._prep_inpaint_mask(context, latents.squeeze(2)) + inpaint_extension: AnimaInpaintExtension | None = None + if inpaint_mask is not None: + if init_latents is None: + raise ValueError("Initial latents are required when using an inpaint mask (image-to-image inpainting)") + inpaint_extension = AnimaInpaintExtension( + init_latents=init_latents.squeeze(2), + inpaint_mask=inpaint_mask, + noise=noise.squeeze(2), + shift=ANIMA_SHIFT, + ) + step_callback = self._build_step_callback(context) # Initialize diffusers scheduler if not using built-in Euler @@ -270,6 +433,19 @@ def _run_diffusion(self, context: InvocationContext) -> torch.Tensor: step_output = scheduler.step(model_output=noise_pred, timestep=sched_timestep, sample=latents) latents = step_output.prev_sample + # Get sigma_prev for inpainting + if step_index + 1 < len(scheduler.sigmas): + sigma_prev = scheduler.sigmas[step_index + 1].item() + else: + sigma_prev = 0.0 + + if inpaint_extension is not None: + latents_4d = latents.squeeze(2) + latents_4d = inpaint_extension.merge_intermediate_latents_with_init_latents( + latents_4d, sigma_prev + ) + latents = latents_4d.unsqueeze(2) + if is_heun: if not in_first_order: user_step += 1 @@ -331,6 +507,13 @@ def _run_diffusion(self, context: InvocationContext) -> torch.Tensor: latents = latents + (sigma_prev - sigma_curr) * noise_pred latents = latents.to(dtype=latents_dtype) + if inpaint_extension is not None: + latents_4d = latents.squeeze(2) + latents_4d = 
inpaint_extension.merge_intermediate_latents_with_init_latents( + latents_4d, sigma_prev + ) + latents = latents_4d.unsqueeze(2) + step_callback( PipelineIntermediateState( step=step_idx + 1, diff --git a/invokeai/app/invocations/anima_image_to_latents.py b/invokeai/app/invocations/anima_image_to_latents.py new file mode 100644 index 00000000000..1bb260a9ae0 --- /dev/null +++ b/invokeai/app/invocations/anima_image_to_latents.py @@ -0,0 +1,121 @@ +"""Anima image-to-latents invocation. + +Encodes an image to latent space using the Anima VAE (AutoencoderKLWan or FLUX VAE). + +For Wan VAE (AutoencoderKLWan): +- Input image is converted to 5D tensor [B, C, T, H, W] with T=1 +- After encoding, latents are normalized: (latents - mean) / std + (inverse of the denormalization in anima_latents_to_image.py) + +For FLUX VAE (AutoEncoder): +- Encoding is handled internally by the FLUX VAE +""" + +from typing import Union + +import einops +import torch +from diffusers.models.autoencoders import AutoencoderKLWan + +from invokeai.app.invocations.baseinvocation import BaseInvocation, Classification, invocation +from invokeai.app.invocations.fields import ( + FieldDescriptions, + ImageField, + Input, + InputField, + WithBoard, + WithMetadata, +) +from invokeai.app.invocations.model import VAEField +from invokeai.app.invocations.primitives import LatentsOutput +from invokeai.app.services.shared.invocation_context import InvocationContext +from invokeai.backend.flux.modules.autoencoder import AutoEncoder as FluxAutoEncoder +from invokeai.backend.model_manager.load.load_base import LoadedModel +from invokeai.backend.stable_diffusion.diffusers_pipeline import image_resized_to_grid_as_tensor +from invokeai.backend.util.devices import TorchDevice +from invokeai.backend.util.vae_working_memory import estimate_vae_working_memory_flux + +AnimaVAE = Union[AutoencoderKLWan, FluxAutoEncoder] + + +@invocation( + "anima_i2l", + title="Image to Latents - Anima", + tags=["image", "latents", "vae", 
"i2l", "anima"], + category="image", + version="1.0.0", + classification=Classification.Prototype, +) +class AnimaImageToLatentsInvocation(BaseInvocation, WithMetadata, WithBoard): + """Generates latents from an image using the Anima VAE (supports Wan 2.1 and FLUX VAE).""" + + image: ImageField = InputField(description="The image to encode.") + vae: VAEField = InputField(description=FieldDescriptions.vae, input=Input.Connection) + + @staticmethod + def vae_encode(vae_info: LoadedModel, image_tensor: torch.Tensor) -> torch.Tensor: + if not isinstance(vae_info.model, (AutoencoderKLWan, FluxAutoEncoder)): + raise TypeError( + f"Expected AutoencoderKLWan or FluxAutoEncoder for Anima VAE, got {type(vae_info.model).__name__}." + ) + + estimated_working_memory = estimate_vae_working_memory_flux( + operation="encode", + image_tensor=image_tensor, + vae=vae_info.model, + ) + + with vae_info.model_on_device(working_mem_bytes=estimated_working_memory) as (_, vae): + if not isinstance(vae, (AutoencoderKLWan, FluxAutoEncoder)): + raise TypeError( + f"Expected AutoencoderKLWan or FluxAutoEncoder, got {type(vae).__name__}." 
+ ) + + vae_dtype = next(iter(vae.parameters())).dtype + image_tensor = image_tensor.to(device=TorchDevice.choose_torch_device(), dtype=vae_dtype) + + with torch.inference_mode(): + if isinstance(vae, FluxAutoEncoder): + # FLUX VAE handles scaling internally + generator = torch.Generator(device=TorchDevice.choose_torch_device()).manual_seed(0) + latents = vae.encode(image_tensor, sample=True, generator=generator) + else: + # AutoencoderKLWan expects 5D input [B, C, T, H, W] + if image_tensor.ndim == 4: + image_tensor = image_tensor.unsqueeze(2) # [B, C, H, W] -> [B, C, 1, H, W] + + encoded = vae.encode(image_tensor, return_dict=False)[0] + latents = encoded.sample().to(dtype=vae_dtype) + + # Normalize to denoiser space: (latents - mean) / std + # This is the inverse of the denormalization in anima_latents_to_image.py + latents_mean = torch.tensor(vae.config.latents_mean).view(1, -1, 1, 1, 1).to(latents) + latents_std = torch.tensor(vae.config.latents_std).view(1, -1, 1, 1, 1).to(latents) + latents = (latents - latents_mean) / latents_std + + # Remove temporal dimension: [B, C, 1, H, W] -> [B, C, H, W] + if latents.ndim == 5: + latents = latents.squeeze(2) + + return latents + + @torch.no_grad() + def invoke(self, context: InvocationContext) -> LatentsOutput: + image = context.images.get_pil(self.image.image_name) + + image_tensor = image_resized_to_grid_as_tensor(image.convert("RGB")) + if image_tensor.dim() == 3: + image_tensor = einops.rearrange(image_tensor, "c h w -> 1 c h w") + + vae_info = context.models.load(self.vae.vae) + if not isinstance(vae_info.model, (AutoencoderKLWan, FluxAutoEncoder)): + raise TypeError( + f"Expected AutoencoderKLWan or FluxAutoEncoder for Anima VAE, got {type(vae_info.model).__name__}." 
+ ) + + context.util.signal_progress("Running Anima VAE encode") + latents = self.vae_encode(vae_info=vae_info, image_tensor=image_tensor) + + latents = latents.to("cpu") + name = context.tensors.save(tensor=latents) + return LatentsOutput.build(latents_name=name, latents=latents, seed=None) diff --git a/invokeai/app/invocations/metadata.py b/invokeai/app/invocations/metadata.py index bc13b72c7bb..29e8b3d69b2 100644 --- a/invokeai/app/invocations/metadata.py +++ b/invokeai/app/invocations/metadata.py @@ -166,6 +166,10 @@ def invoke(self, context: InvocationContext) -> MetadataOutput: "z_image_img2img", "z_image_inpaint", "z_image_outpaint", + "anima_txt2img", + "anima_img2img", + "anima_inpaint", + "anima_outpaint", ] diff --git a/invokeai/frontend/web/public/locales/en.json b/invokeai/frontend/web/public/locales/en.json index 58be5430a26..c66be903997 100644 --- a/invokeai/frontend/web/public/locales/en.json +++ b/invokeai/frontend/web/public/locales/en.json @@ -1197,6 +1197,10 @@ "typePhraseHere": "Type phrase here", "t5Encoder": "T5 Encoder", "qwen3Encoder": "Qwen3 Encoder", + "animaVae": "VAE", + "animaVaePlaceholder": "Select Anima-compatible VAE", + "animaQwen3Encoder": "Qwen3 0.6B Encoder", + "animaQwen3EncoderPlaceholder": "Select Qwen3 0.6B encoder", "zImageVae": "VAE (optional)", "zImageVaePlaceholder": "From VAE source model", "zImageQwen3Encoder": "Qwen3 Encoder (optional)", diff --git a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/modelSelected.ts b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/modelSelected.ts index 20057472ca8..2d431f86939 100644 --- a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/modelSelected.ts +++ b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/modelSelected.ts @@ -4,6 +4,8 @@ import { bboxSyncedToOptimalDimension, rgRefImageModelChanged } from 'features/c import { buildSelectIsStaging, selectCanvasSessionId } 
from 'features/controlLayers/store/canvasStagingAreaSlice'; import { loraIsEnabledChanged } from 'features/controlLayers/store/lorasSlice'; import { + animaQwen3EncoderModelSelected, + animaVaeModelSelected, kleinQwen3EncoderModelSelected, kleinVaeModelSelected, modelChanged, @@ -39,6 +41,7 @@ import { toast } from 'features/toast/toast'; import { t } from 'i18next'; import { modelConfigsAdapterSelectors, selectModelConfigsQuery } from 'services/api/endpoints/models'; import { + selectAnimaVAEModels, selectFluxVAEModels, selectGlobalRefImageModels, selectQwen3EncoderModels, @@ -155,6 +158,53 @@ export const addModelSelectedListener = (startAppListening: AppStartListening) = } } + // handle incompatible Anima models - clear if switching away from anima + const { animaVaeModel, animaQwen3EncoderModel } = state.params; + if (newBase !== 'anima') { + if (animaVaeModel) { + dispatch(animaVaeModelSelected(null)); + modelsUpdatedDisabledOrCleared += 1; + } + if (animaQwen3EncoderModel) { + dispatch(animaQwen3EncoderModelSelected(null)); + modelsUpdatedDisabledOrCleared += 1; + } + } else { + // Switching to Anima - set defaults if no valid configuration exists + const hasValidConfig = animaVaeModel && animaQwen3EncoderModel; + + if (!hasValidConfig) { + const availableQwen3Encoders = selectQwen3EncoderModels(state); + const availableAnimaVAEs = selectAnimaVAEModels(state); + + if (availableQwen3Encoders.length > 0 && availableAnimaVAEs.length > 0) { + const qwen3Encoder = availableQwen3Encoders[0]; + const fluxVAE = availableAnimaVAEs[0]; + + if (qwen3Encoder && !animaQwen3EncoderModel) { + dispatch( + animaQwen3EncoderModelSelected({ + key: qwen3Encoder.key, + name: qwen3Encoder.name, + base: qwen3Encoder.base, + }) + ); + } + if (fluxVAE && !animaVaeModel) { + dispatch( + animaVaeModelSelected({ + key: fluxVAE.key, + hash: fluxVAE.hash, + name: fluxVAE.name, + base: fluxVAE.base, + type: fluxVAE.type, + }) + ); + } + } + } + } + // handle incompatible FLUX.2 Klein 
models - clear if switching away from flux2 const { kleinVaeModel, kleinQwen3EncoderModel } = state.params; if (newBase !== 'flux2') { diff --git a/invokeai/frontend/web/src/features/controlLayers/store/paramsSlice.ts b/invokeai/frontend/web/src/features/controlLayers/store/paramsSlice.ts index e82cf4de9c8..373325c9b84 100644 --- a/invokeai/frontend/web/src/features/controlLayers/store/paramsSlice.ts +++ b/invokeai/frontend/web/src/features/controlLayers/store/paramsSlice.ts @@ -500,6 +500,8 @@ const resetState = (state: ParamsState): ParamsState => { newState.zImageVaeModel = oldState.zImageVaeModel; newState.zImageQwen3EncoderModel = oldState.zImageQwen3EncoderModel; newState.zImageQwen3SourceModel = oldState.zImageQwen3SourceModel; + newState.animaVaeModel = oldState.animaVaeModel; + newState.animaQwen3EncoderModel = oldState.animaQwen3EncoderModel; newState.kleinVaeModel = oldState.kleinVaeModel; newState.kleinQwen3EncoderModel = oldState.kleinQwen3EncoderModel; return newState; @@ -576,15 +578,11 @@ export const { syncedToOptimalDimension, paramsReset, + animaVaeModelSelected, + animaQwen3EncoderModelSelected, + setAnimaScheduler, } = slice.actions; -/** @knipignore */ -export const animaVaeModelSelected = slice.actions.animaVaeModelSelected; -/** @knipignore */ -export const animaQwen3EncoderModelSelected = slice.actions.animaQwen3EncoderModelSelected; -/** @knipignore */ -export const setAnimaScheduler = slice.actions.setAnimaScheduler; - export const paramsSliceConfig: SliceConfig = { slice, schema: zParamsState, @@ -620,7 +618,6 @@ export const selectIsFLUX = createParamsSelector((params) => params.model?.base export const selectIsSD3 = createParamsSelector((params) => params.model?.base === 'sd-3'); export const selectIsCogView4 = createParamsSelector((params) => params.model?.base === 'cogview4'); export const selectIsZImage = createParamsSelector((params) => params.model?.base === 'z-image'); -/** @knipignore */ export const selectIsAnima = 
createParamsSelector((params) => params.model?.base === 'anima'); export const selectIsFlux2 = createParamsSelector((params) => params.model?.base === 'flux2'); export const selectIsFluxKontext = createParamsSelector((params) => { diff --git a/invokeai/frontend/web/src/features/metadata/parsing.tsx b/invokeai/frontend/web/src/features/metadata/parsing.tsx index 7d1d511a3c2..213bd92e934 100644 --- a/invokeai/frontend/web/src/features/metadata/parsing.tsx +++ b/invokeai/frontend/web/src/features/metadata/parsing.tsx @@ -8,6 +8,8 @@ import { getPrefixedId } from 'features/controlLayers/konva/util'; import { bboxHeightChanged, bboxWidthChanged, canvasMetadataRecalled } from 'features/controlLayers/store/canvasSlice'; import { loraAllDeleted, loraRecalled } from 'features/controlLayers/store/lorasSlice'; import { + animaQwen3EncoderModelSelected, + animaVaeModelSelected, heightChanged, kleinQwen3EncoderModelSelected, kleinVaeModelSelected, @@ -15,6 +17,7 @@ import { positivePromptChanged, refinerModelChanged, selectBase, + setAnimaScheduler, setCfgRescaleMultiplier, setCfgScale, setClipSkip, @@ -471,6 +474,11 @@ const Scheduler: SingleMetadataHandler = { if (value === 'euler' || value === 'heun' || value === 'lcm') { store.dispatch(setZImageScheduler(value)); } + } else if (base === 'anima') { + // Anima supports euler, heun, lcm + if (value === 'euler' || value === 'heun' || value === 'lcm') { + store.dispatch(setAnimaScheduler(value)); + } } else { // SD, SDXL, SD3, CogView4, etc. 
use the general scheduler store.dispatch(setScheduler(value)); @@ -933,6 +941,52 @@ const ZImageQwen3SourceModel: SingleMetadataHandler = { }; //#endregion ZImageQwen3SourceModel +//#region AnimaVAEModel +const AnimaVAEModel: SingleMetadataHandler = { + [SingleMetadataKey]: true, + type: 'AnimaVAEModel', + parse: async (metadata, store) => { + const raw = getProperty(metadata, 'vae'); + const parsed = await parseModelIdentifier(raw, store, 'vae'); + assert(parsed.type === 'vae'); + const base = selectBase(store.getState()); + assert(base === 'anima', 'AnimaVAEModel handler only works with Anima models'); + return Promise.resolve(parsed); + }, + recall: (value, store) => { + store.dispatch(animaVaeModelSelected(value)); + }, + i18nKey: 'metadata.vae', + LabelComponent: MetadataLabel, + ValueComponent: ({ value }: SingleMetadataValueProps) => ( + + ), +}; +//#endregion AnimaVAEModel + +//#region AnimaQwen3EncoderModel +const AnimaQwen3EncoderModel: SingleMetadataHandler = { + [SingleMetadataKey]: true, + type: 'AnimaQwen3EncoderModel', + parse: async (metadata, store) => { + const raw = getProperty(metadata, 'qwen3_encoder'); + const parsed = await parseModelIdentifier(raw, store, 'qwen3_encoder'); + assert(parsed.type === 'qwen3_encoder'); + const base = selectBase(store.getState()); + assert(base === 'anima', 'AnimaQwen3EncoderModel handler only works with Anima models'); + return Promise.resolve(parsed); + }, + recall: (value, store) => { + store.dispatch(animaQwen3EncoderModelSelected(value)); + }, + i18nKey: 'metadata.qwen3Encoder', + LabelComponent: MetadataLabel, + ValueComponent: ({ value }: SingleMetadataValueProps) => ( + + ), +}; +//#endregion AnimaQwen3EncoderModel + //#region KleinVAEModel const KleinVAEModel: SingleMetadataHandler = { [SingleMetadataKey]: true, @@ -1228,6 +1282,8 @@ export const ImageMetadataHandlers = { Qwen3EncoderModel, ZImageVAEModel, ZImageQwen3SourceModel, + AnimaVAEModel, + AnimaQwen3EncoderModel, KleinVAEModel, 
KleinQwen3EncoderModel, ZImageSeedVarianceEnabled, diff --git a/invokeai/frontend/web/src/features/nodes/util/graph/generation/addImageToImage.ts b/invokeai/frontend/web/src/features/nodes/util/graph/generation/addImageToImage.ts index 42730774992..1c69cdc0d11 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graph/generation/addImageToImage.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graph/generation/addImageToImage.ts @@ -21,7 +21,9 @@ type AddImageToImageArg = { state: RootState; manager: CanvasManager; l2i: Invocation; - i2l: Invocation<'i2l' | 'flux_vae_encode' | 'flux2_vae_encode' | 'sd3_i2l' | 'cogview4_i2l' | 'z_image_i2l'>; + i2l: Invocation< + 'i2l' | 'flux_vae_encode' | 'flux2_vae_encode' | 'sd3_i2l' | 'cogview4_i2l' | 'z_image_i2l' | 'anima_i2l' + >; noise?: Invocation<'noise'>; denoise: Invocation; vaeSource: Invocation; @@ -37,7 +39,16 @@ export const addImageToImage = async ({ denoise, vaeSource, }: AddImageToImageArg): Promise< - Invocation<'img_resize' | 'l2i' | 'flux_vae_decode' | 'flux2_vae_decode' | 'sd3_l2i' | 'cogview4_l2i' | 'z_image_l2i'> + Invocation< + | 'img_resize' + | 'l2i' + | 'flux_vae_decode' + | 'flux2_vae_decode' + | 'sd3_l2i' + | 'cogview4_l2i' + | 'z_image_l2i' + | 'anima_l2i' + > > => { const { denoising_start, denoising_end } = getDenoisingStartAndEnd(state); denoise.denoising_start = denoising_start; @@ -50,7 +61,8 @@ export const addImageToImage = async ({ denoise.type === 'flux_denoise' || denoise.type === 'flux2_denoise' || denoise.type === 'sd3_denoise' || - denoise.type === 'z_image_denoise' + denoise.type === 'z_image_denoise' || + denoise.type === 'anima_denoise' ) { denoise.width = scaledSize.width; denoise.height = scaledSize.height; diff --git a/invokeai/frontend/web/src/features/nodes/util/graph/generation/addInpaint.ts b/invokeai/frontend/web/src/features/nodes/util/graph/generation/addInpaint.ts index 837e7f09eaf..b53d79e2a5f 100644 --- 
a/invokeai/frontend/web/src/features/nodes/util/graph/generation/addInpaint.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graph/generation/addInpaint.ts @@ -24,7 +24,9 @@ type AddInpaintArg = { state: RootState; manager: CanvasManager; l2i: Invocation; - i2l: Invocation<'i2l' | 'flux_vae_encode' | 'flux2_vae_encode' | 'sd3_i2l' | 'cogview4_i2l' | 'z_image_i2l'>; + i2l: Invocation< + 'i2l' | 'flux_vae_encode' | 'flux2_vae_encode' | 'sd3_i2l' | 'cogview4_i2l' | 'z_image_i2l' | 'anima_i2l' + >; noise?: Invocation<'noise'>; denoise: Invocation; vaeSource: Invocation; @@ -58,7 +60,8 @@ export const addInpaint = async ({ denoise.type === 'flux_denoise' || denoise.type === 'flux2_denoise' || denoise.type === 'sd3_denoise' || - denoise.type === 'z_image_denoise' + denoise.type === 'z_image_denoise' || + denoise.type === 'anima_denoise' ) { denoise.width = scaledSize.width; denoise.height = scaledSize.height; diff --git a/invokeai/frontend/web/src/features/nodes/util/graph/generation/addOutpaint.ts b/invokeai/frontend/web/src/features/nodes/util/graph/generation/addOutpaint.ts index 481a44e4c61..14be20c70e3 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graph/generation/addOutpaint.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graph/generation/addOutpaint.ts @@ -60,7 +60,8 @@ export const addOutpaint = async ({ denoise.type === 'flux_denoise' || denoise.type === 'flux2_denoise' || denoise.type === 'sd3_denoise' || - denoise.type === 'z_image_denoise' + denoise.type === 'z_image_denoise' || + denoise.type === 'anima_denoise' ) { denoise.width = scaledSize.width; denoise.height = scaledSize.height; diff --git a/invokeai/frontend/web/src/features/nodes/util/graph/generation/addTextToImage.ts b/invokeai/frontend/web/src/features/nodes/util/graph/generation/addTextToImage.ts index d70457ac63e..9cfd5e3b552 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graph/generation/addTextToImage.ts +++ 
b/invokeai/frontend/web/src/features/nodes/util/graph/generation/addTextToImage.ts @@ -22,7 +22,14 @@ export const addTextToImage = ({ denoise, l2i, }: AddTextToImageArg): Invocation< - 'img_resize' | 'l2i' | 'flux_vae_decode' | 'flux2_vae_decode' | 'sd3_l2i' | 'cogview4_l2i' | 'z_image_l2i' + | 'img_resize' + | 'l2i' + | 'flux_vae_decode' + | 'flux2_vae_decode' + | 'sd3_l2i' + | 'cogview4_l2i' + | 'z_image_l2i' + | 'anima_l2i' > => { denoise.denoising_start = 0; denoise.denoising_end = 1; @@ -34,7 +41,8 @@ export const addTextToImage = ({ denoise.type === 'flux_denoise' || denoise.type === 'flux2_denoise' || denoise.type === 'sd3_denoise' || - denoise.type === 'z_image_denoise' + denoise.type === 'z_image_denoise' || + denoise.type === 'anima_denoise' ) { denoise.width = scaledSize.width; denoise.height = scaledSize.height; diff --git a/invokeai/frontend/web/src/features/nodes/util/graph/generation/buildAnimaGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graph/generation/buildAnimaGraph.ts index 6dcc139fa4a..6b1c06e7f40 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graph/generation/buildAnimaGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graph/generation/buildAnimaGraph.ts @@ -1,4 +1,3 @@ -import { objectEquals } from '@observ33r/object-equals'; import { logger } from 'app/logging/logger'; import { getPrefixedId } from 'features/controlLayers/konva/util'; import { @@ -10,27 +9,27 @@ import { } from 'features/controlLayers/store/paramsSlice'; import { selectCanvasMetadata } from 'features/controlLayers/store/selectors'; import { fetchModelConfigWithTypeGuard } from 'features/metadata/util/modelFetchingHelpers'; +import { addImageToImage } from 'features/nodes/util/graph/generation/addImageToImage'; +import { addInpaint } from 'features/nodes/util/graph/generation/addInpaint'; import { addNSFWChecker } from 'features/nodes/util/graph/generation/addNSFWChecker'; +import { addOutpaint } from 
'features/nodes/util/graph/generation/addOutpaint'; +import { addTextToImage } from 'features/nodes/util/graph/generation/addTextToImage'; import { addWatermarker } from 'features/nodes/util/graph/generation/addWatermarker'; import { Graph } from 'features/nodes/util/graph/generation/Graph'; -import { - getOriginalAndScaledSizesForTextToImage, - selectCanvasOutputFields, - selectPresetModifiedPrompts, -} from 'features/nodes/util/graph/graphBuilderUtils'; +import { selectCanvasOutputFields, selectPresetModifiedPrompts } from 'features/nodes/util/graph/graphBuilderUtils'; import type { GraphBuilderArg, GraphBuilderReturn, ImageOutputNodes } from 'features/nodes/util/graph/types'; -import { UnsupportedGenerationModeError } from 'features/nodes/util/graph/types'; import { selectActiveTab } from 'features/ui/store/uiSelectors'; import type { Invocation } from 'services/api/types'; import { isNonRefinerMainModelConfig } from 'services/api/types'; +import type { Equals } from 'tsafe'; import { assert } from 'tsafe'; const log = logger('system'); export const buildAnimaGraph = async (arg: GraphBuilderArg): Promise => { - const { generationMode, state } = arg; + const { generationMode, state, manager } = arg; - log.debug({ generationMode }, 'Building Anima graph'); + log.debug({ generationMode, manager: manager?.id }, 'Building Anima graph'); const model = selectMainModelConfig(state); assert(model, 'No model selected'); @@ -53,15 +52,6 @@ export const buildAnimaGraph = async (arg: GraphBuilderArg): Promise; - if (!objectEquals(scaledSize, originalSize)) { - const resizeImageToOriginalSize = g.addNode({ - id: getPrefixedId('resize_image_to_original_size'), - type: 'img_resize', - ...originalSize, + let canvasOutput: Invocation = l2i; + + if (generationMode === 'txt2img') { + canvasOutput = addTextToImage({ + g, + state, + denoise, + l2i, + }); + g.upsertMetadata({ generation_mode: 'anima_txt2img' }); + } else if (generationMode === 'img2img') { + assert(manager !== null); 
+ const i2l = g.addNode({ + type: 'anima_i2l', + id: getPrefixedId('anima_i2l'), + }); + + canvasOutput = await addImageToImage({ + g, + state, + manager, + denoise, + l2i, + i2l, + vaeSource: modelLoader, + }); + g.upsertMetadata({ generation_mode: 'anima_img2img' }); + } else if (generationMode === 'inpaint') { + assert(manager !== null); + const i2l = g.addNode({ + type: 'anima_i2l', + id: getPrefixedId('anima_i2l'), + }); + + canvasOutput = await addInpaint({ + g, + state, + manager, + l2i, + i2l, + denoise, + vaeSource: modelLoader, + modelLoader, + seed, + }); + g.upsertMetadata({ generation_mode: 'anima_inpaint' }); + } else if (generationMode === 'outpaint') { + assert(manager !== null); + const i2l = g.addNode({ + type: 'anima_i2l', + id: getPrefixedId('anima_i2l'), + }); + + canvasOutput = await addOutpaint({ + g, + state, + manager, + l2i, + i2l, + denoise, + vaeSource: modelLoader, + modelLoader, + seed, }); - g.addEdge(l2i, 'image', resizeImageToOriginalSize, 'image'); - canvasOutput = resizeImageToOriginalSize; + g.upsertMetadata({ generation_mode: 'anima_outpaint' }); } else { - canvasOutput = l2i; + assert>(false); } if (state.system.shouldUseNSFWChecker) { diff --git a/invokeai/frontend/web/src/features/nodes/util/graph/graphBuilderUtils.ts b/invokeai/frontend/web/src/features/nodes/util/graph/graphBuilderUtils.ts index e48a6ee4503..fdc3c2a5a2d 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graph/graphBuilderUtils.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graph/graphBuilderUtils.ts @@ -257,6 +257,17 @@ export const getDenoisingStartAndEnd = (state: RootState): { denoising_start: nu }; } } + case 'anima': { + // Anima uses a fixed shift=3.0 which makes the sigma schedule highly non-linear. + // Without rescaling, most of the visual 'change' is concentrated in the high denoise + // strength range (>0.8). The exponent 0.2 spreads the effective range more evenly, + // matching the approach used for FLUX and SD3. 
+ const animaExponent = optimizedDenoisingEnabled ? 0.2 : 1; + return { + denoising_start: 1 - denoisingStrength ** animaExponent, + denoising_end: 1, + }; + } case 'sd-1': case 'sd-2': case 'cogview4': diff --git a/invokeai/frontend/web/src/features/nodes/util/graph/types.ts b/invokeai/frontend/web/src/features/nodes/util/graph/types.ts index 2c307f3a8d7..f02a7bf6f8c 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graph/types.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graph/types.ts @@ -24,7 +24,8 @@ export type LatentToImageNodes = | 'flux2_vae_decode' | 'sd3_l2i' | 'cogview4_l2i' - | 'z_image_l2i'; + | 'z_image_l2i' + | 'anima_l2i'; export type ImageToLatentsNodes = | 'i2l' @@ -32,7 +33,8 @@ export type ImageToLatentsNodes = | 'flux2_vae_encode' | 'sd3_i2l' | 'cogview4_i2l' - | 'z_image_i2l'; + | 'z_image_i2l' + | 'anima_i2l'; export type DenoiseLatentsNodes = | 'denoise_latents' @@ -40,7 +42,8 @@ export type DenoiseLatentsNodes = | 'flux2_denoise' | 'sd3_denoise' | 'cogview4_denoise' - | 'z_image_denoise'; + | 'z_image_denoise' + | 'anima_denoise'; export type MainModelLoaderNodes = | 'main_model_loader' diff --git a/invokeai/frontend/web/src/features/parameters/components/Advanced/ParamAnimaModelSelect.tsx b/invokeai/frontend/web/src/features/parameters/components/Advanced/ParamAnimaModelSelect.tsx new file mode 100644 index 00000000000..743558311e2 --- /dev/null +++ b/invokeai/frontend/web/src/features/parameters/components/Advanced/ParamAnimaModelSelect.tsx @@ -0,0 +1,116 @@ +import { Combobox, FormControl, FormLabel } from '@invoke-ai/ui-library'; +import { useAppDispatch, useAppSelector } from 'app/store/storeHooks'; +import { useModelCombobox } from 'common/hooks/useModelCombobox'; +import { + animaQwen3EncoderModelSelected, + animaVaeModelSelected, + selectAnimaQwen3EncoderModel, + selectAnimaVaeModel, +} from 'features/controlLayers/store/paramsSlice'; +import { zModelIdentifierField } from 'features/nodes/types/common'; +import { 
memo, useCallback } from 'react'; +import { useTranslation } from 'react-i18next'; +import { useAnimaVAEModels, useQwen3EncoderModels } from 'services/api/hooks/modelsByType'; +import type { Qwen3EncoderModelConfig, VAEModelConfig } from 'services/api/types'; + +/** + * Anima VAE Model Select - uses Anima-base VAE models (QwenImage/Wan 2.1 VAE) + */ +const ParamAnimaVaeModelSelect = memo(() => { + const dispatch = useAppDispatch(); + const { t } = useTranslation(); + const animaVaeModel = useAppSelector(selectAnimaVaeModel); + const [modelConfigs, { isLoading }] = useAnimaVAEModels(); + + const _onChange = useCallback( + (model: VAEModelConfig | null) => { + if (model) { + dispatch(animaVaeModelSelected(zModelIdentifierField.parse(model))); + } else { + dispatch(animaVaeModelSelected(null)); + } + }, + [dispatch] + ); + + const { options, value, onChange, noOptionsMessage } = useModelCombobox({ + modelConfigs, + onChange: _onChange, + selectedModel: animaVaeModel, + isLoading, + }); + + return ( + + {t('modelManager.animaVae')} + + + ); +}); + +ParamAnimaVaeModelSelect.displayName = 'ParamAnimaVaeModelSelect'; + +/** + * Anima Qwen3 0.6B Encoder Model Select + */ +const ParamAnimaQwen3EncoderModelSelect = memo(() => { + const dispatch = useAppDispatch(); + const { t } = useTranslation(); + const animaQwen3EncoderModel = useAppSelector(selectAnimaQwen3EncoderModel); + const [modelConfigs, { isLoading }] = useQwen3EncoderModels(); + + const _onChange = useCallback( + (model: Qwen3EncoderModelConfig | null) => { + if (model) { + dispatch(animaQwen3EncoderModelSelected(zModelIdentifierField.parse(model))); + } else { + dispatch(animaQwen3EncoderModelSelected(null)); + } + }, + [dispatch] + ); + + const { options, value, onChange, noOptionsMessage } = useModelCombobox({ + modelConfigs, + onChange: _onChange, + selectedModel: animaQwen3EncoderModel, + isLoading, + }); + + return ( + + {t('modelManager.animaQwen3Encoder')} + + + ); +}); + 
+ParamAnimaQwen3EncoderModelSelect.displayName = 'ParamAnimaQwen3EncoderModelSelect'; + +/** + * Combined component for Anima model selection (VAE + Qwen3 Encoder) + */ +const ParamAnimaModelSelect = () => { + return ( + <> + + + + ); +}; + +export default memo(ParamAnimaModelSelect); diff --git a/invokeai/frontend/web/src/features/parameters/components/Core/ParamAnimaScheduler.tsx b/invokeai/frontend/web/src/features/parameters/components/Core/ParamAnimaScheduler.tsx new file mode 100644 index 00000000000..fbb1819b4e7 --- /dev/null +++ b/invokeai/frontend/web/src/features/parameters/components/Core/ParamAnimaScheduler.tsx @@ -0,0 +1,45 @@ +import type { ComboboxOnChange, ComboboxOption } from '@invoke-ai/ui-library'; +import { Combobox, FormControl, FormLabel } from '@invoke-ai/ui-library'; +import { useAppDispatch, useAppSelector } from 'app/store/storeHooks'; +import { InformationalPopover } from 'common/components/InformationalPopover/InformationalPopover'; +import { selectAnimaScheduler, setAnimaScheduler } from 'features/controlLayers/store/paramsSlice'; +import { isParameterZImageScheduler } from 'features/parameters/types/parameterSchemas'; +import { memo, useCallback, useMemo } from 'react'; +import { useTranslation } from 'react-i18next'; + +// Anima scheduler options (same flow-matching schedulers as Z-Image) +const ANIMA_SCHEDULER_OPTIONS: ComboboxOption[] = [ + { value: 'euler', label: 'Euler' }, + { value: 'heun', label: 'Heun (2nd order)' }, + { value: 'lcm', label: 'LCM' }, +]; + +const ParamAnimaScheduler = () => { + const dispatch = useAppDispatch(); + const { t } = useTranslation(); + const animaScheduler = useAppSelector(selectAnimaScheduler); + + const onChange = useCallback( + (v) => { + // Reuse Z-Image scheduler type guard since the values are identical + if (!isParameterZImageScheduler(v?.value)) { + return; + } + dispatch(setAnimaScheduler(v.value)); + }, + [dispatch] + ); + + const value = useMemo(() => ANIMA_SCHEDULER_OPTIONS.find((o) => 
o.value === animaScheduler), [animaScheduler]); + + return ( + + + {t('parameters.scheduler')} + + + + ); +}; + +export default memo(ParamAnimaScheduler); diff --git a/invokeai/frontend/web/src/features/queue/hooks/useEnqueueCanvas.ts b/invokeai/frontend/web/src/features/queue/hooks/useEnqueueCanvas.ts index 652cf4c5b24..b32ea8e3030 100644 --- a/invokeai/frontend/web/src/features/queue/hooks/useEnqueueCanvas.ts +++ b/invokeai/frontend/web/src/features/queue/hooks/useEnqueueCanvas.ts @@ -8,6 +8,7 @@ import { useCanvasManagerSafe } from 'features/controlLayers/contexts/CanvasMana import type { CanvasManager } from 'features/controlLayers/konva/CanvasManager'; import { positivePromptAddedToHistory, selectPositivePrompt } from 'features/controlLayers/store/paramsSlice'; import { prepareLinearUIBatch } from 'features/nodes/util/graph/buildLinearBatchConfig'; +import { buildAnimaGraph } from 'features/nodes/util/graph/generation/buildAnimaGraph'; import { buildCogView4Graph } from 'features/nodes/util/graph/generation/buildCogView4Graph'; import { buildFLUXGraph } from 'features/nodes/util/graph/generation/buildFLUXGraph'; import { buildSD1Graph } from 'features/nodes/util/graph/generation/buildSD1Graph'; @@ -59,6 +60,8 @@ const enqueueCanvas = async (store: AppStore, canvasManager: CanvasManager, prep return await buildCogView4Graph(graphBuilderArg); case 'z-image': return await buildZImageGraph(graphBuilderArg); + case 'anima': + return await buildAnimaGraph(graphBuilderArg); default: assert(false, `No graph builders for base ${base}`); } diff --git a/invokeai/frontend/web/src/features/settingsAccordions/components/AdvancedSettingsAccordion/AdvancedSettingsAccordion.tsx b/invokeai/frontend/web/src/features/settingsAccordions/components/AdvancedSettingsAccordion/AdvancedSettingsAccordion.tsx index eddf82e9221..44286780784 100644 --- a/invokeai/frontend/web/src/features/settingsAccordions/components/AdvancedSettingsAccordion/AdvancedSettingsAccordion.tsx +++ 
b/invokeai/frontend/web/src/features/settingsAccordions/components/AdvancedSettingsAccordion/AdvancedSettingsAccordion.tsx @@ -4,6 +4,7 @@ import { skipToken } from '@reduxjs/toolkit/query'; import { createMemoizedSelector } from 'app/store/createMemoizedSelector'; import { useAppSelector } from 'app/store/storeHooks'; import { + selectIsAnima, selectIsFLUX, selectIsFlux2, selectIsSD3, @@ -11,6 +12,7 @@ import { selectParamsSlice, selectVAEKey, } from 'features/controlLayers/store/paramsSlice'; +import ParamAnimaModelSelect from 'features/parameters/components/Advanced/ParamAnimaModelSelect'; import ParamCFGRescaleMultiplier from 'features/parameters/components/Advanced/ParamCFGRescaleMultiplier'; import ParamCLIPEmbedModelSelect from 'features/parameters/components/Advanced/ParamCLIPEmbedModelSelect'; import ParamCLIPGEmbedModelSelect from 'features/parameters/components/Advanced/ParamCLIPGEmbedModelSelect'; @@ -45,6 +47,7 @@ export const AdvancedSettingsAccordion = memo(() => { const isFlux2 = useAppSelector(selectIsFlux2); const isSD3 = useAppSelector(selectIsSD3); const isZImage = useAppSelector(selectIsZImage); + const isAnima = useAppSelector(selectIsAnima); const selectBadges = useMemo( () => @@ -94,13 +97,13 @@ export const AdvancedSettingsAccordion = memo(() => { return ( - {!isZImage && !isFlux2 && ( + {!isZImage && !isAnima && !isFlux2 && ( {isFLUX ? 
: } {!isFLUX && !isSD3 && } )} - {!isFLUX && !isFlux2 && !isSD3 && !isZImage && ( + {!isFLUX && !isFlux2 && !isSD3 && !isZImage && !isAnima && ( <> @@ -142,6 +145,11 @@ export const AdvancedSettingsAccordion = memo(() => { )} + {isAnima && ( + + + + )} ); diff --git a/invokeai/frontend/web/src/features/settingsAccordions/components/GenerationSettingsAccordion/GenerationSettingsAccordion.tsx b/invokeai/frontend/web/src/features/settingsAccordions/components/GenerationSettingsAccordion/GenerationSettingsAccordion.tsx index ffdbc4ce778..50ddcf2bbae 100644 --- a/invokeai/frontend/web/src/features/settingsAccordions/components/GenerationSettingsAccordion/GenerationSettingsAccordion.tsx +++ b/invokeai/frontend/web/src/features/settingsAccordions/components/GenerationSettingsAccordion/GenerationSettingsAccordion.tsx @@ -6,6 +6,7 @@ import { useAppSelector } from 'app/store/storeHooks'; import { selectLoRAsSlice } from 'features/controlLayers/store/lorasSlice'; import { selectFluxDypePreset, + selectIsAnima, selectIsCogView4, selectIsFLUX, selectIsFlux2, @@ -14,6 +15,7 @@ import { } from 'features/controlLayers/store/paramsSlice'; import { LoRAList } from 'features/lora/components/LoRAList'; import LoRASelect from 'features/lora/components/LoRASelect'; +import ParamAnimaScheduler from 'features/parameters/components/Core/ParamAnimaScheduler'; import ParamCFGScale from 'features/parameters/components/Core/ParamCFGScale'; import ParamFluxDypeExponent from 'features/parameters/components/Core/ParamFluxDypeExponent'; import ParamFluxDypePreset from 'features/parameters/components/Core/ParamFluxDypePreset'; @@ -44,6 +46,7 @@ export const GenerationSettingsAccordion = memo(() => { const isSD3 = useAppSelector(selectIsSD3); const isCogView4 = useAppSelector(selectIsCogView4); const isZImage = useAppSelector(selectIsZImage); + const isAnima = useAppSelector(selectIsAnima); const fluxDypePreset = useAppSelector(selectFluxDypePreset); const selectBadges = useMemo( @@ -82,9 +85,10 @@ 
export const GenerationSettingsAccordion = memo(() => { - {!isFLUX && !isFlux2 && !isSD3 && !isCogView4 && !isZImage && } + {!isFLUX && !isFlux2 && !isSD3 && !isCogView4 && !isZImage && !isAnima && } {isFLUX && } {isZImage && } + {isAnima && } {(isFLUX || isFlux2) && modelConfig && !isFluxFillMainModelModelConfig(modelConfig) && } {!isFLUX && !isFlux2 && } diff --git a/invokeai/frontend/web/src/services/api/hooks/modelsByType.ts b/invokeai/frontend/web/src/services/api/hooks/modelsByType.ts index 98d7dd1e8df..9599440477f 100644 --- a/invokeai/frontend/web/src/services/api/hooks/modelsByType.ts +++ b/invokeai/frontend/web/src/services/api/hooks/modelsByType.ts @@ -11,6 +11,7 @@ import { } from 'services/api/endpoints/models'; import type { AnyModelConfig } from 'services/api/types'; import { + isAnimaVAEModelConfig, isCLIPEmbedModelConfigOrSubmodel, isControlLayerModelConfig, isControlNetModelConfig, @@ -68,6 +69,7 @@ export const useEmbeddingModels = buildModelsHook(isTIModelConfig); export const useVAEModels = () => buildModelsHook(isVAEModelConfigOrSubmodel)(); export const useFlux1VAEModels = () => buildModelsHook(isFlux1VAEModelConfig)(); export const useFlux2VAEModels = () => buildModelsHook(isFlux2VAEModelConfig)(); +export const useAnimaVAEModels = () => buildModelsHook(isAnimaVAEModelConfig)(); export const useZImageDiffusersModels = () => buildModelsHook(isZImageDiffusersMainModelConfig)(); export const useQwen3EncoderModels = () => buildModelsHook(isQwen3EncoderModelConfig)(); export const useGlobalReferenceImageModels = buildModelsHook( @@ -106,3 +108,4 @@ export const selectRegionalRefImageModels = buildModelsSelector( export const selectQwen3EncoderModels = buildModelsSelector(isQwen3EncoderModelConfig); export const selectZImageDiffusersModels = buildModelsSelector(isZImageDiffusersMainModelConfig); export const selectFluxVAEModels = buildModelsSelector(isFluxVAEModelConfig); +export const selectAnimaVAEModels = 
buildModelsSelector(isAnimaVAEModelConfig); diff --git a/invokeai/frontend/web/src/services/api/schema.ts b/invokeai/frontend/web/src/services/api/schema.ts index 469a5ab334d..e0aac0f8486 100644 --- a/invokeai/frontend/web/src/services/api/schema.ts +++ b/invokeai/frontend/web/src/services/api/schema.ts @@ -2646,6 +2646,8 @@ export type components = { * * Uses rectified flow sampling with shift=3.0 and the Cosmos Predict2 DiT * backbone with integrated LLM Adapter for text conditioning. + * + * Supports txt2img, img2img (via latents input), and inpainting (via denoise_mask). */ AnimaDenoiseInvocation: { /** @@ -2665,6 +2667,34 @@ export type components = { * @default true */ use_cache?: boolean; + /** + * @description Latents tensor + * @default null + */ + latents?: components["schemas"]["LatentsField"] | null; + /** + * @description A mask of the region to apply the denoising process to. Values of 0.0 represent the regions to be fully denoised, and 1.0 represent the regions to be preserved. + * @default null + */ + denoise_mask?: components["schemas"]["DenoiseMaskField"] | null; + /** + * Denoising Start + * @description When to start denoising, expressed a percentage of total steps + * @default 0 + */ + denoising_start?: number; + /** + * Denoising End + * @description When to stop denoising, expressed a percentage of total steps + * @default 1 + */ + denoising_end?: number; + /** + * Add Noise + * @description Add noise based on denoising start. + * @default true + */ + add_noise?: boolean; /** * Transformer * @description Anima transformer model. @@ -2725,6 +2755,55 @@ export type components = { */ type: "anima_denoise"; }; + /** + * Image to Latents - Anima + * @description Generates latents from an image using the Anima VAE (supports Wan 2.1 and FLUX VAE). 
+ */ + AnimaImageToLatentsInvocation: { + /** + * @description The board to save the image to + * @default null + */ + board?: components["schemas"]["BoardField"] | null; + /** + * @description Optional metadata to be saved with the image + * @default null + */ + metadata?: components["schemas"]["MetadataField"] | null; + /** + * Id + * @description The id of this instance of an invocation. Must be unique among all instances of invocations. + */ + id: string; + /** + * Is Intermediate + * @description Whether or not this is an intermediate invocation. + * @default false + */ + is_intermediate?: boolean; + /** + * Use Cache + * @description Whether or not to use the cache + * @default true + */ + use_cache?: boolean; + /** + * @description The image to encode. + * @default null + */ + image?: components["schemas"]["ImageField"] | null; + /** + * @description VAE + * @default null + */ + vae?: components["schemas"]["VAEField"] | null; + /** + * type + * @default anima_i2l + * @constant + */ + type: "anima_i2l"; + }; /** * Latents to Image - Anima * @description Generates an image from latents using the Anima VAE. 
@@ -6708,7 +6787,7 @@ export type components = { * @description The generation mode that output this image * @default null */ - generation_mode?: ("txt2img" | "img2img" | "inpaint" | "outpaint" | "sdxl_txt2img" | "sdxl_img2img" | "sdxl_inpaint" | "sdxl_outpaint" | "flux_txt2img" | "flux_img2img" | "flux_inpaint" | "flux_outpaint" | "flux2_txt2img" | "flux2_img2img" | "flux2_inpaint" | "flux2_outpaint" | "sd3_txt2img" | "sd3_img2img" | "sd3_inpaint" | "sd3_outpaint" | "cogview4_txt2img" | "cogview4_img2img" | "cogview4_inpaint" | "cogview4_outpaint" | "z_image_txt2img" | "z_image_img2img" | "z_image_inpaint" | "z_image_outpaint") | null; + generation_mode?: ("txt2img" | "img2img" | "inpaint" | "outpaint" | "sdxl_txt2img" | "sdxl_img2img" | "sdxl_inpaint" | "sdxl_outpaint" | "flux_txt2img" | "flux_img2img" | "flux_inpaint" | "flux_outpaint" | "flux2_txt2img" | "flux2_img2img" | "flux2_inpaint" | "flux2_outpaint" | "sd3_txt2img" | "sd3_img2img" | "sd3_inpaint" | "sd3_outpaint" | "cogview4_txt2img" | "cogview4_img2img" | "cogview4_inpaint" | "cogview4_outpaint" | "z_image_txt2img" | "z_image_img2img" | "z_image_inpaint" | "z_image_outpaint" | "anima_txt2img" | "anima_img2img" | "anima_inpaint" | "anima_outpaint") | null; /** * Positive Prompt * @description The positive prompt parameter @@ -10945,7 +11024,7 @@ export type components = { * @description The nodes in this graph */ nodes?: { - [key: string]: components["schemas"]["AddInvocation"] | components["schemas"]["AlphaMaskToTensorInvocation"] | components["schemas"]["AnimaDenoiseInvocation"] | components["schemas"]["AnimaLatentsToImageInvocation"] | components["schemas"]["AnimaModelLoaderInvocation"] | components["schemas"]["AnimaTextEncoderInvocation"] | components["schemas"]["ApplyMaskTensorToImageInvocation"] | components["schemas"]["ApplyMaskToImageInvocation"] | components["schemas"]["BlankImageInvocation"] | components["schemas"]["BlendLatentsInvocation"] | components["schemas"]["BooleanCollectionInvocation"] 
| components["schemas"]["BooleanInvocation"] | components["schemas"]["BoundingBoxInvocation"] | components["schemas"]["CLIPSkipInvocation"] | components["schemas"]["CV2InfillInvocation"] | components["schemas"]["CalculateImageTilesEvenSplitInvocation"] | components["schemas"]["CalculateImageTilesInvocation"] | components["schemas"]["CalculateImageTilesMinimumOverlapInvocation"] | components["schemas"]["CannyEdgeDetectionInvocation"] | components["schemas"]["CanvasPasteBackInvocation"] | components["schemas"]["CanvasV2MaskAndCropInvocation"] | components["schemas"]["CenterPadCropInvocation"] | components["schemas"]["CogView4DenoiseInvocation"] | components["schemas"]["CogView4ImageToLatentsInvocation"] | components["schemas"]["CogView4LatentsToImageInvocation"] | components["schemas"]["CogView4ModelLoaderInvocation"] | components["schemas"]["CogView4TextEncoderInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["ColorInvocation"] | components["schemas"]["ColorMapInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["ConditioningCollectionInvocation"] | components["schemas"]["ConditioningInvocation"] | components["schemas"]["ContentShuffleInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["CoreMetadataInvocation"] | components["schemas"]["CreateDenoiseMaskInvocation"] | components["schemas"]["CreateGradientMaskInvocation"] | components["schemas"]["CropImageToBoundingBoxInvocation"] | components["schemas"]["CropLatentsCoreInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["DWOpenposeDetectionInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["DenoiseLatentsMetaInvocation"] | components["schemas"]["DepthAnythingDepthEstimationInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["DynamicPromptInvocation"] | 
components["schemas"]["ESRGANInvocation"] | components["schemas"]["ExpandMaskWithFadeInvocation"] | components["schemas"]["FLUXLoRACollectionLoader"] | components["schemas"]["FaceIdentifierInvocation"] | components["schemas"]["FaceMaskInvocation"] | components["schemas"]["FaceOffInvocation"] | components["schemas"]["FloatBatchInvocation"] | components["schemas"]["FloatCollectionInvocation"] | components["schemas"]["FloatGenerator"] | components["schemas"]["FloatInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["FloatMathInvocation"] | components["schemas"]["FloatToIntegerInvocation"] | components["schemas"]["Flux2DenoiseInvocation"] | components["schemas"]["Flux2KleinLoRACollectionLoader"] | components["schemas"]["Flux2KleinLoRALoaderInvocation"] | components["schemas"]["Flux2KleinModelLoaderInvocation"] | components["schemas"]["Flux2KleinTextEncoderInvocation"] | components["schemas"]["Flux2VaeDecodeInvocation"] | components["schemas"]["Flux2VaeEncodeInvocation"] | components["schemas"]["FluxControlLoRALoaderInvocation"] | components["schemas"]["FluxControlNetInvocation"] | components["schemas"]["FluxDenoiseInvocation"] | components["schemas"]["FluxDenoiseLatentsMetaInvocation"] | components["schemas"]["FluxFillInvocation"] | components["schemas"]["FluxIPAdapterInvocation"] | components["schemas"]["FluxKontextConcatenateImagesInvocation"] | components["schemas"]["FluxKontextInvocation"] | components["schemas"]["FluxLoRALoaderInvocation"] | components["schemas"]["FluxModelLoaderInvocation"] | components["schemas"]["FluxReduxInvocation"] | components["schemas"]["FluxTextEncoderInvocation"] | components["schemas"]["FluxVaeDecodeInvocation"] | components["schemas"]["FluxVaeEncodeInvocation"] | components["schemas"]["FreeUInvocation"] | components["schemas"]["GetMaskBoundingBoxInvocation"] | components["schemas"]["GroundingDinoInvocation"] | components["schemas"]["HEDEdgeDetectionInvocation"] | 
components["schemas"]["HeuristicResizeInvocation"] | components["schemas"]["IPAdapterInvocation"] | components["schemas"]["IdealSizeInvocation"] | components["schemas"]["ImageBatchInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["ImageChannelMultiplyInvocation"] | components["schemas"]["ImageChannelOffsetInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImageGenerator"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ImageInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ImageMaskToTensorInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["ImageNoiseInvocation"] | components["schemas"]["ImagePanelLayoutInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["IntegerBatchInvocation"] | components["schemas"]["IntegerCollectionInvocation"] | components["schemas"]["IntegerGenerator"] | components["schemas"]["IntegerInvocation"] | components["schemas"]["IntegerMathInvocation"] | components["schemas"]["InvertTensorMaskInvocation"] | components["schemas"]["InvokeAdjustImageHuePlusInvocation"] | components["schemas"]["InvokeEquivalentAchromaticLightnessInvocation"] | components["schemas"]["InvokeImageBlendInvocation"] | components["schemas"]["InvokeImageCompositorInvocation"] 
| components["schemas"]["InvokeImageDilateOrErodeInvocation"] | components["schemas"]["InvokeImageEnhanceInvocation"] | components["schemas"]["InvokeImageValueThresholdsInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["LaMaInfillInvocation"] | components["schemas"]["LatentsCollectionInvocation"] | components["schemas"]["LatentsInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["LineartAnimeEdgeDetectionInvocation"] | components["schemas"]["LineartEdgeDetectionInvocation"] | components["schemas"]["LlavaOnevisionVllmInvocation"] | components["schemas"]["LoRACollectionLoader"] | components["schemas"]["LoRALoaderInvocation"] | components["schemas"]["LoRASelectorInvocation"] | components["schemas"]["MLSDDetectionInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["MaskFromIDInvocation"] | components["schemas"]["MaskTensorToImageInvocation"] | components["schemas"]["MediaPipeFaceDetectionInvocation"] | components["schemas"]["MergeMetadataInvocation"] | components["schemas"]["MergeTilesToImageInvocation"] | components["schemas"]["MetadataFieldExtractorInvocation"] | components["schemas"]["MetadataFromImageInvocation"] | components["schemas"]["MetadataInvocation"] | components["schemas"]["MetadataItemInvocation"] | components["schemas"]["MetadataItemLinkedInvocation"] | components["schemas"]["MetadataToBoolCollectionInvocation"] | components["schemas"]["MetadataToBoolInvocation"] | components["schemas"]["MetadataToControlnetsInvocation"] | components["schemas"]["MetadataToFloatCollectionInvocation"] | components["schemas"]["MetadataToFloatInvocation"] | components["schemas"]["MetadataToIPAdaptersInvocation"] | components["schemas"]["MetadataToIntegerCollectionInvocation"] | 
components["schemas"]["MetadataToIntegerInvocation"] | components["schemas"]["MetadataToLorasCollectionInvocation"] | components["schemas"]["MetadataToLorasInvocation"] | components["schemas"]["MetadataToModelInvocation"] | components["schemas"]["MetadataToSDXLLorasInvocation"] | components["schemas"]["MetadataToSDXLModelInvocation"] | components["schemas"]["MetadataToSchedulerInvocation"] | components["schemas"]["MetadataToStringCollectionInvocation"] | components["schemas"]["MetadataToStringInvocation"] | components["schemas"]["MetadataToT2IAdaptersInvocation"] | components["schemas"]["MetadataToVAEInvocation"] | components["schemas"]["ModelIdentifierInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["NormalMapInvocation"] | components["schemas"]["PBRMapsInvocation"] | components["schemas"]["PairTileImageInvocation"] | components["schemas"]["PasteImageIntoBoundingBoxInvocation"] | components["schemas"]["PiDiNetEdgeDetectionInvocation"] | components["schemas"]["PromptTemplateInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["RandomFloatInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RectangleMaskInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["RoundInvocation"] | components["schemas"]["SD3DenoiseInvocation"] | components["schemas"]["SD3ImageToLatentsInvocation"] | components["schemas"]["SD3LatentsToImageInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["SDXLLoRACollectionLoader"] | components["schemas"]["SDXLLoRALoaderInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | 
components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["SaveImageInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["SchedulerInvocation"] | components["schemas"]["Sd3ModelLoaderInvocation"] | components["schemas"]["Sd3TextEncoderInvocation"] | components["schemas"]["SeamlessModeInvocation"] | components["schemas"]["SegmentAnythingInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["SpandrelImageToImageAutoscaleInvocation"] | components["schemas"]["SpandrelImageToImageInvocation"] | components["schemas"]["StringBatchInvocation"] | components["schemas"]["StringCollectionInvocation"] | components["schemas"]["StringGenerator"] | components["schemas"]["StringInvocation"] | components["schemas"]["StringJoinInvocation"] | components["schemas"]["StringJoinThreeInvocation"] | components["schemas"]["StringReplaceInvocation"] | components["schemas"]["StringSplitInvocation"] | components["schemas"]["StringSplitNegInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["T2IAdapterInvocation"] | components["schemas"]["TileToPropertiesInvocation"] | components["schemas"]["TiledMultiDiffusionDenoiseLatents"] | components["schemas"]["UnsharpMaskInvocation"] | components["schemas"]["VAELoaderInvocation"] | components["schemas"]["ZImageControlInvocation"] | components["schemas"]["ZImageDenoiseInvocation"] | components["schemas"]["ZImageDenoiseMetaInvocation"] | components["schemas"]["ZImageImageToLatentsInvocation"] | components["schemas"]["ZImageLatentsToImageInvocation"] | components["schemas"]["ZImageLoRACollectionLoader"] | components["schemas"]["ZImageLoRALoaderInvocation"] | components["schemas"]["ZImageModelLoaderInvocation"] | components["schemas"]["ZImageSeedVarianceEnhancerInvocation"] | components["schemas"]["ZImageTextEncoderInvocation"]; + [key: string]: components["schemas"]["AddInvocation"] | components["schemas"]["AlphaMaskToTensorInvocation"] | 
components["schemas"]["AnimaDenoiseInvocation"] | components["schemas"]["AnimaImageToLatentsInvocation"] | components["schemas"]["AnimaLatentsToImageInvocation"] | components["schemas"]["AnimaModelLoaderInvocation"] | components["schemas"]["AnimaTextEncoderInvocation"] | components["schemas"]["ApplyMaskTensorToImageInvocation"] | components["schemas"]["ApplyMaskToImageInvocation"] | components["schemas"]["BlankImageInvocation"] | components["schemas"]["BlendLatentsInvocation"] | components["schemas"]["BooleanCollectionInvocation"] | components["schemas"]["BooleanInvocation"] | components["schemas"]["BoundingBoxInvocation"] | components["schemas"]["CLIPSkipInvocation"] | components["schemas"]["CV2InfillInvocation"] | components["schemas"]["CalculateImageTilesEvenSplitInvocation"] | components["schemas"]["CalculateImageTilesInvocation"] | components["schemas"]["CalculateImageTilesMinimumOverlapInvocation"] | components["schemas"]["CannyEdgeDetectionInvocation"] | components["schemas"]["CanvasPasteBackInvocation"] | components["schemas"]["CanvasV2MaskAndCropInvocation"] | components["schemas"]["CenterPadCropInvocation"] | components["schemas"]["CogView4DenoiseInvocation"] | components["schemas"]["CogView4ImageToLatentsInvocation"] | components["schemas"]["CogView4LatentsToImageInvocation"] | components["schemas"]["CogView4ModelLoaderInvocation"] | components["schemas"]["CogView4TextEncoderInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["ColorInvocation"] | components["schemas"]["ColorMapInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["ConditioningCollectionInvocation"] | components["schemas"]["ConditioningInvocation"] | components["schemas"]["ContentShuffleInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["CoreMetadataInvocation"] | components["schemas"]["CreateDenoiseMaskInvocation"] | 
components["schemas"]["CreateGradientMaskInvocation"] | components["schemas"]["CropImageToBoundingBoxInvocation"] | components["schemas"]["CropLatentsCoreInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["DWOpenposeDetectionInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["DenoiseLatentsMetaInvocation"] | components["schemas"]["DepthAnythingDepthEstimationInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["ExpandMaskWithFadeInvocation"] | components["schemas"]["FLUXLoRACollectionLoader"] | components["schemas"]["FaceIdentifierInvocation"] | components["schemas"]["FaceMaskInvocation"] | components["schemas"]["FaceOffInvocation"] | components["schemas"]["FloatBatchInvocation"] | components["schemas"]["FloatCollectionInvocation"] | components["schemas"]["FloatGenerator"] | components["schemas"]["FloatInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["FloatMathInvocation"] | components["schemas"]["FloatToIntegerInvocation"] | components["schemas"]["Flux2DenoiseInvocation"] | components["schemas"]["Flux2KleinLoRACollectionLoader"] | components["schemas"]["Flux2KleinLoRALoaderInvocation"] | components["schemas"]["Flux2KleinModelLoaderInvocation"] | components["schemas"]["Flux2KleinTextEncoderInvocation"] | components["schemas"]["Flux2VaeDecodeInvocation"] | components["schemas"]["Flux2VaeEncodeInvocation"] | components["schemas"]["FluxControlLoRALoaderInvocation"] | components["schemas"]["FluxControlNetInvocation"] | components["schemas"]["FluxDenoiseInvocation"] | components["schemas"]["FluxDenoiseLatentsMetaInvocation"] | components["schemas"]["FluxFillInvocation"] | components["schemas"]["FluxIPAdapterInvocation"] | components["schemas"]["FluxKontextConcatenateImagesInvocation"] | components["schemas"]["FluxKontextInvocation"] | 
components["schemas"]["FluxLoRALoaderInvocation"] | components["schemas"]["FluxModelLoaderInvocation"] | components["schemas"]["FluxReduxInvocation"] | components["schemas"]["FluxTextEncoderInvocation"] | components["schemas"]["FluxVaeDecodeInvocation"] | components["schemas"]["FluxVaeEncodeInvocation"] | components["schemas"]["FreeUInvocation"] | components["schemas"]["GetMaskBoundingBoxInvocation"] | components["schemas"]["GroundingDinoInvocation"] | components["schemas"]["HEDEdgeDetectionInvocation"] | components["schemas"]["HeuristicResizeInvocation"] | components["schemas"]["IPAdapterInvocation"] | components["schemas"]["IdealSizeInvocation"] | components["schemas"]["ImageBatchInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["ImageChannelMultiplyInvocation"] | components["schemas"]["ImageChannelOffsetInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImageGenerator"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ImageInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ImageMaskToTensorInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["ImageNoiseInvocation"] | components["schemas"]["ImagePanelLayoutInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["InfillTileInvocation"] | 
components["schemas"]["IntegerBatchInvocation"] | components["schemas"]["IntegerCollectionInvocation"] | components["schemas"]["IntegerGenerator"] | components["schemas"]["IntegerInvocation"] | components["schemas"]["IntegerMathInvocation"] | components["schemas"]["InvertTensorMaskInvocation"] | components["schemas"]["InvokeAdjustImageHuePlusInvocation"] | components["schemas"]["InvokeEquivalentAchromaticLightnessInvocation"] | components["schemas"]["InvokeImageBlendInvocation"] | components["schemas"]["InvokeImageCompositorInvocation"] | components["schemas"]["InvokeImageDilateOrErodeInvocation"] | components["schemas"]["InvokeImageEnhanceInvocation"] | components["schemas"]["InvokeImageValueThresholdsInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["LaMaInfillInvocation"] | components["schemas"]["LatentsCollectionInvocation"] | components["schemas"]["LatentsInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["LineartAnimeEdgeDetectionInvocation"] | components["schemas"]["LineartEdgeDetectionInvocation"] | components["schemas"]["LlavaOnevisionVllmInvocation"] | components["schemas"]["LoRACollectionLoader"] | components["schemas"]["LoRALoaderInvocation"] | components["schemas"]["LoRASelectorInvocation"] | components["schemas"]["MLSDDetectionInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["MaskFromIDInvocation"] | components["schemas"]["MaskTensorToImageInvocation"] | components["schemas"]["MediaPipeFaceDetectionInvocation"] | components["schemas"]["MergeMetadataInvocation"] | components["schemas"]["MergeTilesToImageInvocation"] | components["schemas"]["MetadataFieldExtractorInvocation"] | components["schemas"]["MetadataFromImageInvocation"] | components["schemas"]["MetadataInvocation"] | 
components["schemas"]["MetadataItemInvocation"] | components["schemas"]["MetadataItemLinkedInvocation"] | components["schemas"]["MetadataToBoolCollectionInvocation"] | components["schemas"]["MetadataToBoolInvocation"] | components["schemas"]["MetadataToControlnetsInvocation"] | components["schemas"]["MetadataToFloatCollectionInvocation"] | components["schemas"]["MetadataToFloatInvocation"] | components["schemas"]["MetadataToIPAdaptersInvocation"] | components["schemas"]["MetadataToIntegerCollectionInvocation"] | components["schemas"]["MetadataToIntegerInvocation"] | components["schemas"]["MetadataToLorasCollectionInvocation"] | components["schemas"]["MetadataToLorasInvocation"] | components["schemas"]["MetadataToModelInvocation"] | components["schemas"]["MetadataToSDXLLorasInvocation"] | components["schemas"]["MetadataToSDXLModelInvocation"] | components["schemas"]["MetadataToSchedulerInvocation"] | components["schemas"]["MetadataToStringCollectionInvocation"] | components["schemas"]["MetadataToStringInvocation"] | components["schemas"]["MetadataToT2IAdaptersInvocation"] | components["schemas"]["MetadataToVAEInvocation"] | components["schemas"]["ModelIdentifierInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["NormalMapInvocation"] | components["schemas"]["PBRMapsInvocation"] | components["schemas"]["PairTileImageInvocation"] | components["schemas"]["PasteImageIntoBoundingBoxInvocation"] | components["schemas"]["PiDiNetEdgeDetectionInvocation"] | components["schemas"]["PromptTemplateInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["RandomFloatInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RectangleMaskInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | 
components["schemas"]["RoundInvocation"] | components["schemas"]["SD3DenoiseInvocation"] | components["schemas"]["SD3ImageToLatentsInvocation"] | components["schemas"]["SD3LatentsToImageInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["SDXLLoRACollectionLoader"] | components["schemas"]["SDXLLoRALoaderInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["SaveImageInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["SchedulerInvocation"] | components["schemas"]["Sd3ModelLoaderInvocation"] | components["schemas"]["Sd3TextEncoderInvocation"] | components["schemas"]["SeamlessModeInvocation"] | components["schemas"]["SegmentAnythingInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["SpandrelImageToImageAutoscaleInvocation"] | components["schemas"]["SpandrelImageToImageInvocation"] | components["schemas"]["StringBatchInvocation"] | components["schemas"]["StringCollectionInvocation"] | components["schemas"]["StringGenerator"] | components["schemas"]["StringInvocation"] | components["schemas"]["StringJoinInvocation"] | components["schemas"]["StringJoinThreeInvocation"] | components["schemas"]["StringReplaceInvocation"] | components["schemas"]["StringSplitInvocation"] | components["schemas"]["StringSplitNegInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["T2IAdapterInvocation"] | components["schemas"]["TileToPropertiesInvocation"] | components["schemas"]["TiledMultiDiffusionDenoiseLatents"] | components["schemas"]["UnsharpMaskInvocation"] | components["schemas"]["VAELoaderInvocation"] | components["schemas"]["ZImageControlInvocation"] | components["schemas"]["ZImageDenoiseInvocation"] | components["schemas"]["ZImageDenoiseMetaInvocation"] | 
components["schemas"]["ZImageImageToLatentsInvocation"] | components["schemas"]["ZImageLatentsToImageInvocation"] | components["schemas"]["ZImageLoRACollectionLoader"] | components["schemas"]["ZImageLoRALoaderInvocation"] | components["schemas"]["ZImageModelLoaderInvocation"] | components["schemas"]["ZImageSeedVarianceEnhancerInvocation"] | components["schemas"]["ZImageTextEncoderInvocation"]; }; /** * Edges @@ -14167,7 +14246,7 @@ export type components = { * Invocation * @description The ID of the invocation */ - invocation: components["schemas"]["AddInvocation"] | components["schemas"]["AlphaMaskToTensorInvocation"] | components["schemas"]["AnimaDenoiseInvocation"] | components["schemas"]["AnimaLatentsToImageInvocation"] | components["schemas"]["AnimaModelLoaderInvocation"] | components["schemas"]["AnimaTextEncoderInvocation"] | components["schemas"]["ApplyMaskTensorToImageInvocation"] | components["schemas"]["ApplyMaskToImageInvocation"] | components["schemas"]["BlankImageInvocation"] | components["schemas"]["BlendLatentsInvocation"] | components["schemas"]["BooleanCollectionInvocation"] | components["schemas"]["BooleanInvocation"] | components["schemas"]["BoundingBoxInvocation"] | components["schemas"]["CLIPSkipInvocation"] | components["schemas"]["CV2InfillInvocation"] | components["schemas"]["CalculateImageTilesEvenSplitInvocation"] | components["schemas"]["CalculateImageTilesInvocation"] | components["schemas"]["CalculateImageTilesMinimumOverlapInvocation"] | components["schemas"]["CannyEdgeDetectionInvocation"] | components["schemas"]["CanvasPasteBackInvocation"] | components["schemas"]["CanvasV2MaskAndCropInvocation"] | components["schemas"]["CenterPadCropInvocation"] | components["schemas"]["CogView4DenoiseInvocation"] | components["schemas"]["CogView4ImageToLatentsInvocation"] | components["schemas"]["CogView4LatentsToImageInvocation"] | components["schemas"]["CogView4ModelLoaderInvocation"] | components["schemas"]["CogView4TextEncoderInvocation"] | 
components["schemas"]["CollectInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["ColorInvocation"] | components["schemas"]["ColorMapInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["ConditioningCollectionInvocation"] | components["schemas"]["ConditioningInvocation"] | components["schemas"]["ContentShuffleInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["CoreMetadataInvocation"] | components["schemas"]["CreateDenoiseMaskInvocation"] | components["schemas"]["CreateGradientMaskInvocation"] | components["schemas"]["CropImageToBoundingBoxInvocation"] | components["schemas"]["CropLatentsCoreInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["DWOpenposeDetectionInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["DenoiseLatentsMetaInvocation"] | components["schemas"]["DepthAnythingDepthEstimationInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["ExpandMaskWithFadeInvocation"] | components["schemas"]["FLUXLoRACollectionLoader"] | components["schemas"]["FaceIdentifierInvocation"] | components["schemas"]["FaceMaskInvocation"] | components["schemas"]["FaceOffInvocation"] | components["schemas"]["FloatBatchInvocation"] | components["schemas"]["FloatCollectionInvocation"] | components["schemas"]["FloatGenerator"] | components["schemas"]["FloatInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["FloatMathInvocation"] | components["schemas"]["FloatToIntegerInvocation"] | components["schemas"]["Flux2DenoiseInvocation"] | components["schemas"]["Flux2KleinLoRACollectionLoader"] | components["schemas"]["Flux2KleinLoRALoaderInvocation"] | components["schemas"]["Flux2KleinModelLoaderInvocation"] | 
components["schemas"]["Flux2KleinTextEncoderInvocation"] | components["schemas"]["Flux2VaeDecodeInvocation"] | components["schemas"]["Flux2VaeEncodeInvocation"] | components["schemas"]["FluxControlLoRALoaderInvocation"] | components["schemas"]["FluxControlNetInvocation"] | components["schemas"]["FluxDenoiseInvocation"] | components["schemas"]["FluxDenoiseLatentsMetaInvocation"] | components["schemas"]["FluxFillInvocation"] | components["schemas"]["FluxIPAdapterInvocation"] | components["schemas"]["FluxKontextConcatenateImagesInvocation"] | components["schemas"]["FluxKontextInvocation"] | components["schemas"]["FluxLoRALoaderInvocation"] | components["schemas"]["FluxModelLoaderInvocation"] | components["schemas"]["FluxReduxInvocation"] | components["schemas"]["FluxTextEncoderInvocation"] | components["schemas"]["FluxVaeDecodeInvocation"] | components["schemas"]["FluxVaeEncodeInvocation"] | components["schemas"]["FreeUInvocation"] | components["schemas"]["GetMaskBoundingBoxInvocation"] | components["schemas"]["GroundingDinoInvocation"] | components["schemas"]["HEDEdgeDetectionInvocation"] | components["schemas"]["HeuristicResizeInvocation"] | components["schemas"]["IPAdapterInvocation"] | components["schemas"]["IdealSizeInvocation"] | components["schemas"]["ImageBatchInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["ImageChannelMultiplyInvocation"] | components["schemas"]["ImageChannelOffsetInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImageGenerator"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ImageInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ImageMaskToTensorInvocation"] | 
components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["ImageNoiseInvocation"] | components["schemas"]["ImagePanelLayoutInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["IntegerBatchInvocation"] | components["schemas"]["IntegerCollectionInvocation"] | components["schemas"]["IntegerGenerator"] | components["schemas"]["IntegerInvocation"] | components["schemas"]["IntegerMathInvocation"] | components["schemas"]["InvertTensorMaskInvocation"] | components["schemas"]["InvokeAdjustImageHuePlusInvocation"] | components["schemas"]["InvokeEquivalentAchromaticLightnessInvocation"] | components["schemas"]["InvokeImageBlendInvocation"] | components["schemas"]["InvokeImageCompositorInvocation"] | components["schemas"]["InvokeImageDilateOrErodeInvocation"] | components["schemas"]["InvokeImageEnhanceInvocation"] | components["schemas"]["InvokeImageValueThresholdsInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["LaMaInfillInvocation"] | components["schemas"]["LatentsCollectionInvocation"] | components["schemas"]["LatentsInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["LineartAnimeEdgeDetectionInvocation"] | components["schemas"]["LineartEdgeDetectionInvocation"] | components["schemas"]["LlavaOnevisionVllmInvocation"] | components["schemas"]["LoRACollectionLoader"] | components["schemas"]["LoRALoaderInvocation"] | components["schemas"]["LoRASelectorInvocation"] | components["schemas"]["MLSDDetectionInvocation"] | 
components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["MaskFromIDInvocation"] | components["schemas"]["MaskTensorToImageInvocation"] | components["schemas"]["MediaPipeFaceDetectionInvocation"] | components["schemas"]["MergeMetadataInvocation"] | components["schemas"]["MergeTilesToImageInvocation"] | components["schemas"]["MetadataFieldExtractorInvocation"] | components["schemas"]["MetadataFromImageInvocation"] | components["schemas"]["MetadataInvocation"] | components["schemas"]["MetadataItemInvocation"] | components["schemas"]["MetadataItemLinkedInvocation"] | components["schemas"]["MetadataToBoolCollectionInvocation"] | components["schemas"]["MetadataToBoolInvocation"] | components["schemas"]["MetadataToControlnetsInvocation"] | components["schemas"]["MetadataToFloatCollectionInvocation"] | components["schemas"]["MetadataToFloatInvocation"] | components["schemas"]["MetadataToIPAdaptersInvocation"] | components["schemas"]["MetadataToIntegerCollectionInvocation"] | components["schemas"]["MetadataToIntegerInvocation"] | components["schemas"]["MetadataToLorasCollectionInvocation"] | components["schemas"]["MetadataToLorasInvocation"] | components["schemas"]["MetadataToModelInvocation"] | components["schemas"]["MetadataToSDXLLorasInvocation"] | components["schemas"]["MetadataToSDXLModelInvocation"] | components["schemas"]["MetadataToSchedulerInvocation"] | components["schemas"]["MetadataToStringCollectionInvocation"] | components["schemas"]["MetadataToStringInvocation"] | components["schemas"]["MetadataToT2IAdaptersInvocation"] | components["schemas"]["MetadataToVAEInvocation"] | components["schemas"]["ModelIdentifierInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["NormalMapInvocation"] | 
components["schemas"]["PBRMapsInvocation"] | components["schemas"]["PairTileImageInvocation"] | components["schemas"]["PasteImageIntoBoundingBoxInvocation"] | components["schemas"]["PiDiNetEdgeDetectionInvocation"] | components["schemas"]["PromptTemplateInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["RandomFloatInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RectangleMaskInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["RoundInvocation"] | components["schemas"]["SD3DenoiseInvocation"] | components["schemas"]["SD3ImageToLatentsInvocation"] | components["schemas"]["SD3LatentsToImageInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["SDXLLoRACollectionLoader"] | components["schemas"]["SDXLLoRALoaderInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["SaveImageInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["SchedulerInvocation"] | components["schemas"]["Sd3ModelLoaderInvocation"] | components["schemas"]["Sd3TextEncoderInvocation"] | components["schemas"]["SeamlessModeInvocation"] | components["schemas"]["SegmentAnythingInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["SpandrelImageToImageAutoscaleInvocation"] | components["schemas"]["SpandrelImageToImageInvocation"] | components["schemas"]["StringBatchInvocation"] | components["schemas"]["StringCollectionInvocation"] | components["schemas"]["StringGenerator"] | components["schemas"]["StringInvocation"] | components["schemas"]["StringJoinInvocation"] | 
components["schemas"]["StringJoinThreeInvocation"] | components["schemas"]["StringReplaceInvocation"] | components["schemas"]["StringSplitInvocation"] | components["schemas"]["StringSplitNegInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["T2IAdapterInvocation"] | components["schemas"]["TileToPropertiesInvocation"] | components["schemas"]["TiledMultiDiffusionDenoiseLatents"] | components["schemas"]["UnsharpMaskInvocation"] | components["schemas"]["VAELoaderInvocation"] | components["schemas"]["ZImageControlInvocation"] | components["schemas"]["ZImageDenoiseInvocation"] | components["schemas"]["ZImageDenoiseMetaInvocation"] | components["schemas"]["ZImageImageToLatentsInvocation"] | components["schemas"]["ZImageLatentsToImageInvocation"] | components["schemas"]["ZImageLoRACollectionLoader"] | components["schemas"]["ZImageLoRALoaderInvocation"] | components["schemas"]["ZImageModelLoaderInvocation"] | components["schemas"]["ZImageSeedVarianceEnhancerInvocation"] | components["schemas"]["ZImageTextEncoderInvocation"]; + invocation: components["schemas"]["AddInvocation"] | components["schemas"]["AlphaMaskToTensorInvocation"] | components["schemas"]["AnimaDenoiseInvocation"] | components["schemas"]["AnimaImageToLatentsInvocation"] | components["schemas"]["AnimaLatentsToImageInvocation"] | components["schemas"]["AnimaModelLoaderInvocation"] | components["schemas"]["AnimaTextEncoderInvocation"] | components["schemas"]["ApplyMaskTensorToImageInvocation"] | components["schemas"]["ApplyMaskToImageInvocation"] | components["schemas"]["BlankImageInvocation"] | components["schemas"]["BlendLatentsInvocation"] | components["schemas"]["BooleanCollectionInvocation"] | components["schemas"]["BooleanInvocation"] | components["schemas"]["BoundingBoxInvocation"] | components["schemas"]["CLIPSkipInvocation"] | components["schemas"]["CV2InfillInvocation"] | components["schemas"]["CalculateImageTilesEvenSplitInvocation"] | 
components["schemas"]["CalculateImageTilesInvocation"] | components["schemas"]["CalculateImageTilesMinimumOverlapInvocation"] | components["schemas"]["CannyEdgeDetectionInvocation"] | components["schemas"]["CanvasPasteBackInvocation"] | components["schemas"]["CanvasV2MaskAndCropInvocation"] | components["schemas"]["CenterPadCropInvocation"] | components["schemas"]["CogView4DenoiseInvocation"] | components["schemas"]["CogView4ImageToLatentsInvocation"] | components["schemas"]["CogView4LatentsToImageInvocation"] | components["schemas"]["CogView4ModelLoaderInvocation"] | components["schemas"]["CogView4TextEncoderInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["ColorInvocation"] | components["schemas"]["ColorMapInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["ConditioningCollectionInvocation"] | components["schemas"]["ConditioningInvocation"] | components["schemas"]["ContentShuffleInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["CoreMetadataInvocation"] | components["schemas"]["CreateDenoiseMaskInvocation"] | components["schemas"]["CreateGradientMaskInvocation"] | components["schemas"]["CropImageToBoundingBoxInvocation"] | components["schemas"]["CropLatentsCoreInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["DWOpenposeDetectionInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["DenoiseLatentsMetaInvocation"] | components["schemas"]["DepthAnythingDepthEstimationInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["ExpandMaskWithFadeInvocation"] | components["schemas"]["FLUXLoRACollectionLoader"] | components["schemas"]["FaceIdentifierInvocation"] | components["schemas"]["FaceMaskInvocation"] | 
components["schemas"]["FaceOffInvocation"] | components["schemas"]["FloatBatchInvocation"] | components["schemas"]["FloatCollectionInvocation"] | components["schemas"]["FloatGenerator"] | components["schemas"]["FloatInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["FloatMathInvocation"] | components["schemas"]["FloatToIntegerInvocation"] | components["schemas"]["Flux2DenoiseInvocation"] | components["schemas"]["Flux2KleinLoRACollectionLoader"] | components["schemas"]["Flux2KleinLoRALoaderInvocation"] | components["schemas"]["Flux2KleinModelLoaderInvocation"] | components["schemas"]["Flux2KleinTextEncoderInvocation"] | components["schemas"]["Flux2VaeDecodeInvocation"] | components["schemas"]["Flux2VaeEncodeInvocation"] | components["schemas"]["FluxControlLoRALoaderInvocation"] | components["schemas"]["FluxControlNetInvocation"] | components["schemas"]["FluxDenoiseInvocation"] | components["schemas"]["FluxDenoiseLatentsMetaInvocation"] | components["schemas"]["FluxFillInvocation"] | components["schemas"]["FluxIPAdapterInvocation"] | components["schemas"]["FluxKontextConcatenateImagesInvocation"] | components["schemas"]["FluxKontextInvocation"] | components["schemas"]["FluxLoRALoaderInvocation"] | components["schemas"]["FluxModelLoaderInvocation"] | components["schemas"]["FluxReduxInvocation"] | components["schemas"]["FluxTextEncoderInvocation"] | components["schemas"]["FluxVaeDecodeInvocation"] | components["schemas"]["FluxVaeEncodeInvocation"] | components["schemas"]["FreeUInvocation"] | components["schemas"]["GetMaskBoundingBoxInvocation"] | components["schemas"]["GroundingDinoInvocation"] | components["schemas"]["HEDEdgeDetectionInvocation"] | components["schemas"]["HeuristicResizeInvocation"] | components["schemas"]["IPAdapterInvocation"] | components["schemas"]["IdealSizeInvocation"] | components["schemas"]["ImageBatchInvocation"] | components["schemas"]["ImageBlurInvocation"] | 
components["schemas"]["ImageChannelInvocation"] | components["schemas"]["ImageChannelMultiplyInvocation"] | components["schemas"]["ImageChannelOffsetInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImageGenerator"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ImageInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ImageMaskToTensorInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["ImageNoiseInvocation"] | components["schemas"]["ImagePanelLayoutInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["IntegerBatchInvocation"] | components["schemas"]["IntegerCollectionInvocation"] | components["schemas"]["IntegerGenerator"] | components["schemas"]["IntegerInvocation"] | components["schemas"]["IntegerMathInvocation"] | components["schemas"]["InvertTensorMaskInvocation"] | components["schemas"]["InvokeAdjustImageHuePlusInvocation"] | components["schemas"]["InvokeEquivalentAchromaticLightnessInvocation"] | components["schemas"]["InvokeImageBlendInvocation"] | components["schemas"]["InvokeImageCompositorInvocation"] | components["schemas"]["InvokeImageDilateOrErodeInvocation"] | components["schemas"]["InvokeImageEnhanceInvocation"] | components["schemas"]["InvokeImageValueThresholdsInvocation"] | components["schemas"]["IterateInvocation"] | 
components["schemas"]["LaMaInfillInvocation"] | components["schemas"]["LatentsCollectionInvocation"] | components["schemas"]["LatentsInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["LineartAnimeEdgeDetectionInvocation"] | components["schemas"]["LineartEdgeDetectionInvocation"] | components["schemas"]["LlavaOnevisionVllmInvocation"] | components["schemas"]["LoRACollectionLoader"] | components["schemas"]["LoRALoaderInvocation"] | components["schemas"]["LoRASelectorInvocation"] | components["schemas"]["MLSDDetectionInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["MaskFromIDInvocation"] | components["schemas"]["MaskTensorToImageInvocation"] | components["schemas"]["MediaPipeFaceDetectionInvocation"] | components["schemas"]["MergeMetadataInvocation"] | components["schemas"]["MergeTilesToImageInvocation"] | components["schemas"]["MetadataFieldExtractorInvocation"] | components["schemas"]["MetadataFromImageInvocation"] | components["schemas"]["MetadataInvocation"] | components["schemas"]["MetadataItemInvocation"] | components["schemas"]["MetadataItemLinkedInvocation"] | components["schemas"]["MetadataToBoolCollectionInvocation"] | components["schemas"]["MetadataToBoolInvocation"] | components["schemas"]["MetadataToControlnetsInvocation"] | components["schemas"]["MetadataToFloatCollectionInvocation"] | components["schemas"]["MetadataToFloatInvocation"] | components["schemas"]["MetadataToIPAdaptersInvocation"] | components["schemas"]["MetadataToIntegerCollectionInvocation"] | components["schemas"]["MetadataToIntegerInvocation"] | components["schemas"]["MetadataToLorasCollectionInvocation"] | components["schemas"]["MetadataToLorasInvocation"] | components["schemas"]["MetadataToModelInvocation"] | components["schemas"]["MetadataToSDXLLorasInvocation"] 
| components["schemas"]["MetadataToSDXLModelInvocation"] | components["schemas"]["MetadataToSchedulerInvocation"] | components["schemas"]["MetadataToStringCollectionInvocation"] | components["schemas"]["MetadataToStringInvocation"] | components["schemas"]["MetadataToT2IAdaptersInvocation"] | components["schemas"]["MetadataToVAEInvocation"] | components["schemas"]["ModelIdentifierInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["NormalMapInvocation"] | components["schemas"]["PBRMapsInvocation"] | components["schemas"]["PairTileImageInvocation"] | components["schemas"]["PasteImageIntoBoundingBoxInvocation"] | components["schemas"]["PiDiNetEdgeDetectionInvocation"] | components["schemas"]["PromptTemplateInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["RandomFloatInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RectangleMaskInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["RoundInvocation"] | components["schemas"]["SD3DenoiseInvocation"] | components["schemas"]["SD3ImageToLatentsInvocation"] | components["schemas"]["SD3LatentsToImageInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["SDXLLoRACollectionLoader"] | components["schemas"]["SDXLLoRALoaderInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["SaveImageInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["SchedulerInvocation"] | components["schemas"]["Sd3ModelLoaderInvocation"] | components["schemas"]["Sd3TextEncoderInvocation"] | 
components["schemas"]["SeamlessModeInvocation"] | components["schemas"]["SegmentAnythingInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["SpandrelImageToImageAutoscaleInvocation"] | components["schemas"]["SpandrelImageToImageInvocation"] | components["schemas"]["StringBatchInvocation"] | components["schemas"]["StringCollectionInvocation"] | components["schemas"]["StringGenerator"] | components["schemas"]["StringInvocation"] | components["schemas"]["StringJoinInvocation"] | components["schemas"]["StringJoinThreeInvocation"] | components["schemas"]["StringReplaceInvocation"] | components["schemas"]["StringSplitInvocation"] | components["schemas"]["StringSplitNegInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["T2IAdapterInvocation"] | components["schemas"]["TileToPropertiesInvocation"] | components["schemas"]["TiledMultiDiffusionDenoiseLatents"] | components["schemas"]["UnsharpMaskInvocation"] | components["schemas"]["VAELoaderInvocation"] | components["schemas"]["ZImageControlInvocation"] | components["schemas"]["ZImageDenoiseInvocation"] | components["schemas"]["ZImageDenoiseMetaInvocation"] | components["schemas"]["ZImageImageToLatentsInvocation"] | components["schemas"]["ZImageLatentsToImageInvocation"] | components["schemas"]["ZImageLoRACollectionLoader"] | components["schemas"]["ZImageLoRALoaderInvocation"] | components["schemas"]["ZImageModelLoaderInvocation"] | components["schemas"]["ZImageSeedVarianceEnhancerInvocation"] | components["schemas"]["ZImageTextEncoderInvocation"]; /** * Invocation Source Id * @description The ID of the prepared invocation's source node @@ -14231,7 +14310,7 @@ export type components = { * Invocation * @description The ID of the invocation */ - invocation: components["schemas"]["AddInvocation"] | components["schemas"]["AlphaMaskToTensorInvocation"] | components["schemas"]["AnimaDenoiseInvocation"] | components["schemas"]["AnimaLatentsToImageInvocation"] | 
components["schemas"]["AnimaModelLoaderInvocation"] | components["schemas"]["AnimaTextEncoderInvocation"] | components["schemas"]["ApplyMaskTensorToImageInvocation"] | components["schemas"]["ApplyMaskToImageInvocation"] | components["schemas"]["BlankImageInvocation"] | components["schemas"]["BlendLatentsInvocation"] | components["schemas"]["BooleanCollectionInvocation"] | components["schemas"]["BooleanInvocation"] | components["schemas"]["BoundingBoxInvocation"] | components["schemas"]["CLIPSkipInvocation"] | components["schemas"]["CV2InfillInvocation"] | components["schemas"]["CalculateImageTilesEvenSplitInvocation"] | components["schemas"]["CalculateImageTilesInvocation"] | components["schemas"]["CalculateImageTilesMinimumOverlapInvocation"] | components["schemas"]["CannyEdgeDetectionInvocation"] | components["schemas"]["CanvasPasteBackInvocation"] | components["schemas"]["CanvasV2MaskAndCropInvocation"] | components["schemas"]["CenterPadCropInvocation"] | components["schemas"]["CogView4DenoiseInvocation"] | components["schemas"]["CogView4ImageToLatentsInvocation"] | components["schemas"]["CogView4LatentsToImageInvocation"] | components["schemas"]["CogView4ModelLoaderInvocation"] | components["schemas"]["CogView4TextEncoderInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["ColorInvocation"] | components["schemas"]["ColorMapInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["ConditioningCollectionInvocation"] | components["schemas"]["ConditioningInvocation"] | components["schemas"]["ContentShuffleInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["CoreMetadataInvocation"] | components["schemas"]["CreateDenoiseMaskInvocation"] | components["schemas"]["CreateGradientMaskInvocation"] | components["schemas"]["CropImageToBoundingBoxInvocation"] | components["schemas"]["CropLatentsCoreInvocation"] | 
components["schemas"]["CvInpaintInvocation"] | components["schemas"]["DWOpenposeDetectionInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["DenoiseLatentsMetaInvocation"] | components["schemas"]["DepthAnythingDepthEstimationInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["ExpandMaskWithFadeInvocation"] | components["schemas"]["FLUXLoRACollectionLoader"] | components["schemas"]["FaceIdentifierInvocation"] | components["schemas"]["FaceMaskInvocation"] | components["schemas"]["FaceOffInvocation"] | components["schemas"]["FloatBatchInvocation"] | components["schemas"]["FloatCollectionInvocation"] | components["schemas"]["FloatGenerator"] | components["schemas"]["FloatInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["FloatMathInvocation"] | components["schemas"]["FloatToIntegerInvocation"] | components["schemas"]["Flux2DenoiseInvocation"] | components["schemas"]["Flux2KleinLoRACollectionLoader"] | components["schemas"]["Flux2KleinLoRALoaderInvocation"] | components["schemas"]["Flux2KleinModelLoaderInvocation"] | components["schemas"]["Flux2KleinTextEncoderInvocation"] | components["schemas"]["Flux2VaeDecodeInvocation"] | components["schemas"]["Flux2VaeEncodeInvocation"] | components["schemas"]["FluxControlLoRALoaderInvocation"] | components["schemas"]["FluxControlNetInvocation"] | components["schemas"]["FluxDenoiseInvocation"] | components["schemas"]["FluxDenoiseLatentsMetaInvocation"] | components["schemas"]["FluxFillInvocation"] | components["schemas"]["FluxIPAdapterInvocation"] | components["schemas"]["FluxKontextConcatenateImagesInvocation"] | components["schemas"]["FluxKontextInvocation"] | components["schemas"]["FluxLoRALoaderInvocation"] | components["schemas"]["FluxModelLoaderInvocation"] | components["schemas"]["FluxReduxInvocation"] | 
components["schemas"]["FluxTextEncoderInvocation"] | components["schemas"]["FluxVaeDecodeInvocation"] | components["schemas"]["FluxVaeEncodeInvocation"] | components["schemas"]["FreeUInvocation"] | components["schemas"]["GetMaskBoundingBoxInvocation"] | components["schemas"]["GroundingDinoInvocation"] | components["schemas"]["HEDEdgeDetectionInvocation"] | components["schemas"]["HeuristicResizeInvocation"] | components["schemas"]["IPAdapterInvocation"] | components["schemas"]["IdealSizeInvocation"] | components["schemas"]["ImageBatchInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["ImageChannelMultiplyInvocation"] | components["schemas"]["ImageChannelOffsetInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImageGenerator"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ImageInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ImageMaskToTensorInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["ImageNoiseInvocation"] | components["schemas"]["ImagePanelLayoutInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["IntegerBatchInvocation"] | components["schemas"]["IntegerCollectionInvocation"] | components["schemas"]["IntegerGenerator"] | 
components["schemas"]["IntegerInvocation"] | components["schemas"]["IntegerMathInvocation"] | components["schemas"]["InvertTensorMaskInvocation"] | components["schemas"]["InvokeAdjustImageHuePlusInvocation"] | components["schemas"]["InvokeEquivalentAchromaticLightnessInvocation"] | components["schemas"]["InvokeImageBlendInvocation"] | components["schemas"]["InvokeImageCompositorInvocation"] | components["schemas"]["InvokeImageDilateOrErodeInvocation"] | components["schemas"]["InvokeImageEnhanceInvocation"] | components["schemas"]["InvokeImageValueThresholdsInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["LaMaInfillInvocation"] | components["schemas"]["LatentsCollectionInvocation"] | components["schemas"]["LatentsInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["LineartAnimeEdgeDetectionInvocation"] | components["schemas"]["LineartEdgeDetectionInvocation"] | components["schemas"]["LlavaOnevisionVllmInvocation"] | components["schemas"]["LoRACollectionLoader"] | components["schemas"]["LoRALoaderInvocation"] | components["schemas"]["LoRASelectorInvocation"] | components["schemas"]["MLSDDetectionInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["MaskFromIDInvocation"] | components["schemas"]["MaskTensorToImageInvocation"] | components["schemas"]["MediaPipeFaceDetectionInvocation"] | components["schemas"]["MergeMetadataInvocation"] | components["schemas"]["MergeTilesToImageInvocation"] | components["schemas"]["MetadataFieldExtractorInvocation"] | components["schemas"]["MetadataFromImageInvocation"] | components["schemas"]["MetadataInvocation"] | components["schemas"]["MetadataItemInvocation"] | components["schemas"]["MetadataItemLinkedInvocation"] | components["schemas"]["MetadataToBoolCollectionInvocation"] | 
components["schemas"]["MetadataToBoolInvocation"] | components["schemas"]["MetadataToControlnetsInvocation"] | components["schemas"]["MetadataToFloatCollectionInvocation"] | components["schemas"]["MetadataToFloatInvocation"] | components["schemas"]["MetadataToIPAdaptersInvocation"] | components["schemas"]["MetadataToIntegerCollectionInvocation"] | components["schemas"]["MetadataToIntegerInvocation"] | components["schemas"]["MetadataToLorasCollectionInvocation"] | components["schemas"]["MetadataToLorasInvocation"] | components["schemas"]["MetadataToModelInvocation"] | components["schemas"]["MetadataToSDXLLorasInvocation"] | components["schemas"]["MetadataToSDXLModelInvocation"] | components["schemas"]["MetadataToSchedulerInvocation"] | components["schemas"]["MetadataToStringCollectionInvocation"] | components["schemas"]["MetadataToStringInvocation"] | components["schemas"]["MetadataToT2IAdaptersInvocation"] | components["schemas"]["MetadataToVAEInvocation"] | components["schemas"]["ModelIdentifierInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["NormalMapInvocation"] | components["schemas"]["PBRMapsInvocation"] | components["schemas"]["PairTileImageInvocation"] | components["schemas"]["PasteImageIntoBoundingBoxInvocation"] | components["schemas"]["PiDiNetEdgeDetectionInvocation"] | components["schemas"]["PromptTemplateInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["RandomFloatInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RectangleMaskInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["RoundInvocation"] | components["schemas"]["SD3DenoiseInvocation"] | components["schemas"]["SD3ImageToLatentsInvocation"] | 
components["schemas"]["SD3LatentsToImageInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["SDXLLoRACollectionLoader"] | components["schemas"]["SDXLLoRALoaderInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["SaveImageInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["SchedulerInvocation"] | components["schemas"]["Sd3ModelLoaderInvocation"] | components["schemas"]["Sd3TextEncoderInvocation"] | components["schemas"]["SeamlessModeInvocation"] | components["schemas"]["SegmentAnythingInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["SpandrelImageToImageAutoscaleInvocation"] | components["schemas"]["SpandrelImageToImageInvocation"] | components["schemas"]["StringBatchInvocation"] | components["schemas"]["StringCollectionInvocation"] | components["schemas"]["StringGenerator"] | components["schemas"]["StringInvocation"] | components["schemas"]["StringJoinInvocation"] | components["schemas"]["StringJoinThreeInvocation"] | components["schemas"]["StringReplaceInvocation"] | components["schemas"]["StringSplitInvocation"] | components["schemas"]["StringSplitNegInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["T2IAdapterInvocation"] | components["schemas"]["TileToPropertiesInvocation"] | components["schemas"]["TiledMultiDiffusionDenoiseLatents"] | components["schemas"]["UnsharpMaskInvocation"] | components["schemas"]["VAELoaderInvocation"] | components["schemas"]["ZImageControlInvocation"] | components["schemas"]["ZImageDenoiseInvocation"] | components["schemas"]["ZImageDenoiseMetaInvocation"] | components["schemas"]["ZImageImageToLatentsInvocation"] | components["schemas"]["ZImageLatentsToImageInvocation"] | components["schemas"]["ZImageLoRACollectionLoader"] | 
components["schemas"]["ZImageLoRALoaderInvocation"] | components["schemas"]["ZImageModelLoaderInvocation"] | components["schemas"]["ZImageSeedVarianceEnhancerInvocation"] | components["schemas"]["ZImageTextEncoderInvocation"]; + invocation: components["schemas"]["AddInvocation"] | components["schemas"]["AlphaMaskToTensorInvocation"] | components["schemas"]["AnimaDenoiseInvocation"] | components["schemas"]["AnimaImageToLatentsInvocation"] | components["schemas"]["AnimaLatentsToImageInvocation"] | components["schemas"]["AnimaModelLoaderInvocation"] | components["schemas"]["AnimaTextEncoderInvocation"] | components["schemas"]["ApplyMaskTensorToImageInvocation"] | components["schemas"]["ApplyMaskToImageInvocation"] | components["schemas"]["BlankImageInvocation"] | components["schemas"]["BlendLatentsInvocation"] | components["schemas"]["BooleanCollectionInvocation"] | components["schemas"]["BooleanInvocation"] | components["schemas"]["BoundingBoxInvocation"] | components["schemas"]["CLIPSkipInvocation"] | components["schemas"]["CV2InfillInvocation"] | components["schemas"]["CalculateImageTilesEvenSplitInvocation"] | components["schemas"]["CalculateImageTilesInvocation"] | components["schemas"]["CalculateImageTilesMinimumOverlapInvocation"] | components["schemas"]["CannyEdgeDetectionInvocation"] | components["schemas"]["CanvasPasteBackInvocation"] | components["schemas"]["CanvasV2MaskAndCropInvocation"] | components["schemas"]["CenterPadCropInvocation"] | components["schemas"]["CogView4DenoiseInvocation"] | components["schemas"]["CogView4ImageToLatentsInvocation"] | components["schemas"]["CogView4LatentsToImageInvocation"] | components["schemas"]["CogView4ModelLoaderInvocation"] | components["schemas"]["CogView4TextEncoderInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["ColorInvocation"] | components["schemas"]["ColorMapInvocation"] | components["schemas"]["CompelInvocation"] | 
components["schemas"]["ConditioningCollectionInvocation"] | components["schemas"]["ConditioningInvocation"] | components["schemas"]["ContentShuffleInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["CoreMetadataInvocation"] | components["schemas"]["CreateDenoiseMaskInvocation"] | components["schemas"]["CreateGradientMaskInvocation"] | components["schemas"]["CropImageToBoundingBoxInvocation"] | components["schemas"]["CropLatentsCoreInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["DWOpenposeDetectionInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["DenoiseLatentsMetaInvocation"] | components["schemas"]["DepthAnythingDepthEstimationInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["ExpandMaskWithFadeInvocation"] | components["schemas"]["FLUXLoRACollectionLoader"] | components["schemas"]["FaceIdentifierInvocation"] | components["schemas"]["FaceMaskInvocation"] | components["schemas"]["FaceOffInvocation"] | components["schemas"]["FloatBatchInvocation"] | components["schemas"]["FloatCollectionInvocation"] | components["schemas"]["FloatGenerator"] | components["schemas"]["FloatInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["FloatMathInvocation"] | components["schemas"]["FloatToIntegerInvocation"] | components["schemas"]["Flux2DenoiseInvocation"] | components["schemas"]["Flux2KleinLoRACollectionLoader"] | components["schemas"]["Flux2KleinLoRALoaderInvocation"] | components["schemas"]["Flux2KleinModelLoaderInvocation"] | components["schemas"]["Flux2KleinTextEncoderInvocation"] | components["schemas"]["Flux2VaeDecodeInvocation"] | components["schemas"]["Flux2VaeEncodeInvocation"] | components["schemas"]["FluxControlLoRALoaderInvocation"] | components["schemas"]["FluxControlNetInvocation"] | 
components["schemas"]["FluxDenoiseInvocation"] | components["schemas"]["FluxDenoiseLatentsMetaInvocation"] | components["schemas"]["FluxFillInvocation"] | components["schemas"]["FluxIPAdapterInvocation"] | components["schemas"]["FluxKontextConcatenateImagesInvocation"] | components["schemas"]["FluxKontextInvocation"] | components["schemas"]["FluxLoRALoaderInvocation"] | components["schemas"]["FluxModelLoaderInvocation"] | components["schemas"]["FluxReduxInvocation"] | components["schemas"]["FluxTextEncoderInvocation"] | components["schemas"]["FluxVaeDecodeInvocation"] | components["schemas"]["FluxVaeEncodeInvocation"] | components["schemas"]["FreeUInvocation"] | components["schemas"]["GetMaskBoundingBoxInvocation"] | components["schemas"]["GroundingDinoInvocation"] | components["schemas"]["HEDEdgeDetectionInvocation"] | components["schemas"]["HeuristicResizeInvocation"] | components["schemas"]["IPAdapterInvocation"] | components["schemas"]["IdealSizeInvocation"] | components["schemas"]["ImageBatchInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["ImageChannelMultiplyInvocation"] | components["schemas"]["ImageChannelOffsetInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImageGenerator"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ImageInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ImageMaskToTensorInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["ImageNoiseInvocation"] | components["schemas"]["ImagePanelLayoutInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["ImageResizeInvocation"] | 
components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["IntegerBatchInvocation"] | components["schemas"]["IntegerCollectionInvocation"] | components["schemas"]["IntegerGenerator"] | components["schemas"]["IntegerInvocation"] | components["schemas"]["IntegerMathInvocation"] | components["schemas"]["InvertTensorMaskInvocation"] | components["schemas"]["InvokeAdjustImageHuePlusInvocation"] | components["schemas"]["InvokeEquivalentAchromaticLightnessInvocation"] | components["schemas"]["InvokeImageBlendInvocation"] | components["schemas"]["InvokeImageCompositorInvocation"] | components["schemas"]["InvokeImageDilateOrErodeInvocation"] | components["schemas"]["InvokeImageEnhanceInvocation"] | components["schemas"]["InvokeImageValueThresholdsInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["LaMaInfillInvocation"] | components["schemas"]["LatentsCollectionInvocation"] | components["schemas"]["LatentsInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["LineartAnimeEdgeDetectionInvocation"] | components["schemas"]["LineartEdgeDetectionInvocation"] | components["schemas"]["LlavaOnevisionVllmInvocation"] | components["schemas"]["LoRACollectionLoader"] | components["schemas"]["LoRALoaderInvocation"] | components["schemas"]["LoRASelectorInvocation"] | components["schemas"]["MLSDDetectionInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["MaskFromIDInvocation"] | components["schemas"]["MaskTensorToImageInvocation"] | 
components["schemas"]["MediaPipeFaceDetectionInvocation"] | components["schemas"]["MergeMetadataInvocation"] | components["schemas"]["MergeTilesToImageInvocation"] | components["schemas"]["MetadataFieldExtractorInvocation"] | components["schemas"]["MetadataFromImageInvocation"] | components["schemas"]["MetadataInvocation"] | components["schemas"]["MetadataItemInvocation"] | components["schemas"]["MetadataItemLinkedInvocation"] | components["schemas"]["MetadataToBoolCollectionInvocation"] | components["schemas"]["MetadataToBoolInvocation"] | components["schemas"]["MetadataToControlnetsInvocation"] | components["schemas"]["MetadataToFloatCollectionInvocation"] | components["schemas"]["MetadataToFloatInvocation"] | components["schemas"]["MetadataToIPAdaptersInvocation"] | components["schemas"]["MetadataToIntegerCollectionInvocation"] | components["schemas"]["MetadataToIntegerInvocation"] | components["schemas"]["MetadataToLorasCollectionInvocation"] | components["schemas"]["MetadataToLorasInvocation"] | components["schemas"]["MetadataToModelInvocation"] | components["schemas"]["MetadataToSDXLLorasInvocation"] | components["schemas"]["MetadataToSDXLModelInvocation"] | components["schemas"]["MetadataToSchedulerInvocation"] | components["schemas"]["MetadataToStringCollectionInvocation"] | components["schemas"]["MetadataToStringInvocation"] | components["schemas"]["MetadataToT2IAdaptersInvocation"] | components["schemas"]["MetadataToVAEInvocation"] | components["schemas"]["ModelIdentifierInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["NormalMapInvocation"] | components["schemas"]["PBRMapsInvocation"] | components["schemas"]["PairTileImageInvocation"] | components["schemas"]["PasteImageIntoBoundingBoxInvocation"] | components["schemas"]["PiDiNetEdgeDetectionInvocation"] | components["schemas"]["PromptTemplateInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | 
components["schemas"]["RandomFloatInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RectangleMaskInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["RoundInvocation"] | components["schemas"]["SD3DenoiseInvocation"] | components["schemas"]["SD3ImageToLatentsInvocation"] | components["schemas"]["SD3LatentsToImageInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["SDXLLoRACollectionLoader"] | components["schemas"]["SDXLLoRALoaderInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["SaveImageInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["SchedulerInvocation"] | components["schemas"]["Sd3ModelLoaderInvocation"] | components["schemas"]["Sd3TextEncoderInvocation"] | components["schemas"]["SeamlessModeInvocation"] | components["schemas"]["SegmentAnythingInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["SpandrelImageToImageAutoscaleInvocation"] | components["schemas"]["SpandrelImageToImageInvocation"] | components["schemas"]["StringBatchInvocation"] | components["schemas"]["StringCollectionInvocation"] | components["schemas"]["StringGenerator"] | components["schemas"]["StringInvocation"] | components["schemas"]["StringJoinInvocation"] | components["schemas"]["StringJoinThreeInvocation"] | components["schemas"]["StringReplaceInvocation"] | components["schemas"]["StringSplitInvocation"] | components["schemas"]["StringSplitNegInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["T2IAdapterInvocation"] | components["schemas"]["TileToPropertiesInvocation"] | 
components["schemas"]["TiledMultiDiffusionDenoiseLatents"] | components["schemas"]["UnsharpMaskInvocation"] | components["schemas"]["VAELoaderInvocation"] | components["schemas"]["ZImageControlInvocation"] | components["schemas"]["ZImageDenoiseInvocation"] | components["schemas"]["ZImageDenoiseMetaInvocation"] | components["schemas"]["ZImageImageToLatentsInvocation"] | components["schemas"]["ZImageLatentsToImageInvocation"] | components["schemas"]["ZImageLoRACollectionLoader"] | components["schemas"]["ZImageLoRALoaderInvocation"] | components["schemas"]["ZImageModelLoaderInvocation"] | components["schemas"]["ZImageSeedVarianceEnhancerInvocation"] | components["schemas"]["ZImageTextEncoderInvocation"]; /** * Invocation Source Id * @description The ID of the prepared invocation's source node @@ -14257,6 +14336,7 @@ export type components = { add: components["schemas"]["IntegerOutput"]; alpha_mask_to_tensor: components["schemas"]["MaskOutput"]; anima_denoise: components["schemas"]["LatentsOutput"]; + anima_i2l: components["schemas"]["LatentsOutput"]; anima_l2i: components["schemas"]["ImageOutput"]; anima_model_loader: components["schemas"]["AnimaModelLoaderOutput"]; anima_text_encoder: components["schemas"]["AnimaConditioningOutput"]; @@ -14541,7 +14621,7 @@ export type components = { * Invocation * @description The ID of the invocation */ - invocation: components["schemas"]["AddInvocation"] | components["schemas"]["AlphaMaskToTensorInvocation"] | components["schemas"]["AnimaDenoiseInvocation"] | components["schemas"]["AnimaLatentsToImageInvocation"] | components["schemas"]["AnimaModelLoaderInvocation"] | components["schemas"]["AnimaTextEncoderInvocation"] | components["schemas"]["ApplyMaskTensorToImageInvocation"] | components["schemas"]["ApplyMaskToImageInvocation"] | components["schemas"]["BlankImageInvocation"] | components["schemas"]["BlendLatentsInvocation"] | components["schemas"]["BooleanCollectionInvocation"] | components["schemas"]["BooleanInvocation"] | 
components["schemas"]["BoundingBoxInvocation"] | components["schemas"]["CLIPSkipInvocation"] | components["schemas"]["CV2InfillInvocation"] | components["schemas"]["CalculateImageTilesEvenSplitInvocation"] | components["schemas"]["CalculateImageTilesInvocation"] | components["schemas"]["CalculateImageTilesMinimumOverlapInvocation"] | components["schemas"]["CannyEdgeDetectionInvocation"] | components["schemas"]["CanvasPasteBackInvocation"] | components["schemas"]["CanvasV2MaskAndCropInvocation"] | components["schemas"]["CenterPadCropInvocation"] | components["schemas"]["CogView4DenoiseInvocation"] | components["schemas"]["CogView4ImageToLatentsInvocation"] | components["schemas"]["CogView4LatentsToImageInvocation"] | components["schemas"]["CogView4ModelLoaderInvocation"] | components["schemas"]["CogView4TextEncoderInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["ColorInvocation"] | components["schemas"]["ColorMapInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["ConditioningCollectionInvocation"] | components["schemas"]["ConditioningInvocation"] | components["schemas"]["ContentShuffleInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["CoreMetadataInvocation"] | components["schemas"]["CreateDenoiseMaskInvocation"] | components["schemas"]["CreateGradientMaskInvocation"] | components["schemas"]["CropImageToBoundingBoxInvocation"] | components["schemas"]["CropLatentsCoreInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["DWOpenposeDetectionInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["DenoiseLatentsMetaInvocation"] | components["schemas"]["DepthAnythingDepthEstimationInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["ESRGANInvocation"] | 
components["schemas"]["ExpandMaskWithFadeInvocation"] | components["schemas"]["FLUXLoRACollectionLoader"] | components["schemas"]["FaceIdentifierInvocation"] | components["schemas"]["FaceMaskInvocation"] | components["schemas"]["FaceOffInvocation"] | components["schemas"]["FloatBatchInvocation"] | components["schemas"]["FloatCollectionInvocation"] | components["schemas"]["FloatGenerator"] | components["schemas"]["FloatInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["FloatMathInvocation"] | components["schemas"]["FloatToIntegerInvocation"] | components["schemas"]["Flux2DenoiseInvocation"] | components["schemas"]["Flux2KleinLoRACollectionLoader"] | components["schemas"]["Flux2KleinLoRALoaderInvocation"] | components["schemas"]["Flux2KleinModelLoaderInvocation"] | components["schemas"]["Flux2KleinTextEncoderInvocation"] | components["schemas"]["Flux2VaeDecodeInvocation"] | components["schemas"]["Flux2VaeEncodeInvocation"] | components["schemas"]["FluxControlLoRALoaderInvocation"] | components["schemas"]["FluxControlNetInvocation"] | components["schemas"]["FluxDenoiseInvocation"] | components["schemas"]["FluxDenoiseLatentsMetaInvocation"] | components["schemas"]["FluxFillInvocation"] | components["schemas"]["FluxIPAdapterInvocation"] | components["schemas"]["FluxKontextConcatenateImagesInvocation"] | components["schemas"]["FluxKontextInvocation"] | components["schemas"]["FluxLoRALoaderInvocation"] | components["schemas"]["FluxModelLoaderInvocation"] | components["schemas"]["FluxReduxInvocation"] | components["schemas"]["FluxTextEncoderInvocation"] | components["schemas"]["FluxVaeDecodeInvocation"] | components["schemas"]["FluxVaeEncodeInvocation"] | components["schemas"]["FreeUInvocation"] | components["schemas"]["GetMaskBoundingBoxInvocation"] | components["schemas"]["GroundingDinoInvocation"] | components["schemas"]["HEDEdgeDetectionInvocation"] | components["schemas"]["HeuristicResizeInvocation"] | 
components["schemas"]["IPAdapterInvocation"] | components["schemas"]["IdealSizeInvocation"] | components["schemas"]["ImageBatchInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["ImageChannelMultiplyInvocation"] | components["schemas"]["ImageChannelOffsetInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImageGenerator"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ImageInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ImageMaskToTensorInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["ImageNoiseInvocation"] | components["schemas"]["ImagePanelLayoutInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["IntegerBatchInvocation"] | components["schemas"]["IntegerCollectionInvocation"] | components["schemas"]["IntegerGenerator"] | components["schemas"]["IntegerInvocation"] | components["schemas"]["IntegerMathInvocation"] | components["schemas"]["InvertTensorMaskInvocation"] | components["schemas"]["InvokeAdjustImageHuePlusInvocation"] | components["schemas"]["InvokeEquivalentAchromaticLightnessInvocation"] | components["schemas"]["InvokeImageBlendInvocation"] | components["schemas"]["InvokeImageCompositorInvocation"] | 
components["schemas"]["InvokeImageDilateOrErodeInvocation"] | components["schemas"]["InvokeImageEnhanceInvocation"] | components["schemas"]["InvokeImageValueThresholdsInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["LaMaInfillInvocation"] | components["schemas"]["LatentsCollectionInvocation"] | components["schemas"]["LatentsInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["LineartAnimeEdgeDetectionInvocation"] | components["schemas"]["LineartEdgeDetectionInvocation"] | components["schemas"]["LlavaOnevisionVllmInvocation"] | components["schemas"]["LoRACollectionLoader"] | components["schemas"]["LoRALoaderInvocation"] | components["schemas"]["LoRASelectorInvocation"] | components["schemas"]["MLSDDetectionInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["MaskFromIDInvocation"] | components["schemas"]["MaskTensorToImageInvocation"] | components["schemas"]["MediaPipeFaceDetectionInvocation"] | components["schemas"]["MergeMetadataInvocation"] | components["schemas"]["MergeTilesToImageInvocation"] | components["schemas"]["MetadataFieldExtractorInvocation"] | components["schemas"]["MetadataFromImageInvocation"] | components["schemas"]["MetadataInvocation"] | components["schemas"]["MetadataItemInvocation"] | components["schemas"]["MetadataItemLinkedInvocation"] | components["schemas"]["MetadataToBoolCollectionInvocation"] | components["schemas"]["MetadataToBoolInvocation"] | components["schemas"]["MetadataToControlnetsInvocation"] | components["schemas"]["MetadataToFloatCollectionInvocation"] | components["schemas"]["MetadataToFloatInvocation"] | components["schemas"]["MetadataToIPAdaptersInvocation"] | components["schemas"]["MetadataToIntegerCollectionInvocation"] | 
components["schemas"]["MetadataToIntegerInvocation"] | components["schemas"]["MetadataToLorasCollectionInvocation"] | components["schemas"]["MetadataToLorasInvocation"] | components["schemas"]["MetadataToModelInvocation"] | components["schemas"]["MetadataToSDXLLorasInvocation"] | components["schemas"]["MetadataToSDXLModelInvocation"] | components["schemas"]["MetadataToSchedulerInvocation"] | components["schemas"]["MetadataToStringCollectionInvocation"] | components["schemas"]["MetadataToStringInvocation"] | components["schemas"]["MetadataToT2IAdaptersInvocation"] | components["schemas"]["MetadataToVAEInvocation"] | components["schemas"]["ModelIdentifierInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["NormalMapInvocation"] | components["schemas"]["PBRMapsInvocation"] | components["schemas"]["PairTileImageInvocation"] | components["schemas"]["PasteImageIntoBoundingBoxInvocation"] | components["schemas"]["PiDiNetEdgeDetectionInvocation"] | components["schemas"]["PromptTemplateInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["RandomFloatInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RectangleMaskInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["RoundInvocation"] | components["schemas"]["SD3DenoiseInvocation"] | components["schemas"]["SD3ImageToLatentsInvocation"] | components["schemas"]["SD3LatentsToImageInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["SDXLLoRACollectionLoader"] | components["schemas"]["SDXLLoRALoaderInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | 
components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["SaveImageInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["SchedulerInvocation"] | components["schemas"]["Sd3ModelLoaderInvocation"] | components["schemas"]["Sd3TextEncoderInvocation"] | components["schemas"]["SeamlessModeInvocation"] | components["schemas"]["SegmentAnythingInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["SpandrelImageToImageAutoscaleInvocation"] | components["schemas"]["SpandrelImageToImageInvocation"] | components["schemas"]["StringBatchInvocation"] | components["schemas"]["StringCollectionInvocation"] | components["schemas"]["StringGenerator"] | components["schemas"]["StringInvocation"] | components["schemas"]["StringJoinInvocation"] | components["schemas"]["StringJoinThreeInvocation"] | components["schemas"]["StringReplaceInvocation"] | components["schemas"]["StringSplitInvocation"] | components["schemas"]["StringSplitNegInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["T2IAdapterInvocation"] | components["schemas"]["TileToPropertiesInvocation"] | components["schemas"]["TiledMultiDiffusionDenoiseLatents"] | components["schemas"]["UnsharpMaskInvocation"] | components["schemas"]["VAELoaderInvocation"] | components["schemas"]["ZImageControlInvocation"] | components["schemas"]["ZImageDenoiseInvocation"] | components["schemas"]["ZImageDenoiseMetaInvocation"] | components["schemas"]["ZImageImageToLatentsInvocation"] | components["schemas"]["ZImageLatentsToImageInvocation"] | components["schemas"]["ZImageLoRACollectionLoader"] | components["schemas"]["ZImageLoRALoaderInvocation"] | components["schemas"]["ZImageModelLoaderInvocation"] | components["schemas"]["ZImageSeedVarianceEnhancerInvocation"] | components["schemas"]["ZImageTextEncoderInvocation"]; + invocation: components["schemas"]["AddInvocation"] | components["schemas"]["AlphaMaskToTensorInvocation"] | 
components["schemas"]["AnimaDenoiseInvocation"] | components["schemas"]["AnimaImageToLatentsInvocation"] | components["schemas"]["AnimaLatentsToImageInvocation"] | components["schemas"]["AnimaModelLoaderInvocation"] | components["schemas"]["AnimaTextEncoderInvocation"] | components["schemas"]["ApplyMaskTensorToImageInvocation"] | components["schemas"]["ApplyMaskToImageInvocation"] | components["schemas"]["BlankImageInvocation"] | components["schemas"]["BlendLatentsInvocation"] | components["schemas"]["BooleanCollectionInvocation"] | components["schemas"]["BooleanInvocation"] | components["schemas"]["BoundingBoxInvocation"] | components["schemas"]["CLIPSkipInvocation"] | components["schemas"]["CV2InfillInvocation"] | components["schemas"]["CalculateImageTilesEvenSplitInvocation"] | components["schemas"]["CalculateImageTilesInvocation"] | components["schemas"]["CalculateImageTilesMinimumOverlapInvocation"] | components["schemas"]["CannyEdgeDetectionInvocation"] | components["schemas"]["CanvasPasteBackInvocation"] | components["schemas"]["CanvasV2MaskAndCropInvocation"] | components["schemas"]["CenterPadCropInvocation"] | components["schemas"]["CogView4DenoiseInvocation"] | components["schemas"]["CogView4ImageToLatentsInvocation"] | components["schemas"]["CogView4LatentsToImageInvocation"] | components["schemas"]["CogView4ModelLoaderInvocation"] | components["schemas"]["CogView4TextEncoderInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["ColorInvocation"] | components["schemas"]["ColorMapInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["ConditioningCollectionInvocation"] | components["schemas"]["ConditioningInvocation"] | components["schemas"]["ContentShuffleInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["CoreMetadataInvocation"] | components["schemas"]["CreateDenoiseMaskInvocation"] | 
components["schemas"]["CreateGradientMaskInvocation"] | components["schemas"]["CropImageToBoundingBoxInvocation"] | components["schemas"]["CropLatentsCoreInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["DWOpenposeDetectionInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["DenoiseLatentsMetaInvocation"] | components["schemas"]["DepthAnythingDepthEstimationInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["ExpandMaskWithFadeInvocation"] | components["schemas"]["FLUXLoRACollectionLoader"] | components["schemas"]["FaceIdentifierInvocation"] | components["schemas"]["FaceMaskInvocation"] | components["schemas"]["FaceOffInvocation"] | components["schemas"]["FloatBatchInvocation"] | components["schemas"]["FloatCollectionInvocation"] | components["schemas"]["FloatGenerator"] | components["schemas"]["FloatInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["FloatMathInvocation"] | components["schemas"]["FloatToIntegerInvocation"] | components["schemas"]["Flux2DenoiseInvocation"] | components["schemas"]["Flux2KleinLoRACollectionLoader"] | components["schemas"]["Flux2KleinLoRALoaderInvocation"] | components["schemas"]["Flux2KleinModelLoaderInvocation"] | components["schemas"]["Flux2KleinTextEncoderInvocation"] | components["schemas"]["Flux2VaeDecodeInvocation"] | components["schemas"]["Flux2VaeEncodeInvocation"] | components["schemas"]["FluxControlLoRALoaderInvocation"] | components["schemas"]["FluxControlNetInvocation"] | components["schemas"]["FluxDenoiseInvocation"] | components["schemas"]["FluxDenoiseLatentsMetaInvocation"] | components["schemas"]["FluxFillInvocation"] | components["schemas"]["FluxIPAdapterInvocation"] | components["schemas"]["FluxKontextConcatenateImagesInvocation"] | components["schemas"]["FluxKontextInvocation"] | 
components["schemas"]["FluxLoRALoaderInvocation"] | components["schemas"]["FluxModelLoaderInvocation"] | components["schemas"]["FluxReduxInvocation"] | components["schemas"]["FluxTextEncoderInvocation"] | components["schemas"]["FluxVaeDecodeInvocation"] | components["schemas"]["FluxVaeEncodeInvocation"] | components["schemas"]["FreeUInvocation"] | components["schemas"]["GetMaskBoundingBoxInvocation"] | components["schemas"]["GroundingDinoInvocation"] | components["schemas"]["HEDEdgeDetectionInvocation"] | components["schemas"]["HeuristicResizeInvocation"] | components["schemas"]["IPAdapterInvocation"] | components["schemas"]["IdealSizeInvocation"] | components["schemas"]["ImageBatchInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["ImageChannelMultiplyInvocation"] | components["schemas"]["ImageChannelOffsetInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImageGenerator"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ImageInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ImageMaskToTensorInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["ImageNoiseInvocation"] | components["schemas"]["ImagePanelLayoutInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["InfillTileInvocation"] | 
components["schemas"]["IntegerBatchInvocation"] | components["schemas"]["IntegerCollectionInvocation"] | components["schemas"]["IntegerGenerator"] | components["schemas"]["IntegerInvocation"] | components["schemas"]["IntegerMathInvocation"] | components["schemas"]["InvertTensorMaskInvocation"] | components["schemas"]["InvokeAdjustImageHuePlusInvocation"] | components["schemas"]["InvokeEquivalentAchromaticLightnessInvocation"] | components["schemas"]["InvokeImageBlendInvocation"] | components["schemas"]["InvokeImageCompositorInvocation"] | components["schemas"]["InvokeImageDilateOrErodeInvocation"] | components["schemas"]["InvokeImageEnhanceInvocation"] | components["schemas"]["InvokeImageValueThresholdsInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["LaMaInfillInvocation"] | components["schemas"]["LatentsCollectionInvocation"] | components["schemas"]["LatentsInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["LineartAnimeEdgeDetectionInvocation"] | components["schemas"]["LineartEdgeDetectionInvocation"] | components["schemas"]["LlavaOnevisionVllmInvocation"] | components["schemas"]["LoRACollectionLoader"] | components["schemas"]["LoRALoaderInvocation"] | components["schemas"]["LoRASelectorInvocation"] | components["schemas"]["MLSDDetectionInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["MaskFromIDInvocation"] | components["schemas"]["MaskTensorToImageInvocation"] | components["schemas"]["MediaPipeFaceDetectionInvocation"] | components["schemas"]["MergeMetadataInvocation"] | components["schemas"]["MergeTilesToImageInvocation"] | components["schemas"]["MetadataFieldExtractorInvocation"] | components["schemas"]["MetadataFromImageInvocation"] | components["schemas"]["MetadataInvocation"] | 
components["schemas"]["MetadataItemInvocation"] | components["schemas"]["MetadataItemLinkedInvocation"] | components["schemas"]["MetadataToBoolCollectionInvocation"] | components["schemas"]["MetadataToBoolInvocation"] | components["schemas"]["MetadataToControlnetsInvocation"] | components["schemas"]["MetadataToFloatCollectionInvocation"] | components["schemas"]["MetadataToFloatInvocation"] | components["schemas"]["MetadataToIPAdaptersInvocation"] | components["schemas"]["MetadataToIntegerCollectionInvocation"] | components["schemas"]["MetadataToIntegerInvocation"] | components["schemas"]["MetadataToLorasCollectionInvocation"] | components["schemas"]["MetadataToLorasInvocation"] | components["schemas"]["MetadataToModelInvocation"] | components["schemas"]["MetadataToSDXLLorasInvocation"] | components["schemas"]["MetadataToSDXLModelInvocation"] | components["schemas"]["MetadataToSchedulerInvocation"] | components["schemas"]["MetadataToStringCollectionInvocation"] | components["schemas"]["MetadataToStringInvocation"] | components["schemas"]["MetadataToT2IAdaptersInvocation"] | components["schemas"]["MetadataToVAEInvocation"] | components["schemas"]["ModelIdentifierInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["NormalMapInvocation"] | components["schemas"]["PBRMapsInvocation"] | components["schemas"]["PairTileImageInvocation"] | components["schemas"]["PasteImageIntoBoundingBoxInvocation"] | components["schemas"]["PiDiNetEdgeDetectionInvocation"] | components["schemas"]["PromptTemplateInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["RandomFloatInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RectangleMaskInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | 
components["schemas"]["RoundInvocation"] | components["schemas"]["SD3DenoiseInvocation"] | components["schemas"]["SD3ImageToLatentsInvocation"] | components["schemas"]["SD3LatentsToImageInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["SDXLLoRACollectionLoader"] | components["schemas"]["SDXLLoRALoaderInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["SaveImageInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["SchedulerInvocation"] | components["schemas"]["Sd3ModelLoaderInvocation"] | components["schemas"]["Sd3TextEncoderInvocation"] | components["schemas"]["SeamlessModeInvocation"] | components["schemas"]["SegmentAnythingInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["SpandrelImageToImageAutoscaleInvocation"] | components["schemas"]["SpandrelImageToImageInvocation"] | components["schemas"]["StringBatchInvocation"] | components["schemas"]["StringCollectionInvocation"] | components["schemas"]["StringGenerator"] | components["schemas"]["StringInvocation"] | components["schemas"]["StringJoinInvocation"] | components["schemas"]["StringJoinThreeInvocation"] | components["schemas"]["StringReplaceInvocation"] | components["schemas"]["StringSplitInvocation"] | components["schemas"]["StringSplitNegInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["T2IAdapterInvocation"] | components["schemas"]["TileToPropertiesInvocation"] | components["schemas"]["TiledMultiDiffusionDenoiseLatents"] | components["schemas"]["UnsharpMaskInvocation"] | components["schemas"]["VAELoaderInvocation"] | components["schemas"]["ZImageControlInvocation"] | components["schemas"]["ZImageDenoiseInvocation"] | components["schemas"]["ZImageDenoiseMetaInvocation"] | 
components["schemas"]["ZImageImageToLatentsInvocation"] | components["schemas"]["ZImageLatentsToImageInvocation"] | components["schemas"]["ZImageLoRACollectionLoader"] | components["schemas"]["ZImageLoRALoaderInvocation"] | components["schemas"]["ZImageModelLoaderInvocation"] | components["schemas"]["ZImageSeedVarianceEnhancerInvocation"] | components["schemas"]["ZImageTextEncoderInvocation"]; /** * Invocation Source Id * @description The ID of the prepared invocation's source node @@ -14616,7 +14696,7 @@ export type components = { * Invocation * @description The ID of the invocation */ - invocation: components["schemas"]["AddInvocation"] | components["schemas"]["AlphaMaskToTensorInvocation"] | components["schemas"]["AnimaDenoiseInvocation"] | components["schemas"]["AnimaLatentsToImageInvocation"] | components["schemas"]["AnimaModelLoaderInvocation"] | components["schemas"]["AnimaTextEncoderInvocation"] | components["schemas"]["ApplyMaskTensorToImageInvocation"] | components["schemas"]["ApplyMaskToImageInvocation"] | components["schemas"]["BlankImageInvocation"] | components["schemas"]["BlendLatentsInvocation"] | components["schemas"]["BooleanCollectionInvocation"] | components["schemas"]["BooleanInvocation"] | components["schemas"]["BoundingBoxInvocation"] | components["schemas"]["CLIPSkipInvocation"] | components["schemas"]["CV2InfillInvocation"] | components["schemas"]["CalculateImageTilesEvenSplitInvocation"] | components["schemas"]["CalculateImageTilesInvocation"] | components["schemas"]["CalculateImageTilesMinimumOverlapInvocation"] | components["schemas"]["CannyEdgeDetectionInvocation"] | components["schemas"]["CanvasPasteBackInvocation"] | components["schemas"]["CanvasV2MaskAndCropInvocation"] | components["schemas"]["CenterPadCropInvocation"] | components["schemas"]["CogView4DenoiseInvocation"] | components["schemas"]["CogView4ImageToLatentsInvocation"] | components["schemas"]["CogView4LatentsToImageInvocation"] | 
components["schemas"]["CogView4ModelLoaderInvocation"] | components["schemas"]["CogView4TextEncoderInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["ColorInvocation"] | components["schemas"]["ColorMapInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["ConditioningCollectionInvocation"] | components["schemas"]["ConditioningInvocation"] | components["schemas"]["ContentShuffleInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["CoreMetadataInvocation"] | components["schemas"]["CreateDenoiseMaskInvocation"] | components["schemas"]["CreateGradientMaskInvocation"] | components["schemas"]["CropImageToBoundingBoxInvocation"] | components["schemas"]["CropLatentsCoreInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["DWOpenposeDetectionInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["DenoiseLatentsMetaInvocation"] | components["schemas"]["DepthAnythingDepthEstimationInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["ExpandMaskWithFadeInvocation"] | components["schemas"]["FLUXLoRACollectionLoader"] | components["schemas"]["FaceIdentifierInvocation"] | components["schemas"]["FaceMaskInvocation"] | components["schemas"]["FaceOffInvocation"] | components["schemas"]["FloatBatchInvocation"] | components["schemas"]["FloatCollectionInvocation"] | components["schemas"]["FloatGenerator"] | components["schemas"]["FloatInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["FloatMathInvocation"] | components["schemas"]["FloatToIntegerInvocation"] | components["schemas"]["Flux2DenoiseInvocation"] | components["schemas"]["Flux2KleinLoRACollectionLoader"] | components["schemas"]["Flux2KleinLoRALoaderInvocation"] | 
components["schemas"]["Flux2KleinModelLoaderInvocation"] | components["schemas"]["Flux2KleinTextEncoderInvocation"] | components["schemas"]["Flux2VaeDecodeInvocation"] | components["schemas"]["Flux2VaeEncodeInvocation"] | components["schemas"]["FluxControlLoRALoaderInvocation"] | components["schemas"]["FluxControlNetInvocation"] | components["schemas"]["FluxDenoiseInvocation"] | components["schemas"]["FluxDenoiseLatentsMetaInvocation"] | components["schemas"]["FluxFillInvocation"] | components["schemas"]["FluxIPAdapterInvocation"] | components["schemas"]["FluxKontextConcatenateImagesInvocation"] | components["schemas"]["FluxKontextInvocation"] | components["schemas"]["FluxLoRALoaderInvocation"] | components["schemas"]["FluxModelLoaderInvocation"] | components["schemas"]["FluxReduxInvocation"] | components["schemas"]["FluxTextEncoderInvocation"] | components["schemas"]["FluxVaeDecodeInvocation"] | components["schemas"]["FluxVaeEncodeInvocation"] | components["schemas"]["FreeUInvocation"] | components["schemas"]["GetMaskBoundingBoxInvocation"] | components["schemas"]["GroundingDinoInvocation"] | components["schemas"]["HEDEdgeDetectionInvocation"] | components["schemas"]["HeuristicResizeInvocation"] | components["schemas"]["IPAdapterInvocation"] | components["schemas"]["IdealSizeInvocation"] | components["schemas"]["ImageBatchInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["ImageChannelMultiplyInvocation"] | components["schemas"]["ImageChannelOffsetInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImageGenerator"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ImageInvocation"] | components["schemas"]["ImageLerpInvocation"] | 
components["schemas"]["ImageMaskToTensorInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["ImageNoiseInvocation"] | components["schemas"]["ImagePanelLayoutInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["IntegerBatchInvocation"] | components["schemas"]["IntegerCollectionInvocation"] | components["schemas"]["IntegerGenerator"] | components["schemas"]["IntegerInvocation"] | components["schemas"]["IntegerMathInvocation"] | components["schemas"]["InvertTensorMaskInvocation"] | components["schemas"]["InvokeAdjustImageHuePlusInvocation"] | components["schemas"]["InvokeEquivalentAchromaticLightnessInvocation"] | components["schemas"]["InvokeImageBlendInvocation"] | components["schemas"]["InvokeImageCompositorInvocation"] | components["schemas"]["InvokeImageDilateOrErodeInvocation"] | components["schemas"]["InvokeImageEnhanceInvocation"] | components["schemas"]["InvokeImageValueThresholdsInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["LaMaInfillInvocation"] | components["schemas"]["LatentsCollectionInvocation"] | components["schemas"]["LatentsInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["LineartAnimeEdgeDetectionInvocation"] | components["schemas"]["LineartEdgeDetectionInvocation"] | components["schemas"]["LlavaOnevisionVllmInvocation"] | components["schemas"]["LoRACollectionLoader"] | components["schemas"]["LoRALoaderInvocation"] | components["schemas"]["LoRASelectorInvocation"] | 
components["schemas"]["MLSDDetectionInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["MaskFromIDInvocation"] | components["schemas"]["MaskTensorToImageInvocation"] | components["schemas"]["MediaPipeFaceDetectionInvocation"] | components["schemas"]["MergeMetadataInvocation"] | components["schemas"]["MergeTilesToImageInvocation"] | components["schemas"]["MetadataFieldExtractorInvocation"] | components["schemas"]["MetadataFromImageInvocation"] | components["schemas"]["MetadataInvocation"] | components["schemas"]["MetadataItemInvocation"] | components["schemas"]["MetadataItemLinkedInvocation"] | components["schemas"]["MetadataToBoolCollectionInvocation"] | components["schemas"]["MetadataToBoolInvocation"] | components["schemas"]["MetadataToControlnetsInvocation"] | components["schemas"]["MetadataToFloatCollectionInvocation"] | components["schemas"]["MetadataToFloatInvocation"] | components["schemas"]["MetadataToIPAdaptersInvocation"] | components["schemas"]["MetadataToIntegerCollectionInvocation"] | components["schemas"]["MetadataToIntegerInvocation"] | components["schemas"]["MetadataToLorasCollectionInvocation"] | components["schemas"]["MetadataToLorasInvocation"] | components["schemas"]["MetadataToModelInvocation"] | components["schemas"]["MetadataToSDXLLorasInvocation"] | components["schemas"]["MetadataToSDXLModelInvocation"] | components["schemas"]["MetadataToSchedulerInvocation"] | components["schemas"]["MetadataToStringCollectionInvocation"] | components["schemas"]["MetadataToStringInvocation"] | components["schemas"]["MetadataToT2IAdaptersInvocation"] | components["schemas"]["MetadataToVAEInvocation"] | components["schemas"]["ModelIdentifierInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["NoiseInvocation"] | 
components["schemas"]["NormalMapInvocation"] | components["schemas"]["PBRMapsInvocation"] | components["schemas"]["PairTileImageInvocation"] | components["schemas"]["PasteImageIntoBoundingBoxInvocation"] | components["schemas"]["PiDiNetEdgeDetectionInvocation"] | components["schemas"]["PromptTemplateInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["RandomFloatInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RectangleMaskInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["RoundInvocation"] | components["schemas"]["SD3DenoiseInvocation"] | components["schemas"]["SD3ImageToLatentsInvocation"] | components["schemas"]["SD3LatentsToImageInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["SDXLLoRACollectionLoader"] | components["schemas"]["SDXLLoRALoaderInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["SaveImageInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["SchedulerInvocation"] | components["schemas"]["Sd3ModelLoaderInvocation"] | components["schemas"]["Sd3TextEncoderInvocation"] | components["schemas"]["SeamlessModeInvocation"] | components["schemas"]["SegmentAnythingInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["SpandrelImageToImageAutoscaleInvocation"] | components["schemas"]["SpandrelImageToImageInvocation"] | components["schemas"]["StringBatchInvocation"] | components["schemas"]["StringCollectionInvocation"] | components["schemas"]["StringGenerator"] | components["schemas"]["StringInvocation"] | components["schemas"]["StringJoinInvocation"] 
| components["schemas"]["StringJoinThreeInvocation"] | components["schemas"]["StringReplaceInvocation"] | components["schemas"]["StringSplitInvocation"] | components["schemas"]["StringSplitNegInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["T2IAdapterInvocation"] | components["schemas"]["TileToPropertiesInvocation"] | components["schemas"]["TiledMultiDiffusionDenoiseLatents"] | components["schemas"]["UnsharpMaskInvocation"] | components["schemas"]["VAELoaderInvocation"] | components["schemas"]["ZImageControlInvocation"] | components["schemas"]["ZImageDenoiseInvocation"] | components["schemas"]["ZImageDenoiseMetaInvocation"] | components["schemas"]["ZImageImageToLatentsInvocation"] | components["schemas"]["ZImageLatentsToImageInvocation"] | components["schemas"]["ZImageLoRACollectionLoader"] | components["schemas"]["ZImageLoRALoaderInvocation"] | components["schemas"]["ZImageModelLoaderInvocation"] | components["schemas"]["ZImageSeedVarianceEnhancerInvocation"] | components["schemas"]["ZImageTextEncoderInvocation"]; + invocation: components["schemas"]["AddInvocation"] | components["schemas"]["AlphaMaskToTensorInvocation"] | components["schemas"]["AnimaDenoiseInvocation"] | components["schemas"]["AnimaImageToLatentsInvocation"] | components["schemas"]["AnimaLatentsToImageInvocation"] | components["schemas"]["AnimaModelLoaderInvocation"] | components["schemas"]["AnimaTextEncoderInvocation"] | components["schemas"]["ApplyMaskTensorToImageInvocation"] | components["schemas"]["ApplyMaskToImageInvocation"] | components["schemas"]["BlankImageInvocation"] | components["schemas"]["BlendLatentsInvocation"] | components["schemas"]["BooleanCollectionInvocation"] | components["schemas"]["BooleanInvocation"] | components["schemas"]["BoundingBoxInvocation"] | components["schemas"]["CLIPSkipInvocation"] | components["schemas"]["CV2InfillInvocation"] | components["schemas"]["CalculateImageTilesEvenSplitInvocation"] | 
components["schemas"]["CalculateImageTilesInvocation"] | components["schemas"]["CalculateImageTilesMinimumOverlapInvocation"] | components["schemas"]["CannyEdgeDetectionInvocation"] | components["schemas"]["CanvasPasteBackInvocation"] | components["schemas"]["CanvasV2MaskAndCropInvocation"] | components["schemas"]["CenterPadCropInvocation"] | components["schemas"]["CogView4DenoiseInvocation"] | components["schemas"]["CogView4ImageToLatentsInvocation"] | components["schemas"]["CogView4LatentsToImageInvocation"] | components["schemas"]["CogView4ModelLoaderInvocation"] | components["schemas"]["CogView4TextEncoderInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["ColorInvocation"] | components["schemas"]["ColorMapInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["ConditioningCollectionInvocation"] | components["schemas"]["ConditioningInvocation"] | components["schemas"]["ContentShuffleInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["CoreMetadataInvocation"] | components["schemas"]["CreateDenoiseMaskInvocation"] | components["schemas"]["CreateGradientMaskInvocation"] | components["schemas"]["CropImageToBoundingBoxInvocation"] | components["schemas"]["CropLatentsCoreInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["DWOpenposeDetectionInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["DenoiseLatentsMetaInvocation"] | components["schemas"]["DepthAnythingDepthEstimationInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["ExpandMaskWithFadeInvocation"] | components["schemas"]["FLUXLoRACollectionLoader"] | components["schemas"]["FaceIdentifierInvocation"] | components["schemas"]["FaceMaskInvocation"] | 
components["schemas"]["FaceOffInvocation"] | components["schemas"]["FloatBatchInvocation"] | components["schemas"]["FloatCollectionInvocation"] | components["schemas"]["FloatGenerator"] | components["schemas"]["FloatInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["FloatMathInvocation"] | components["schemas"]["FloatToIntegerInvocation"] | components["schemas"]["Flux2DenoiseInvocation"] | components["schemas"]["Flux2KleinLoRACollectionLoader"] | components["schemas"]["Flux2KleinLoRALoaderInvocation"] | components["schemas"]["Flux2KleinModelLoaderInvocation"] | components["schemas"]["Flux2KleinTextEncoderInvocation"] | components["schemas"]["Flux2VaeDecodeInvocation"] | components["schemas"]["Flux2VaeEncodeInvocation"] | components["schemas"]["FluxControlLoRALoaderInvocation"] | components["schemas"]["FluxControlNetInvocation"] | components["schemas"]["FluxDenoiseInvocation"] | components["schemas"]["FluxDenoiseLatentsMetaInvocation"] | components["schemas"]["FluxFillInvocation"] | components["schemas"]["FluxIPAdapterInvocation"] | components["schemas"]["FluxKontextConcatenateImagesInvocation"] | components["schemas"]["FluxKontextInvocation"] | components["schemas"]["FluxLoRALoaderInvocation"] | components["schemas"]["FluxModelLoaderInvocation"] | components["schemas"]["FluxReduxInvocation"] | components["schemas"]["FluxTextEncoderInvocation"] | components["schemas"]["FluxVaeDecodeInvocation"] | components["schemas"]["FluxVaeEncodeInvocation"] | components["schemas"]["FreeUInvocation"] | components["schemas"]["GetMaskBoundingBoxInvocation"] | components["schemas"]["GroundingDinoInvocation"] | components["schemas"]["HEDEdgeDetectionInvocation"] | components["schemas"]["HeuristicResizeInvocation"] | components["schemas"]["IPAdapterInvocation"] | components["schemas"]["IdealSizeInvocation"] | components["schemas"]["ImageBatchInvocation"] | components["schemas"]["ImageBlurInvocation"] | 
components["schemas"]["ImageChannelInvocation"] | components["schemas"]["ImageChannelMultiplyInvocation"] | components["schemas"]["ImageChannelOffsetInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImageGenerator"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ImageInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ImageMaskToTensorInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["ImageNoiseInvocation"] | components["schemas"]["ImagePanelLayoutInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["IntegerBatchInvocation"] | components["schemas"]["IntegerCollectionInvocation"] | components["schemas"]["IntegerGenerator"] | components["schemas"]["IntegerInvocation"] | components["schemas"]["IntegerMathInvocation"] | components["schemas"]["InvertTensorMaskInvocation"] | components["schemas"]["InvokeAdjustImageHuePlusInvocation"] | components["schemas"]["InvokeEquivalentAchromaticLightnessInvocation"] | components["schemas"]["InvokeImageBlendInvocation"] | components["schemas"]["InvokeImageCompositorInvocation"] | components["schemas"]["InvokeImageDilateOrErodeInvocation"] | components["schemas"]["InvokeImageEnhanceInvocation"] | components["schemas"]["InvokeImageValueThresholdsInvocation"] | components["schemas"]["IterateInvocation"] | 
components["schemas"]["LaMaInfillInvocation"] | components["schemas"]["LatentsCollectionInvocation"] | components["schemas"]["LatentsInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["LineartAnimeEdgeDetectionInvocation"] | components["schemas"]["LineartEdgeDetectionInvocation"] | components["schemas"]["LlavaOnevisionVllmInvocation"] | components["schemas"]["LoRACollectionLoader"] | components["schemas"]["LoRALoaderInvocation"] | components["schemas"]["LoRASelectorInvocation"] | components["schemas"]["MLSDDetectionInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["MaskFromIDInvocation"] | components["schemas"]["MaskTensorToImageInvocation"] | components["schemas"]["MediaPipeFaceDetectionInvocation"] | components["schemas"]["MergeMetadataInvocation"] | components["schemas"]["MergeTilesToImageInvocation"] | components["schemas"]["MetadataFieldExtractorInvocation"] | components["schemas"]["MetadataFromImageInvocation"] | components["schemas"]["MetadataInvocation"] | components["schemas"]["MetadataItemInvocation"] | components["schemas"]["MetadataItemLinkedInvocation"] | components["schemas"]["MetadataToBoolCollectionInvocation"] | components["schemas"]["MetadataToBoolInvocation"] | components["schemas"]["MetadataToControlnetsInvocation"] | components["schemas"]["MetadataToFloatCollectionInvocation"] | components["schemas"]["MetadataToFloatInvocation"] | components["schemas"]["MetadataToIPAdaptersInvocation"] | components["schemas"]["MetadataToIntegerCollectionInvocation"] | components["schemas"]["MetadataToIntegerInvocation"] | components["schemas"]["MetadataToLorasCollectionInvocation"] | components["schemas"]["MetadataToLorasInvocation"] | components["schemas"]["MetadataToModelInvocation"] | components["schemas"]["MetadataToSDXLLorasInvocation"] 
| components["schemas"]["MetadataToSDXLModelInvocation"] | components["schemas"]["MetadataToSchedulerInvocation"] | components["schemas"]["MetadataToStringCollectionInvocation"] | components["schemas"]["MetadataToStringInvocation"] | components["schemas"]["MetadataToT2IAdaptersInvocation"] | components["schemas"]["MetadataToVAEInvocation"] | components["schemas"]["ModelIdentifierInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["NormalMapInvocation"] | components["schemas"]["PBRMapsInvocation"] | components["schemas"]["PairTileImageInvocation"] | components["schemas"]["PasteImageIntoBoundingBoxInvocation"] | components["schemas"]["PiDiNetEdgeDetectionInvocation"] | components["schemas"]["PromptTemplateInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["RandomFloatInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RectangleMaskInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["RoundInvocation"] | components["schemas"]["SD3DenoiseInvocation"] | components["schemas"]["SD3ImageToLatentsInvocation"] | components["schemas"]["SD3LatentsToImageInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["SDXLLoRACollectionLoader"] | components["schemas"]["SDXLLoRALoaderInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["SaveImageInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["SchedulerInvocation"] | components["schemas"]["Sd3ModelLoaderInvocation"] | components["schemas"]["Sd3TextEncoderInvocation"] | 
components["schemas"]["SeamlessModeInvocation"] | components["schemas"]["SegmentAnythingInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["SpandrelImageToImageAutoscaleInvocation"] | components["schemas"]["SpandrelImageToImageInvocation"] | components["schemas"]["StringBatchInvocation"] | components["schemas"]["StringCollectionInvocation"] | components["schemas"]["StringGenerator"] | components["schemas"]["StringInvocation"] | components["schemas"]["StringJoinInvocation"] | components["schemas"]["StringJoinThreeInvocation"] | components["schemas"]["StringReplaceInvocation"] | components["schemas"]["StringSplitInvocation"] | components["schemas"]["StringSplitNegInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["T2IAdapterInvocation"] | components["schemas"]["TileToPropertiesInvocation"] | components["schemas"]["TiledMultiDiffusionDenoiseLatents"] | components["schemas"]["UnsharpMaskInvocation"] | components["schemas"]["VAELoaderInvocation"] | components["schemas"]["ZImageControlInvocation"] | components["schemas"]["ZImageDenoiseInvocation"] | components["schemas"]["ZImageDenoiseMetaInvocation"] | components["schemas"]["ZImageImageToLatentsInvocation"] | components["schemas"]["ZImageLatentsToImageInvocation"] | components["schemas"]["ZImageLoRACollectionLoader"] | components["schemas"]["ZImageLoRALoaderInvocation"] | components["schemas"]["ZImageModelLoaderInvocation"] | components["schemas"]["ZImageSeedVarianceEnhancerInvocation"] | components["schemas"]["ZImageTextEncoderInvocation"]; /** * Invocation Source Id * @description The ID of the prepared invocation's source node diff --git a/invokeai/frontend/web/src/services/api/types.ts b/invokeai/frontend/web/src/services/api/types.ts index 5d56c346f87..4d5f6841aab 100644 --- a/invokeai/frontend/web/src/services/api/types.ts +++ b/invokeai/frontend/web/src/services/api/types.ts @@ -196,6 +196,13 @@ export const isFlux2VAEModelConfig = (config: 
AnyModelConfig, excludeSubmodels?: ); }; +export const isAnimaVAEModelConfig = (config: AnyModelConfig, excludeSubmodels?: boolean): config is VAEModelConfig => { + return ( + (config.type === 'vae' || (!excludeSubmodels && config.type === 'main' && checkSubmodels(['vae'], config))) && + config.base === 'anima' + ); +}; + export const isControlNetModelConfig = (config: AnyModelConfig): config is ControlNetModelConfig => { return config.type === 'controlnet'; }; From e422eb73150400687a5f2a11cf53b3b03a2d3783 Mon Sep 17 00:00:00 2001 From: Your Name Date: Thu, 12 Mar 2026 03:07:50 -0400 Subject: [PATCH 04/14] regional guidance --- invokeai/app/invocations/anima_denoise.py | 278 +++++++++++++----- .../app/invocations/anima_text_encoder.py | 9 +- invokeai/app/invocations/fields.py | 5 + .../backend/anima/anima_transformer_patch.py | 106 +++++++ invokeai/backend/anima/conditioning_data.py | 36 ++- invokeai/backend/anima/regional_prompting.py | 174 +++++++++++ .../controlLayers/store/validators.ts | 10 + .../web/src/features/modelManagerV2/models.ts | 2 +- .../nodes/util/graph/generation/addRegions.ts | 61 +++- .../util/graph/generation/buildAnimaGraph.ts | 50 +++- .../frontend/web/src/services/api/schema.ts | 16 +- 11 files changed, 660 insertions(+), 87 deletions(-) create mode 100644 invokeai/backend/anima/anima_transformer_patch.py create mode 100644 invokeai/backend/anima/regional_prompting.py diff --git a/invokeai/app/invocations/anima_denoise.py b/invokeai/app/invocations/anima_denoise.py index bef3249cc4b..8b289736c22 100644 --- a/invokeai/app/invocations/anima_denoise.py +++ b/invokeai/app/invocations/anima_denoise.py @@ -39,13 +39,16 @@ from invokeai.app.invocations.model import TransformerField from invokeai.app.invocations.primitives import LatentsOutput from invokeai.app.services.shared.invocation_context import InvocationContext +from invokeai.backend.anima.anima_transformer_patch import patch_anima_for_regional_prompting +from 
invokeai.backend.anima.conditioning_data import AnimaRegionalTextConditioning, AnimaTextConditioning +from invokeai.backend.anima.regional_prompting import AnimaRegionalPromptingExtension from invokeai.backend.flux.schedulers import ANIMA_SCHEDULER_LABELS, ANIMA_SCHEDULER_MAP, ANIMA_SCHEDULER_NAME_VALUES from invokeai.backend.model_manager.taxonomy import BaseModelType from invokeai.backend.rectified_flow.rectified_flow_inpaint_extension import ( RectifiedFlowInpaintExtension, assert_broadcastable, ) -from invokeai.backend.stable_diffusion.diffusion.conditioning_data import AnimaConditioningInfo +from invokeai.backend.stable_diffusion.diffusion.conditioning_data import AnimaConditioningInfo, Range from invokeai.backend.stable_diffusion.diffusers_pipeline import PipelineIntermediateState from invokeai.backend.util.devices import TorchDevice @@ -156,7 +159,7 @@ def merge_intermediate_latents_with_init_latents( title="Denoise - Anima", tags=["image", "anima"], category="image", - version="1.1.0", + version="1.2.0", classification=Classification.Prototype, ) class AnimaDenoiseInvocation(BaseInvocation): @@ -182,10 +185,10 @@ class AnimaDenoiseInvocation(BaseInvocation): transformer: TransformerField = InputField( description="Anima transformer model.", input=Input.Connection, title="Transformer" ) - positive_conditioning: AnimaConditioningField = InputField( + positive_conditioning: AnimaConditioningField | list[AnimaConditioningField] = InputField( description=FieldDescriptions.positive_cond, input=Input.Connection ) - negative_conditioning: AnimaConditioningField | None = InputField( + negative_conditioning: AnimaConditioningField | list[AnimaConditioningField] | None = InputField( default=None, description=FieldDescriptions.negative_cond, input=Input.Connection ) guidance_scale: float = InputField( @@ -286,32 +289,143 @@ def _load_conditioning( assert isinstance(cond_info, AnimaConditioningInfo) return cond_info.to(dtype=dtype, device=device) + def 
_load_text_conditionings( + self, + context: InvocationContext, + cond_field: AnimaConditioningField | list[AnimaConditioningField], + img_token_height: int, + img_token_width: int, + dtype: torch.dtype, + device: torch.device, + ) -> list[AnimaTextConditioning]: + """Load Anima text conditioning with optional regional masks. + + Args: + context: The invocation context. + cond_field: Single conditioning field or list of fields. + img_token_height: Height of the image token grid (H // patch_size). + img_token_width: Width of the image token grid (W // patch_size). + dtype: Target dtype. + device: Target device. + + Returns: + List of AnimaTextConditioning objects with optional masks. + """ + cond_list = cond_field if isinstance(cond_field, list) else [cond_field] + + text_conditionings: list[AnimaTextConditioning] = [] + for cond in cond_list: + cond_info = self._load_conditioning(context, cond, dtype, device) + + # Load the mask, if provided + mask: torch.Tensor | None = None + if cond.mask is not None: + mask = context.tensors.load(cond.mask.tensor_name) + mask = mask.to(device=device) + mask = AnimaRegionalPromptingExtension.preprocess_regional_prompt_mask( + mask, img_token_height, img_token_width, dtype, device + ) + + text_conditionings.append( + AnimaTextConditioning( + qwen3_embeds=cond_info.qwen3_embeds, + t5xxl_ids=cond_info.t5xxl_ids, + t5xxl_weights=cond_info.t5xxl_weights, + mask=mask, + ) + ) + + return text_conditionings + + def _run_llm_adapter_for_regions( + self, + transformer, + text_conditionings: list[AnimaTextConditioning], + dtype: torch.dtype, + ) -> AnimaRegionalTextConditioning: + """Run the LLM Adapter separately for each regional conditioning and concatenate. + + Args: + transformer: The AnimaTransformer instance (must be on device). + text_conditionings: List of per-region conditioning data. + dtype: Inference dtype. + + Returns: + AnimaRegionalTextConditioning with concatenated context and masks. 
+ """ + context_embeds_list: list[torch.Tensor] = [] + context_ranges: list[Range] = [] + image_masks: list[torch.Tensor | None] = [] + cur_len = 0 + + for tc in text_conditionings: + qwen3_embeds = tc.qwen3_embeds.unsqueeze(0) # (1, seq_len, 1024) + t5xxl_ids = tc.t5xxl_ids.unsqueeze(0) # (1, seq_len) + t5xxl_weights = None + if tc.t5xxl_weights is not None: + t5xxl_weights = tc.t5xxl_weights.unsqueeze(0).unsqueeze(-1) # (1, seq_len, 1) + + # Run the LLM Adapter to produce context for this region + context = transformer.preprocess_text_embeds( + qwen3_embeds.to(dtype=dtype), + t5xxl_ids, + t5xxl_weights=t5xxl_weights.to(dtype=dtype) if t5xxl_weights is not None else None, + ) + # context shape: (1, 512, 1024) — squeeze batch dim + context_2d = context.squeeze(0) # (512, 1024) + + context_embeds_list.append(context_2d) + context_ranges.append(Range(start=cur_len, end=cur_len + context_2d.shape[0])) + image_masks.append(tc.mask) + cur_len += context_2d.shape[0] + + concatenated_context = torch.cat(context_embeds_list, dim=0) + + return AnimaRegionalTextConditioning( + context_embeds=concatenated_context, + image_masks=image_masks, + context_ranges=context_ranges, + ) + def _run_diffusion(self, context: InvocationContext) -> torch.Tensor: device = TorchDevice.choose_torch_device() inference_dtype = TorchDevice.choose_bfloat16_safe_dtype(device) transformer_info = context.models.load(self.transformer.transformer) - # Load positive conditioning - pos_cond = self._load_conditioning(context, self.positive_conditioning, inference_dtype, device) - pos_qwen3_embeds = pos_cond.qwen3_embeds.unsqueeze(0) # Add batch dim: (1, seq_len, 1024) - pos_t5xxl_ids = pos_cond.t5xxl_ids.unsqueeze(0) # Add batch dim: (1, seq_len) - pos_t5xxl_weights = None - if pos_cond.t5xxl_weights is not None: - pos_t5xxl_weights = pos_cond.t5xxl_weights.unsqueeze(0).unsqueeze(-1) # (1, seq_len, 1) + # Compute image token grid dimensions for regional prompting + # Anima: 8x VAE compression, 2x patch 
size → 16x total + patch_size = 2 + latent_height = self.height // ANIMA_LATENT_SCALE_FACTOR + latent_width = self.width // ANIMA_LATENT_SCALE_FACTOR + img_token_height = latent_height // patch_size + img_token_width = latent_width // patch_size + img_seq_len = img_token_height * img_token_width + + # Load positive conditioning with optional regional masks + pos_text_conditionings = self._load_text_conditionings( + context=context, + cond_field=self.positive_conditioning, + img_token_height=img_token_height, + img_token_width=img_token_width, + dtype=inference_dtype, + device=device, + ) + has_regional = len(pos_text_conditionings) > 1 or any(tc.mask is not None for tc in pos_text_conditionings) # Load negative conditioning if CFG is enabled do_cfg = not math.isclose(self.guidance_scale, 1.0) and self.negative_conditioning is not None - neg_qwen3_embeds = None - neg_t5xxl_ids = None - neg_t5xxl_weights = None + neg_text_conditionings: list[AnimaTextConditioning] | None = None if do_cfg: assert self.negative_conditioning is not None - neg_cond = self._load_conditioning(context, self.negative_conditioning, inference_dtype, device) - neg_qwen3_embeds = neg_cond.qwen3_embeds.unsqueeze(0) - neg_t5xxl_ids = neg_cond.t5xxl_ids.unsqueeze(0) - if neg_cond.t5xxl_weights is not None: - neg_t5xxl_weights = neg_cond.t5xxl_weights.unsqueeze(0).unsqueeze(-1) + neg_text_conditionings = self._load_text_conditionings( + context=context, + cond_field=self.negative_conditioning, + img_token_height=img_token_height, + img_token_width=img_token_width, + dtype=inference_dtype, + device=device, + ) # Generate sigma schedule sigmas = self._get_sigmas(self.steps) @@ -339,7 +453,6 @@ def _run_diffusion(self, context: InvocationContext) -> torch.Tensor: # Prepare input latents if init_latents is not None: if self.add_noise: - # Noise the init_latents for img2img: latents = s_0 * noise + (1 - s_0) * init_latents s_0 = sigmas[0] latents = s_0 * noise + (1.0 - s_0) * init_latents else: @@ 
-350,12 +463,9 @@ def _run_diffusion(self, context: InvocationContext) -> torch.Tensor: latents = noise if total_steps <= 0: - return latents.squeeze(2) # Remove temporal dim for output + return latents.squeeze(2) - # Prepare inpaint extension (operates on squeezed 4D latents) - # Uses AnimaInpaintExtension which corrects for the time-SNR shift: - # - Linear t for gradient mask thresholding (correct progressive reveal) - # - Shifted sigma for noise mixing (matches denoiser's noise level) + # Prepare inpaint extension inpaint_mask = self._prep_inpaint_mask(context, latents.squeeze(2)) inpaint_extension: AnimaInpaintExtension | None = None if inpaint_mask is not None: @@ -390,6 +500,70 @@ def _run_diffusion(self, context: InvocationContext) -> torch.Tensor: with ExitStack() as exit_stack: (cached_weights, transformer) = exit_stack.enter_context(transformer_info.model_on_device()) + # Run LLM Adapter for each regional conditioning to produce context vectors. + # This must happen with the transformer on device since it uses the adapter weights. 
+ if has_regional: + pos_regional = self._run_llm_adapter_for_regions(transformer, pos_text_conditionings, inference_dtype) + pos_context = pos_regional.context_embeds.unsqueeze(0) # (1, total_ctx_len, 1024) + + # Build regional prompting extension with cross-attention mask + regional_extension = AnimaRegionalPromptingExtension.from_regional_conditioning( + pos_regional, img_seq_len + ) + + # For negative, concatenate all regions without masking (matches Z-Image behavior) + neg_context = None + if do_cfg and neg_text_conditionings is not None: + neg_regional = self._run_llm_adapter_for_regions( + transformer, neg_text_conditionings, inference_dtype + ) + neg_context = neg_regional.context_embeds.unsqueeze(0) + else: + # Single conditioning — run LLM Adapter via normal forward path + tc = pos_text_conditionings[0] + pos_qwen3_embeds = tc.qwen3_embeds.unsqueeze(0) + pos_t5xxl_ids = tc.t5xxl_ids.unsqueeze(0) + pos_t5xxl_weights = None + if tc.t5xxl_weights is not None: + pos_t5xxl_weights = tc.t5xxl_weights.unsqueeze(0).unsqueeze(-1) + + # Pre-compute context via LLM Adapter + pos_context = transformer.preprocess_text_embeds( + pos_qwen3_embeds.to(dtype=inference_dtype), + pos_t5xxl_ids, + t5xxl_weights=pos_t5xxl_weights.to(dtype=inference_dtype) if pos_t5xxl_weights is not None else None, + ) + + neg_context = None + if do_cfg and neg_text_conditionings is not None: + ntc = neg_text_conditionings[0] + neg_qwen3 = ntc.qwen3_embeds.unsqueeze(0) + neg_ids = ntc.t5xxl_ids.unsqueeze(0) + neg_weights = None + if ntc.t5xxl_weights is not None: + neg_weights = ntc.t5xxl_weights.unsqueeze(0).unsqueeze(-1) + neg_context = transformer.preprocess_text_embeds( + neg_qwen3.to(dtype=inference_dtype), + neg_ids, + t5xxl_weights=neg_weights.to(dtype=inference_dtype) if neg_weights is not None else None, + ) + + regional_extension = None + + # Apply regional prompting patch if we have regional masks + exit_stack.enter_context( + patch_anima_for_regional_prompting(transformer, 
regional_extension) + ) + + # Helper to run transformer with pre-computed context (bypasses LLM Adapter) + def _run_transformer(ctx: torch.Tensor) -> torch.Tensor: + return transformer( + x=latents.to(transformer.dtype if hasattr(transformer, 'dtype') else inference_dtype), + timesteps=timestep, + context=ctx, + # t5xxl_ids=None skips the LLM Adapter — context is already pre-computed + ) + if use_scheduler and scheduler is not None: # Scheduler-based denoising user_step = 0 @@ -401,31 +575,14 @@ def _run_diffusion(self, context: InvocationContext) -> torch.Tensor: is_heun = hasattr(scheduler, "state_in_first_order") in_first_order = scheduler.state_in_first_order if is_heun else True - # Anima timestep convention: timestep = sigma * multiplier (1.0) timestep = torch.tensor( [sigma_curr * ANIMA_MULTIPLIER], device=device, dtype=inference_dtype ).expand(latents.shape[0]) - # Run transformer (positive) - model_output = transformer( - x=latents.to(transformer.dtype if hasattr(transformer, 'dtype') else inference_dtype), - timesteps=timestep, - context=pos_qwen3_embeds, - t5xxl_ids=pos_t5xxl_ids, - t5xxl_weights=pos_t5xxl_weights, - ) - noise_pred_cond = model_output.float() - - # Apply CFG - if do_cfg and neg_qwen3_embeds is not None: - model_output_uncond = transformer( - x=latents.to(transformer.dtype if hasattr(transformer, 'dtype') else inference_dtype), - timesteps=timestep, - context=neg_qwen3_embeds, - t5xxl_ids=neg_t5xxl_ids, - t5xxl_weights=neg_t5xxl_weights, - ) - noise_pred_uncond = model_output_uncond.float() + noise_pred_cond = _run_transformer(pos_context).float() + + if do_cfg and neg_context is not None: + noise_pred_uncond = _run_transformer(neg_context).float() noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_cond - noise_pred_uncond) else: noise_pred = noise_pred_cond @@ -433,7 +590,6 @@ def _run_diffusion(self, context: InvocationContext) -> torch.Tensor: step_output = scheduler.step(model_output=noise_pred, 
timestep=sched_timestep, sample=latents) latents = step_output.prev_sample - # Get sigma_prev for inpainting if step_index + 1 < len(scheduler.sigmas): sigma_prev = scheduler.sigmas[step_index + 1].item() else: @@ -470,38 +626,18 @@ def _run_diffusion(self, context: InvocationContext) -> torch.Tensor: sigma_curr = sigmas[step_idx] sigma_prev = sigmas[step_idx + 1] - # Anima timestep: sigma * multiplier (1.0 = raw sigma) timestep = torch.tensor( [sigma_curr * ANIMA_MULTIPLIER], device=device, dtype=inference_dtype ).expand(latents.shape[0]) - # Run transformer (positive) - model_output = transformer( - x=latents.to(transformer.dtype if hasattr(transformer, 'dtype') else inference_dtype), - timesteps=timestep, - context=pos_qwen3_embeds, - t5xxl_ids=pos_t5xxl_ids, - t5xxl_weights=pos_t5xxl_weights, - ) + noise_pred_cond = _run_transformer(pos_context).float() - # CONST model: noise_pred = model_output (NO negation, unlike Z-Image v-pred) - noise_pred_cond = model_output.float() - - # Apply CFG - if do_cfg and neg_qwen3_embeds is not None: - model_output_uncond = transformer( - x=latents.to(transformer.dtype if hasattr(transformer, 'dtype') else inference_dtype), - timesteps=timestep, - context=neg_qwen3_embeds, - t5xxl_ids=neg_t5xxl_ids, - t5xxl_weights=neg_t5xxl_weights, - ) - noise_pred_uncond = model_output_uncond.float() + if do_cfg and neg_context is not None: + noise_pred_uncond = _run_transformer(neg_context).float() noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_cond - noise_pred_uncond) else: noise_pred = noise_pred_cond - # Euler step: x_{t-1} = x_t + (sigma_{t-1} - sigma_t) * model_output latents_dtype = latents.dtype latents = latents.to(dtype=torch.float32) latents = latents + (sigma_prev - sigma_curr) * noise_pred @@ -520,7 +656,7 @@ def _run_diffusion(self, context: InvocationContext) -> torch.Tensor: order=1, total_steps=total_steps, timestep=int(sigma_curr * 1000), - latents=latents.squeeze(2), # Remove temporal dim for preview + 
latents=latents.squeeze(2), ), ) diff --git a/invokeai/app/invocations/anima_text_encoder.py b/invokeai/app/invocations/anima_text_encoder.py index df724a66d11..9fe0ccb310a 100644 --- a/invokeai/app/invocations/anima_text_encoder.py +++ b/invokeai/app/invocations/anima_text_encoder.py @@ -24,6 +24,7 @@ FieldDescriptions, Input, InputField, + TensorField, UIComponent, ) from invokeai.app.invocations.model import Qwen3EncoderField @@ -51,7 +52,7 @@ title="Prompt - Anima", tags=["prompt", "conditioning", "anima"], category="conditioning", - version="1.0.1", + version="1.1.0", classification=Classification.Prototype, ) class AnimaTextEncoderInvocation(BaseInvocation): @@ -68,6 +69,10 @@ class AnimaTextEncoderInvocation(BaseInvocation): description=FieldDescriptions.qwen3_encoder, input=Input.Connection, ) + mask: TensorField | None = InputField( + default=None, + description="A mask defining the region that this conditioning prompt applies to.", + ) @torch.no_grad() def invoke(self, context: InvocationContext) -> AnimaConditioningOutput: @@ -89,7 +94,7 @@ def invoke(self, context: InvocationContext) -> AnimaConditioningOutput: ) conditioning_name = context.conditioning.save(conditioning_data) return AnimaConditioningOutput( - conditioning=AnimaConditioningField(conditioning_name=conditioning_name) + conditioning=AnimaConditioningField(conditioning_name=conditioning_name, mask=self.mask) ) def _encode_prompt( diff --git a/invokeai/app/invocations/fields.py b/invokeai/app/invocations/fields.py index d888b8927b5..71b99d6687a 100644 --- a/invokeai/app/invocations/fields.py +++ b/invokeai/app/invocations/fields.py @@ -348,6 +348,11 @@ class AnimaConditioningField(BaseModel): """ conditioning_name: str = Field(description="The name of conditioning tensor") + mask: Optional[TensorField] = Field( + default=None, + description="The mask associated with this conditioning tensor for regional prompting. 
" + "Excluded regions should be set to False, included regions should be set to True.", + ) class ConditioningField(BaseModel): diff --git a/invokeai/backend/anima/anima_transformer_patch.py b/invokeai/backend/anima/anima_transformer_patch.py new file mode 100644 index 00000000000..7735355df7d --- /dev/null +++ b/invokeai/backend/anima/anima_transformer_patch.py @@ -0,0 +1,106 @@ +"""Utilities for patching the AnimaTransformer to support regional cross-attention masks.""" + +from contextlib import contextmanager +from typing import Optional + +import torch +import torch.nn.functional as F +from einops import rearrange + +from invokeai.backend.anima.regional_prompting import AnimaRegionalPromptingExtension + + +def _patched_cross_attn_forward( + original_forward, + attn_mask: torch.Tensor, +): + """Create a patched forward for CosmosAttention that injects a cross-attention mask. + + Args: + original_forward: The original CosmosAttention.forward method (bound to self). + attn_mask: Cross-attention mask of shape (img_seq_len, context_seq_len). + """ + def forward(x, context=None, rope_emb=None): + # If the context sequence length doesn't match the mask (e.g. negative conditioning + # has a different number of tokens than positive regional conditioning), skip masking + # and use the original unmasked forward. + actual_context = x if context is None else context + if actual_context.shape[-2] != attn_mask.shape[1]: + return original_forward(x, context, rope_emb=rope_emb) + + self = original_forward.__self__ + + q = self.q_proj(x) + context = x if context is None else context + k = self.k_proj(context) + v = self.v_proj(context) + q, k, v = (rearrange(t, "b ... (h d) -> b ... 
h d", h=self.n_heads, d=self.head_dim) for t in (q, k, v)) + + q = self.q_norm(q) + k = self.k_norm(k) + v = self.v_norm(v) + + if self.is_selfattn and rope_emb is not None: + from invokeai.backend.anima.anima_transformer import apply_rotary_pos_emb_cosmos + q = apply_rotary_pos_emb_cosmos(q, rope_emb) + k = apply_rotary_pos_emb_cosmos(k, rope_emb) + + in_q_shape = q.shape + in_k_shape = k.shape + q = rearrange(q, "b ... h d -> b h ... d").reshape(in_q_shape[0], in_q_shape[-2], -1, in_q_shape[-1]) + k = rearrange(k, "b ... h d -> b h ... d").reshape(in_k_shape[0], in_k_shape[-2], -1, in_k_shape[-1]) + v = rearrange(v, "b ... h d -> b h ... d").reshape(in_k_shape[0], in_k_shape[-2], -1, in_k_shape[-1]) + + # Convert boolean mask to float additive mask for SDPA + # True (attend) -> 0.0, False (block) -> -inf + # Shape: (img_seq_len, context_seq_len) -> (1, 1, img_seq_len, context_seq_len) + float_mask = torch.zeros_like(attn_mask, dtype=q.dtype) + float_mask[~attn_mask] = float("-inf") + expanded_mask = float_mask.unsqueeze(0).unsqueeze(0) + + result = F.scaled_dot_product_attention(q, k, v, attn_mask=expanded_mask) + result = rearrange(result, "b h s d -> b s (h d)") + return self.output_dropout(self.output_proj(result)) + + return forward + + +@contextmanager +def patch_anima_for_regional_prompting( + transformer, + regional_extension: Optional[AnimaRegionalPromptingExtension], +): + """Context manager to temporarily patch the Anima transformer for regional prompting. + + Patches the cross-attention in each DiT block to use a regional attention mask. + Uses alternating pattern: masked on even blocks, unmasked on odd blocks for + global coherence. + + Args: + transformer: The AnimaTransformer instance. + regional_extension: The regional prompting extension. If None or no mask, no patching. + + Yields: + The (possibly patched) transformer. 
+ """ + if regional_extension is None or regional_extension.cross_attn_mask is None: + yield transformer + return + + # Store original forwards + original_forwards = [] + for block_idx, block in enumerate(transformer.blocks): + original_forwards.append(block.cross_attn.forward) + + mask = regional_extension.get_cross_attn_mask(block_idx) + if mask is not None: + block.cross_attn.forward = _patched_cross_attn_forward( + block.cross_attn.forward, mask + ) + + try: + yield transformer + finally: + # Restore original forwards + for block_idx, block in enumerate(transformer.blocks): + block.cross_attn.forward = original_forwards[block_idx] diff --git a/invokeai/backend/anima/conditioning_data.py b/invokeai/backend/anima/conditioning_data.py index 067b1d29768..b96c807835d 100644 --- a/invokeai/backend/anima/conditioning_data.py +++ b/invokeai/backend/anima/conditioning_data.py @@ -5,16 +5,24 @@ - T5-XXL token IDs (discrete IDs, embedded by the LLM Adapter inside the transformer) Both are produced by the text encoder invocation and stored together. + +For regional prompting, multiple conditionings (each with an optional spatial mask) +are concatenated and processed together. The LLM Adapter runs on each region's +conditioning separately, producing per-region context vectors that are concatenated +for the DiT's cross-attention layers. An attention mask restricts which image tokens +attend to which regional context tokens. """ from dataclasses import dataclass import torch +from invokeai.backend.stable_diffusion.diffusion.conditioning_data import Range + @dataclass class AnimaTextConditioning: - """Anima text conditioning with Qwen3 hidden states and T5-XXL token IDs. + """Anima text conditioning with Qwen3 hidden states, T5-XXL token IDs, and optional mask. Attributes: qwen3_embeds: Text embeddings from Qwen3 0.6B encoder. @@ -23,8 +31,34 @@ class AnimaTextConditioning: Shape: (seq_len,). t5xxl_weights: Per-token weights for prompt weighting. Shape: (seq_len,). 
Defaults to all ones if not provided. + mask: Optional binary mask for regional prompting. If None, the prompt is global. + Shape: (1, 1, img_seq_len) where img_seq_len = (H // patch_size) * (W // patch_size). """ qwen3_embeds: torch.Tensor t5xxl_ids: torch.Tensor t5xxl_weights: torch.Tensor | None = None + mask: torch.Tensor | None = None + + +@dataclass +class AnimaRegionalTextConditioning: + """Container for multiple regional text conditionings processed by the LLM Adapter. + + After the LLM Adapter processes each region's conditioning, the outputs are concatenated. + The DiT cross-attention then uses an attention mask to restrict which image tokens + attend to which region's context tokens. + + Attributes: + context_embeds: Concatenated LLM Adapter outputs from all regional prompts. + Shape: (total_context_len, 1024). + image_masks: List of binary masks for each regional prompt. + If None, the prompt is global (applies to entire image). + Shape: (1, 1, img_seq_len). + context_ranges: List of ranges indicating which portion of context_embeds + corresponds to each regional prompt. + """ + + context_embeds: torch.Tensor + image_masks: list[torch.Tensor | None] + context_ranges: list[Range] diff --git a/invokeai/backend/anima/regional_prompting.py b/invokeai/backend/anima/regional_prompting.py new file mode 100644 index 00000000000..c25e75c9142 --- /dev/null +++ b/invokeai/backend/anima/regional_prompting.py @@ -0,0 +1,174 @@ +"""Regional prompting extension for Anima. + +Anima's architecture uses separate cross-attention in each DiT block: image tokens +(in 5D spatial layout) cross-attend to context tokens (LLM Adapter output). This is +different from Z-Image's unified [img, txt] sequence with self-attention. + +For regional prompting, we: +1. Run the LLM Adapter separately for each regional prompt +2. Concatenate the resulting context vectors +3. Build a cross-attention mask that restricts each image region to attend only to + its corresponding context tokens +4. 
Patch the DiT's cross-attention to use this mask + +The mask alternation strategy (masked on even blocks, full on odd blocks) helps +maintain global coherence across regions. +""" + +from typing import Optional + +import torch +import torchvision + +from invokeai.backend.anima.conditioning_data import AnimaRegionalTextConditioning, AnimaTextConditioning +from invokeai.backend.stable_diffusion.diffusion.conditioning_data import Range +from invokeai.backend.util.devices import TorchDevice +from invokeai.backend.util.mask import to_standard_float_mask + + +class AnimaRegionalPromptingExtension: + """Manages regional prompting for Anima's cross-attention. + + Unlike Z-Image which uses a unified [img, txt] sequence, Anima has separate + cross-attention where image tokens (query) attend to context tokens (key/value). + The cross-attention mask shape is (img_seq_len, context_seq_len). + """ + + def __init__( + self, + regional_text_conditioning: AnimaRegionalTextConditioning, + cross_attn_mask: torch.Tensor | None = None, + ): + self.regional_text_conditioning = regional_text_conditioning + self.cross_attn_mask = cross_attn_mask + + def get_cross_attn_mask(self, block_index: int) -> torch.Tensor | None: + """Get the cross-attention mask for a given block index. + + Uses alternating pattern: apply mask on even blocks, no mask on odd blocks. + This helps balance regional control with global coherence. + """ + if block_index % 2 == 0: + return self.cross_attn_mask + return None + + @classmethod + def from_regional_conditioning( + cls, + regional_text_conditioning: AnimaRegionalTextConditioning, + img_seq_len: int, + ) -> "AnimaRegionalPromptingExtension": + """Create extension from pre-processed regional conditioning. + + Args: + regional_text_conditioning: Regional conditioning with concatenated context and masks. + img_seq_len: Number of image tokens (H_patches * W_patches). 
+ """ + cross_attn_mask = cls._prepare_cross_attn_mask(regional_text_conditioning, img_seq_len) + return cls( + regional_text_conditioning=regional_text_conditioning, + cross_attn_mask=cross_attn_mask, + ) + + @classmethod + def _prepare_cross_attn_mask( + cls, + regional_text_conditioning: AnimaRegionalTextConditioning, + img_seq_len: int, + ) -> torch.Tensor | None: + """Prepare a cross-attention mask for regional prompting. + + The mask shape is (img_seq_len, context_seq_len) where: + - Each image token can attend to context tokens from its assigned region + - Global prompts (mask=None) attend to background regions + + Args: + regional_text_conditioning: The regional text conditioning data. + img_seq_len: Number of image tokens. + + Returns: + Cross-attention mask of shape (img_seq_len, context_seq_len), or None + if no regional masks are present. + """ + has_regional_masks = any(mask is not None for mask in regional_text_conditioning.image_masks) + if not has_regional_masks: + return None + + # Identify background region (area not covered by any mask) + background_region_mask: torch.Tensor | None = None + for image_mask in regional_text_conditioning.image_masks: + if image_mask is not None: + mask_flat = image_mask.view(-1) + if background_region_mask is None: + background_region_mask = torch.ones_like(mask_flat) + background_region_mask = background_region_mask * (1 - mask_flat) + + device = TorchDevice.choose_torch_device() + context_seq_len = regional_text_conditioning.context_embeds.shape[0] + + # Cross-attention mask: (img_seq_len, context_seq_len) + # img tokens are queries, context tokens are keys/values + cross_attn_mask = torch.zeros((img_seq_len, context_seq_len), device=device, dtype=torch.float16) + + for image_mask, context_range in zip( + regional_text_conditioning.image_masks, + regional_text_conditioning.context_ranges, + strict=True, + ): + ctx_start = context_range.start + ctx_end = context_range.end + + if image_mask is not None: + # Regional 
prompt: only masked image tokens attend to this region's context + mask_flat = image_mask.view(img_seq_len) + cross_attn_mask[:, ctx_start:ctx_end] = mask_flat.view(img_seq_len, 1) + else: + # Global prompt: background image tokens attend to this context + if background_region_mask is not None: + cross_attn_mask[:, ctx_start:ctx_end] = background_region_mask.view(img_seq_len, 1) + else: + cross_attn_mask[:, ctx_start:ctx_end] = 1.0 + + # Convert to boolean + cross_attn_mask = cross_attn_mask > 0.5 + return cross_attn_mask + + @staticmethod + def preprocess_regional_prompt_mask( + mask: Optional[torch.Tensor], + target_height: int, + target_width: int, + dtype: torch.dtype, + device: torch.device, + ) -> torch.Tensor: + """Preprocess a regional prompt mask to match the target image token grid. + + Args: + mask: Input mask tensor. If None, returns a mask of all ones. + target_height: Height of the image token grid (H // patch_size). + target_width: Width of the image token grid (W // patch_size). + dtype: Target dtype for the mask. + device: Target device for the mask. + + Returns: + Processed mask of shape (1, 1, target_height * target_width). 
+ """ + img_seq_len = target_height * target_width + + if mask is None: + return torch.ones((1, 1, img_seq_len), dtype=dtype, device=device) + + mask = to_standard_float_mask(mask, out_dtype=dtype) + + tf = torchvision.transforms.Resize( + (target_height, target_width), + interpolation=torchvision.transforms.InterpolationMode.NEAREST, + ) + + if mask.ndim == 2: + mask = mask.unsqueeze(0) + if mask.ndim == 3: + mask = mask.unsqueeze(0) + + resized_mask = tf(mask) + return resized_mask.flatten(start_dim=2).to(device=device) diff --git a/invokeai/frontend/web/src/features/controlLayers/store/validators.ts b/invokeai/frontend/web/src/features/controlLayers/store/validators.ts index 3406e9e7ee6..4633f8460b1 100644 --- a/invokeai/frontend/web/src/features/controlLayers/store/validators.ts +++ b/invokeai/frontend/web/src/features/controlLayers/store/validators.ts @@ -70,6 +70,16 @@ export const getRegionalGuidanceWarnings = ( } } + if (model.base === 'anima') { + // Reference images (IP Adapters) are not supported for Anima + if (entity.referenceImages.length > 0) { + warnings.push(WARNINGS.RG_REFERENCE_IMAGES_NOT_SUPPORTED); + } + if (entity.autoNegative) { + warnings.push(WARNINGS.RG_AUTO_NEGATIVE_NOT_SUPPORTED); + } + } + entity.referenceImages.forEach(({ config }) => { if (!config.model) { // No model selected diff --git a/invokeai/frontend/web/src/features/modelManagerV2/models.ts b/invokeai/frontend/web/src/features/modelManagerV2/models.ts index e1784bc965e..0888846ca82 100644 --- a/invokeai/frontend/web/src/features/modelManagerV2/models.ts +++ b/invokeai/frontend/web/src/features/modelManagerV2/models.ts @@ -143,7 +143,7 @@ export const MODEL_BASE_TO_COLOR: Record = { flux2: 'gold', cogview4: 'red', 'z-image': 'cyan', - anima: 'pink', + anima: 'invokePurple', unknown: 'red', }; diff --git a/invokeai/frontend/web/src/features/nodes/util/graph/generation/addRegions.ts b/invokeai/frontend/web/src/features/nodes/util/graph/generation/addRegions.ts index 
22973983a91..6f2b717b40e 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graph/generation/addRegions.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graph/generation/addRegions.ts @@ -33,10 +33,20 @@ type AddRegionsArg = { bbox: Rect; model: MainModelConfig; posCond: Invocation< - 'compel' | 'sdxl_compel_prompt' | 'flux_text_encoder' | 'flux2_klein_text_encoder' | 'z_image_text_encoder' + | 'compel' + | 'sdxl_compel_prompt' + | 'flux_text_encoder' + | 'flux2_klein_text_encoder' + | 'z_image_text_encoder' + | 'anima_text_encoder' >; negCond: Invocation< - 'compel' | 'sdxl_compel_prompt' | 'flux_text_encoder' | 'flux2_klein_text_encoder' | 'z_image_text_encoder' + | 'compel' + | 'sdxl_compel_prompt' + | 'flux_text_encoder' + | 'flux2_klein_text_encoder' + | 'z_image_text_encoder' + | 'anima_text_encoder' > | null; posCondCollect: Invocation<'collect'>; negCondCollect: Invocation<'collect'> | null; @@ -76,6 +86,7 @@ export const addRegions = async ({ const isSDXL = model.base === 'sdxl'; const isFLUX = model.base === 'flux'; const isZImage = model.base === 'z-image'; + const isAnima = model.base === 'anima'; const validRegions = regions .filter((entity) => entity.isEnabled) @@ -116,7 +127,9 @@ export const addRegions = async ({ if (region.positivePrompt) { // The main positive conditioning node result.addedPositivePrompt = true; - let regionalPosCond: Invocation<'compel' | 'sdxl_compel_prompt' | 'flux_text_encoder' | 'z_image_text_encoder'>; + let regionalPosCond: Invocation< + 'compel' | 'sdxl_compel_prompt' | 'flux_text_encoder' | 'z_image_text_encoder' | 'anima_text_encoder' + >; if (isSDXL) { regionalPosCond = g.addNode({ type: 'sdxl_compel_prompt', @@ -136,6 +149,12 @@ export const addRegions = async ({ id: getPrefixedId('prompt_region_positive_cond'), prompt: region.positivePrompt, }); + } else if (isAnima) { + regionalPosCond = g.addNode({ + type: 'anima_text_encoder', + id: getPrefixedId('prompt_region_positive_cond'), + prompt: 
region.positivePrompt, + }); } else { regionalPosCond = g.addNode({ type: 'compel', @@ -172,6 +191,12 @@ export const addRegions = async ({ clone.destination.node_id = regionalPosCond.id; g.addEdgeFromObj(clone); } + } else if (posCond.type === 'anima_text_encoder') { + for (const edge of g.getEdgesTo(posCond, ['qwen3_encoder', 'mask'])) { + const clone = deepClone(edge); + clone.destination.node_id = regionalPosCond.id; + g.addEdgeFromObj(clone); + } } else { assert(false, 'Unsupported positive conditioning node type.'); } @@ -183,7 +208,9 @@ export const addRegions = async ({ // The main negative conditioning node result.addedNegativePrompt = true; - let regionalNegCond: Invocation<'compel' | 'sdxl_compel_prompt' | 'flux_text_encoder' | 'z_image_text_encoder'>; + let regionalNegCond: Invocation< + 'compel' | 'sdxl_compel_prompt' | 'flux_text_encoder' | 'z_image_text_encoder' | 'anima_text_encoder' + >; if (isSDXL) { regionalNegCond = g.addNode({ type: 'sdxl_compel_prompt', @@ -203,6 +230,12 @@ export const addRegions = async ({ id: getPrefixedId('prompt_region_negative_cond'), prompt: region.negativePrompt, }); + } else if (isAnima) { + regionalNegCond = g.addNode({ + type: 'anima_text_encoder', + id: getPrefixedId('prompt_region_negative_cond'), + prompt: region.negativePrompt, + }); } else { regionalNegCond = g.addNode({ type: 'compel', @@ -240,6 +273,12 @@ export const addRegions = async ({ clone.destination.node_id = regionalNegCond.id; g.addEdgeFromObj(clone); } + } else if (negCond.type === 'anima_text_encoder') { + for (const edge of g.getEdgesTo(negCond, ['qwen3_encoder', 'mask'])) { + const clone = deepClone(edge); + clone.destination.node_id = regionalNegCond.id; + g.addEdgeFromObj(clone); + } } else { assert(false, 'Unsupported negative conditioning node type.'); } @@ -259,7 +298,7 @@ export const addRegions = async ({ g.addEdge(maskToTensor, 'mask', invertTensorMask, 'mask'); // Create the conditioning node. 
It's going to be connected to the negative cond collector, but it uses the positive prompt let regionalPosCondInverted: Invocation< - 'compel' | 'sdxl_compel_prompt' | 'flux_text_encoder' | 'z_image_text_encoder' + 'compel' | 'sdxl_compel_prompt' | 'flux_text_encoder' | 'z_image_text_encoder' | 'anima_text_encoder' >; if (isSDXL) { regionalPosCondInverted = g.addNode({ @@ -280,6 +319,12 @@ export const addRegions = async ({ id: getPrefixedId('prompt_region_positive_cond_inverted'), prompt: region.positivePrompt, }); + } else if (isAnima) { + regionalPosCondInverted = g.addNode({ + type: 'anima_text_encoder', + id: getPrefixedId('prompt_region_positive_cond_inverted'), + prompt: region.positivePrompt, + }); } else { regionalPosCondInverted = g.addNode({ type: 'compel', @@ -316,6 +361,12 @@ export const addRegions = async ({ clone.destination.node_id = regionalPosCondInverted.id; g.addEdgeFromObj(clone); } + } else if (posCond.type === 'anima_text_encoder') { + for (const edge of g.getEdgesTo(posCond, ['qwen3_encoder', 'mask'])) { + const clone = deepClone(edge); + clone.destination.node_id = regionalPosCondInverted.id; + g.addEdgeFromObj(clone); + } } else { assert(false, 'Unsupported positive conditioning node type.'); } diff --git a/invokeai/frontend/web/src/features/nodes/util/graph/generation/buildAnimaGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graph/generation/buildAnimaGraph.ts index 6b1c06e7f40..b70b7d128c2 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graph/generation/buildAnimaGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graph/generation/buildAnimaGraph.ts @@ -7,12 +7,13 @@ import { selectMainModelConfig, selectParamsSlice, } from 'features/controlLayers/store/paramsSlice'; -import { selectCanvasMetadata } from 'features/controlLayers/store/selectors'; +import { selectCanvasMetadata, selectCanvasSlice } from 'features/controlLayers/store/selectors'; import { fetchModelConfigWithTypeGuard } from 
'features/metadata/util/modelFetchingHelpers'; import { addImageToImage } from 'features/nodes/util/graph/generation/addImageToImage'; import { addInpaint } from 'features/nodes/util/graph/generation/addInpaint'; import { addNSFWChecker } from 'features/nodes/util/graph/generation/addNSFWChecker'; import { addOutpaint } from 'features/nodes/util/graph/generation/addOutpaint'; +import { addRegions } from 'features/nodes/util/graph/generation/addRegions'; import { addTextToImage } from 'features/nodes/util/graph/generation/addTextToImage'; import { addWatermarker } from 'features/nodes/util/graph/generation/addWatermarker'; import { Graph } from 'features/nodes/util/graph/generation/Graph'; @@ -70,17 +71,33 @@ export const buildAnimaGraph = async (arg: GraphBuilderArg): Promise 1 let negCond: Invocation<'anima_text_encoder'> | null = null; + let negCondCollect: Invocation<'collect'> | null = null; if (guidance_scale > 1) { negCond = g.addNode({ type: 'anima_text_encoder', id: getPrefixedId('neg_prompt'), prompt: prompts.negative, }); + negCondCollect = g.addNode({ + type: 'collect', + id: getPrefixedId('neg_cond_collect'), + }); } + // Placeholder collect node for IP adapters (not supported for Anima but needed for addRegions) + const ipAdapterCollect = g.addNode({ + type: 'collect', + id: getPrefixedId('ip_adapter_collect'), + }); + const seed = g.addNode({ id: getPrefixedId('seed'), type: 'integer', @@ -102,14 +119,16 @@ export const buildAnimaGraph = async (arg: GraphBuilderArg): Promise 1 - if (negCond !== null) { + if (negCond !== null && negCondCollect !== null) { g.addEdge(modelLoader, 'qwen3_encoder', negCond, 'qwen3_encoder'); - g.addEdge(negCond, 'conditioning', denoise, 'negative_conditioning'); + g.addEdge(negCond, 'conditioning', negCondCollect, 'item'); + g.addEdge(negCondCollect, 'collection', denoise, 'negative_conditioning'); } // Connect seed and denoiser to L2I @@ -131,6 +150,27 @@ export const buildAnimaGraph = async (arg: GraphBuilderArg): 
Promise = l2i; if (generationMode === 'txt2img') { diff --git a/invokeai/frontend/web/src/services/api/schema.ts b/invokeai/frontend/web/src/services/api/schema.ts index e0aac0f8486..9c435d88c92 100644 --- a/invokeai/frontend/web/src/services/api/schema.ts +++ b/invokeai/frontend/web/src/services/api/schema.ts @@ -2625,6 +2625,11 @@ export type components = { * @description The name of conditioning tensor */ conditioning_name: string; + /** + * @description The mask associated with this conditioning tensor for regional prompting. Excluded regions should be set to False, included regions should be set to True. + * @default null + */ + mask?: components["schemas"]["TensorField"] | null; }; /** * AnimaConditioningOutput @@ -2702,15 +2707,17 @@ export type components = { */ transformer?: components["schemas"]["TransformerField"] | null; /** + * Positive Conditioning * @description Positive conditioning tensor * @default null */ - positive_conditioning?: components["schemas"]["AnimaConditioningField"] | null; + positive_conditioning?: components["schemas"]["AnimaConditioningField"] | components["schemas"]["AnimaConditioningField"][] | null; /** + * Negative Conditioning * @description Negative conditioning tensor * @default null */ - negative_conditioning?: components["schemas"]["AnimaConditioningField"] | null; + negative_conditioning?: components["schemas"]["AnimaConditioningField"] | components["schemas"]["AnimaConditioningField"][] | null; /** * Guidance Scale * @description Guidance scale for classifier-free guidance. Recommended: 4.0-5.0 for Anima. @@ -2972,6 +2979,11 @@ export type components = { * @default null */ qwen3_encoder?: components["schemas"]["Qwen3EncoderField"] | null; + /** + * @description A mask defining the region that this conditioning prompt applies to. 
+ * @default null + */ + mask?: components["schemas"]["TensorField"] | null; /** * type * @default anima_text_encoder From 707ae1764e8e6f073883b8f75a77a446b54e8592 Mon Sep 17 00:00:00 2001 From: Your Name Date: Thu, 12 Mar 2026 04:58:27 -0400 Subject: [PATCH 05/14] loras --- anima-plan.md | 955 ------------------ invokeai/app/invocations/anima_denoise.py | 29 +- invokeai/app/invocations/anima_lora_loader.py | 151 +++ .../app/invocations/anima_text_encoder.py | 65 +- .../backend/model_manager/configs/factory.py | 2 + .../backend/model_manager/configs/lora.py | 82 ++ .../model_manager/load/model_loaders/lora.py | 4 + .../lora_conversions/anima_lora_constants.py | 8 + .../anima_lora_conversion_utils.py | 321 ++++++ .../controlLayers/store/validators.ts | 4 +- .../util/graph/generation/addAnimaLoRAs.ts | 70 ++ .../util/graph/generation/buildAnimaGraph.ts | 4 + .../frontend/web/src/services/api/schema.ts | 242 ++++- 13 files changed, 935 insertions(+), 1002 deletions(-) delete mode 100644 anima-plan.md create mode 100644 invokeai/app/invocations/anima_lora_loader.py create mode 100644 invokeai/backend/patches/lora_conversions/anima_lora_constants.py create mode 100644 invokeai/backend/patches/lora_conversions/anima_lora_conversion_utils.py create mode 100644 invokeai/frontend/web/src/features/nodes/util/graph/generation/addAnimaLoRAs.ts diff --git a/anima-plan.md b/anima-plan.md deleted file mode 100644 index 95ef366e79b..00000000000 --- a/anima-plan.md +++ /dev/null @@ -1,955 +0,0 @@ -# Comprehensive Plan: Adding Anima Model Support to InvokeAI - -## 1. Executive Summary - -**Anima** is a 2-billion-parameter anime-focused text-to-image model created by CircleStone Labs and Comfy Org, built on top of NVIDIA's Cosmos Predict2 architecture. 
It uses a **Cosmos DiT backbone** (`MiniTrainDIT`), a **Qwen3 0.6B text encoder**, a custom **LLM Adapter** (6-layer cross-attention transformer that fuses Qwen3 hidden states with learned T5-XXL token embeddings), and a **Qwen-Image VAE** (`AutoencoderKLQwenImage` — a fine-tuned Wan 2.1 VAE with 16 latent channels). - -The model uses **rectified flow** sampling (shift=3.0, multiplier=1000) — the same `CONST` + `ModelSamplingDiscreteFlow` formulation used by Flux and Z-Image, meaning the existing `FlowMatchEulerDiscreteScheduler` and `FlowMatchHeunDiscreteScheduler` can be reused. The initial implementation targets **basic text-to-image generation only** — LoRA, ControlNet, inpainting, regional prompting, and img2img will come later. - -**Key architectural difference from all existing InvokeAI models**: The LLM Adapter is a custom component embedded inside the diffusion model that cross-attends between Qwen3 encoder hidden states and learned T5-XXL token ID embeddings. This means the text encoding pipeline produces *two* outputs (Qwen3 hidden states + T5 token IDs) that are both fed into the transformer. - ---- - -## 2. 
Model Architecture Reference - -### 2.1 Components Overview - -| Component | Architecture | Source | Size | -|-----------|-------------|--------|------| -| **Diffusion Transformer** | `MiniTrainDIT` (Cosmos Predict2 DiT) + `LLMAdapter` | Single-file checkpoint (`anima-preview2.safetensors`) | ~2B params | -| **Text Encoder** | Qwen3 0.6B (causal LM, hidden states extracted) | Single-file (`qwen_3_06b_base.safetensors`) | ~0.6B params | -| **T5-XXL Tokenizer** | SentencePiece tokenizer only (no T5 model weights needed) | Bundled with transformers library | ~2MB | -| **VAE** | `AutoencoderKLQwenImage` (fine-tuned Wan 2.1 VAE) | Single-file (`qwen_image_vae.safetensors`) | ~100M params | - -### 2.2 Text Conditioning Pipeline - -``` -User Prompt - ├──> Qwen3 0.6B Tokenizer (Qwen2Tokenizer) - │ └──> Qwen3 0.6B Model → second-to-last hidden states [seq_len, 1024] - │ - ├──> T5-XXL Tokenizer (T5TokenizerFast) → token IDs [seq_len] (no T5 model needed) - │ - └──> LLM Adapter (inside transformer) - ├── Embed T5 token IDs via learned Embedding(32128, 1024) - ├── Cross-attend T5 embeddings ← Qwen3 hidden states (6 transformer layers with RoPE) - └── Output: [512, 1024] conditioning tensor (zero-padded if < 512 tokens) - └──> Fed to Cosmos DiT cross-attention layers -``` - -### 2.3 Latent Space - -- **Channels**: 16 -- **Spatial compression**: 8× (VAE downsamples by 2^3) -- **Dimensions**: 3D (`[B, C, T, H, W]`) — temporal dim is 1 for single images -- **Normalization**: Mean/std normalization using Wan 2.1 constants (not simple scaling) - - `process_in(latent) = (latent - latents_mean) / latents_std` - - `process_out(latent) = latent * latents_std + latents_mean` - -### 2.4 Sampling / Noise Schedule - -- **Type**: Rectified Flow (`CONST` model — `denoised = input - output * sigma`) -- **Shift**: 3.0 (via `time_snr_shift(alpha=3.0, t)` — same formula as Flux) -- **Multiplier**: 1000 -- **Sigma range**: 0.0 (clean) to 1.0 (noise), shifted by factor 3.0 -- **Compatible 
schedulers**: `FlowMatchEulerDiscreteScheduler`, `FlowMatchHeunDiscreteScheduler` (already in InvokeAI for Z-Image/Flux) -- **Recommended settings**: CFG 4–5, 30–50 steps - -### 2.5 Default Model Configuration (from ComfyUI) - -```python -# MiniTrainDIT default constructor args for Anima: -model_channels = 2048 # Transformer hidden dim -num_blocks = 28 # Number of DiT blocks -num_heads = 32 # Attention heads -crossattn_emb_channels = 1024 # Must match LLM Adapter output dim -patch_spatial = 2 # Spatial patch size -patch_temporal = 1 # Temporal patch size (1 for images) -in_channels = 16 # Latent channels -out_channels = 16 # Output channels -max_img_h = 240 # Max height in patches (240 * 2 * 8 = 3840px) -max_img_w = 240 # Max width in patches -max_frames = 1 # Single image -``` - ---- - -## 3. ComfyUI Reference Implementation - -The following ComfyUI source files contain the complete Anima implementation and should be reverse-engineered: - -| File | URL | Purpose | -|------|-----|---------| -| **Anima model** | [comfy/ldm/anima/model.py](https://github.com/comfyanonymous/ComfyUI/blob/master/comfy/ldm/anima/model.py) | `Anima(MiniTrainDIT)` + `LLMAdapter` + `RotaryEmbedding` + `TransformerBlock` + `Attention` | -| **Cosmos DiT base** | [comfy/ldm/cosmos/predict2.py](https://github.com/comfyanonymous/ComfyUI/blob/master/comfy/ldm/cosmos/predict2.py) | `MiniTrainDIT` — the Cosmos Predict2 backbone that Anima extends | -| **Text encoder** | [comfy/text_encoders/anima.py](https://github.com/comfyanonymous/ComfyUI/blob/master/comfy/text_encoders/anima.py) | Dual tokenizer (Qwen3 + T5-XXL), `AnimaTEModel` that outputs Qwen3 hidden states + T5 token IDs | -| **Model registration** | [comfy/supported_models.py](https://github.com/comfyanonymous/ComfyUI/blob/master/comfy/supported_models.py) | `Anima` config class: shift=3.0, `Wan21` latent format, dtype support | -| **Model base** | 
[comfy/model_base.py](https://github.com/comfyanonymous/ComfyUI/blob/master/comfy/model_base.py) | `Anima(BaseModel)` — `ModelType.FLOW`, pre-processes text embeds via LLM adapter in `extra_conds()` | -| **Latent format** | [comfy/latent_formats.py](https://github.com/comfyanonymous/ComfyUI/blob/master/comfy/latent_formats.py) | `Wan21` — 16ch, 3D, mean/std normalization constants | -| **Sampling** | [comfy/model_sampling.py](https://github.com/comfyanonymous/ComfyUI/blob/master/comfy/model_sampling.py) | `CONST` + `ModelSamplingDiscreteFlow` — rectified flow with shift | - -### 3.1 LLM Adapter Architecture (from ComfyUI source) - -The `LLMAdapter` is the critical custom component. From [comfy/ldm/anima/model.py](https://github.com/comfyanonymous/ComfyUI/blob/master/comfy/ldm/anima/model.py): - -- **Input**: `source_hidden_states` (Qwen3 output, dim=1024) + `target_input_ids` (T5-XXL token IDs) -- **Embedding**: `Embedding(32128, 1024)` — maps T5 token IDs to dense vectors -- **Projection**: `in_proj` (identity when `model_dim == target_dim`) -- **Positional encoding**: `RotaryEmbedding(head_dim=64)` — applied separately to query (target) and key (source) sequences -- **Blocks**: 6 × `TransformerBlock` each containing: - - Self-attention on the target (T5 embedding) sequence - - Cross-attention: target queries attend to source (Qwen3) keys/values - - MLP with GELU activation (4× expansion) - - RMSNorm (eps=1e-6) before each sub-layer -- **Output**: `norm(out_proj(x))` → `[batch, seq_len, 1024]`, zero-padded to 512 tokens - -### 3.2 Key Insight: LLM Adapter Lives Inside the Checkpoint - -The LLM Adapter weights are stored under the `llm_adapter.*` prefix in the main checkpoint file (`anima-preview2.safetensors`). They are **not** a separate file. The Anima class's `forward()` calls `preprocess_text_embeds()` which runs the adapter before passing to `MiniTrainDIT.forward()`. 
- -### 3.3 Full ComfyUI Anima Model Source - -```python -# comfy/ldm/anima/model.py — FULL SOURCE -from comfy.ldm.cosmos.predict2 import MiniTrainDIT -import torch -from torch import nn -import torch.nn.functional as F - - -def rotate_half(x): - x1 = x[..., : x.shape[-1] // 2] - x2 = x[..., x.shape[-1] // 2 :] - return torch.cat((-x2, x1), dim=-1) - - -def apply_rotary_pos_emb(x, cos, sin, unsqueeze_dim=1): - cos = cos.unsqueeze(unsqueeze_dim) - sin = sin.unsqueeze(unsqueeze_dim) - x_embed = (x * cos) + (rotate_half(x) * sin) - return x_embed - - -class RotaryEmbedding(nn.Module): - def __init__(self, head_dim): - super().__init__() - self.rope_theta = 10000 - inv_freq = 1.0 / (self.rope_theta ** (torch.arange(0, head_dim, 2, dtype=torch.int64).to(dtype=torch.float) / head_dim)) - self.register_buffer("inv_freq", inv_freq, persistent=False) - - @torch.no_grad() - def forward(self, x, position_ids): - inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device) - position_ids_expanded = position_ids[:, None, :].float() - - device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu" - with torch.autocast(device_type=device_type, enabled=False): # Force float32 - freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2) - emb = torch.cat((freqs, freqs), dim=-1) - cos = emb.cos() - sin = emb.sin() - - return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype) - - -class Attention(nn.Module): - def __init__(self, query_dim, context_dim, n_heads, head_dim, device=None, dtype=None, operations=None): - super().__init__() - - inner_dim = head_dim * n_heads - self.n_heads = n_heads - self.head_dim = head_dim - self.query_dim = query_dim - self.context_dim = context_dim - - self.q_proj = operations.Linear(query_dim, inner_dim, bias=False, device=device, dtype=dtype) - self.q_norm = operations.RMSNorm(self.head_dim, eps=1e-6, device=device, dtype=dtype) - - self.k_proj = 
operations.Linear(context_dim, inner_dim, bias=False, device=device, dtype=dtype) - self.k_norm = operations.RMSNorm(self.head_dim, eps=1e-6, device=device, dtype=dtype) - - self.v_proj = operations.Linear(context_dim, inner_dim, bias=False, device=device, dtype=dtype) - - self.o_proj = operations.Linear(inner_dim, query_dim, bias=False, device=device, dtype=dtype) - - def forward(self, x, mask=None, context=None, position_embeddings=None, position_embeddings_context=None): - context = x if context is None else context - input_shape = x.shape[:-1] - q_shape = (*input_shape, self.n_heads, self.head_dim) - context_shape = context.shape[:-1] - kv_shape = (*context_shape, self.n_heads, self.head_dim) - - query_states = self.q_norm(self.q_proj(x).view(q_shape)).transpose(1, 2) - key_states = self.k_norm(self.k_proj(context).view(kv_shape)).transpose(1, 2) - value_states = self.v_proj(context).view(kv_shape).transpose(1, 2) - - if position_embeddings is not None: - assert position_embeddings_context is not None - cos, sin = position_embeddings - query_states = apply_rotary_pos_emb(query_states, cos, sin) - cos, sin = position_embeddings_context - key_states = apply_rotary_pos_emb(key_states, cos, sin) - - attn_output = F.scaled_dot_product_attention(query_states, key_states, value_states, attn_mask=mask) - - attn_output = attn_output.transpose(1, 2).reshape(*input_shape, -1).contiguous() - attn_output = self.o_proj(attn_output) - return attn_output - - def init_weights(self): - torch.nn.init.zeros_(self.o_proj.weight) - - -class TransformerBlock(nn.Module): - def __init__(self, source_dim, model_dim, num_heads=16, mlp_ratio=4.0, use_self_attn=False, layer_norm=False, device=None, dtype=None, operations=None): - super().__init__() - self.use_self_attn = use_self_attn - - if self.use_self_attn: - self.norm_self_attn = operations.LayerNorm(model_dim, device=device, dtype=dtype) if layer_norm else operations.RMSNorm(model_dim, eps=1e-6, device=device, dtype=dtype) - 
self.self_attn = Attention( - query_dim=model_dim, - context_dim=model_dim, - n_heads=num_heads, - head_dim=model_dim//num_heads, - device=device, - dtype=dtype, - operations=operations, - ) - - self.norm_cross_attn = operations.LayerNorm(model_dim, device=device, dtype=dtype) if layer_norm else operations.RMSNorm(model_dim, eps=1e-6, device=device, dtype=dtype) - self.cross_attn = Attention( - query_dim=model_dim, - context_dim=source_dim, - n_heads=num_heads, - head_dim=model_dim//num_heads, - device=device, - dtype=dtype, - operations=operations, - ) - - self.norm_mlp = operations.LayerNorm(model_dim, device=device, dtype=dtype) if layer_norm else operations.RMSNorm(model_dim, eps=1e-6, device=device, dtype=dtype) - self.mlp = nn.Sequential( - operations.Linear(model_dim, int(model_dim * mlp_ratio), device=device, dtype=dtype), - nn.GELU(), - operations.Linear(int(model_dim * mlp_ratio), model_dim, device=device, dtype=dtype) - ) - - def forward(self, x, context, target_attention_mask=None, source_attention_mask=None, position_embeddings=None, position_embeddings_context=None): - if self.use_self_attn: - normed = self.norm_self_attn(x) - attn_out = self.self_attn(normed, mask=target_attention_mask, position_embeddings=position_embeddings, position_embeddings_context=position_embeddings) - x = x + attn_out - - normed = self.norm_cross_attn(x) - attn_out = self.cross_attn(normed, mask=source_attention_mask, context=context, position_embeddings=position_embeddings, position_embeddings_context=position_embeddings_context) - x = x + attn_out - - x = x + self.mlp(self.norm_mlp(x)) - return x - - def init_weights(self): - torch.nn.init.zeros_(self.mlp[2].weight) - self.cross_attn.init_weights() - - -class LLMAdapter(nn.Module): - def __init__( - self, - source_dim=1024, - target_dim=1024, - model_dim=1024, - num_layers=6, - num_heads=16, - use_self_attn=True, - layer_norm=False, - device=None, - dtype=None, - operations=None, - ): - super().__init__() - - self.embed = 
operations.Embedding(32128, target_dim, device=device, dtype=dtype) - if model_dim != target_dim: - self.in_proj = operations.Linear(target_dim, model_dim, device=device, dtype=dtype) - else: - self.in_proj = nn.Identity() - self.rotary_emb = RotaryEmbedding(model_dim//num_heads) - self.blocks = nn.ModuleList([ - TransformerBlock(source_dim, model_dim, num_heads=num_heads, use_self_attn=use_self_attn, layer_norm=layer_norm, device=device, dtype=dtype, operations=operations) - for _ in range(num_layers) - ]) - self.out_proj = operations.Linear(model_dim, target_dim, device=device, dtype=dtype) - self.norm = operations.RMSNorm(target_dim, eps=1e-6, device=device, dtype=dtype) - - def forward(self, source_hidden_states, target_input_ids, target_attention_mask=None, source_attention_mask=None): - if target_attention_mask is not None: - target_attention_mask = target_attention_mask.to(torch.bool) - if target_attention_mask.ndim == 2: - target_attention_mask = target_attention_mask.unsqueeze(1).unsqueeze(1) - - if source_attention_mask is not None: - source_attention_mask = source_attention_mask.to(torch.bool) - if source_attention_mask.ndim == 2: - source_attention_mask = source_attention_mask.unsqueeze(1).unsqueeze(1) - - context = source_hidden_states - x = self.in_proj(self.embed(target_input_ids, out_dtype=context.dtype)) - position_ids = torch.arange(x.shape[1], device=x.device).unsqueeze(0) - position_ids_context = torch.arange(context.shape[1], device=x.device).unsqueeze(0) - position_embeddings = self.rotary_emb(x, position_ids) - position_embeddings_context = self.rotary_emb(x, position_ids_context) - for block in self.blocks: - x = block(x, context, - target_attention_mask=target_attention_mask, - source_attention_mask=source_attention_mask, - position_embeddings=position_embeddings, - position_embeddings_context=position_embeddings_context) - return self.norm(self.out_proj(x)) - - -class Anima(MiniTrainDIT): - def __init__(self, *args, **kwargs): - 
super().__init__(*args, **kwargs) - self.llm_adapter = LLMAdapter(device=kwargs.get("device"), dtype=kwargs.get("dtype"), operations=kwargs.get("operations")) - - def preprocess_text_embeds(self, text_embeds, text_ids, t5xxl_weights=None): - if text_ids is not None: - out = self.llm_adapter(text_embeds, text_ids) - if t5xxl_weights is not None: - out = out * t5xxl_weights - - if out.shape[1] < 512: - out = torch.nn.functional.pad(out, (0, 0, 0, 512 - out.shape[1])) - return out - else: - return text_embeds - - def forward(self, x, timesteps, context, **kwargs): - t5xxl_ids = kwargs.pop("t5xxl_ids", None) - if t5xxl_ids is not None: - context = self.preprocess_text_embeds(context, t5xxl_ids, t5xxl_weights=kwargs.pop("t5xxl_weights", None)) - return super().forward(x, timesteps, context, **kwargs) -``` - -### 3.4 ComfyUI Text Encoder Source - -```python -# comfy/text_encoders/anima.py — FULL SOURCE -from transformers import Qwen2Tokenizer, T5TokenizerFast -import comfy.text_encoders.llama -from comfy import sd1_clip -import os -import torch - - -class Qwen3Tokenizer(sd1_clip.SDTokenizer): - def __init__(self, embedding_directory=None, tokenizer_data={}): - tokenizer_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "qwen25_tokenizer") - super().__init__(tokenizer_path, - pad_with_end=False, embedding_directory=embedding_directory, embedding_size=1024, - embedding_key='qwen3_06b', tokenizer_class=Qwen2Tokenizer, - has_start_token=False, has_end_token=False, pad_to_max_length=False, max_length=99999999, - min_length=1, pad_token=151643, tokenizer_data=tokenizer_data) - -class T5XXLTokenizer(sd1_clip.SDTokenizer): - def __init__(self, embedding_directory=None, tokenizer_data={}): - tokenizer_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "t5_tokenizer") - super().__init__(tokenizer_path, - embedding_directory=embedding_directory, pad_with_end=False, embedding_size=4096, - embedding_key='t5xxl', tokenizer_class=T5TokenizerFast, 
has_start_token=False, - pad_to_max_length=False, max_length=99999999, min_length=1, - tokenizer_data=tokenizer_data) - -class AnimaTokenizer: - def __init__(self, embedding_directory=None, tokenizer_data={}): - self.qwen3_06b = Qwen3Tokenizer(embedding_directory=embedding_directory, tokenizer_data=tokenizer_data) - self.t5xxl = T5XXLTokenizer(embedding_directory=embedding_directory, tokenizer_data=tokenizer_data) - - def tokenize_with_weights(self, text:str, return_word_ids=False, **kwargs): - out = {} - qwen_ids = self.qwen3_06b.tokenize_with_weights(text, return_word_ids, **kwargs) - out["qwen3_06b"] = [[(k[0], 1.0, k[2]) if return_word_ids else (k[0], 1.0) for k in inner_list] for inner_list in qwen_ids] - out["t5xxl"] = self.t5xxl.tokenize_with_weights(text, return_word_ids, **kwargs) - return out - - def untokenize(self, token_weight_pair): - return self.t5xxl.untokenize(token_weight_pair) - - def state_dict(self): - return {} - - def decode(self, token_ids, **kwargs): - return self.qwen3_06b.decode(token_ids, **kwargs) - -class Qwen3_06BModel(sd1_clip.SDClipModel): - def __init__(self, device="cpu", layer="last", layer_idx=None, dtype=None, attention_mask=True, model_options={}): - super().__init__(device=device, layer=layer, layer_idx=layer_idx, textmodel_json_config={}, - dtype=dtype, special_tokens={"pad": 151643}, layer_norm_hidden_state=False, - model_class=comfy.text_encoders.llama.Qwen3_06B, enable_attention_masks=attention_mask, - return_attention_masks=attention_mask, model_options=model_options) - - -class AnimaTEModel(sd1_clip.SD1ClipModel): - def __init__(self, device="cpu", dtype=None, model_options={}): - super().__init__(device=device, dtype=dtype, - name="qwen3_06b", clip_model=Qwen3_06BModel, model_options=model_options) - - def encode_token_weights(self, token_weight_pairs): - out = super().encode_token_weights(token_weight_pairs) - out[2]["t5xxl_ids"] = torch.tensor(list(map(lambda a: a[0], token_weight_pairs["t5xxl"][0])), 
dtype=torch.int) - out[2]["t5xxl_weights"] = torch.tensor(list(map(lambda a: a[1], token_weight_pairs["t5xxl"][0]))) - return out - -def te(dtype_llama=None, llama_quantization_metadata=None): - class AnimaTEModel_(AnimaTEModel): - def __init__(self, device="cpu", dtype=None, model_options={}): - if dtype_llama is not None: - dtype = dtype_llama - if llama_quantization_metadata is not None: - model_options = model_options.copy() - model_options["quantization_metadata"] = llama_quantization_metadata - super().__init__(device=device, dtype=dtype, model_options=model_options) - return AnimaTEModel_ -``` - -### 3.5 ComfyUI Model Registration and Base - -```python -# comfy/supported_models.py — Anima class (excerpt) -class Anima(supported_models_base.BASE): - unet_config = { - "image_model": "anima", - } - - sampling_settings = { - "multiplier": 1.0, - "shift": 3.0, - } - - unet_extra_config = {} - latent_format = latent_formats.Wan21 - - memory_usage_factor = 1.0 - - supported_inference_dtypes = [torch.bfloat16, torch.float16, torch.float32] - - def get_model(self, state_dict, prefix="", device=None): - out = model_base.Anima(self, device=device) - return out - - def clip_target(self, state_dict={}): - pref = self.text_encoder_key_prefix[0] - detect = comfy.text_encoders.hunyuan_video.llama_detect(state_dict, - "{}qwen3_06b.transformer.".format(pref)) - return supported_models_base.ClipTarget( - comfy.text_encoders.anima.AnimaTokenizer, - comfy.text_encoders.anima.te(**detect)) - - def set_inference_dtype(self, dtype, manual_cast_dtype, **kwargs): - self.memory_usage_factor = (self.unet_config.get("model_channels", 2048) / 2048) * 0.95 - if dtype is torch.float16: - self.memory_usage_factor *= 1.4 - return super().set_inference_dtype(dtype, manual_cast_dtype, **kwargs) -``` - -```python -# comfy/model_base.py — Anima class (excerpt) -class Anima(BaseModel): - def __init__(self, model_config, model_type=ModelType.FLOW, device=None): - super().__init__(model_config, 
model_type, device=device, - unet_model=comfy.ldm.anima.model.Anima) - - def extra_conds(self, **kwargs): - out = super().extra_conds(**kwargs) - cross_attn = kwargs.get("cross_attn", None) - t5xxl_ids = kwargs.get("t5xxl_ids", None) - t5xxl_weights = kwargs.get("t5xxl_weights", None) - device = kwargs["device"] - if cross_attn is not None: - if t5xxl_ids is not None: - if t5xxl_weights is not None: - t5xxl_weights = t5xxl_weights.unsqueeze(0).unsqueeze(-1).to(cross_attn) - t5xxl_ids = t5xxl_ids.unsqueeze(0) - - if torch.is_inference_mode_enabled(): # if not we are training - cross_attn = self.diffusion_model.preprocess_text_embeds( - cross_attn.to(device=device, dtype=self.get_dtype_inference()), - t5xxl_ids.to(device=device), - t5xxl_weights=t5xxl_weights.to(device=device, dtype=self.get_dtype_inference())) - else: - out['t5xxl_ids'] = comfy.conds.CONDRegular(t5xxl_ids) - out['t5xxl_weights'] = comfy.conds.CONDRegular(t5xxl_weights) - - out['c_crossattn'] = comfy.conds.CONDRegular(cross_attn) - return out -``` - -### 3.6 ComfyUI Latent Format (Wan21) - -```python -# comfy/latent_formats.py — Wan21 class (used by Anima) -class Wan21(LatentFormat): - latent_channels = 16 - latent_dimensions = 3 - - latent_rgb_factors = [ - [-0.1299, -0.1692, 0.2932], - [ 0.0671, 0.0406, 0.0442], - [ 0.3568, 0.2548, 0.1747], - [ 0.0372, 0.2344, 0.1420], - [ 0.0313, 0.0189, -0.0328], - [ 0.0296, -0.0956, -0.0665], - [-0.3477, -0.4059, -0.2925], - [ 0.0166, 0.1902, 0.1975], - [-0.0412, 0.0267, -0.1364], - [-0.1293, 0.0740, 0.1636], - [ 0.0680, 0.3019, 0.1128], - [ 0.0032, 0.0581, 0.0639], - [-0.1251, 0.0927, 0.1699], - [ 0.0060, -0.0633, 0.0005], - [ 0.3477, 0.2275, 0.2950], - [ 0.1984, 0.0913, 0.1861] - ] - - latent_rgb_factors_bias = [-0.1835, -0.0868, -0.3360] - - def __init__(self): - self.scale_factor = 1.0 - self.latents_mean = torch.tensor([ - -0.7571, -0.7089, -0.9113, 0.1075, -0.1745, 0.9653, -0.1517, 1.5508, - 0.4134, -0.0715, 0.5517, -0.3632, -0.1922, -0.9497, 0.2503, 
-0.2921 - ]).view(1, 16, 1, 1, 1) - self.latents_std = torch.tensor([ - 2.8184, 1.4541, 2.3275, 2.6558, 1.2196, 1.7708, 2.6052, 2.0743, - 3.2687, 2.1526, 2.8652, 1.5579, 1.6382, 1.1253, 2.8251, 1.9160 - ]).view(1, 16, 1, 1, 1) - - self.taesd_decoder_name = "lighttaew2_1" - - def process_in(self, latent): - latents_mean = self.latents_mean.to(latent.device, latent.dtype) - latents_std = self.latents_std.to(latent.device, latent.dtype) - return (latent - latents_mean) * self.scale_factor / latents_std - - def process_out(self, latent): - latents_mean = self.latents_mean.to(latent.device, latent.dtype) - latents_std = self.latents_std.to(latent.device, latent.dtype) - return latent * latents_std / self.scale_factor + latents_mean -``` - -### 3.7 ComfyUI Sampling Constants - -```python -# comfy/model_sampling.py — relevant classes - -def time_snr_shift(alpha, t): - if alpha == 1.0: - return t - return alpha * t / (1 + (alpha - 1) * t) - -class CONST: - def calculate_input(self, sigma, noise): - return noise - - def calculate_denoised(self, sigma, model_output, model_input): - sigma = reshape_sigma(sigma, model_output.ndim) - return model_input - model_output * sigma - - def noise_scaling(self, sigma, noise, latent_image, max_denoise=False): - sigma = reshape_sigma(sigma, noise.ndim) - return sigma * noise + (1.0 - sigma) * latent_image - - def inverse_noise_scaling(self, sigma, latent): - sigma = reshape_sigma(sigma, latent.ndim) - return latent / (1.0 - sigma) - -class ModelSamplingDiscreteFlow(torch.nn.Module): - def __init__(self, model_config=None): - super().__init__() - if model_config is not None: - sampling_settings = model_config.sampling_settings - else: - sampling_settings = {} - self.set_parameters(shift=sampling_settings.get("shift", 1.0), multiplier=sampling_settings.get("multiplier", 1000)) - - def set_parameters(self, shift=1.0, timesteps=1000, multiplier=1000): - self.shift = shift - self.multiplier = multiplier - ts = self.sigma((torch.arange(1, 
timesteps + 1, 1) / timesteps) * multiplier) - self.register_buffer('sigmas', ts) - - def timestep(self, sigma): - return sigma * self.multiplier - - def sigma(self, timestep): - return time_snr_shift(self.shift, timestep / self.multiplier) - - def percent_to_sigma(self, percent): - if percent <= 0.0: - return 1.0 - if percent >= 1.0: - return 0.0 - return time_snr_shift(self.shift, 1.0 - percent) -``` - ---- - -## 4. Existing InvokeAI Patterns (Z-Image as Template) - -The Z-Image integration is the closest architectural template. Here are the key files and patterns: - -### 4.1 Taxonomy / Enums - -- `invokeai/backend/model_manager/config/enums.py` — `BaseModelType` enum (Z-Image at line ~32), `ModelType` enum, `Qwen3Variant` enum (line ~75, has `Qwen3_4B` and `Qwen3_8B`) - -### 4.2 Model Configs - -- `invokeai/backend/model_manager/config/configs/main.py` — Z-Image configs at lines 1079–1167: `Main_Diffusers_ZImage_Config`, `Main_Checkpoint_ZImage_Config`, `Main_GGUFQuantized_ZImage_Config`, each with `probe()` and `_validate_z_image_checkpoint()` helper -- `invokeai/backend/model_manager/config/configs/qwen3_encoder.py` — Qwen3 encoder configs (lines 19–269): directory, checkpoint, and GGUF formats, with variant detection by `hidden_size` -- `invokeai/backend/model_manager/config/configs/factory.py` — `AnyModelConfig` discriminated union (lines 149–255) - -### 4.3 Model Loaders - -- `invokeai/backend/model_manager/load/model_loaders/z_image.py` — 1063 lines: `ZImageDiffusersLoader`, `ZImageCheckpointLoader`, `ZImageGGUFLoader`, plus Qwen3 encoder loaders and ControlNet loader - -### 4.4 Invocation Nodes - -| File | Node | Purpose | -|------|------|---------| -| `invokeai/app/invocations/z_image_model_loader.py` | `ZImageModelLoaderInvocation` | Loads transformer + Qwen3 encoder + VAE | -| `invokeai/app/invocations/z_image_text_encoder.py` | `ZImageTextEncoderInvocation` | Qwen3 with chat template → second-to-last hidden state | -| 
`invokeai/app/invocations/z_image_denoise.py` | `ZImageDenoiseInvocation` | Full denoising loop (771 lines) with flow matching | -| `invokeai/app/invocations/z_image_latents_to_image.py` | `ZImageLatentsToImageInvocation` | VAE decode (supports both AutoencoderKL and FluxAutoEncoder) | -| `invokeai/app/invocations/z_image_image_to_latents.py` | `ZImageImageToLatentsInvocation` | VAE encode | - -### 4.5 Backend Module - -- `invokeai/backend/z_image/` — `conditioning_data.py`, `z_image_patchify.py`, `z_image_regional_prompting.py`, control extensions, etc. - -### 4.6 Frontend - -- `frontend/web/src/features/nodes/util/graph/generation/buildZImageGraph.ts` — Graph builder for Z-Image generation -- `frontend/web/src/features/nodes/types/constants.ts` — UI constants (color, display name, grid size, features per base) -- `frontend/web/src/features/nodes/util/graph/generation/buildGraph.ts` — Main dispatch switch - -### 4.7 Schedulers - -- `invokeai/backend/flux/flow_match_schedulers.py` — `FLOW_MATCH_SCHEDULER_MAP`, `FLOW_MATCH_SCHEDULER_LABEL_MAP`: Euler, Heun, LCM — shared by Flux and Z-Image - -### 4.8 Starter Models - -- `invokeai/app/services/model_install/model_install_default.py` — Z-Image starter models at lines 803–860, Qwen3 encoder starters at lines 1017+ - ---- - -## 5. Diffusers Compatibility - -**Critical finding**: All needed diffusers classes exist in the pinned **v0.36.0**: - -| Class | Module | Purpose | -|-------|--------|---------| -| `CosmosTransformer3DModel` | `diffusers.models.transformers.transformer_cosmos` | Backbone transformer (but see note below) | -| `AutoencoderKLQwenImage` | `diffusers.models.autoencoders.autoencoder_kl_qwenimage` | VAE (fine-tuned Wan 2.1) | -| `AutoencoderKLWan` | `diffusers.models.autoencoders.autoencoder_kl_wan` | Alternative VAE class | - -**⚠️ Important caveat**: The diffusers `CosmosTransformer3DModel` is the *vanilla* Cosmos Predict2 model. Anima extends it with the custom `LLMAdapter`. We have two options: - -1. 
**Don't use diffusers' `CosmosTransformer3DModel` at all** — implement the full `MiniTrainDIT` + `LLMAdapter` as custom PyTorch modules (reverse-engineered from ComfyUI). This is the safer approach since the ComfyUI implementation is the reference. -2. **Use diffusers' `CosmosTransformer3DModel` for the backbone** and bolt on the `LLMAdapter` separately — requires key remapping between ComfyUI checkpoint format and diffusers' expected format. - -**Recommendation**: Option 1 (custom implementation) is recommended for the initial version. The checkpoint is in ComfyUI format and guaranteed to load. Key remapping is error-prone and the model is not officially in diffusers anyway. Diffusers compatibility can be added later as a second format option. - ---- - -## 6. Implementation Steps - -### Step 1: Register Anima Base Type and Qwen3 0.6B Variant - -**Files to modify:** - -- `invokeai/backend/model_manager/config/enums.py` - - Add `Anima = "anima"` to `BaseModelType` enum (after `ZImage`) - - Add `Qwen3_06B = "qwen3-0.6b"` to `Qwen3Variant` enum - -- `invokeai/backend/model_manager/config/configs/qwen3_encoder.py` - - Update the variant detection logic to recognize hidden_size ~1024 → `Qwen3_06B` - - The existing logic maps 2560 → `Qwen3_4B` and 4096 → `Qwen3_8B`; add 1024 → `Qwen3_06B` - -### Step 2: Create Model Config Classes - -**Files to modify:** - -- `invokeai/backend/model_manager/config/configs/main.py` - - Add `Main_Checkpoint_Anima_Config` class with: - - `base = BaseModelType.Anima`, `type = ModelType.Main`, `format = ModelFormat.Checkpoint` - - `probe()` method that validates state dict keys: look for `llm_adapter.` prefix (unique to Anima) plus Cosmos-style keys (`blocks.`, `t_embedder.`, `x_embedder.`, `final_layer.`) - - Default generation settings: `width=1024`, `height=1024`, `steps=35`, `cfg_scale=4.5` - -- `invokeai/backend/model_manager/config/configs/vae.py` - - Add a config class for the QwenImage VAE (if needed as a standalone model type), or 
handle it within the main loader - -- `invokeai/backend/model_manager/config/configs/factory.py` - - Add `Main_Checkpoint_Anima_Config` (and any VAE configs) to the `AnyModelConfig` union - -### Step 3: Create Backend Module - -**New directory**: `invokeai/backend/anima/` - -**New files:** - -- `invokeai/backend/anima/__init__.py` - -- `invokeai/backend/anima/llm_adapter.py` - - Port the `LLMAdapter`, `TransformerBlock`, `Attention`, and `RotaryEmbedding` classes from [comfy/ldm/anima/model.py](https://github.com/comfyanonymous/ComfyUI/blob/master/comfy/ldm/anima/model.py) - - These are standard PyTorch `nn.Module` classes using `nn.Linear`, `nn.Embedding`, `nn.RMSNorm`, `F.scaled_dot_product_attention` - - Replace ComfyUI's `operations.Linear` / `operations.RMSNorm` / `operations.Embedding` / `operations.LayerNorm` with standard `torch.nn` equivalents - - Key architecture: `Embedding(32128, 1024)` → `in_proj` → 6 × `TransformerBlock(source_dim=1024, model_dim=1024, num_heads=16, use_self_attn=True)` → `out_proj` → `RMSNorm` - -- `invokeai/backend/anima/anima_transformer.py` - - Two approaches (see Section 5 recommendation): - - **Option A (recommended)**: Port `MiniTrainDIT` from [comfy/ldm/cosmos/predict2.py](https://github.com/comfyanonymous/ComfyUI/blob/master/comfy/ldm/cosmos/predict2.py), create `AnimaTransformer` that extends it and adds the `LLMAdapter` - - **Option B**: Use `CosmosTransformer3DModel` from diffusers as backbone, wrap it with the `LLMAdapter`, implement key remapping - - Whichever approach: the `forward()` must accept `(x, timesteps, context, t5xxl_ids=None, t5xxl_weights=None)` and run `preprocess_text_embeds()` before the DiT forward pass - -- `invokeai/backend/anima/conditioning_data.py` - - Define `AnimaConditioningData` dataclass holding: - - `qwen3_embeds: torch.Tensor` — shape `[seq_len, 1024]` - - `t5xxl_ids: torch.Tensor` — shape `[seq_len]` (T5 token IDs) - - `t5xxl_weights: Optional[torch.Tensor]` — shape `[seq_len]` (token 
weights for prompt weighting) - - Follow the pattern in `invokeai/backend/z_image/conditioning_data.py` - -### Step 4: Create Model Loader - -**New file**: `invokeai/backend/model_manager/load/model_loaders/anima.py` - -- Register `AnimaCheckpointLoader` via `@ModelLoaderRegistry.register(base=BaseModelType.Anima, type=ModelType.Main, format=ModelFormat.Checkpoint)` -- **Loading logic**: - 1. Load the safetensors state dict - 2. Separate keys into two groups by prefix: - - `llm_adapter.*` → `LLMAdapter` weights - - Everything else (`blocks.*`, `t_embedder.*`, `x_embedder.*`, `final_layer.*`, etc.) → `MiniTrainDIT` / `CosmosTransformer3DModel` weights - 3. Instantiate the `AnimaTransformer` (which contains both components) - 4. Load state dict -- **VAE loading**: Register a loader for `AutoencoderKLQwenImage` from diffusers - - Load from single-file safetensors - - The VAE is a 3D causal conv VAE (processes single images as `[B, C, 1, H, W]`) - - Latent normalization uses the Wan 2.1 `latents_mean` / `latents_std` constants -- **Qwen3 0.6B**: Reuse the existing `Qwen3EncoderCheckpointLoader` from the Z-Image loader — it already handles single-file Qwen3 encoders via `Qwen3ForCausalLM`. Just ensure the config detection maps `hidden_size=1024` to the new `Qwen3_06B` variant. - -### Step 5: Create Invocation Nodes - -**New files in `invokeai/app/invocations/`:** - -- **`anima_model_loader.py`** — `AnimaModelLoaderInvocation` - - Inputs: `model` (Anima main model identifier), optional `qwen3_encoder` (standalone Qwen3 0.6B), optional `vae` (standalone QwenImage VAE) - - Outputs: `AnimaModelLoaderOutput` with `transformer: TransformerField`, `qwen3_encoder: Qwen3EncoderField`, `vae: VAEField` - - Follow pattern of `invokeai/app/invocations/z_image_model_loader.py` - -- **`anima_text_encoder.py`** — `AnimaTextEncoderInvocation` - - Inputs: `prompt` (string), `qwen3_encoder` (Qwen3EncoderField) - - Processing: - 1. 
Tokenize prompt with Qwen3 tokenizer (using chat template: `[{"role": "user", "content": prompt}]`) - 2. Run Qwen3 0.6B model → extract second-to-last hidden state → filter by attention mask - 3. Tokenize same prompt with T5-XXL tokenizer → get token IDs (no T5 model needed) - 4. Store both as conditioning tensors - - Output: conditioning info containing `qwen3_embeds`, `t5xxl_ids`, `t5xxl_weights` - - Follow pattern of `invokeai/app/invocations/z_image_text_encoder.py` for Qwen3 encoding - - **New aspect**: Must also produce T5 token IDs. Need to bundle `T5TokenizerFast` — the `sentencepiece` dependency is already in `pyproject.toml` (line 46), and `T5TokenizerFast` is used elsewhere in InvokeAI (for Flux/SD3 text encoding) - -- **`anima_denoise.py`** — `AnimaDenoiseInvocation` - - Inputs: `transformer`, `positive_conditioning`, `negative_conditioning`, `width`, `height`, `num_steps`, `guidance_scale`, `seed`, `scheduler` (Euler/Heun from existing flow match scheduler map) - - Processing: - 1. Generate random noise in latent space: `[1, 16, 1, H//8, W//8]` (note: 3D latents with T=1) - 2. Apply Wan 2.1 `process_in()` normalization if doing img2img (for txt2img, start from pure noise) - 3. Create sigma schedule using rectified flow with shift=3.0 (same `time_snr_shift` as Flux/Z-Image) - 4. Denoising loop: for each timestep, run transformer forward with conditioning, compute `denoised = input - output * sigma` - 5. CFG: when `guidance_scale > 1.0`, run both conditional and unconditional forward passes, blend: `output = uncond + guidance * (cond - uncond)` - 6. Apply scheduler step (Euler or Heun) - - Output: latents tensor - - Follow the flow-matching denoising pattern from `invokeai/app/invocations/z_image_denoise.py` (simplified: no regional prompting, no ControlNet, no inpainting for initial version) - - **Key difference from Z-Image**: The transformer expects `[B, C, T, H, W]` 5D input (Cosmos format), not `[B, C, H, W]` 4D. Temporal dim = 1 for images. 
- -- **`anima_latents_to_image.py`** — `AnimaLatentsToImageInvocation` - - Inputs: `latents`, `vae` (VAEField) - - Processing: - 1. Load `AutoencoderKLQwenImage` from diffusers - 2. Apply Wan 2.1 `process_out()` denormalization: `latent * latents_std + latents_mean` - 3. Decode: VAE expects `[B, C, T, H, W]` → outputs `[B, C, T, H, W]` → squeeze temporal dim → convert to image - - Output: PIL Image - - Follow pattern of `invokeai/app/invocations/z_image_latents_to_image.py`, but adapted for `AutoencoderKLQwenImage` instead of `FluxAutoEncoder`/`AutoencoderKL` - -### Step 6: Update Frontend - -**Files to modify:** - -- `frontend/web/src/features/nodes/types/constants.ts` - - Add `'anima'` to `BASE_COLOR_MAP` (suggest a unique color, e.g., `'pink'` or `'rose'` for anime association) - - Add `'anima'` to `BASE_LABEL_MAP` with display name `'Anima'` - - Add `'anima'` to feature support arrays (only `SUPPORTS_CFG_RESCALE_BASE_MODELS` and similar that apply; omit from LoRA/ControlNet/IP-Adapter arrays initially) - -- `frontend/web/src/features/parameters/hooks/useMainModelDefaultSettings.ts` (or equivalent) - - Add `'anima'` with defaults: width=1024, height=1024, steps=35, cfg_scale=4.5 - -- **New file**: `frontend/web/src/features/nodes/util/graph/generation/buildAnimaGraph.ts` - - Create the graph builder function that assembles: `anima_model_loader` → `anima_text_encoder` (positive + negative) → `anima_denoise` → `anima_latents_to_image` - - Follow pattern of `frontend/web/src/features/nodes/util/graph/generation/buildZImageGraph.ts` but simplified (no ControlNet, no regional prompting, no img2img initially) - -- `frontend/web/src/features/nodes/util/graph/generation/buildGraph.ts` - - Add `case 'anima': return await buildAnimaGraph(arg);` to the dispatch switch - -- Various Zod schema files and node type union files — these will need `'anima'` added wherever `'z-image'` appears, following the same pattern - -- Grid size / scale factor mappings: - - Grid size: **8** 
(spatial compression is 8×, unlike Flux/Z-Image's 16×) - - Default dimensions: 1024×1024 - -### Step 7: Register Starter Models - -**File to modify:** - -- `invokeai/app/services/model_install/model_install_default.py` - - Add starter model entries for: - - Anima Preview2 transformer: `circlestone-labs/Anima` → `anima-preview2.safetensors` - - Qwen3 0.6B text encoder: `circlestone-labs/Anima` → `qwen_3_06b_base.safetensors` - - QwenImage VAE: `circlestone-labs/Anima` → `qwen_image_vae.safetensors` - - Follow the pattern of Z-Image starter models at lines 803–860 - -### Step 8: Regenerate OpenAPI Schema - -- After all backend changes, run the schema generation script to update the auto-generated OpenAPI schema that the frontend consumes -- This is typically done via `python scripts/generate_openapi_schema.py` - ---- - -## 7. Key Technical Challenges & Decisions - -### 7.1 Cosmos DiT Implementation Strategy - -**Decision needed**: Use diffusers' `CosmosTransformer3DModel` or port ComfyUI's `MiniTrainDIT`? - -| Approach | Pros | Cons | -|----------|------|------| -| **Port MiniTrainDIT from ComfyUI** | Exact checkpoint compatibility, no key remapping, reference implementation | More code to maintain, must port supporting classes (`Block`, `PatchEmbed`, `FinalLayer`, `Timesteps`, etc.) | -| **Use diffusers CosmosTransformer3DModel** | Less custom code, maintained by diffusers team | Key names may differ from checkpoint, needs investigation, may have subtle behavioral differences | - -**Recommendation**: Start with porting from ComfyUI. The checkpoint is in ComfyUI format and guaranteed to load. Diffusers compatibility can be added later as a second format option. - -### 7.2 T5 Tokenizer Handling - -The LLM Adapter needs T5-XXL token IDs but *not* the T5-XXL model. InvokeAI already has `T5TokenizerFast` usage for Flux/SD3 (see `invokeai/backend/flux/text_conditioning.py`). The tokenizer files are small (~2MB) and can be loaded from the `transformers` library cache. 
- -**Approach**: Load `T5TokenizerFast` in the text encoder invocation using `T5TokenizerFast.from_pretrained("google/t5-v1_1-xxl")` (or bundle tokenizer files). No T5 model weights are needed. - -### 7.3 VAE 3D Tensor Handling - -The `AutoencoderKLQwenImage` is a 3D causal conv VAE that expects `[B, C, T, H, W]` tensors. For single images, `T=1`. The encode/decode calls must: -- **Encode**: `image_tensor.unsqueeze(2)` → `[B, C, 1, H, W]` → VAE encode → latents `[B, 16, 1, H//8, W//8]` -- **Decode**: latents `[B, 16, 1, H//8, W//8]` → VAE decode → `[B, C, 1, H, W]` → `.squeeze(2)` → `[B, C, H, W]` - -Apply Wan 2.1 mean/std normalization (not simple scaling): -```python -latents_mean = torch.tensor([-0.7571, -0.7089, -0.9113, 0.1075, -0.1745, 0.9653, -0.1517, 1.5508, - 0.4134, -0.0715, 0.5517, -0.3632, -0.1922, -0.9497, 0.2503, -0.2921]) -latents_std = torch.tensor([2.8184, 1.4541, 2.3275, 2.6558, 1.2196, 1.7708, 2.6052, 2.0743, - 3.2687, 2.1526, 2.8652, 1.5579, 1.6382, 1.1253, 2.8251, 1.9160]) -``` - -### 7.4 Noise Schedule: Rectified Flow with Shift=3.0 - -The sigma schedule uses the same `time_snr_shift` formula as Flux: -```python -def time_snr_shift(alpha, t): - if alpha == 1.0: - return t - return alpha * t / (1 + (alpha - 1) * t) -``` - -With `alpha=3.0` and `multiplier=1000`. The existing `FlowMatchEulerDiscreteScheduler` in `invokeai/backend/flux/flow_match_schedulers.py` should work, but may need the shift parameter exposed or configured. Check if the scheduler's `shift` parameter matches Anima's 3.0 (Flux uses a different shift value). - -### 7.5 Qwen3 0.6B vs 4B/8B Differences - -The Qwen3 0.6B model has `hidden_size=1024` compared to 4B's 2560 and 8B's 4096. The existing Qwen3 encoder infrastructure in InvokeAI handles 4B and 8B. 
Adding 0.6B requires: -- New variant enum value -- Updated variant detection (hidden_size → variant mapping) -- The model class (`Qwen3ForCausalLM` from transformers) should work for any size — it's architecture-agnostic - -### 7.6 State Dict Key Mapping (Checkpoint → Model) - -The Anima checkpoint likely uses keys like: -``` -llm_adapter.embed.weight -llm_adapter.blocks.0.self_attn.q_proj.weight -llm_adapter.blocks.0.cross_attn.k_proj.weight -llm_adapter.blocks.0.mlp.0.weight -llm_adapter.out_proj.weight -llm_adapter.norm.weight -llm_adapter.rotary_emb.inv_freq -blocks.0.attn.to_q.weight (Cosmos DiT attention) -blocks.0.attn.to_k.weight -blocks.0.crossattn.to_q.weight (Cosmos DiT cross-attention) -t_embedder.0.freqs (Timestep embedding) -t_embedder.1.linear_1.weight -x_embedder.proj.weight (Patch embedding) -final_layer.linear.weight -``` - -**This key structure must be verified by inspecting the actual checkpoint file.** The loader must correctly instantiate the model architecture and load these keys. If using the ComfyUI `MiniTrainDIT` port, keys should match directly. If using diffusers' `CosmosTransformer3DModel`, a key remapping function will be needed. - ---- - -## 8. 
File Change Summary - -### New Files (Backend — Python) - -| File | Purpose | -|------|---------| -| `invokeai/backend/anima/__init__.py` | Package init | -| `invokeai/backend/anima/llm_adapter.py` | `LLMAdapter`, `TransformerBlock`, `Attention`, `RotaryEmbedding` | -| `invokeai/backend/anima/anima_transformer.py` | `AnimaTransformer` (MiniTrainDIT + LLMAdapter) or wrapper around `CosmosTransformer3DModel` | -| `invokeai/backend/anima/conditioning_data.py` | `AnimaConditioningData` dataclass | -| `invokeai/backend/model_manager/load/model_loaders/anima.py` | `AnimaCheckpointLoader`, VAE loader | -| `invokeai/app/invocations/anima_model_loader.py` | `AnimaModelLoaderInvocation` | -| `invokeai/app/invocations/anima_text_encoder.py` | `AnimaTextEncoderInvocation` | -| `invokeai/app/invocations/anima_denoise.py` | `AnimaDenoiseInvocation` | -| `invokeai/app/invocations/anima_latents_to_image.py` | `AnimaLatentsToImageInvocation` | - -### Modified Files (Backend — Python) - -| File | Change | -|------|--------| -| `invokeai/backend/model_manager/config/enums.py` | Add `Anima` to `BaseModelType`, `Qwen3_06B` to `Qwen3Variant` | -| `invokeai/backend/model_manager/config/configs/main.py` | Add `Main_Checkpoint_Anima_Config` | -| `invokeai/backend/model_manager/config/configs/qwen3_encoder.py` | Add hidden_size=1024 → `Qwen3_06B` detection | -| `invokeai/backend/model_manager/config/configs/factory.py` | Add Anima configs to `AnyModelConfig` union | -| `invokeai/app/services/model_install/model_install_default.py` | Add Anima starter models | - -### New Files (Frontend — TypeScript) - -| File | Purpose | -|------|---------| -| `frontend/web/src/features/nodes/util/graph/generation/buildAnimaGraph.ts` | Anima graph builder | - -### Modified Files (Frontend — TypeScript) - -| File | Change | -|------|--------| -| `frontend/web/src/features/nodes/types/constants.ts` | Add `'anima'` to all base model maps | -| 
`frontend/web/src/features/nodes/util/graph/generation/buildGraph.ts` | Add `'anima'` case to dispatch switch | -| Default settings hook | Add Anima defaults (1024×1024, CFG 4.5, 35 steps) | -| Zod schemas / node type unions | Add `'anima'` entries | - ---- - -## 9. Out of Scope (Future Work) - -The following features are explicitly deferred to follow-up implementations: - -- **LoRA support** — requires LoRA config classes, patcher logic, and a loader node -- **ControlNet** — requires Cosmos ControlNet support (available in diffusers 0.37.0 as `CosmosControlNetModel`) -- **Inpainting / Outpainting** — requires latent masking and noise injection logic -- **Image-to-Image** — requires VAE encode path + denoising from partial noise -- **Regional Prompting** — requires mask-based attention manipulation -- **IP Adapter** — architecture-specific, if even applicable to Cosmos-based models -- **GGUF / Quantized model support** — can be added later following Z-Image's GGUF loader pattern -- **Diffusers format loading** — if/when an official Anima diffusers pipeline is created diff --git a/invokeai/app/invocations/anima_denoise.py b/invokeai/app/invocations/anima_denoise.py index 8b289736c22..0cf9586e29a 100644 --- a/invokeai/app/invocations/anima_denoise.py +++ b/invokeai/app/invocations/anima_denoise.py @@ -19,7 +19,7 @@ import inspect import math from contextlib import ExitStack -from typing import Callable, Optional +from typing import Callable, Iterator, Optional, Tuple import torch import torchvision.transforms as tv_transforms @@ -44,6 +44,9 @@ from invokeai.backend.anima.regional_prompting import AnimaRegionalPromptingExtension from invokeai.backend.flux.schedulers import ANIMA_SCHEDULER_LABELS, ANIMA_SCHEDULER_MAP, ANIMA_SCHEDULER_NAME_VALUES from invokeai.backend.model_manager.taxonomy import BaseModelType +from invokeai.backend.patches.layer_patcher import LayerPatcher +from invokeai.backend.patches.lora_conversions.anima_lora_constants import 
ANIMA_LORA_TRANSFORMER_PREFIX +from invokeai.backend.patches.model_patch_raw import ModelPatchRaw from invokeai.backend.rectified_flow.rectified_flow_inpaint_extension import ( RectifiedFlowInpaintExtension, assert_broadcastable, @@ -500,6 +503,18 @@ def _run_diffusion(self, context: InvocationContext) -> torch.Tensor: with ExitStack() as exit_stack: (cached_weights, transformer) = exit_stack.enter_context(transformer_info.model_on_device()) + # Apply LoRA models to the transformer. + # Note: We apply the LoRA after the transformer has been moved to its target device for faster patching. + exit_stack.enter_context( + LayerPatcher.apply_smart_model_patches( + model=transformer, + patches=self._lora_iterator(context), + prefix=ANIMA_LORA_TRANSFORMER_PREFIX, + dtype=inference_dtype, + cached_weights=cached_weights, + ) + ) + # Run LLM Adapter for each regional conditioning to produce context vectors. # This must happen with the transformer on device since it uses the adapter weights. if has_regional: @@ -668,3 +683,15 @@ def step_callback(state: PipelineIntermediateState) -> None: context.util.sd_step_callback(state, BaseModelType.Anima) return step_callback + + def _lora_iterator(self, context: InvocationContext) -> Iterator[Tuple[ModelPatchRaw, float]]: + """Iterate over LoRA models to apply to the transformer.""" + for lora in self.transformer.loras: + lora_info = context.models.load(lora.lora) + if not isinstance(lora_info.model, ModelPatchRaw): + raise TypeError( + f"Expected ModelPatchRaw for LoRA '{lora.lora.key}', got {type(lora_info.model).__name__}. " + "The LoRA model may be corrupted or incompatible." 
+ ) + yield (lora_info.model, lora.weight) + del lora_info diff --git a/invokeai/app/invocations/anima_lora_loader.py b/invokeai/app/invocations/anima_lora_loader.py new file mode 100644 index 00000000000..0f09b795b88 --- /dev/null +++ b/invokeai/app/invocations/anima_lora_loader.py @@ -0,0 +1,151 @@ +from typing import Optional + +from invokeai.app.invocations.baseinvocation import ( + BaseInvocation, + BaseInvocationOutput, + invocation, + invocation_output, +) +from invokeai.app.invocations.fields import FieldDescriptions, Input, InputField, OutputField +from invokeai.app.invocations.model import LoRAField, ModelIdentifierField, Qwen3EncoderField, TransformerField +from invokeai.app.services.shared.invocation_context import InvocationContext +from invokeai.backend.model_manager.taxonomy import BaseModelType, ModelType + + +@invocation_output("anima_lora_loader_output") +class AnimaLoRALoaderOutput(BaseInvocationOutput): + """Anima LoRA Loader Output""" + + transformer: Optional[TransformerField] = OutputField( + default=None, description=FieldDescriptions.transformer, title="Anima Transformer" + ) + qwen3_encoder: Optional[Qwen3EncoderField] = OutputField( + default=None, description=FieldDescriptions.qwen3_encoder, title="Qwen3 Encoder" + ) + + +@invocation( + "anima_lora_loader", + title="Apply LoRA - Anima", + tags=["lora", "model", "anima"], + category="model", + version="1.0.0", +) +class AnimaLoRALoaderInvocation(BaseInvocation): + """Apply a LoRA model to an Anima transformer and/or Qwen3 text encoder.""" + + lora: ModelIdentifierField = InputField( + description=FieldDescriptions.lora_model, + title="LoRA", + ui_model_base=BaseModelType.Anima, + ui_model_type=ModelType.LoRA, + ) + weight: float = InputField(default=0.75, description=FieldDescriptions.lora_weight) + transformer: TransformerField | None = InputField( + default=None, + description=FieldDescriptions.transformer, + input=Input.Connection, + title="Anima Transformer", + ) + qwen3_encoder: 
Qwen3EncoderField | None = InputField( + default=None, + title="Qwen3 Encoder", + description=FieldDescriptions.qwen3_encoder, + input=Input.Connection, + ) + + def invoke(self, context: InvocationContext) -> AnimaLoRALoaderOutput: + lora_key = self.lora.key + + if not context.models.exists(lora_key): + raise ValueError(f"Unknown lora: {lora_key}!") + + if self.transformer and any(lora.lora.key == lora_key for lora in self.transformer.loras): + raise ValueError(f'LoRA "{lora_key}" already applied to transformer.') + if self.qwen3_encoder and any(lora.lora.key == lora_key for lora in self.qwen3_encoder.loras): + raise ValueError(f'LoRA "{lora_key}" already applied to Qwen3 encoder.') + + output = AnimaLoRALoaderOutput() + + if self.transformer is not None: + output.transformer = self.transformer.model_copy(deep=True) + output.transformer.loras.append( + LoRAField( + lora=self.lora, + weight=self.weight, + ) + ) + if self.qwen3_encoder is not None: + output.qwen3_encoder = self.qwen3_encoder.model_copy(deep=True) + output.qwen3_encoder.loras.append( + LoRAField( + lora=self.lora, + weight=self.weight, + ) + ) + + return output + + +@invocation( + "anima_lora_collection_loader", + title="Apply LoRA Collection - Anima", + tags=["lora", "model", "anima"], + category="model", + version="1.0.0", +) +class AnimaLoRACollectionLoader(BaseInvocation): + """Applies a collection of LoRAs to an Anima transformer.""" + + loras: Optional[LoRAField | list[LoRAField]] = InputField( + default=None, description="LoRA models and weights. 
May be a single LoRA or collection.", title="LoRAs"
+    )
+
+    transformer: Optional[TransformerField] = InputField(
+        default=None,
+        description=FieldDescriptions.transformer,
+        input=Input.Connection,
+        title="Transformer",
+    )
+    qwen3_encoder: Qwen3EncoderField | None = InputField(
+        default=None,
+        title="Qwen3 Encoder",
+        description=FieldDescriptions.qwen3_encoder,
+        input=Input.Connection,
+    )
+
+    def invoke(self, context: InvocationContext) -> AnimaLoRALoaderOutput:
+        output = AnimaLoRALoaderOutput()
+        loras = self.loras if isinstance(self.loras, list) else [self.loras]
+        added_loras: list[str] = []
+
+        if self.transformer is not None:
+            output.transformer = self.transformer.model_copy(deep=True)
+
+        if self.qwen3_encoder is not None:
+            output.qwen3_encoder = self.qwen3_encoder.model_copy(deep=True)
+
+        for lora in loras:
+            if lora is None:
+                continue
+            if lora.lora.key in added_loras:
+                continue
+
+            if not context.models.exists(lora.lora.key):
+                raise ValueError(f"Unknown lora: {lora.lora.key}!")
+
+            if lora.lora.base is not BaseModelType.Anima:
+                raise ValueError(
+                    f"LoRA '{lora.lora.key}' is for {lora.lora.base.value if lora.lora.base else 'unknown'} models, "
+                    "not Anima models. Ensure you are using an Anima compatible LoRA."
+ ) + + added_loras.append(lora.lora.key) + + if self.transformer is not None and output.transformer is not None: + output.transformer.loras.append(lora) + + if self.qwen3_encoder is not None and output.qwen3_encoder is not None: + output.qwen3_encoder.loras.append(lora) + + return output diff --git a/invokeai/app/invocations/anima_text_encoder.py b/invokeai/app/invocations/anima_text_encoder.py index 9fe0ccb310a..1e0730346b6 100644 --- a/invokeai/app/invocations/anima_text_encoder.py +++ b/invokeai/app/invocations/anima_text_encoder.py @@ -14,6 +14,7 @@ """ from contextlib import ExitStack +from typing import Iterator, Tuple import torch from transformers import PreTrainedModel, PreTrainedTokenizerBase, T5TokenizerFast @@ -30,15 +31,14 @@ from invokeai.app.invocations.model import Qwen3EncoderField from invokeai.app.invocations.primitives import AnimaConditioningOutput from invokeai.app.services.shared.invocation_context import InvocationContext +from invokeai.backend.patches.layer_patcher import LayerPatcher +from invokeai.backend.patches.lora_conversions.anima_lora_constants import ANIMA_LORA_QWEN3_PREFIX +from invokeai.backend.patches.model_patch_raw import ModelPatchRaw from invokeai.backend.stable_diffusion.diffusion.conditioning_data import ( AnimaConditioningInfo, ConditioningFieldData, ) - -# Qwen3 max sequence length — ComfyUI's SDClipModel uses max_length=77 for Qwen3. -# We match this to ensure the LLM Adapter's cross-attention sees the same number of -# source positions (including padding) as during training. 
-QWEN3_MAX_SEQ_LEN = 77 +from invokeai.backend.util.devices import TorchDevice # T5-XXL max sequence length for token IDs T5_MAX_SEQ_LEN = 512 @@ -122,6 +122,17 @@ def _encode_prompt( device = text_encoder.device + # Apply LoRA models to the text encoder + lora_dtype = TorchDevice.choose_bfloat16_safe_dtype(device) + exit_stack.enter_context( + LayerPatcher.apply_smart_model_patches( + model=text_encoder, + patches=self._lora_iterator(context), + prefix=ANIMA_LORA_QWEN3_PREFIX, + dtype=lora_dtype, + ) + ) + if not isinstance(text_encoder, PreTrainedModel): raise TypeError( f"Expected PreTrainedModel for text encoder, got {type(text_encoder).__name__}." @@ -133,13 +144,13 @@ def _encode_prompt( context.util.signal_progress("Running Qwen3 0.6B text encoder") - # Anima uses base Qwen3 (not instruct) — tokenize directly, no chat template - # ComfyUI uses max_length=77 (SDClipModel default) for Qwen3 + # Anima uses base Qwen3 (not instruct) — tokenize directly, no chat template. + # No padding or truncation: the LLM Adapter uses rotary position embeddings + # with no fixed positional limit, so the Qwen3 source sequence can be any length. text_inputs = tokenizer( prompt, - padding="max_length", - max_length=QWEN3_MAX_SEQ_LEN, - truncation=True, + padding=False, + truncation=False, return_attention_mask=True, return_tensors="pt", ) @@ -149,20 +160,13 @@ def _encode_prompt( if not isinstance(text_input_ids, torch.Tensor) or not isinstance(attention_mask, torch.Tensor): raise TypeError("Tokenizer returned unexpected types.") - # Check for truncation - untruncated_ids = tokenizer(prompt, padding="longest", return_tensors="pt").input_ids - if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( - text_input_ids, untruncated_ids - ): - removed_text = tokenizer.batch_decode(untruncated_ids[:, QWEN3_MAX_SEQ_LEN - 1 : -1]) - context.logger.warning( - f"Prompt truncated at {QWEN3_MAX_SEQ_LEN} tokens. 
Removed: {removed_text}" - ) + # Ensure at least 1 token (empty prompts produce 0 tokens with padding=False) + if text_input_ids.shape[-1] == 0: + pad_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + text_input_ids = torch.tensor([[pad_id]]) + attention_mask = torch.tensor([[1]]) # Get last hidden state from Qwen3 (ComfyUI uses layer="last") - # Pass attention mask so padding tokens don't attend to each other, - # but keep ALL positions in the output (including padding) to match - # ComfyUI's SDClipModel which returns full padded sequences. prompt_mask = attention_mask.to(device).bool() outputs = text_encoder( text_input_ids.to(device), @@ -175,9 +179,8 @@ def _encode_prompt( if len(outputs.hidden_states) < 1: raise RuntimeError(f"Expected at least 1 hidden state, got {len(outputs.hidden_states)}.") - # Use last hidden state — keep all positions (including padding) - # ComfyUI's SDClipModel returns all positions without filtering. - qwen3_embeds = outputs.hidden_states[-1][0] # Shape: (QWEN3_MAX_SEQ_LEN, 1024) + # Use last hidden state — only real tokens, no padding + qwen3_embeds = outputs.hidden_states[-1][0] # Shape: (seq_len, 1024) # --- Step 2: Tokenize with T5-XXL tokenizer (IDs only, no model) --- context.util.signal_progress("Tokenizing with T5-XXL") @@ -192,3 +195,15 @@ def _encode_prompt( t5xxl_ids = t5_tokens.input_ids[0] # Shape: (seq_len,) return qwen3_embeds, t5xxl_ids, None + + def _lora_iterator(self, context: InvocationContext) -> Iterator[Tuple[ModelPatchRaw, float]]: + """Iterate over LoRA models to apply to the Qwen3 text encoder.""" + for lora in self.qwen3_encoder.loras: + lora_info = context.models.load(lora.lora) + if not isinstance(lora_info.model, ModelPatchRaw): + raise TypeError( + f"Expected ModelPatchRaw for LoRA '{lora.lora.key}', got {type(lora_info.model).__name__}. " + "The LoRA model may be corrupted or incompatible." 
+ ) + yield (lora_info.model, lora.weight) + del lora_info diff --git a/invokeai/backend/model_manager/configs/factory.py b/invokeai/backend/model_manager/configs/factory.py index e5b096a931f..7eb391f610a 100644 --- a/invokeai/backend/model_manager/configs/factory.py +++ b/invokeai/backend/model_manager/configs/factory.py @@ -51,6 +51,7 @@ LoRA_LyCORIS_SD1_Config, LoRA_LyCORIS_SD2_Config, LoRA_LyCORIS_SDXL_Config, + LoRA_LyCORIS_Anima_Config, LoRA_LyCORIS_ZImage_Config, LoRA_OMI_FLUX_Config, LoRA_OMI_SDXL_Config, @@ -211,6 +212,7 @@ Annotated[LoRA_LyCORIS_Flux2_Config, LoRA_LyCORIS_Flux2_Config.get_tag()], Annotated[LoRA_LyCORIS_FLUX_Config, LoRA_LyCORIS_FLUX_Config.get_tag()], Annotated[LoRA_LyCORIS_ZImage_Config, LoRA_LyCORIS_ZImage_Config.get_tag()], + Annotated[LoRA_LyCORIS_Anima_Config, LoRA_LyCORIS_Anima_Config.get_tag()], # LoRA - OMI format Annotated[LoRA_OMI_SDXL_Config, LoRA_OMI_SDXL_Config.get_tag()], Annotated[LoRA_OMI_FLUX_Config, LoRA_OMI_FLUX_Config.get_tag()], diff --git a/invokeai/backend/model_manager/configs/lora.py b/invokeai/backend/model_manager/configs/lora.py index 1619c9d6f06..fa3526930da 100644 --- a/invokeai/backend/model_manager/configs/lora.py +++ b/invokeai/backend/model_manager/configs/lora.py @@ -757,6 +757,88 @@ def _get_base_or_raise(cls, mod: ModelOnDisk) -> BaseModelType: raise NotAMatchError("model does not look like a Z-Image LoRA") +class LoRA_LyCORIS_Anima_Config(LoRA_LyCORIS_Config_Base, Config_Base): + """Model config for Anima LoRA models in LyCORIS format.""" + + base: Literal[BaseModelType.Anima] = Field(default=BaseModelType.Anima) + + @classmethod + def _validate_looks_like_lora(cls, mod: ModelOnDisk) -> None: + """Anima LoRAs use Kohya-style keys targeting Cosmos DiT blocks. 
+ + Anima LoRAs have keys like: + - lora_unet_blocks_0_cross_attn_k_proj.lora_down.weight (Kohya format) + - diffusion_model.blocks.0.cross_attn.k_proj.lora_A.weight (diffusers PEFT format) + - transformer.blocks.0.cross_attn.k_proj.lora_A.weight (diffusers PEFT format) + """ + state_dict = mod.load_state_dict() + + # Check for Kohya-style Anima LoRA keys + has_kohya_keys = state_dict_has_any_keys_starting_with( + state_dict, + { + "lora_unet_blocks_", + }, + ) + + # Check for diffusers PEFT format with Cosmos DiT layer names + has_cosmos_dit_keys = state_dict_has_any_keys_starting_with( + state_dict, + { + "diffusion_model.blocks.", + "transformer.blocks.", + "base_model.model.transformer.blocks.", + }, + ) + + # Also check for LoRA/LoKR weight suffixes + has_lora_suffix = state_dict_has_any_keys_ending_with( + state_dict, + { + "lora_A.weight", + "lora_B.weight", + "lora_down.weight", + "lora_up.weight", + "dora_scale", + ".lokr_w1", + ".lokr_w2", + }, + ) + + if (has_kohya_keys or has_cosmos_dit_keys) and has_lora_suffix: + return + + raise NotAMatchError("model does not match Anima LoRA heuristics") + + @classmethod + def _get_base_or_raise(cls, mod: ModelOnDisk) -> BaseModelType: + """Anima LoRAs target Cosmos DiT blocks (blocks.X.cross_attn, blocks.X.self_attn, etc.).""" + state_dict = mod.load_state_dict() + + # Kohya format: lora_unet_blocks_X_... 
+ has_kohya_keys = state_dict_has_any_keys_starting_with( + state_dict, + { + "lora_unet_blocks_", + }, + ) + + # Diffusers PEFT format with Cosmos DiT structure + has_cosmos_dit_keys = state_dict_has_any_keys_starting_with( + state_dict, + { + "diffusion_model.blocks.", + "transformer.blocks.", + "base_model.model.transformer.blocks.", + }, + ) + + if has_kohya_keys or has_cosmos_dit_keys: + return BaseModelType.Anima + + raise NotAMatchError("model does not look like an Anima LoRA") + + class ControlAdapter_Config_Base(ABC, BaseModel): default_settings: ControlAdapterDefaultSettings | None = Field(None) diff --git a/invokeai/backend/model_manager/load/model_loaders/lora.py b/invokeai/backend/model_manager/load/model_loaders/lora.py index d39982456af..2f9a353124d 100644 --- a/invokeai/backend/model_manager/load/model_loaders/lora.py +++ b/invokeai/backend/model_manager/load/model_loaders/lora.py @@ -54,6 +54,7 @@ ) from invokeai.backend.patches.lora_conversions.sd_lora_conversion_utils import lora_model_from_sd_state_dict from invokeai.backend.patches.lora_conversions.sdxl_lora_conversion_utils import convert_sdxl_keys_to_diffusers_format +from invokeai.backend.patches.lora_conversions.anima_lora_conversion_utils import lora_model_from_anima_state_dict from invokeai.backend.patches.lora_conversions.z_image_lora_conversion_utils import lora_model_from_z_image_state_dict @@ -155,6 +156,9 @@ def _load_model( # Z-Image LoRAs use diffusers PEFT format with transformer and/or Qwen3 encoder layers. # We set alpha=None to use rank as alpha (common default). model = lora_model_from_z_image_state_dict(state_dict=state_dict, alpha=None) + elif self._model_base == BaseModelType.Anima: + # Anima LoRAs use Kohya-style or diffusers PEFT format targeting Cosmos DiT blocks. 
+ model = lora_model_from_anima_state_dict(state_dict=state_dict, alpha=None) else: raise ValueError(f"Unsupported LoRA base model: {self._model_base}") diff --git a/invokeai/backend/patches/lora_conversions/anima_lora_constants.py b/invokeai/backend/patches/lora_conversions/anima_lora_constants.py new file mode 100644 index 00000000000..f44a25d9809 --- /dev/null +++ b/invokeai/backend/patches/lora_conversions/anima_lora_constants.py @@ -0,0 +1,8 @@ +# Anima LoRA prefix constants +# These prefixes are used for key mapping when applying LoRA patches to Anima models + +# Prefix for Anima transformer (Cosmos DiT architecture) LoRA layers +ANIMA_LORA_TRANSFORMER_PREFIX = "lora_transformer-" + +# Prefix for Qwen3 text encoder LoRA layers +ANIMA_LORA_QWEN3_PREFIX = "lora_qwen3-" diff --git a/invokeai/backend/patches/lora_conversions/anima_lora_conversion_utils.py b/invokeai/backend/patches/lora_conversions/anima_lora_conversion_utils.py new file mode 100644 index 00000000000..2c9800cab33 --- /dev/null +++ b/invokeai/backend/patches/lora_conversions/anima_lora_conversion_utils.py @@ -0,0 +1,321 @@ +"""Anima LoRA conversion utilities. + +Anima uses a Cosmos Predict2 DiT transformer architecture. +LoRAs for Anima typically follow the Kohya-style format with underscore-separated keys +(e.g., lora_unet_blocks_0_cross_attn_k_proj) that map to model parameter paths +(e.g., blocks.0.cross_attn.k_proj). + +Some Anima LoRAs also target the Qwen3 text encoder with lora_te_ prefix keys +(e.g., lora_te_layers_0_self_attn_q_proj -> layers.0.self_attn.q_proj). 
+""" + +import re +from typing import Dict + +import torch + +from invokeai.backend.patches.layers.base_layer_patch import BaseLayerPatch +from invokeai.backend.patches.layers.utils import any_lora_layer_from_state_dict +from invokeai.backend.util.logging import InvokeAILogger + +logger = InvokeAILogger.get_logger(__name__) +from invokeai.backend.patches.lora_conversions.anima_lora_constants import ( + ANIMA_LORA_QWEN3_PREFIX, + ANIMA_LORA_TRANSFORMER_PREFIX, +) +from invokeai.backend.patches.model_patch_raw import ModelPatchRaw + + +def is_state_dict_likely_anima_lora(state_dict: dict[str | int, torch.Tensor]) -> bool: + """Checks if the provided state dict is likely an Anima LoRA. + + Anima LoRAs use Kohya-style naming with lora_unet_ prefix and underscore-separated + model key paths targeting Cosmos DiT blocks. + """ + str_keys = [k for k in state_dict.keys() if isinstance(k, str)] + + # Anima LoRAs use Kohya-style keys: lora_unet_blocks_X_... + has_kohya_keys = any(k.startswith("lora_unet_blocks_") for k in str_keys) + + if has_kohya_keys: + return True + + # Also check for diffusers PEFT format with Anima-specific layer names + # (blocks.X.cross_attn, blocks.X.self_attn, blocks.X.mlp — Cosmos DiT structure) + has_cosmos_dit_keys = any( + k.startswith(("diffusion_model.blocks.", "transformer.blocks.", "base_model.model.transformer.blocks.")) + for k in str_keys + ) + + return has_cosmos_dit_keys + + +# Mapping from Kohya underscore-style substrings to model parameter names. +# Order matters: longer/more specific patterns should come first to avoid partial matches. 
+_KOHYA_UNET_KEY_REPLACEMENTS = [ + ("adaln_modulation_cross_attn_", "adaln_modulation_cross_attn."), + ("adaln_modulation_self_attn_", "adaln_modulation_self_attn."), + ("adaln_modulation_mlp_", "adaln_modulation_mlp."), + ("cross_attn_k_proj", "cross_attn.k_proj"), + ("cross_attn_q_proj", "cross_attn.q_proj"), + ("cross_attn_v_proj", "cross_attn.v_proj"), + ("cross_attn_output_proj", "cross_attn.output_proj"), + ("self_attn_k_proj", "self_attn.k_proj"), + ("self_attn_q_proj", "self_attn.q_proj"), + ("self_attn_v_proj", "self_attn.v_proj"), + ("self_attn_output_proj", "self_attn.output_proj"), + ("mlp_layer1", "mlp.layer1"), + ("mlp_layer2", "mlp.layer2"), +] + +# Mapping for Qwen3 text encoder Kohya keys. +_KOHYA_TE_KEY_REPLACEMENTS = [ + ("self_attn_k_proj", "self_attn.k_proj"), + ("self_attn_q_proj", "self_attn.q_proj"), + ("self_attn_v_proj", "self_attn.v_proj"), + ("self_attn_o_proj", "self_attn.o_proj"), + ("mlp_down_proj", "mlp.down_proj"), + ("mlp_gate_proj", "mlp.gate_proj"), + ("mlp_up_proj", "mlp.up_proj"), +] + + +def _convert_kohya_unet_key(kohya_layer_name: str) -> str: + """Convert a Kohya-style LoRA layer name to a model parameter path. + + Example: lora_unet_blocks_0_cross_attn_k_proj -> blocks.0.cross_attn.k_proj + """ + key = kohya_layer_name + if key.startswith("lora_unet_"): + key = key[len("lora_unet_"):] + + # Convert blocks_N_ to blocks.N. + key = re.sub(r"^blocks_(\d+)_", r"blocks.\1.", key) + + # Apply known replacements for subcomponent names + for old, new in _KOHYA_UNET_KEY_REPLACEMENTS: + if old in key: + key = key.replace(old, new) + break + + return key + + +def _convert_kohya_te_key(kohya_layer_name: str) -> str: + """Convert a Kohya-style text encoder LoRA layer name to a model parameter path. + + The Qwen3 text encoder is loaded as Qwen3ForCausalLM which wraps the base model + under a `model.` prefix, so the final path must include it. 
+ + Example: lora_te_layers_0_self_attn_q_proj -> model.layers.0.self_attn.q_proj + """ + key = kohya_layer_name + if key.startswith("lora_te_"): + key = key[len("lora_te_"):] + + # Convert layers_N_ to layers.N. + key = re.sub(r"^layers_(\d+)_", r"layers.\1.", key) + + # Apply known replacements + for old, new in _KOHYA_TE_KEY_REPLACEMENTS: + if old in key: + key = key.replace(old, new) + break + + # Qwen3ForCausalLM wraps the base Qwen3Model under `model.` + key = f"model.{key}" + + return key + + +def _make_layer_patch(layer_dict: dict[str, torch.Tensor]) -> BaseLayerPatch: + """Create a layer patch from a layer dict, handling DoRA+LoKR edge case. + + Some Anima LoRAs combine DoRA (dora_scale) with LoKR (lokr_w1/lokr_w2) weights. + The shared any_lora_layer_from_state_dict checks dora_scale first and expects + lora_up/lora_down keys, which don't exist in LoKR layers. We strip dora_scale + from LoKR layers so they fall through to the LoKR handler instead. + """ + has_lokr = "lokr_w1" in layer_dict or "lokr_w1_a" in layer_dict + has_dora = "dora_scale" in layer_dict + if has_lokr and has_dora: + layer_dict = {k: v for k, v in layer_dict.items() if k != "dora_scale"} + logger.debug("Stripped dora_scale from LoKR layer (DoRA+LoKR combination not supported, using LoKR only)") + return any_lora_layer_from_state_dict(layer_dict) + + +def lora_model_from_anima_state_dict( + state_dict: Dict[str, torch.Tensor], alpha: float | None = None +) -> ModelPatchRaw: + """Convert an Anima LoRA state dict to a ModelPatchRaw. + + Supports both Kohya-style keys (lora_unet_blocks_0_...) and diffusers PEFT format. + Also supports text encoder LoRA keys (lora_te_layers_0_...) targeting the Qwen3 encoder. + + Args: + state_dict: The LoRA state dict + alpha: The alpha value for LoRA scaling. If None, uses rank as alpha. 
+ + Returns: + A ModelPatchRaw containing the LoRA layers + """ + layers: dict[str, BaseLayerPatch] = {} + + # Detect format + str_keys = [k for k in state_dict.keys() if isinstance(k, str)] + is_kohya = any(k.startswith(("lora_unet_", "lora_te_")) for k in str_keys) + + if is_kohya: + # Kohya format: group by layer name (everything before .lora_down/.lora_up/.alpha) + grouped = _group_kohya_keys(state_dict) + for kohya_layer_name, layer_dict in grouped.items(): + if kohya_layer_name.startswith("lora_te_"): + model_key = _convert_kohya_te_key(kohya_layer_name) + final_key = f"{ANIMA_LORA_QWEN3_PREFIX}{model_key}" + else: + model_key = _convert_kohya_unet_key(kohya_layer_name) + final_key = f"{ANIMA_LORA_TRANSFORMER_PREFIX}{model_key}" + layer = _make_layer_patch(layer_dict) + layers[final_key] = layer + else: + # Diffusers PEFT format + grouped = _group_by_layer(state_dict) + for layer_key, layer_dict in grouped.items(): + values = _get_lora_layer_values(layer_dict, alpha) + clean_key = layer_key + + # Check for text encoder prefixes + text_encoder_prefixes = [ + "base_model.model.text_encoder.", + "text_encoder.", + ] + + is_text_encoder = False + for prefix in text_encoder_prefixes: + if layer_key.startswith(prefix): + clean_key = layer_key[len(prefix):] + is_text_encoder = True + break + + # If not text encoder, check transformer prefixes + if not is_text_encoder: + for prefix in [ + "base_model.model.transformer.", + "transformer.", + "diffusion_model.", + ]: + if layer_key.startswith(prefix): + clean_key = layer_key[len(prefix):] + break + + if is_text_encoder: + final_key = f"{ANIMA_LORA_QWEN3_PREFIX}{clean_key}" + else: + final_key = f"{ANIMA_LORA_TRANSFORMER_PREFIX}{clean_key}" + + layer = _make_layer_patch(values) + layers[final_key] = layer + + return ModelPatchRaw(layers=layers) + + +def _group_kohya_keys(state_dict: Dict[str, torch.Tensor]) -> dict[str, dict[str, torch.Tensor]]: + """Group Kohya-style LoRA keys by layer name. 
+ + Kohya keys look like: lora_unet_blocks_0_cross_attn_k_proj.lora_down.weight + Layer name: lora_unet_blocks_0_cross_attn_k_proj + Key suffix: lora_down.weight + """ + layer_dict: dict[str, dict[str, torch.Tensor]] = {} + + known_suffixes = [ + ".lora_A.weight", + ".lora_B.weight", + ".lora_down.weight", + ".lora_up.weight", + ".dora_scale", + ".alpha", + ] + + for key in state_dict: + if not isinstance(key, str): + continue + + layer_name = None + key_name = None + for suffix in known_suffixes: + if key.endswith(suffix): + layer_name = key[: -len(suffix)] + key_name = suffix[1:] # Remove leading dot + break + + if layer_name is None: + parts = key.rsplit(".", maxsplit=2) + layer_name = parts[0] + key_name = ".".join(parts[1:]) + + if layer_name not in layer_dict: + layer_dict[layer_name] = {} + layer_dict[layer_name][key_name] = state_dict[key] + + return layer_dict + + +def _get_lora_layer_values(layer_dict: dict[str, torch.Tensor], alpha: float | None) -> dict[str, torch.Tensor]: + """Convert layer dict keys from PEFT format to internal format.""" + if "lora_A.weight" in layer_dict: + values = { + "lora_down.weight": layer_dict["lora_A.weight"], + "lora_up.weight": layer_dict["lora_B.weight"], + } + if alpha is not None: + values["alpha"] = torch.tensor(alpha) + return values + elif "lora_down.weight" in layer_dict: + return layer_dict + else: + return layer_dict + + +def _group_by_layer(state_dict: Dict[str, torch.Tensor]) -> dict[str, dict[str, torch.Tensor]]: + """Groups keys in the state dict by layer (for diffusers PEFT format).""" + layer_dict: dict[str, dict[str, torch.Tensor]] = {} + + known_suffixes = [ + ".lora_A.weight", + ".lora_B.weight", + ".lora_down.weight", + ".lora_up.weight", + ".dora_scale", + ".alpha", + # LoKR suffixes + ".lokr_w1", + ".lokr_w2", + ".lokr_w1_a", + ".lokr_w1_b", + ".lokr_w2_a", + ".lokr_w2_b", + ] + + for key in state_dict: + if not isinstance(key, str): + continue + + layer_name = None + key_name = None + for suffix in 
known_suffixes: + if key.endswith(suffix): + layer_name = key[: -len(suffix)] + key_name = suffix[1:] + break + + if layer_name is None: + parts = key.rsplit(".", maxsplit=2) + layer_name = parts[0] + key_name = ".".join(parts[1:]) + + if layer_name not in layer_dict: + layer_dict[layer_name] = {} + layer_dict[layer_name][key_name] = state_dict[key] + + return layer_dict diff --git a/invokeai/frontend/web/src/features/controlLayers/store/validators.ts b/invokeai/frontend/web/src/features/controlLayers/store/validators.ts index 4633f8460b1..923b2b62532 100644 --- a/invokeai/frontend/web/src/features/controlLayers/store/validators.ts +++ b/invokeai/frontend/web/src/features/controlLayers/store/validators.ts @@ -127,7 +127,7 @@ export const getGlobalReferenceImageWarnings = ( const warnings: WarningTKey[] = []; if (model) { - if (model.base === 'sd-3' || model.base === 'sd-2') { + if (model.base === 'sd-3' || model.base === 'sd-2' || model.base === 'anima') { // Unsupported model architecture warnings.push(WARNINGS.UNSUPPORTED_MODEL); return warnings; @@ -170,7 +170,7 @@ export const getControlLayerWarnings = ( // No model selected warnings.push(WARNINGS.CONTROL_ADAPTER_NO_MODEL_SELECTED); } else if (model) { - if (model.base === 'sd-3' || model.base === 'sd-2') { + if (model.base === 'sd-3' || model.base === 'sd-2' || model.base === 'anima') { // Unsupported model architecture warnings.push(WARNINGS.UNSUPPORTED_MODEL); } else if (entity.controlAdapter.model.base !== model.base) { diff --git a/invokeai/frontend/web/src/features/nodes/util/graph/generation/addAnimaLoRAs.ts b/invokeai/frontend/web/src/features/nodes/util/graph/generation/addAnimaLoRAs.ts new file mode 100644 index 00000000000..df0c95ea717 --- /dev/null +++ b/invokeai/frontend/web/src/features/nodes/util/graph/generation/addAnimaLoRAs.ts @@ -0,0 +1,70 @@ +import type { RootState } from 'app/store/store'; +import { getPrefixedId } from 'features/controlLayers/konva/util'; +import { zModelIdentifierField } 
from 'features/nodes/types/common'; +import type { Graph } from 'features/nodes/util/graph/generation/Graph'; +import type { Invocation, S } from 'services/api/types'; + +export const addAnimaLoRAs = ( + state: RootState, + g: Graph, + denoise: Invocation<'anima_denoise'>, + modelLoader: Invocation<'anima_model_loader'>, + posCond: Invocation<'anima_text_encoder'>, + negCond: Invocation<'anima_text_encoder'> | null +): void => { + const enabledLoRAs = state.loras.loras.filter((l) => l.isEnabled && l.model.base === 'anima'); + const loraCount = enabledLoRAs.length; + + if (loraCount === 0) { + return; + } + + const loraMetadata: S['LoRAMetadataField'][] = []; + + // We will collect LoRAs into a single collection node, then pass them to the LoRA collection loader, which applies + // each LoRA to the transformer and Qwen3 encoder. + const loraCollector = g.addNode({ + id: getPrefixedId('lora_collector'), + type: 'collect', + }); + const loraCollectionLoader = g.addNode({ + type: 'anima_lora_collection_loader', + id: getPrefixedId('anima_lora_collection_loader'), + }); + + g.addEdge(loraCollector, 'collection', loraCollectionLoader, 'loras'); + // Use model loader as transformer input + g.addEdge(modelLoader, 'transformer', loraCollectionLoader, 'transformer'); + g.addEdge(modelLoader, 'qwen3_encoder', loraCollectionLoader, 'qwen3_encoder'); + // Reroute model connections through the LoRA collection loader + g.deleteEdgesTo(denoise, ['transformer']); + g.deleteEdgesTo(posCond, ['qwen3_encoder']); + g.addEdge(loraCollectionLoader, 'transformer', denoise, 'transformer'); + g.addEdge(loraCollectionLoader, 'qwen3_encoder', posCond, 'qwen3_encoder'); + // Only reroute negCond if it exists (guidance_scale > 0) + if (negCond !== null) { + g.deleteEdgesTo(negCond, ['qwen3_encoder']); + g.addEdge(loraCollectionLoader, 'qwen3_encoder', negCond, 'qwen3_encoder'); + } + + for (const lora of enabledLoRAs) { + const { weight } = lora; + const parsedModel = 
zModelIdentifierField.parse(lora.model); + + const loraSelector = g.addNode({ + type: 'lora_selector', + id: getPrefixedId('lora_selector'), + lora: parsedModel, + weight, + }); + + loraMetadata.push({ + model: parsedModel, + weight, + }); + + g.addEdge(loraSelector, 'lora', loraCollector, 'item'); + } + + g.upsertMetadata({ loras: loraMetadata }); +}; diff --git a/invokeai/frontend/web/src/features/nodes/util/graph/generation/buildAnimaGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graph/generation/buildAnimaGraph.ts index b70b7d128c2..e180dc8289a 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graph/generation/buildAnimaGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graph/generation/buildAnimaGraph.ts @@ -9,6 +9,7 @@ import { } from 'features/controlLayers/store/paramsSlice'; import { selectCanvasMetadata, selectCanvasSlice } from 'features/controlLayers/store/selectors'; import { fetchModelConfigWithTypeGuard } from 'features/metadata/util/modelFetchingHelpers'; +import { addAnimaLoRAs } from 'features/nodes/util/graph/generation/addAnimaLoRAs'; import { addImageToImage } from 'features/nodes/util/graph/generation/addImageToImage'; import { addInpaint } from 'features/nodes/util/graph/generation/addInpaint'; import { addNSFWChecker } from 'features/nodes/util/graph/generation/addNSFWChecker'; @@ -171,6 +172,9 @@ export const buildAnimaGraph = async (arg: GraphBuilderArg): Promise = l2i; if (generationMode === 'txt2img') { diff --git a/invokeai/frontend/web/src/services/api/schema.ts b/invokeai/frontend/web/src/services/api/schema.ts index 9c435d88c92..56fa2b2591d 100644 --- a/invokeai/frontend/web/src/services/api/schema.ts +++ b/invokeai/frontend/web/src/services/api/schema.ts @@ -2863,6 +2863,130 @@ export type components = { */ type: "anima_l2i"; }; + /** + * Apply LoRA Collection - Anima + * @description Applies a collection of LoRAs to an Anima transformer. 
+ */ + AnimaLoRACollectionLoader: { + /** + * Id + * @description The id of this instance of an invocation. Must be unique among all instances of invocations. + */ + id: string; + /** + * Is Intermediate + * @description Whether or not this is an intermediate invocation. + * @default false + */ + is_intermediate?: boolean; + /** + * Use Cache + * @description Whether or not to use the cache + * @default true + */ + use_cache?: boolean; + /** + * LoRAs + * @description LoRA models and weights. May be a single LoRA or collection. + * @default null + */ + loras?: components["schemas"]["LoRAField"] | components["schemas"]["LoRAField"][] | null; + /** + * Transformer + * @description Transformer + * @default null + */ + transformer?: components["schemas"]["TransformerField"] | null; + /** + * Qwen3 Encoder + * @description Qwen3 tokenizer and text encoder + * @default null + */ + qwen3_encoder?: components["schemas"]["Qwen3EncoderField"] | null; + /** + * type + * @default anima_lora_collection_loader + * @constant + */ + type: "anima_lora_collection_loader"; + }; + /** + * Apply LoRA - Anima + * @description Apply a LoRA model to an Anima transformer and/or Qwen3 text encoder. + */ + AnimaLoRALoaderInvocation: { + /** + * Id + * @description The id of this instance of an invocation. Must be unique among all instances of invocations. + */ + id: string; + /** + * Is Intermediate + * @description Whether or not this is an intermediate invocation. 
+ * @default false + */ + is_intermediate?: boolean; + /** + * Use Cache + * @description Whether or not to use the cache + * @default true + */ + use_cache?: boolean; + /** + * LoRA + * @description LoRA model to load + * @default null + */ + lora?: components["schemas"]["ModelIdentifierField"] | null; + /** + * Weight + * @description The weight at which the LoRA is applied to each model + * @default 0.75 + */ + weight?: number; + /** + * Anima Transformer + * @description Transformer + * @default null + */ + transformer?: components["schemas"]["TransformerField"] | null; + /** + * Qwen3 Encoder + * @description Qwen3 tokenizer and text encoder + * @default null + */ + qwen3_encoder?: components["schemas"]["Qwen3EncoderField"] | null; + /** + * type + * @default anima_lora_loader + * @constant + */ + type: "anima_lora_loader"; + }; + /** + * AnimaLoRALoaderOutput + * @description Anima LoRA Loader Output + */ + AnimaLoRALoaderOutput: { + /** + * Anima Transformer + * @description Transformer + * @default null + */ + transformer: components["schemas"]["TransformerField"] | null; + /** + * Qwen3 Encoder + * @description Qwen3 tokenizer and text encoder + * @default null + */ + qwen3_encoder: components["schemas"]["Qwen3EncoderField"] | null; + /** + * type + * @default anima_lora_loader_output + * @constant + */ + type: "anima_lora_loader_output"; + }; /** * Main Model - Anima * @description Loads an Anima model, outputting its submodels. 
@@ -2991,7 +3115,7 @@ export type components = { */ type: "anima_text_encoder"; }; - AnyModelConfig: components["schemas"]["Main_Diffusers_SD1_Config"] | components["schemas"]["Main_Diffusers_SD2_Config"] | components["schemas"]["Main_Diffusers_SDXL_Config"] | components["schemas"]["Main_Diffusers_SDXLRefiner_Config"] | components["schemas"]["Main_Diffusers_SD3_Config"] | components["schemas"]["Main_Diffusers_FLUX_Config"] | components["schemas"]["Main_Diffusers_Flux2_Config"] | components["schemas"]["Main_Diffusers_CogView4_Config"] | components["schemas"]["Main_Diffusers_ZImage_Config"] | components["schemas"]["Main_Checkpoint_SD1_Config"] | components["schemas"]["Main_Checkpoint_SD2_Config"] | components["schemas"]["Main_Checkpoint_SDXL_Config"] | components["schemas"]["Main_Checkpoint_SDXLRefiner_Config"] | components["schemas"]["Main_Checkpoint_Flux2_Config"] | components["schemas"]["Main_Checkpoint_FLUX_Config"] | components["schemas"]["Main_Checkpoint_ZImage_Config"] | components["schemas"]["Main_Checkpoint_Anima_Config"] | components["schemas"]["Main_BnBNF4_FLUX_Config"] | components["schemas"]["Main_GGUF_Flux2_Config"] | components["schemas"]["Main_GGUF_FLUX_Config"] | components["schemas"]["Main_GGUF_ZImage_Config"] | components["schemas"]["VAE_Checkpoint_SD1_Config"] | components["schemas"]["VAE_Checkpoint_SD2_Config"] | components["schemas"]["VAE_Checkpoint_SDXL_Config"] | components["schemas"]["VAE_Checkpoint_FLUX_Config"] | components["schemas"]["VAE_Checkpoint_Flux2_Config"] | components["schemas"]["VAE_Checkpoint_Anima_Config"] | components["schemas"]["VAE_Diffusers_SD1_Config"] | components["schemas"]["VAE_Diffusers_SDXL_Config"] | components["schemas"]["VAE_Diffusers_Flux2_Config"] | components["schemas"]["ControlNet_Checkpoint_SD1_Config"] | components["schemas"]["ControlNet_Checkpoint_SD2_Config"] | components["schemas"]["ControlNet_Checkpoint_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_FLUX_Config"] | 
components["schemas"]["ControlNet_Checkpoint_ZImage_Config"] | components["schemas"]["ControlNet_Diffusers_SD1_Config"] | components["schemas"]["ControlNet_Diffusers_SD2_Config"] | components["schemas"]["ControlNet_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_SD1_Config"] | components["schemas"]["LoRA_LyCORIS_SD2_Config"] | components["schemas"]["LoRA_LyCORIS_SDXL_Config"] | components["schemas"]["LoRA_LyCORIS_Flux2_Config"] | components["schemas"]["LoRA_LyCORIS_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_ZImage_Config"] | components["schemas"]["LoRA_OMI_SDXL_Config"] | components["schemas"]["LoRA_OMI_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_SD1_Config"] | components["schemas"]["LoRA_Diffusers_SD2_Config"] | components["schemas"]["LoRA_Diffusers_SDXL_Config"] | components["schemas"]["LoRA_Diffusers_Flux2_Config"] | components["schemas"]["LoRA_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_ZImage_Config"] | components["schemas"]["ControlLoRA_LyCORIS_FLUX_Config"] | components["schemas"]["T5Encoder_T5Encoder_Config"] | components["schemas"]["T5Encoder_BnBLLMint8_Config"] | components["schemas"]["Qwen3Encoder_Qwen3Encoder_Config"] | components["schemas"]["Qwen3Encoder_Checkpoint_Config"] | components["schemas"]["Qwen3Encoder_GGUF_Config"] | components["schemas"]["TI_File_SD1_Config"] | components["schemas"]["TI_File_SD2_Config"] | components["schemas"]["TI_File_SDXL_Config"] | components["schemas"]["TI_Folder_SD1_Config"] | components["schemas"]["TI_Folder_SD2_Config"] | components["schemas"]["TI_Folder_SDXL_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD1_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD2_Config"] | components["schemas"]["IPAdapter_InvokeAI_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD1_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD2_Config"] | 
components["schemas"]["IPAdapter_Checkpoint_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_FLUX_Config"] | components["schemas"]["T2IAdapter_Diffusers_SD1_Config"] | components["schemas"]["T2IAdapter_Diffusers_SDXL_Config"] | components["schemas"]["Spandrel_Checkpoint_Config"] | components["schemas"]["CLIPEmbed_Diffusers_G_Config"] | components["schemas"]["CLIPEmbed_Diffusers_L_Config"] | components["schemas"]["CLIPVision_Diffusers_Config"] | components["schemas"]["SigLIP_Diffusers_Config"] | components["schemas"]["FLUXRedux_Checkpoint_Config"] | components["schemas"]["LlavaOnevision_Diffusers_Config"] | components["schemas"]["Unknown_Config"]; + AnyModelConfig: components["schemas"]["Main_Diffusers_SD1_Config"] | components["schemas"]["Main_Diffusers_SD2_Config"] | components["schemas"]["Main_Diffusers_SDXL_Config"] | components["schemas"]["Main_Diffusers_SDXLRefiner_Config"] | components["schemas"]["Main_Diffusers_SD3_Config"] | components["schemas"]["Main_Diffusers_FLUX_Config"] | components["schemas"]["Main_Diffusers_Flux2_Config"] | components["schemas"]["Main_Diffusers_CogView4_Config"] | components["schemas"]["Main_Diffusers_ZImage_Config"] | components["schemas"]["Main_Checkpoint_SD1_Config"] | components["schemas"]["Main_Checkpoint_SD2_Config"] | components["schemas"]["Main_Checkpoint_SDXL_Config"] | components["schemas"]["Main_Checkpoint_SDXLRefiner_Config"] | components["schemas"]["Main_Checkpoint_Flux2_Config"] | components["schemas"]["Main_Checkpoint_FLUX_Config"] | components["schemas"]["Main_Checkpoint_ZImage_Config"] | components["schemas"]["Main_Checkpoint_Anima_Config"] | components["schemas"]["Main_BnBNF4_FLUX_Config"] | components["schemas"]["Main_GGUF_Flux2_Config"] | components["schemas"]["Main_GGUF_FLUX_Config"] | components["schemas"]["Main_GGUF_ZImage_Config"] | components["schemas"]["VAE_Checkpoint_SD1_Config"] | components["schemas"]["VAE_Checkpoint_SD2_Config"] | components["schemas"]["VAE_Checkpoint_SDXL_Config"] | 
components["schemas"]["VAE_Checkpoint_FLUX_Config"] | components["schemas"]["VAE_Checkpoint_Flux2_Config"] | components["schemas"]["VAE_Checkpoint_Anima_Config"] | components["schemas"]["VAE_Diffusers_SD1_Config"] | components["schemas"]["VAE_Diffusers_SDXL_Config"] | components["schemas"]["VAE_Diffusers_Flux2_Config"] | components["schemas"]["ControlNet_Checkpoint_SD1_Config"] | components["schemas"]["ControlNet_Checkpoint_SD2_Config"] | components["schemas"]["ControlNet_Checkpoint_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_FLUX_Config"] | components["schemas"]["ControlNet_Checkpoint_ZImage_Config"] | components["schemas"]["ControlNet_Diffusers_SD1_Config"] | components["schemas"]["ControlNet_Diffusers_SD2_Config"] | components["schemas"]["ControlNet_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_SD1_Config"] | components["schemas"]["LoRA_LyCORIS_SD2_Config"] | components["schemas"]["LoRA_LyCORIS_SDXL_Config"] | components["schemas"]["LoRA_LyCORIS_Flux2_Config"] | components["schemas"]["LoRA_LyCORIS_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_ZImage_Config"] | components["schemas"]["LoRA_LyCORIS_Anima_Config"] | components["schemas"]["LoRA_OMI_SDXL_Config"] | components["schemas"]["LoRA_OMI_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_SD1_Config"] | components["schemas"]["LoRA_Diffusers_SD2_Config"] | components["schemas"]["LoRA_Diffusers_SDXL_Config"] | components["schemas"]["LoRA_Diffusers_Flux2_Config"] | components["schemas"]["LoRA_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_ZImage_Config"] | components["schemas"]["ControlLoRA_LyCORIS_FLUX_Config"] | components["schemas"]["T5Encoder_T5Encoder_Config"] | components["schemas"]["T5Encoder_BnBLLMint8_Config"] | components["schemas"]["Qwen3Encoder_Qwen3Encoder_Config"] | components["schemas"]["Qwen3Encoder_Checkpoint_Config"] | components["schemas"]["Qwen3Encoder_GGUF_Config"] | 
components["schemas"]["TI_File_SD1_Config"] | components["schemas"]["TI_File_SD2_Config"] | components["schemas"]["TI_File_SDXL_Config"] | components["schemas"]["TI_Folder_SD1_Config"] | components["schemas"]["TI_Folder_SD2_Config"] | components["schemas"]["TI_Folder_SDXL_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD1_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD2_Config"] | components["schemas"]["IPAdapter_InvokeAI_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD1_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD2_Config"] | components["schemas"]["IPAdapter_Checkpoint_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_FLUX_Config"] | components["schemas"]["T2IAdapter_Diffusers_SD1_Config"] | components["schemas"]["T2IAdapter_Diffusers_SDXL_Config"] | components["schemas"]["Spandrel_Checkpoint_Config"] | components["schemas"]["CLIPEmbed_Diffusers_G_Config"] | components["schemas"]["CLIPEmbed_Diffusers_L_Config"] | components["schemas"]["CLIPVision_Diffusers_Config"] | components["schemas"]["SigLIP_Diffusers_Config"] | components["schemas"]["FLUXRedux_Checkpoint_Config"] | components["schemas"]["LlavaOnevision_Diffusers_Config"] | components["schemas"]["Unknown_Config"]; /** * AppVersion * @description App Version Response @@ -11036,7 +11160,7 @@ export type components = { * @description The nodes in this graph */ nodes?: { - [key: string]: components["schemas"]["AddInvocation"] | components["schemas"]["AlphaMaskToTensorInvocation"] | components["schemas"]["AnimaDenoiseInvocation"] | components["schemas"]["AnimaImageToLatentsInvocation"] | components["schemas"]["AnimaLatentsToImageInvocation"] | components["schemas"]["AnimaModelLoaderInvocation"] | components["schemas"]["AnimaTextEncoderInvocation"] | components["schemas"]["ApplyMaskTensorToImageInvocation"] | components["schemas"]["ApplyMaskToImageInvocation"] | components["schemas"]["BlankImageInvocation"] | components["schemas"]["BlendLatentsInvocation"] | 
components["schemas"]["BooleanCollectionInvocation"] | components["schemas"]["BooleanInvocation"] | components["schemas"]["BoundingBoxInvocation"] | components["schemas"]["CLIPSkipInvocation"] | components["schemas"]["CV2InfillInvocation"] | components["schemas"]["CalculateImageTilesEvenSplitInvocation"] | components["schemas"]["CalculateImageTilesInvocation"] | components["schemas"]["CalculateImageTilesMinimumOverlapInvocation"] | components["schemas"]["CannyEdgeDetectionInvocation"] | components["schemas"]["CanvasPasteBackInvocation"] | components["schemas"]["CanvasV2MaskAndCropInvocation"] | components["schemas"]["CenterPadCropInvocation"] | components["schemas"]["CogView4DenoiseInvocation"] | components["schemas"]["CogView4ImageToLatentsInvocation"] | components["schemas"]["CogView4LatentsToImageInvocation"] | components["schemas"]["CogView4ModelLoaderInvocation"] | components["schemas"]["CogView4TextEncoderInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["ColorInvocation"] | components["schemas"]["ColorMapInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["ConditioningCollectionInvocation"] | components["schemas"]["ConditioningInvocation"] | components["schemas"]["ContentShuffleInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["CoreMetadataInvocation"] | components["schemas"]["CreateDenoiseMaskInvocation"] | components["schemas"]["CreateGradientMaskInvocation"] | components["schemas"]["CropImageToBoundingBoxInvocation"] | components["schemas"]["CropLatentsCoreInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["DWOpenposeDetectionInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["DenoiseLatentsMetaInvocation"] | components["schemas"]["DepthAnythingDepthEstimationInvocation"] | components["schemas"]["DivideInvocation"] | 
components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["ExpandMaskWithFadeInvocation"] | components["schemas"]["FLUXLoRACollectionLoader"] | components["schemas"]["FaceIdentifierInvocation"] | components["schemas"]["FaceMaskInvocation"] | components["schemas"]["FaceOffInvocation"] | components["schemas"]["FloatBatchInvocation"] | components["schemas"]["FloatCollectionInvocation"] | components["schemas"]["FloatGenerator"] | components["schemas"]["FloatInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["FloatMathInvocation"] | components["schemas"]["FloatToIntegerInvocation"] | components["schemas"]["Flux2DenoiseInvocation"] | components["schemas"]["Flux2KleinLoRACollectionLoader"] | components["schemas"]["Flux2KleinLoRALoaderInvocation"] | components["schemas"]["Flux2KleinModelLoaderInvocation"] | components["schemas"]["Flux2KleinTextEncoderInvocation"] | components["schemas"]["Flux2VaeDecodeInvocation"] | components["schemas"]["Flux2VaeEncodeInvocation"] | components["schemas"]["FluxControlLoRALoaderInvocation"] | components["schemas"]["FluxControlNetInvocation"] | components["schemas"]["FluxDenoiseInvocation"] | components["schemas"]["FluxDenoiseLatentsMetaInvocation"] | components["schemas"]["FluxFillInvocation"] | components["schemas"]["FluxIPAdapterInvocation"] | components["schemas"]["FluxKontextConcatenateImagesInvocation"] | components["schemas"]["FluxKontextInvocation"] | components["schemas"]["FluxLoRALoaderInvocation"] | components["schemas"]["FluxModelLoaderInvocation"] | components["schemas"]["FluxReduxInvocation"] | components["schemas"]["FluxTextEncoderInvocation"] | components["schemas"]["FluxVaeDecodeInvocation"] | components["schemas"]["FluxVaeEncodeInvocation"] | components["schemas"]["FreeUInvocation"] | components["schemas"]["GetMaskBoundingBoxInvocation"] | components["schemas"]["GroundingDinoInvocation"] | 
components["schemas"]["HEDEdgeDetectionInvocation"] | components["schemas"]["HeuristicResizeInvocation"] | components["schemas"]["IPAdapterInvocation"] | components["schemas"]["IdealSizeInvocation"] | components["schemas"]["ImageBatchInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["ImageChannelMultiplyInvocation"] | components["schemas"]["ImageChannelOffsetInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImageGenerator"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ImageInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ImageMaskToTensorInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["ImageNoiseInvocation"] | components["schemas"]["ImagePanelLayoutInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["IntegerBatchInvocation"] | components["schemas"]["IntegerCollectionInvocation"] | components["schemas"]["IntegerGenerator"] | components["schemas"]["IntegerInvocation"] | components["schemas"]["IntegerMathInvocation"] | components["schemas"]["InvertTensorMaskInvocation"] | components["schemas"]["InvokeAdjustImageHuePlusInvocation"] | components["schemas"]["InvokeEquivalentAchromaticLightnessInvocation"] | components["schemas"]["InvokeImageBlendInvocation"] | 
components["schemas"]["InvokeImageCompositorInvocation"] | components["schemas"]["InvokeImageDilateOrErodeInvocation"] | components["schemas"]["InvokeImageEnhanceInvocation"] | components["schemas"]["InvokeImageValueThresholdsInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["LaMaInfillInvocation"] | components["schemas"]["LatentsCollectionInvocation"] | components["schemas"]["LatentsInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["LineartAnimeEdgeDetectionInvocation"] | components["schemas"]["LineartEdgeDetectionInvocation"] | components["schemas"]["LlavaOnevisionVllmInvocation"] | components["schemas"]["LoRACollectionLoader"] | components["schemas"]["LoRALoaderInvocation"] | components["schemas"]["LoRASelectorInvocation"] | components["schemas"]["MLSDDetectionInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["MaskFromIDInvocation"] | components["schemas"]["MaskTensorToImageInvocation"] | components["schemas"]["MediaPipeFaceDetectionInvocation"] | components["schemas"]["MergeMetadataInvocation"] | components["schemas"]["MergeTilesToImageInvocation"] | components["schemas"]["MetadataFieldExtractorInvocation"] | components["schemas"]["MetadataFromImageInvocation"] | components["schemas"]["MetadataInvocation"] | components["schemas"]["MetadataItemInvocation"] | components["schemas"]["MetadataItemLinkedInvocation"] | components["schemas"]["MetadataToBoolCollectionInvocation"] | components["schemas"]["MetadataToBoolInvocation"] | components["schemas"]["MetadataToControlnetsInvocation"] | components["schemas"]["MetadataToFloatCollectionInvocation"] | components["schemas"]["MetadataToFloatInvocation"] | components["schemas"]["MetadataToIPAdaptersInvocation"] | 
components["schemas"]["MetadataToIntegerCollectionInvocation"] | components["schemas"]["MetadataToIntegerInvocation"] | components["schemas"]["MetadataToLorasCollectionInvocation"] | components["schemas"]["MetadataToLorasInvocation"] | components["schemas"]["MetadataToModelInvocation"] | components["schemas"]["MetadataToSDXLLorasInvocation"] | components["schemas"]["MetadataToSDXLModelInvocation"] | components["schemas"]["MetadataToSchedulerInvocation"] | components["schemas"]["MetadataToStringCollectionInvocation"] | components["schemas"]["MetadataToStringInvocation"] | components["schemas"]["MetadataToT2IAdaptersInvocation"] | components["schemas"]["MetadataToVAEInvocation"] | components["schemas"]["ModelIdentifierInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["NormalMapInvocation"] | components["schemas"]["PBRMapsInvocation"] | components["schemas"]["PairTileImageInvocation"] | components["schemas"]["PasteImageIntoBoundingBoxInvocation"] | components["schemas"]["PiDiNetEdgeDetectionInvocation"] | components["schemas"]["PromptTemplateInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["RandomFloatInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RectangleMaskInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["RoundInvocation"] | components["schemas"]["SD3DenoiseInvocation"] | components["schemas"]["SD3ImageToLatentsInvocation"] | components["schemas"]["SD3LatentsToImageInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["SDXLLoRACollectionLoader"] | components["schemas"]["SDXLLoRALoaderInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | 
components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["SaveImageInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["SchedulerInvocation"] | components["schemas"]["Sd3ModelLoaderInvocation"] | components["schemas"]["Sd3TextEncoderInvocation"] | components["schemas"]["SeamlessModeInvocation"] | components["schemas"]["SegmentAnythingInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["SpandrelImageToImageAutoscaleInvocation"] | components["schemas"]["SpandrelImageToImageInvocation"] | components["schemas"]["StringBatchInvocation"] | components["schemas"]["StringCollectionInvocation"] | components["schemas"]["StringGenerator"] | components["schemas"]["StringInvocation"] | components["schemas"]["StringJoinInvocation"] | components["schemas"]["StringJoinThreeInvocation"] | components["schemas"]["StringReplaceInvocation"] | components["schemas"]["StringSplitInvocation"] | components["schemas"]["StringSplitNegInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["T2IAdapterInvocation"] | components["schemas"]["TileToPropertiesInvocation"] | components["schemas"]["TiledMultiDiffusionDenoiseLatents"] | components["schemas"]["UnsharpMaskInvocation"] | components["schemas"]["VAELoaderInvocation"] | components["schemas"]["ZImageControlInvocation"] | components["schemas"]["ZImageDenoiseInvocation"] | components["schemas"]["ZImageDenoiseMetaInvocation"] | components["schemas"]["ZImageImageToLatentsInvocation"] | components["schemas"]["ZImageLatentsToImageInvocation"] | components["schemas"]["ZImageLoRACollectionLoader"] | components["schemas"]["ZImageLoRALoaderInvocation"] | components["schemas"]["ZImageModelLoaderInvocation"] | components["schemas"]["ZImageSeedVarianceEnhancerInvocation"] | components["schemas"]["ZImageTextEncoderInvocation"]; + [key: string]: 
components["schemas"]["AddInvocation"] | components["schemas"]["AlphaMaskToTensorInvocation"] | components["schemas"]["AnimaDenoiseInvocation"] | components["schemas"]["AnimaImageToLatentsInvocation"] | components["schemas"]["AnimaLatentsToImageInvocation"] | components["schemas"]["AnimaLoRACollectionLoader"] | components["schemas"]["AnimaLoRALoaderInvocation"] | components["schemas"]["AnimaModelLoaderInvocation"] | components["schemas"]["AnimaTextEncoderInvocation"] | components["schemas"]["ApplyMaskTensorToImageInvocation"] | components["schemas"]["ApplyMaskToImageInvocation"] | components["schemas"]["BlankImageInvocation"] | components["schemas"]["BlendLatentsInvocation"] | components["schemas"]["BooleanCollectionInvocation"] | components["schemas"]["BooleanInvocation"] | components["schemas"]["BoundingBoxInvocation"] | components["schemas"]["CLIPSkipInvocation"] | components["schemas"]["CV2InfillInvocation"] | components["schemas"]["CalculateImageTilesEvenSplitInvocation"] | components["schemas"]["CalculateImageTilesInvocation"] | components["schemas"]["CalculateImageTilesMinimumOverlapInvocation"] | components["schemas"]["CannyEdgeDetectionInvocation"] | components["schemas"]["CanvasPasteBackInvocation"] | components["schemas"]["CanvasV2MaskAndCropInvocation"] | components["schemas"]["CenterPadCropInvocation"] | components["schemas"]["CogView4DenoiseInvocation"] | components["schemas"]["CogView4ImageToLatentsInvocation"] | components["schemas"]["CogView4LatentsToImageInvocation"] | components["schemas"]["CogView4ModelLoaderInvocation"] | components["schemas"]["CogView4TextEncoderInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["ColorInvocation"] | components["schemas"]["ColorMapInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["ConditioningCollectionInvocation"] | components["schemas"]["ConditioningInvocation"] | 
components["schemas"]["ContentShuffleInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["CoreMetadataInvocation"] | components["schemas"]["CreateDenoiseMaskInvocation"] | components["schemas"]["CreateGradientMaskInvocation"] | components["schemas"]["CropImageToBoundingBoxInvocation"] | components["schemas"]["CropLatentsCoreInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["DWOpenposeDetectionInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["DenoiseLatentsMetaInvocation"] | components["schemas"]["DepthAnythingDepthEstimationInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["ExpandMaskWithFadeInvocation"] | components["schemas"]["FLUXLoRACollectionLoader"] | components["schemas"]["FaceIdentifierInvocation"] | components["schemas"]["FaceMaskInvocation"] | components["schemas"]["FaceOffInvocation"] | components["schemas"]["FloatBatchInvocation"] | components["schemas"]["FloatCollectionInvocation"] | components["schemas"]["FloatGenerator"] | components["schemas"]["FloatInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["FloatMathInvocation"] | components["schemas"]["FloatToIntegerInvocation"] | components["schemas"]["Flux2DenoiseInvocation"] | components["schemas"]["Flux2KleinLoRACollectionLoader"] | components["schemas"]["Flux2KleinLoRALoaderInvocation"] | components["schemas"]["Flux2KleinModelLoaderInvocation"] | components["schemas"]["Flux2KleinTextEncoderInvocation"] | components["schemas"]["Flux2VaeDecodeInvocation"] | components["schemas"]["Flux2VaeEncodeInvocation"] | components["schemas"]["FluxControlLoRALoaderInvocation"] | components["schemas"]["FluxControlNetInvocation"] | components["schemas"]["FluxDenoiseInvocation"] | components["schemas"]["FluxDenoiseLatentsMetaInvocation"] | 
components["schemas"]["FluxFillInvocation"] | components["schemas"]["FluxIPAdapterInvocation"] | components["schemas"]["FluxKontextConcatenateImagesInvocation"] | components["schemas"]["FluxKontextInvocation"] | components["schemas"]["FluxLoRALoaderInvocation"] | components["schemas"]["FluxModelLoaderInvocation"] | components["schemas"]["FluxReduxInvocation"] | components["schemas"]["FluxTextEncoderInvocation"] | components["schemas"]["FluxVaeDecodeInvocation"] | components["schemas"]["FluxVaeEncodeInvocation"] | components["schemas"]["FreeUInvocation"] | components["schemas"]["GetMaskBoundingBoxInvocation"] | components["schemas"]["GroundingDinoInvocation"] | components["schemas"]["HEDEdgeDetectionInvocation"] | components["schemas"]["HeuristicResizeInvocation"] | components["schemas"]["IPAdapterInvocation"] | components["schemas"]["IdealSizeInvocation"] | components["schemas"]["ImageBatchInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["ImageChannelMultiplyInvocation"] | components["schemas"]["ImageChannelOffsetInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImageGenerator"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ImageInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ImageMaskToTensorInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["ImageNoiseInvocation"] | components["schemas"]["ImagePanelLayoutInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | 
components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["IntegerBatchInvocation"] | components["schemas"]["IntegerCollectionInvocation"] | components["schemas"]["IntegerGenerator"] | components["schemas"]["IntegerInvocation"] | components["schemas"]["IntegerMathInvocation"] | components["schemas"]["InvertTensorMaskInvocation"] | components["schemas"]["InvokeAdjustImageHuePlusInvocation"] | components["schemas"]["InvokeEquivalentAchromaticLightnessInvocation"] | components["schemas"]["InvokeImageBlendInvocation"] | components["schemas"]["InvokeImageCompositorInvocation"] | components["schemas"]["InvokeImageDilateOrErodeInvocation"] | components["schemas"]["InvokeImageEnhanceInvocation"] | components["schemas"]["InvokeImageValueThresholdsInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["LaMaInfillInvocation"] | components["schemas"]["LatentsCollectionInvocation"] | components["schemas"]["LatentsInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["LineartAnimeEdgeDetectionInvocation"] | components["schemas"]["LineartEdgeDetectionInvocation"] | components["schemas"]["LlavaOnevisionVllmInvocation"] | components["schemas"]["LoRACollectionLoader"] | components["schemas"]["LoRALoaderInvocation"] | components["schemas"]["LoRASelectorInvocation"] | components["schemas"]["MLSDDetectionInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["MaskFromIDInvocation"] | components["schemas"]["MaskTensorToImageInvocation"] | components["schemas"]["MediaPipeFaceDetectionInvocation"] | components["schemas"]["MergeMetadataInvocation"] | 
components["schemas"]["MergeTilesToImageInvocation"] | components["schemas"]["MetadataFieldExtractorInvocation"] | components["schemas"]["MetadataFromImageInvocation"] | components["schemas"]["MetadataInvocation"] | components["schemas"]["MetadataItemInvocation"] | components["schemas"]["MetadataItemLinkedInvocation"] | components["schemas"]["MetadataToBoolCollectionInvocation"] | components["schemas"]["MetadataToBoolInvocation"] | components["schemas"]["MetadataToControlnetsInvocation"] | components["schemas"]["MetadataToFloatCollectionInvocation"] | components["schemas"]["MetadataToFloatInvocation"] | components["schemas"]["MetadataToIPAdaptersInvocation"] | components["schemas"]["MetadataToIntegerCollectionInvocation"] | components["schemas"]["MetadataToIntegerInvocation"] | components["schemas"]["MetadataToLorasCollectionInvocation"] | components["schemas"]["MetadataToLorasInvocation"] | components["schemas"]["MetadataToModelInvocation"] | components["schemas"]["MetadataToSDXLLorasInvocation"] | components["schemas"]["MetadataToSDXLModelInvocation"] | components["schemas"]["MetadataToSchedulerInvocation"] | components["schemas"]["MetadataToStringCollectionInvocation"] | components["schemas"]["MetadataToStringInvocation"] | components["schemas"]["MetadataToT2IAdaptersInvocation"] | components["schemas"]["MetadataToVAEInvocation"] | components["schemas"]["ModelIdentifierInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["NormalMapInvocation"] | components["schemas"]["PBRMapsInvocation"] | components["schemas"]["PairTileImageInvocation"] | components["schemas"]["PasteImageIntoBoundingBoxInvocation"] | components["schemas"]["PiDiNetEdgeDetectionInvocation"] | components["schemas"]["PromptTemplateInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["RandomFloatInvocation"] | components["schemas"]["RandomIntInvocation"] | 
components["schemas"]["RandomRangeInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RectangleMaskInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["RoundInvocation"] | components["schemas"]["SD3DenoiseInvocation"] | components["schemas"]["SD3ImageToLatentsInvocation"] | components["schemas"]["SD3LatentsToImageInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["SDXLLoRACollectionLoader"] | components["schemas"]["SDXLLoRALoaderInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["SaveImageInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["SchedulerInvocation"] | components["schemas"]["Sd3ModelLoaderInvocation"] | components["schemas"]["Sd3TextEncoderInvocation"] | components["schemas"]["SeamlessModeInvocation"] | components["schemas"]["SegmentAnythingInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["SpandrelImageToImageAutoscaleInvocation"] | components["schemas"]["SpandrelImageToImageInvocation"] | components["schemas"]["StringBatchInvocation"] | components["schemas"]["StringCollectionInvocation"] | components["schemas"]["StringGenerator"] | components["schemas"]["StringInvocation"] | components["schemas"]["StringJoinInvocation"] | components["schemas"]["StringJoinThreeInvocation"] | components["schemas"]["StringReplaceInvocation"] | components["schemas"]["StringSplitInvocation"] | components["schemas"]["StringSplitNegInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["T2IAdapterInvocation"] | components["schemas"]["TileToPropertiesInvocation"] | components["schemas"]["TiledMultiDiffusionDenoiseLatents"] | components["schemas"]["UnsharpMaskInvocation"] | 
components["schemas"]["VAELoaderInvocation"] | components["schemas"]["ZImageControlInvocation"] | components["schemas"]["ZImageDenoiseInvocation"] | components["schemas"]["ZImageDenoiseMetaInvocation"] | components["schemas"]["ZImageImageToLatentsInvocation"] | components["schemas"]["ZImageLatentsToImageInvocation"] | components["schemas"]["ZImageLoRACollectionLoader"] | components["schemas"]["ZImageLoRALoaderInvocation"] | components["schemas"]["ZImageModelLoaderInvocation"] | components["schemas"]["ZImageSeedVarianceEnhancerInvocation"] | components["schemas"]["ZImageTextEncoderInvocation"]; }; /** * Edges @@ -11073,7 +11197,7 @@ export type components = { * @description The results of node executions */ results: { - [key: string]: components["schemas"]["AnimaConditioningOutput"] | components["schemas"]["AnimaModelLoaderOutput"] | components["schemas"]["BooleanCollectionOutput"] | components["schemas"]["BooleanOutput"] | components["schemas"]["BoundingBoxCollectionOutput"] | components["schemas"]["BoundingBoxOutput"] | components["schemas"]["CLIPOutput"] | components["schemas"]["CLIPSkipInvocationOutput"] | components["schemas"]["CalculateImageTilesOutput"] | components["schemas"]["CogView4ConditioningOutput"] | components["schemas"]["CogView4ModelLoaderOutput"] | components["schemas"]["CollectInvocationOutput"] | components["schemas"]["ColorCollectionOutput"] | components["schemas"]["ColorOutput"] | components["schemas"]["ConditioningCollectionOutput"] | components["schemas"]["ConditioningOutput"] | components["schemas"]["ControlOutput"] | components["schemas"]["DenoiseMaskOutput"] | components["schemas"]["FaceMaskOutput"] | components["schemas"]["FaceOffOutput"] | components["schemas"]["FloatCollectionOutput"] | components["schemas"]["FloatGeneratorOutput"] | components["schemas"]["FloatOutput"] | components["schemas"]["Flux2KleinLoRALoaderOutput"] | components["schemas"]["Flux2KleinModelLoaderOutput"] | components["schemas"]["FluxConditioningCollectionOutput"] 
| components["schemas"]["FluxConditioningOutput"] | components["schemas"]["FluxControlLoRALoaderOutput"] | components["schemas"]["FluxControlNetOutput"] | components["schemas"]["FluxFillOutput"] | components["schemas"]["FluxKontextOutput"] | components["schemas"]["FluxLoRALoaderOutput"] | components["schemas"]["FluxModelLoaderOutput"] | components["schemas"]["FluxReduxOutput"] | components["schemas"]["GradientMaskOutput"] | components["schemas"]["IPAdapterOutput"] | components["schemas"]["IdealSizeOutput"] | components["schemas"]["ImageCollectionOutput"] | components["schemas"]["ImageGeneratorOutput"] | components["schemas"]["ImageOutput"] | components["schemas"]["ImagePanelCoordinateOutput"] | components["schemas"]["IntegerCollectionOutput"] | components["schemas"]["IntegerGeneratorOutput"] | components["schemas"]["IntegerOutput"] | components["schemas"]["IterateInvocationOutput"] | components["schemas"]["LatentsCollectionOutput"] | components["schemas"]["LatentsMetaOutput"] | components["schemas"]["LatentsOutput"] | components["schemas"]["LoRALoaderOutput"] | components["schemas"]["LoRASelectorOutput"] | components["schemas"]["MDControlListOutput"] | components["schemas"]["MDIPAdapterListOutput"] | components["schemas"]["MDT2IAdapterListOutput"] | components["schemas"]["MaskOutput"] | components["schemas"]["MetadataItemOutput"] | components["schemas"]["MetadataOutput"] | components["schemas"]["MetadataToLorasCollectionOutput"] | components["schemas"]["MetadataToModelOutput"] | components["schemas"]["MetadataToSDXLModelOutput"] | components["schemas"]["ModelIdentifierOutput"] | components["schemas"]["ModelLoaderOutput"] | components["schemas"]["NoiseOutput"] | components["schemas"]["PBRMapsOutput"] | components["schemas"]["PairTileImageOutput"] | components["schemas"]["PromptTemplateOutput"] | components["schemas"]["SD3ConditioningOutput"] | components["schemas"]["SDXLLoRALoaderOutput"] | components["schemas"]["SDXLModelLoaderOutput"] | 
components["schemas"]["SDXLRefinerModelLoaderOutput"] | components["schemas"]["SchedulerOutput"] | components["schemas"]["Sd3ModelLoaderOutput"] | components["schemas"]["SeamlessModeOutput"] | components["schemas"]["String2Output"] | components["schemas"]["StringCollectionOutput"] | components["schemas"]["StringGeneratorOutput"] | components["schemas"]["StringOutput"] | components["schemas"]["StringPosNegOutput"] | components["schemas"]["T2IAdapterOutput"] | components["schemas"]["TileToPropertiesOutput"] | components["schemas"]["UNetOutput"] | components["schemas"]["VAEOutput"] | components["schemas"]["ZImageConditioningOutput"] | components["schemas"]["ZImageControlOutput"] | components["schemas"]["ZImageLoRALoaderOutput"] | components["schemas"]["ZImageModelLoaderOutput"]; + [key: string]: components["schemas"]["AnimaConditioningOutput"] | components["schemas"]["AnimaLoRALoaderOutput"] | components["schemas"]["AnimaModelLoaderOutput"] | components["schemas"]["BooleanCollectionOutput"] | components["schemas"]["BooleanOutput"] | components["schemas"]["BoundingBoxCollectionOutput"] | components["schemas"]["BoundingBoxOutput"] | components["schemas"]["CLIPOutput"] | components["schemas"]["CLIPSkipInvocationOutput"] | components["schemas"]["CalculateImageTilesOutput"] | components["schemas"]["CogView4ConditioningOutput"] | components["schemas"]["CogView4ModelLoaderOutput"] | components["schemas"]["CollectInvocationOutput"] | components["schemas"]["ColorCollectionOutput"] | components["schemas"]["ColorOutput"] | components["schemas"]["ConditioningCollectionOutput"] | components["schemas"]["ConditioningOutput"] | components["schemas"]["ControlOutput"] | components["schemas"]["DenoiseMaskOutput"] | components["schemas"]["FaceMaskOutput"] | components["schemas"]["FaceOffOutput"] | components["schemas"]["FloatCollectionOutput"] | components["schemas"]["FloatGeneratorOutput"] | components["schemas"]["FloatOutput"] | components["schemas"]["Flux2KleinLoRALoaderOutput"] | 
components["schemas"]["Flux2KleinModelLoaderOutput"] | components["schemas"]["FluxConditioningCollectionOutput"] | components["schemas"]["FluxConditioningOutput"] | components["schemas"]["FluxControlLoRALoaderOutput"] | components["schemas"]["FluxControlNetOutput"] | components["schemas"]["FluxFillOutput"] | components["schemas"]["FluxKontextOutput"] | components["schemas"]["FluxLoRALoaderOutput"] | components["schemas"]["FluxModelLoaderOutput"] | components["schemas"]["FluxReduxOutput"] | components["schemas"]["GradientMaskOutput"] | components["schemas"]["IPAdapterOutput"] | components["schemas"]["IdealSizeOutput"] | components["schemas"]["ImageCollectionOutput"] | components["schemas"]["ImageGeneratorOutput"] | components["schemas"]["ImageOutput"] | components["schemas"]["ImagePanelCoordinateOutput"] | components["schemas"]["IntegerCollectionOutput"] | components["schemas"]["IntegerGeneratorOutput"] | components["schemas"]["IntegerOutput"] | components["schemas"]["IterateInvocationOutput"] | components["schemas"]["LatentsCollectionOutput"] | components["schemas"]["LatentsMetaOutput"] | components["schemas"]["LatentsOutput"] | components["schemas"]["LoRALoaderOutput"] | components["schemas"]["LoRASelectorOutput"] | components["schemas"]["MDControlListOutput"] | components["schemas"]["MDIPAdapterListOutput"] | components["schemas"]["MDT2IAdapterListOutput"] | components["schemas"]["MaskOutput"] | components["schemas"]["MetadataItemOutput"] | components["schemas"]["MetadataOutput"] | components["schemas"]["MetadataToLorasCollectionOutput"] | components["schemas"]["MetadataToModelOutput"] | components["schemas"]["MetadataToSDXLModelOutput"] | components["schemas"]["ModelIdentifierOutput"] | components["schemas"]["ModelLoaderOutput"] | components["schemas"]["NoiseOutput"] | components["schemas"]["PBRMapsOutput"] | components["schemas"]["PairTileImageOutput"] | components["schemas"]["PromptTemplateOutput"] | components["schemas"]["SD3ConditioningOutput"] | 
components["schemas"]["SDXLLoRALoaderOutput"] | components["schemas"]["SDXLModelLoaderOutput"] | components["schemas"]["SDXLRefinerModelLoaderOutput"] | components["schemas"]["SchedulerOutput"] | components["schemas"]["Sd3ModelLoaderOutput"] | components["schemas"]["SeamlessModeOutput"] | components["schemas"]["String2Output"] | components["schemas"]["StringCollectionOutput"] | components["schemas"]["StringGeneratorOutput"] | components["schemas"]["StringOutput"] | components["schemas"]["StringPosNegOutput"] | components["schemas"]["T2IAdapterOutput"] | components["schemas"]["TileToPropertiesOutput"] | components["schemas"]["UNetOutput"] | components["schemas"]["VAEOutput"] | components["schemas"]["ZImageConditioningOutput"] | components["schemas"]["ZImageControlOutput"] | components["schemas"]["ZImageLoRALoaderOutput"] | components["schemas"]["ZImageModelLoaderOutput"]; }; /** * Errors @@ -14258,7 +14382,7 @@ export type components = { * Invocation * @description The ID of the invocation */ - invocation: components["schemas"]["AddInvocation"] | components["schemas"]["AlphaMaskToTensorInvocation"] | components["schemas"]["AnimaDenoiseInvocation"] | components["schemas"]["AnimaImageToLatentsInvocation"] | components["schemas"]["AnimaLatentsToImageInvocation"] | components["schemas"]["AnimaModelLoaderInvocation"] | components["schemas"]["AnimaTextEncoderInvocation"] | components["schemas"]["ApplyMaskTensorToImageInvocation"] | components["schemas"]["ApplyMaskToImageInvocation"] | components["schemas"]["BlankImageInvocation"] | components["schemas"]["BlendLatentsInvocation"] | components["schemas"]["BooleanCollectionInvocation"] | components["schemas"]["BooleanInvocation"] | components["schemas"]["BoundingBoxInvocation"] | components["schemas"]["CLIPSkipInvocation"] | components["schemas"]["CV2InfillInvocation"] | components["schemas"]["CalculateImageTilesEvenSplitInvocation"] | components["schemas"]["CalculateImageTilesInvocation"] | 
components["schemas"]["CalculateImageTilesMinimumOverlapInvocation"] | components["schemas"]["CannyEdgeDetectionInvocation"] | components["schemas"]["CanvasPasteBackInvocation"] | components["schemas"]["CanvasV2MaskAndCropInvocation"] | components["schemas"]["CenterPadCropInvocation"] | components["schemas"]["CogView4DenoiseInvocation"] | components["schemas"]["CogView4ImageToLatentsInvocation"] | components["schemas"]["CogView4LatentsToImageInvocation"] | components["schemas"]["CogView4ModelLoaderInvocation"] | components["schemas"]["CogView4TextEncoderInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["ColorInvocation"] | components["schemas"]["ColorMapInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["ConditioningCollectionInvocation"] | components["schemas"]["ConditioningInvocation"] | components["schemas"]["ContentShuffleInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["CoreMetadataInvocation"] | components["schemas"]["CreateDenoiseMaskInvocation"] | components["schemas"]["CreateGradientMaskInvocation"] | components["schemas"]["CropImageToBoundingBoxInvocation"] | components["schemas"]["CropLatentsCoreInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["DWOpenposeDetectionInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["DenoiseLatentsMetaInvocation"] | components["schemas"]["DepthAnythingDepthEstimationInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["ExpandMaskWithFadeInvocation"] | components["schemas"]["FLUXLoRACollectionLoader"] | components["schemas"]["FaceIdentifierInvocation"] | components["schemas"]["FaceMaskInvocation"] | components["schemas"]["FaceOffInvocation"] | 
components["schemas"]["FloatBatchInvocation"] | components["schemas"]["FloatCollectionInvocation"] | components["schemas"]["FloatGenerator"] | components["schemas"]["FloatInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["FloatMathInvocation"] | components["schemas"]["FloatToIntegerInvocation"] | components["schemas"]["Flux2DenoiseInvocation"] | components["schemas"]["Flux2KleinLoRACollectionLoader"] | components["schemas"]["Flux2KleinLoRALoaderInvocation"] | components["schemas"]["Flux2KleinModelLoaderInvocation"] | components["schemas"]["Flux2KleinTextEncoderInvocation"] | components["schemas"]["Flux2VaeDecodeInvocation"] | components["schemas"]["Flux2VaeEncodeInvocation"] | components["schemas"]["FluxControlLoRALoaderInvocation"] | components["schemas"]["FluxControlNetInvocation"] | components["schemas"]["FluxDenoiseInvocation"] | components["schemas"]["FluxDenoiseLatentsMetaInvocation"] | components["schemas"]["FluxFillInvocation"] | components["schemas"]["FluxIPAdapterInvocation"] | components["schemas"]["FluxKontextConcatenateImagesInvocation"] | components["schemas"]["FluxKontextInvocation"] | components["schemas"]["FluxLoRALoaderInvocation"] | components["schemas"]["FluxModelLoaderInvocation"] | components["schemas"]["FluxReduxInvocation"] | components["schemas"]["FluxTextEncoderInvocation"] | components["schemas"]["FluxVaeDecodeInvocation"] | components["schemas"]["FluxVaeEncodeInvocation"] | components["schemas"]["FreeUInvocation"] | components["schemas"]["GetMaskBoundingBoxInvocation"] | components["schemas"]["GroundingDinoInvocation"] | components["schemas"]["HEDEdgeDetectionInvocation"] | components["schemas"]["HeuristicResizeInvocation"] | components["schemas"]["IPAdapterInvocation"] | components["schemas"]["IdealSizeInvocation"] | components["schemas"]["ImageBatchInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageChannelInvocation"] | 
components["schemas"]["ImageChannelMultiplyInvocation"] | components["schemas"]["ImageChannelOffsetInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImageGenerator"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ImageInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ImageMaskToTensorInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["ImageNoiseInvocation"] | components["schemas"]["ImagePanelLayoutInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["IntegerBatchInvocation"] | components["schemas"]["IntegerCollectionInvocation"] | components["schemas"]["IntegerGenerator"] | components["schemas"]["IntegerInvocation"] | components["schemas"]["IntegerMathInvocation"] | components["schemas"]["InvertTensorMaskInvocation"] | components["schemas"]["InvokeAdjustImageHuePlusInvocation"] | components["schemas"]["InvokeEquivalentAchromaticLightnessInvocation"] | components["schemas"]["InvokeImageBlendInvocation"] | components["schemas"]["InvokeImageCompositorInvocation"] | components["schemas"]["InvokeImageDilateOrErodeInvocation"] | components["schemas"]["InvokeImageEnhanceInvocation"] | components["schemas"]["InvokeImageValueThresholdsInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["LaMaInfillInvocation"] | 
components["schemas"]["LatentsCollectionInvocation"] | components["schemas"]["LatentsInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["LineartAnimeEdgeDetectionInvocation"] | components["schemas"]["LineartEdgeDetectionInvocation"] | components["schemas"]["LlavaOnevisionVllmInvocation"] | components["schemas"]["LoRACollectionLoader"] | components["schemas"]["LoRALoaderInvocation"] | components["schemas"]["LoRASelectorInvocation"] | components["schemas"]["MLSDDetectionInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["MaskFromIDInvocation"] | components["schemas"]["MaskTensorToImageInvocation"] | components["schemas"]["MediaPipeFaceDetectionInvocation"] | components["schemas"]["MergeMetadataInvocation"] | components["schemas"]["MergeTilesToImageInvocation"] | components["schemas"]["MetadataFieldExtractorInvocation"] | components["schemas"]["MetadataFromImageInvocation"] | components["schemas"]["MetadataInvocation"] | components["schemas"]["MetadataItemInvocation"] | components["schemas"]["MetadataItemLinkedInvocation"] | components["schemas"]["MetadataToBoolCollectionInvocation"] | components["schemas"]["MetadataToBoolInvocation"] | components["schemas"]["MetadataToControlnetsInvocation"] | components["schemas"]["MetadataToFloatCollectionInvocation"] | components["schemas"]["MetadataToFloatInvocation"] | components["schemas"]["MetadataToIPAdaptersInvocation"] | components["schemas"]["MetadataToIntegerCollectionInvocation"] | components["schemas"]["MetadataToIntegerInvocation"] | components["schemas"]["MetadataToLorasCollectionInvocation"] | components["schemas"]["MetadataToLorasInvocation"] | components["schemas"]["MetadataToModelInvocation"] | components["schemas"]["MetadataToSDXLLorasInvocation"] | 
components["schemas"]["MetadataToSDXLModelInvocation"] | components["schemas"]["MetadataToSchedulerInvocation"] | components["schemas"]["MetadataToStringCollectionInvocation"] | components["schemas"]["MetadataToStringInvocation"] | components["schemas"]["MetadataToT2IAdaptersInvocation"] | components["schemas"]["MetadataToVAEInvocation"] | components["schemas"]["ModelIdentifierInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["NormalMapInvocation"] | components["schemas"]["PBRMapsInvocation"] | components["schemas"]["PairTileImageInvocation"] | components["schemas"]["PasteImageIntoBoundingBoxInvocation"] | components["schemas"]["PiDiNetEdgeDetectionInvocation"] | components["schemas"]["PromptTemplateInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["RandomFloatInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RectangleMaskInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["RoundInvocation"] | components["schemas"]["SD3DenoiseInvocation"] | components["schemas"]["SD3ImageToLatentsInvocation"] | components["schemas"]["SD3LatentsToImageInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["SDXLLoRACollectionLoader"] | components["schemas"]["SDXLLoRALoaderInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["SaveImageInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["SchedulerInvocation"] | components["schemas"]["Sd3ModelLoaderInvocation"] | components["schemas"]["Sd3TextEncoderInvocation"] | 
components["schemas"]["SeamlessModeInvocation"] | components["schemas"]["SegmentAnythingInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["SpandrelImageToImageAutoscaleInvocation"] | components["schemas"]["SpandrelImageToImageInvocation"] | components["schemas"]["StringBatchInvocation"] | components["schemas"]["StringCollectionInvocation"] | components["schemas"]["StringGenerator"] | components["schemas"]["StringInvocation"] | components["schemas"]["StringJoinInvocation"] | components["schemas"]["StringJoinThreeInvocation"] | components["schemas"]["StringReplaceInvocation"] | components["schemas"]["StringSplitInvocation"] | components["schemas"]["StringSplitNegInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["T2IAdapterInvocation"] | components["schemas"]["TileToPropertiesInvocation"] | components["schemas"]["TiledMultiDiffusionDenoiseLatents"] | components["schemas"]["UnsharpMaskInvocation"] | components["schemas"]["VAELoaderInvocation"] | components["schemas"]["ZImageControlInvocation"] | components["schemas"]["ZImageDenoiseInvocation"] | components["schemas"]["ZImageDenoiseMetaInvocation"] | components["schemas"]["ZImageImageToLatentsInvocation"] | components["schemas"]["ZImageLatentsToImageInvocation"] | components["schemas"]["ZImageLoRACollectionLoader"] | components["schemas"]["ZImageLoRALoaderInvocation"] | components["schemas"]["ZImageModelLoaderInvocation"] | components["schemas"]["ZImageSeedVarianceEnhancerInvocation"] | components["schemas"]["ZImageTextEncoderInvocation"]; + invocation: components["schemas"]["AddInvocation"] | components["schemas"]["AlphaMaskToTensorInvocation"] | components["schemas"]["AnimaDenoiseInvocation"] | components["schemas"]["AnimaImageToLatentsInvocation"] | components["schemas"]["AnimaLatentsToImageInvocation"] | components["schemas"]["AnimaLoRACollectionLoader"] | components["schemas"]["AnimaLoRALoaderInvocation"] | 
components["schemas"]["AnimaModelLoaderInvocation"] | components["schemas"]["AnimaTextEncoderInvocation"] | components["schemas"]["ApplyMaskTensorToImageInvocation"] | components["schemas"]["ApplyMaskToImageInvocation"] | components["schemas"]["BlankImageInvocation"] | components["schemas"]["BlendLatentsInvocation"] | components["schemas"]["BooleanCollectionInvocation"] | components["schemas"]["BooleanInvocation"] | components["schemas"]["BoundingBoxInvocation"] | components["schemas"]["CLIPSkipInvocation"] | components["schemas"]["CV2InfillInvocation"] | components["schemas"]["CalculateImageTilesEvenSplitInvocation"] | components["schemas"]["CalculateImageTilesInvocation"] | components["schemas"]["CalculateImageTilesMinimumOverlapInvocation"] | components["schemas"]["CannyEdgeDetectionInvocation"] | components["schemas"]["CanvasPasteBackInvocation"] | components["schemas"]["CanvasV2MaskAndCropInvocation"] | components["schemas"]["CenterPadCropInvocation"] | components["schemas"]["CogView4DenoiseInvocation"] | components["schemas"]["CogView4ImageToLatentsInvocation"] | components["schemas"]["CogView4LatentsToImageInvocation"] | components["schemas"]["CogView4ModelLoaderInvocation"] | components["schemas"]["CogView4TextEncoderInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["ColorInvocation"] | components["schemas"]["ColorMapInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["ConditioningCollectionInvocation"] | components["schemas"]["ConditioningInvocation"] | components["schemas"]["ContentShuffleInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["CoreMetadataInvocation"] | components["schemas"]["CreateDenoiseMaskInvocation"] | components["schemas"]["CreateGradientMaskInvocation"] | components["schemas"]["CropImageToBoundingBoxInvocation"] | components["schemas"]["CropLatentsCoreInvocation"] | 
components["schemas"]["CvInpaintInvocation"] | components["schemas"]["DWOpenposeDetectionInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["DenoiseLatentsMetaInvocation"] | components["schemas"]["DepthAnythingDepthEstimationInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["ExpandMaskWithFadeInvocation"] | components["schemas"]["FLUXLoRACollectionLoader"] | components["schemas"]["FaceIdentifierInvocation"] | components["schemas"]["FaceMaskInvocation"] | components["schemas"]["FaceOffInvocation"] | components["schemas"]["FloatBatchInvocation"] | components["schemas"]["FloatCollectionInvocation"] | components["schemas"]["FloatGenerator"] | components["schemas"]["FloatInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["FloatMathInvocation"] | components["schemas"]["FloatToIntegerInvocation"] | components["schemas"]["Flux2DenoiseInvocation"] | components["schemas"]["Flux2KleinLoRACollectionLoader"] | components["schemas"]["Flux2KleinLoRALoaderInvocation"] | components["schemas"]["Flux2KleinModelLoaderInvocation"] | components["schemas"]["Flux2KleinTextEncoderInvocation"] | components["schemas"]["Flux2VaeDecodeInvocation"] | components["schemas"]["Flux2VaeEncodeInvocation"] | components["schemas"]["FluxControlLoRALoaderInvocation"] | components["schemas"]["FluxControlNetInvocation"] | components["schemas"]["FluxDenoiseInvocation"] | components["schemas"]["FluxDenoiseLatentsMetaInvocation"] | components["schemas"]["FluxFillInvocation"] | components["schemas"]["FluxIPAdapterInvocation"] | components["schemas"]["FluxKontextConcatenateImagesInvocation"] | components["schemas"]["FluxKontextInvocation"] | components["schemas"]["FluxLoRALoaderInvocation"] | components["schemas"]["FluxModelLoaderInvocation"] | components["schemas"]["FluxReduxInvocation"] | 
components["schemas"]["FluxTextEncoderInvocation"] | components["schemas"]["FluxVaeDecodeInvocation"] | components["schemas"]["FluxVaeEncodeInvocation"] | components["schemas"]["FreeUInvocation"] | components["schemas"]["GetMaskBoundingBoxInvocation"] | components["schemas"]["GroundingDinoInvocation"] | components["schemas"]["HEDEdgeDetectionInvocation"] | components["schemas"]["HeuristicResizeInvocation"] | components["schemas"]["IPAdapterInvocation"] | components["schemas"]["IdealSizeInvocation"] | components["schemas"]["ImageBatchInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["ImageChannelMultiplyInvocation"] | components["schemas"]["ImageChannelOffsetInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImageGenerator"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ImageInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ImageMaskToTensorInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["ImageNoiseInvocation"] | components["schemas"]["ImagePanelLayoutInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["IntegerBatchInvocation"] | components["schemas"]["IntegerCollectionInvocation"] | components["schemas"]["IntegerGenerator"] | 
components["schemas"]["IntegerInvocation"] | components["schemas"]["IntegerMathInvocation"] | components["schemas"]["InvertTensorMaskInvocation"] | components["schemas"]["InvokeAdjustImageHuePlusInvocation"] | components["schemas"]["InvokeEquivalentAchromaticLightnessInvocation"] | components["schemas"]["InvokeImageBlendInvocation"] | components["schemas"]["InvokeImageCompositorInvocation"] | components["schemas"]["InvokeImageDilateOrErodeInvocation"] | components["schemas"]["InvokeImageEnhanceInvocation"] | components["schemas"]["InvokeImageValueThresholdsInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["LaMaInfillInvocation"] | components["schemas"]["LatentsCollectionInvocation"] | components["schemas"]["LatentsInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["LineartAnimeEdgeDetectionInvocation"] | components["schemas"]["LineartEdgeDetectionInvocation"] | components["schemas"]["LlavaOnevisionVllmInvocation"] | components["schemas"]["LoRACollectionLoader"] | components["schemas"]["LoRALoaderInvocation"] | components["schemas"]["LoRASelectorInvocation"] | components["schemas"]["MLSDDetectionInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["MaskFromIDInvocation"] | components["schemas"]["MaskTensorToImageInvocation"] | components["schemas"]["MediaPipeFaceDetectionInvocation"] | components["schemas"]["MergeMetadataInvocation"] | components["schemas"]["MergeTilesToImageInvocation"] | components["schemas"]["MetadataFieldExtractorInvocation"] | components["schemas"]["MetadataFromImageInvocation"] | components["schemas"]["MetadataInvocation"] | components["schemas"]["MetadataItemInvocation"] | components["schemas"]["MetadataItemLinkedInvocation"] | components["schemas"]["MetadataToBoolCollectionInvocation"] | 
components["schemas"]["MetadataToBoolInvocation"] | components["schemas"]["MetadataToControlnetsInvocation"] | components["schemas"]["MetadataToFloatCollectionInvocation"] | components["schemas"]["MetadataToFloatInvocation"] | components["schemas"]["MetadataToIPAdaptersInvocation"] | components["schemas"]["MetadataToIntegerCollectionInvocation"] | components["schemas"]["MetadataToIntegerInvocation"] | components["schemas"]["MetadataToLorasCollectionInvocation"] | components["schemas"]["MetadataToLorasInvocation"] | components["schemas"]["MetadataToModelInvocation"] | components["schemas"]["MetadataToSDXLLorasInvocation"] | components["schemas"]["MetadataToSDXLModelInvocation"] | components["schemas"]["MetadataToSchedulerInvocation"] | components["schemas"]["MetadataToStringCollectionInvocation"] | components["schemas"]["MetadataToStringInvocation"] | components["schemas"]["MetadataToT2IAdaptersInvocation"] | components["schemas"]["MetadataToVAEInvocation"] | components["schemas"]["ModelIdentifierInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["NormalMapInvocation"] | components["schemas"]["PBRMapsInvocation"] | components["schemas"]["PairTileImageInvocation"] | components["schemas"]["PasteImageIntoBoundingBoxInvocation"] | components["schemas"]["PiDiNetEdgeDetectionInvocation"] | components["schemas"]["PromptTemplateInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["RandomFloatInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RectangleMaskInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["RoundInvocation"] | components["schemas"]["SD3DenoiseInvocation"] | components["schemas"]["SD3ImageToLatentsInvocation"] | 
components["schemas"]["SD3LatentsToImageInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["SDXLLoRACollectionLoader"] | components["schemas"]["SDXLLoRALoaderInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["SaveImageInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["SchedulerInvocation"] | components["schemas"]["Sd3ModelLoaderInvocation"] | components["schemas"]["Sd3TextEncoderInvocation"] | components["schemas"]["SeamlessModeInvocation"] | components["schemas"]["SegmentAnythingInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["SpandrelImageToImageAutoscaleInvocation"] | components["schemas"]["SpandrelImageToImageInvocation"] | components["schemas"]["StringBatchInvocation"] | components["schemas"]["StringCollectionInvocation"] | components["schemas"]["StringGenerator"] | components["schemas"]["StringInvocation"] | components["schemas"]["StringJoinInvocation"] | components["schemas"]["StringJoinThreeInvocation"] | components["schemas"]["StringReplaceInvocation"] | components["schemas"]["StringSplitInvocation"] | components["schemas"]["StringSplitNegInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["T2IAdapterInvocation"] | components["schemas"]["TileToPropertiesInvocation"] | components["schemas"]["TiledMultiDiffusionDenoiseLatents"] | components["schemas"]["UnsharpMaskInvocation"] | components["schemas"]["VAELoaderInvocation"] | components["schemas"]["ZImageControlInvocation"] | components["schemas"]["ZImageDenoiseInvocation"] | components["schemas"]["ZImageDenoiseMetaInvocation"] | components["schemas"]["ZImageImageToLatentsInvocation"] | components["schemas"]["ZImageLatentsToImageInvocation"] | components["schemas"]["ZImageLoRACollectionLoader"] | 
components["schemas"]["ZImageLoRALoaderInvocation"] | components["schemas"]["ZImageModelLoaderInvocation"] | components["schemas"]["ZImageSeedVarianceEnhancerInvocation"] | components["schemas"]["ZImageTextEncoderInvocation"]; /** * Invocation Source Id * @description The ID of the prepared invocation's source node @@ -14268,7 +14392,7 @@ export type components = { * Result * @description The result of the invocation */ - result: components["schemas"]["AnimaConditioningOutput"] | components["schemas"]["AnimaModelLoaderOutput"] | components["schemas"]["BooleanCollectionOutput"] | components["schemas"]["BooleanOutput"] | components["schemas"]["BoundingBoxCollectionOutput"] | components["schemas"]["BoundingBoxOutput"] | components["schemas"]["CLIPOutput"] | components["schemas"]["CLIPSkipInvocationOutput"] | components["schemas"]["CalculateImageTilesOutput"] | components["schemas"]["CogView4ConditioningOutput"] | components["schemas"]["CogView4ModelLoaderOutput"] | components["schemas"]["CollectInvocationOutput"] | components["schemas"]["ColorCollectionOutput"] | components["schemas"]["ColorOutput"] | components["schemas"]["ConditioningCollectionOutput"] | components["schemas"]["ConditioningOutput"] | components["schemas"]["ControlOutput"] | components["schemas"]["DenoiseMaskOutput"] | components["schemas"]["FaceMaskOutput"] | components["schemas"]["FaceOffOutput"] | components["schemas"]["FloatCollectionOutput"] | components["schemas"]["FloatGeneratorOutput"] | components["schemas"]["FloatOutput"] | components["schemas"]["Flux2KleinLoRALoaderOutput"] | components["schemas"]["Flux2KleinModelLoaderOutput"] | components["schemas"]["FluxConditioningCollectionOutput"] | components["schemas"]["FluxConditioningOutput"] | components["schemas"]["FluxControlLoRALoaderOutput"] | components["schemas"]["FluxControlNetOutput"] | components["schemas"]["FluxFillOutput"] | components["schemas"]["FluxKontextOutput"] | components["schemas"]["FluxLoRALoaderOutput"] | 
components["schemas"]["FluxModelLoaderOutput"] | components["schemas"]["FluxReduxOutput"] | components["schemas"]["GradientMaskOutput"] | components["schemas"]["IPAdapterOutput"] | components["schemas"]["IdealSizeOutput"] | components["schemas"]["ImageCollectionOutput"] | components["schemas"]["ImageGeneratorOutput"] | components["schemas"]["ImageOutput"] | components["schemas"]["ImagePanelCoordinateOutput"] | components["schemas"]["IntegerCollectionOutput"] | components["schemas"]["IntegerGeneratorOutput"] | components["schemas"]["IntegerOutput"] | components["schemas"]["IterateInvocationOutput"] | components["schemas"]["LatentsCollectionOutput"] | components["schemas"]["LatentsMetaOutput"] | components["schemas"]["LatentsOutput"] | components["schemas"]["LoRALoaderOutput"] | components["schemas"]["LoRASelectorOutput"] | components["schemas"]["MDControlListOutput"] | components["schemas"]["MDIPAdapterListOutput"] | components["schemas"]["MDT2IAdapterListOutput"] | components["schemas"]["MaskOutput"] | components["schemas"]["MetadataItemOutput"] | components["schemas"]["MetadataOutput"] | components["schemas"]["MetadataToLorasCollectionOutput"] | components["schemas"]["MetadataToModelOutput"] | components["schemas"]["MetadataToSDXLModelOutput"] | components["schemas"]["ModelIdentifierOutput"] | components["schemas"]["ModelLoaderOutput"] | components["schemas"]["NoiseOutput"] | components["schemas"]["PBRMapsOutput"] | components["schemas"]["PairTileImageOutput"] | components["schemas"]["PromptTemplateOutput"] | components["schemas"]["SD3ConditioningOutput"] | components["schemas"]["SDXLLoRALoaderOutput"] | components["schemas"]["SDXLModelLoaderOutput"] | components["schemas"]["SDXLRefinerModelLoaderOutput"] | components["schemas"]["SchedulerOutput"] | components["schemas"]["Sd3ModelLoaderOutput"] | components["schemas"]["SeamlessModeOutput"] | components["schemas"]["String2Output"] | components["schemas"]["StringCollectionOutput"] | 
components["schemas"]["StringGeneratorOutput"] | components["schemas"]["StringOutput"] | components["schemas"]["StringPosNegOutput"] | components["schemas"]["T2IAdapterOutput"] | components["schemas"]["TileToPropertiesOutput"] | components["schemas"]["UNetOutput"] | components["schemas"]["VAEOutput"] | components["schemas"]["ZImageConditioningOutput"] | components["schemas"]["ZImageControlOutput"] | components["schemas"]["ZImageLoRALoaderOutput"] | components["schemas"]["ZImageModelLoaderOutput"]; + result: components["schemas"]["AnimaConditioningOutput"] | components["schemas"]["AnimaLoRALoaderOutput"] | components["schemas"]["AnimaModelLoaderOutput"] | components["schemas"]["BooleanCollectionOutput"] | components["schemas"]["BooleanOutput"] | components["schemas"]["BoundingBoxCollectionOutput"] | components["schemas"]["BoundingBoxOutput"] | components["schemas"]["CLIPOutput"] | components["schemas"]["CLIPSkipInvocationOutput"] | components["schemas"]["CalculateImageTilesOutput"] | components["schemas"]["CogView4ConditioningOutput"] | components["schemas"]["CogView4ModelLoaderOutput"] | components["schemas"]["CollectInvocationOutput"] | components["schemas"]["ColorCollectionOutput"] | components["schemas"]["ColorOutput"] | components["schemas"]["ConditioningCollectionOutput"] | components["schemas"]["ConditioningOutput"] | components["schemas"]["ControlOutput"] | components["schemas"]["DenoiseMaskOutput"] | components["schemas"]["FaceMaskOutput"] | components["schemas"]["FaceOffOutput"] | components["schemas"]["FloatCollectionOutput"] | components["schemas"]["FloatGeneratorOutput"] | components["schemas"]["FloatOutput"] | components["schemas"]["Flux2KleinLoRALoaderOutput"] | components["schemas"]["Flux2KleinModelLoaderOutput"] | components["schemas"]["FluxConditioningCollectionOutput"] | components["schemas"]["FluxConditioningOutput"] | components["schemas"]["FluxControlLoRALoaderOutput"] | components["schemas"]["FluxControlNetOutput"] | 
components["schemas"]["FluxFillOutput"] | components["schemas"]["FluxKontextOutput"] | components["schemas"]["FluxLoRALoaderOutput"] | components["schemas"]["FluxModelLoaderOutput"] | components["schemas"]["FluxReduxOutput"] | components["schemas"]["GradientMaskOutput"] | components["schemas"]["IPAdapterOutput"] | components["schemas"]["IdealSizeOutput"] | components["schemas"]["ImageCollectionOutput"] | components["schemas"]["ImageGeneratorOutput"] | components["schemas"]["ImageOutput"] | components["schemas"]["ImagePanelCoordinateOutput"] | components["schemas"]["IntegerCollectionOutput"] | components["schemas"]["IntegerGeneratorOutput"] | components["schemas"]["IntegerOutput"] | components["schemas"]["IterateInvocationOutput"] | components["schemas"]["LatentsCollectionOutput"] | components["schemas"]["LatentsMetaOutput"] | components["schemas"]["LatentsOutput"] | components["schemas"]["LoRALoaderOutput"] | components["schemas"]["LoRASelectorOutput"] | components["schemas"]["MDControlListOutput"] | components["schemas"]["MDIPAdapterListOutput"] | components["schemas"]["MDT2IAdapterListOutput"] | components["schemas"]["MaskOutput"] | components["schemas"]["MetadataItemOutput"] | components["schemas"]["MetadataOutput"] | components["schemas"]["MetadataToLorasCollectionOutput"] | components["schemas"]["MetadataToModelOutput"] | components["schemas"]["MetadataToSDXLModelOutput"] | components["schemas"]["ModelIdentifierOutput"] | components["schemas"]["ModelLoaderOutput"] | components["schemas"]["NoiseOutput"] | components["schemas"]["PBRMapsOutput"] | components["schemas"]["PairTileImageOutput"] | components["schemas"]["PromptTemplateOutput"] | components["schemas"]["SD3ConditioningOutput"] | components["schemas"]["SDXLLoRALoaderOutput"] | components["schemas"]["SDXLModelLoaderOutput"] | components["schemas"]["SDXLRefinerModelLoaderOutput"] | components["schemas"]["SchedulerOutput"] | components["schemas"]["Sd3ModelLoaderOutput"] | 
components["schemas"]["SeamlessModeOutput"] | components["schemas"]["String2Output"] | components["schemas"]["StringCollectionOutput"] | components["schemas"]["StringGeneratorOutput"] | components["schemas"]["StringOutput"] | components["schemas"]["StringPosNegOutput"] | components["schemas"]["T2IAdapterOutput"] | components["schemas"]["TileToPropertiesOutput"] | components["schemas"]["UNetOutput"] | components["schemas"]["VAEOutput"] | components["schemas"]["ZImageConditioningOutput"] | components["schemas"]["ZImageControlOutput"] | components["schemas"]["ZImageLoRALoaderOutput"] | components["schemas"]["ZImageModelLoaderOutput"]; }; /** * InvocationErrorEvent @@ -14322,7 +14446,7 @@ export type components = { * Invocation * @description The ID of the invocation */ - invocation: components["schemas"]["AddInvocation"] | components["schemas"]["AlphaMaskToTensorInvocation"] | components["schemas"]["AnimaDenoiseInvocation"] | components["schemas"]["AnimaImageToLatentsInvocation"] | components["schemas"]["AnimaLatentsToImageInvocation"] | components["schemas"]["AnimaModelLoaderInvocation"] | components["schemas"]["AnimaTextEncoderInvocation"] | components["schemas"]["ApplyMaskTensorToImageInvocation"] | components["schemas"]["ApplyMaskToImageInvocation"] | components["schemas"]["BlankImageInvocation"] | components["schemas"]["BlendLatentsInvocation"] | components["schemas"]["BooleanCollectionInvocation"] | components["schemas"]["BooleanInvocation"] | components["schemas"]["BoundingBoxInvocation"] | components["schemas"]["CLIPSkipInvocation"] | components["schemas"]["CV2InfillInvocation"] | components["schemas"]["CalculateImageTilesEvenSplitInvocation"] | components["schemas"]["CalculateImageTilesInvocation"] | components["schemas"]["CalculateImageTilesMinimumOverlapInvocation"] | components["schemas"]["CannyEdgeDetectionInvocation"] | components["schemas"]["CanvasPasteBackInvocation"] | components["schemas"]["CanvasV2MaskAndCropInvocation"] | 
components["schemas"]["CenterPadCropInvocation"] | components["schemas"]["CogView4DenoiseInvocation"] | components["schemas"]["CogView4ImageToLatentsInvocation"] | components["schemas"]["CogView4LatentsToImageInvocation"] | components["schemas"]["CogView4ModelLoaderInvocation"] | components["schemas"]["CogView4TextEncoderInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["ColorInvocation"] | components["schemas"]["ColorMapInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["ConditioningCollectionInvocation"] | components["schemas"]["ConditioningInvocation"] | components["schemas"]["ContentShuffleInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["CoreMetadataInvocation"] | components["schemas"]["CreateDenoiseMaskInvocation"] | components["schemas"]["CreateGradientMaskInvocation"] | components["schemas"]["CropImageToBoundingBoxInvocation"] | components["schemas"]["CropLatentsCoreInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["DWOpenposeDetectionInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["DenoiseLatentsMetaInvocation"] | components["schemas"]["DepthAnythingDepthEstimationInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["ExpandMaskWithFadeInvocation"] | components["schemas"]["FLUXLoRACollectionLoader"] | components["schemas"]["FaceIdentifierInvocation"] | components["schemas"]["FaceMaskInvocation"] | components["schemas"]["FaceOffInvocation"] | components["schemas"]["FloatBatchInvocation"] | components["schemas"]["FloatCollectionInvocation"] | components["schemas"]["FloatGenerator"] | components["schemas"]["FloatInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | 
components["schemas"]["FloatMathInvocation"] | components["schemas"]["FloatToIntegerInvocation"] | components["schemas"]["Flux2DenoiseInvocation"] | components["schemas"]["Flux2KleinLoRACollectionLoader"] | components["schemas"]["Flux2KleinLoRALoaderInvocation"] | components["schemas"]["Flux2KleinModelLoaderInvocation"] | components["schemas"]["Flux2KleinTextEncoderInvocation"] | components["schemas"]["Flux2VaeDecodeInvocation"] | components["schemas"]["Flux2VaeEncodeInvocation"] | components["schemas"]["FluxControlLoRALoaderInvocation"] | components["schemas"]["FluxControlNetInvocation"] | components["schemas"]["FluxDenoiseInvocation"] | components["schemas"]["FluxDenoiseLatentsMetaInvocation"] | components["schemas"]["FluxFillInvocation"] | components["schemas"]["FluxIPAdapterInvocation"] | components["schemas"]["FluxKontextConcatenateImagesInvocation"] | components["schemas"]["FluxKontextInvocation"] | components["schemas"]["FluxLoRALoaderInvocation"] | components["schemas"]["FluxModelLoaderInvocation"] | components["schemas"]["FluxReduxInvocation"] | components["schemas"]["FluxTextEncoderInvocation"] | components["schemas"]["FluxVaeDecodeInvocation"] | components["schemas"]["FluxVaeEncodeInvocation"] | components["schemas"]["FreeUInvocation"] | components["schemas"]["GetMaskBoundingBoxInvocation"] | components["schemas"]["GroundingDinoInvocation"] | components["schemas"]["HEDEdgeDetectionInvocation"] | components["schemas"]["HeuristicResizeInvocation"] | components["schemas"]["IPAdapterInvocation"] | components["schemas"]["IdealSizeInvocation"] | components["schemas"]["ImageBatchInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["ImageChannelMultiplyInvocation"] | components["schemas"]["ImageChannelOffsetInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageCropInvocation"] | 
components["schemas"]["ImageGenerator"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ImageInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ImageMaskToTensorInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["ImageNoiseInvocation"] | components["schemas"]["ImagePanelLayoutInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["IntegerBatchInvocation"] | components["schemas"]["IntegerCollectionInvocation"] | components["schemas"]["IntegerGenerator"] | components["schemas"]["IntegerInvocation"] | components["schemas"]["IntegerMathInvocation"] | components["schemas"]["InvertTensorMaskInvocation"] | components["schemas"]["InvokeAdjustImageHuePlusInvocation"] | components["schemas"]["InvokeEquivalentAchromaticLightnessInvocation"] | components["schemas"]["InvokeImageBlendInvocation"] | components["schemas"]["InvokeImageCompositorInvocation"] | components["schemas"]["InvokeImageDilateOrErodeInvocation"] | components["schemas"]["InvokeImageEnhanceInvocation"] | components["schemas"]["InvokeImageValueThresholdsInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["LaMaInfillInvocation"] | components["schemas"]["LatentsCollectionInvocation"] | components["schemas"]["LatentsInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["LineartAnimeEdgeDetectionInvocation"] | components["schemas"]["LineartEdgeDetectionInvocation"] | 
components["schemas"]["LlavaOnevisionVllmInvocation"] | components["schemas"]["LoRACollectionLoader"] | components["schemas"]["LoRALoaderInvocation"] | components["schemas"]["LoRASelectorInvocation"] | components["schemas"]["MLSDDetectionInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["MaskFromIDInvocation"] | components["schemas"]["MaskTensorToImageInvocation"] | components["schemas"]["MediaPipeFaceDetectionInvocation"] | components["schemas"]["MergeMetadataInvocation"] | components["schemas"]["MergeTilesToImageInvocation"] | components["schemas"]["MetadataFieldExtractorInvocation"] | components["schemas"]["MetadataFromImageInvocation"] | components["schemas"]["MetadataInvocation"] | components["schemas"]["MetadataItemInvocation"] | components["schemas"]["MetadataItemLinkedInvocation"] | components["schemas"]["MetadataToBoolCollectionInvocation"] | components["schemas"]["MetadataToBoolInvocation"] | components["schemas"]["MetadataToControlnetsInvocation"] | components["schemas"]["MetadataToFloatCollectionInvocation"] | components["schemas"]["MetadataToFloatInvocation"] | components["schemas"]["MetadataToIPAdaptersInvocation"] | components["schemas"]["MetadataToIntegerCollectionInvocation"] | components["schemas"]["MetadataToIntegerInvocation"] | components["schemas"]["MetadataToLorasCollectionInvocation"] | components["schemas"]["MetadataToLorasInvocation"] | components["schemas"]["MetadataToModelInvocation"] | components["schemas"]["MetadataToSDXLLorasInvocation"] | components["schemas"]["MetadataToSDXLModelInvocation"] | components["schemas"]["MetadataToSchedulerInvocation"] | components["schemas"]["MetadataToStringCollectionInvocation"] | components["schemas"]["MetadataToStringInvocation"] | components["schemas"]["MetadataToT2IAdaptersInvocation"] | 
components["schemas"]["MetadataToVAEInvocation"] | components["schemas"]["ModelIdentifierInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["NormalMapInvocation"] | components["schemas"]["PBRMapsInvocation"] | components["schemas"]["PairTileImageInvocation"] | components["schemas"]["PasteImageIntoBoundingBoxInvocation"] | components["schemas"]["PiDiNetEdgeDetectionInvocation"] | components["schemas"]["PromptTemplateInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["RandomFloatInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RectangleMaskInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["RoundInvocation"] | components["schemas"]["SD3DenoiseInvocation"] | components["schemas"]["SD3ImageToLatentsInvocation"] | components["schemas"]["SD3LatentsToImageInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["SDXLLoRACollectionLoader"] | components["schemas"]["SDXLLoRALoaderInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["SaveImageInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["SchedulerInvocation"] | components["schemas"]["Sd3ModelLoaderInvocation"] | components["schemas"]["Sd3TextEncoderInvocation"] | components["schemas"]["SeamlessModeInvocation"] | components["schemas"]["SegmentAnythingInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["SpandrelImageToImageAutoscaleInvocation"] | components["schemas"]["SpandrelImageToImageInvocation"] | 
components["schemas"]["StringBatchInvocation"] | components["schemas"]["StringCollectionInvocation"] | components["schemas"]["StringGenerator"] | components["schemas"]["StringInvocation"] | components["schemas"]["StringJoinInvocation"] | components["schemas"]["StringJoinThreeInvocation"] | components["schemas"]["StringReplaceInvocation"] | components["schemas"]["StringSplitInvocation"] | components["schemas"]["StringSplitNegInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["T2IAdapterInvocation"] | components["schemas"]["TileToPropertiesInvocation"] | components["schemas"]["TiledMultiDiffusionDenoiseLatents"] | components["schemas"]["UnsharpMaskInvocation"] | components["schemas"]["VAELoaderInvocation"] | components["schemas"]["ZImageControlInvocation"] | components["schemas"]["ZImageDenoiseInvocation"] | components["schemas"]["ZImageDenoiseMetaInvocation"] | components["schemas"]["ZImageImageToLatentsInvocation"] | components["schemas"]["ZImageLatentsToImageInvocation"] | components["schemas"]["ZImageLoRACollectionLoader"] | components["schemas"]["ZImageLoRALoaderInvocation"] | components["schemas"]["ZImageModelLoaderInvocation"] | components["schemas"]["ZImageSeedVarianceEnhancerInvocation"] | components["schemas"]["ZImageTextEncoderInvocation"]; + invocation: components["schemas"]["AddInvocation"] | components["schemas"]["AlphaMaskToTensorInvocation"] | components["schemas"]["AnimaDenoiseInvocation"] | components["schemas"]["AnimaImageToLatentsInvocation"] | components["schemas"]["AnimaLatentsToImageInvocation"] | components["schemas"]["AnimaLoRACollectionLoader"] | components["schemas"]["AnimaLoRALoaderInvocation"] | components["schemas"]["AnimaModelLoaderInvocation"] | components["schemas"]["AnimaTextEncoderInvocation"] | components["schemas"]["ApplyMaskTensorToImageInvocation"] | components["schemas"]["ApplyMaskToImageInvocation"] | components["schemas"]["BlankImageInvocation"] | components["schemas"]["BlendLatentsInvocation"] 
| components["schemas"]["BooleanCollectionInvocation"] | components["schemas"]["BooleanInvocation"] | components["schemas"]["BoundingBoxInvocation"] | components["schemas"]["CLIPSkipInvocation"] | components["schemas"]["CV2InfillInvocation"] | components["schemas"]["CalculateImageTilesEvenSplitInvocation"] | components["schemas"]["CalculateImageTilesInvocation"] | components["schemas"]["CalculateImageTilesMinimumOverlapInvocation"] | components["schemas"]["CannyEdgeDetectionInvocation"] | components["schemas"]["CanvasPasteBackInvocation"] | components["schemas"]["CanvasV2MaskAndCropInvocation"] | components["schemas"]["CenterPadCropInvocation"] | components["schemas"]["CogView4DenoiseInvocation"] | components["schemas"]["CogView4ImageToLatentsInvocation"] | components["schemas"]["CogView4LatentsToImageInvocation"] | components["schemas"]["CogView4ModelLoaderInvocation"] | components["schemas"]["CogView4TextEncoderInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["ColorInvocation"] | components["schemas"]["ColorMapInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["ConditioningCollectionInvocation"] | components["schemas"]["ConditioningInvocation"] | components["schemas"]["ContentShuffleInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["CoreMetadataInvocation"] | components["schemas"]["CreateDenoiseMaskInvocation"] | components["schemas"]["CreateGradientMaskInvocation"] | components["schemas"]["CropImageToBoundingBoxInvocation"] | components["schemas"]["CropLatentsCoreInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["DWOpenposeDetectionInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["DenoiseLatentsMetaInvocation"] | components["schemas"]["DepthAnythingDepthEstimationInvocation"] | components["schemas"]["DivideInvocation"] | 
components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["ExpandMaskWithFadeInvocation"] | components["schemas"]["FLUXLoRACollectionLoader"] | components["schemas"]["FaceIdentifierInvocation"] | components["schemas"]["FaceMaskInvocation"] | components["schemas"]["FaceOffInvocation"] | components["schemas"]["FloatBatchInvocation"] | components["schemas"]["FloatCollectionInvocation"] | components["schemas"]["FloatGenerator"] | components["schemas"]["FloatInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["FloatMathInvocation"] | components["schemas"]["FloatToIntegerInvocation"] | components["schemas"]["Flux2DenoiseInvocation"] | components["schemas"]["Flux2KleinLoRACollectionLoader"] | components["schemas"]["Flux2KleinLoRALoaderInvocation"] | components["schemas"]["Flux2KleinModelLoaderInvocation"] | components["schemas"]["Flux2KleinTextEncoderInvocation"] | components["schemas"]["Flux2VaeDecodeInvocation"] | components["schemas"]["Flux2VaeEncodeInvocation"] | components["schemas"]["FluxControlLoRALoaderInvocation"] | components["schemas"]["FluxControlNetInvocation"] | components["schemas"]["FluxDenoiseInvocation"] | components["schemas"]["FluxDenoiseLatentsMetaInvocation"] | components["schemas"]["FluxFillInvocation"] | components["schemas"]["FluxIPAdapterInvocation"] | components["schemas"]["FluxKontextConcatenateImagesInvocation"] | components["schemas"]["FluxKontextInvocation"] | components["schemas"]["FluxLoRALoaderInvocation"] | components["schemas"]["FluxModelLoaderInvocation"] | components["schemas"]["FluxReduxInvocation"] | components["schemas"]["FluxTextEncoderInvocation"] | components["schemas"]["FluxVaeDecodeInvocation"] | components["schemas"]["FluxVaeEncodeInvocation"] | components["schemas"]["FreeUInvocation"] | components["schemas"]["GetMaskBoundingBoxInvocation"] | components["schemas"]["GroundingDinoInvocation"] | 
components["schemas"]["HEDEdgeDetectionInvocation"] | components["schemas"]["HeuristicResizeInvocation"] | components["schemas"]["IPAdapterInvocation"] | components["schemas"]["IdealSizeInvocation"] | components["schemas"]["ImageBatchInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["ImageChannelMultiplyInvocation"] | components["schemas"]["ImageChannelOffsetInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImageGenerator"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ImageInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ImageMaskToTensorInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["ImageNoiseInvocation"] | components["schemas"]["ImagePanelLayoutInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["IntegerBatchInvocation"] | components["schemas"]["IntegerCollectionInvocation"] | components["schemas"]["IntegerGenerator"] | components["schemas"]["IntegerInvocation"] | components["schemas"]["IntegerMathInvocation"] | components["schemas"]["InvertTensorMaskInvocation"] | components["schemas"]["InvokeAdjustImageHuePlusInvocation"] | components["schemas"]["InvokeEquivalentAchromaticLightnessInvocation"] | components["schemas"]["InvokeImageBlendInvocation"] | 
components["schemas"]["InvokeImageCompositorInvocation"] | components["schemas"]["InvokeImageDilateOrErodeInvocation"] | components["schemas"]["InvokeImageEnhanceInvocation"] | components["schemas"]["InvokeImageValueThresholdsInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["LaMaInfillInvocation"] | components["schemas"]["LatentsCollectionInvocation"] | components["schemas"]["LatentsInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["LineartAnimeEdgeDetectionInvocation"] | components["schemas"]["LineartEdgeDetectionInvocation"] | components["schemas"]["LlavaOnevisionVllmInvocation"] | components["schemas"]["LoRACollectionLoader"] | components["schemas"]["LoRALoaderInvocation"] | components["schemas"]["LoRASelectorInvocation"] | components["schemas"]["MLSDDetectionInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["MaskFromIDInvocation"] | components["schemas"]["MaskTensorToImageInvocation"] | components["schemas"]["MediaPipeFaceDetectionInvocation"] | components["schemas"]["MergeMetadataInvocation"] | components["schemas"]["MergeTilesToImageInvocation"] | components["schemas"]["MetadataFieldExtractorInvocation"] | components["schemas"]["MetadataFromImageInvocation"] | components["schemas"]["MetadataInvocation"] | components["schemas"]["MetadataItemInvocation"] | components["schemas"]["MetadataItemLinkedInvocation"] | components["schemas"]["MetadataToBoolCollectionInvocation"] | components["schemas"]["MetadataToBoolInvocation"] | components["schemas"]["MetadataToControlnetsInvocation"] | components["schemas"]["MetadataToFloatCollectionInvocation"] | components["schemas"]["MetadataToFloatInvocation"] | components["schemas"]["MetadataToIPAdaptersInvocation"] | 
components["schemas"]["MetadataToIntegerCollectionInvocation"] | components["schemas"]["MetadataToIntegerInvocation"] | components["schemas"]["MetadataToLorasCollectionInvocation"] | components["schemas"]["MetadataToLorasInvocation"] | components["schemas"]["MetadataToModelInvocation"] | components["schemas"]["MetadataToSDXLLorasInvocation"] | components["schemas"]["MetadataToSDXLModelInvocation"] | components["schemas"]["MetadataToSchedulerInvocation"] | components["schemas"]["MetadataToStringCollectionInvocation"] | components["schemas"]["MetadataToStringInvocation"] | components["schemas"]["MetadataToT2IAdaptersInvocation"] | components["schemas"]["MetadataToVAEInvocation"] | components["schemas"]["ModelIdentifierInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["NormalMapInvocation"] | components["schemas"]["PBRMapsInvocation"] | components["schemas"]["PairTileImageInvocation"] | components["schemas"]["PasteImageIntoBoundingBoxInvocation"] | components["schemas"]["PiDiNetEdgeDetectionInvocation"] | components["schemas"]["PromptTemplateInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["RandomFloatInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RectangleMaskInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["RoundInvocation"] | components["schemas"]["SD3DenoiseInvocation"] | components["schemas"]["SD3ImageToLatentsInvocation"] | components["schemas"]["SD3LatentsToImageInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["SDXLLoRACollectionLoader"] | components["schemas"]["SDXLLoRALoaderInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | 
components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["SaveImageInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["SchedulerInvocation"] | components["schemas"]["Sd3ModelLoaderInvocation"] | components["schemas"]["Sd3TextEncoderInvocation"] | components["schemas"]["SeamlessModeInvocation"] | components["schemas"]["SegmentAnythingInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["SpandrelImageToImageAutoscaleInvocation"] | components["schemas"]["SpandrelImageToImageInvocation"] | components["schemas"]["StringBatchInvocation"] | components["schemas"]["StringCollectionInvocation"] | components["schemas"]["StringGenerator"] | components["schemas"]["StringInvocation"] | components["schemas"]["StringJoinInvocation"] | components["schemas"]["StringJoinThreeInvocation"] | components["schemas"]["StringReplaceInvocation"] | components["schemas"]["StringSplitInvocation"] | components["schemas"]["StringSplitNegInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["T2IAdapterInvocation"] | components["schemas"]["TileToPropertiesInvocation"] | components["schemas"]["TiledMultiDiffusionDenoiseLatents"] | components["schemas"]["UnsharpMaskInvocation"] | components["schemas"]["VAELoaderInvocation"] | components["schemas"]["ZImageControlInvocation"] | components["schemas"]["ZImageDenoiseInvocation"] | components["schemas"]["ZImageDenoiseMetaInvocation"] | components["schemas"]["ZImageImageToLatentsInvocation"] | components["schemas"]["ZImageLatentsToImageInvocation"] | components["schemas"]["ZImageLoRACollectionLoader"] | components["schemas"]["ZImageLoRALoaderInvocation"] | components["schemas"]["ZImageModelLoaderInvocation"] | components["schemas"]["ZImageSeedVarianceEnhancerInvocation"] | components["schemas"]["ZImageTextEncoderInvocation"]; /** * Invocation Source Id * @description The ID of 
the prepared invocation's source node @@ -14350,6 +14474,8 @@ export type components = { anima_denoise: components["schemas"]["LatentsOutput"]; anima_i2l: components["schemas"]["LatentsOutput"]; anima_l2i: components["schemas"]["ImageOutput"]; + anima_lora_collection_loader: components["schemas"]["AnimaLoRALoaderOutput"]; + anima_lora_loader: components["schemas"]["AnimaLoRALoaderOutput"]; anima_model_loader: components["schemas"]["AnimaModelLoaderOutput"]; anima_text_encoder: components["schemas"]["AnimaConditioningOutput"]; apply_mask_to_image: components["schemas"]["ImageOutput"]; @@ -14633,7 +14759,7 @@ export type components = { * Invocation * @description The ID of the invocation */ - invocation: components["schemas"]["AddInvocation"] | components["schemas"]["AlphaMaskToTensorInvocation"] | components["schemas"]["AnimaDenoiseInvocation"] | components["schemas"]["AnimaImageToLatentsInvocation"] | components["schemas"]["AnimaLatentsToImageInvocation"] | components["schemas"]["AnimaModelLoaderInvocation"] | components["schemas"]["AnimaTextEncoderInvocation"] | components["schemas"]["ApplyMaskTensorToImageInvocation"] | components["schemas"]["ApplyMaskToImageInvocation"] | components["schemas"]["BlankImageInvocation"] | components["schemas"]["BlendLatentsInvocation"] | components["schemas"]["BooleanCollectionInvocation"] | components["schemas"]["BooleanInvocation"] | components["schemas"]["BoundingBoxInvocation"] | components["schemas"]["CLIPSkipInvocation"] | components["schemas"]["CV2InfillInvocation"] | components["schemas"]["CalculateImageTilesEvenSplitInvocation"] | components["schemas"]["CalculateImageTilesInvocation"] | components["schemas"]["CalculateImageTilesMinimumOverlapInvocation"] | components["schemas"]["CannyEdgeDetectionInvocation"] | components["schemas"]["CanvasPasteBackInvocation"] | components["schemas"]["CanvasV2MaskAndCropInvocation"] | components["schemas"]["CenterPadCropInvocation"] | components["schemas"]["CogView4DenoiseInvocation"] | 
components["schemas"]["CogView4ImageToLatentsInvocation"] | components["schemas"]["CogView4LatentsToImageInvocation"] | components["schemas"]["CogView4ModelLoaderInvocation"] | components["schemas"]["CogView4TextEncoderInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["ColorInvocation"] | components["schemas"]["ColorMapInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["ConditioningCollectionInvocation"] | components["schemas"]["ConditioningInvocation"] | components["schemas"]["ContentShuffleInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["CoreMetadataInvocation"] | components["schemas"]["CreateDenoiseMaskInvocation"] | components["schemas"]["CreateGradientMaskInvocation"] | components["schemas"]["CropImageToBoundingBoxInvocation"] | components["schemas"]["CropLatentsCoreInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["DWOpenposeDetectionInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["DenoiseLatentsMetaInvocation"] | components["schemas"]["DepthAnythingDepthEstimationInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["ExpandMaskWithFadeInvocation"] | components["schemas"]["FLUXLoRACollectionLoader"] | components["schemas"]["FaceIdentifierInvocation"] | components["schemas"]["FaceMaskInvocation"] | components["schemas"]["FaceOffInvocation"] | components["schemas"]["FloatBatchInvocation"] | components["schemas"]["FloatCollectionInvocation"] | components["schemas"]["FloatGenerator"] | components["schemas"]["FloatInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["FloatMathInvocation"] | components["schemas"]["FloatToIntegerInvocation"] | 
components["schemas"]["Flux2DenoiseInvocation"] | components["schemas"]["Flux2KleinLoRACollectionLoader"] | components["schemas"]["Flux2KleinLoRALoaderInvocation"] | components["schemas"]["Flux2KleinModelLoaderInvocation"] | components["schemas"]["Flux2KleinTextEncoderInvocation"] | components["schemas"]["Flux2VaeDecodeInvocation"] | components["schemas"]["Flux2VaeEncodeInvocation"] | components["schemas"]["FluxControlLoRALoaderInvocation"] | components["schemas"]["FluxControlNetInvocation"] | components["schemas"]["FluxDenoiseInvocation"] | components["schemas"]["FluxDenoiseLatentsMetaInvocation"] | components["schemas"]["FluxFillInvocation"] | components["schemas"]["FluxIPAdapterInvocation"] | components["schemas"]["FluxKontextConcatenateImagesInvocation"] | components["schemas"]["FluxKontextInvocation"] | components["schemas"]["FluxLoRALoaderInvocation"] | components["schemas"]["FluxModelLoaderInvocation"] | components["schemas"]["FluxReduxInvocation"] | components["schemas"]["FluxTextEncoderInvocation"] | components["schemas"]["FluxVaeDecodeInvocation"] | components["schemas"]["FluxVaeEncodeInvocation"] | components["schemas"]["FreeUInvocation"] | components["schemas"]["GetMaskBoundingBoxInvocation"] | components["schemas"]["GroundingDinoInvocation"] | components["schemas"]["HEDEdgeDetectionInvocation"] | components["schemas"]["HeuristicResizeInvocation"] | components["schemas"]["IPAdapterInvocation"] | components["schemas"]["IdealSizeInvocation"] | components["schemas"]["ImageBatchInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["ImageChannelMultiplyInvocation"] | components["schemas"]["ImageChannelOffsetInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImageGenerator"] | components["schemas"]["ImageHueAdjustmentInvocation"] | 
components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ImageInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ImageMaskToTensorInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["ImageNoiseInvocation"] | components["schemas"]["ImagePanelLayoutInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["IntegerBatchInvocation"] | components["schemas"]["IntegerCollectionInvocation"] | components["schemas"]["IntegerGenerator"] | components["schemas"]["IntegerInvocation"] | components["schemas"]["IntegerMathInvocation"] | components["schemas"]["InvertTensorMaskInvocation"] | components["schemas"]["InvokeAdjustImageHuePlusInvocation"] | components["schemas"]["InvokeEquivalentAchromaticLightnessInvocation"] | components["schemas"]["InvokeImageBlendInvocation"] | components["schemas"]["InvokeImageCompositorInvocation"] | components["schemas"]["InvokeImageDilateOrErodeInvocation"] | components["schemas"]["InvokeImageEnhanceInvocation"] | components["schemas"]["InvokeImageValueThresholdsInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["LaMaInfillInvocation"] | components["schemas"]["LatentsCollectionInvocation"] | components["schemas"]["LatentsInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["LineartAnimeEdgeDetectionInvocation"] | components["schemas"]["LineartEdgeDetectionInvocation"] | components["schemas"]["LlavaOnevisionVllmInvocation"] | components["schemas"]["LoRACollectionLoader"] | 
components["schemas"]["LoRALoaderInvocation"] | components["schemas"]["LoRASelectorInvocation"] | components["schemas"]["MLSDDetectionInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["MaskFromIDInvocation"] | components["schemas"]["MaskTensorToImageInvocation"] | components["schemas"]["MediaPipeFaceDetectionInvocation"] | components["schemas"]["MergeMetadataInvocation"] | components["schemas"]["MergeTilesToImageInvocation"] | components["schemas"]["MetadataFieldExtractorInvocation"] | components["schemas"]["MetadataFromImageInvocation"] | components["schemas"]["MetadataInvocation"] | components["schemas"]["MetadataItemInvocation"] | components["schemas"]["MetadataItemLinkedInvocation"] | components["schemas"]["MetadataToBoolCollectionInvocation"] | components["schemas"]["MetadataToBoolInvocation"] | components["schemas"]["MetadataToControlnetsInvocation"] | components["schemas"]["MetadataToFloatCollectionInvocation"] | components["schemas"]["MetadataToFloatInvocation"] | components["schemas"]["MetadataToIPAdaptersInvocation"] | components["schemas"]["MetadataToIntegerCollectionInvocation"] | components["schemas"]["MetadataToIntegerInvocation"] | components["schemas"]["MetadataToLorasCollectionInvocation"] | components["schemas"]["MetadataToLorasInvocation"] | components["schemas"]["MetadataToModelInvocation"] | components["schemas"]["MetadataToSDXLLorasInvocation"] | components["schemas"]["MetadataToSDXLModelInvocation"] | components["schemas"]["MetadataToSchedulerInvocation"] | components["schemas"]["MetadataToStringCollectionInvocation"] | components["schemas"]["MetadataToStringInvocation"] | components["schemas"]["MetadataToT2IAdaptersInvocation"] | components["schemas"]["MetadataToVAEInvocation"] | components["schemas"]["ModelIdentifierInvocation"] | 
components["schemas"]["MultiplyInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["NormalMapInvocation"] | components["schemas"]["PBRMapsInvocation"] | components["schemas"]["PairTileImageInvocation"] | components["schemas"]["PasteImageIntoBoundingBoxInvocation"] | components["schemas"]["PiDiNetEdgeDetectionInvocation"] | components["schemas"]["PromptTemplateInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["RandomFloatInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RectangleMaskInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["RoundInvocation"] | components["schemas"]["SD3DenoiseInvocation"] | components["schemas"]["SD3ImageToLatentsInvocation"] | components["schemas"]["SD3LatentsToImageInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["SDXLLoRACollectionLoader"] | components["schemas"]["SDXLLoRALoaderInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["SaveImageInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["SchedulerInvocation"] | components["schemas"]["Sd3ModelLoaderInvocation"] | components["schemas"]["Sd3TextEncoderInvocation"] | components["schemas"]["SeamlessModeInvocation"] | components["schemas"]["SegmentAnythingInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["SpandrelImageToImageAutoscaleInvocation"] | components["schemas"]["SpandrelImageToImageInvocation"] | components["schemas"]["StringBatchInvocation"] | components["schemas"]["StringCollectionInvocation"] | components["schemas"]["StringGenerator"] | 
components["schemas"]["StringInvocation"] | components["schemas"]["StringJoinInvocation"] | components["schemas"]["StringJoinThreeInvocation"] | components["schemas"]["StringReplaceInvocation"] | components["schemas"]["StringSplitInvocation"] | components["schemas"]["StringSplitNegInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["T2IAdapterInvocation"] | components["schemas"]["TileToPropertiesInvocation"] | components["schemas"]["TiledMultiDiffusionDenoiseLatents"] | components["schemas"]["UnsharpMaskInvocation"] | components["schemas"]["VAELoaderInvocation"] | components["schemas"]["ZImageControlInvocation"] | components["schemas"]["ZImageDenoiseInvocation"] | components["schemas"]["ZImageDenoiseMetaInvocation"] | components["schemas"]["ZImageImageToLatentsInvocation"] | components["schemas"]["ZImageLatentsToImageInvocation"] | components["schemas"]["ZImageLoRACollectionLoader"] | components["schemas"]["ZImageLoRALoaderInvocation"] | components["schemas"]["ZImageModelLoaderInvocation"] | components["schemas"]["ZImageSeedVarianceEnhancerInvocation"] | components["schemas"]["ZImageTextEncoderInvocation"]; + invocation: components["schemas"]["AddInvocation"] | components["schemas"]["AlphaMaskToTensorInvocation"] | components["schemas"]["AnimaDenoiseInvocation"] | components["schemas"]["AnimaImageToLatentsInvocation"] | components["schemas"]["AnimaLatentsToImageInvocation"] | components["schemas"]["AnimaLoRACollectionLoader"] | components["schemas"]["AnimaLoRALoaderInvocation"] | components["schemas"]["AnimaModelLoaderInvocation"] | components["schemas"]["AnimaTextEncoderInvocation"] | components["schemas"]["ApplyMaskTensorToImageInvocation"] | components["schemas"]["ApplyMaskToImageInvocation"] | components["schemas"]["BlankImageInvocation"] | components["schemas"]["BlendLatentsInvocation"] | components["schemas"]["BooleanCollectionInvocation"] | components["schemas"]["BooleanInvocation"] | 
components["schemas"]["BoundingBoxInvocation"] | components["schemas"]["CLIPSkipInvocation"] | components["schemas"]["CV2InfillInvocation"] | components["schemas"]["CalculateImageTilesEvenSplitInvocation"] | components["schemas"]["CalculateImageTilesInvocation"] | components["schemas"]["CalculateImageTilesMinimumOverlapInvocation"] | components["schemas"]["CannyEdgeDetectionInvocation"] | components["schemas"]["CanvasPasteBackInvocation"] | components["schemas"]["CanvasV2MaskAndCropInvocation"] | components["schemas"]["CenterPadCropInvocation"] | components["schemas"]["CogView4DenoiseInvocation"] | components["schemas"]["CogView4ImageToLatentsInvocation"] | components["schemas"]["CogView4LatentsToImageInvocation"] | components["schemas"]["CogView4ModelLoaderInvocation"] | components["schemas"]["CogView4TextEncoderInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["ColorInvocation"] | components["schemas"]["ColorMapInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["ConditioningCollectionInvocation"] | components["schemas"]["ConditioningInvocation"] | components["schemas"]["ContentShuffleInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["CoreMetadataInvocation"] | components["schemas"]["CreateDenoiseMaskInvocation"] | components["schemas"]["CreateGradientMaskInvocation"] | components["schemas"]["CropImageToBoundingBoxInvocation"] | components["schemas"]["CropLatentsCoreInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["DWOpenposeDetectionInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["DenoiseLatentsMetaInvocation"] | components["schemas"]["DepthAnythingDepthEstimationInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["ESRGANInvocation"] | 
components["schemas"]["ExpandMaskWithFadeInvocation"] | components["schemas"]["FLUXLoRACollectionLoader"] | components["schemas"]["FaceIdentifierInvocation"] | components["schemas"]["FaceMaskInvocation"] | components["schemas"]["FaceOffInvocation"] | components["schemas"]["FloatBatchInvocation"] | components["schemas"]["FloatCollectionInvocation"] | components["schemas"]["FloatGenerator"] | components["schemas"]["FloatInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["FloatMathInvocation"] | components["schemas"]["FloatToIntegerInvocation"] | components["schemas"]["Flux2DenoiseInvocation"] | components["schemas"]["Flux2KleinLoRACollectionLoader"] | components["schemas"]["Flux2KleinLoRALoaderInvocation"] | components["schemas"]["Flux2KleinModelLoaderInvocation"] | components["schemas"]["Flux2KleinTextEncoderInvocation"] | components["schemas"]["Flux2VaeDecodeInvocation"] | components["schemas"]["Flux2VaeEncodeInvocation"] | components["schemas"]["FluxControlLoRALoaderInvocation"] | components["schemas"]["FluxControlNetInvocation"] | components["schemas"]["FluxDenoiseInvocation"] | components["schemas"]["FluxDenoiseLatentsMetaInvocation"] | components["schemas"]["FluxFillInvocation"] | components["schemas"]["FluxIPAdapterInvocation"] | components["schemas"]["FluxKontextConcatenateImagesInvocation"] | components["schemas"]["FluxKontextInvocation"] | components["schemas"]["FluxLoRALoaderInvocation"] | components["schemas"]["FluxModelLoaderInvocation"] | components["schemas"]["FluxReduxInvocation"] | components["schemas"]["FluxTextEncoderInvocation"] | components["schemas"]["FluxVaeDecodeInvocation"] | components["schemas"]["FluxVaeEncodeInvocation"] | components["schemas"]["FreeUInvocation"] | components["schemas"]["GetMaskBoundingBoxInvocation"] | components["schemas"]["GroundingDinoInvocation"] | components["schemas"]["HEDEdgeDetectionInvocation"] | components["schemas"]["HeuristicResizeInvocation"] | 
components["schemas"]["IPAdapterInvocation"] | components["schemas"]["IdealSizeInvocation"] | components["schemas"]["ImageBatchInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["ImageChannelMultiplyInvocation"] | components["schemas"]["ImageChannelOffsetInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImageGenerator"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ImageInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ImageMaskToTensorInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["ImageNoiseInvocation"] | components["schemas"]["ImagePanelLayoutInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["IntegerBatchInvocation"] | components["schemas"]["IntegerCollectionInvocation"] | components["schemas"]["IntegerGenerator"] | components["schemas"]["IntegerInvocation"] | components["schemas"]["IntegerMathInvocation"] | components["schemas"]["InvertTensorMaskInvocation"] | components["schemas"]["InvokeAdjustImageHuePlusInvocation"] | components["schemas"]["InvokeEquivalentAchromaticLightnessInvocation"] | components["schemas"]["InvokeImageBlendInvocation"] | components["schemas"]["InvokeImageCompositorInvocation"] | 
components["schemas"]["InvokeImageDilateOrErodeInvocation"] | components["schemas"]["InvokeImageEnhanceInvocation"] | components["schemas"]["InvokeImageValueThresholdsInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["LaMaInfillInvocation"] | components["schemas"]["LatentsCollectionInvocation"] | components["schemas"]["LatentsInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["LineartAnimeEdgeDetectionInvocation"] | components["schemas"]["LineartEdgeDetectionInvocation"] | components["schemas"]["LlavaOnevisionVllmInvocation"] | components["schemas"]["LoRACollectionLoader"] | components["schemas"]["LoRALoaderInvocation"] | components["schemas"]["LoRASelectorInvocation"] | components["schemas"]["MLSDDetectionInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["MaskFromIDInvocation"] | components["schemas"]["MaskTensorToImageInvocation"] | components["schemas"]["MediaPipeFaceDetectionInvocation"] | components["schemas"]["MergeMetadataInvocation"] | components["schemas"]["MergeTilesToImageInvocation"] | components["schemas"]["MetadataFieldExtractorInvocation"] | components["schemas"]["MetadataFromImageInvocation"] | components["schemas"]["MetadataInvocation"] | components["schemas"]["MetadataItemInvocation"] | components["schemas"]["MetadataItemLinkedInvocation"] | components["schemas"]["MetadataToBoolCollectionInvocation"] | components["schemas"]["MetadataToBoolInvocation"] | components["schemas"]["MetadataToControlnetsInvocation"] | components["schemas"]["MetadataToFloatCollectionInvocation"] | components["schemas"]["MetadataToFloatInvocation"] | components["schemas"]["MetadataToIPAdaptersInvocation"] | components["schemas"]["MetadataToIntegerCollectionInvocation"] | 
components["schemas"]["MetadataToIntegerInvocation"] | components["schemas"]["MetadataToLorasCollectionInvocation"] | components["schemas"]["MetadataToLorasInvocation"] | components["schemas"]["MetadataToModelInvocation"] | components["schemas"]["MetadataToSDXLLorasInvocation"] | components["schemas"]["MetadataToSDXLModelInvocation"] | components["schemas"]["MetadataToSchedulerInvocation"] | components["schemas"]["MetadataToStringCollectionInvocation"] | components["schemas"]["MetadataToStringInvocation"] | components["schemas"]["MetadataToT2IAdaptersInvocation"] | components["schemas"]["MetadataToVAEInvocation"] | components["schemas"]["ModelIdentifierInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["NormalMapInvocation"] | components["schemas"]["PBRMapsInvocation"] | components["schemas"]["PairTileImageInvocation"] | components["schemas"]["PasteImageIntoBoundingBoxInvocation"] | components["schemas"]["PiDiNetEdgeDetectionInvocation"] | components["schemas"]["PromptTemplateInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["RandomFloatInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RectangleMaskInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["RoundInvocation"] | components["schemas"]["SD3DenoiseInvocation"] | components["schemas"]["SD3ImageToLatentsInvocation"] | components["schemas"]["SD3LatentsToImageInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["SDXLLoRACollectionLoader"] | components["schemas"]["SDXLLoRALoaderInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | 
components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["SaveImageInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["SchedulerInvocation"] | components["schemas"]["Sd3ModelLoaderInvocation"] | components["schemas"]["Sd3TextEncoderInvocation"] | components["schemas"]["SeamlessModeInvocation"] | components["schemas"]["SegmentAnythingInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["SpandrelImageToImageAutoscaleInvocation"] | components["schemas"]["SpandrelImageToImageInvocation"] | components["schemas"]["StringBatchInvocation"] | components["schemas"]["StringCollectionInvocation"] | components["schemas"]["StringGenerator"] | components["schemas"]["StringInvocation"] | components["schemas"]["StringJoinInvocation"] | components["schemas"]["StringJoinThreeInvocation"] | components["schemas"]["StringReplaceInvocation"] | components["schemas"]["StringSplitInvocation"] | components["schemas"]["StringSplitNegInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["T2IAdapterInvocation"] | components["schemas"]["TileToPropertiesInvocation"] | components["schemas"]["TiledMultiDiffusionDenoiseLatents"] | components["schemas"]["UnsharpMaskInvocation"] | components["schemas"]["VAELoaderInvocation"] | components["schemas"]["ZImageControlInvocation"] | components["schemas"]["ZImageDenoiseInvocation"] | components["schemas"]["ZImageDenoiseMetaInvocation"] | components["schemas"]["ZImageImageToLatentsInvocation"] | components["schemas"]["ZImageLatentsToImageInvocation"] | components["schemas"]["ZImageLoRACollectionLoader"] | components["schemas"]["ZImageLoRALoaderInvocation"] | components["schemas"]["ZImageModelLoaderInvocation"] | components["schemas"]["ZImageSeedVarianceEnhancerInvocation"] | components["schemas"]["ZImageTextEncoderInvocation"]; /** * Invocation Source Id * @description The ID of the prepared invocation's source node @@ -14708,7 +14834,7 @@ 
export type components = { * Invocation * @description The ID of the invocation */ - invocation: components["schemas"]["AddInvocation"] | components["schemas"]["AlphaMaskToTensorInvocation"] | components["schemas"]["AnimaDenoiseInvocation"] | components["schemas"]["AnimaImageToLatentsInvocation"] | components["schemas"]["AnimaLatentsToImageInvocation"] | components["schemas"]["AnimaModelLoaderInvocation"] | components["schemas"]["AnimaTextEncoderInvocation"] | components["schemas"]["ApplyMaskTensorToImageInvocation"] | components["schemas"]["ApplyMaskToImageInvocation"] | components["schemas"]["BlankImageInvocation"] | components["schemas"]["BlendLatentsInvocation"] | components["schemas"]["BooleanCollectionInvocation"] | components["schemas"]["BooleanInvocation"] | components["schemas"]["BoundingBoxInvocation"] | components["schemas"]["CLIPSkipInvocation"] | components["schemas"]["CV2InfillInvocation"] | components["schemas"]["CalculateImageTilesEvenSplitInvocation"] | components["schemas"]["CalculateImageTilesInvocation"] | components["schemas"]["CalculateImageTilesMinimumOverlapInvocation"] | components["schemas"]["CannyEdgeDetectionInvocation"] | components["schemas"]["CanvasPasteBackInvocation"] | components["schemas"]["CanvasV2MaskAndCropInvocation"] | components["schemas"]["CenterPadCropInvocation"] | components["schemas"]["CogView4DenoiseInvocation"] | components["schemas"]["CogView4ImageToLatentsInvocation"] | components["schemas"]["CogView4LatentsToImageInvocation"] | components["schemas"]["CogView4ModelLoaderInvocation"] | components["schemas"]["CogView4TextEncoderInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["ColorInvocation"] | components["schemas"]["ColorMapInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["ConditioningCollectionInvocation"] | components["schemas"]["ConditioningInvocation"] | 
components["schemas"]["ContentShuffleInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["CoreMetadataInvocation"] | components["schemas"]["CreateDenoiseMaskInvocation"] | components["schemas"]["CreateGradientMaskInvocation"] | components["schemas"]["CropImageToBoundingBoxInvocation"] | components["schemas"]["CropLatentsCoreInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["DWOpenposeDetectionInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["DenoiseLatentsMetaInvocation"] | components["schemas"]["DepthAnythingDepthEstimationInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["ExpandMaskWithFadeInvocation"] | components["schemas"]["FLUXLoRACollectionLoader"] | components["schemas"]["FaceIdentifierInvocation"] | components["schemas"]["FaceMaskInvocation"] | components["schemas"]["FaceOffInvocation"] | components["schemas"]["FloatBatchInvocation"] | components["schemas"]["FloatCollectionInvocation"] | components["schemas"]["FloatGenerator"] | components["schemas"]["FloatInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["FloatMathInvocation"] | components["schemas"]["FloatToIntegerInvocation"] | components["schemas"]["Flux2DenoiseInvocation"] | components["schemas"]["Flux2KleinLoRACollectionLoader"] | components["schemas"]["Flux2KleinLoRALoaderInvocation"] | components["schemas"]["Flux2KleinModelLoaderInvocation"] | components["schemas"]["Flux2KleinTextEncoderInvocation"] | components["schemas"]["Flux2VaeDecodeInvocation"] | components["schemas"]["Flux2VaeEncodeInvocation"] | components["schemas"]["FluxControlLoRALoaderInvocation"] | components["schemas"]["FluxControlNetInvocation"] | components["schemas"]["FluxDenoiseInvocation"] | components["schemas"]["FluxDenoiseLatentsMetaInvocation"] | 
components["schemas"]["FluxFillInvocation"] | components["schemas"]["FluxIPAdapterInvocation"] | components["schemas"]["FluxKontextConcatenateImagesInvocation"] | components["schemas"]["FluxKontextInvocation"] | components["schemas"]["FluxLoRALoaderInvocation"] | components["schemas"]["FluxModelLoaderInvocation"] | components["schemas"]["FluxReduxInvocation"] | components["schemas"]["FluxTextEncoderInvocation"] | components["schemas"]["FluxVaeDecodeInvocation"] | components["schemas"]["FluxVaeEncodeInvocation"] | components["schemas"]["FreeUInvocation"] | components["schemas"]["GetMaskBoundingBoxInvocation"] | components["schemas"]["GroundingDinoInvocation"] | components["schemas"]["HEDEdgeDetectionInvocation"] | components["schemas"]["HeuristicResizeInvocation"] | components["schemas"]["IPAdapterInvocation"] | components["schemas"]["IdealSizeInvocation"] | components["schemas"]["ImageBatchInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["ImageChannelMultiplyInvocation"] | components["schemas"]["ImageChannelOffsetInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImageGenerator"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ImageInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ImageMaskToTensorInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["ImageNoiseInvocation"] | components["schemas"]["ImagePanelLayoutInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | 
components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["IntegerBatchInvocation"] | components["schemas"]["IntegerCollectionInvocation"] | components["schemas"]["IntegerGenerator"] | components["schemas"]["IntegerInvocation"] | components["schemas"]["IntegerMathInvocation"] | components["schemas"]["InvertTensorMaskInvocation"] | components["schemas"]["InvokeAdjustImageHuePlusInvocation"] | components["schemas"]["InvokeEquivalentAchromaticLightnessInvocation"] | components["schemas"]["InvokeImageBlendInvocation"] | components["schemas"]["InvokeImageCompositorInvocation"] | components["schemas"]["InvokeImageDilateOrErodeInvocation"] | components["schemas"]["InvokeImageEnhanceInvocation"] | components["schemas"]["InvokeImageValueThresholdsInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["LaMaInfillInvocation"] | components["schemas"]["LatentsCollectionInvocation"] | components["schemas"]["LatentsInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["LineartAnimeEdgeDetectionInvocation"] | components["schemas"]["LineartEdgeDetectionInvocation"] | components["schemas"]["LlavaOnevisionVllmInvocation"] | components["schemas"]["LoRACollectionLoader"] | components["schemas"]["LoRALoaderInvocation"] | components["schemas"]["LoRASelectorInvocation"] | components["schemas"]["MLSDDetectionInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["MaskFromIDInvocation"] | components["schemas"]["MaskTensorToImageInvocation"] | components["schemas"]["MediaPipeFaceDetectionInvocation"] | components["schemas"]["MergeMetadataInvocation"] | 
components["schemas"]["MergeTilesToImageInvocation"] | components["schemas"]["MetadataFieldExtractorInvocation"] | components["schemas"]["MetadataFromImageInvocation"] | components["schemas"]["MetadataInvocation"] | components["schemas"]["MetadataItemInvocation"] | components["schemas"]["MetadataItemLinkedInvocation"] | components["schemas"]["MetadataToBoolCollectionInvocation"] | components["schemas"]["MetadataToBoolInvocation"] | components["schemas"]["MetadataToControlnetsInvocation"] | components["schemas"]["MetadataToFloatCollectionInvocation"] | components["schemas"]["MetadataToFloatInvocation"] | components["schemas"]["MetadataToIPAdaptersInvocation"] | components["schemas"]["MetadataToIntegerCollectionInvocation"] | components["schemas"]["MetadataToIntegerInvocation"] | components["schemas"]["MetadataToLorasCollectionInvocation"] | components["schemas"]["MetadataToLorasInvocation"] | components["schemas"]["MetadataToModelInvocation"] | components["schemas"]["MetadataToSDXLLorasInvocation"] | components["schemas"]["MetadataToSDXLModelInvocation"] | components["schemas"]["MetadataToSchedulerInvocation"] | components["schemas"]["MetadataToStringCollectionInvocation"] | components["schemas"]["MetadataToStringInvocation"] | components["schemas"]["MetadataToT2IAdaptersInvocation"] | components["schemas"]["MetadataToVAEInvocation"] | components["schemas"]["ModelIdentifierInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["NormalMapInvocation"] | components["schemas"]["PBRMapsInvocation"] | components["schemas"]["PairTileImageInvocation"] | components["schemas"]["PasteImageIntoBoundingBoxInvocation"] | components["schemas"]["PiDiNetEdgeDetectionInvocation"] | components["schemas"]["PromptTemplateInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["RandomFloatInvocation"] | components["schemas"]["RandomIntInvocation"] | 
components["schemas"]["RandomRangeInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RectangleMaskInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["RoundInvocation"] | components["schemas"]["SD3DenoiseInvocation"] | components["schemas"]["SD3ImageToLatentsInvocation"] | components["schemas"]["SD3LatentsToImageInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["SDXLLoRACollectionLoader"] | components["schemas"]["SDXLLoRALoaderInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["SaveImageInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["SchedulerInvocation"] | components["schemas"]["Sd3ModelLoaderInvocation"] | components["schemas"]["Sd3TextEncoderInvocation"] | components["schemas"]["SeamlessModeInvocation"] | components["schemas"]["SegmentAnythingInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["SpandrelImageToImageAutoscaleInvocation"] | components["schemas"]["SpandrelImageToImageInvocation"] | components["schemas"]["StringBatchInvocation"] | components["schemas"]["StringCollectionInvocation"] | components["schemas"]["StringGenerator"] | components["schemas"]["StringInvocation"] | components["schemas"]["StringJoinInvocation"] | components["schemas"]["StringJoinThreeInvocation"] | components["schemas"]["StringReplaceInvocation"] | components["schemas"]["StringSplitInvocation"] | components["schemas"]["StringSplitNegInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["T2IAdapterInvocation"] | components["schemas"]["TileToPropertiesInvocation"] | components["schemas"]["TiledMultiDiffusionDenoiseLatents"] | components["schemas"]["UnsharpMaskInvocation"] | 
components["schemas"]["VAELoaderInvocation"] | components["schemas"]["ZImageControlInvocation"] | components["schemas"]["ZImageDenoiseInvocation"] | components["schemas"]["ZImageDenoiseMetaInvocation"] | components["schemas"]["ZImageImageToLatentsInvocation"] | components["schemas"]["ZImageLatentsToImageInvocation"] | components["schemas"]["ZImageLoRACollectionLoader"] | components["schemas"]["ZImageLoRALoaderInvocation"] | components["schemas"]["ZImageModelLoaderInvocation"] | components["schemas"]["ZImageSeedVarianceEnhancerInvocation"] | components["schemas"]["ZImageTextEncoderInvocation"]; + invocation: components["schemas"]["AddInvocation"] | components["schemas"]["AlphaMaskToTensorInvocation"] | components["schemas"]["AnimaDenoiseInvocation"] | components["schemas"]["AnimaImageToLatentsInvocation"] | components["schemas"]["AnimaLatentsToImageInvocation"] | components["schemas"]["AnimaLoRACollectionLoader"] | components["schemas"]["AnimaLoRALoaderInvocation"] | components["schemas"]["AnimaModelLoaderInvocation"] | components["schemas"]["AnimaTextEncoderInvocation"] | components["schemas"]["ApplyMaskTensorToImageInvocation"] | components["schemas"]["ApplyMaskToImageInvocation"] | components["schemas"]["BlankImageInvocation"] | components["schemas"]["BlendLatentsInvocation"] | components["schemas"]["BooleanCollectionInvocation"] | components["schemas"]["BooleanInvocation"] | components["schemas"]["BoundingBoxInvocation"] | components["schemas"]["CLIPSkipInvocation"] | components["schemas"]["CV2InfillInvocation"] | components["schemas"]["CalculateImageTilesEvenSplitInvocation"] | components["schemas"]["CalculateImageTilesInvocation"] | components["schemas"]["CalculateImageTilesMinimumOverlapInvocation"] | components["schemas"]["CannyEdgeDetectionInvocation"] | components["schemas"]["CanvasPasteBackInvocation"] | components["schemas"]["CanvasV2MaskAndCropInvocation"] | components["schemas"]["CenterPadCropInvocation"] | 
components["schemas"]["CogView4DenoiseInvocation"] | components["schemas"]["CogView4ImageToLatentsInvocation"] | components["schemas"]["CogView4LatentsToImageInvocation"] | components["schemas"]["CogView4ModelLoaderInvocation"] | components["schemas"]["CogView4TextEncoderInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["ColorInvocation"] | components["schemas"]["ColorMapInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["ConditioningCollectionInvocation"] | components["schemas"]["ConditioningInvocation"] | components["schemas"]["ContentShuffleInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["CoreMetadataInvocation"] | components["schemas"]["CreateDenoiseMaskInvocation"] | components["schemas"]["CreateGradientMaskInvocation"] | components["schemas"]["CropImageToBoundingBoxInvocation"] | components["schemas"]["CropLatentsCoreInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["DWOpenposeDetectionInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["DenoiseLatentsMetaInvocation"] | components["schemas"]["DepthAnythingDepthEstimationInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["ExpandMaskWithFadeInvocation"] | components["schemas"]["FLUXLoRACollectionLoader"] | components["schemas"]["FaceIdentifierInvocation"] | components["schemas"]["FaceMaskInvocation"] | components["schemas"]["FaceOffInvocation"] | components["schemas"]["FloatBatchInvocation"] | components["schemas"]["FloatCollectionInvocation"] | components["schemas"]["FloatGenerator"] | components["schemas"]["FloatInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["FloatMathInvocation"] | 
components["schemas"]["FloatToIntegerInvocation"] | components["schemas"]["Flux2DenoiseInvocation"] | components["schemas"]["Flux2KleinLoRACollectionLoader"] | components["schemas"]["Flux2KleinLoRALoaderInvocation"] | components["schemas"]["Flux2KleinModelLoaderInvocation"] | components["schemas"]["Flux2KleinTextEncoderInvocation"] | components["schemas"]["Flux2VaeDecodeInvocation"] | components["schemas"]["Flux2VaeEncodeInvocation"] | components["schemas"]["FluxControlLoRALoaderInvocation"] | components["schemas"]["FluxControlNetInvocation"] | components["schemas"]["FluxDenoiseInvocation"] | components["schemas"]["FluxDenoiseLatentsMetaInvocation"] | components["schemas"]["FluxFillInvocation"] | components["schemas"]["FluxIPAdapterInvocation"] | components["schemas"]["FluxKontextConcatenateImagesInvocation"] | components["schemas"]["FluxKontextInvocation"] | components["schemas"]["FluxLoRALoaderInvocation"] | components["schemas"]["FluxModelLoaderInvocation"] | components["schemas"]["FluxReduxInvocation"] | components["schemas"]["FluxTextEncoderInvocation"] | components["schemas"]["FluxVaeDecodeInvocation"] | components["schemas"]["FluxVaeEncodeInvocation"] | components["schemas"]["FreeUInvocation"] | components["schemas"]["GetMaskBoundingBoxInvocation"] | components["schemas"]["GroundingDinoInvocation"] | components["schemas"]["HEDEdgeDetectionInvocation"] | components["schemas"]["HeuristicResizeInvocation"] | components["schemas"]["IPAdapterInvocation"] | components["schemas"]["IdealSizeInvocation"] | components["schemas"]["ImageBatchInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["ImageChannelMultiplyInvocation"] | components["schemas"]["ImageChannelOffsetInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImageGenerator"] | 
components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ImageInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ImageMaskToTensorInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["ImageNoiseInvocation"] | components["schemas"]["ImagePanelLayoutInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["IntegerBatchInvocation"] | components["schemas"]["IntegerCollectionInvocation"] | components["schemas"]["IntegerGenerator"] | components["schemas"]["IntegerInvocation"] | components["schemas"]["IntegerMathInvocation"] | components["schemas"]["InvertTensorMaskInvocation"] | components["schemas"]["InvokeAdjustImageHuePlusInvocation"] | components["schemas"]["InvokeEquivalentAchromaticLightnessInvocation"] | components["schemas"]["InvokeImageBlendInvocation"] | components["schemas"]["InvokeImageCompositorInvocation"] | components["schemas"]["InvokeImageDilateOrErodeInvocation"] | components["schemas"]["InvokeImageEnhanceInvocation"] | components["schemas"]["InvokeImageValueThresholdsInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["LaMaInfillInvocation"] | components["schemas"]["LatentsCollectionInvocation"] | components["schemas"]["LatentsInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["LineartAnimeEdgeDetectionInvocation"] | components["schemas"]["LineartEdgeDetectionInvocation"] | 
components["schemas"]["LlavaOnevisionVllmInvocation"] | components["schemas"]["LoRACollectionLoader"] | components["schemas"]["LoRALoaderInvocation"] | components["schemas"]["LoRASelectorInvocation"] | components["schemas"]["MLSDDetectionInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["MaskFromIDInvocation"] | components["schemas"]["MaskTensorToImageInvocation"] | components["schemas"]["MediaPipeFaceDetectionInvocation"] | components["schemas"]["MergeMetadataInvocation"] | components["schemas"]["MergeTilesToImageInvocation"] | components["schemas"]["MetadataFieldExtractorInvocation"] | components["schemas"]["MetadataFromImageInvocation"] | components["schemas"]["MetadataInvocation"] | components["schemas"]["MetadataItemInvocation"] | components["schemas"]["MetadataItemLinkedInvocation"] | components["schemas"]["MetadataToBoolCollectionInvocation"] | components["schemas"]["MetadataToBoolInvocation"] | components["schemas"]["MetadataToControlnetsInvocation"] | components["schemas"]["MetadataToFloatCollectionInvocation"] | components["schemas"]["MetadataToFloatInvocation"] | components["schemas"]["MetadataToIPAdaptersInvocation"] | components["schemas"]["MetadataToIntegerCollectionInvocation"] | components["schemas"]["MetadataToIntegerInvocation"] | components["schemas"]["MetadataToLorasCollectionInvocation"] | components["schemas"]["MetadataToLorasInvocation"] | components["schemas"]["MetadataToModelInvocation"] | components["schemas"]["MetadataToSDXLLorasInvocation"] | components["schemas"]["MetadataToSDXLModelInvocation"] | components["schemas"]["MetadataToSchedulerInvocation"] | components["schemas"]["MetadataToStringCollectionInvocation"] | components["schemas"]["MetadataToStringInvocation"] | components["schemas"]["MetadataToT2IAdaptersInvocation"] | 
components["schemas"]["MetadataToVAEInvocation"] | components["schemas"]["ModelIdentifierInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["NormalMapInvocation"] | components["schemas"]["PBRMapsInvocation"] | components["schemas"]["PairTileImageInvocation"] | components["schemas"]["PasteImageIntoBoundingBoxInvocation"] | components["schemas"]["PiDiNetEdgeDetectionInvocation"] | components["schemas"]["PromptTemplateInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["RandomFloatInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RectangleMaskInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["RoundInvocation"] | components["schemas"]["SD3DenoiseInvocation"] | components["schemas"]["SD3ImageToLatentsInvocation"] | components["schemas"]["SD3LatentsToImageInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["SDXLLoRACollectionLoader"] | components["schemas"]["SDXLLoRALoaderInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["SaveImageInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["SchedulerInvocation"] | components["schemas"]["Sd3ModelLoaderInvocation"] | components["schemas"]["Sd3TextEncoderInvocation"] | components["schemas"]["SeamlessModeInvocation"] | components["schemas"]["SegmentAnythingInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["SpandrelImageToImageAutoscaleInvocation"] | components["schemas"]["SpandrelImageToImageInvocation"] | 
components["schemas"]["StringBatchInvocation"] | components["schemas"]["StringCollectionInvocation"] | components["schemas"]["StringGenerator"] | components["schemas"]["StringInvocation"] | components["schemas"]["StringJoinInvocation"] | components["schemas"]["StringJoinThreeInvocation"] | components["schemas"]["StringReplaceInvocation"] | components["schemas"]["StringSplitInvocation"] | components["schemas"]["StringSplitNegInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["T2IAdapterInvocation"] | components["schemas"]["TileToPropertiesInvocation"] | components["schemas"]["TiledMultiDiffusionDenoiseLatents"] | components["schemas"]["UnsharpMaskInvocation"] | components["schemas"]["VAELoaderInvocation"] | components["schemas"]["ZImageControlInvocation"] | components["schemas"]["ZImageDenoiseInvocation"] | components["schemas"]["ZImageDenoiseMetaInvocation"] | components["schemas"]["ZImageImageToLatentsInvocation"] | components["schemas"]["ZImageLatentsToImageInvocation"] | components["schemas"]["ZImageLoRACollectionLoader"] | components["schemas"]["ZImageLoRALoaderInvocation"] | components["schemas"]["ZImageModelLoaderInvocation"] | components["schemas"]["ZImageSeedVarianceEnhancerInvocation"] | components["schemas"]["ZImageTextEncoderInvocation"]; /** * Invocation Source Id * @description The ID of the prepared invocation's source node @@ -16942,6 +17068,84 @@ export type components = { base: "z-image"; variant: components["schemas"]["ZImageVariantType"] | null; }; + /** + * LoRA_LyCORIS_Anima_Config + * @description Model config for Anima LoRA models in LyCORIS format. + */ + LoRA_LyCORIS_Anima_Config: { + /** + * Key + * @description A unique key for this model. + */ + key: string; + /** + * Hash + * @description The hash of the model file(s). + */ + hash: string; + /** + * Path + * @description Path to the model on the filesystem. Relative paths are relative to the Invoke root directory. 
+ */ + path: string; + /** + * File Size + * @description The size of the model in bytes. + */ + file_size: number; + /** + * Name + * @description Name of the model. + */ + name: string; + /** + * Description + * @description Model description + */ + description: string | null; + /** + * Source + * @description The original source of the model (path, URL or repo_id). + */ + source: string; + /** @description The type of source */ + source_type: components["schemas"]["ModelSourceType"]; + /** + * Source Api Response + * @description The original API response from the source, as stringified JSON. + */ + source_api_response: string | null; + /** + * Cover Image + * @description Url for image to preview model + */ + cover_image: string | null; + /** + * Type + * @default lora + * @constant + */ + type: "lora"; + /** + * Trigger Phrases + * @description Set of trigger phrases for this model + */ + trigger_phrases: string[] | null; + /** @description Default settings for this model */ + default_settings: components["schemas"]["LoraModelDefaultSettings"] | null; + /** + * Format + * @default lycoris + * @constant + */ + format: "lycoris"; + /** + * Base + * @default anima + * @constant + */ + base: "anima"; + }; /** LoRA_LyCORIS_FLUX_Config */ LoRA_LyCORIS_FLUX_Config: { /** @@ -21367,7 +21571,7 @@ export type components = { * Config * @description The installed model's config */ - config: components["schemas"]["Main_Diffusers_SD1_Config"] | components["schemas"]["Main_Diffusers_SD2_Config"] | components["schemas"]["Main_Diffusers_SDXL_Config"] | components["schemas"]["Main_Diffusers_SDXLRefiner_Config"] | components["schemas"]["Main_Diffusers_SD3_Config"] | components["schemas"]["Main_Diffusers_FLUX_Config"] | components["schemas"]["Main_Diffusers_Flux2_Config"] | components["schemas"]["Main_Diffusers_CogView4_Config"] | components["schemas"]["Main_Diffusers_ZImage_Config"] | components["schemas"]["Main_Checkpoint_SD1_Config"] | 
components["schemas"]["Main_Checkpoint_SD2_Config"] | components["schemas"]["Main_Checkpoint_SDXL_Config"] | components["schemas"]["Main_Checkpoint_SDXLRefiner_Config"] | components["schemas"]["Main_Checkpoint_Flux2_Config"] | components["schemas"]["Main_Checkpoint_FLUX_Config"] | components["schemas"]["Main_Checkpoint_ZImage_Config"] | components["schemas"]["Main_Checkpoint_Anima_Config"] | components["schemas"]["Main_BnBNF4_FLUX_Config"] | components["schemas"]["Main_GGUF_Flux2_Config"] | components["schemas"]["Main_GGUF_FLUX_Config"] | components["schemas"]["Main_GGUF_ZImage_Config"] | components["schemas"]["VAE_Checkpoint_SD1_Config"] | components["schemas"]["VAE_Checkpoint_SD2_Config"] | components["schemas"]["VAE_Checkpoint_SDXL_Config"] | components["schemas"]["VAE_Checkpoint_FLUX_Config"] | components["schemas"]["VAE_Checkpoint_Flux2_Config"] | components["schemas"]["VAE_Checkpoint_Anima_Config"] | components["schemas"]["VAE_Diffusers_SD1_Config"] | components["schemas"]["VAE_Diffusers_SDXL_Config"] | components["schemas"]["VAE_Diffusers_Flux2_Config"] | components["schemas"]["ControlNet_Checkpoint_SD1_Config"] | components["schemas"]["ControlNet_Checkpoint_SD2_Config"] | components["schemas"]["ControlNet_Checkpoint_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_FLUX_Config"] | components["schemas"]["ControlNet_Checkpoint_ZImage_Config"] | components["schemas"]["ControlNet_Diffusers_SD1_Config"] | components["schemas"]["ControlNet_Diffusers_SD2_Config"] | components["schemas"]["ControlNet_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_SD1_Config"] | components["schemas"]["LoRA_LyCORIS_SD2_Config"] | components["schemas"]["LoRA_LyCORIS_SDXL_Config"] | components["schemas"]["LoRA_LyCORIS_Flux2_Config"] | components["schemas"]["LoRA_LyCORIS_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_ZImage_Config"] | components["schemas"]["LoRA_OMI_SDXL_Config"] | 
components["schemas"]["LoRA_OMI_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_SD1_Config"] | components["schemas"]["LoRA_Diffusers_SD2_Config"] | components["schemas"]["LoRA_Diffusers_SDXL_Config"] | components["schemas"]["LoRA_Diffusers_Flux2_Config"] | components["schemas"]["LoRA_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_ZImage_Config"] | components["schemas"]["ControlLoRA_LyCORIS_FLUX_Config"] | components["schemas"]["T5Encoder_T5Encoder_Config"] | components["schemas"]["T5Encoder_BnBLLMint8_Config"] | components["schemas"]["Qwen3Encoder_Qwen3Encoder_Config"] | components["schemas"]["Qwen3Encoder_Checkpoint_Config"] | components["schemas"]["Qwen3Encoder_GGUF_Config"] | components["schemas"]["TI_File_SD1_Config"] | components["schemas"]["TI_File_SD2_Config"] | components["schemas"]["TI_File_SDXL_Config"] | components["schemas"]["TI_Folder_SD1_Config"] | components["schemas"]["TI_Folder_SD2_Config"] | components["schemas"]["TI_Folder_SDXL_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD1_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD2_Config"] | components["schemas"]["IPAdapter_InvokeAI_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD1_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD2_Config"] | components["schemas"]["IPAdapter_Checkpoint_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_FLUX_Config"] | components["schemas"]["T2IAdapter_Diffusers_SD1_Config"] | components["schemas"]["T2IAdapter_Diffusers_SDXL_Config"] | components["schemas"]["Spandrel_Checkpoint_Config"] | components["schemas"]["CLIPEmbed_Diffusers_G_Config"] | components["schemas"]["CLIPEmbed_Diffusers_L_Config"] | components["schemas"]["CLIPVision_Diffusers_Config"] | components["schemas"]["SigLIP_Diffusers_Config"] | components["schemas"]["FLUXRedux_Checkpoint_Config"] | components["schemas"]["LlavaOnevision_Diffusers_Config"] | components["schemas"]["Unknown_Config"]; + config: 
components["schemas"]["Main_Diffusers_SD1_Config"] | components["schemas"]["Main_Diffusers_SD2_Config"] | components["schemas"]["Main_Diffusers_SDXL_Config"] | components["schemas"]["Main_Diffusers_SDXLRefiner_Config"] | components["schemas"]["Main_Diffusers_SD3_Config"] | components["schemas"]["Main_Diffusers_FLUX_Config"] | components["schemas"]["Main_Diffusers_Flux2_Config"] | components["schemas"]["Main_Diffusers_CogView4_Config"] | components["schemas"]["Main_Diffusers_ZImage_Config"] | components["schemas"]["Main_Checkpoint_SD1_Config"] | components["schemas"]["Main_Checkpoint_SD2_Config"] | components["schemas"]["Main_Checkpoint_SDXL_Config"] | components["schemas"]["Main_Checkpoint_SDXLRefiner_Config"] | components["schemas"]["Main_Checkpoint_Flux2_Config"] | components["schemas"]["Main_Checkpoint_FLUX_Config"] | components["schemas"]["Main_Checkpoint_ZImage_Config"] | components["schemas"]["Main_Checkpoint_Anima_Config"] | components["schemas"]["Main_BnBNF4_FLUX_Config"] | components["schemas"]["Main_GGUF_Flux2_Config"] | components["schemas"]["Main_GGUF_FLUX_Config"] | components["schemas"]["Main_GGUF_ZImage_Config"] | components["schemas"]["VAE_Checkpoint_SD1_Config"] | components["schemas"]["VAE_Checkpoint_SD2_Config"] | components["schemas"]["VAE_Checkpoint_SDXL_Config"] | components["schemas"]["VAE_Checkpoint_FLUX_Config"] | components["schemas"]["VAE_Checkpoint_Flux2_Config"] | components["schemas"]["VAE_Checkpoint_Anima_Config"] | components["schemas"]["VAE_Diffusers_SD1_Config"] | components["schemas"]["VAE_Diffusers_SDXL_Config"] | components["schemas"]["VAE_Diffusers_Flux2_Config"] | components["schemas"]["ControlNet_Checkpoint_SD1_Config"] | components["schemas"]["ControlNet_Checkpoint_SD2_Config"] | components["schemas"]["ControlNet_Checkpoint_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_FLUX_Config"] | components["schemas"]["ControlNet_Checkpoint_ZImage_Config"] | components["schemas"]["ControlNet_Diffusers_SD1_Config"] | 
components["schemas"]["ControlNet_Diffusers_SD2_Config"] | components["schemas"]["ControlNet_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_SD1_Config"] | components["schemas"]["LoRA_LyCORIS_SD2_Config"] | components["schemas"]["LoRA_LyCORIS_SDXL_Config"] | components["schemas"]["LoRA_LyCORIS_Flux2_Config"] | components["schemas"]["LoRA_LyCORIS_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_ZImage_Config"] | components["schemas"]["LoRA_LyCORIS_Anima_Config"] | components["schemas"]["LoRA_OMI_SDXL_Config"] | components["schemas"]["LoRA_OMI_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_SD1_Config"] | components["schemas"]["LoRA_Diffusers_SD2_Config"] | components["schemas"]["LoRA_Diffusers_SDXL_Config"] | components["schemas"]["LoRA_Diffusers_Flux2_Config"] | components["schemas"]["LoRA_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_ZImage_Config"] | components["schemas"]["ControlLoRA_LyCORIS_FLUX_Config"] | components["schemas"]["T5Encoder_T5Encoder_Config"] | components["schemas"]["T5Encoder_BnBLLMint8_Config"] | components["schemas"]["Qwen3Encoder_Qwen3Encoder_Config"] | components["schemas"]["Qwen3Encoder_Checkpoint_Config"] | components["schemas"]["Qwen3Encoder_GGUF_Config"] | components["schemas"]["TI_File_SD1_Config"] | components["schemas"]["TI_File_SD2_Config"] | components["schemas"]["TI_File_SDXL_Config"] | components["schemas"]["TI_Folder_SD1_Config"] | components["schemas"]["TI_Folder_SD2_Config"] | components["schemas"]["TI_Folder_SDXL_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD1_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD2_Config"] | components["schemas"]["IPAdapter_InvokeAI_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD1_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD2_Config"] | components["schemas"]["IPAdapter_Checkpoint_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_FLUX_Config"] | 
components["schemas"]["T2IAdapter_Diffusers_SD1_Config"] | components["schemas"]["T2IAdapter_Diffusers_SDXL_Config"] | components["schemas"]["Spandrel_Checkpoint_Config"] | components["schemas"]["CLIPEmbed_Diffusers_G_Config"] | components["schemas"]["CLIPEmbed_Diffusers_L_Config"] | components["schemas"]["CLIPVision_Diffusers_Config"] | components["schemas"]["SigLIP_Diffusers_Config"] | components["schemas"]["FLUXRedux_Checkpoint_Config"] | components["schemas"]["LlavaOnevision_Diffusers_Config"] | components["schemas"]["Unknown_Config"]; }; /** * ModelInstallDownloadProgressEvent @@ -21533,7 +21737,7 @@ export type components = { * Config Out * @description After successful installation, this will hold the configuration object. */ - config_out?: (components["schemas"]["Main_Diffusers_SD1_Config"] | components["schemas"]["Main_Diffusers_SD2_Config"] | components["schemas"]["Main_Diffusers_SDXL_Config"] | components["schemas"]["Main_Diffusers_SDXLRefiner_Config"] | components["schemas"]["Main_Diffusers_SD3_Config"] | components["schemas"]["Main_Diffusers_FLUX_Config"] | components["schemas"]["Main_Diffusers_Flux2_Config"] | components["schemas"]["Main_Diffusers_CogView4_Config"] | components["schemas"]["Main_Diffusers_ZImage_Config"] | components["schemas"]["Main_Checkpoint_SD1_Config"] | components["schemas"]["Main_Checkpoint_SD2_Config"] | components["schemas"]["Main_Checkpoint_SDXL_Config"] | components["schemas"]["Main_Checkpoint_SDXLRefiner_Config"] | components["schemas"]["Main_Checkpoint_Flux2_Config"] | components["schemas"]["Main_Checkpoint_FLUX_Config"] | components["schemas"]["Main_Checkpoint_ZImage_Config"] | components["schemas"]["Main_Checkpoint_Anima_Config"] | components["schemas"]["Main_BnBNF4_FLUX_Config"] | components["schemas"]["Main_GGUF_Flux2_Config"] | components["schemas"]["Main_GGUF_FLUX_Config"] | components["schemas"]["Main_GGUF_ZImage_Config"] | components["schemas"]["VAE_Checkpoint_SD1_Config"] | 
components["schemas"]["VAE_Checkpoint_SD2_Config"] | components["schemas"]["VAE_Checkpoint_SDXL_Config"] | components["schemas"]["VAE_Checkpoint_FLUX_Config"] | components["schemas"]["VAE_Checkpoint_Flux2_Config"] | components["schemas"]["VAE_Checkpoint_Anima_Config"] | components["schemas"]["VAE_Diffusers_SD1_Config"] | components["schemas"]["VAE_Diffusers_SDXL_Config"] | components["schemas"]["VAE_Diffusers_Flux2_Config"] | components["schemas"]["ControlNet_Checkpoint_SD1_Config"] | components["schemas"]["ControlNet_Checkpoint_SD2_Config"] | components["schemas"]["ControlNet_Checkpoint_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_FLUX_Config"] | components["schemas"]["ControlNet_Checkpoint_ZImage_Config"] | components["schemas"]["ControlNet_Diffusers_SD1_Config"] | components["schemas"]["ControlNet_Diffusers_SD2_Config"] | components["schemas"]["ControlNet_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_SD1_Config"] | components["schemas"]["LoRA_LyCORIS_SD2_Config"] | components["schemas"]["LoRA_LyCORIS_SDXL_Config"] | components["schemas"]["LoRA_LyCORIS_Flux2_Config"] | components["schemas"]["LoRA_LyCORIS_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_ZImage_Config"] | components["schemas"]["LoRA_OMI_SDXL_Config"] | components["schemas"]["LoRA_OMI_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_SD1_Config"] | components["schemas"]["LoRA_Diffusers_SD2_Config"] | components["schemas"]["LoRA_Diffusers_SDXL_Config"] | components["schemas"]["LoRA_Diffusers_Flux2_Config"] | components["schemas"]["LoRA_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_ZImage_Config"] | components["schemas"]["ControlLoRA_LyCORIS_FLUX_Config"] | components["schemas"]["T5Encoder_T5Encoder_Config"] | components["schemas"]["T5Encoder_BnBLLMint8_Config"] | components["schemas"]["Qwen3Encoder_Qwen3Encoder_Config"] | components["schemas"]["Qwen3Encoder_Checkpoint_Config"] | 
components["schemas"]["Qwen3Encoder_GGUF_Config"] | components["schemas"]["TI_File_SD1_Config"] | components["schemas"]["TI_File_SD2_Config"] | components["schemas"]["TI_File_SDXL_Config"] | components["schemas"]["TI_Folder_SD1_Config"] | components["schemas"]["TI_Folder_SD2_Config"] | components["schemas"]["TI_Folder_SDXL_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD1_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD2_Config"] | components["schemas"]["IPAdapter_InvokeAI_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD1_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD2_Config"] | components["schemas"]["IPAdapter_Checkpoint_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_FLUX_Config"] | components["schemas"]["T2IAdapter_Diffusers_SD1_Config"] | components["schemas"]["T2IAdapter_Diffusers_SDXL_Config"] | components["schemas"]["Spandrel_Checkpoint_Config"] | components["schemas"]["CLIPEmbed_Diffusers_G_Config"] | components["schemas"]["CLIPEmbed_Diffusers_L_Config"] | components["schemas"]["CLIPVision_Diffusers_Config"] | components["schemas"]["SigLIP_Diffusers_Config"] | components["schemas"]["FLUXRedux_Checkpoint_Config"] | components["schemas"]["LlavaOnevision_Diffusers_Config"] | components["schemas"]["Unknown_Config"]) | null; + config_out?: (components["schemas"]["Main_Diffusers_SD1_Config"] | components["schemas"]["Main_Diffusers_SD2_Config"] | components["schemas"]["Main_Diffusers_SDXL_Config"] | components["schemas"]["Main_Diffusers_SDXLRefiner_Config"] | components["schemas"]["Main_Diffusers_SD3_Config"] | components["schemas"]["Main_Diffusers_FLUX_Config"] | components["schemas"]["Main_Diffusers_Flux2_Config"] | components["schemas"]["Main_Diffusers_CogView4_Config"] | components["schemas"]["Main_Diffusers_ZImage_Config"] | components["schemas"]["Main_Checkpoint_SD1_Config"] | components["schemas"]["Main_Checkpoint_SD2_Config"] | components["schemas"]["Main_Checkpoint_SDXL_Config"] | 
components["schemas"]["Main_Checkpoint_SDXLRefiner_Config"] | components["schemas"]["Main_Checkpoint_Flux2_Config"] | components["schemas"]["Main_Checkpoint_FLUX_Config"] | components["schemas"]["Main_Checkpoint_ZImage_Config"] | components["schemas"]["Main_Checkpoint_Anima_Config"] | components["schemas"]["Main_BnBNF4_FLUX_Config"] | components["schemas"]["Main_GGUF_Flux2_Config"] | components["schemas"]["Main_GGUF_FLUX_Config"] | components["schemas"]["Main_GGUF_ZImage_Config"] | components["schemas"]["VAE_Checkpoint_SD1_Config"] | components["schemas"]["VAE_Checkpoint_SD2_Config"] | components["schemas"]["VAE_Checkpoint_SDXL_Config"] | components["schemas"]["VAE_Checkpoint_FLUX_Config"] | components["schemas"]["VAE_Checkpoint_Flux2_Config"] | components["schemas"]["VAE_Checkpoint_Anima_Config"] | components["schemas"]["VAE_Diffusers_SD1_Config"] | components["schemas"]["VAE_Diffusers_SDXL_Config"] | components["schemas"]["VAE_Diffusers_Flux2_Config"] | components["schemas"]["ControlNet_Checkpoint_SD1_Config"] | components["schemas"]["ControlNet_Checkpoint_SD2_Config"] | components["schemas"]["ControlNet_Checkpoint_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_FLUX_Config"] | components["schemas"]["ControlNet_Checkpoint_ZImage_Config"] | components["schemas"]["ControlNet_Diffusers_SD1_Config"] | components["schemas"]["ControlNet_Diffusers_SD2_Config"] | components["schemas"]["ControlNet_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_SD1_Config"] | components["schemas"]["LoRA_LyCORIS_SD2_Config"] | components["schemas"]["LoRA_LyCORIS_SDXL_Config"] | components["schemas"]["LoRA_LyCORIS_Flux2_Config"] | components["schemas"]["LoRA_LyCORIS_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_ZImage_Config"] | components["schemas"]["LoRA_LyCORIS_Anima_Config"] | components["schemas"]["LoRA_OMI_SDXL_Config"] | components["schemas"]["LoRA_OMI_FLUX_Config"] | 
components["schemas"]["LoRA_Diffusers_SD1_Config"] | components["schemas"]["LoRA_Diffusers_SD2_Config"] | components["schemas"]["LoRA_Diffusers_SDXL_Config"] | components["schemas"]["LoRA_Diffusers_Flux2_Config"] | components["schemas"]["LoRA_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_ZImage_Config"] | components["schemas"]["ControlLoRA_LyCORIS_FLUX_Config"] | components["schemas"]["T5Encoder_T5Encoder_Config"] | components["schemas"]["T5Encoder_BnBLLMint8_Config"] | components["schemas"]["Qwen3Encoder_Qwen3Encoder_Config"] | components["schemas"]["Qwen3Encoder_Checkpoint_Config"] | components["schemas"]["Qwen3Encoder_GGUF_Config"] | components["schemas"]["TI_File_SD1_Config"] | components["schemas"]["TI_File_SD2_Config"] | components["schemas"]["TI_File_SDXL_Config"] | components["schemas"]["TI_Folder_SD1_Config"] | components["schemas"]["TI_Folder_SD2_Config"] | components["schemas"]["TI_Folder_SDXL_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD1_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD2_Config"] | components["schemas"]["IPAdapter_InvokeAI_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD1_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD2_Config"] | components["schemas"]["IPAdapter_Checkpoint_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_FLUX_Config"] | components["schemas"]["T2IAdapter_Diffusers_SD1_Config"] | components["schemas"]["T2IAdapter_Diffusers_SDXL_Config"] | components["schemas"]["Spandrel_Checkpoint_Config"] | components["schemas"]["CLIPEmbed_Diffusers_G_Config"] | components["schemas"]["CLIPEmbed_Diffusers_L_Config"] | components["schemas"]["CLIPVision_Diffusers_Config"] | components["schemas"]["SigLIP_Diffusers_Config"] | components["schemas"]["FLUXRedux_Checkpoint_Config"] | components["schemas"]["LlavaOnevision_Diffusers_Config"] | components["schemas"]["Unknown_Config"]) | null; /** * Inplace * @description Leave model in its current location; otherwise install 
under models directory @@ -21619,7 +21823,7 @@ export type components = { * Config * @description The model's config */ - config: components["schemas"]["Main_Diffusers_SD1_Config"] | components["schemas"]["Main_Diffusers_SD2_Config"] | components["schemas"]["Main_Diffusers_SDXL_Config"] | components["schemas"]["Main_Diffusers_SDXLRefiner_Config"] | components["schemas"]["Main_Diffusers_SD3_Config"] | components["schemas"]["Main_Diffusers_FLUX_Config"] | components["schemas"]["Main_Diffusers_Flux2_Config"] | components["schemas"]["Main_Diffusers_CogView4_Config"] | components["schemas"]["Main_Diffusers_ZImage_Config"] | components["schemas"]["Main_Checkpoint_SD1_Config"] | components["schemas"]["Main_Checkpoint_SD2_Config"] | components["schemas"]["Main_Checkpoint_SDXL_Config"] | components["schemas"]["Main_Checkpoint_SDXLRefiner_Config"] | components["schemas"]["Main_Checkpoint_Flux2_Config"] | components["schemas"]["Main_Checkpoint_FLUX_Config"] | components["schemas"]["Main_Checkpoint_ZImage_Config"] | components["schemas"]["Main_Checkpoint_Anima_Config"] | components["schemas"]["Main_BnBNF4_FLUX_Config"] | components["schemas"]["Main_GGUF_Flux2_Config"] | components["schemas"]["Main_GGUF_FLUX_Config"] | components["schemas"]["Main_GGUF_ZImage_Config"] | components["schemas"]["VAE_Checkpoint_SD1_Config"] | components["schemas"]["VAE_Checkpoint_SD2_Config"] | components["schemas"]["VAE_Checkpoint_SDXL_Config"] | components["schemas"]["VAE_Checkpoint_FLUX_Config"] | components["schemas"]["VAE_Checkpoint_Flux2_Config"] | components["schemas"]["VAE_Checkpoint_Anima_Config"] | components["schemas"]["VAE_Diffusers_SD1_Config"] | components["schemas"]["VAE_Diffusers_SDXL_Config"] | components["schemas"]["VAE_Diffusers_Flux2_Config"] | components["schemas"]["ControlNet_Checkpoint_SD1_Config"] | components["schemas"]["ControlNet_Checkpoint_SD2_Config"] | components["schemas"]["ControlNet_Checkpoint_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_FLUX_Config"] 
| components["schemas"]["ControlNet_Checkpoint_ZImage_Config"] | components["schemas"]["ControlNet_Diffusers_SD1_Config"] | components["schemas"]["ControlNet_Diffusers_SD2_Config"] | components["schemas"]["ControlNet_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_SD1_Config"] | components["schemas"]["LoRA_LyCORIS_SD2_Config"] | components["schemas"]["LoRA_LyCORIS_SDXL_Config"] | components["schemas"]["LoRA_LyCORIS_Flux2_Config"] | components["schemas"]["LoRA_LyCORIS_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_ZImage_Config"] | components["schemas"]["LoRA_OMI_SDXL_Config"] | components["schemas"]["LoRA_OMI_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_SD1_Config"] | components["schemas"]["LoRA_Diffusers_SD2_Config"] | components["schemas"]["LoRA_Diffusers_SDXL_Config"] | components["schemas"]["LoRA_Diffusers_Flux2_Config"] | components["schemas"]["LoRA_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_ZImage_Config"] | components["schemas"]["ControlLoRA_LyCORIS_FLUX_Config"] | components["schemas"]["T5Encoder_T5Encoder_Config"] | components["schemas"]["T5Encoder_BnBLLMint8_Config"] | components["schemas"]["Qwen3Encoder_Qwen3Encoder_Config"] | components["schemas"]["Qwen3Encoder_Checkpoint_Config"] | components["schemas"]["Qwen3Encoder_GGUF_Config"] | components["schemas"]["TI_File_SD1_Config"] | components["schemas"]["TI_File_SD2_Config"] | components["schemas"]["TI_File_SDXL_Config"] | components["schemas"]["TI_Folder_SD1_Config"] | components["schemas"]["TI_Folder_SD2_Config"] | components["schemas"]["TI_Folder_SDXL_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD1_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD2_Config"] | components["schemas"]["IPAdapter_InvokeAI_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD1_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD2_Config"] | 
components["schemas"]["IPAdapter_Checkpoint_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_FLUX_Config"] | components["schemas"]["T2IAdapter_Diffusers_SD1_Config"] | components["schemas"]["T2IAdapter_Diffusers_SDXL_Config"] | components["schemas"]["Spandrel_Checkpoint_Config"] | components["schemas"]["CLIPEmbed_Diffusers_G_Config"] | components["schemas"]["CLIPEmbed_Diffusers_L_Config"] | components["schemas"]["CLIPVision_Diffusers_Config"] | components["schemas"]["SigLIP_Diffusers_Config"] | components["schemas"]["FLUXRedux_Checkpoint_Config"] | components["schemas"]["LlavaOnevision_Diffusers_Config"] | components["schemas"]["Unknown_Config"]; + config: components["schemas"]["Main_Diffusers_SD1_Config"] | components["schemas"]["Main_Diffusers_SD2_Config"] | components["schemas"]["Main_Diffusers_SDXL_Config"] | components["schemas"]["Main_Diffusers_SDXLRefiner_Config"] | components["schemas"]["Main_Diffusers_SD3_Config"] | components["schemas"]["Main_Diffusers_FLUX_Config"] | components["schemas"]["Main_Diffusers_Flux2_Config"] | components["schemas"]["Main_Diffusers_CogView4_Config"] | components["schemas"]["Main_Diffusers_ZImage_Config"] | components["schemas"]["Main_Checkpoint_SD1_Config"] | components["schemas"]["Main_Checkpoint_SD2_Config"] | components["schemas"]["Main_Checkpoint_SDXL_Config"] | components["schemas"]["Main_Checkpoint_SDXLRefiner_Config"] | components["schemas"]["Main_Checkpoint_Flux2_Config"] | components["schemas"]["Main_Checkpoint_FLUX_Config"] | components["schemas"]["Main_Checkpoint_ZImage_Config"] | components["schemas"]["Main_Checkpoint_Anima_Config"] | components["schemas"]["Main_BnBNF4_FLUX_Config"] | components["schemas"]["Main_GGUF_Flux2_Config"] | components["schemas"]["Main_GGUF_FLUX_Config"] | components["schemas"]["Main_GGUF_ZImage_Config"] | components["schemas"]["VAE_Checkpoint_SD1_Config"] | components["schemas"]["VAE_Checkpoint_SD2_Config"] | components["schemas"]["VAE_Checkpoint_SDXL_Config"] | 
components["schemas"]["VAE_Checkpoint_FLUX_Config"] | components["schemas"]["VAE_Checkpoint_Flux2_Config"] | components["schemas"]["VAE_Checkpoint_Anima_Config"] | components["schemas"]["VAE_Diffusers_SD1_Config"] | components["schemas"]["VAE_Diffusers_SDXL_Config"] | components["schemas"]["VAE_Diffusers_Flux2_Config"] | components["schemas"]["ControlNet_Checkpoint_SD1_Config"] | components["schemas"]["ControlNet_Checkpoint_SD2_Config"] | components["schemas"]["ControlNet_Checkpoint_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_FLUX_Config"] | components["schemas"]["ControlNet_Checkpoint_ZImage_Config"] | components["schemas"]["ControlNet_Diffusers_SD1_Config"] | components["schemas"]["ControlNet_Diffusers_SD2_Config"] | components["schemas"]["ControlNet_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_SD1_Config"] | components["schemas"]["LoRA_LyCORIS_SD2_Config"] | components["schemas"]["LoRA_LyCORIS_SDXL_Config"] | components["schemas"]["LoRA_LyCORIS_Flux2_Config"] | components["schemas"]["LoRA_LyCORIS_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_ZImage_Config"] | components["schemas"]["LoRA_LyCORIS_Anima_Config"] | components["schemas"]["LoRA_OMI_SDXL_Config"] | components["schemas"]["LoRA_OMI_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_SD1_Config"] | components["schemas"]["LoRA_Diffusers_SD2_Config"] | components["schemas"]["LoRA_Diffusers_SDXL_Config"] | components["schemas"]["LoRA_Diffusers_Flux2_Config"] | components["schemas"]["LoRA_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_ZImage_Config"] | components["schemas"]["ControlLoRA_LyCORIS_FLUX_Config"] | components["schemas"]["T5Encoder_T5Encoder_Config"] | components["schemas"]["T5Encoder_BnBLLMint8_Config"] | components["schemas"]["Qwen3Encoder_Qwen3Encoder_Config"] | components["schemas"]["Qwen3Encoder_Checkpoint_Config"] | components["schemas"]["Qwen3Encoder_GGUF_Config"] | 
components["schemas"]["TI_File_SD1_Config"] | components["schemas"]["TI_File_SD2_Config"] | components["schemas"]["TI_File_SDXL_Config"] | components["schemas"]["TI_Folder_SD1_Config"] | components["schemas"]["TI_Folder_SD2_Config"] | components["schemas"]["TI_Folder_SDXL_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD1_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD2_Config"] | components["schemas"]["IPAdapter_InvokeAI_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD1_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD2_Config"] | components["schemas"]["IPAdapter_Checkpoint_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_FLUX_Config"] | components["schemas"]["T2IAdapter_Diffusers_SD1_Config"] | components["schemas"]["T2IAdapter_Diffusers_SDXL_Config"] | components["schemas"]["Spandrel_Checkpoint_Config"] | components["schemas"]["CLIPEmbed_Diffusers_G_Config"] | components["schemas"]["CLIPEmbed_Diffusers_L_Config"] | components["schemas"]["CLIPVision_Diffusers_Config"] | components["schemas"]["SigLIP_Diffusers_Config"] | components["schemas"]["FLUXRedux_Checkpoint_Config"] | components["schemas"]["LlavaOnevision_Diffusers_Config"] | components["schemas"]["Unknown_Config"]; /** * @description The submodel type, if any * @default null @@ -21640,7 +21844,7 @@ export type components = { * Config * @description The model's config */ - config: components["schemas"]["Main_Diffusers_SD1_Config"] | components["schemas"]["Main_Diffusers_SD2_Config"] | components["schemas"]["Main_Diffusers_SDXL_Config"] | components["schemas"]["Main_Diffusers_SDXLRefiner_Config"] | components["schemas"]["Main_Diffusers_SD3_Config"] | components["schemas"]["Main_Diffusers_FLUX_Config"] | components["schemas"]["Main_Diffusers_Flux2_Config"] | components["schemas"]["Main_Diffusers_CogView4_Config"] | components["schemas"]["Main_Diffusers_ZImage_Config"] | components["schemas"]["Main_Checkpoint_SD1_Config"] | 
components["schemas"]["Main_Checkpoint_SD2_Config"] | components["schemas"]["Main_Checkpoint_SDXL_Config"] | components["schemas"]["Main_Checkpoint_SDXLRefiner_Config"] | components["schemas"]["Main_Checkpoint_Flux2_Config"] | components["schemas"]["Main_Checkpoint_FLUX_Config"] | components["schemas"]["Main_Checkpoint_ZImage_Config"] | components["schemas"]["Main_Checkpoint_Anima_Config"] | components["schemas"]["Main_BnBNF4_FLUX_Config"] | components["schemas"]["Main_GGUF_Flux2_Config"] | components["schemas"]["Main_GGUF_FLUX_Config"] | components["schemas"]["Main_GGUF_ZImage_Config"] | components["schemas"]["VAE_Checkpoint_SD1_Config"] | components["schemas"]["VAE_Checkpoint_SD2_Config"] | components["schemas"]["VAE_Checkpoint_SDXL_Config"] | components["schemas"]["VAE_Checkpoint_FLUX_Config"] | components["schemas"]["VAE_Checkpoint_Flux2_Config"] | components["schemas"]["VAE_Checkpoint_Anima_Config"] | components["schemas"]["VAE_Diffusers_SD1_Config"] | components["schemas"]["VAE_Diffusers_SDXL_Config"] | components["schemas"]["VAE_Diffusers_Flux2_Config"] | components["schemas"]["ControlNet_Checkpoint_SD1_Config"] | components["schemas"]["ControlNet_Checkpoint_SD2_Config"] | components["schemas"]["ControlNet_Checkpoint_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_FLUX_Config"] | components["schemas"]["ControlNet_Checkpoint_ZImage_Config"] | components["schemas"]["ControlNet_Diffusers_SD1_Config"] | components["schemas"]["ControlNet_Diffusers_SD2_Config"] | components["schemas"]["ControlNet_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_SD1_Config"] | components["schemas"]["LoRA_LyCORIS_SD2_Config"] | components["schemas"]["LoRA_LyCORIS_SDXL_Config"] | components["schemas"]["LoRA_LyCORIS_Flux2_Config"] | components["schemas"]["LoRA_LyCORIS_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_ZImage_Config"] | components["schemas"]["LoRA_OMI_SDXL_Config"] | 
components["schemas"]["LoRA_OMI_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_SD1_Config"] | components["schemas"]["LoRA_Diffusers_SD2_Config"] | components["schemas"]["LoRA_Diffusers_SDXL_Config"] | components["schemas"]["LoRA_Diffusers_Flux2_Config"] | components["schemas"]["LoRA_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_ZImage_Config"] | components["schemas"]["ControlLoRA_LyCORIS_FLUX_Config"] | components["schemas"]["T5Encoder_T5Encoder_Config"] | components["schemas"]["T5Encoder_BnBLLMint8_Config"] | components["schemas"]["Qwen3Encoder_Qwen3Encoder_Config"] | components["schemas"]["Qwen3Encoder_Checkpoint_Config"] | components["schemas"]["Qwen3Encoder_GGUF_Config"] | components["schemas"]["TI_File_SD1_Config"] | components["schemas"]["TI_File_SD2_Config"] | components["schemas"]["TI_File_SDXL_Config"] | components["schemas"]["TI_Folder_SD1_Config"] | components["schemas"]["TI_Folder_SD2_Config"] | components["schemas"]["TI_Folder_SDXL_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD1_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD2_Config"] | components["schemas"]["IPAdapter_InvokeAI_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD1_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD2_Config"] | components["schemas"]["IPAdapter_Checkpoint_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_FLUX_Config"] | components["schemas"]["T2IAdapter_Diffusers_SD1_Config"] | components["schemas"]["T2IAdapter_Diffusers_SDXL_Config"] | components["schemas"]["Spandrel_Checkpoint_Config"] | components["schemas"]["CLIPEmbed_Diffusers_G_Config"] | components["schemas"]["CLIPEmbed_Diffusers_L_Config"] | components["schemas"]["CLIPVision_Diffusers_Config"] | components["schemas"]["SigLIP_Diffusers_Config"] | components["schemas"]["FLUXRedux_Checkpoint_Config"] | components["schemas"]["LlavaOnevision_Diffusers_Config"] | components["schemas"]["Unknown_Config"]; + config: 
components["schemas"]["Main_Diffusers_SD1_Config"] | components["schemas"]["Main_Diffusers_SD2_Config"] | components["schemas"]["Main_Diffusers_SDXL_Config"] | components["schemas"]["Main_Diffusers_SDXLRefiner_Config"] | components["schemas"]["Main_Diffusers_SD3_Config"] | components["schemas"]["Main_Diffusers_FLUX_Config"] | components["schemas"]["Main_Diffusers_Flux2_Config"] | components["schemas"]["Main_Diffusers_CogView4_Config"] | components["schemas"]["Main_Diffusers_ZImage_Config"] | components["schemas"]["Main_Checkpoint_SD1_Config"] | components["schemas"]["Main_Checkpoint_SD2_Config"] | components["schemas"]["Main_Checkpoint_SDXL_Config"] | components["schemas"]["Main_Checkpoint_SDXLRefiner_Config"] | components["schemas"]["Main_Checkpoint_Flux2_Config"] | components["schemas"]["Main_Checkpoint_FLUX_Config"] | components["schemas"]["Main_Checkpoint_ZImage_Config"] | components["schemas"]["Main_Checkpoint_Anima_Config"] | components["schemas"]["Main_BnBNF4_FLUX_Config"] | components["schemas"]["Main_GGUF_Flux2_Config"] | components["schemas"]["Main_GGUF_FLUX_Config"] | components["schemas"]["Main_GGUF_ZImage_Config"] | components["schemas"]["VAE_Checkpoint_SD1_Config"] | components["schemas"]["VAE_Checkpoint_SD2_Config"] | components["schemas"]["VAE_Checkpoint_SDXL_Config"] | components["schemas"]["VAE_Checkpoint_FLUX_Config"] | components["schemas"]["VAE_Checkpoint_Flux2_Config"] | components["schemas"]["VAE_Checkpoint_Anima_Config"] | components["schemas"]["VAE_Diffusers_SD1_Config"] | components["schemas"]["VAE_Diffusers_SDXL_Config"] | components["schemas"]["VAE_Diffusers_Flux2_Config"] | components["schemas"]["ControlNet_Checkpoint_SD1_Config"] | components["schemas"]["ControlNet_Checkpoint_SD2_Config"] | components["schemas"]["ControlNet_Checkpoint_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_FLUX_Config"] | components["schemas"]["ControlNet_Checkpoint_ZImage_Config"] | components["schemas"]["ControlNet_Diffusers_SD1_Config"] | 
components["schemas"]["ControlNet_Diffusers_SD2_Config"] | components["schemas"]["ControlNet_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_SD1_Config"] | components["schemas"]["LoRA_LyCORIS_SD2_Config"] | components["schemas"]["LoRA_LyCORIS_SDXL_Config"] | components["schemas"]["LoRA_LyCORIS_Flux2_Config"] | components["schemas"]["LoRA_LyCORIS_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_ZImage_Config"] | components["schemas"]["LoRA_LyCORIS_Anima_Config"] | components["schemas"]["LoRA_OMI_SDXL_Config"] | components["schemas"]["LoRA_OMI_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_SD1_Config"] | components["schemas"]["LoRA_Diffusers_SD2_Config"] | components["schemas"]["LoRA_Diffusers_SDXL_Config"] | components["schemas"]["LoRA_Diffusers_Flux2_Config"] | components["schemas"]["LoRA_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_ZImage_Config"] | components["schemas"]["ControlLoRA_LyCORIS_FLUX_Config"] | components["schemas"]["T5Encoder_T5Encoder_Config"] | components["schemas"]["T5Encoder_BnBLLMint8_Config"] | components["schemas"]["Qwen3Encoder_Qwen3Encoder_Config"] | components["schemas"]["Qwen3Encoder_Checkpoint_Config"] | components["schemas"]["Qwen3Encoder_GGUF_Config"] | components["schemas"]["TI_File_SD1_Config"] | components["schemas"]["TI_File_SD2_Config"] | components["schemas"]["TI_File_SDXL_Config"] | components["schemas"]["TI_Folder_SD1_Config"] | components["schemas"]["TI_Folder_SD2_Config"] | components["schemas"]["TI_Folder_SDXL_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD1_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD2_Config"] | components["schemas"]["IPAdapter_InvokeAI_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD1_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD2_Config"] | components["schemas"]["IPAdapter_Checkpoint_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_FLUX_Config"] | 
components["schemas"]["T2IAdapter_Diffusers_SD1_Config"] | components["schemas"]["T2IAdapter_Diffusers_SDXL_Config"] | components["schemas"]["Spandrel_Checkpoint_Config"] | components["schemas"]["CLIPEmbed_Diffusers_G_Config"] | components["schemas"]["CLIPEmbed_Diffusers_L_Config"] | components["schemas"]["CLIPVision_Diffusers_Config"] | components["schemas"]["SigLIP_Diffusers_Config"] | components["schemas"]["FLUXRedux_Checkpoint_Config"] | components["schemas"]["LlavaOnevision_Diffusers_Config"] | components["schemas"]["Unknown_Config"]; /** * @description The submodel type, if any * @default null @@ -21814,7 +22018,7 @@ export type components = { */ ModelsList: { /** Models */ - models: (components["schemas"]["Main_Diffusers_SD1_Config"] | components["schemas"]["Main_Diffusers_SD2_Config"] | components["schemas"]["Main_Diffusers_SDXL_Config"] | components["schemas"]["Main_Diffusers_SDXLRefiner_Config"] | components["schemas"]["Main_Diffusers_SD3_Config"] | components["schemas"]["Main_Diffusers_FLUX_Config"] | components["schemas"]["Main_Diffusers_Flux2_Config"] | components["schemas"]["Main_Diffusers_CogView4_Config"] | components["schemas"]["Main_Diffusers_ZImage_Config"] | components["schemas"]["Main_Checkpoint_SD1_Config"] | components["schemas"]["Main_Checkpoint_SD2_Config"] | components["schemas"]["Main_Checkpoint_SDXL_Config"] | components["schemas"]["Main_Checkpoint_SDXLRefiner_Config"] | components["schemas"]["Main_Checkpoint_Flux2_Config"] | components["schemas"]["Main_Checkpoint_FLUX_Config"] | components["schemas"]["Main_Checkpoint_ZImage_Config"] | components["schemas"]["Main_Checkpoint_Anima_Config"] | components["schemas"]["Main_BnBNF4_FLUX_Config"] | components["schemas"]["Main_GGUF_Flux2_Config"] | components["schemas"]["Main_GGUF_FLUX_Config"] | components["schemas"]["Main_GGUF_ZImage_Config"] | components["schemas"]["VAE_Checkpoint_SD1_Config"] | components["schemas"]["VAE_Checkpoint_SD2_Config"] | 
components["schemas"]["VAE_Checkpoint_SDXL_Config"] | components["schemas"]["VAE_Checkpoint_FLUX_Config"] | components["schemas"]["VAE_Checkpoint_Flux2_Config"] | components["schemas"]["VAE_Checkpoint_Anima_Config"] | components["schemas"]["VAE_Diffusers_SD1_Config"] | components["schemas"]["VAE_Diffusers_SDXL_Config"] | components["schemas"]["VAE_Diffusers_Flux2_Config"] | components["schemas"]["ControlNet_Checkpoint_SD1_Config"] | components["schemas"]["ControlNet_Checkpoint_SD2_Config"] | components["schemas"]["ControlNet_Checkpoint_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_FLUX_Config"] | components["schemas"]["ControlNet_Checkpoint_ZImage_Config"] | components["schemas"]["ControlNet_Diffusers_SD1_Config"] | components["schemas"]["ControlNet_Diffusers_SD2_Config"] | components["schemas"]["ControlNet_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_SD1_Config"] | components["schemas"]["LoRA_LyCORIS_SD2_Config"] | components["schemas"]["LoRA_LyCORIS_SDXL_Config"] | components["schemas"]["LoRA_LyCORIS_Flux2_Config"] | components["schemas"]["LoRA_LyCORIS_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_ZImage_Config"] | components["schemas"]["LoRA_OMI_SDXL_Config"] | components["schemas"]["LoRA_OMI_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_SD1_Config"] | components["schemas"]["LoRA_Diffusers_SD2_Config"] | components["schemas"]["LoRA_Diffusers_SDXL_Config"] | components["schemas"]["LoRA_Diffusers_Flux2_Config"] | components["schemas"]["LoRA_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_ZImage_Config"] | components["schemas"]["ControlLoRA_LyCORIS_FLUX_Config"] | components["schemas"]["T5Encoder_T5Encoder_Config"] | components["schemas"]["T5Encoder_BnBLLMint8_Config"] | components["schemas"]["Qwen3Encoder_Qwen3Encoder_Config"] | components["schemas"]["Qwen3Encoder_Checkpoint_Config"] | components["schemas"]["Qwen3Encoder_GGUF_Config"] | 
components["schemas"]["TI_File_SD1_Config"] | components["schemas"]["TI_File_SD2_Config"] | components["schemas"]["TI_File_SDXL_Config"] | components["schemas"]["TI_Folder_SD1_Config"] | components["schemas"]["TI_Folder_SD2_Config"] | components["schemas"]["TI_Folder_SDXL_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD1_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD2_Config"] | components["schemas"]["IPAdapter_InvokeAI_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD1_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD2_Config"] | components["schemas"]["IPAdapter_Checkpoint_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_FLUX_Config"] | components["schemas"]["T2IAdapter_Diffusers_SD1_Config"] | components["schemas"]["T2IAdapter_Diffusers_SDXL_Config"] | components["schemas"]["Spandrel_Checkpoint_Config"] | components["schemas"]["CLIPEmbed_Diffusers_G_Config"] | components["schemas"]["CLIPEmbed_Diffusers_L_Config"] | components["schemas"]["CLIPVision_Diffusers_Config"] | components["schemas"]["SigLIP_Diffusers_Config"] | components["schemas"]["FLUXRedux_Checkpoint_Config"] | components["schemas"]["LlavaOnevision_Diffusers_Config"] | components["schemas"]["Unknown_Config"])[]; + models: (components["schemas"]["Main_Diffusers_SD1_Config"] | components["schemas"]["Main_Diffusers_SD2_Config"] | components["schemas"]["Main_Diffusers_SDXL_Config"] | components["schemas"]["Main_Diffusers_SDXLRefiner_Config"] | components["schemas"]["Main_Diffusers_SD3_Config"] | components["schemas"]["Main_Diffusers_FLUX_Config"] | components["schemas"]["Main_Diffusers_Flux2_Config"] | components["schemas"]["Main_Diffusers_CogView4_Config"] | components["schemas"]["Main_Diffusers_ZImage_Config"] | components["schemas"]["Main_Checkpoint_SD1_Config"] | components["schemas"]["Main_Checkpoint_SD2_Config"] | components["schemas"]["Main_Checkpoint_SDXL_Config"] | components["schemas"]["Main_Checkpoint_SDXLRefiner_Config"] | 
components["schemas"]["Main_Checkpoint_Flux2_Config"] | components["schemas"]["Main_Checkpoint_FLUX_Config"] | components["schemas"]["Main_Checkpoint_ZImage_Config"] | components["schemas"]["Main_Checkpoint_Anima_Config"] | components["schemas"]["Main_BnBNF4_FLUX_Config"] | components["schemas"]["Main_GGUF_Flux2_Config"] | components["schemas"]["Main_GGUF_FLUX_Config"] | components["schemas"]["Main_GGUF_ZImage_Config"] | components["schemas"]["VAE_Checkpoint_SD1_Config"] | components["schemas"]["VAE_Checkpoint_SD2_Config"] | components["schemas"]["VAE_Checkpoint_SDXL_Config"] | components["schemas"]["VAE_Checkpoint_FLUX_Config"] | components["schemas"]["VAE_Checkpoint_Flux2_Config"] | components["schemas"]["VAE_Checkpoint_Anima_Config"] | components["schemas"]["VAE_Diffusers_SD1_Config"] | components["schemas"]["VAE_Diffusers_SDXL_Config"] | components["schemas"]["VAE_Diffusers_Flux2_Config"] | components["schemas"]["ControlNet_Checkpoint_SD1_Config"] | components["schemas"]["ControlNet_Checkpoint_SD2_Config"] | components["schemas"]["ControlNet_Checkpoint_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_FLUX_Config"] | components["schemas"]["ControlNet_Checkpoint_ZImage_Config"] | components["schemas"]["ControlNet_Diffusers_SD1_Config"] | components["schemas"]["ControlNet_Diffusers_SD2_Config"] | components["schemas"]["ControlNet_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_SD1_Config"] | components["schemas"]["LoRA_LyCORIS_SD2_Config"] | components["schemas"]["LoRA_LyCORIS_SDXL_Config"] | components["schemas"]["LoRA_LyCORIS_Flux2_Config"] | components["schemas"]["LoRA_LyCORIS_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_ZImage_Config"] | components["schemas"]["LoRA_LyCORIS_Anima_Config"] | components["schemas"]["LoRA_OMI_SDXL_Config"] | components["schemas"]["LoRA_OMI_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_SD1_Config"] | 
components["schemas"]["LoRA_Diffusers_SD2_Config"] | components["schemas"]["LoRA_Diffusers_SDXL_Config"] | components["schemas"]["LoRA_Diffusers_Flux2_Config"] | components["schemas"]["LoRA_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_ZImage_Config"] | components["schemas"]["ControlLoRA_LyCORIS_FLUX_Config"] | components["schemas"]["T5Encoder_T5Encoder_Config"] | components["schemas"]["T5Encoder_BnBLLMint8_Config"] | components["schemas"]["Qwen3Encoder_Qwen3Encoder_Config"] | components["schemas"]["Qwen3Encoder_Checkpoint_Config"] | components["schemas"]["Qwen3Encoder_GGUF_Config"] | components["schemas"]["TI_File_SD1_Config"] | components["schemas"]["TI_File_SD2_Config"] | components["schemas"]["TI_File_SDXL_Config"] | components["schemas"]["TI_Folder_SD1_Config"] | components["schemas"]["TI_Folder_SD2_Config"] | components["schemas"]["TI_Folder_SDXL_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD1_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD2_Config"] | components["schemas"]["IPAdapter_InvokeAI_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD1_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD2_Config"] | components["schemas"]["IPAdapter_Checkpoint_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_FLUX_Config"] | components["schemas"]["T2IAdapter_Diffusers_SD1_Config"] | components["schemas"]["T2IAdapter_Diffusers_SDXL_Config"] | components["schemas"]["Spandrel_Checkpoint_Config"] | components["schemas"]["CLIPEmbed_Diffusers_G_Config"] | components["schemas"]["CLIPEmbed_Diffusers_L_Config"] | components["schemas"]["CLIPVision_Diffusers_Config"] | components["schemas"]["SigLIP_Diffusers_Config"] | components["schemas"]["FLUXRedux_Checkpoint_Config"] | components["schemas"]["LlavaOnevision_Diffusers_Config"] | components["schemas"]["Unknown_Config"])[]; }; /** * Multiply Integers @@ -29670,7 +29874,7 @@ export interface operations { [name: string]: unknown; }; content: { - "application/json": 
components["schemas"]["Main_Diffusers_SD1_Config"] | components["schemas"]["Main_Diffusers_SD2_Config"] | components["schemas"]["Main_Diffusers_SDXL_Config"] | components["schemas"]["Main_Diffusers_SDXLRefiner_Config"] | components["schemas"]["Main_Diffusers_SD3_Config"] | components["schemas"]["Main_Diffusers_FLUX_Config"] | components["schemas"]["Main_Diffusers_Flux2_Config"] | components["schemas"]["Main_Diffusers_CogView4_Config"] | components["schemas"]["Main_Diffusers_ZImage_Config"] | components["schemas"]["Main_Checkpoint_SD1_Config"] | components["schemas"]["Main_Checkpoint_SD2_Config"] | components["schemas"]["Main_Checkpoint_SDXL_Config"] | components["schemas"]["Main_Checkpoint_SDXLRefiner_Config"] | components["schemas"]["Main_Checkpoint_Flux2_Config"] | components["schemas"]["Main_Checkpoint_FLUX_Config"] | components["schemas"]["Main_Checkpoint_ZImage_Config"] | components["schemas"]["Main_Checkpoint_Anima_Config"] | components["schemas"]["Main_BnBNF4_FLUX_Config"] | components["schemas"]["Main_GGUF_Flux2_Config"] | components["schemas"]["Main_GGUF_FLUX_Config"] | components["schemas"]["Main_GGUF_ZImage_Config"] | components["schemas"]["VAE_Checkpoint_SD1_Config"] | components["schemas"]["VAE_Checkpoint_SD2_Config"] | components["schemas"]["VAE_Checkpoint_SDXL_Config"] | components["schemas"]["VAE_Checkpoint_FLUX_Config"] | components["schemas"]["VAE_Checkpoint_Flux2_Config"] | components["schemas"]["VAE_Checkpoint_Anima_Config"] | components["schemas"]["VAE_Diffusers_SD1_Config"] | components["schemas"]["VAE_Diffusers_SDXL_Config"] | components["schemas"]["VAE_Diffusers_Flux2_Config"] | components["schemas"]["ControlNet_Checkpoint_SD1_Config"] | components["schemas"]["ControlNet_Checkpoint_SD2_Config"] | components["schemas"]["ControlNet_Checkpoint_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_FLUX_Config"] | components["schemas"]["ControlNet_Checkpoint_ZImage_Config"] | components["schemas"]["ControlNet_Diffusers_SD1_Config"] | 
components["schemas"]["ControlNet_Diffusers_SD2_Config"] | components["schemas"]["ControlNet_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_SD1_Config"] | components["schemas"]["LoRA_LyCORIS_SD2_Config"] | components["schemas"]["LoRA_LyCORIS_SDXL_Config"] | components["schemas"]["LoRA_LyCORIS_Flux2_Config"] | components["schemas"]["LoRA_LyCORIS_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_ZImage_Config"] | components["schemas"]["LoRA_OMI_SDXL_Config"] | components["schemas"]["LoRA_OMI_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_SD1_Config"] | components["schemas"]["LoRA_Diffusers_SD2_Config"] | components["schemas"]["LoRA_Diffusers_SDXL_Config"] | components["schemas"]["LoRA_Diffusers_Flux2_Config"] | components["schemas"]["LoRA_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_ZImage_Config"] | components["schemas"]["ControlLoRA_LyCORIS_FLUX_Config"] | components["schemas"]["T5Encoder_T5Encoder_Config"] | components["schemas"]["T5Encoder_BnBLLMint8_Config"] | components["schemas"]["Qwen3Encoder_Qwen3Encoder_Config"] | components["schemas"]["Qwen3Encoder_Checkpoint_Config"] | components["schemas"]["Qwen3Encoder_GGUF_Config"] | components["schemas"]["TI_File_SD1_Config"] | components["schemas"]["TI_File_SD2_Config"] | components["schemas"]["TI_File_SDXL_Config"] | components["schemas"]["TI_Folder_SD1_Config"] | components["schemas"]["TI_Folder_SD2_Config"] | components["schemas"]["TI_Folder_SDXL_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD1_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD2_Config"] | components["schemas"]["IPAdapter_InvokeAI_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD1_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD2_Config"] | components["schemas"]["IPAdapter_Checkpoint_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_FLUX_Config"] | 
components["schemas"]["T2IAdapter_Diffusers_SD1_Config"] | components["schemas"]["T2IAdapter_Diffusers_SDXL_Config"] | components["schemas"]["Spandrel_Checkpoint_Config"] | components["schemas"]["CLIPEmbed_Diffusers_G_Config"] | components["schemas"]["CLIPEmbed_Diffusers_L_Config"] | components["schemas"]["CLIPVision_Diffusers_Config"] | components["schemas"]["SigLIP_Diffusers_Config"] | components["schemas"]["FLUXRedux_Checkpoint_Config"] | components["schemas"]["LlavaOnevision_Diffusers_Config"] | components["schemas"]["Unknown_Config"]; + "application/json": components["schemas"]["Main_Diffusers_SD1_Config"] | components["schemas"]["Main_Diffusers_SD2_Config"] | components["schemas"]["Main_Diffusers_SDXL_Config"] | components["schemas"]["Main_Diffusers_SDXLRefiner_Config"] | components["schemas"]["Main_Diffusers_SD3_Config"] | components["schemas"]["Main_Diffusers_FLUX_Config"] | components["schemas"]["Main_Diffusers_Flux2_Config"] | components["schemas"]["Main_Diffusers_CogView4_Config"] | components["schemas"]["Main_Diffusers_ZImage_Config"] | components["schemas"]["Main_Checkpoint_SD1_Config"] | components["schemas"]["Main_Checkpoint_SD2_Config"] | components["schemas"]["Main_Checkpoint_SDXL_Config"] | components["schemas"]["Main_Checkpoint_SDXLRefiner_Config"] | components["schemas"]["Main_Checkpoint_Flux2_Config"] | components["schemas"]["Main_Checkpoint_FLUX_Config"] | components["schemas"]["Main_Checkpoint_ZImage_Config"] | components["schemas"]["Main_Checkpoint_Anima_Config"] | components["schemas"]["Main_BnBNF4_FLUX_Config"] | components["schemas"]["Main_GGUF_Flux2_Config"] | components["schemas"]["Main_GGUF_FLUX_Config"] | components["schemas"]["Main_GGUF_ZImage_Config"] | components["schemas"]["VAE_Checkpoint_SD1_Config"] | components["schemas"]["VAE_Checkpoint_SD2_Config"] | components["schemas"]["VAE_Checkpoint_SDXL_Config"] | components["schemas"]["VAE_Checkpoint_FLUX_Config"] | components["schemas"]["VAE_Checkpoint_Flux2_Config"] | 
components["schemas"]["VAE_Checkpoint_Anima_Config"] | components["schemas"]["VAE_Diffusers_SD1_Config"] | components["schemas"]["VAE_Diffusers_SDXL_Config"] | components["schemas"]["VAE_Diffusers_Flux2_Config"] | components["schemas"]["ControlNet_Checkpoint_SD1_Config"] | components["schemas"]["ControlNet_Checkpoint_SD2_Config"] | components["schemas"]["ControlNet_Checkpoint_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_FLUX_Config"] | components["schemas"]["ControlNet_Checkpoint_ZImage_Config"] | components["schemas"]["ControlNet_Diffusers_SD1_Config"] | components["schemas"]["ControlNet_Diffusers_SD2_Config"] | components["schemas"]["ControlNet_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_SD1_Config"] | components["schemas"]["LoRA_LyCORIS_SD2_Config"] | components["schemas"]["LoRA_LyCORIS_SDXL_Config"] | components["schemas"]["LoRA_LyCORIS_Flux2_Config"] | components["schemas"]["LoRA_LyCORIS_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_ZImage_Config"] | components["schemas"]["LoRA_LyCORIS_Anima_Config"] | components["schemas"]["LoRA_OMI_SDXL_Config"] | components["schemas"]["LoRA_OMI_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_SD1_Config"] | components["schemas"]["LoRA_Diffusers_SD2_Config"] | components["schemas"]["LoRA_Diffusers_SDXL_Config"] | components["schemas"]["LoRA_Diffusers_Flux2_Config"] | components["schemas"]["LoRA_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_ZImage_Config"] | components["schemas"]["ControlLoRA_LyCORIS_FLUX_Config"] | components["schemas"]["T5Encoder_T5Encoder_Config"] | components["schemas"]["T5Encoder_BnBLLMint8_Config"] | components["schemas"]["Qwen3Encoder_Qwen3Encoder_Config"] | components["schemas"]["Qwen3Encoder_Checkpoint_Config"] | components["schemas"]["Qwen3Encoder_GGUF_Config"] | components["schemas"]["TI_File_SD1_Config"] | components["schemas"]["TI_File_SD2_Config"] | 
components["schemas"]["TI_File_SDXL_Config"] | components["schemas"]["TI_Folder_SD1_Config"] | components["schemas"]["TI_Folder_SD2_Config"] | components["schemas"]["TI_Folder_SDXL_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD1_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD2_Config"] | components["schemas"]["IPAdapter_InvokeAI_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD1_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD2_Config"] | components["schemas"]["IPAdapter_Checkpoint_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_FLUX_Config"] | components["schemas"]["T2IAdapter_Diffusers_SD1_Config"] | components["schemas"]["T2IAdapter_Diffusers_SDXL_Config"] | components["schemas"]["Spandrel_Checkpoint_Config"] | components["schemas"]["CLIPEmbed_Diffusers_G_Config"] | components["schemas"]["CLIPEmbed_Diffusers_L_Config"] | components["schemas"]["CLIPVision_Diffusers_Config"] | components["schemas"]["SigLIP_Diffusers_Config"] | components["schemas"]["FLUXRedux_Checkpoint_Config"] | components["schemas"]["LlavaOnevision_Diffusers_Config"] | components["schemas"]["Unknown_Config"]; }; }; /** @description Validation Error */ @@ -29702,7 +29906,7 @@ export interface operations { [name: string]: unknown; }; content: { - "application/json": components["schemas"]["Main_Diffusers_SD1_Config"] | components["schemas"]["Main_Diffusers_SD2_Config"] | components["schemas"]["Main_Diffusers_SDXL_Config"] | components["schemas"]["Main_Diffusers_SDXLRefiner_Config"] | components["schemas"]["Main_Diffusers_SD3_Config"] | components["schemas"]["Main_Diffusers_FLUX_Config"] | components["schemas"]["Main_Diffusers_Flux2_Config"] | components["schemas"]["Main_Diffusers_CogView4_Config"] | components["schemas"]["Main_Diffusers_ZImage_Config"] | components["schemas"]["Main_Checkpoint_SD1_Config"] | components["schemas"]["Main_Checkpoint_SD2_Config"] | components["schemas"]["Main_Checkpoint_SDXL_Config"] | 
components["schemas"]["Main_Checkpoint_SDXLRefiner_Config"] | components["schemas"]["Main_Checkpoint_Flux2_Config"] | components["schemas"]["Main_Checkpoint_FLUX_Config"] | components["schemas"]["Main_Checkpoint_ZImage_Config"] | components["schemas"]["Main_Checkpoint_Anima_Config"] | components["schemas"]["Main_BnBNF4_FLUX_Config"] | components["schemas"]["Main_GGUF_Flux2_Config"] | components["schemas"]["Main_GGUF_FLUX_Config"] | components["schemas"]["Main_GGUF_ZImage_Config"] | components["schemas"]["VAE_Checkpoint_SD1_Config"] | components["schemas"]["VAE_Checkpoint_SD2_Config"] | components["schemas"]["VAE_Checkpoint_SDXL_Config"] | components["schemas"]["VAE_Checkpoint_FLUX_Config"] | components["schemas"]["VAE_Checkpoint_Flux2_Config"] | components["schemas"]["VAE_Checkpoint_Anima_Config"] | components["schemas"]["VAE_Diffusers_SD1_Config"] | components["schemas"]["VAE_Diffusers_SDXL_Config"] | components["schemas"]["VAE_Diffusers_Flux2_Config"] | components["schemas"]["ControlNet_Checkpoint_SD1_Config"] | components["schemas"]["ControlNet_Checkpoint_SD2_Config"] | components["schemas"]["ControlNet_Checkpoint_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_FLUX_Config"] | components["schemas"]["ControlNet_Checkpoint_ZImage_Config"] | components["schemas"]["ControlNet_Diffusers_SD1_Config"] | components["schemas"]["ControlNet_Diffusers_SD2_Config"] | components["schemas"]["ControlNet_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_SD1_Config"] | components["schemas"]["LoRA_LyCORIS_SD2_Config"] | components["schemas"]["LoRA_LyCORIS_SDXL_Config"] | components["schemas"]["LoRA_LyCORIS_Flux2_Config"] | components["schemas"]["LoRA_LyCORIS_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_ZImage_Config"] | components["schemas"]["LoRA_OMI_SDXL_Config"] | components["schemas"]["LoRA_OMI_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_SD1_Config"] | 
components["schemas"]["LoRA_Diffusers_SD2_Config"] | components["schemas"]["LoRA_Diffusers_SDXL_Config"] | components["schemas"]["LoRA_Diffusers_Flux2_Config"] | components["schemas"]["LoRA_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_ZImage_Config"] | components["schemas"]["ControlLoRA_LyCORIS_FLUX_Config"] | components["schemas"]["T5Encoder_T5Encoder_Config"] | components["schemas"]["T5Encoder_BnBLLMint8_Config"] | components["schemas"]["Qwen3Encoder_Qwen3Encoder_Config"] | components["schemas"]["Qwen3Encoder_Checkpoint_Config"] | components["schemas"]["Qwen3Encoder_GGUF_Config"] | components["schemas"]["TI_File_SD1_Config"] | components["schemas"]["TI_File_SD2_Config"] | components["schemas"]["TI_File_SDXL_Config"] | components["schemas"]["TI_Folder_SD1_Config"] | components["schemas"]["TI_Folder_SD2_Config"] | components["schemas"]["TI_Folder_SDXL_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD1_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD2_Config"] | components["schemas"]["IPAdapter_InvokeAI_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD1_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD2_Config"] | components["schemas"]["IPAdapter_Checkpoint_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_FLUX_Config"] | components["schemas"]["T2IAdapter_Diffusers_SD1_Config"] | components["schemas"]["T2IAdapter_Diffusers_SDXL_Config"] | components["schemas"]["Spandrel_Checkpoint_Config"] | components["schemas"]["CLIPEmbed_Diffusers_G_Config"] | components["schemas"]["CLIPEmbed_Diffusers_L_Config"] | components["schemas"]["CLIPVision_Diffusers_Config"] | components["schemas"]["SigLIP_Diffusers_Config"] | components["schemas"]["FLUXRedux_Checkpoint_Config"] | components["schemas"]["LlavaOnevision_Diffusers_Config"] | components["schemas"]["Unknown_Config"]; + "application/json": components["schemas"]["Main_Diffusers_SD1_Config"] | components["schemas"]["Main_Diffusers_SD2_Config"] | 
components["schemas"]["Main_Diffusers_SDXL_Config"] | components["schemas"]["Main_Diffusers_SDXLRefiner_Config"] | components["schemas"]["Main_Diffusers_SD3_Config"] | components["schemas"]["Main_Diffusers_FLUX_Config"] | components["schemas"]["Main_Diffusers_Flux2_Config"] | components["schemas"]["Main_Diffusers_CogView4_Config"] | components["schemas"]["Main_Diffusers_ZImage_Config"] | components["schemas"]["Main_Checkpoint_SD1_Config"] | components["schemas"]["Main_Checkpoint_SD2_Config"] | components["schemas"]["Main_Checkpoint_SDXL_Config"] | components["schemas"]["Main_Checkpoint_SDXLRefiner_Config"] | components["schemas"]["Main_Checkpoint_Flux2_Config"] | components["schemas"]["Main_Checkpoint_FLUX_Config"] | components["schemas"]["Main_Checkpoint_ZImage_Config"] | components["schemas"]["Main_Checkpoint_Anima_Config"] | components["schemas"]["Main_BnBNF4_FLUX_Config"] | components["schemas"]["Main_GGUF_Flux2_Config"] | components["schemas"]["Main_GGUF_FLUX_Config"] | components["schemas"]["Main_GGUF_ZImage_Config"] | components["schemas"]["VAE_Checkpoint_SD1_Config"] | components["schemas"]["VAE_Checkpoint_SD2_Config"] | components["schemas"]["VAE_Checkpoint_SDXL_Config"] | components["schemas"]["VAE_Checkpoint_FLUX_Config"] | components["schemas"]["VAE_Checkpoint_Flux2_Config"] | components["schemas"]["VAE_Checkpoint_Anima_Config"] | components["schemas"]["VAE_Diffusers_SD1_Config"] | components["schemas"]["VAE_Diffusers_SDXL_Config"] | components["schemas"]["VAE_Diffusers_Flux2_Config"] | components["schemas"]["ControlNet_Checkpoint_SD1_Config"] | components["schemas"]["ControlNet_Checkpoint_SD2_Config"] | components["schemas"]["ControlNet_Checkpoint_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_FLUX_Config"] | components["schemas"]["ControlNet_Checkpoint_ZImage_Config"] | components["schemas"]["ControlNet_Diffusers_SD1_Config"] | components["schemas"]["ControlNet_Diffusers_SD2_Config"] | 
components["schemas"]["ControlNet_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_SD1_Config"] | components["schemas"]["LoRA_LyCORIS_SD2_Config"] | components["schemas"]["LoRA_LyCORIS_SDXL_Config"] | components["schemas"]["LoRA_LyCORIS_Flux2_Config"] | components["schemas"]["LoRA_LyCORIS_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_ZImage_Config"] | components["schemas"]["LoRA_LyCORIS_Anima_Config"] | components["schemas"]["LoRA_OMI_SDXL_Config"] | components["schemas"]["LoRA_OMI_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_SD1_Config"] | components["schemas"]["LoRA_Diffusers_SD2_Config"] | components["schemas"]["LoRA_Diffusers_SDXL_Config"] | components["schemas"]["LoRA_Diffusers_Flux2_Config"] | components["schemas"]["LoRA_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_ZImage_Config"] | components["schemas"]["ControlLoRA_LyCORIS_FLUX_Config"] | components["schemas"]["T5Encoder_T5Encoder_Config"] | components["schemas"]["T5Encoder_BnBLLMint8_Config"] | components["schemas"]["Qwen3Encoder_Qwen3Encoder_Config"] | components["schemas"]["Qwen3Encoder_Checkpoint_Config"] | components["schemas"]["Qwen3Encoder_GGUF_Config"] | components["schemas"]["TI_File_SD1_Config"] | components["schemas"]["TI_File_SD2_Config"] | components["schemas"]["TI_File_SDXL_Config"] | components["schemas"]["TI_Folder_SD1_Config"] | components["schemas"]["TI_Folder_SD2_Config"] | components["schemas"]["TI_Folder_SDXL_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD1_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD2_Config"] | components["schemas"]["IPAdapter_InvokeAI_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD1_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD2_Config"] | components["schemas"]["IPAdapter_Checkpoint_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_FLUX_Config"] | components["schemas"]["T2IAdapter_Diffusers_SD1_Config"] | 
components["schemas"]["T2IAdapter_Diffusers_SDXL_Config"] | components["schemas"]["Spandrel_Checkpoint_Config"] | components["schemas"]["CLIPEmbed_Diffusers_G_Config"] | components["schemas"]["CLIPEmbed_Diffusers_L_Config"] | components["schemas"]["CLIPVision_Diffusers_Config"] | components["schemas"]["SigLIP_Diffusers_Config"] | components["schemas"]["FLUXRedux_Checkpoint_Config"] | components["schemas"]["LlavaOnevision_Diffusers_Config"] | components["schemas"]["Unknown_Config"]; }; }; /** @description Validation Error */ @@ -29752,7 +29956,7 @@ export interface operations { * "repo_variant": "fp16", * "upcast_attention": false * } */ - "application/json": components["schemas"]["Main_Diffusers_SD1_Config"] | components["schemas"]["Main_Diffusers_SD2_Config"] | components["schemas"]["Main_Diffusers_SDXL_Config"] | components["schemas"]["Main_Diffusers_SDXLRefiner_Config"] | components["schemas"]["Main_Diffusers_SD3_Config"] | components["schemas"]["Main_Diffusers_FLUX_Config"] | components["schemas"]["Main_Diffusers_Flux2_Config"] | components["schemas"]["Main_Diffusers_CogView4_Config"] | components["schemas"]["Main_Diffusers_ZImage_Config"] | components["schemas"]["Main_Checkpoint_SD1_Config"] | components["schemas"]["Main_Checkpoint_SD2_Config"] | components["schemas"]["Main_Checkpoint_SDXL_Config"] | components["schemas"]["Main_Checkpoint_SDXLRefiner_Config"] | components["schemas"]["Main_Checkpoint_Flux2_Config"] | components["schemas"]["Main_Checkpoint_FLUX_Config"] | components["schemas"]["Main_Checkpoint_ZImage_Config"] | components["schemas"]["Main_Checkpoint_Anima_Config"] | components["schemas"]["Main_BnBNF4_FLUX_Config"] | components["schemas"]["Main_GGUF_Flux2_Config"] | components["schemas"]["Main_GGUF_FLUX_Config"] | components["schemas"]["Main_GGUF_ZImage_Config"] | components["schemas"]["VAE_Checkpoint_SD1_Config"] | components["schemas"]["VAE_Checkpoint_SD2_Config"] | components["schemas"]["VAE_Checkpoint_SDXL_Config"] | 
components["schemas"]["VAE_Checkpoint_FLUX_Config"] | components["schemas"]["VAE_Checkpoint_Flux2_Config"] | components["schemas"]["VAE_Checkpoint_Anima_Config"] | components["schemas"]["VAE_Diffusers_SD1_Config"] | components["schemas"]["VAE_Diffusers_SDXL_Config"] | components["schemas"]["VAE_Diffusers_Flux2_Config"] | components["schemas"]["ControlNet_Checkpoint_SD1_Config"] | components["schemas"]["ControlNet_Checkpoint_SD2_Config"] | components["schemas"]["ControlNet_Checkpoint_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_FLUX_Config"] | components["schemas"]["ControlNet_Checkpoint_ZImage_Config"] | components["schemas"]["ControlNet_Diffusers_SD1_Config"] | components["schemas"]["ControlNet_Diffusers_SD2_Config"] | components["schemas"]["ControlNet_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_SD1_Config"] | components["schemas"]["LoRA_LyCORIS_SD2_Config"] | components["schemas"]["LoRA_LyCORIS_SDXL_Config"] | components["schemas"]["LoRA_LyCORIS_Flux2_Config"] | components["schemas"]["LoRA_LyCORIS_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_ZImage_Config"] | components["schemas"]["LoRA_OMI_SDXL_Config"] | components["schemas"]["LoRA_OMI_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_SD1_Config"] | components["schemas"]["LoRA_Diffusers_SD2_Config"] | components["schemas"]["LoRA_Diffusers_SDXL_Config"] | components["schemas"]["LoRA_Diffusers_Flux2_Config"] | components["schemas"]["LoRA_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_ZImage_Config"] | components["schemas"]["ControlLoRA_LyCORIS_FLUX_Config"] | components["schemas"]["T5Encoder_T5Encoder_Config"] | components["schemas"]["T5Encoder_BnBLLMint8_Config"] | components["schemas"]["Qwen3Encoder_Qwen3Encoder_Config"] | components["schemas"]["Qwen3Encoder_Checkpoint_Config"] | components["schemas"]["Qwen3Encoder_GGUF_Config"] | components["schemas"]["TI_File_SD1_Config"] | 
components["schemas"]["TI_File_SD2_Config"] | components["schemas"]["TI_File_SDXL_Config"] | components["schemas"]["TI_Folder_SD1_Config"] | components["schemas"]["TI_Folder_SD2_Config"] | components["schemas"]["TI_Folder_SDXL_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD1_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD2_Config"] | components["schemas"]["IPAdapter_InvokeAI_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD1_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD2_Config"] | components["schemas"]["IPAdapter_Checkpoint_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_FLUX_Config"] | components["schemas"]["T2IAdapter_Diffusers_SD1_Config"] | components["schemas"]["T2IAdapter_Diffusers_SDXL_Config"] | components["schemas"]["Spandrel_Checkpoint_Config"] | components["schemas"]["CLIPEmbed_Diffusers_G_Config"] | components["schemas"]["CLIPEmbed_Diffusers_L_Config"] | components["schemas"]["CLIPVision_Diffusers_Config"] | components["schemas"]["SigLIP_Diffusers_Config"] | components["schemas"]["FLUXRedux_Checkpoint_Config"] | components["schemas"]["LlavaOnevision_Diffusers_Config"] | components["schemas"]["Unknown_Config"]; + "application/json": components["schemas"]["Main_Diffusers_SD1_Config"] | components["schemas"]["Main_Diffusers_SD2_Config"] | components["schemas"]["Main_Diffusers_SDXL_Config"] | components["schemas"]["Main_Diffusers_SDXLRefiner_Config"] | components["schemas"]["Main_Diffusers_SD3_Config"] | components["schemas"]["Main_Diffusers_FLUX_Config"] | components["schemas"]["Main_Diffusers_Flux2_Config"] | components["schemas"]["Main_Diffusers_CogView4_Config"] | components["schemas"]["Main_Diffusers_ZImage_Config"] | components["schemas"]["Main_Checkpoint_SD1_Config"] | components["schemas"]["Main_Checkpoint_SD2_Config"] | components["schemas"]["Main_Checkpoint_SDXL_Config"] | components["schemas"]["Main_Checkpoint_SDXLRefiner_Config"] | components["schemas"]["Main_Checkpoint_Flux2_Config"] | 
components["schemas"]["Main_Checkpoint_FLUX_Config"] | components["schemas"]["Main_Checkpoint_ZImage_Config"] | components["schemas"]["Main_Checkpoint_Anima_Config"] | components["schemas"]["Main_BnBNF4_FLUX_Config"] | components["schemas"]["Main_GGUF_Flux2_Config"] | components["schemas"]["Main_GGUF_FLUX_Config"] | components["schemas"]["Main_GGUF_ZImage_Config"] | components["schemas"]["VAE_Checkpoint_SD1_Config"] | components["schemas"]["VAE_Checkpoint_SD2_Config"] | components["schemas"]["VAE_Checkpoint_SDXL_Config"] | components["schemas"]["VAE_Checkpoint_FLUX_Config"] | components["schemas"]["VAE_Checkpoint_Flux2_Config"] | components["schemas"]["VAE_Checkpoint_Anima_Config"] | components["schemas"]["VAE_Diffusers_SD1_Config"] | components["schemas"]["VAE_Diffusers_SDXL_Config"] | components["schemas"]["VAE_Diffusers_Flux2_Config"] | components["schemas"]["ControlNet_Checkpoint_SD1_Config"] | components["schemas"]["ControlNet_Checkpoint_SD2_Config"] | components["schemas"]["ControlNet_Checkpoint_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_FLUX_Config"] | components["schemas"]["ControlNet_Checkpoint_ZImage_Config"] | components["schemas"]["ControlNet_Diffusers_SD1_Config"] | components["schemas"]["ControlNet_Diffusers_SD2_Config"] | components["schemas"]["ControlNet_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_SD1_Config"] | components["schemas"]["LoRA_LyCORIS_SD2_Config"] | components["schemas"]["LoRA_LyCORIS_SDXL_Config"] | components["schemas"]["LoRA_LyCORIS_Flux2_Config"] | components["schemas"]["LoRA_LyCORIS_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_ZImage_Config"] | components["schemas"]["LoRA_LyCORIS_Anima_Config"] | components["schemas"]["LoRA_OMI_SDXL_Config"] | components["schemas"]["LoRA_OMI_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_SD1_Config"] | components["schemas"]["LoRA_Diffusers_SD2_Config"] | 
components["schemas"]["LoRA_Diffusers_SDXL_Config"] | components["schemas"]["LoRA_Diffusers_Flux2_Config"] | components["schemas"]["LoRA_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_ZImage_Config"] | components["schemas"]["ControlLoRA_LyCORIS_FLUX_Config"] | components["schemas"]["T5Encoder_T5Encoder_Config"] | components["schemas"]["T5Encoder_BnBLLMint8_Config"] | components["schemas"]["Qwen3Encoder_Qwen3Encoder_Config"] | components["schemas"]["Qwen3Encoder_Checkpoint_Config"] | components["schemas"]["Qwen3Encoder_GGUF_Config"] | components["schemas"]["TI_File_SD1_Config"] | components["schemas"]["TI_File_SD2_Config"] | components["schemas"]["TI_File_SDXL_Config"] | components["schemas"]["TI_Folder_SD1_Config"] | components["schemas"]["TI_Folder_SD2_Config"] | components["schemas"]["TI_Folder_SDXL_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD1_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD2_Config"] | components["schemas"]["IPAdapter_InvokeAI_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD1_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD2_Config"] | components["schemas"]["IPAdapter_Checkpoint_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_FLUX_Config"] | components["schemas"]["T2IAdapter_Diffusers_SD1_Config"] | components["schemas"]["T2IAdapter_Diffusers_SDXL_Config"] | components["schemas"]["Spandrel_Checkpoint_Config"] | components["schemas"]["CLIPEmbed_Diffusers_G_Config"] | components["schemas"]["CLIPEmbed_Diffusers_L_Config"] | components["schemas"]["CLIPVision_Diffusers_Config"] | components["schemas"]["SigLIP_Diffusers_Config"] | components["schemas"]["FLUXRedux_Checkpoint_Config"] | components["schemas"]["LlavaOnevision_Diffusers_Config"] | components["schemas"]["Unknown_Config"]; }; }; /** @description Bad request */ @@ -29857,7 +30061,7 @@ export interface operations { * "repo_variant": "fp16", * "upcast_attention": false * } */ - "application/json": 
components["schemas"]["Main_Diffusers_SD1_Config"] | components["schemas"]["Main_Diffusers_SD2_Config"] | components["schemas"]["Main_Diffusers_SDXL_Config"] | components["schemas"]["Main_Diffusers_SDXLRefiner_Config"] | components["schemas"]["Main_Diffusers_SD3_Config"] | components["schemas"]["Main_Diffusers_FLUX_Config"] | components["schemas"]["Main_Diffusers_Flux2_Config"] | components["schemas"]["Main_Diffusers_CogView4_Config"] | components["schemas"]["Main_Diffusers_ZImage_Config"] | components["schemas"]["Main_Checkpoint_SD1_Config"] | components["schemas"]["Main_Checkpoint_SD2_Config"] | components["schemas"]["Main_Checkpoint_SDXL_Config"] | components["schemas"]["Main_Checkpoint_SDXLRefiner_Config"] | components["schemas"]["Main_Checkpoint_Flux2_Config"] | components["schemas"]["Main_Checkpoint_FLUX_Config"] | components["schemas"]["Main_Checkpoint_ZImage_Config"] | components["schemas"]["Main_Checkpoint_Anima_Config"] | components["schemas"]["Main_BnBNF4_FLUX_Config"] | components["schemas"]["Main_GGUF_Flux2_Config"] | components["schemas"]["Main_GGUF_FLUX_Config"] | components["schemas"]["Main_GGUF_ZImage_Config"] | components["schemas"]["VAE_Checkpoint_SD1_Config"] | components["schemas"]["VAE_Checkpoint_SD2_Config"] | components["schemas"]["VAE_Checkpoint_SDXL_Config"] | components["schemas"]["VAE_Checkpoint_FLUX_Config"] | components["schemas"]["VAE_Checkpoint_Flux2_Config"] | components["schemas"]["VAE_Checkpoint_Anima_Config"] | components["schemas"]["VAE_Diffusers_SD1_Config"] | components["schemas"]["VAE_Diffusers_SDXL_Config"] | components["schemas"]["VAE_Diffusers_Flux2_Config"] | components["schemas"]["ControlNet_Checkpoint_SD1_Config"] | components["schemas"]["ControlNet_Checkpoint_SD2_Config"] | components["schemas"]["ControlNet_Checkpoint_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_FLUX_Config"] | components["schemas"]["ControlNet_Checkpoint_ZImage_Config"] | components["schemas"]["ControlNet_Diffusers_SD1_Config"] | 
components["schemas"]["ControlNet_Diffusers_SD2_Config"] | components["schemas"]["ControlNet_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_SD1_Config"] | components["schemas"]["LoRA_LyCORIS_SD2_Config"] | components["schemas"]["LoRA_LyCORIS_SDXL_Config"] | components["schemas"]["LoRA_LyCORIS_Flux2_Config"] | components["schemas"]["LoRA_LyCORIS_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_ZImage_Config"] | components["schemas"]["LoRA_OMI_SDXL_Config"] | components["schemas"]["LoRA_OMI_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_SD1_Config"] | components["schemas"]["LoRA_Diffusers_SD2_Config"] | components["schemas"]["LoRA_Diffusers_SDXL_Config"] | components["schemas"]["LoRA_Diffusers_Flux2_Config"] | components["schemas"]["LoRA_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_ZImage_Config"] | components["schemas"]["ControlLoRA_LyCORIS_FLUX_Config"] | components["schemas"]["T5Encoder_T5Encoder_Config"] | components["schemas"]["T5Encoder_BnBLLMint8_Config"] | components["schemas"]["Qwen3Encoder_Qwen3Encoder_Config"] | components["schemas"]["Qwen3Encoder_Checkpoint_Config"] | components["schemas"]["Qwen3Encoder_GGUF_Config"] | components["schemas"]["TI_File_SD1_Config"] | components["schemas"]["TI_File_SD2_Config"] | components["schemas"]["TI_File_SDXL_Config"] | components["schemas"]["TI_Folder_SD1_Config"] | components["schemas"]["TI_Folder_SD2_Config"] | components["schemas"]["TI_Folder_SDXL_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD1_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD2_Config"] | components["schemas"]["IPAdapter_InvokeAI_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD1_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD2_Config"] | components["schemas"]["IPAdapter_Checkpoint_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_FLUX_Config"] | 
components["schemas"]["T2IAdapter_Diffusers_SD1_Config"] | components["schemas"]["T2IAdapter_Diffusers_SDXL_Config"] | components["schemas"]["Spandrel_Checkpoint_Config"] | components["schemas"]["CLIPEmbed_Diffusers_G_Config"] | components["schemas"]["CLIPEmbed_Diffusers_L_Config"] | components["schemas"]["CLIPVision_Diffusers_Config"] | components["schemas"]["SigLIP_Diffusers_Config"] | components["schemas"]["FLUXRedux_Checkpoint_Config"] | components["schemas"]["LlavaOnevision_Diffusers_Config"] | components["schemas"]["Unknown_Config"]; + "application/json": components["schemas"]["Main_Diffusers_SD1_Config"] | components["schemas"]["Main_Diffusers_SD2_Config"] | components["schemas"]["Main_Diffusers_SDXL_Config"] | components["schemas"]["Main_Diffusers_SDXLRefiner_Config"] | components["schemas"]["Main_Diffusers_SD3_Config"] | components["schemas"]["Main_Diffusers_FLUX_Config"] | components["schemas"]["Main_Diffusers_Flux2_Config"] | components["schemas"]["Main_Diffusers_CogView4_Config"] | components["schemas"]["Main_Diffusers_ZImage_Config"] | components["schemas"]["Main_Checkpoint_SD1_Config"] | components["schemas"]["Main_Checkpoint_SD2_Config"] | components["schemas"]["Main_Checkpoint_SDXL_Config"] | components["schemas"]["Main_Checkpoint_SDXLRefiner_Config"] | components["schemas"]["Main_Checkpoint_Flux2_Config"] | components["schemas"]["Main_Checkpoint_FLUX_Config"] | components["schemas"]["Main_Checkpoint_ZImage_Config"] | components["schemas"]["Main_Checkpoint_Anima_Config"] | components["schemas"]["Main_BnBNF4_FLUX_Config"] | components["schemas"]["Main_GGUF_Flux2_Config"] | components["schemas"]["Main_GGUF_FLUX_Config"] | components["schemas"]["Main_GGUF_ZImage_Config"] | components["schemas"]["VAE_Checkpoint_SD1_Config"] | components["schemas"]["VAE_Checkpoint_SD2_Config"] | components["schemas"]["VAE_Checkpoint_SDXL_Config"] | components["schemas"]["VAE_Checkpoint_FLUX_Config"] | components["schemas"]["VAE_Checkpoint_Flux2_Config"] | 
components["schemas"]["VAE_Checkpoint_Anima_Config"] | components["schemas"]["VAE_Diffusers_SD1_Config"] | components["schemas"]["VAE_Diffusers_SDXL_Config"] | components["schemas"]["VAE_Diffusers_Flux2_Config"] | components["schemas"]["ControlNet_Checkpoint_SD1_Config"] | components["schemas"]["ControlNet_Checkpoint_SD2_Config"] | components["schemas"]["ControlNet_Checkpoint_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_FLUX_Config"] | components["schemas"]["ControlNet_Checkpoint_ZImage_Config"] | components["schemas"]["ControlNet_Diffusers_SD1_Config"] | components["schemas"]["ControlNet_Diffusers_SD2_Config"] | components["schemas"]["ControlNet_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_SD1_Config"] | components["schemas"]["LoRA_LyCORIS_SD2_Config"] | components["schemas"]["LoRA_LyCORIS_SDXL_Config"] | components["schemas"]["LoRA_LyCORIS_Flux2_Config"] | components["schemas"]["LoRA_LyCORIS_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_ZImage_Config"] | components["schemas"]["LoRA_LyCORIS_Anima_Config"] | components["schemas"]["LoRA_OMI_SDXL_Config"] | components["schemas"]["LoRA_OMI_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_SD1_Config"] | components["schemas"]["LoRA_Diffusers_SD2_Config"] | components["schemas"]["LoRA_Diffusers_SDXL_Config"] | components["schemas"]["LoRA_Diffusers_Flux2_Config"] | components["schemas"]["LoRA_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_ZImage_Config"] | components["schemas"]["ControlLoRA_LyCORIS_FLUX_Config"] | components["schemas"]["T5Encoder_T5Encoder_Config"] | components["schemas"]["T5Encoder_BnBLLMint8_Config"] | components["schemas"]["Qwen3Encoder_Qwen3Encoder_Config"] | components["schemas"]["Qwen3Encoder_Checkpoint_Config"] | components["schemas"]["Qwen3Encoder_GGUF_Config"] | components["schemas"]["TI_File_SD1_Config"] | components["schemas"]["TI_File_SD2_Config"] | 
components["schemas"]["TI_File_SDXL_Config"] | components["schemas"]["TI_Folder_SD1_Config"] | components["schemas"]["TI_Folder_SD2_Config"] | components["schemas"]["TI_Folder_SDXL_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD1_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD2_Config"] | components["schemas"]["IPAdapter_InvokeAI_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD1_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD2_Config"] | components["schemas"]["IPAdapter_Checkpoint_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_FLUX_Config"] | components["schemas"]["T2IAdapter_Diffusers_SD1_Config"] | components["schemas"]["T2IAdapter_Diffusers_SDXL_Config"] | components["schemas"]["Spandrel_Checkpoint_Config"] | components["schemas"]["CLIPEmbed_Diffusers_G_Config"] | components["schemas"]["CLIPEmbed_Diffusers_L_Config"] | components["schemas"]["CLIPVision_Diffusers_Config"] | components["schemas"]["SigLIP_Diffusers_Config"] | components["schemas"]["FLUXRedux_Checkpoint_Config"] | components["schemas"]["LlavaOnevision_Diffusers_Config"] | components["schemas"]["Unknown_Config"]; }; }; /** @description Bad request */ @@ -29928,7 +30132,7 @@ export interface operations { * "repo_variant": "fp16", * "upcast_attention": false * } */ - "application/json": components["schemas"]["Main_Diffusers_SD1_Config"] | components["schemas"]["Main_Diffusers_SD2_Config"] | components["schemas"]["Main_Diffusers_SDXL_Config"] | components["schemas"]["Main_Diffusers_SDXLRefiner_Config"] | components["schemas"]["Main_Diffusers_SD3_Config"] | components["schemas"]["Main_Diffusers_FLUX_Config"] | components["schemas"]["Main_Diffusers_Flux2_Config"] | components["schemas"]["Main_Diffusers_CogView4_Config"] | components["schemas"]["Main_Diffusers_ZImage_Config"] | components["schemas"]["Main_Checkpoint_SD1_Config"] | components["schemas"]["Main_Checkpoint_SD2_Config"] | components["schemas"]["Main_Checkpoint_SDXL_Config"] | 
components["schemas"]["Main_Checkpoint_SDXLRefiner_Config"] | components["schemas"]["Main_Checkpoint_Flux2_Config"] | components["schemas"]["Main_Checkpoint_FLUX_Config"] | components["schemas"]["Main_Checkpoint_ZImage_Config"] | components["schemas"]["Main_Checkpoint_Anima_Config"] | components["schemas"]["Main_BnBNF4_FLUX_Config"] | components["schemas"]["Main_GGUF_Flux2_Config"] | components["schemas"]["Main_GGUF_FLUX_Config"] | components["schemas"]["Main_GGUF_ZImage_Config"] | components["schemas"]["VAE_Checkpoint_SD1_Config"] | components["schemas"]["VAE_Checkpoint_SD2_Config"] | components["schemas"]["VAE_Checkpoint_SDXL_Config"] | components["schemas"]["VAE_Checkpoint_FLUX_Config"] | components["schemas"]["VAE_Checkpoint_Flux2_Config"] | components["schemas"]["VAE_Checkpoint_Anima_Config"] | components["schemas"]["VAE_Diffusers_SD1_Config"] | components["schemas"]["VAE_Diffusers_SDXL_Config"] | components["schemas"]["VAE_Diffusers_Flux2_Config"] | components["schemas"]["ControlNet_Checkpoint_SD1_Config"] | components["schemas"]["ControlNet_Checkpoint_SD2_Config"] | components["schemas"]["ControlNet_Checkpoint_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_FLUX_Config"] | components["schemas"]["ControlNet_Checkpoint_ZImage_Config"] | components["schemas"]["ControlNet_Diffusers_SD1_Config"] | components["schemas"]["ControlNet_Diffusers_SD2_Config"] | components["schemas"]["ControlNet_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_SD1_Config"] | components["schemas"]["LoRA_LyCORIS_SD2_Config"] | components["schemas"]["LoRA_LyCORIS_SDXL_Config"] | components["schemas"]["LoRA_LyCORIS_Flux2_Config"] | components["schemas"]["LoRA_LyCORIS_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_ZImage_Config"] | components["schemas"]["LoRA_OMI_SDXL_Config"] | components["schemas"]["LoRA_OMI_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_SD1_Config"] | 
components["schemas"]["LoRA_Diffusers_SD2_Config"] | components["schemas"]["LoRA_Diffusers_SDXL_Config"] | components["schemas"]["LoRA_Diffusers_Flux2_Config"] | components["schemas"]["LoRA_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_ZImage_Config"] | components["schemas"]["ControlLoRA_LyCORIS_FLUX_Config"] | components["schemas"]["T5Encoder_T5Encoder_Config"] | components["schemas"]["T5Encoder_BnBLLMint8_Config"] | components["schemas"]["Qwen3Encoder_Qwen3Encoder_Config"] | components["schemas"]["Qwen3Encoder_Checkpoint_Config"] | components["schemas"]["Qwen3Encoder_GGUF_Config"] | components["schemas"]["TI_File_SD1_Config"] | components["schemas"]["TI_File_SD2_Config"] | components["schemas"]["TI_File_SDXL_Config"] | components["schemas"]["TI_Folder_SD1_Config"] | components["schemas"]["TI_Folder_SD2_Config"] | components["schemas"]["TI_Folder_SDXL_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD1_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD2_Config"] | components["schemas"]["IPAdapter_InvokeAI_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD1_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD2_Config"] | components["schemas"]["IPAdapter_Checkpoint_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_FLUX_Config"] | components["schemas"]["T2IAdapter_Diffusers_SD1_Config"] | components["schemas"]["T2IAdapter_Diffusers_SDXL_Config"] | components["schemas"]["Spandrel_Checkpoint_Config"] | components["schemas"]["CLIPEmbed_Diffusers_G_Config"] | components["schemas"]["CLIPEmbed_Diffusers_L_Config"] | components["schemas"]["CLIPVision_Diffusers_Config"] | components["schemas"]["SigLIP_Diffusers_Config"] | components["schemas"]["FLUXRedux_Checkpoint_Config"] | components["schemas"]["LlavaOnevision_Diffusers_Config"] | components["schemas"]["Unknown_Config"]; + "application/json": components["schemas"]["Main_Diffusers_SD1_Config"] | components["schemas"]["Main_Diffusers_SD2_Config"] | 
components["schemas"]["Main_Diffusers_SDXL_Config"] | components["schemas"]["Main_Diffusers_SDXLRefiner_Config"] | components["schemas"]["Main_Diffusers_SD3_Config"] | components["schemas"]["Main_Diffusers_FLUX_Config"] | components["schemas"]["Main_Diffusers_Flux2_Config"] | components["schemas"]["Main_Diffusers_CogView4_Config"] | components["schemas"]["Main_Diffusers_ZImage_Config"] | components["schemas"]["Main_Checkpoint_SD1_Config"] | components["schemas"]["Main_Checkpoint_SD2_Config"] | components["schemas"]["Main_Checkpoint_SDXL_Config"] | components["schemas"]["Main_Checkpoint_SDXLRefiner_Config"] | components["schemas"]["Main_Checkpoint_Flux2_Config"] | components["schemas"]["Main_Checkpoint_FLUX_Config"] | components["schemas"]["Main_Checkpoint_ZImage_Config"] | components["schemas"]["Main_Checkpoint_Anima_Config"] | components["schemas"]["Main_BnBNF4_FLUX_Config"] | components["schemas"]["Main_GGUF_Flux2_Config"] | components["schemas"]["Main_GGUF_FLUX_Config"] | components["schemas"]["Main_GGUF_ZImage_Config"] | components["schemas"]["VAE_Checkpoint_SD1_Config"] | components["schemas"]["VAE_Checkpoint_SD2_Config"] | components["schemas"]["VAE_Checkpoint_SDXL_Config"] | components["schemas"]["VAE_Checkpoint_FLUX_Config"] | components["schemas"]["VAE_Checkpoint_Flux2_Config"] | components["schemas"]["VAE_Checkpoint_Anima_Config"] | components["schemas"]["VAE_Diffusers_SD1_Config"] | components["schemas"]["VAE_Diffusers_SDXL_Config"] | components["schemas"]["VAE_Diffusers_Flux2_Config"] | components["schemas"]["ControlNet_Checkpoint_SD1_Config"] | components["schemas"]["ControlNet_Checkpoint_SD2_Config"] | components["schemas"]["ControlNet_Checkpoint_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_FLUX_Config"] | components["schemas"]["ControlNet_Checkpoint_ZImage_Config"] | components["schemas"]["ControlNet_Diffusers_SD1_Config"] | components["schemas"]["ControlNet_Diffusers_SD2_Config"] | 
components["schemas"]["ControlNet_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_SD1_Config"] | components["schemas"]["LoRA_LyCORIS_SD2_Config"] | components["schemas"]["LoRA_LyCORIS_SDXL_Config"] | components["schemas"]["LoRA_LyCORIS_Flux2_Config"] | components["schemas"]["LoRA_LyCORIS_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_ZImage_Config"] | components["schemas"]["LoRA_LyCORIS_Anima_Config"] | components["schemas"]["LoRA_OMI_SDXL_Config"] | components["schemas"]["LoRA_OMI_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_SD1_Config"] | components["schemas"]["LoRA_Diffusers_SD2_Config"] | components["schemas"]["LoRA_Diffusers_SDXL_Config"] | components["schemas"]["LoRA_Diffusers_Flux2_Config"] | components["schemas"]["LoRA_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_ZImage_Config"] | components["schemas"]["ControlLoRA_LyCORIS_FLUX_Config"] | components["schemas"]["T5Encoder_T5Encoder_Config"] | components["schemas"]["T5Encoder_BnBLLMint8_Config"] | components["schemas"]["Qwen3Encoder_Qwen3Encoder_Config"] | components["schemas"]["Qwen3Encoder_Checkpoint_Config"] | components["schemas"]["Qwen3Encoder_GGUF_Config"] | components["schemas"]["TI_File_SD1_Config"] | components["schemas"]["TI_File_SD2_Config"] | components["schemas"]["TI_File_SDXL_Config"] | components["schemas"]["TI_Folder_SD1_Config"] | components["schemas"]["TI_Folder_SD2_Config"] | components["schemas"]["TI_Folder_SDXL_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD1_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD2_Config"] | components["schemas"]["IPAdapter_InvokeAI_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD1_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD2_Config"] | components["schemas"]["IPAdapter_Checkpoint_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_FLUX_Config"] | components["schemas"]["T2IAdapter_Diffusers_SD1_Config"] | 
components["schemas"]["T2IAdapter_Diffusers_SDXL_Config"] | components["schemas"]["Spandrel_Checkpoint_Config"] | components["schemas"]["CLIPEmbed_Diffusers_G_Config"] | components["schemas"]["CLIPEmbed_Diffusers_L_Config"] | components["schemas"]["CLIPVision_Diffusers_Config"] | components["schemas"]["SigLIP_Diffusers_Config"] | components["schemas"]["FLUXRedux_Checkpoint_Config"] | components["schemas"]["LlavaOnevision_Diffusers_Config"] | components["schemas"]["Unknown_Config"]; }; }; /** @description Bad request */ @@ -30628,7 +30832,7 @@ export interface operations { * "repo_variant": "fp16", * "upcast_attention": false * } */ - "application/json": components["schemas"]["Main_Diffusers_SD1_Config"] | components["schemas"]["Main_Diffusers_SD2_Config"] | components["schemas"]["Main_Diffusers_SDXL_Config"] | components["schemas"]["Main_Diffusers_SDXLRefiner_Config"] | components["schemas"]["Main_Diffusers_SD3_Config"] | components["schemas"]["Main_Diffusers_FLUX_Config"] | components["schemas"]["Main_Diffusers_Flux2_Config"] | components["schemas"]["Main_Diffusers_CogView4_Config"] | components["schemas"]["Main_Diffusers_ZImage_Config"] | components["schemas"]["Main_Checkpoint_SD1_Config"] | components["schemas"]["Main_Checkpoint_SD2_Config"] | components["schemas"]["Main_Checkpoint_SDXL_Config"] | components["schemas"]["Main_Checkpoint_SDXLRefiner_Config"] | components["schemas"]["Main_Checkpoint_Flux2_Config"] | components["schemas"]["Main_Checkpoint_FLUX_Config"] | components["schemas"]["Main_Checkpoint_ZImage_Config"] | components["schemas"]["Main_Checkpoint_Anima_Config"] | components["schemas"]["Main_BnBNF4_FLUX_Config"] | components["schemas"]["Main_GGUF_Flux2_Config"] | components["schemas"]["Main_GGUF_FLUX_Config"] | components["schemas"]["Main_GGUF_ZImage_Config"] | components["schemas"]["VAE_Checkpoint_SD1_Config"] | components["schemas"]["VAE_Checkpoint_SD2_Config"] | components["schemas"]["VAE_Checkpoint_SDXL_Config"] | 
components["schemas"]["VAE_Checkpoint_FLUX_Config"] | components["schemas"]["VAE_Checkpoint_Flux2_Config"] | components["schemas"]["VAE_Checkpoint_Anima_Config"] | components["schemas"]["VAE_Diffusers_SD1_Config"] | components["schemas"]["VAE_Diffusers_SDXL_Config"] | components["schemas"]["VAE_Diffusers_Flux2_Config"] | components["schemas"]["ControlNet_Checkpoint_SD1_Config"] | components["schemas"]["ControlNet_Checkpoint_SD2_Config"] | components["schemas"]["ControlNet_Checkpoint_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_FLUX_Config"] | components["schemas"]["ControlNet_Checkpoint_ZImage_Config"] | components["schemas"]["ControlNet_Diffusers_SD1_Config"] | components["schemas"]["ControlNet_Diffusers_SD2_Config"] | components["schemas"]["ControlNet_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_SD1_Config"] | components["schemas"]["LoRA_LyCORIS_SD2_Config"] | components["schemas"]["LoRA_LyCORIS_SDXL_Config"] | components["schemas"]["LoRA_LyCORIS_Flux2_Config"] | components["schemas"]["LoRA_LyCORIS_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_ZImage_Config"] | components["schemas"]["LoRA_OMI_SDXL_Config"] | components["schemas"]["LoRA_OMI_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_SD1_Config"] | components["schemas"]["LoRA_Diffusers_SD2_Config"] | components["schemas"]["LoRA_Diffusers_SDXL_Config"] | components["schemas"]["LoRA_Diffusers_Flux2_Config"] | components["schemas"]["LoRA_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_ZImage_Config"] | components["schemas"]["ControlLoRA_LyCORIS_FLUX_Config"] | components["schemas"]["T5Encoder_T5Encoder_Config"] | components["schemas"]["T5Encoder_BnBLLMint8_Config"] | components["schemas"]["Qwen3Encoder_Qwen3Encoder_Config"] | components["schemas"]["Qwen3Encoder_Checkpoint_Config"] | components["schemas"]["Qwen3Encoder_GGUF_Config"] | components["schemas"]["TI_File_SD1_Config"] | 
components["schemas"]["TI_File_SD2_Config"] | components["schemas"]["TI_File_SDXL_Config"] | components["schemas"]["TI_Folder_SD1_Config"] | components["schemas"]["TI_Folder_SD2_Config"] | components["schemas"]["TI_Folder_SDXL_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD1_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD2_Config"] | components["schemas"]["IPAdapter_InvokeAI_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD1_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD2_Config"] | components["schemas"]["IPAdapter_Checkpoint_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_FLUX_Config"] | components["schemas"]["T2IAdapter_Diffusers_SD1_Config"] | components["schemas"]["T2IAdapter_Diffusers_SDXL_Config"] | components["schemas"]["Spandrel_Checkpoint_Config"] | components["schemas"]["CLIPEmbed_Diffusers_G_Config"] | components["schemas"]["CLIPEmbed_Diffusers_L_Config"] | components["schemas"]["CLIPVision_Diffusers_Config"] | components["schemas"]["SigLIP_Diffusers_Config"] | components["schemas"]["FLUXRedux_Checkpoint_Config"] | components["schemas"]["LlavaOnevision_Diffusers_Config"] | components["schemas"]["Unknown_Config"]; + "application/json": components["schemas"]["Main_Diffusers_SD1_Config"] | components["schemas"]["Main_Diffusers_SD2_Config"] | components["schemas"]["Main_Diffusers_SDXL_Config"] | components["schemas"]["Main_Diffusers_SDXLRefiner_Config"] | components["schemas"]["Main_Diffusers_SD3_Config"] | components["schemas"]["Main_Diffusers_FLUX_Config"] | components["schemas"]["Main_Diffusers_Flux2_Config"] | components["schemas"]["Main_Diffusers_CogView4_Config"] | components["schemas"]["Main_Diffusers_ZImage_Config"] | components["schemas"]["Main_Checkpoint_SD1_Config"] | components["schemas"]["Main_Checkpoint_SD2_Config"] | components["schemas"]["Main_Checkpoint_SDXL_Config"] | components["schemas"]["Main_Checkpoint_SDXLRefiner_Config"] | components["schemas"]["Main_Checkpoint_Flux2_Config"] | 
components["schemas"]["Main_Checkpoint_FLUX_Config"] | components["schemas"]["Main_Checkpoint_ZImage_Config"] | components["schemas"]["Main_Checkpoint_Anima_Config"] | components["schemas"]["Main_BnBNF4_FLUX_Config"] | components["schemas"]["Main_GGUF_Flux2_Config"] | components["schemas"]["Main_GGUF_FLUX_Config"] | components["schemas"]["Main_GGUF_ZImage_Config"] | components["schemas"]["VAE_Checkpoint_SD1_Config"] | components["schemas"]["VAE_Checkpoint_SD2_Config"] | components["schemas"]["VAE_Checkpoint_SDXL_Config"] | components["schemas"]["VAE_Checkpoint_FLUX_Config"] | components["schemas"]["VAE_Checkpoint_Flux2_Config"] | components["schemas"]["VAE_Checkpoint_Anima_Config"] | components["schemas"]["VAE_Diffusers_SD1_Config"] | components["schemas"]["VAE_Diffusers_SDXL_Config"] | components["schemas"]["VAE_Diffusers_Flux2_Config"] | components["schemas"]["ControlNet_Checkpoint_SD1_Config"] | components["schemas"]["ControlNet_Checkpoint_SD2_Config"] | components["schemas"]["ControlNet_Checkpoint_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_FLUX_Config"] | components["schemas"]["ControlNet_Checkpoint_ZImage_Config"] | components["schemas"]["ControlNet_Diffusers_SD1_Config"] | components["schemas"]["ControlNet_Diffusers_SD2_Config"] | components["schemas"]["ControlNet_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_SD1_Config"] | components["schemas"]["LoRA_LyCORIS_SD2_Config"] | components["schemas"]["LoRA_LyCORIS_SDXL_Config"] | components["schemas"]["LoRA_LyCORIS_Flux2_Config"] | components["schemas"]["LoRA_LyCORIS_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_ZImage_Config"] | components["schemas"]["LoRA_LyCORIS_Anima_Config"] | components["schemas"]["LoRA_OMI_SDXL_Config"] | components["schemas"]["LoRA_OMI_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_SD1_Config"] | components["schemas"]["LoRA_Diffusers_SD2_Config"] | 
components["schemas"]["LoRA_Diffusers_SDXL_Config"] | components["schemas"]["LoRA_Diffusers_Flux2_Config"] | components["schemas"]["LoRA_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_ZImage_Config"] | components["schemas"]["ControlLoRA_LyCORIS_FLUX_Config"] | components["schemas"]["T5Encoder_T5Encoder_Config"] | components["schemas"]["T5Encoder_BnBLLMint8_Config"] | components["schemas"]["Qwen3Encoder_Qwen3Encoder_Config"] | components["schemas"]["Qwen3Encoder_Checkpoint_Config"] | components["schemas"]["Qwen3Encoder_GGUF_Config"] | components["schemas"]["TI_File_SD1_Config"] | components["schemas"]["TI_File_SD2_Config"] | components["schemas"]["TI_File_SDXL_Config"] | components["schemas"]["TI_Folder_SD1_Config"] | components["schemas"]["TI_Folder_SD2_Config"] | components["schemas"]["TI_Folder_SDXL_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD1_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD2_Config"] | components["schemas"]["IPAdapter_InvokeAI_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD1_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD2_Config"] | components["schemas"]["IPAdapter_Checkpoint_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_FLUX_Config"] | components["schemas"]["T2IAdapter_Diffusers_SD1_Config"] | components["schemas"]["T2IAdapter_Diffusers_SDXL_Config"] | components["schemas"]["Spandrel_Checkpoint_Config"] | components["schemas"]["CLIPEmbed_Diffusers_G_Config"] | components["schemas"]["CLIPEmbed_Diffusers_L_Config"] | components["schemas"]["CLIPVision_Diffusers_Config"] | components["schemas"]["SigLIP_Diffusers_Config"] | components["schemas"]["FLUXRedux_Checkpoint_Config"] | components["schemas"]["LlavaOnevision_Diffusers_Config"] | components["schemas"]["Unknown_Config"]; }; }; /** @description Bad request */ From c1bf4171307997315874b7b8ac703cc5b98227e7 Mon Sep 17 00:00:00 2001 From: Your Name Date: Thu, 12 Mar 2026 05:09:43 -0400 Subject: [PATCH 06/14] last fixes --- 
invokeai/frontend/web/public/locales/en.json | 2 ++ .../web/src/features/queue/store/readiness.ts | 18 ++++++++++++++++++ 2 files changed, 20 insertions(+) diff --git a/invokeai/frontend/web/public/locales/en.json b/invokeai/frontend/web/public/locales/en.json index c66be903997..8c75043b027 100644 --- a/invokeai/frontend/web/public/locales/en.json +++ b/invokeai/frontend/web/public/locales/en.json @@ -1503,6 +1503,8 @@ "noQwen3EncoderModelSelected": "No Qwen3 Encoder model selected for FLUX2 Klein generation", "noZImageVaeSourceSelected": "No VAE source: Select VAE (FLUX) or Qwen3 Source model", "noZImageQwen3EncoderSourceSelected": "No Qwen3 Encoder source: Select Qwen3 Encoder or Qwen3 Source model", + "noAnimaVaeModelSelected": "No Anima VAE model selected", + "noAnimaQwen3EncoderModelSelected": "No Anima Qwen3 Encoder model selected", "fluxModelIncompatibleBboxWidth": "$t(parameters.invoke.fluxRequiresDimensionsToBeMultipleOf16), bbox width is {{width}}", "fluxModelIncompatibleBboxHeight": "$t(parameters.invoke.fluxRequiresDimensionsToBeMultipleOf16), bbox height is {{height}}", "fluxModelIncompatibleScaledBboxWidth": "$t(parameters.invoke.fluxRequiresDimensionsToBeMultipleOf16), scaled bbox width is {{width}}", diff --git a/invokeai/frontend/web/src/features/queue/store/readiness.ts b/invokeai/frontend/web/src/features/queue/store/readiness.ts index 8fa97eff4a9..67f7ef7ddde 100644 --- a/invokeai/frontend/web/src/features/queue/store/readiness.ts +++ b/invokeai/frontend/web/src/features/queue/store/readiness.ts @@ -270,6 +270,15 @@ const getReasonsWhyCannotEnqueueGenerateTab = (arg: { } } + if (model?.base === 'anima') { + if (!params.animaVaeModel) { + reasons.push({ content: i18n.t('parameters.invoke.noAnimaVaeModelSelected') }); + } + if (!params.animaQwen3EncoderModel) { + reasons.push({ content: i18n.t('parameters.invoke.noAnimaQwen3EncoderModelSelected') }); + } + } + if (model) { for (const lora of loras.filter(({ isEnabled }) => isEnabled === true)) { 
if (model.base !== lora.model.base) { @@ -648,6 +657,15 @@ const getReasonsWhyCannotEnqueueCanvasTab = (arg: { } } + if (model?.base === 'anima') { + if (!params.animaVaeModel) { + reasons.push({ content: i18n.t('parameters.invoke.noAnimaVaeModelSelected') }); + } + if (!params.animaQwen3EncoderModel) { + reasons.push({ content: i18n.t('parameters.invoke.noAnimaQwen3EncoderModelSelected') }); + } + } + if (model) { for (const lora of loras.filter(({ isEnabled }) => isEnabled === true)) { if (model.base !== lora.model.base) { From d81a299870e0ee7b24b22ae7e189cf4530bd667e Mon Sep 17 00:00:00 2001 From: Your Name Date: Thu, 12 Mar 2026 05:29:52 -0400 Subject: [PATCH 07/14] tests --- tests/app/invocations/test_anima_denoise.py | 109 ++++++++++ .../anima_lora_kohya_format.py | 42 ++++ .../anima_lora_kohya_with_te_format.py | 34 +++ .../anima_lora_lokr_format.py | 18 ++ .../anima_lora_peft_format.py | 19 ++ .../test_anima_lora_conversion_utils.py | 193 ++++++++++++++++++ 6 files changed, 415 insertions(+) create mode 100644 tests/app/invocations/test_anima_denoise.py create mode 100644 tests/backend/patches/lora_conversions/lora_state_dicts/anima_lora_kohya_format.py create mode 100644 tests/backend/patches/lora_conversions/lora_state_dicts/anima_lora_kohya_with_te_format.py create mode 100644 tests/backend/patches/lora_conversions/lora_state_dicts/anima_lora_lokr_format.py create mode 100644 tests/backend/patches/lora_conversions/lora_state_dicts/anima_lora_peft_format.py create mode 100644 tests/backend/patches/lora_conversions/test_anima_lora_conversion_utils.py diff --git a/tests/app/invocations/test_anima_denoise.py b/tests/app/invocations/test_anima_denoise.py new file mode 100644 index 00000000000..4a1e134421c --- /dev/null +++ b/tests/app/invocations/test_anima_denoise.py @@ -0,0 +1,109 @@ +import pytest + +from invokeai.app.invocations.anima_denoise import ( + ANIMA_SHIFT, + AnimaDenoiseInvocation, + inverse_time_snr_shift, + time_snr_shift, +) + + +class 
TestTimeSnrShift: + """Test the time-SNR shift function used for Anima's noise schedule.""" + + def test_shift_1_is_identity(self): + """With alpha=1.0, time_snr_shift should be identity.""" + for t in [0.0, 0.25, 0.5, 0.75, 1.0]: + assert time_snr_shift(1.0, t) == t + + def test_shift_at_zero(self): + """At t=0, shifted sigma should be 0 regardless of alpha.""" + assert time_snr_shift(3.0, 0.0) == 0.0 + + def test_shift_at_one(self): + """At t=1, shifted sigma should be 1 regardless of alpha.""" + assert time_snr_shift(3.0, 1.0) == pytest.approx(1.0) + + def test_shift_3_increases_sigma(self): + """With alpha=3.0, sigma should be larger than t (spends more time at high noise).""" + for t in [0.1, 0.25, 0.5, 0.75, 0.9]: + sigma = time_snr_shift(3.0, t) + assert sigma > t, f"At t={t}, sigma={sigma} should be > t" + + def test_shift_monotonic(self): + """Shifted sigmas should be monotonically increasing with t.""" + prev = 0.0 + for i in range(1, 101): + t = i / 100.0 + sigma = time_snr_shift(3.0, t) + assert sigma > prev, f"Not monotonic at t={t}" + prev = sigma + + def test_known_value(self): + """Test a known value: at t=0.5, alpha=3.0, sigma = 3*0.5 / (1 + 2*0.5) = 0.75.""" + assert time_snr_shift(3.0, 0.5) == pytest.approx(0.75) + + +class TestInverseTimeSnrShift: + """Test the inverse time-SNR shift (used for inpainting mask correction).""" + + def test_inverse_shift_1_is_identity(self): + """With alpha=1.0, inverse should be identity.""" + for sigma in [0.0, 0.25, 0.5, 0.75, 1.0]: + assert inverse_time_snr_shift(1.0, sigma) == sigma + + def test_roundtrip(self): + """shift(inverse(sigma)) should recover sigma, and inverse(shift(t)) should recover t.""" + for t in [0.0, 0.1, 0.25, 0.5, 0.75, 0.9, 1.0]: + sigma = time_snr_shift(3.0, t) + recovered_t = inverse_time_snr_shift(3.0, sigma) + assert recovered_t == pytest.approx(t, abs=1e-7), ( + f"Roundtrip failed: t={t} -> sigma={sigma} -> recovered_t={recovered_t}" + ) + + def test_known_value(self): + """At 
sigma=0.75, alpha=3.0, t should be 0.5 (inverse of the known shift value).""" + assert inverse_time_snr_shift(3.0, 0.75) == pytest.approx(0.5) + + +class TestGetSigmas: + """Test the sigma schedule generation.""" + + def test_schedule_length(self): + """Schedule should have num_steps + 1 entries.""" + inv = AnimaDenoiseInvocation( + positive_conditioning=None, # type: ignore + transformer=None, # type: ignore + ) + sigmas = inv._get_sigmas(30) + assert len(sigmas) == 31 + + def test_schedule_endpoints(self): + """Schedule should start near 1.0 and end at 0.0.""" + inv = AnimaDenoiseInvocation( + positive_conditioning=None, # type: ignore + transformer=None, # type: ignore + ) + sigmas = inv._get_sigmas(30) + assert sigmas[0] == pytest.approx(time_snr_shift(ANIMA_SHIFT, 1.0)) + assert sigmas[-1] == pytest.approx(0.0) + + def test_schedule_monotonically_decreasing(self): + """Sigmas should decrease from noise to clean.""" + inv = AnimaDenoiseInvocation( + positive_conditioning=None, # type: ignore + transformer=None, # type: ignore + ) + sigmas = inv._get_sigmas(30) + for i in range(len(sigmas) - 1): + assert sigmas[i] > sigmas[i + 1], f"Not decreasing at index {i}: {sigmas[i]} <= {sigmas[i + 1]}" + + def test_schedule_uses_shift(self): + """With shift=3.0, middle sigmas should be larger than the linear midpoint.""" + inv = AnimaDenoiseInvocation( + positive_conditioning=None, # type: ignore + transformer=None, # type: ignore + ) + sigmas = inv._get_sigmas(10) + # At step 5/10, linear t = 0.5, shifted sigma should be 0.75 + assert sigmas[5] == pytest.approx(time_snr_shift(3.0, 0.5)) diff --git a/tests/backend/patches/lora_conversions/lora_state_dicts/anima_lora_kohya_format.py b/tests/backend/patches/lora_conversions/lora_state_dicts/anima_lora_kohya_format.py new file mode 100644 index 00000000000..3bb3ec00f76 --- /dev/null +++ b/tests/backend/patches/lora_conversions/lora_state_dicts/anima_lora_kohya_format.py @@ -0,0 +1,42 @@ +# A sample state dict in the Kohya 
Anima LoRA format. +# These keys are based on Anima LoRAs targeting the Cosmos Predict2 DiT transformer. +# Keys follow the pattern: lora_unet_blocks_{N}_{component}.{suffix} +state_dict_keys: dict[str, list[int]] = { + # Block 0 - cross attention + "lora_unet_blocks_0_cross_attn_k_proj.lora_down.weight": [8, 2048], + "lora_unet_blocks_0_cross_attn_k_proj.lora_up.weight": [2048, 8], + "lora_unet_blocks_0_cross_attn_k_proj.alpha": [], + "lora_unet_blocks_0_cross_attn_q_proj.lora_down.weight": [8, 2048], + "lora_unet_blocks_0_cross_attn_q_proj.lora_up.weight": [2048, 8], + "lora_unet_blocks_0_cross_attn_q_proj.alpha": [], + "lora_unet_blocks_0_cross_attn_v_proj.lora_down.weight": [8, 2048], + "lora_unet_blocks_0_cross_attn_v_proj.lora_up.weight": [2048, 8], + "lora_unet_blocks_0_cross_attn_v_proj.alpha": [], + "lora_unet_blocks_0_cross_attn_output_proj.lora_down.weight": [8, 2048], + "lora_unet_blocks_0_cross_attn_output_proj.lora_up.weight": [2048, 8], + "lora_unet_blocks_0_cross_attn_output_proj.alpha": [], + # Block 0 - self attention + "lora_unet_blocks_0_self_attn_k_proj.lora_down.weight": [8, 2048], + "lora_unet_blocks_0_self_attn_k_proj.lora_up.weight": [2048, 8], + "lora_unet_blocks_0_self_attn_k_proj.alpha": [], + "lora_unet_blocks_0_self_attn_q_proj.lora_down.weight": [8, 2048], + "lora_unet_blocks_0_self_attn_q_proj.lora_up.weight": [2048, 8], + "lora_unet_blocks_0_self_attn_q_proj.alpha": [], + "lora_unet_blocks_0_self_attn_v_proj.lora_down.weight": [8, 2048], + "lora_unet_blocks_0_self_attn_v_proj.lora_up.weight": [2048, 8], + "lora_unet_blocks_0_self_attn_v_proj.alpha": [], + "lora_unet_blocks_0_self_attn_output_proj.lora_down.weight": [8, 2048], + "lora_unet_blocks_0_self_attn_output_proj.lora_up.weight": [2048, 8], + "lora_unet_blocks_0_self_attn_output_proj.alpha": [], + # Block 0 - MLP + "lora_unet_blocks_0_mlp_layer1.lora_down.weight": [8, 2048], + "lora_unet_blocks_0_mlp_layer1.lora_up.weight": [8192, 8], + "lora_unet_blocks_0_mlp_layer1.alpha": 
[], + "lora_unet_blocks_0_mlp_layer2.lora_down.weight": [8, 8192], + "lora_unet_blocks_0_mlp_layer2.lora_up.weight": [2048, 8], + "lora_unet_blocks_0_mlp_layer2.alpha": [], + # Block 0 - adaln modulation + "lora_unet_blocks_0_adaln_modulation_cross_attn_1.lora_down.weight": [8, 2048], + "lora_unet_blocks_0_adaln_modulation_cross_attn_1.lora_up.weight": [2048, 8], + "lora_unet_blocks_0_adaln_modulation_cross_attn_1.alpha": [], +} diff --git a/tests/backend/patches/lora_conversions/lora_state_dicts/anima_lora_kohya_with_te_format.py b/tests/backend/patches/lora_conversions/lora_state_dicts/anima_lora_kohya_with_te_format.py new file mode 100644 index 00000000000..9499690f28a --- /dev/null +++ b/tests/backend/patches/lora_conversions/lora_state_dicts/anima_lora_kohya_with_te_format.py @@ -0,0 +1,34 @@ +# A sample state dict in the Kohya Anima LoRA format with Qwen3 text encoder layers. +# Contains both lora_unet_ (transformer) and lora_te_ (Qwen3 encoder) keys. +state_dict_keys: dict[str, list[int]] = { + # Transformer block 0 - cross attention + "lora_unet_blocks_0_cross_attn_k_proj.lora_down.weight": [8, 2048], + "lora_unet_blocks_0_cross_attn_k_proj.lora_up.weight": [2048, 8], + "lora_unet_blocks_0_cross_attn_k_proj.alpha": [], + "lora_unet_blocks_0_cross_attn_q_proj.lora_down.weight": [8, 2048], + "lora_unet_blocks_0_cross_attn_q_proj.lora_up.weight": [2048, 8], + "lora_unet_blocks_0_cross_attn_q_proj.alpha": [], + # Qwen3 text encoder layer 0 - self attention + "lora_te_layers_0_self_attn_q_proj.lora_down.weight": [8, 1024], + "lora_te_layers_0_self_attn_q_proj.lora_up.weight": [1024, 8], + "lora_te_layers_0_self_attn_q_proj.alpha": [], + "lora_te_layers_0_self_attn_k_proj.lora_down.weight": [8, 1024], + "lora_te_layers_0_self_attn_k_proj.lora_up.weight": [1024, 8], + "lora_te_layers_0_self_attn_k_proj.alpha": [], + "lora_te_layers_0_self_attn_v_proj.lora_down.weight": [8, 1024], + "lora_te_layers_0_self_attn_v_proj.lora_up.weight": [1024, 8], + 
"lora_te_layers_0_self_attn_v_proj.alpha": [], + "lora_te_layers_0_self_attn_o_proj.lora_down.weight": [8, 1024], + "lora_te_layers_0_self_attn_o_proj.lora_up.weight": [1024, 8], + "lora_te_layers_0_self_attn_o_proj.alpha": [], + # Qwen3 text encoder layer 0 - MLP + "lora_te_layers_0_mlp_gate_proj.lora_down.weight": [8, 1024], + "lora_te_layers_0_mlp_gate_proj.lora_up.weight": [2816, 8], + "lora_te_layers_0_mlp_gate_proj.alpha": [], + "lora_te_layers_0_mlp_down_proj.lora_down.weight": [8, 2816], + "lora_te_layers_0_mlp_down_proj.lora_up.weight": [1024, 8], + "lora_te_layers_0_mlp_down_proj.alpha": [], + "lora_te_layers_0_mlp_up_proj.lora_down.weight": [8, 1024], + "lora_te_layers_0_mlp_up_proj.lora_up.weight": [2816, 8], + "lora_te_layers_0_mlp_up_proj.alpha": [], +} diff --git a/tests/backend/patches/lora_conversions/lora_state_dicts/anima_lora_lokr_format.py b/tests/backend/patches/lora_conversions/lora_state_dicts/anima_lora_lokr_format.py new file mode 100644 index 00000000000..089208cca62 --- /dev/null +++ b/tests/backend/patches/lora_conversions/lora_state_dicts/anima_lora_lokr_format.py @@ -0,0 +1,18 @@ +# A sample state dict in the LoKR Anima LoRA format (with DoRA). +# Some Anima LoRAs use LoKR weights (lokr_w1/lokr_w2) combined with DoRA (dora_scale). +# The dora_scale should be stripped from LoKR layers during conversion. 
+state_dict_keys: dict[str, list[int]] = { + # Block 0 - cross attention with LoKR + DoRA + "diffusion_model.blocks.0.cross_attn.k_proj.lokr_w1": [2048, 8], + "diffusion_model.blocks.0.cross_attn.k_proj.lokr_w2": [8, 2048], + "diffusion_model.blocks.0.cross_attn.k_proj.alpha": [], + "diffusion_model.blocks.0.cross_attn.k_proj.dora_scale": [2048], + "diffusion_model.blocks.0.cross_attn.q_proj.lokr_w1": [2048, 8], + "diffusion_model.blocks.0.cross_attn.q_proj.lokr_w2": [8, 2048], + "diffusion_model.blocks.0.cross_attn.q_proj.alpha": [], + "diffusion_model.blocks.0.cross_attn.q_proj.dora_scale": [2048], + # Block 0 - self attention with LoKR (no DoRA) + "diffusion_model.blocks.0.self_attn.k_proj.lokr_w1": [2048, 8], + "diffusion_model.blocks.0.self_attn.k_proj.lokr_w2": [8, 2048], + "diffusion_model.blocks.0.self_attn.k_proj.alpha": [], +} diff --git a/tests/backend/patches/lora_conversions/lora_state_dicts/anima_lora_peft_format.py b/tests/backend/patches/lora_conversions/lora_state_dicts/anima_lora_peft_format.py new file mode 100644 index 00000000000..0edcd876f12 --- /dev/null +++ b/tests/backend/patches/lora_conversions/lora_state_dicts/anima_lora_peft_format.py @@ -0,0 +1,19 @@ +# A sample state dict in the diffusers PEFT Anima LoRA format. 
+# Keys follow the pattern: diffusion_model.blocks.{N}.{component}.lora_{A|B}.weight +state_dict_keys: dict[str, list[int]] = { + # Block 0 - cross attention + "diffusion_model.blocks.0.cross_attn.k_proj.lora_A.weight": [8, 2048], + "diffusion_model.blocks.0.cross_attn.k_proj.lora_B.weight": [2048, 8], + "diffusion_model.blocks.0.cross_attn.q_proj.lora_A.weight": [8, 2048], + "diffusion_model.blocks.0.cross_attn.q_proj.lora_B.weight": [2048, 8], + "diffusion_model.blocks.0.cross_attn.v_proj.lora_A.weight": [8, 2048], + "diffusion_model.blocks.0.cross_attn.v_proj.lora_B.weight": [2048, 8], + # Block 0 - self attention + "diffusion_model.blocks.0.self_attn.k_proj.lora_A.weight": [8, 2048], + "diffusion_model.blocks.0.self_attn.k_proj.lora_B.weight": [2048, 8], + "diffusion_model.blocks.0.self_attn.q_proj.lora_A.weight": [8, 2048], + "diffusion_model.blocks.0.self_attn.q_proj.lora_B.weight": [2048, 8], + # Block 0 - MLP + "diffusion_model.blocks.0.mlp.layer1.lora_A.weight": [8, 2048], + "diffusion_model.blocks.0.mlp.layer1.lora_B.weight": [8192, 8], +} diff --git a/tests/backend/patches/lora_conversions/test_anima_lora_conversion_utils.py b/tests/backend/patches/lora_conversions/test_anima_lora_conversion_utils.py new file mode 100644 index 00000000000..10666619d8d --- /dev/null +++ b/tests/backend/patches/lora_conversions/test_anima_lora_conversion_utils.py @@ -0,0 +1,193 @@ +import pytest +import torch + +from invokeai.backend.patches.lora_conversions.anima_lora_constants import ( + ANIMA_LORA_QWEN3_PREFIX, + ANIMA_LORA_TRANSFORMER_PREFIX, +) +from invokeai.backend.patches.lora_conversions.anima_lora_conversion_utils import ( + _convert_kohya_te_key, + _convert_kohya_unet_key, + is_state_dict_likely_anima_lora, + lora_model_from_anima_state_dict, +) +from tests.backend.patches.lora_conversions.lora_state_dicts.anima_lora_kohya_format import ( + state_dict_keys as anima_kohya_keys, +) +from 
tests.backend.patches.lora_conversions.lora_state_dicts.anima_lora_kohya_with_te_format import ( + state_dict_keys as anima_kohya_te_keys, +) +from tests.backend.patches.lora_conversions.lora_state_dicts.anima_lora_lokr_format import ( + state_dict_keys as anima_lokr_keys, +) +from tests.backend.patches.lora_conversions.lora_state_dicts.anima_lora_peft_format import ( + state_dict_keys as anima_peft_keys, +) +from tests.backend.patches.lora_conversions.lora_state_dicts.utils import keys_to_mock_state_dict + + +# --- Detection Tests --- + + +@pytest.mark.parametrize( + "sd_keys", + [anima_kohya_keys, anima_kohya_te_keys, anima_peft_keys, anima_lokr_keys], + ids=["kohya", "kohya_te", "peft", "lokr"], +) +def test_is_state_dict_likely_anima_lora_true(sd_keys: dict[str, list[int]]): + """Test that is_state_dict_likely_anima_lora() correctly identifies Anima LoRA state dicts.""" + state_dict = keys_to_mock_state_dict(sd_keys) + assert is_state_dict_likely_anima_lora(state_dict) + + +def test_is_state_dict_likely_anima_lora_false_for_flux(): + """Test that is_state_dict_likely_anima_lora() returns False for a FLUX LoRA state dict.""" + state_dict = { + "lora_unet_double_blocks_0_img_attn_proj.lora_down.weight": torch.empty([16, 3072]), + "lora_unet_double_blocks_0_img_attn_proj.lora_up.weight": torch.empty([3072, 16]), + } + assert not is_state_dict_likely_anima_lora(state_dict) + + +def test_is_state_dict_likely_anima_lora_false_for_random(): + """Test that is_state_dict_likely_anima_lora() returns False for unrelated state dicts.""" + state_dict = { + "some_random_key.weight": torch.empty([64, 64]), + "another_key.bias": torch.empty([64]), + } + assert not is_state_dict_likely_anima_lora(state_dict) + + +# --- Kohya Key Conversion Tests --- + + +@pytest.mark.parametrize( + ["kohya_key", "expected"], + [ + ("lora_unet_blocks_0_cross_attn_k_proj", "blocks.0.cross_attn.k_proj"), + ("lora_unet_blocks_0_cross_attn_q_proj", "blocks.0.cross_attn.q_proj"), + 
("lora_unet_blocks_0_cross_attn_v_proj", "blocks.0.cross_attn.v_proj"), + ("lora_unet_blocks_0_cross_attn_output_proj", "blocks.0.cross_attn.output_proj"), + ("lora_unet_blocks_0_self_attn_k_proj", "blocks.0.self_attn.k_proj"), + ("lora_unet_blocks_0_self_attn_q_proj", "blocks.0.self_attn.q_proj"), + ("lora_unet_blocks_0_self_attn_v_proj", "blocks.0.self_attn.v_proj"), + ("lora_unet_blocks_0_self_attn_output_proj", "blocks.0.self_attn.output_proj"), + ("lora_unet_blocks_0_mlp_layer1", "blocks.0.mlp.layer1"), + ("lora_unet_blocks_0_mlp_layer2", "blocks.0.mlp.layer2"), + ("lora_unet_blocks_27_cross_attn_k_proj", "blocks.27.cross_attn.k_proj"), + ("lora_unet_blocks_0_adaln_modulation_cross_attn_1", "blocks.0.adaln_modulation_cross_attn.1"), + ("lora_unet_blocks_0_adaln_modulation_self_attn_1", "blocks.0.adaln_modulation_self_attn.1"), + ("lora_unet_blocks_0_adaln_modulation_mlp_1", "blocks.0.adaln_modulation_mlp.1"), + ], +) +def test_convert_kohya_unet_key(kohya_key: str, expected: str): + """Test that Kohya unet keys are correctly converted to model parameter paths.""" + assert _convert_kohya_unet_key(kohya_key) == expected + + +@pytest.mark.parametrize( + ["kohya_key", "expected"], + [ + ("lora_te_layers_0_self_attn_q_proj", "model.layers.0.self_attn.q_proj"), + ("lora_te_layers_0_self_attn_k_proj", "model.layers.0.self_attn.k_proj"), + ("lora_te_layers_0_self_attn_v_proj", "model.layers.0.self_attn.v_proj"), + ("lora_te_layers_0_self_attn_o_proj", "model.layers.0.self_attn.o_proj"), + ("lora_te_layers_0_mlp_gate_proj", "model.layers.0.mlp.gate_proj"), + ("lora_te_layers_0_mlp_down_proj", "model.layers.0.mlp.down_proj"), + ("lora_te_layers_0_mlp_up_proj", "model.layers.0.mlp.up_proj"), + ("lora_te_layers_15_self_attn_q_proj", "model.layers.15.self_attn.q_proj"), + ], +) +def test_convert_kohya_te_key(kohya_key: str, expected: str): + """Test that Kohya TE keys are correctly converted to Qwen3 model parameter paths. 
+ + The Qwen3 text encoder is loaded as Qwen3ForCausalLM which wraps the base model + under a `model.` prefix, so all converted paths must include it. + """ + assert _convert_kohya_te_key(kohya_key) == expected + + +# --- End-to-End Conversion Tests --- + + +@pytest.mark.parametrize( + "sd_keys", + [anima_kohya_keys, anima_kohya_te_keys, anima_peft_keys, anima_lokr_keys], + ids=["kohya", "kohya_te", "peft", "lokr"], +) +def test_lora_model_from_anima_state_dict(sd_keys: dict[str, list[int]]): + """Test that a ModelPatchRaw can be created from all supported Anima LoRA formats.""" + state_dict = keys_to_mock_state_dict(sd_keys) + lora_model = lora_model_from_anima_state_dict(state_dict) + assert len(lora_model.layers) > 0 + + +def test_kohya_unet_keys_get_transformer_prefix(): + """Test that Kohya unet keys are prefixed with the transformer prefix.""" + state_dict = keys_to_mock_state_dict(anima_kohya_keys) + lora_model = lora_model_from_anima_state_dict(state_dict) + + for key in lora_model.layers.keys(): + assert key.startswith(ANIMA_LORA_TRANSFORMER_PREFIX), ( + f"Expected transformer prefix '{ANIMA_LORA_TRANSFORMER_PREFIX}', got key: {key}" + ) + + +def test_kohya_te_keys_get_qwen3_prefix(): + """Test that Kohya TE keys are prefixed with the Qwen3 prefix.""" + state_dict = keys_to_mock_state_dict(anima_kohya_te_keys) + lora_model = lora_model_from_anima_state_dict(state_dict) + + has_transformer_keys = False + has_qwen3_keys = False + for key in lora_model.layers.keys(): + if key.startswith(ANIMA_LORA_TRANSFORMER_PREFIX): + has_transformer_keys = True + elif key.startswith(ANIMA_LORA_QWEN3_PREFIX): + has_qwen3_keys = True + else: + raise AssertionError(f"Key has unexpected prefix: {key}") + + assert has_transformer_keys, "Expected at least one transformer key" + assert has_qwen3_keys, "Expected at least one Qwen3 key" + + +def test_qwen3_keys_include_model_prefix(): + """Test that converted Qwen3 TE keys include 'model.' 
prefix for Qwen3ForCausalLM.""" + state_dict = keys_to_mock_state_dict(anima_kohya_te_keys) + lora_model = lora_model_from_anima_state_dict(state_dict) + + for key in lora_model.layers.keys(): + if key.startswith(ANIMA_LORA_QWEN3_PREFIX): + inner_key = key[len(ANIMA_LORA_QWEN3_PREFIX):] + assert inner_key.startswith("model."), ( + f"Qwen3 key should start with 'model.' after prefix, got: {inner_key}" + ) + + +def test_lokr_dora_keys_dont_crash(): + """Test that LoKR layers with dora_scale don't cause a KeyError. + + Some Anima LoRAs combine DoRA (dora_scale) with LoKR (lokr_w1/lokr_w2). + The dora_scale should be stripped from LoKR layers since shared code + doesn't support DoRA+LoKR combination. + """ + state_dict = keys_to_mock_state_dict(anima_lokr_keys) + lora_model = lora_model_from_anima_state_dict(state_dict) + assert len(lora_model.layers) > 0 + + +def test_peft_keys_get_transformer_prefix(): + """Test that diffusers PEFT keys are prefixed with the transformer prefix.""" + state_dict = keys_to_mock_state_dict(anima_peft_keys) + lora_model = lora_model_from_anima_state_dict(state_dict) + + for key in lora_model.layers.keys(): + assert key.startswith(ANIMA_LORA_TRANSFORMER_PREFIX), ( + f"Expected transformer prefix, got key: {key}" + ) + # Verify the diffusion_model. prefix is stripped + inner_key = key[len(ANIMA_LORA_TRANSFORMER_PREFIX):] + assert not inner_key.startswith("diffusion_model."), ( + f"diffusion_model. 
prefix should be stripped, got: {inner_key}" + ) From 3bcc1bb438af445753dd9bf7bdd0c86a97b5842a Mon Sep 17 00:00:00 2001 From: Your Name Date: Thu, 12 Mar 2026 06:10:54 -0400 Subject: [PATCH 08/14] fix attributions --- invokeai/backend/anima/__init__.py | 2 +- invokeai/backend/anima/anima_transformer.py | 27 ++++++++++++--------- 2 files changed, 16 insertions(+), 13 deletions(-) diff --git a/invokeai/backend/anima/__init__.py b/invokeai/backend/anima/__init__.py index 380f67416b7..fc65f30fd92 100644 --- a/invokeai/backend/anima/__init__.py +++ b/invokeai/backend/anima/__init__.py @@ -1,4 +1,4 @@ -# Copyright (c) 2024, Thhe InvokeAI Development Team +# Copyright (c) 2024, The InvokeAI Development Team """Anima model backend module. Anima is a 2B-parameter anime-focused text-to-image model built on NVIDIA's diff --git a/invokeai/backend/anima/anima_transformer.py b/invokeai/backend/anima/anima_transformer.py index e8779582b5c..e25ef247cf7 100644 --- a/invokeai/backend/anima/anima_transformer.py +++ b/invokeai/backend/anima/anima_transformer.py @@ -1,19 +1,16 @@ """Anima transformer model: Cosmos Predict2 MiniTrainDIT + LLM Adapter. -Ported from the ComfyUI implementation: -- comfy/ldm/cosmos/predict2.py (MiniTrainDIT backbone) -- comfy/ldm/anima/model.py (LLMAdapter + Anima wrapper) - The Anima architecture combines: 1. MiniTrainDIT: A Cosmos Predict2 DiT backbone with 28 blocks, 2048-dim hidden state, and 3D RoPE positional embeddings. 2. LLMAdapter: A 6-layer cross-attention transformer that fuses Qwen3 0.6B hidden states with learned T5-XXL token embeddings to produce conditioning for the DiT. 
-References: -- https://github.com/comfyanonymous/ComfyUI/blob/master/comfy/ldm/cosmos/predict2.py -- https://github.com/comfyanonymous/ComfyUI/blob/master/comfy/ldm/anima/model.py -- https://github.com/nvidia-cosmos/cosmos-predict2 +Original source code: +- MiniTrainDIT backbone and positional embeddings: https://github.com/nvidia-cosmos/cosmos-predict2 + SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + SPDX-License-Identifier: Apache-2.0 +- LLMAdapter and Anima wrapper: https://github.com/comfyanonymous/ComfyUI/blob/master/comfy/ldm/anima/model.py """ import logging @@ -30,7 +27,9 @@ # ============================================================================ -# Positional Embeddings (from comfy/ldm/cosmos/position_embedding.py) +# Positional Embeddings +# Original source: https://github.com/nvidia-cosmos/cosmos-predict2 +# Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. Apache-2.0 # ============================================================================ @@ -195,7 +194,9 @@ def generate_embeddings( # ============================================================================ -# Cosmos Predict2 MiniTrainDIT (from comfy/ldm/cosmos/predict2.py) +# Cosmos Predict2 MiniTrainDIT +# Original source: https://github.com/nvidia-cosmos/cosmos-predict2 +# Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. 
Apache-2.0 # ============================================================================ @@ -740,7 +741,8 @@ def forward( # ============================================================================ -# LLM Adapter (from comfy/ldm/anima/model.py) +# LLM Adapter +# Source: https://github.com/comfyanonymous/ComfyUI/blob/master/comfy/ldm/anima/model.py # ============================================================================ @@ -979,7 +981,8 @@ def forward( # ============================================================================ -# Anima: MiniTrainDIT + LLMAdapter (from comfy/ldm/anima/model.py) +# Anima: MiniTrainDIT + LLMAdapter +# Source: https://github.com/comfyanonymous/ComfyUI/blob/master/comfy/ldm/anima/model.py # ============================================================================ From 44d99e5e0fba590fc782a6ae18763a2400e2439a Mon Sep 17 00:00:00 2001 From: Your Name Date: Thu, 12 Mar 2026 06:11:45 -0400 Subject: [PATCH 09/14] fix attributions --- invokeai/backend/anima/__init__.py | 1 - 1 file changed, 1 deletion(-) diff --git a/invokeai/backend/anima/__init__.py b/invokeai/backend/anima/__init__.py index fc65f30fd92..01a1a952e96 100644 --- a/invokeai/backend/anima/__init__.py +++ b/invokeai/backend/anima/__init__.py @@ -1,4 +1,3 @@ -# Copyright (c) 2024, The InvokeAI Development Team """Anima model backend module. 
Anima is a 2B-parameter anime-focused text-to-image model built on NVIDIA's From be8660afc2d71ec583f1a279a48a233ee70ca270 Mon Sep 17 00:00:00 2001 From: Your Name Date: Thu, 12 Mar 2026 23:43:23 -0400 Subject: [PATCH 10/14] refactor to use diffusers reference --- invokeai/backend/anima/anima_transformer.py | 255 +++++++++----------- 1 file changed, 113 insertions(+), 142 deletions(-) diff --git a/invokeai/backend/anima/anima_transformer.py b/invokeai/backend/anima/anima_transformer.py index e25ef247cf7..6c852cd4fba 100644 --- a/invokeai/backend/anima/anima_transformer.py +++ b/invokeai/backend/anima/anima_transformer.py @@ -10,7 +10,8 @@ - MiniTrainDIT backbone and positional embeddings: https://github.com/nvidia-cosmos/cosmos-predict2 SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. SPDX-License-Identifier: Apache-2.0 -- LLMAdapter and Anima wrapper: https://github.com/comfyanonymous/ComfyUI/blob/master/comfy/ldm/anima/model.py +- LLMAdapter and Anima wrapper: Clean-room implementation based on + https://github.com/hdae/diffusers-anima (Apache-2.0) """ import logging @@ -742,54 +743,48 @@ def forward( # ============================================================================ # LLM Adapter -# Source: https://github.com/comfyanonymous/ComfyUI/blob/master/comfy/ldm/anima/model.py +# Reference implementation: https://github.com/hdae/diffusers-anima +# SPDX-License-Identifier: Apache-2.0 # ============================================================================ def _rotate_half(x: torch.Tensor) -> torch.Tensor: - x1 = x[..., : x.shape[-1] // 2] - x2 = x[..., x.shape[-1] // 2 :] - return torch.cat((-x2, x1), dim=-1) + """Split the last dimension in half and negate-swap: [-x2, x1].""" + half = x.shape[-1] // 2 + first, second = x[..., :half], x[..., half:] + return torch.cat((-second, first), dim=-1) -def _apply_rotary_pos_emb_llm( - x: torch.Tensor, cos: torch.Tensor, sin: torch.Tensor, unsqueeze_dim: int = 1 -) -> 
torch.Tensor: - cos = cos.unsqueeze(unsqueeze_dim) - sin = sin.unsqueeze(unsqueeze_dim) - return (x * cos) + (_rotate_half(x) * sin) +def _apply_rope(x: torch.Tensor, cos: torch.Tensor, sin: torch.Tensor) -> torch.Tensor: + """Apply rotary position embeddings to tensor x given precomputed cos/sin.""" + return (x * cos.unsqueeze(1)) + (_rotate_half(x) * sin.unsqueeze(1)) class LLMAdapterRotaryEmbedding(nn.Module): """Rotary position embedding for the LLM Adapter's attention layers.""" - def __init__(self, head_dim: int): + def __init__(self, head_dim: int, theta: float = 10000.0): super().__init__() - self.rope_theta = 10000 - inv_freq = 1.0 / (self.rope_theta ** (torch.arange(0, head_dim, 2, dtype=torch.int64).float() / head_dim)) + half_dim = head_dim // 2 + index = torch.arange(half_dim, dtype=torch.float32) + exponent = (2.0 / float(head_dim)) * index + inv_freq = torch.reciprocal( + torch.pow(torch.tensor(theta, dtype=torch.float32), exponent) + ) self.register_buffer("inv_freq", inv_freq, persistent=False) - @torch.no_grad() - def forward(self, x: torch.Tensor, position_ids: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: - inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device) - position_ids_expanded = position_ids[:, None, :].float() - - device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu" - with torch.autocast(device_type=device_type, enabled=False): - freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2) - emb = torch.cat((freqs, freqs), dim=-1) - cos = emb.cos() - sin = emb.sin() - - return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype) + def forward( + self, x: torch.Tensor, position_ids: torch.Tensor + ) -> Tuple[torch.Tensor, torch.Tensor]: + pos = position_ids.to(device=x.device, dtype=torch.float32) + inv = self.inv_freq.to(device=x.device, dtype=torch.float32) + freqs = torch.einsum("bl,d->bld", pos, inv) + emb = 
freqs.repeat(1, 1, 2) + return emb.cos().to(dtype=x.dtype), emb.sin().to(dtype=x.dtype) class LLMAdapterAttention(nn.Module): - """Attention for the LLM Adapter's transformer blocks. - - Supports both self-attention and cross-attention with separate rotary - position embeddings for query and key sequences. - """ + """Attention for the LLM Adapter with QK normalization and rotary position embeddings.""" def __init__(self, query_dim: int, context_dim: int, n_heads: int, head_dim: int): super().__init__() @@ -807,40 +802,47 @@ def __init__(self, query_dim: int, context_dim: int, n_heads: int, head_dim: int def forward( self, x: torch.Tensor, - mask: Optional[torch.Tensor] = None, + *, context: Optional[torch.Tensor] = None, - position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, - position_embeddings_context: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, + attn_mask: Optional[torch.Tensor] = None, + pos_q: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, + pos_k: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, ) -> torch.Tensor: context = x if context is None else context - input_shape = x.shape[:-1] - q_shape = (*input_shape, self.n_heads, self.head_dim) - context_shape = context.shape[:-1] - kv_shape = (*context_shape, self.n_heads, self.head_dim) - query_states = self.q_norm(self.q_proj(x).view(q_shape)).transpose(1, 2) - key_states = self.k_norm(self.k_proj(context).view(kv_shape)).transpose(1, 2) - value_states = self.v_proj(context).view(kv_shape).transpose(1, 2) + q = ( + self.q_proj(x) + .view(x.shape[0], x.shape[1], self.n_heads, self.head_dim) + .transpose(1, 2) + ) + k = ( + self.k_proj(context) + .view(context.shape[0], context.shape[1], self.n_heads, self.head_dim) + .transpose(1, 2) + ) + v = ( + self.v_proj(context) + .view(context.shape[0], context.shape[1], self.n_heads, self.head_dim) + .transpose(1, 2) + ) + + q = self.q_norm(q) + k = self.k_norm(k) - if position_embeddings is not None: - assert 
position_embeddings_context is not None - cos, sin = position_embeddings - query_states = _apply_rotary_pos_emb_llm(query_states, cos, sin) - cos, sin = position_embeddings_context - key_states = _apply_rotary_pos_emb_llm(key_states, cos, sin) + if pos_q is not None and pos_k is not None: + q = _apply_rope(q, *pos_q) + k = _apply_rope(k, *pos_k) - attn_output = F.scaled_dot_product_attention(query_states, key_states, value_states, attn_mask=mask) - attn_output = attn_output.transpose(1, 2).reshape(*input_shape, -1).contiguous() - return self.o_proj(attn_output) + y = F.scaled_dot_product_attention(q, k, v, attn_mask=attn_mask) + y = y.transpose(1, 2).reshape(x.shape[0], x.shape[1], -1).contiguous() + return self.o_proj(y) class LLMAdapterTransformerBlock(nn.Module): """Single transformer block in the LLM Adapter. - Each block contains: - - Optional self-attention on the target (T5 embedding) sequence - - Cross-attention: target queries attend to source (Qwen3) keys/values - - MLP with GELU activation + Each block contains self-attention, cross-attention, and MLP with + RMSNorm pre-normalization. 
""" def __init__( @@ -848,51 +850,46 @@ def __init__( source_dim: int, model_dim: int, num_heads: int = 16, - mlp_ratio: float = 4.0, - use_self_attn: bool = False, ): super().__init__() - self.use_self_attn = use_self_attn head_dim = model_dim // num_heads - if self.use_self_attn: - self.norm_self_attn = nn.RMSNorm(model_dim, eps=1e-6) - self.self_attn = LLMAdapterAttention(model_dim, model_dim, num_heads, head_dim) + self.norm_self_attn = nn.RMSNorm(model_dim, eps=1e-6) + self.self_attn = LLMAdapterAttention(model_dim, model_dim, num_heads, head_dim) self.norm_cross_attn = nn.RMSNorm(model_dim, eps=1e-6) self.cross_attn = LLMAdapterAttention(model_dim, source_dim, num_heads, head_dim) self.norm_mlp = nn.RMSNorm(model_dim, eps=1e-6) self.mlp = nn.Sequential( - nn.Linear(model_dim, int(model_dim * mlp_ratio)), + nn.Linear(model_dim, model_dim * 4), nn.GELU(), - nn.Linear(int(model_dim * mlp_ratio), model_dim), + nn.Linear(model_dim * 4, model_dim), ) def forward( self, x: torch.Tensor, + *, context: torch.Tensor, - target_attention_mask: Optional[torch.Tensor] = None, - source_attention_mask: Optional[torch.Tensor] = None, - position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, - position_embeddings_context: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, + target_mask: Optional[torch.Tensor] = None, + source_mask: Optional[torch.Tensor] = None, + pos_target: Tuple[torch.Tensor, torch.Tensor], + pos_source: Tuple[torch.Tensor, torch.Tensor], ) -> torch.Tensor: - if self.use_self_attn: - normed = self.norm_self_attn(x) - attn_out = self.self_attn( - normed, mask=target_attention_mask, - position_embeddings=position_embeddings, position_embeddings_context=position_embeddings, - ) - x = x + attn_out - - normed = self.norm_cross_attn(x) - attn_out = self.cross_attn( - normed, mask=source_attention_mask, context=context, - position_embeddings=position_embeddings, position_embeddings_context=position_embeddings_context, + x = x + self.self_attn( + 
self.norm_self_attn(x), + attn_mask=target_mask, + pos_q=pos_target, + pos_k=pos_target, + ) + x = x + self.cross_attn( + self.norm_cross_attn(x), + context=context, + attn_mask=source_mask, + pos_q=pos_target, + pos_k=pos_source, ) - x = x + attn_out - x = x + self.mlp(self.norm_mlp(x)) return x @@ -900,48 +897,32 @@ def forward( class LLMAdapter(nn.Module): """LLM Adapter: bridges Qwen3 hidden states and T5-XXL token embeddings. - This is the key custom component in Anima. It takes: - - source_hidden_states: Qwen3 0.6B hidden states (dim=1024) - - target_input_ids: T5-XXL token IDs - - And produces conditioning embeddings for the Cosmos DiT via: - 1. Embedding T5 token IDs via learned Embedding(32128, 1024) - 2. Cross-attending T5 embeddings to Qwen3 hidden states through 6 transformer layers - 3. Projecting and normalizing the output - - The output is zero-padded to 512 tokens for the DiT cross-attention. + Takes Qwen3 hidden states and T5-XXL token IDs, produces conditioning + embeddings for the Cosmos DiT via cross-attention through 6 transformer layers. Args: - source_dim: Dimension of source (Qwen3) hidden states. - target_dim: Dimension of target (T5) embeddings. - model_dim: Internal model dimension. + vocab_size: Size of the T5 token vocabulary. + dim: Model dimension (used for embeddings, projections, and all layers). num_layers: Number of transformer layers. num_heads: Number of attention heads. - use_self_attn: Whether to use self-attention in transformer blocks. 
""" def __init__( self, - source_dim: int = 1024, - target_dim: int = 1024, - model_dim: int = 1024, + vocab_size: int = 32128, + dim: int = 1024, num_layers: int = 6, num_heads: int = 16, - use_self_attn: bool = True, ): super().__init__() - self.embed = nn.Embedding(32128, target_dim) - if model_dim != target_dim: - self.in_proj = nn.Linear(target_dim, model_dim) - else: - self.in_proj = nn.Identity() - self.rotary_emb = LLMAdapterRotaryEmbedding(model_dim // num_heads) + self.embed = nn.Embedding(vocab_size, dim) self.blocks = nn.ModuleList([ - LLMAdapterTransformerBlock(source_dim, model_dim, num_heads=num_heads, use_self_attn=use_self_attn) + LLMAdapterTransformerBlock(source_dim=dim, model_dim=dim, num_heads=num_heads) for _ in range(num_layers) ]) - self.out_proj = nn.Linear(model_dim, target_dim) - self.norm = nn.RMSNorm(target_dim, eps=1e-6) + self.out_proj = nn.Linear(dim, dim) + self.norm = nn.RMSNorm(dim, eps=1e-6) + self.rotary_emb = LLMAdapterRotaryEmbedding(dim // num_heads) def forward( self, @@ -950,39 +931,42 @@ def forward( target_attention_mask: Optional[torch.Tensor] = None, source_attention_mask: Optional[torch.Tensor] = None, ) -> torch.Tensor: + # Expand attention masks for multi-head attention if target_attention_mask is not None: target_attention_mask = target_attention_mask.to(torch.bool) if target_attention_mask.ndim == 2: - target_attention_mask = target_attention_mask.unsqueeze(1).unsqueeze(1) + target_attention_mask = target_attention_mask[:, None, None, :] if source_attention_mask is not None: source_attention_mask = source_attention_mask.to(torch.bool) if source_attention_mask.ndim == 2: - source_attention_mask = source_attention_mask.unsqueeze(1).unsqueeze(1) + source_attention_mask = source_attention_mask[:, None, None, :] context = source_hidden_states - # Standard nn.Embedding doesn't support out_dtype; cast after forward - x = self.in_proj(self.embed(target_input_ids).to(dtype=context.dtype)) + x = 
self.embed(target_input_ids).to(dtype=context.dtype) - position_ids = torch.arange(x.shape[1], device=x.device).unsqueeze(0) - position_ids_context = torch.arange(context.shape[1], device=x.device).unsqueeze(0) - position_embeddings = self.rotary_emb(x, position_ids) - position_embeddings_context = self.rotary_emb(x, position_ids_context) + # Build position IDs and compute rotary embeddings + target_pos_ids = torch.arange(x.shape[1], device=x.device, dtype=torch.long).unsqueeze(0) + source_pos_ids = torch.arange(context.shape[1], device=x.device, dtype=torch.long).unsqueeze(0) + pos_target = self.rotary_emb(x, target_pos_ids) + pos_source = self.rotary_emb(x, source_pos_ids) for block in self.blocks: x = block( - x, context, - target_attention_mask=target_attention_mask, - source_attention_mask=source_attention_mask, - position_embeddings=position_embeddings, - position_embeddings_context=position_embeddings_context, + x, + context=context, + target_mask=target_attention_mask, + source_mask=source_attention_mask, + pos_target=pos_target, + pos_source=pos_source, ) return self.norm(self.out_proj(x)) # ============================================================================ # Anima: MiniTrainDIT + LLMAdapter -# Source: https://github.com/comfyanonymous/ComfyUI/blob/master/comfy/ldm/anima/model.py +# Reference implementation: https://github.com/hdae/diffusers-anima +# SPDX-License-Identifier: Apache-2.0 # ============================================================================ @@ -991,18 +975,6 @@ class AnimaTransformer(MiniTrainDIT): Extends MiniTrainDIT by adding the LLMAdapter component that preprocesses text embeddings before they are fed to the DiT cross-attention layers. - - The forward pass: - 1. Runs the LLM Adapter to produce conditioning from Qwen3 hidden states + T5 token IDs - 2. Zero-pads the conditioning to 512 tokens - 3. 
Passes the conditioning to MiniTrainDIT's cross-attention - - Default configuration for Anima: - - model_channels=2048, num_blocks=28, num_heads=16 - - crossattn_emb_channels=1024, patch_spatial=2, patch_temporal=1 - - in_channels=16, out_channels=16 - - use_adaln_lora=True, adaln_lora_dim=256 - - extra_per_block_abs_pos_emb=True """ def __init__(self, *args, **kwargs): @@ -1025,15 +997,14 @@ def preprocess_text_embeds( Returns: Conditioning tensor. Shape: (batch, 512, 1024), zero-padded if needed. """ - if text_ids is not None: - out = self.llm_adapter(text_embeds, text_ids) - if t5xxl_weights is not None: - out = out * t5xxl_weights - if out.shape[1] < 512: - out = F.pad(out, (0, 0, 0, 512 - out.shape[1])) - return out - else: + if text_ids is None: return text_embeds + out = self.llm_adapter(text_embeds, text_ids) + if t5xxl_weights is not None: + out = out * t5xxl_weights + if out.shape[1] < 512: + out = F.pad(out, (0, 0, 0, 512 - out.shape[1])) + return out def forward( self, From 53d6f8954174d0a0140e09459335a966799e11de Mon Sep 17 00:00:00 2001 From: Your Name Date: Fri, 13 Mar 2026 17:06:53 -0400 Subject: [PATCH 11/14] fix an additional lora type --- .../lora_conversions/anima_lora_conversion_utils.py | 11 ++++++++++- .../test_anima_lora_conversion_utils.py | 8 ++++++++ 2 files changed, 18 insertions(+), 1 deletion(-) diff --git a/invokeai/backend/patches/lora_conversions/anima_lora_conversion_utils.py b/invokeai/backend/patches/lora_conversions/anima_lora_conversion_utils.py index 2c9800cab33..ab9f8500027 100644 --- a/invokeai/backend/patches/lora_conversions/anima_lora_conversion_utils.py +++ b/invokeai/backend/patches/lora_conversions/anima_lora_conversion_utils.py @@ -60,10 +60,12 @@ def is_state_dict_likely_anima_lora(state_dict: dict[str | int, torch.Tensor]) - ("cross_attn_q_proj", "cross_attn.q_proj"), ("cross_attn_v_proj", "cross_attn.v_proj"), ("cross_attn_output_proj", "cross_attn.output_proj"), + ("cross_attn_o_proj", "cross_attn.o_proj"), 
("self_attn_k_proj", "self_attn.k_proj"), ("self_attn_q_proj", "self_attn.q_proj"), ("self_attn_v_proj", "self_attn.v_proj"), ("self_attn_output_proj", "self_attn.output_proj"), + ("self_attn_o_proj", "self_attn.o_proj"), ("mlp_layer1", "mlp.layer1"), ("mlp_layer2", "mlp.layer2"), ] @@ -84,11 +86,18 @@ def _convert_kohya_unet_key(kohya_layer_name: str) -> str: """Convert a Kohya-style LoRA layer name to a model parameter path. Example: lora_unet_blocks_0_cross_attn_k_proj -> blocks.0.cross_attn.k_proj + Example: lora_unet_llm_adapter_blocks_0_cross_attn_k_proj -> llm_adapter.blocks.0.cross_attn.k_proj """ key = kohya_layer_name if key.startswith("lora_unet_"): key = key[len("lora_unet_"):] + # Handle llm_adapter prefix: strip it, run the standard block conversion, then re-add with dot + llm_adapter_prefix = "" + if key.startswith("llm_adapter_"): + key = key[len("llm_adapter_"):] + llm_adapter_prefix = "llm_adapter." + # Convert blocks_N_ to blocks.N. key = re.sub(r"^blocks_(\d+)_", r"blocks.\1.", key) @@ -98,7 +107,7 @@ def _convert_kohya_unet_key(kohya_layer_name: str) -> str: key = key.replace(old, new) break - return key + return llm_adapter_prefix + key def _convert_kohya_te_key(kohya_layer_name: str) -> str: diff --git a/tests/backend/patches/lora_conversions/test_anima_lora_conversion_utils.py b/tests/backend/patches/lora_conversions/test_anima_lora_conversion_utils.py index 10666619d8d..e911e7d738e 100644 --- a/tests/backend/patches/lora_conversions/test_anima_lora_conversion_utils.py +++ b/tests/backend/patches/lora_conversions/test_anima_lora_conversion_utils.py @@ -78,6 +78,14 @@ def test_is_state_dict_likely_anima_lora_false_for_random(): ("lora_unet_blocks_0_adaln_modulation_cross_attn_1", "blocks.0.adaln_modulation_cross_attn.1"), ("lora_unet_blocks_0_adaln_modulation_self_attn_1", "blocks.0.adaln_modulation_self_attn.1"), ("lora_unet_blocks_0_adaln_modulation_mlp_1", "blocks.0.adaln_modulation_mlp.1"), + # LLM Adapter keys + 
("lora_unet_llm_adapter_blocks_0_cross_attn_k_proj", "llm_adapter.blocks.0.cross_attn.k_proj"), + ("lora_unet_llm_adapter_blocks_0_cross_attn_q_proj", "llm_adapter.blocks.0.cross_attn.q_proj"), + ("lora_unet_llm_adapter_blocks_0_cross_attn_v_proj", "llm_adapter.blocks.0.cross_attn.v_proj"), + ("lora_unet_llm_adapter_blocks_0_self_attn_k_proj", "llm_adapter.blocks.0.self_attn.k_proj"), + ("lora_unet_llm_adapter_blocks_0_self_attn_q_proj", "llm_adapter.blocks.0.self_attn.q_proj"), + ("lora_unet_llm_adapter_blocks_0_self_attn_v_proj", "llm_adapter.blocks.0.self_attn.v_proj"), + ("lora_unet_llm_adapter_blocks_5_cross_attn_k_proj", "llm_adapter.blocks.5.cross_attn.k_proj"), ], ) def test_convert_kohya_unet_key(kohya_key: str, expected: str): From 1928e257b27cf21890fc421a602d687e2cfe8270 Mon Sep 17 00:00:00 2001 From: Your Name Date: Tue, 17 Mar 2026 00:48:58 -0400 Subject: [PATCH 12/14] some adjustments to follow flux 2 paper implementation --- invokeai/app/invocations/anima_denoise.py | 33 ++++++++-------- .../app/invocations/anima_latents_to_image.py | 3 +- .../app/invocations/anima_text_encoder.py | 6 +-- tests/app/invocations/test_anima_denoise.py | 38 +++++++++---------- 4 files changed, 41 insertions(+), 39 deletions(-) diff --git a/invokeai/app/invocations/anima_denoise.py b/invokeai/app/invocations/anima_denoise.py index 0cf9586e29a..a284b5dcd4c 100644 --- a/invokeai/app/invocations/anima_denoise.py +++ b/invokeai/app/invocations/anima_denoise.py @@ -1,8 +1,8 @@ """Anima denoising invocation. 
Implements the rectified flow denoising loop for Anima models: -- CONST model type: denoised = input - output * sigma -- Fixed shift=3.0 via time_snr_shift (same formula as Flux) +- Direct prediction: denoised = input - output * sigma +- Fixed shift=3.0 via loglinear_timestep_shift (Flux paper by Black Forest Labs) - Timestep convention: timestep = sigma * 1.0 (raw sigma, NOT 1-sigma like Z-Image) - NO v-prediction negation (unlike Z-Image) - 3D latent space: [B, C, T, H, W] with T=1 for images @@ -11,7 +11,7 @@ Key differences from Z-Image denoise: - Anima uses fixed shift=3.0, Z-Image uses dynamic shift based on resolution - Anima: timestep = sigma (raw), Z-Image: model_t = 1.0 - sigma -- Anima: noise_pred = model_output (CONST), Z-Image: noise_pred = -model_output (v-pred) +- Anima: noise_pred = model_output (direct), Z-Image: noise_pred = -model_output (v-pred) - Anima transformer takes (x, timesteps, context, t5xxl_ids, t5xxl_weights) - Anima uses 3D latents directly, Z-Image converts 4D -> list of 5D """ @@ -61,18 +61,21 @@ ANIMA_LATENT_CHANNELS = 16 # Anima uses fixed shift=3.0 for the rectified flow schedule ANIMA_SHIFT = 3.0 -# Anima uses multiplier=1.0 (raw sigma values as timesteps, per ComfyUI config) +# Anima uses raw sigma values as timesteps (no rescaling) ANIMA_MULTIPLIER = 1.0 -def time_snr_shift(alpha: float, t: float) -> float: - """Apply time-SNR shift to a timestep value. +def loglinear_timestep_shift(alpha: float, t: float) -> float: + """Apply log-linear timestep shift to a noise schedule value. - This is the same formula used by Flux and ComfyUI's ModelSamplingDiscreteFlow. - With alpha=3.0, this shifts the noise schedule to spend more time at higher noise levels. + This shift biases the noise schedule toward higher noise levels, as described + in the Flux model (Black Forest Labs, 2024). With alpha > 1, the model spends + proportionally more denoising steps at higher noise levels. 
+ + Formula: sigma = alpha * t / (1 + (alpha - 1) * t) Args: - alpha: Shift factor (3.0 for Anima). + alpha: Shift factor (3.0 for Anima, resolution-dependent for Flux). t: Timestep value in [0, 1]. Returns: @@ -83,10 +86,10 @@ def time_snr_shift(alpha: float, t: float) -> float: return alpha * t / (1 + (alpha - 1) * t) -def inverse_time_snr_shift(alpha: float, sigma: float) -> float: +def inverse_loglinear_timestep_shift(alpha: float, sigma: float) -> float: """Recover linear t from a shifted sigma value. - Inverse of time_snr_shift: given sigma = alpha * t / (1 + (alpha-1) * t), + Inverse of loglinear_timestep_shift: given sigma = alpha * t / (1 + (alpha-1) * t), solve for t = sigma / (alpha - (alpha-1) * sigma). This is needed for the inpainting extension, which expects linear t values @@ -146,7 +149,7 @@ def merge_intermediate_latents_with_init_latents( """ # Recover linear t from shifted sigma for gradient mask thresholding. # This ensures the gradient mask is revealed at the correct pace. - t_prev = inverse_time_snr_shift(self._shift, sigma_prev) + t_prev = inverse_loglinear_timestep_shift(self._shift, sigma_prev) mask = self._apply_mask_gradient_adjustment(t_prev) # Use shifted sigma for noise mixing to match the denoiser's noise level. @@ -265,8 +268,8 @@ def _get_noise( def _get_sigmas(self, num_steps: int) -> list[float]: """Generate sigma schedule with fixed shift=3.0. - Uses the same time_snr_shift formula as Flux/ComfyUI but with - a fixed shift factor of 3.0 (no dynamic resolution-based shift). + Uses the log-linear timestep shift from the Flux model (Black Forest Labs) + with a fixed shift factor of 3.0 (no dynamic resolution-based shift). Returns: List of num_steps + 1 sigma values from ~1.0 (noise) to 0.0 (clean). 
@@ -274,7 +277,7 @@ def _get_sigmas(self, num_steps: int) -> list[float]: sigmas = [] for i in range(num_steps + 1): t = 1.0 - i / num_steps - sigma = time_snr_shift(ANIMA_SHIFT, t) + sigma = loglinear_timestep_shift(ANIMA_SHIFT, t) sigmas.append(sigma) return sigmas diff --git a/invokeai/app/invocations/anima_latents_to_image.py b/invokeai/app/invocations/anima_latents_to_image.py index 7eb03ebd766..4ea3fac9319 100644 --- a/invokeai/app/invocations/anima_latents_to_image.py +++ b/invokeai/app/invocations/anima_latents_to_image.py @@ -5,8 +5,7 @@ Latents from the denoiser are in normalized space (zero-centered). Before VAE decode, they must be denormalized using the Wan 2.1 per-channel -mean/std: latents = latents * std + mean (matching diffusers WanPipeline -and ComfyUI's Wan21 latent_format.process_out). +mean/std: latents = latents * std + mean (matching diffusers WanPipeline). The VAE expects 5D latents [B, C, T, H, W] — for single images, T=1. """ diff --git a/invokeai/app/invocations/anima_text_encoder.py b/invokeai/app/invocations/anima_text_encoder.py index 1e0730346b6..4b9add1c947 100644 --- a/invokeai/app/invocations/anima_text_encoder.py +++ b/invokeai/app/invocations/anima_text_encoder.py @@ -10,7 +10,7 @@ Key differences from Z-Image text encoder: - Anima uses Qwen3 0.6B (base model, NOT instruct) — no chat template - Anima additionally tokenizes with T5-XXL tokenizer to get token IDs -- Qwen3 output includes all positions (including padding) to match ComfyUI +- Qwen3 output uses all positions (including padding) for full context """ from contextlib import ExitStack @@ -106,7 +106,7 @@ def _encode_prompt( Returns: Tuple of (qwen3_embeds, t5xxl_ids, t5xxl_weights). - qwen3_embeds: Shape (max_seq_len, 1024) — includes all positions (including padding) - to match ComfyUI's SDClipModel behavior. + to preserve full sequence context for the LLM Adapter. - t5xxl_ids: Shape (seq_len,) — T5-XXL token IDs (unpadded). 
- t5xxl_weights: None (uniform weights for now). """ @@ -166,7 +166,7 @@ def _encode_prompt( text_input_ids = torch.tensor([[pad_id]]) attention_mask = torch.tensor([[1]]) - # Get last hidden state from Qwen3 (ComfyUI uses layer="last") + # Get last hidden state from Qwen3 (final layer output) prompt_mask = attention_mask.to(device).bool() outputs = text_encoder( text_input_ids.to(device), diff --git a/tests/app/invocations/test_anima_denoise.py b/tests/app/invocations/test_anima_denoise.py index 4a1e134421c..733b33f75ce 100644 --- a/tests/app/invocations/test_anima_denoise.py +++ b/tests/app/invocations/test_anima_denoise.py @@ -3,31 +3,31 @@ from invokeai.app.invocations.anima_denoise import ( ANIMA_SHIFT, AnimaDenoiseInvocation, - inverse_time_snr_shift, - time_snr_shift, + inverse_loglinear_timestep_shift, + loglinear_timestep_shift, ) -class TestTimeSnrShift: - """Test the time-SNR shift function used for Anima's noise schedule.""" +class TestLoglinearTimestepShift: + """Test the log-linear timestep shift function used for Anima's noise schedule.""" def test_shift_1_is_identity(self): - """With alpha=1.0, time_snr_shift should be identity.""" + """With alpha=1.0, shift should be identity.""" for t in [0.0, 0.25, 0.5, 0.75, 1.0]: - assert time_snr_shift(1.0, t) == t + assert loglinear_timestep_shift(1.0, t) == t def test_shift_at_zero(self): """At t=0, shifted sigma should be 0 regardless of alpha.""" - assert time_snr_shift(3.0, 0.0) == 0.0 + assert loglinear_timestep_shift(3.0, 0.0) == 0.0 def test_shift_at_one(self): """At t=1, shifted sigma should be 1 regardless of alpha.""" - assert time_snr_shift(3.0, 1.0) == pytest.approx(1.0) + assert loglinear_timestep_shift(3.0, 1.0) == pytest.approx(1.0) def test_shift_3_increases_sigma(self): """With alpha=3.0, sigma should be larger than t (spends more time at high noise).""" for t in [0.1, 0.25, 0.5, 0.75, 0.9]: - sigma = time_snr_shift(3.0, t) + sigma = loglinear_timestep_shift(3.0, t) assert sigma > t, f"At 
t={t}, sigma={sigma} should be > t" def test_shift_monotonic(self): @@ -35,35 +35,35 @@ def test_shift_monotonic(self): prev = 0.0 for i in range(1, 101): t = i / 100.0 - sigma = time_snr_shift(3.0, t) + sigma = loglinear_timestep_shift(3.0, t) assert sigma > prev, f"Not monotonic at t={t}" prev = sigma def test_known_value(self): """Test a known value: at t=0.5, alpha=3.0, sigma = 3*0.5 / (1 + 2*0.5) = 0.75.""" - assert time_snr_shift(3.0, 0.5) == pytest.approx(0.75) + assert loglinear_timestep_shift(3.0, 0.5) == pytest.approx(0.75) -class TestInverseTimeSnrShift: - """Test the inverse time-SNR shift (used for inpainting mask correction).""" +class TestInverseLoglinearTimestepShift: + """Test the inverse log-linear timestep shift (used for inpainting mask correction).""" def test_inverse_shift_1_is_identity(self): """With alpha=1.0, inverse should be identity.""" for sigma in [0.0, 0.25, 0.5, 0.75, 1.0]: - assert inverse_time_snr_shift(1.0, sigma) == sigma + assert inverse_loglinear_timestep_shift(1.0, sigma) == sigma def test_roundtrip(self): """shift(inverse(sigma)) should recover sigma, and inverse(shift(t)) should recover t.""" for t in [0.0, 0.1, 0.25, 0.5, 0.75, 0.9, 1.0]: - sigma = time_snr_shift(3.0, t) - recovered_t = inverse_time_snr_shift(3.0, sigma) + sigma = loglinear_timestep_shift(3.0, t) + recovered_t = inverse_loglinear_timestep_shift(3.0, sigma) assert recovered_t == pytest.approx(t, abs=1e-7), ( f"Roundtrip failed: t={t} -> sigma={sigma} -> recovered_t={recovered_t}" ) def test_known_value(self): """At sigma=0.75, alpha=3.0, t should be 0.5 (inverse of the known shift value).""" - assert inverse_time_snr_shift(3.0, 0.75) == pytest.approx(0.5) + assert inverse_loglinear_timestep_shift(3.0, 0.75) == pytest.approx(0.5) class TestGetSigmas: @@ -85,7 +85,7 @@ def test_schedule_endpoints(self): transformer=None, # type: ignore ) sigmas = inv._get_sigmas(30) - assert sigmas[0] == pytest.approx(time_snr_shift(ANIMA_SHIFT, 1.0)) + assert sigmas[0] == 
pytest.approx(loglinear_timestep_shift(ANIMA_SHIFT, 1.0)) assert sigmas[-1] == pytest.approx(0.0) def test_schedule_monotonically_decreasing(self): @@ -106,4 +106,4 @@ def test_schedule_uses_shift(self): ) sigmas = inv._get_sigmas(10) # At step 5/10, linear t = 0.5, shifted sigma should be 0.75 - assert sigmas[5] == pytest.approx(time_snr_shift(3.0, 0.5)) + assert sigmas[5] == pytest.approx(loglinear_timestep_shift(3.0, 0.5)) From 0b8765ce8170dc1a39d2e307d557bfe3322e1444 Mon Sep 17 00:00:00 2001 From: Your Name Date: Tue, 17 Mar 2026 02:55:08 -0400 Subject: [PATCH 13/14] use t5 from model manager instead of downloading --- .../app/invocations/anima_model_loader.py | 27 +++++++++- .../app/invocations/anima_text_encoder.py | 33 ++++++------ .../backend/model_manager/starter_models.py | 3 +- invokeai/frontend/web/public/locales/en.json | 3 ++ .../listeners/modelSelected.ts | 21 +++++++- .../controlLayers/store/paramsSlice.ts | 10 ++++ .../src/features/controlLayers/store/types.ts | 2 + .../util/graph/generation/buildAnimaGraph.ts | 6 +++ .../Advanced/ParamAnimaModelSelect.tsx | 53 +++++++++++++++++-- .../web/src/features/queue/store/readiness.ts | 6 +++ .../src/services/api/hooks/modelsByType.ts | 1 + .../frontend/web/src/services/api/schema.ts | 18 +++++++ 12 files changed, 161 insertions(+), 22 deletions(-) diff --git a/invokeai/app/invocations/anima_model_loader.py b/invokeai/app/invocations/anima_model_loader.py index 3c134631805..01eb588624e 100644 --- a/invokeai/app/invocations/anima_model_loader.py +++ b/invokeai/app/invocations/anima_model_loader.py @@ -11,10 +11,15 @@ from invokeai.app.invocations.model import ( ModelIdentifierField, Qwen3EncoderField, + T5EncoderField, TransformerField, VAEField, ) from invokeai.app.services.shared.invocation_context import InvocationContext +from invokeai.app.util.t5_model_identifier import ( + preprocess_t5_encoder_model_identifier, + preprocess_t5_tokenizer_model_identifier, +) from invokeai.backend.model_manager.taxonomy 
import BaseModelType, ModelType, SubModelType @@ -25,6 +30,7 @@ class AnimaModelLoaderOutput(BaseInvocationOutput): transformer: TransformerField = OutputField(description=FieldDescriptions.transformer, title="Transformer") qwen3_encoder: Qwen3EncoderField = OutputField(description=FieldDescriptions.qwen3_encoder, title="Qwen3 Encoder") vae: VAEField = OutputField(description=FieldDescriptions.vae, title="VAE") + t5_encoder: T5EncoderField = OutputField(description=FieldDescriptions.t5_encoder, title="T5 Encoder") @invocation( @@ -32,7 +38,7 @@ class AnimaModelLoaderOutput(BaseInvocationOutput): title="Main Model - Anima", tags=["model", "anima"], category="model", - version="1.0.0", + version="1.2.0", classification=Classification.Prototype, ) class AnimaModelLoaderInvocation(BaseInvocation): @@ -42,6 +48,7 @@ class AnimaModelLoaderInvocation(BaseInvocation): - Transformer: Cosmos Predict2 DiT + LLM Adapter (from single-file checkpoint) - Qwen3 Encoder: Qwen3 0.6B (standalone single-file) - VAE: AutoencoderKLQwenImage / Wan 2.1 VAE (standalone single-file or FLUX VAE) + - T5 Encoder: T5-XXL model (only the tokenizer submodel is used, for LLM Adapter token IDs) """ model: ModelIdentifierField = InputField( @@ -69,6 +76,14 @@ class AnimaModelLoaderInvocation(BaseInvocation): title="Qwen3 Encoder", ) + t5_encoder_model: Optional[ModelIdentifierField] = InputField( + default=None, + description="T5-XXL encoder model. The tokenizer submodel is used for Anima text encoding.", + input=Input.Direct, + ui_model_type=ModelType.T5Encoder, + title="T5 Encoder", + ) + def invoke(self, context: InvocationContext) -> AnimaModelLoaderOutput: # Transformer always comes from the main model transformer = self.model.model_copy(update={"submodel_type": SubModelType.Transformer}) @@ -91,8 +106,18 @@ def invoke(self, context: InvocationContext) -> AnimaModelLoaderOutput: "No Qwen3 Encoder source provided. Set 'Qwen3 Encoder' to a Qwen3 0.6B model." 
) + # T5 Encoder (only tokenizer submodel is used by Anima) + if self.t5_encoder_model is not None: + t5_tokenizer = preprocess_t5_tokenizer_model_identifier(self.t5_encoder_model) + t5_encoder = preprocess_t5_encoder_model_identifier(self.t5_encoder_model) + else: + raise ValueError( + "No T5 Encoder source provided. Set 'T5 Encoder' to a T5-XXL encoder model." + ) + return AnimaModelLoaderOutput( transformer=TransformerField(transformer=transformer, loras=[]), qwen3_encoder=Qwen3EncoderField(tokenizer=qwen3_tokenizer, text_encoder=qwen3_encoder), vae=VAEField(vae=vae), + t5_encoder=T5EncoderField(tokenizer=t5_tokenizer, text_encoder=t5_encoder, loras=[]), ) diff --git a/invokeai/app/invocations/anima_text_encoder.py b/invokeai/app/invocations/anima_text_encoder.py index 4b9add1c947..b8743ae6b37 100644 --- a/invokeai/app/invocations/anima_text_encoder.py +++ b/invokeai/app/invocations/anima_text_encoder.py @@ -17,7 +17,7 @@ from typing import Iterator, Tuple import torch -from transformers import PreTrainedModel, PreTrainedTokenizerBase, T5TokenizerFast +from transformers import PreTrainedModel, PreTrainedTokenizerBase from invokeai.app.invocations.baseinvocation import BaseInvocation, Classification, invocation from invokeai.app.invocations.fields import ( @@ -28,7 +28,7 @@ TensorField, UIComponent, ) -from invokeai.app.invocations.model import Qwen3EncoderField +from invokeai.app.invocations.model import Qwen3EncoderField, T5EncoderField from invokeai.app.invocations.primitives import AnimaConditioningOutput from invokeai.app.services.shared.invocation_context import InvocationContext from invokeai.backend.patches.layer_patcher import LayerPatcher @@ -43,16 +43,13 @@ # T5-XXL max sequence length for token IDs T5_MAX_SEQ_LEN = 512 -# T5-XXL tokenizer source (same vocabulary regardless of T5 model variant) -T5_TOKENIZER_NAME = "google/t5-v1_1-xxl" - @invocation( "anima_text_encoder", title="Prompt - Anima", tags=["prompt", "conditioning", "anima"], 
category="conditioning", - version="1.1.0", + version="1.3.0", classification=Classification.Prototype, ) class AnimaTextEncoderInvocation(BaseInvocation): @@ -69,6 +66,11 @@ class AnimaTextEncoderInvocation(BaseInvocation): description=FieldDescriptions.qwen3_encoder, input=Input.Connection, ) + t5_encoder: T5EncoderField = InputField( + title="T5 Encoder", + description=FieldDescriptions.t5_encoder, + input=Input.Connection, + ) mask: TensorField | None = InputField( default=None, description="A mask defining the region that this conditioning prompt applies to.", @@ -184,15 +186,16 @@ def _encode_prompt( # --- Step 2: Tokenize with T5-XXL tokenizer (IDs only, no model) --- context.util.signal_progress("Tokenizing with T5-XXL") - t5_tokenizer = T5TokenizerFast.from_pretrained(T5_TOKENIZER_NAME) - t5_tokens = t5_tokenizer( - prompt, - padding=False, - truncation=True, - max_length=T5_MAX_SEQ_LEN, - return_tensors="pt", - ) - t5xxl_ids = t5_tokens.input_ids[0] # Shape: (seq_len,) + t5_tokenizer_info = context.models.load(self.t5_encoder.tokenizer) + with t5_tokenizer_info.model_on_device() as (_, t5_tokenizer): + t5_tokens = t5_tokenizer( + prompt, + padding=False, + truncation=True, + max_length=T5_MAX_SEQ_LEN, + return_tensors="pt", + ) + t5xxl_ids = t5_tokens.input_ids[0] # Shape: (seq_len,) return qwen3_embeds, t5xxl_ids, None diff --git a/invokeai/backend/model_manager/starter_models.py b/invokeai/backend/model_manager/starter_models.py index f1520563bf1..3fcca7f034f 100644 --- a/invokeai/backend/model_manager/starter_models.py +++ b/invokeai/backend/model_manager/starter_models.py @@ -888,7 +888,7 @@ class StarterModelBundle(BaseModel): description="Anima Preview 2 - 2B parameter anime-focused text-to-image model built on Cosmos Predict2 DiT. 
~4.5GB", type=ModelType.Main, format=ModelFormat.Checkpoint, - dependencies=[anima_qwen3_encoder, anima_vae], + dependencies=[anima_qwen3_encoder, anima_vae, t5_base_encoder], ) # endregion @@ -1062,6 +1062,7 @@ class StarterModelBundle(BaseModel): anima_preview2, anima_qwen3_encoder, anima_vae, + t5_base_encoder, ] STARTER_BUNDLES: dict[str, StarterModelBundle] = { diff --git a/invokeai/frontend/web/public/locales/en.json b/invokeai/frontend/web/public/locales/en.json index 8c75043b027..605bdc30b0d 100644 --- a/invokeai/frontend/web/public/locales/en.json +++ b/invokeai/frontend/web/public/locales/en.json @@ -1201,6 +1201,8 @@ "animaVaePlaceholder": "Select Anima-compatible VAE", "animaQwen3Encoder": "Qwen3 0.6B Encoder", "animaQwen3EncoderPlaceholder": "Select Qwen3 0.6B encoder", + "animaT5Encoder": "T5-XXL Encoder", + "animaT5EncoderPlaceholder": "Select T5-XXL encoder", "zImageVae": "VAE (optional)", "zImageVaePlaceholder": "From VAE source model", "zImageQwen3Encoder": "Qwen3 Encoder (optional)", @@ -1505,6 +1507,7 @@ "noZImageQwen3EncoderSourceSelected": "No Qwen3 Encoder source: Select Qwen3 Encoder or Qwen3 Source model", "noAnimaVaeModelSelected": "No Anima VAE model selected", "noAnimaQwen3EncoderModelSelected": "No Anima Qwen3 Encoder model selected", + "noAnimaT5EncoderModelSelected": "No Anima T5 Encoder model selected", "fluxModelIncompatibleBboxWidth": "$t(parameters.invoke.fluxRequiresDimensionsToBeMultipleOf16), bbox width is {{width}}", "fluxModelIncompatibleBboxHeight": "$t(parameters.invoke.fluxRequiresDimensionsToBeMultipleOf16), bbox height is {{height}}", "fluxModelIncompatibleScaledBboxWidth": "$t(parameters.invoke.fluxRequiresDimensionsToBeMultipleOf16), scaled bbox width is {{width}}", diff --git a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/modelSelected.ts b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/modelSelected.ts index 2d431f86939..52b567fdebe 100644 --- 
a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/modelSelected.ts +++ b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/modelSelected.ts @@ -5,6 +5,7 @@ import { buildSelectIsStaging, selectCanvasSessionId } from 'features/controlLay import { loraIsEnabledChanged } from 'features/controlLayers/store/lorasSlice'; import { animaQwen3EncoderModelSelected, + animaT5EncoderModelSelected, animaVaeModelSelected, kleinQwen3EncoderModelSelected, kleinVaeModelSelected, @@ -46,6 +47,7 @@ import { selectGlobalRefImageModels, selectQwen3EncoderModels, selectRegionalRefImageModels, + selectT5EncoderModels, selectZImageDiffusersModels, } from 'services/api/hooks/modelsByType'; import type { FLUXKontextModelConfig, FLUXReduxModelConfig, IPAdapterModelConfig } from 'services/api/types'; @@ -159,7 +161,7 @@ export const addModelSelectedListener = (startAppListening: AppStartListening) = } // handle incompatible Anima models - clear if switching away from anima - const { animaVaeModel, animaQwen3EncoderModel } = state.params; + const { animaVaeModel, animaQwen3EncoderModel, animaT5EncoderModel } = state.params; if (newBase !== 'anima') { if (animaVaeModel) { dispatch(animaVaeModelSelected(null)); @@ -169,13 +171,18 @@ export const addModelSelectedListener = (startAppListening: AppStartListening) = dispatch(animaQwen3EncoderModelSelected(null)); modelsUpdatedDisabledOrCleared += 1; } + if (animaT5EncoderModel) { + dispatch(animaT5EncoderModelSelected(null)); + modelsUpdatedDisabledOrCleared += 1; + } } else { // Switching to Anima - set defaults if no valid configuration exists - const hasValidConfig = animaVaeModel && animaQwen3EncoderModel; + const hasValidConfig = animaVaeModel && animaQwen3EncoderModel && animaT5EncoderModel; if (!hasValidConfig) { const availableQwen3Encoders = selectQwen3EncoderModels(state); const availableAnimaVAEs = selectAnimaVAEModels(state); + const availableT5Encoders = selectT5EncoderModels(state); 
if (availableQwen3Encoders.length > 0 && availableAnimaVAEs.length > 0) { const qwen3Encoder = availableQwen3Encoders[0]; @@ -201,6 +208,16 @@ export const addModelSelectedListener = (startAppListening: AppStartListening) = }) ); } + const t5Encoder = availableT5Encoders[0]; + if (t5Encoder && !animaT5EncoderModel) { + dispatch( + animaT5EncoderModelSelected({ + key: t5Encoder.key, + name: t5Encoder.name, + base: t5Encoder.base, + }) + ); + } } } } diff --git a/invokeai/frontend/web/src/features/controlLayers/store/paramsSlice.ts b/invokeai/frontend/web/src/features/controlLayers/store/paramsSlice.ts index 373325c9b84..7e0c7a5029e 100644 --- a/invokeai/frontend/web/src/features/controlLayers/store/paramsSlice.ts +++ b/invokeai/frontend/web/src/features/controlLayers/store/paramsSlice.ts @@ -228,6 +228,13 @@ const slice = createSlice({ } state.animaQwen3EncoderModel = result.data; }, + animaT5EncoderModelSelected: (state, action: PayloadAction<{ key: string; name: string; base: string } | null>) => { + const result = zParamsState.shape.animaT5EncoderModel.safeParse(action.payload); + if (!result.success) { + return; + } + state.animaT5EncoderModel = result.data; + }, setAnimaScheduler: (state, action: PayloadAction<'euler' | 'heun' | 'lcm'>) => { state.animaScheduler = action.payload; }, @@ -502,6 +509,7 @@ const resetState = (state: ParamsState): ParamsState => { newState.zImageQwen3SourceModel = oldState.zImageQwen3SourceModel; newState.animaVaeModel = oldState.animaVaeModel; newState.animaQwen3EncoderModel = oldState.animaQwen3EncoderModel; + newState.animaT5EncoderModel = oldState.animaT5EncoderModel; newState.kleinVaeModel = oldState.kleinVaeModel; newState.kleinQwen3EncoderModel = oldState.kleinQwen3EncoderModel; return newState; @@ -580,6 +588,7 @@ export const { paramsReset, animaVaeModelSelected, animaQwen3EncoderModelSelected, + animaT5EncoderModelSelected, setAnimaScheduler, } = slice.actions; @@ -642,6 +651,7 @@ export const selectZImageQwen3EncoderModel 
= createParamsSelector((params) => pa export const selectZImageQwen3SourceModel = createParamsSelector((params) => params.zImageQwen3SourceModel); export const selectAnimaVaeModel = createParamsSelector((params) => params.animaVaeModel); export const selectAnimaQwen3EncoderModel = createParamsSelector((params) => params.animaQwen3EncoderModel); +export const selectAnimaT5EncoderModel = createParamsSelector((params) => params.animaT5EncoderModel); export const selectAnimaScheduler = createParamsSelector((params) => params.animaScheduler); export const selectKleinVaeModel = createParamsSelector((params) => params.kleinVaeModel); export const selectKleinQwen3EncoderModel = createParamsSelector((params) => params.kleinQwen3EncoderModel); diff --git a/invokeai/frontend/web/src/features/controlLayers/store/types.ts b/invokeai/frontend/web/src/features/controlLayers/store/types.ts index 80ac3f978e8..369abbcf3da 100644 --- a/invokeai/frontend/web/src/features/controlLayers/store/types.ts +++ b/invokeai/frontend/web/src/features/controlLayers/store/types.ts @@ -753,6 +753,7 @@ export const zParamsState = z.object({ // Anima model components - uses Qwen3 0.6B + T5-XXL tokenizer + QwenImage VAE animaVaeModel: zParameterVAEModel.nullable(), // Optional: Separate QwenImage/FLUX VAE for Anima animaQwen3EncoderModel: zModelIdentifierField.nullable(), // Optional: Separate Qwen3 0.6B Encoder for Anima + animaT5EncoderModel: zModelIdentifierField.nullable(), // T5-XXL tokenizer for Anima LLM Adapter animaScheduler: z.enum(['euler', 'heun', 'lcm']).default('euler'), // Flux2 Klein model components - uses Qwen3 instead of CLIP+T5 kleinVaeModel: zParameterVAEModel.nullable(), // Optional: Separate FLUX.2 VAE for Klein @@ -821,6 +822,7 @@ export const getInitialParamsState = (): ParamsState => ({ zImageQwen3SourceModel: null, animaVaeModel: null, animaQwen3EncoderModel: null, + animaT5EncoderModel: null, animaScheduler: 'euler', kleinVaeModel: null, kleinQwen3EncoderModel: null, diff 
--git a/invokeai/frontend/web/src/features/nodes/util/graph/generation/buildAnimaGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graph/generation/buildAnimaGraph.ts index e180dc8289a..b5ae21919d5 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graph/generation/buildAnimaGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graph/generation/buildAnimaGraph.ts @@ -3,6 +3,7 @@ import { getPrefixedId } from 'features/controlLayers/konva/util'; import { selectAnimaQwen3EncoderModel, selectAnimaScheduler, + selectAnimaT5EncoderModel, selectAnimaVaeModel, selectMainModelConfig, selectParamsSlice, @@ -40,6 +41,7 @@ export const buildAnimaGraph = async (arg: GraphBuilderArg): Promise 1 if (negCond !== null && negCondCollect !== null) { g.addEdge(modelLoader, 'qwen3_encoder', negCond, 'qwen3_encoder'); + g.addEdge(modelLoader, 't5_encoder', negCond, 't5_encoder'); g.addEdge(negCond, 'conditioning', negCondCollect, 'item'); g.addEdge(negCondCollect, 'collection', denoise, 'negative_conditioning'); } diff --git a/invokeai/frontend/web/src/features/parameters/components/Advanced/ParamAnimaModelSelect.tsx b/invokeai/frontend/web/src/features/parameters/components/Advanced/ParamAnimaModelSelect.tsx index 743558311e2..63cc5202090 100644 --- a/invokeai/frontend/web/src/features/parameters/components/Advanced/ParamAnimaModelSelect.tsx +++ b/invokeai/frontend/web/src/features/parameters/components/Advanced/ParamAnimaModelSelect.tsx @@ -3,15 +3,17 @@ import { useAppDispatch, useAppSelector } from 'app/store/storeHooks'; import { useModelCombobox } from 'common/hooks/useModelCombobox'; import { animaQwen3EncoderModelSelected, + animaT5EncoderModelSelected, animaVaeModelSelected, selectAnimaQwen3EncoderModel, + selectAnimaT5EncoderModel, selectAnimaVaeModel, } from 'features/controlLayers/store/paramsSlice'; import { zModelIdentifierField } from 'features/nodes/types/common'; import { memo, useCallback } from 'react'; import { useTranslation } from 
'react-i18next'; -import { useAnimaVAEModels, useQwen3EncoderModels } from 'services/api/hooks/modelsByType'; -import type { Qwen3EncoderModelConfig, VAEModelConfig } from 'services/api/types'; +import { useAnimaVAEModels, useQwen3EncoderModels, useT5EncoderModels } from 'services/api/hooks/modelsByType'; +import type { Qwen3EncoderModelConfig, T5EncoderModelConfig, VAEModelConfig } from 'services/api/types'; /** * Anima VAE Model Select - uses Anima-base VAE models (QwenImage/Wan 2.1 VAE) @@ -102,13 +104,58 @@ const ParamAnimaQwen3EncoderModelSelect = memo(() => { ParamAnimaQwen3EncoderModelSelect.displayName = 'ParamAnimaQwen3EncoderModelSelect'; /** - * Combined component for Anima model selection (VAE + Qwen3 Encoder) + * Anima T5 Encoder Model Select - uses T5-XXL encoder models (tokenizer submodel used for Anima) + */ +const ParamAnimaT5EncoderModelSelect = memo(() => { + const dispatch = useAppDispatch(); + const { t } = useTranslation(); + const animaT5EncoderModel = useAppSelector(selectAnimaT5EncoderModel); + const [modelConfigs, { isLoading }] = useT5EncoderModels(); + + const _onChange = useCallback( + (model: T5EncoderModelConfig | null) => { + if (model) { + dispatch(animaT5EncoderModelSelected(zModelIdentifierField.parse(model))); + } else { + dispatch(animaT5EncoderModelSelected(null)); + } + }, + [dispatch] + ); + + const { options, value, onChange, noOptionsMessage } = useModelCombobox({ + modelConfigs, + onChange: _onChange, + selectedModel: animaT5EncoderModel, + isLoading, + }); + + return ( + + {t('modelManager.animaT5Encoder')} + + + ); +}); + +ParamAnimaT5EncoderModelSelect.displayName = 'ParamAnimaT5EncoderModelSelect'; + +/** + * Combined component for Anima model selection (VAE + Qwen3 Encoder + T5 Encoder) */ const ParamAnimaModelSelect = () => { return ( <> + ); }; diff --git a/invokeai/frontend/web/src/features/queue/store/readiness.ts b/invokeai/frontend/web/src/features/queue/store/readiness.ts index 67f7ef7ddde..0775faa2cda 100644 
--- a/invokeai/frontend/web/src/features/queue/store/readiness.ts +++ b/invokeai/frontend/web/src/features/queue/store/readiness.ts @@ -277,6 +277,9 @@ const getReasonsWhyCannotEnqueueGenerateTab = (arg: { if (!params.animaQwen3EncoderModel) { reasons.push({ content: i18n.t('parameters.invoke.noAnimaQwen3EncoderModelSelected') }); } + if (!params.animaT5EncoderModel) { + reasons.push({ content: i18n.t('parameters.invoke.noAnimaT5EncoderModelSelected') }); + } } if (model) { @@ -664,6 +667,9 @@ const getReasonsWhyCannotEnqueueCanvasTab = (arg: { if (!params.animaQwen3EncoderModel) { reasons.push({ content: i18n.t('parameters.invoke.noAnimaQwen3EncoderModelSelected') }); } + if (!params.animaT5EncoderModel) { + reasons.push({ content: i18n.t('parameters.invoke.noAnimaT5EncoderModelSelected') }); + } } if (model) { diff --git a/invokeai/frontend/web/src/services/api/hooks/modelsByType.ts b/invokeai/frontend/web/src/services/api/hooks/modelsByType.ts index 9599440477f..25087737ca9 100644 --- a/invokeai/frontend/web/src/services/api/hooks/modelsByType.ts +++ b/invokeai/frontend/web/src/services/api/hooks/modelsByType.ts @@ -109,3 +109,4 @@ export const selectQwen3EncoderModels = buildModelsSelector(isQwen3EncoderModelC export const selectZImageDiffusersModels = buildModelsSelector(isZImageDiffusersMainModelConfig); export const selectFluxVAEModels = buildModelsSelector(isFluxVAEModelConfig); export const selectAnimaVAEModels = buildModelsSelector(isAnimaVAEModelConfig); +export const selectT5EncoderModels = buildModelsSelector(isT5EncoderModelConfigOrSubmodel); diff --git a/invokeai/frontend/web/src/services/api/schema.ts b/invokeai/frontend/web/src/services/api/schema.ts index 56fa2b2591d..af78eb5c22a 100644 --- a/invokeai/frontend/web/src/services/api/schema.ts +++ b/invokeai/frontend/web/src/services/api/schema.ts @@ -2995,6 +2995,7 @@ export type components = { * - Transformer: Cosmos Predict2 DiT + LLM Adapter (from single-file checkpoint) * - Qwen3 Encoder: Qwen3 
0.6B (standalone single-file) * - VAE: AutoencoderKLQwenImage / Wan 2.1 VAE (standalone single-file or FLUX VAE) + * - T5 Encoder: T5-XXL model (only the tokenizer submodel is used, for LLM Adapter token IDs) */ AnimaModelLoaderInvocation: { /** @@ -3031,6 +3032,12 @@ export type components = { * @default null */ qwen3_encoder_model?: components["schemas"]["ModelIdentifierField"] | null; + /** + * T5 Encoder + * @description T5-XXL encoder model. The tokenizer submodel is used for Anima text encoding. + * @default null + */ + t5_encoder_model?: components["schemas"]["ModelIdentifierField"] | null; /** * type * @default anima_model_loader @@ -3058,6 +3065,11 @@ export type components = { * @description VAE */ vae: components["schemas"]["VAEField"]; + /** + * T5 Encoder + * @description T5 tokenizer and text encoder + */ + t5_encoder: components["schemas"]["T5EncoderField"]; /** * type * @default anima_model_loader_output @@ -3103,6 +3115,12 @@ export type components = { * @default null */ qwen3_encoder?: components["schemas"]["Qwen3EncoderField"] | null; + /** + * T5 Encoder + * @description T5 tokenizer and text encoder + * @default null + */ + t5_encoder?: components["schemas"]["T5EncoderField"] | null; /** * @description A mask defining the region that this conditioning prompt applies to. 
* @default null From a358b62d11298b459cc738bfa3e7f73a253cdfb3 Mon Sep 17 00:00:00 2001 From: Your Name Date: Tue, 17 Mar 2026 20:52:51 -0400 Subject: [PATCH 14/14] make lora identification more reliable --- .../backend/model_manager/configs/lora.py | 62 +++++++------------ .../lora_conversions/anima_lora_constants.py | 39 ++++++++++++ .../anima_lora_conversion_utils.py | 20 +++--- .../test_anima_lora_conversion_utils.py | 26 ++++++++ 4 files changed, 96 insertions(+), 51 deletions(-) diff --git a/invokeai/backend/model_manager/configs/lora.py b/invokeai/backend/model_manager/configs/lora.py index fa3526930da..1503a6fa868 100644 --- a/invokeai/backend/model_manager/configs/lora.py +++ b/invokeai/backend/model_manager/configs/lora.py @@ -31,6 +31,10 @@ ZImageVariantType, ) from invokeai.backend.model_manager.util.model_util import lora_token_vector_length +from invokeai.backend.patches.lora_conversions.anima_lora_constants import ( + has_cosmos_dit_kohya_keys, + has_cosmos_dit_peft_keys, +) from invokeai.backend.patches.lora_conversions.flux_control_lora_utils import is_state_dict_likely_flux_control @@ -637,6 +641,13 @@ def _get_base_or_raise(cls, mod: ModelOnDisk) -> BaseModelType: return BaseModelType.Flux state_dict = mod.load_state_dict() + str_keys = [k for k in state_dict.keys() if isinstance(k, str)] + + # Rule out Anima LoRAs — their lora_te_ keys have shapes that + # lora_token_vector_length() misidentifies as SD2/SDXL. 
+ if has_cosmos_dit_kohya_keys(str_keys) or has_cosmos_dit_peft_keys(str_keys): + raise NotAMatchError("model looks like an Anima LoRA, not a Stable Diffusion LoRA") + # If we've gotten here, we assume that the model is a Stable Diffusion model token_vector_length = lora_token_vector_length(state_dict) if token_vector_length == 768: @@ -770,26 +781,15 @@ def _validate_looks_like_lora(cls, mod: ModelOnDisk) -> None: - lora_unet_blocks_0_cross_attn_k_proj.lora_down.weight (Kohya format) - diffusion_model.blocks.0.cross_attn.k_proj.lora_A.weight (diffusers PEFT format) - transformer.blocks.0.cross_attn.k_proj.lora_A.weight (diffusers PEFT format) + + Detection requires Cosmos DiT-specific subcomponent names (cross_attn, + self_attn, mlp, adaln_modulation) to avoid false-positives on other + architectures that also use ``blocks`` in their paths. """ state_dict = mod.load_state_dict() + str_keys = [k for k in state_dict.keys() if isinstance(k, str)] - # Check for Kohya-style Anima LoRA keys - has_kohya_keys = state_dict_has_any_keys_starting_with( - state_dict, - { - "lora_unet_blocks_", - }, - ) - - # Check for diffusers PEFT format with Cosmos DiT layer names - has_cosmos_dit_keys = state_dict_has_any_keys_starting_with( - state_dict, - { - "diffusion_model.blocks.", - "transformer.blocks.", - "base_model.model.transformer.blocks.", - }, - ) + has_cosmos_keys = has_cosmos_dit_kohya_keys(str_keys) or has_cosmos_dit_peft_keys(str_keys) # Also check for LoRA/LoKR weight suffixes has_lora_suffix = state_dict_has_any_keys_ending_with( @@ -805,35 +805,21 @@ def _validate_looks_like_lora(cls, mod: ModelOnDisk) -> None: }, ) - if (has_kohya_keys or has_cosmos_dit_keys) and has_lora_suffix: + if has_cosmos_keys and has_lora_suffix: return raise NotAMatchError("model does not match Anima LoRA heuristics") @classmethod def _get_base_or_raise(cls, mod: ModelOnDisk) -> BaseModelType: - """Anima LoRAs target Cosmos DiT blocks (blocks.X.cross_attn, blocks.X.self_attn, etc.).""" - 
state_dict = mod.load_state_dict() - - # Kohya format: lora_unet_blocks_X_... - has_kohya_keys = state_dict_has_any_keys_starting_with( - state_dict, - { - "lora_unet_blocks_", - }, - ) + """Anima LoRAs target Cosmos DiT blocks (blocks.X.cross_attn, blocks.X.self_attn, etc.). - # Diffusers PEFT format with Cosmos DiT structure - has_cosmos_dit_keys = state_dict_has_any_keys_starting_with( - state_dict, - { - "diffusion_model.blocks.", - "transformer.blocks.", - "base_model.model.transformer.blocks.", - }, - ) + Uses Cosmos DiT-specific subcomponent names to avoid false-positives. + """ + state_dict = mod.load_state_dict() + str_keys = [k for k in state_dict.keys() if isinstance(k, str)] - if has_kohya_keys or has_cosmos_dit_keys: + if has_cosmos_dit_kohya_keys(str_keys) or has_cosmos_dit_peft_keys(str_keys): return BaseModelType.Anima raise NotAMatchError("model does not look like an Anima LoRA") diff --git a/invokeai/backend/patches/lora_conversions/anima_lora_constants.py b/invokeai/backend/patches/lora_conversions/anima_lora_constants.py index f44a25d9809..58c3e58b969 100644 --- a/invokeai/backend/patches/lora_conversions/anima_lora_constants.py +++ b/invokeai/backend/patches/lora_conversions/anima_lora_constants.py @@ -1,8 +1,47 @@ # Anima LoRA prefix constants # These prefixes are used for key mapping when applying LoRA patches to Anima models +import re + # Prefix for Anima transformer (Cosmos DiT architecture) LoRA layers ANIMA_LORA_TRANSFORMER_PREFIX = "lora_transformer-" # Prefix for Qwen3 text encoder LoRA layers ANIMA_LORA_QWEN3_PREFIX = "lora_qwen3-" + +# --------------------------------------------------------------------------- +# Cosmos DiT detection helpers +# +# Shared between ``anima_lora_conversion_utils.is_state_dict_likely_anima_lora`` +# and the config probing code in ``configs/lora.py``. Kept here (rather than +# in ``anima_lora_conversion_utils``) to avoid circular imports. 
+# --------------------------------------------------------------------------- + +# Cosmos DiT subcomponent names unique to the Anima / Cosmos Predict2 architecture. +_COSMOS_DIT_SUBCOMPONENTS_RE = r"(cross_attn|self_attn|mlp|adaln_modulation)" + +# Kohya format: lora_unet_[llm_adapter_]blocks_N_ +_KOHYA_ANIMA_RE = re.compile( + r"lora_unet_(llm_adapter_)?blocks_\d+_" + _COSMOS_DIT_SUBCOMPONENTS_RE +) + +# PEFT format: .blocks.N. +_PEFT_ANIMA_RE = re.compile( + r"(diffusion_model|transformer|base_model\.model\.transformer)\.blocks\.\d+\." + _COSMOS_DIT_SUBCOMPONENTS_RE +) + + +def has_cosmos_dit_kohya_keys(str_keys: list[str]) -> bool: + """Check for Kohya-style keys targeting Cosmos DiT blocks with specific subcomponents. + + Requires both the ``lora_unet_[llm_adapter_]blocks_N_`` prefix **and** a + Cosmos DiT subcomponent name (cross_attn, self_attn, mlp, adaln_modulation) + to avoid false-positives on other architectures that might also use bare + ``blocks`` in their key paths. + """ + return any(_KOHYA_ANIMA_RE.search(k) is not None for k in str_keys) + + +def has_cosmos_dit_peft_keys(str_keys: list[str]) -> bool: + """Check for diffusers PEFT keys targeting Cosmos DiT blocks with specific subcomponents.""" + return any(_PEFT_ANIMA_RE.search(k) is not None for k in str_keys) diff --git a/invokeai/backend/patches/lora_conversions/anima_lora_conversion_utils.py b/invokeai/backend/patches/lora_conversions/anima_lora_conversion_utils.py index ab9f8500027..bc40f69897a 100644 --- a/invokeai/backend/patches/lora_conversions/anima_lora_conversion_utils.py +++ b/invokeai/backend/patches/lora_conversions/anima_lora_conversion_utils.py @@ -22,6 +22,8 @@ from invokeai.backend.patches.lora_conversions.anima_lora_constants import ( ANIMA_LORA_QWEN3_PREFIX, ANIMA_LORA_TRANSFORMER_PREFIX, + has_cosmos_dit_kohya_keys, + has_cosmos_dit_peft_keys, ) from invokeai.backend.patches.model_patch_raw import ModelPatchRaw @@ -30,24 +32,16 @@ def 
is_state_dict_likely_anima_lora(state_dict: dict[str | int, torch.Tensor]) - """Checks if the provided state dict is likely an Anima LoRA. Anima LoRAs use Kohya-style naming with lora_unet_ prefix and underscore-separated - model key paths targeting Cosmos DiT blocks. + model key paths targeting Cosmos DiT blocks. Detection requires Cosmos DiT-specific + subcomponent names (cross_attn, self_attn, mlp, adaln_modulation) to avoid + false-positives on other architectures that also use ``blocks`` in their paths. """ str_keys = [k for k in state_dict.keys() if isinstance(k, str)] - # Anima LoRAs use Kohya-style keys: lora_unet_blocks_X_... - has_kohya_keys = any(k.startswith("lora_unet_blocks_") for k in str_keys) - - if has_kohya_keys: + if has_cosmos_dit_kohya_keys(str_keys): return True - # Also check for diffusers PEFT format with Anima-specific layer names - # (blocks.X.cross_attn, blocks.X.self_attn, blocks.X.mlp — Cosmos DiT structure) - has_cosmos_dit_keys = any( - k.startswith(("diffusion_model.blocks.", "transformer.blocks.", "base_model.model.transformer.blocks.")) - for k in str_keys - ) - - return has_cosmos_dit_keys + return has_cosmos_dit_peft_keys(str_keys) # Mapping from Kohya underscore-style substrings to model parameter names. 
diff --git a/tests/backend/patches/lora_conversions/test_anima_lora_conversion_utils.py b/tests/backend/patches/lora_conversions/test_anima_lora_conversion_utils.py index e911e7d738e..f0c63b5c3e8 100644 --- a/tests/backend/patches/lora_conversions/test_anima_lora_conversion_utils.py +++ b/tests/backend/patches/lora_conversions/test_anima_lora_conversion_utils.py @@ -49,6 +49,32 @@ def test_is_state_dict_likely_anima_lora_false_for_flux(): assert not is_state_dict_likely_anima_lora(state_dict) +def test_is_state_dict_likely_anima_lora_false_for_generic_blocks(): + """Test that is_state_dict_likely_anima_lora() returns False for a hypothetical architecture + that uses lora_unet_blocks_ but with non-Cosmos DiT subcomponent names.""" + state_dict = { + # Has lora_unet_blocks_ prefix but uses 'attention' and 'ff' instead of + # Cosmos DiT subcomponents (cross_attn, self_attn, mlp, adaln_modulation) + "lora_unet_blocks_0_attention_to_q.lora_down.weight": torch.empty([16, 512]), + "lora_unet_blocks_0_attention_to_q.lora_up.weight": torch.empty([512, 16]), + "lora_unet_blocks_0_ff_net_0_proj.lora_down.weight": torch.empty([16, 512]), + "lora_unet_blocks_0_ff_net_0_proj.lora_up.weight": torch.empty([2048, 16]), + } + assert not is_state_dict_likely_anima_lora(state_dict) + + +def test_is_state_dict_likely_anima_lora_false_for_generic_peft_blocks(): + """Test that is_state_dict_likely_anima_lora() returns False for a hypothetical architecture + that uses transformer.blocks. 
in PEFT format but with non-Cosmos subcomponents.""" + state_dict = { + "transformer.blocks.0.attention.to_q.lora_A.weight": torch.empty([16, 512]), + "transformer.blocks.0.attention.to_q.lora_B.weight": torch.empty([512, 16]), + "transformer.blocks.0.ff.net.0.proj.lora_A.weight": torch.empty([16, 512]), + "transformer.blocks.0.ff.net.0.proj.lora_B.weight": torch.empty([2048, 16]), + } + assert not is_state_dict_likely_anima_lora(state_dict) + + def test_is_state_dict_likely_anima_lora_false_for_random(): """Test that is_state_dict_likely_anima_lora() returns False for unrelated state dicts.""" state_dict = {