diff --git a/core/control-plane/schema.ts b/core/control-plane/schema.ts index 02901bda2fb..8372851f808 100644 --- a/core/control-plane/schema.ts +++ b/core/control-plane/schema.ts @@ -19,6 +19,7 @@ const modelDescriptionSchema = z.object({ "ovhcloud", "nebius", "siliconflow", + "avian", "tensorix", "scaleway", "watsonx", diff --git a/core/llm/autodetect.ts b/core/llm/autodetect.ts index 736caebcc6e..5d47abcaf8d 100644 --- a/core/llm/autodetect.ts +++ b/core/llm/autodetect.ts @@ -74,6 +74,7 @@ const PROVIDER_HANDLES_TEMPLATING: string[] = [ "docker", "nous", "zAI", + "avian", "tensorix", // TODO add these, change to inverted logic so only the ones that need templating are hardcoded // Asksage.ts @@ -134,6 +135,7 @@ const PROVIDER_SUPPORTS_IMAGES: string[] = [ "ovhcloud", "watsonx", "zAI", + "avian", "tensorix", ]; @@ -253,6 +255,7 @@ const PARALLEL_PROVIDERS: string[] = [ "vertexai", "function-network", "scaleway", + "avian", "minimax", "tensorix", ]; diff --git a/core/llm/llms/Avian.ts b/core/llm/llms/Avian.ts new file mode 100644 index 00000000000..e7b5c00b886 --- /dev/null +++ b/core/llm/llms/Avian.ts @@ -0,0 +1,13 @@ +import { LLMOptions } from "../../index.js"; + +import OpenAI from "./OpenAI.js"; + +class Avian extends OpenAI { + static providerName = "avian"; + static defaultOptions: Partial<LLMOptions> = { + apiBase: "https://api.avian.io/v1/", + useLegacyCompletionsEndpoint: false, + }; +} + +export default Avian; diff --git a/core/llm/llms/index.ts b/core/llm/llms/index.ts index 453b2d90cd8..4fb1beb09ac 100644 --- a/core/llm/llms/index.ts +++ b/core/llm/llms/index.ts @@ -11,6 +11,7 @@ import { renderTemplatedString } from "../../util/handlebars/renderTemplatedStri import { BaseLLM } from "../index"; import Anthropic from "./Anthropic"; import Asksage from "./Asksage"; +import Avian from "./Avian"; import Azure from "./Azure"; import Bedrock from "./Bedrock"; import BedrockImport from "./BedrockImport"; @@ -134,6 +135,7 @@ export const LLMClasses = [ LlamaStack, TARS, 
zAI, + Avian, ]; export async function llmFromDescription( diff --git a/core/llm/toolSupport.ts b/core/llm/toolSupport.ts index 1b74d6188b6..6459633dbf8 100644 --- a/core/llm/toolSupport.ts +++ b/core/llm/toolSupport.ts @@ -433,6 +433,15 @@ export const PROVIDER_TOOL_SUPPORT: Record<string, (model: string) => boolean> = const lower = model.toLowerCase(); return !!lower.match(/^glm-[4-9]/); }, + avian: (model) => { + const lower = model.toLowerCase(); + return ( + lower.includes("deepseek") || + lower.includes("glm") || + lower.includes("kimi") || + lower.includes("minimax") + ); + }, moonshot: (model) => { // support moonshot models // https://platform.moonshot.ai/docs/pricing/chat#concepts diff --git a/docs/customize/model-providers/more/avian.mdx b/docs/customize/model-providers/more/avian.mdx new file mode 100644 index 00000000000..defae22a31f --- /dev/null +++ b/docs/customize/model-providers/more/avian.mdx @@ -0,0 +1,61 @@ +--- +title: "Avian" +description: "Configure Avian's AI models with Continue, including DeepSeek V3.2, Kimi K2.5, GLM-5, and MiniMax M2.5" +--- + +[Avian](https://avian.io/) provides an OpenAI-compatible API with access to leading AI models at competitive pricing. 
+ +Get an API key from the [Avian dashboard](https://avian.io) + +## Configuration + +<Tabs> + <Tab title="YAML"> + ```yaml title="config.yaml" + name: My Config + version: 0.0.1 + schema: v1 + + models: + - name: DeepSeek V3.2 + provider: avian + model: deepseek/deepseek-v3.2 + apiKey: <YOUR_AVIAN_API_KEY> + ``` + </Tab> + <Tab title="JSON"> + ```json title="config.json" + { + "models": [ + { + "title": "DeepSeek V3.2", + "provider": "avian", + "model": "deepseek/deepseek-v3.2", + "apiKey": "<YOUR_AVIAN_API_KEY>" + } + ] + } + ``` + </Tab> + </Tabs> + +## Available Models + +| Model | Context Length | Input Price | Output Price | + | ----- | ------------- | ----------- | ------------ | + | `deepseek/deepseek-v3.2` | 164K | $0.26/M | $0.38/M | + | `moonshotai/kimi-k2.5` | 131K | $0.45/M | $2.20/M | + | `z-ai/glm-5` | 131K | $0.30/M | $2.55/M | + | `minimax/minimax-m2.5` | 1M | $0.30/M | $1.10/M | + +## Configuration Options + +| Option | Description | Default | + | --------- | -------------------- | ----------------------------- | + | `apiKey` | Avian API key | Required | + | `apiBase` | API base URL | `https://api.avian.io/v1` | + | `model` | Model name to use | - | + +<Info> +You can set the `AVIAN_API_KEY` environment variable instead of specifying the API key directly in the configuration file. +</Info> diff --git a/docs/docs.json b/docs/docs.json index 8556401ffc6..b6b330a73c4 100644 --- a/docs/docs.json +++ b/docs/docs.json @@ -169,6 +169,7 @@ "group": "More Providers", "pages": [ "customize/model-providers/more/asksage", + "customize/model-providers/more/avian", "customize/model-providers/more/clawrouter", "customize/model-providers/more/deepseek", "customize/model-providers/more/deepinfra", diff --git a/extensions/vscode/config_schema.json b/extensions/vscode/config_schema.json index 6eb11282100..4f6900b53e0 100644 --- a/extensions/vscode/config_schema.json +++ b/extensions/vscode/config_schema.json @@ -236,7 +236,8 @@ "ovhcloud", "venice", "inception", - "tars" + "tars", + "avian" ], "markdownEnumDescriptions": [ "### OpenAI\nUse gpt-4, gpt-3.5-turbo, or any other OpenAI model. 
See [here](https://openai.com/product#made-for-developers) to obtain an API key.\n\n> [Reference](https://docs.continue.dev/reference/Model%20Providers/openai)", @@ -289,7 +290,8 @@ "### OVHcloud AI Endpoints is a serverless inference API that provides access to a curated selection of models (e.g., Llama, Mistral, Qwen, Deepseek). It is designed with security and data privacy in mind and is compliant with GDPR. To get started, create an API key on the OVHcloud [AI Endpoints website](https://endpoints.ai.cloud.ovh.net/). For more information, including pricing, visit the OVHcloud [AI Endpoints product page](https://www.ovhcloud.com/en/public-cloud/ai-endpoints/).", "### Venice\n Venice.AI is a privacy-focused generative AI platform, allowing users to interact with open-source LLMs without storing any private user data.\nHosted models support the OpenAI API standard, providing seamless integration for users seeking privacy and flexibility.\nTo get started with the Venice API, either purchase a pro account, stake $VVV for daily inference allotments, or fund your account with USD.\nVisit the [API settings page](https://venice.ai/settings/api) or learn more at the [Venice API documentation](https://venice.ai/api).", "### Inception\n Inception Labs offer a new generation of diffusion-based LLMs.\nVisit the [API settings page](https://platform.inceptionlabs.ai/) or learn more at the [Inception docs](https://platform.inceptionlabs.ai/docs).", - "### TARS\nTARS is an OpenAI-compatible proxy router. To get started, obtain an API key and configure the provider in your config.json." + "### TARS\nTARS is an OpenAI-compatible proxy router. To get started, obtain an API key and configure the provider in your config.json.", + "### Avian\nAvian provides an OpenAI-compatible API with access to leading AI models including DeepSeek, Kimi, GLM, and MiniMax. To get started, obtain an API key from [avian.io](https://avian.io)." 
], "type": "string" }, @@ -536,7 +538,8 @@ "kindo", "scaleway", "ovhcloud", - "venice" + "venice", + "avian" ] } }, diff --git a/gui/public/logos/avian.png b/gui/public/logos/avian.png new file mode 100644 index 00000000000..fd69be4b79d Binary files /dev/null and b/gui/public/logos/avian.png differ diff --git a/gui/src/pages/AddNewModel/configs/models.ts b/gui/src/pages/AddNewModel/configs/models.ts index 2482b1af241..07a1d2be98b 100644 --- a/gui/src/pages/AddNewModel/configs/models.ts +++ b/gui/src/pages/AddNewModel/configs/models.ts @@ -563,6 +563,59 @@ export const models: { [key: string]: ModelPackage } = { providerOptions: ["zAI"], isOpenSource: false, }, + avianDeepseekV32: { + title: "DeepSeek V3.2", + description: "DeepSeek V3.2 with 164K context, available through Avian", + refUrl: "https://avian.io", + params: { + title: "DeepSeek V3.2", + model: "deepseek/deepseek-v3.2", + contextLength: 164_000, + }, + icon: "avian.png", + providerOptions: ["avian"], + isOpenSource: false, + }, + avianKimiK25: { + title: "Kimi K2.5", + description: + "Moonshot AI's Kimi K2.5 with 131K context, available through Avian", + refUrl: "https://avian.io", + params: { + title: "Kimi K2.5", + model: "moonshotai/kimi-k2.5", + contextLength: 131_000, + }, + icon: "avian.png", + providerOptions: ["avian"], + isOpenSource: false, + }, + avianGlm5: { + title: "GLM-5", + description: "Z.ai's GLM-5 with 131K context, available through Avian", + refUrl: "https://avian.io", + params: { + title: "GLM-5", + model: "z-ai/glm-5", + contextLength: 131_000, + }, + icon: "avian.png", + providerOptions: ["avian"], + isOpenSource: false, + }, + avianMinimaxM25: { + title: "MiniMax M2.5", + description: "MiniMax M2.5 with 1M context window, available through Avian", + refUrl: "https://avian.io", + params: { + title: "MiniMax M2.5", + model: "minimax/minimax-m2.5", + contextLength: 1_000_000, + }, + icon: "avian.png", + providerOptions: ["avian"], + isOpenSource: false, + }, mistralOs: { title: 
"Mistral", description: diff --git a/gui/src/pages/AddNewModel/configs/providers.ts b/gui/src/pages/AddNewModel/configs/providers.ts index 8e2826e4fdd..268d757d748 100644 --- a/gui/src/pages/AddNewModel/configs/providers.ts +++ b/gui/src/pages/AddNewModel/configs/providers.ts @@ -266,6 +266,32 @@ export const providers: Partial<Record<string, ProviderInfo>> = { ], apiKeyUrl: "https://z.ai/manage-apikey/apikey-list", }, + avian: { + title: "Avian", + provider: "avian", + description: "Access top AI models at low cost through Avian's API", + longDescription: + "Avian provides an OpenAI-compatible API with access to leading AI models including DeepSeek V3.2, Kimi K2.5, GLM-5, and MiniMax M2.5. Get your API key from the [Avian dashboard](https://avian.io).", + icon: "avian.png", + tags: [ModelProviderTags.RequiresApiKey], + packages: [ + models.avianDeepseekV32, + models.avianKimiK25, + models.avianGlm5, + models.avianMinimaxM25, + ], + collectInputFor: [ + { + inputType: "text", + key: "apiKey", + label: "API Key", + placeholder: "Enter your Avian API key", + required: true, + }, + ...completionParamsInputsConfigs, + ], + apiKeyUrl: "https://avian.io", + }, "function-network": { title: "Function Network", provider: "function-network", diff --git a/packages/config-types/src/index.ts b/packages/config-types/src/index.ts index 8561500e662..6185783a639 100644 --- a/packages/config-types/src/index.ts +++ b/packages/config-types/src/index.ts @@ -62,6 +62,7 @@ export const modelDescriptionSchema = z.object({ "scaleway", "watsonx", "minimax", + "avian", ]), model: z.string(), apiKey: z.string().optional(), diff --git a/packages/llm-info/src/index.ts b/packages/llm-info/src/index.ts index 066b39c0ce2..3905eaecc36 100644 --- a/packages/llm-info/src/index.ts +++ b/packages/llm-info/src/index.ts @@ -1,4 +1,5 @@ import { Anthropic } from "./providers/anthropic.js"; +import { Avian } from "./providers/avian.js"; import { Azure } from "./providers/azure.js"; import { Bedrock } from "./providers/bedrock.js"; 
import { Cohere } from "./providers/cohere.js"; @@ -29,6 +30,7 @@ export const allModelProviders: ModelProvider[] = [ MiniMax, xAI, zAI, + Avian, ]; export const allLlms: LlmInfoWithProvider[] = allModelProviders.flatMap( diff --git a/packages/llm-info/src/providers/avian.ts b/packages/llm-info/src/providers/avian.ts new file mode 100644 index 00000000000..574fae5d43f --- /dev/null +++ b/packages/llm-info/src/providers/avian.ts @@ -0,0 +1,36 @@ +import { ModelProvider } from "../types.js"; + +export const Avian: ModelProvider = { + models: [ + { + model: "deepseek/deepseek-v3.2", + displayName: "DeepSeek V3.2", + contextLength: 164000, + recommendedFor: ["chat"], + regex: /deepseek\/deepseek-v3\.2/, + }, + { + model: "moonshotai/kimi-k2.5", + displayName: "Kimi K2.5", + contextLength: 131000, + recommendedFor: ["chat"], + regex: /moonshotai\/kimi-k2\.5/, + }, + { + model: "z-ai/glm-5", + displayName: "GLM-5", + contextLength: 131000, + recommendedFor: ["chat"], + regex: /z-ai\/glm-5/, + }, + { + model: "minimax/minimax-m2.5", + displayName: "MiniMax M2.5", + contextLength: 1000000, + recommendedFor: ["chat"], + regex: /minimax\/minimax-m2\.5/, + }, + ], + id: "avian", + displayName: "Avian", +}; diff --git a/packages/openai-adapters/src/index.ts b/packages/openai-adapters/src/index.ts index c9eb4da00fa..731814ae1ae 100644 --- a/packages/openai-adapters/src/index.ts +++ b/packages/openai-adapters/src/index.ts @@ -133,6 +133,8 @@ export function constructLlmApi(config: LLMConfig): BaseLlmApi | undefined { return openAICompatible("https://api.x.ai/v1/", config); case "zAI": return openAICompatible("https://api.z.ai/api/paas/v4/", config); + case "avian": + return openAICompatible("https://api.avian.io/v1/", config); case "voyage": return openAICompatible("https://api.voyageai.com/v1/", config); case "mistral": diff --git a/packages/openai-adapters/src/types.ts b/packages/openai-adapters/src/types.ts index 3b324b0ac6b..92f7a2592e6 100644 --- 
a/packages/openai-adapters/src/types.ts +++ b/packages/openai-adapters/src/types.ts @@ -58,6 +58,7 @@ export const OpenAIConfigSchema = BasePlusConfig.extend({ z.literal("vllm"), z.literal("xAI"), z.literal("zAI"), + z.literal("avian"), z.literal("scaleway"), z.literal("tensorix"), z.literal("ncompass"),