refactor: move provider model helpers into plugins

This commit is contained in:
Peter Steinberger
2026-03-27 20:16:35 +00:00
parent 5d3d54ee36
commit c28e76c490
94 changed files with 3455 additions and 611 deletions

View File

@@ -2,3 +2,4 @@ export {
ANTHROPIC_VERTEX_DEFAULT_MODEL_ID,
buildAnthropicVertexProvider,
} from "./provider-catalog.js";
export { resolveAnthropicVertexRegion } from "./region.js";

View File

@@ -2,7 +2,7 @@ import type {
ModelDefinitionConfig,
ModelProviderConfig,
} from "openclaw/plugin-sdk/provider-models";
import { resolveAnthropicVertexRegion } from "openclaw/plugin-sdk/provider-models";
import { resolveAnthropicVertexRegion } from "./region.js";
export const ANTHROPIC_VERTEX_DEFAULT_MODEL_ID = "claude-sonnet-4-6";
const ANTHROPIC_VERTEX_DEFAULT_CONTEXT_WINDOW = 1_000_000;
const GCP_VERTEX_CREDENTIALS_MARKER = "gcp-vertex-credentials";

View File

@@ -0,0 +1,20 @@
// Fallback region when no valid override is present in the environment.
const ANTHROPIC_VERTEX_DEFAULT_REGION = "global";
// Vertex regions are lowercase alphanumerics and hyphens (e.g. "us-east5").
const ANTHROPIC_VERTEX_REGION_RE = /^[a-z0-9-]+$/;

/** Returns the trimmed string, or undefined for non-strings and blank input. */
function normalizeOptionalSecretInput(value: unknown): string | undefined {
  if (typeof value === "string") {
    const trimmed = value.trim();
    if (trimmed) {
      return trimmed;
    }
  }
  return undefined;
}

/**
 * Resolves the Anthropic Vertex region from the environment.
 *
 * GOOGLE_CLOUD_LOCATION takes precedence over CLOUD_ML_REGION; a value that
 * does not match the region pattern falls back to the default ("global").
 */
export function resolveAnthropicVertexRegion(env: NodeJS.ProcessEnv = process.env): string {
  const candidate =
    normalizeOptionalSecretInput(env.GOOGLE_CLOUD_LOCATION) ??
    normalizeOptionalSecretInput(env.CLOUD_ML_REGION);
  if (candidate && ANTHROPIC_VERTEX_REGION_RE.test(candidate)) {
    return candidate;
  }
  return ANTHROPIC_VERTEX_DEFAULT_REGION;
}

View File

@@ -1 +1,8 @@
export { buildBytePlusCodingProvider, buildBytePlusProvider } from "./provider-catalog.js";
export {
buildBytePlusModelDefinition,
BYTEPLUS_BASE_URL,
BYTEPLUS_CODING_BASE_URL,
BYTEPLUS_CODING_MODEL_CATALOG,
BYTEPLUS_MODEL_CATALOG,
} from "./models.js";

View File

@@ -0,0 +1,123 @@
import type { ModelDefinitionConfig } from "openclaw/plugin-sdk/provider-models";
type VolcModelCatalogEntry = {
id: string;
name: string;
reasoning: boolean;
input: ReadonlyArray<ModelDefinitionConfig["input"][number]>;
contextWindow: number;
maxTokens: number;
};
const VOLC_MODEL_KIMI_K2_5 = {
id: "kimi-k2-5-260127",
name: "Kimi K2.5",
reasoning: false,
input: ["text", "image"] as const,
contextWindow: 256000,
maxTokens: 4096,
} as const;
const VOLC_MODEL_GLM_4_7 = {
id: "glm-4-7-251222",
name: "GLM 4.7",
reasoning: false,
input: ["text", "image"] as const,
contextWindow: 200000,
maxTokens: 4096,
} as const;
const VOLC_SHARED_CODING_MODEL_CATALOG = [
{
id: "ark-code-latest",
name: "Ark Coding Plan",
reasoning: false,
input: ["text"] as const,
contextWindow: 256000,
maxTokens: 4096,
},
{
id: "doubao-seed-code",
name: "Doubao Seed Code",
reasoning: false,
input: ["text"] as const,
contextWindow: 256000,
maxTokens: 4096,
},
{
id: "glm-4.7",
name: "GLM 4.7 Coding",
reasoning: false,
input: ["text"] as const,
contextWindow: 200000,
maxTokens: 4096,
},
{
id: "kimi-k2-thinking",
name: "Kimi K2 Thinking",
reasoning: false,
input: ["text"] as const,
contextWindow: 256000,
maxTokens: 4096,
},
{
id: "kimi-k2.5",
name: "Kimi K2.5 Coding",
reasoning: false,
input: ["text"] as const,
contextWindow: 256000,
maxTokens: 4096,
},
] as const;
export const BYTEPLUS_BASE_URL = "https://ark.ap-southeast.bytepluses.com/api/v3";
export const BYTEPLUS_CODING_BASE_URL = "https://ark.ap-southeast.bytepluses.com/api/coding/v3";
export const BYTEPLUS_DEFAULT_MODEL_ID = "seed-1-8-251228";
export const BYTEPLUS_CODING_DEFAULT_MODEL_ID = "ark-code-latest";
export const BYTEPLUS_DEFAULT_MODEL_REF = `byteplus/${BYTEPLUS_DEFAULT_MODEL_ID}`;
export const BYTEPLUS_DEFAULT_COST = {
input: 0.0001,
output: 0.0002,
cacheRead: 0,
cacheWrite: 0,
};
export const BYTEPLUS_MODEL_CATALOG = [
{
id: "seed-1-8-251228",
name: "Seed 1.8",
reasoning: false,
input: ["text", "image"] as const,
contextWindow: 256000,
maxTokens: 4096,
},
VOLC_MODEL_KIMI_K2_5,
VOLC_MODEL_GLM_4_7,
] as const;
export const BYTEPLUS_CODING_MODEL_CATALOG = VOLC_SHARED_CODING_MODEL_CATALOG;
export type BytePlusCatalogEntry = (typeof BYTEPLUS_MODEL_CATALOG)[number];
export type BytePlusCodingCatalogEntry = (typeof BYTEPLUS_CODING_MODEL_CATALOG)[number];
/**
 * Converts a Volc/Ark catalog entry into a ModelDefinitionConfig, attaching
 * the given pricing and copying the readonly input-modality tuple into a
 * fresh mutable array.
 */
function buildVolcModelDefinition(
  entry: VolcModelCatalogEntry,
  cost: ModelDefinitionConfig["cost"],
): ModelDefinitionConfig {
  const { id, name, reasoning, contextWindow, maxTokens } = entry;
  return {
    id,
    name,
    reasoning,
    input: Array.from(entry.input),
    cost,
    contextWindow,
    maxTokens,
  };
}

/**
 * Builds a BytePlus model definition using the shared default pricing.
 * Accepts entries from either the general or the coding catalog.
 */
export function buildBytePlusModelDefinition(
  entry: BytePlusCatalogEntry | BytePlusCodingCatalogEntry,
): ModelDefinitionConfig {
  return buildVolcModelDefinition(entry, BYTEPLUS_DEFAULT_COST);
}

View File

@@ -1,11 +1,11 @@
import type { ModelProviderConfig } from "openclaw/plugin-sdk/provider-models";
import {
buildBytePlusModelDefinition,
BYTEPLUS_BASE_URL,
BYTEPLUS_CODING_BASE_URL,
BYTEPLUS_CODING_MODEL_CATALOG,
BYTEPLUS_MODEL_CATALOG,
type ModelProviderConfig,
} from "openclaw/plugin-sdk/provider-models";
} from "./api.js";
export function buildBytePlusProvider(): ModelProviderConfig {
return {

14
extensions/chutes/api.ts Normal file
View File

@@ -0,0 +1,14 @@
export {
buildChutesModelDefinition,
CHUTES_BASE_URL,
CHUTES_DEFAULT_MODEL_ID,
CHUTES_DEFAULT_MODEL_REF,
CHUTES_MODEL_CATALOG,
discoverChutesModels,
} from "./models.js";
export { buildChutesProvider } from "./provider-catalog.js";
export {
applyChutesApiKeyConfig,
applyChutesConfig,
applyChutesProviderConfig,
} from "./onboard.js";

601
extensions/chutes/models.ts Normal file
View File

@@ -0,0 +1,601 @@
import type { ModelDefinitionConfig } from "openclaw/plugin-sdk/provider-models";
import { createSubsystemLogger } from "openclaw/plugin-sdk/runtime-env";
const log = createSubsystemLogger("chutes-models");
export const CHUTES_BASE_URL = "https://llm.chutes.ai/v1";
export const CHUTES_DEFAULT_MODEL_ID = "zai-org/GLM-4.7-TEE";
export const CHUTES_DEFAULT_MODEL_REF = `chutes/${CHUTES_DEFAULT_MODEL_ID}`;
const CHUTES_DEFAULT_CONTEXT_WINDOW = 128000;
const CHUTES_DEFAULT_MAX_TOKENS = 4096;
export const CHUTES_MODEL_CATALOG: ModelDefinitionConfig[] = [
{
id: "Qwen/Qwen3-32B",
name: "Qwen/Qwen3-32B",
reasoning: true,
input: ["text"],
contextWindow: 40960,
maxTokens: 40960,
cost: { input: 0.08, output: 0.24, cacheRead: 0, cacheWrite: 0 },
},
{
id: "unsloth/Mistral-Nemo-Instruct-2407",
name: "unsloth/Mistral-Nemo-Instruct-2407",
reasoning: false,
input: ["text"],
contextWindow: 131072,
maxTokens: 131072,
cost: { input: 0.02, output: 0.04, cacheRead: 0, cacheWrite: 0 },
},
{
id: "deepseek-ai/DeepSeek-V3-0324-TEE",
name: "deepseek-ai/DeepSeek-V3-0324-TEE",
reasoning: true,
input: ["text"],
contextWindow: 163840,
maxTokens: 65536,
cost: { input: 0.25, output: 1, cacheRead: 0, cacheWrite: 0 },
},
{
id: "Qwen/Qwen3-235B-A22B-Instruct-2507-TEE",
name: "Qwen/Qwen3-235B-A22B-Instruct-2507-TEE",
reasoning: true,
input: ["text"],
contextWindow: 262144,
maxTokens: 65536,
cost: { input: 0.08, output: 0.55, cacheRead: 0, cacheWrite: 0 },
},
{
id: "openai/gpt-oss-120b-TEE",
name: "openai/gpt-oss-120b-TEE",
reasoning: true,
input: ["text"],
contextWindow: 131072,
maxTokens: 65536,
cost: { input: 0.05, output: 0.45, cacheRead: 0, cacheWrite: 0 },
},
{
id: "chutesai/Mistral-Small-3.1-24B-Instruct-2503",
name: "chutesai/Mistral-Small-3.1-24B-Instruct-2503",
reasoning: false,
input: ["text", "image"],
contextWindow: 131072,
maxTokens: 131072,
cost: { input: 0.03, output: 0.11, cacheRead: 0, cacheWrite: 0 },
},
{
id: "deepseek-ai/DeepSeek-V3.2-TEE",
name: "deepseek-ai/DeepSeek-V3.2-TEE",
reasoning: true,
input: ["text"],
contextWindow: 131072,
maxTokens: 65536,
cost: { input: 0.28, output: 0.42, cacheRead: 0, cacheWrite: 0 },
},
{
id: "zai-org/GLM-4.7-TEE",
name: "zai-org/GLM-4.7-TEE",
reasoning: true,
input: ["text"],
contextWindow: 202752,
maxTokens: 65535,
cost: { input: 0.4, output: 2, cacheRead: 0, cacheWrite: 0 },
},
{
id: "moonshotai/Kimi-K2.5-TEE",
name: "moonshotai/Kimi-K2.5-TEE",
reasoning: true,
input: ["text", "image"],
contextWindow: 262144,
maxTokens: 65535,
cost: { input: 0.45, output: 2.2, cacheRead: 0, cacheWrite: 0 },
},
{
id: "unsloth/gemma-3-27b-it",
name: "unsloth/gemma-3-27b-it",
reasoning: false,
input: ["text", "image"],
contextWindow: 128000,
maxTokens: 65536,
cost: { input: 0.04, output: 0.15, cacheRead: 0, cacheWrite: 0 },
},
{
id: "XiaomiMiMo/MiMo-V2-Flash-TEE",
name: "XiaomiMiMo/MiMo-V2-Flash-TEE",
reasoning: true,
input: ["text"],
contextWindow: 262144,
maxTokens: 65536,
cost: { input: 0.09, output: 0.29, cacheRead: 0, cacheWrite: 0 },
},
{
id: "chutesai/Mistral-Small-3.2-24B-Instruct-2506",
name: "chutesai/Mistral-Small-3.2-24B-Instruct-2506",
reasoning: false,
input: ["text", "image"],
contextWindow: 131072,
maxTokens: 131072,
cost: { input: 0.06, output: 0.18, cacheRead: 0, cacheWrite: 0 },
},
{
id: "deepseek-ai/DeepSeek-R1-0528-TEE",
name: "deepseek-ai/DeepSeek-R1-0528-TEE",
reasoning: true,
input: ["text"],
contextWindow: 163840,
maxTokens: 65536,
cost: { input: 0.45, output: 2.15, cacheRead: 0, cacheWrite: 0 },
},
{
id: "zai-org/GLM-5-TEE",
name: "zai-org/GLM-5-TEE",
reasoning: true,
input: ["text"],
contextWindow: 202752,
maxTokens: 65535,
cost: { input: 0.95, output: 3.15, cacheRead: 0, cacheWrite: 0 },
},
{
id: "deepseek-ai/DeepSeek-V3.1-TEE",
name: "deepseek-ai/DeepSeek-V3.1-TEE",
reasoning: true,
input: ["text"],
contextWindow: 163840,
maxTokens: 65536,
cost: { input: 0.2, output: 0.8, cacheRead: 0, cacheWrite: 0 },
},
{
id: "deepseek-ai/DeepSeek-V3.1-Terminus-TEE",
name: "deepseek-ai/DeepSeek-V3.1-Terminus-TEE",
reasoning: true,
input: ["text"],
contextWindow: 163840,
maxTokens: 65536,
cost: { input: 0.23, output: 0.9, cacheRead: 0, cacheWrite: 0 },
},
{
id: "unsloth/gemma-3-4b-it",
name: "unsloth/gemma-3-4b-it",
reasoning: false,
input: ["text", "image"],
contextWindow: 96000,
maxTokens: 96000,
cost: { input: 0.01, output: 0.03, cacheRead: 0, cacheWrite: 0 },
},
{
id: "MiniMaxAI/MiniMax-M2.5-TEE",
name: "MiniMaxAI/MiniMax-M2.5-TEE",
reasoning: true,
input: ["text"],
contextWindow: 196608,
maxTokens: 65536,
cost: { input: 0.3, output: 1.1, cacheRead: 0, cacheWrite: 0 },
},
{
id: "tngtech/DeepSeek-TNG-R1T2-Chimera",
name: "tngtech/DeepSeek-TNG-R1T2-Chimera",
reasoning: true,
input: ["text"],
contextWindow: 163840,
maxTokens: 163840,
cost: { input: 0.25, output: 0.85, cacheRead: 0, cacheWrite: 0 },
},
{
id: "Qwen/Qwen3-Coder-Next-TEE",
name: "Qwen/Qwen3-Coder-Next-TEE",
reasoning: true,
input: ["text"],
contextWindow: 262144,
maxTokens: 65536,
cost: { input: 0.12, output: 0.75, cacheRead: 0, cacheWrite: 0 },
},
{
id: "NousResearch/Hermes-4-405B-FP8-TEE",
name: "NousResearch/Hermes-4-405B-FP8-TEE",
reasoning: true,
input: ["text"],
contextWindow: 131072,
maxTokens: 65536,
cost: { input: 0.3, output: 1.2, cacheRead: 0, cacheWrite: 0 },
},
{
id: "deepseek-ai/DeepSeek-V3",
name: "deepseek-ai/DeepSeek-V3",
reasoning: false,
input: ["text"],
contextWindow: 163840,
maxTokens: 163840,
cost: { input: 0.3, output: 1.2, cacheRead: 0, cacheWrite: 0 },
},
{
id: "openai/gpt-oss-20b",
name: "openai/gpt-oss-20b",
reasoning: true,
input: ["text"],
contextWindow: 131072,
maxTokens: 131072,
cost: { input: 0.04, output: 0.15, cacheRead: 0, cacheWrite: 0 },
},
{
id: "unsloth/Llama-3.2-3B-Instruct",
name: "unsloth/Llama-3.2-3B-Instruct",
reasoning: false,
input: ["text"],
contextWindow: 128000,
maxTokens: 4096,
cost: { input: 0.01, output: 0.01, cacheRead: 0, cacheWrite: 0 },
},
{
id: "unsloth/Mistral-Small-24B-Instruct-2501",
name: "unsloth/Mistral-Small-24B-Instruct-2501",
reasoning: false,
input: ["text", "image"],
contextWindow: 32768,
maxTokens: 32768,
cost: { input: 0.07, output: 0.3, cacheRead: 0, cacheWrite: 0 },
},
{
id: "zai-org/GLM-4.7-FP8",
name: "zai-org/GLM-4.7-FP8",
reasoning: true,
input: ["text"],
contextWindow: 202752,
maxTokens: 65535,
cost: { input: 0.3, output: 1.2, cacheRead: 0, cacheWrite: 0 },
},
{
id: "zai-org/GLM-4.6-TEE",
name: "zai-org/GLM-4.6-TEE",
reasoning: true,
input: ["text"],
contextWindow: 202752,
maxTokens: 65536,
cost: { input: 0.4, output: 1.7, cacheRead: 0, cacheWrite: 0 },
},
{
id: "Qwen/Qwen3.5-397B-A17B-TEE",
name: "Qwen/Qwen3.5-397B-A17B-TEE",
reasoning: true,
input: ["text", "image"],
contextWindow: 262144,
maxTokens: 65536,
cost: { input: 0.55, output: 3.5, cacheRead: 0, cacheWrite: 0 },
},
{
id: "Qwen/Qwen2.5-72B-Instruct",
name: "Qwen/Qwen2.5-72B-Instruct",
reasoning: false,
input: ["text"],
contextWindow: 32768,
maxTokens: 32768,
cost: { input: 0.3, output: 1.2, cacheRead: 0, cacheWrite: 0 },
},
{
id: "NousResearch/DeepHermes-3-Mistral-24B-Preview",
name: "NousResearch/DeepHermes-3-Mistral-24B-Preview",
reasoning: false,
input: ["text"],
contextWindow: 32768,
maxTokens: 32768,
cost: { input: 0.02, output: 0.1, cacheRead: 0, cacheWrite: 0 },
},
{
id: "Qwen/Qwen3-Next-80B-A3B-Instruct",
name: "Qwen/Qwen3-Next-80B-A3B-Instruct",
reasoning: false,
input: ["text"],
contextWindow: 262144,
maxTokens: 262144,
cost: { input: 0.1, output: 0.8, cacheRead: 0, cacheWrite: 0 },
},
{
id: "zai-org/GLM-4.6-FP8",
name: "zai-org/GLM-4.6-FP8",
reasoning: true,
input: ["text"],
contextWindow: 202752,
maxTokens: 65535,
cost: { input: 0.3, output: 1.2, cacheRead: 0, cacheWrite: 0 },
},
{
id: "Qwen/Qwen3-235B-A22B-Thinking-2507",
name: "Qwen/Qwen3-235B-A22B-Thinking-2507",
reasoning: true,
input: ["text"],
contextWindow: 262144,
maxTokens: 262144,
cost: { input: 0.11, output: 0.6, cacheRead: 0, cacheWrite: 0 },
},
{
id: "deepseek-ai/DeepSeek-R1-Distill-Llama-70B",
name: "deepseek-ai/DeepSeek-R1-Distill-Llama-70B",
reasoning: true,
input: ["text"],
contextWindow: 131072,
maxTokens: 131072,
cost: { input: 0.03, output: 0.11, cacheRead: 0, cacheWrite: 0 },
},
{
id: "tngtech/R1T2-Chimera-Speed",
name: "tngtech/R1T2-Chimera-Speed",
reasoning: true,
input: ["text"],
contextWindow: 131072,
maxTokens: 65536,
cost: { input: 0.22, output: 0.6, cacheRead: 0, cacheWrite: 0 },
},
{
id: "zai-org/GLM-4.6V",
name: "zai-org/GLM-4.6V",
reasoning: true,
input: ["text", "image"],
contextWindow: 131072,
maxTokens: 65536,
cost: { input: 0.3, output: 0.9, cacheRead: 0, cacheWrite: 0 },
},
{
id: "Qwen/Qwen2.5-VL-32B-Instruct",
name: "Qwen/Qwen2.5-VL-32B-Instruct",
reasoning: false,
input: ["text", "image"],
contextWindow: 16384,
maxTokens: 16384,
cost: { input: 0.05, output: 0.22, cacheRead: 0, cacheWrite: 0 },
},
{
id: "Qwen/Qwen3-VL-235B-A22B-Instruct",
name: "Qwen/Qwen3-VL-235B-A22B-Instruct",
reasoning: false,
input: ["text", "image"],
contextWindow: 262144,
maxTokens: 262144,
cost: { input: 0.3, output: 1.2, cacheRead: 0, cacheWrite: 0 },
},
{
id: "Qwen/Qwen3-14B",
name: "Qwen/Qwen3-14B",
reasoning: true,
input: ["text"],
contextWindow: 40960,
maxTokens: 40960,
cost: { input: 0.05, output: 0.22, cacheRead: 0, cacheWrite: 0 },
},
{
id: "Qwen/Qwen2.5-Coder-32B-Instruct",
name: "Qwen/Qwen2.5-Coder-32B-Instruct",
reasoning: false,
input: ["text"],
contextWindow: 32768,
maxTokens: 32768,
cost: { input: 0.03, output: 0.11, cacheRead: 0, cacheWrite: 0 },
},
{
id: "Qwen/Qwen3-30B-A3B",
name: "Qwen/Qwen3-30B-A3B",
reasoning: true,
input: ["text"],
contextWindow: 40960,
maxTokens: 40960,
cost: { input: 0.06, output: 0.22, cacheRead: 0, cacheWrite: 0 },
},
{
id: "unsloth/gemma-3-12b-it",
name: "unsloth/gemma-3-12b-it",
reasoning: false,
input: ["text", "image"],
contextWindow: 131072,
maxTokens: 131072,
cost: { input: 0.03, output: 0.1, cacheRead: 0, cacheWrite: 0 },
},
{
id: "unsloth/Llama-3.2-1B-Instruct",
name: "unsloth/Llama-3.2-1B-Instruct",
reasoning: false,
input: ["text"],
contextWindow: 128000,
maxTokens: 4096,
cost: { input: 0.01, output: 0.01, cacheRead: 0, cacheWrite: 0 },
},
{
id: "nvidia/NVIDIA-Nemotron-3-Nano-30B-A3B-BF16-TEE",
name: "nvidia/NVIDIA-Nemotron-3-Nano-30B-A3B-BF16-TEE",
reasoning: true,
input: ["text"],
contextWindow: 128000,
maxTokens: 4096,
cost: { input: 0.3, output: 1.2, cacheRead: 0, cacheWrite: 0 },
},
{
id: "NousResearch/Hermes-4-14B",
name: "NousResearch/Hermes-4-14B",
reasoning: true,
input: ["text"],
contextWindow: 40960,
maxTokens: 40960,
cost: { input: 0.01, output: 0.05, cacheRead: 0, cacheWrite: 0 },
},
{
id: "Qwen/Qwen3Guard-Gen-0.6B",
name: "Qwen/Qwen3Guard-Gen-0.6B",
reasoning: false,
input: ["text"],
contextWindow: 128000,
maxTokens: 4096,
cost: { input: 0.01, output: 0.01, cacheRead: 0, cacheWrite: 0 },
},
{
id: "rednote-hilab/dots.ocr",
name: "rednote-hilab/dots.ocr",
reasoning: false,
input: ["text", "image"],
contextWindow: 131072,
maxTokens: 131072,
cost: { input: 0.01, output: 0.01, cacheRead: 0, cacheWrite: 0 },
},
];
/**
 * Wraps a static catalog entry as a ModelDefinitionConfig, forcing the
 * Chutes-specific compat flag: streaming responses carry no usage data.
 */
export function buildChutesModelDefinition(
  model: (typeof CHUTES_MODEL_CATALOG)[number],
): ModelDefinitionConfig {
  const compat = { supportsUsageInStreaming: false };
  return { ...model, compat };
}
interface ChutesModelEntry {
id: string;
name?: string;
supported_features?: string[];
input_modalities?: string[];
context_length?: number;
max_output_length?: number;
pricing?: {
prompt?: number;
completion?: number;
};
[key: string]: unknown;
}
interface OpenAIListModelsResponse {
data?: ChutesModelEntry[];
}
// Discovery results are cached per API key for a short window to avoid
// hammering the models endpoint on every lookup.
const CACHE_TTL = 5 * 60 * 1000;
const CACHE_MAX_ENTRIES = 100;
interface CacheEntry {
  models: ModelDefinitionConfig[];
  time: number;
}
// Keyed by trimmed access token; "" is the anonymous (keyless) entry.
const modelCache = new Map<string, CacheEntry>();

/** Drops every cache entry whose TTL has elapsed as of `now`. */
function pruneExpiredCacheEntries(now: number = Date.now()): void {
  const stale: string[] = [];
  for (const [key, entry] of modelCache) {
    if (now - entry.time >= CACHE_TTL) {
      stale.push(key);
    }
  }
  for (const key of stale) {
    modelCache.delete(key);
  }
}

/**
 * Stores `models` under `tokenKey` and returns them. When the cache is full
 * and the key is new, the oldest entry is evicted first.
 */
function cacheAndReturn(
  tokenKey: string,
  models: ModelDefinitionConfig[],
): ModelDefinitionConfig[] {
  const now = Date.now();
  pruneExpiredCacheEntries(now);
  const needsEviction = modelCache.size >= CACHE_MAX_ENTRIES && !modelCache.has(tokenKey);
  if (needsEviction) {
    // Maps iterate in insertion order, so the first key is the oldest entry.
    const oldest = modelCache.keys().next();
    if (!oldest.done) {
      modelCache.delete(oldest.value);
    }
  }
  modelCache.set(tokenKey, { models, time: now });
  return models;
}
/**
 * Discovers available Chutes models via GET /v1/models, falling back to the
 * static catalog when the endpoint is unreachable, unauthorized, or empty.
 *
 * Results are cached per trimmed access token for CACHE_TTL; an anonymous
 * (keyless) request caches under "".
 */
export async function discoverChutesModels(accessToken?: string): Promise<ModelDefinitionConfig[]> {
  const trimmedKey = accessToken?.trim() ?? "";
  const now = Date.now();
  pruneExpiredCacheEntries(now);
  const cached = modelCache.get(trimmedKey);
  if (cached) {
    return cached.models;
  }
  // Keep unit tests deterministic and offline: serve the static catalog
  // without caching.
  if (process.env.NODE_ENV === "test" || process.env.VITEST === "true") {
    return CHUTES_MODEL_CATALOG.map(buildChutesModelDefinition);
  }
  // effectiveKey is the cache key for whatever response we end up using; it
  // is cleared when the key is rejected and we retry anonymously.
  let effectiveKey = trimmedKey;
  const staticCatalog = () =>
    cacheAndReturn(effectiveKey, CHUTES_MODEL_CATALOG.map(buildChutesModelDefinition));
  const headers: Record<string, string> = {};
  if (trimmedKey) {
    headers.Authorization = `Bearer ${trimmedKey}`;
  }
  try {
    let response = await fetch(`${CHUTES_BASE_URL}/models`, {
      signal: AbortSignal.timeout(10_000),
      headers,
    });
    // A rejected key may still be allowed to see the public model list:
    // retry once without auth and cache under the anonymous key.
    if (response.status === 401 && trimmedKey) {
      effectiveKey = "";
      response = await fetch(`${CHUTES_BASE_URL}/models`, {
        signal: AbortSignal.timeout(10_000),
      });
    }
    if (!response.ok) {
      // 401 (bad key) and 503 (maintenance) are expected; don't log those.
      if (response.status !== 401 && response.status !== 503) {
        log.warn(`GET /v1/models failed: HTTP ${response.status}, using static catalog`);
      }
      return staticCatalog();
    }
    const body = (await response.json()) as OpenAIListModelsResponse;
    const data = body?.data;
    if (!Array.isArray(data) || data.length === 0) {
      log.warn("No models in response, using static catalog");
      return staticCatalog();
    }
    // De-duplicate by id while preserving the server's ordering.
    const seen = new Set<string>();
    const models: ModelDefinitionConfig[] = [];
    for (const entry of data) {
      const id = typeof entry?.id === "string" ? entry.id.trim() : "";
      if (!id || seen.has(id)) {
        continue;
      }
      seen.add(id);
      // Heuristic: reasoning if the API says so, or if the id hints at it
      // (r1/thinking/reason); ids containing "tee" are also treated as
      // reasoning here — NOTE(review): confirm that assumption holds.
      const isReasoning =
        entry.supported_features?.includes("reasoning") ||
        id.toLowerCase().includes("r1") ||
        id.toLowerCase().includes("thinking") ||
        id.toLowerCase().includes("reason") ||
        id.toLowerCase().includes("tee");
      // Only text/image modalities are representable; others are dropped.
      const input: Array<"text" | "image"> = (entry.input_modalities || ["text"]).filter(
        (i): i is "text" | "image" => i === "text" || i === "image",
      );
      models.push({
        id,
        name: id,
        reasoning: isReasoning,
        input,
        cost: {
          // Missing pricing is billed as free; cache tiers are not reported.
          input: entry.pricing?.prompt || 0,
          output: entry.pricing?.completion || 0,
          cacheRead: 0,
          cacheWrite: 0,
        },
        contextWindow: entry.context_length || CHUTES_DEFAULT_CONTEXT_WINDOW,
        maxTokens: entry.max_output_length || CHUTES_DEFAULT_MAX_TOKENS,
        compat: {
          supportsUsageInStreaming: false,
        },
      });
    }
    return cacheAndReturn(
      effectiveKey,
      models.length > 0 ? models : CHUTES_MODEL_CATALOG.map(buildChutesModelDefinition),
    );
  } catch (error) {
    // Network failure / timeout: degrade to the static catalog.
    log.warn(`Discovery failed: ${String(error)}, using static catalog`);
    return staticCatalog();
  }
}

View File

@@ -1,14 +1,14 @@
import {
CHUTES_BASE_URL,
CHUTES_DEFAULT_MODEL_REF,
CHUTES_MODEL_CATALOG,
buildChutesModelDefinition,
} from "openclaw/plugin-sdk/provider-models";
import {
applyAgentDefaultModelPrimary,
applyProviderConfigWithModelCatalogPreset,
type OpenClawConfig,
} from "openclaw/plugin-sdk/provider-onboard";
import {
CHUTES_BASE_URL,
CHUTES_DEFAULT_MODEL_REF,
CHUTES_MODEL_CATALOG,
buildChutesModelDefinition,
} from "./api.js";
export { CHUTES_DEFAULT_MODEL_REF };

View File

@@ -1,10 +1,10 @@
import type { ModelProviderConfig } from "openclaw/plugin-sdk/provider-models";
import {
CHUTES_BASE_URL,
CHUTES_MODEL_CATALOG,
buildChutesModelDefinition,
discoverChutesModels,
type ModelProviderConfig,
} from "openclaw/plugin-sdk/provider-models";
} from "./api.js";
/**
* Build the Chutes provider with dynamic model discovery.

View File

@@ -0,0 +1,13 @@
export {
buildCloudflareAiGatewayModelDefinition,
CLOUDFLARE_AI_GATEWAY_DEFAULT_MODEL_ID,
CLOUDFLARE_AI_GATEWAY_DEFAULT_MODEL_REF,
CLOUDFLARE_AI_GATEWAY_PROVIDER_ID,
resolveCloudflareAiGatewayBaseUrl,
} from "./models.js";
export {
applyCloudflareAiGatewayConfig,
applyCloudflareAiGatewayProviderConfig,
buildCloudflareAiGatewayConfigPatch,
} from "./onboard.js";

View File

@@ -15,13 +15,10 @@ import {
} from "openclaw/plugin-sdk/provider-auth";
import {
buildCloudflareAiGatewayModelDefinition,
resolveCloudflareAiGatewayBaseUrl,
} from "openclaw/plugin-sdk/provider-models";
import {
applyCloudflareAiGatewayConfig,
buildCloudflareAiGatewayConfigPatch,
CLOUDFLARE_AI_GATEWAY_DEFAULT_MODEL_REF,
} from "./onboard.js";
resolveCloudflareAiGatewayBaseUrl,
} from "./models.js";
import { applyCloudflareAiGatewayConfig, buildCloudflareAiGatewayConfigPatch } from "./onboard.js";
const PROVIDER_ID = "cloudflare-ai-gateway";
const PROVIDER_ENV_VAR = "CLOUDFLARE_AI_GATEWAY_API_KEY";

View File

@@ -0,0 +1,44 @@
import type { ModelDefinitionConfig } from "openclaw/plugin-sdk/provider-models";
export const CLOUDFLARE_AI_GATEWAY_PROVIDER_ID = "cloudflare-ai-gateway";
export const CLOUDFLARE_AI_GATEWAY_DEFAULT_MODEL_ID = "claude-sonnet-4-5";
export const CLOUDFLARE_AI_GATEWAY_DEFAULT_MODEL_REF = `${CLOUDFLARE_AI_GATEWAY_PROVIDER_ID}/${CLOUDFLARE_AI_GATEWAY_DEFAULT_MODEL_ID}`;
// Default limits and pricing (per 1M tokens) used for every gateway model.
const CLOUDFLARE_AI_GATEWAY_DEFAULT_CONTEXT_WINDOW = 200_000;
const CLOUDFLARE_AI_GATEWAY_DEFAULT_MAX_TOKENS = 64_000;
const CLOUDFLARE_AI_GATEWAY_DEFAULT_COST = {
  input: 3,
  output: 15,
  cacheRead: 0.3,
  cacheWrite: 3.75,
};

/**
 * Builds a model definition for the Cloudflare AI Gateway provider.
 * All fields are optional; blank or omitted values fall back to the
 * Claude Sonnet 4.5 defaults above.
 */
export function buildCloudflareAiGatewayModelDefinition(params?: {
  id?: string;
  name?: string;
  reasoning?: boolean;
  input?: Array<"text" | "image">;
}): ModelDefinitionConfig {
  const trimmedId = params?.id?.trim();
  return {
    id: trimmedId || CLOUDFLARE_AI_GATEWAY_DEFAULT_MODEL_ID,
    name: params?.name ?? "Claude Sonnet 4.5",
    reasoning: params?.reasoning ?? true,
    input: params?.input ?? ["text", "image"],
    cost: CLOUDFLARE_AI_GATEWAY_DEFAULT_COST,
    contextWindow: CLOUDFLARE_AI_GATEWAY_DEFAULT_CONTEXT_WINDOW,
    maxTokens: CLOUDFLARE_AI_GATEWAY_DEFAULT_MAX_TOKENS,
  };
}
/**
 * Builds the Anthropic-flavored gateway base URL for an account/gateway pair.
 * Returns "" when either id is blank, signalling an unconfigured gateway.
 */
export function resolveCloudflareAiGatewayBaseUrl(params: {
  accountId: string;
  gatewayId: string;
}): string {
  const account = params.accountId.trim();
  const gateway = params.gatewayId.trim();
  return account && gateway
    ? `https://gateway.ai.cloudflare.com/v1/${account}/${gateway}/anthropic`
    : "";
}

View File

@@ -1,15 +1,13 @@
import {
buildCloudflareAiGatewayModelDefinition,
CLOUDFLARE_AI_GATEWAY_DEFAULT_MODEL_REF,
resolveCloudflareAiGatewayBaseUrl,
} from "openclaw/plugin-sdk/provider-models";
import {
applyAgentDefaultModelPrimary,
applyProviderConfigWithDefaultModel,
type OpenClawConfig,
} from "openclaw/plugin-sdk/provider-onboard";
export { CLOUDFLARE_AI_GATEWAY_DEFAULT_MODEL_REF };
import {
buildCloudflareAiGatewayModelDefinition,
CLOUDFLARE_AI_GATEWAY_DEFAULT_MODEL_REF,
resolveCloudflareAiGatewayBaseUrl,
} from "./models.js";
export function buildCloudflareAiGatewayConfigPatch(params: {
accountId: string;

View File

@@ -1 +1,6 @@
export {
buildDeepSeekModelDefinition,
DEEPSEEK_BASE_URL,
DEEPSEEK_MODEL_CATALOG,
} from "./models.js";
export { buildDeepSeekProvider } from "./provider-catalog.js";

View File

@@ -0,0 +1,44 @@
import type { ModelDefinitionConfig } from "openclaw/plugin-sdk/provider-models";
export const DEEPSEEK_BASE_URL = "https://api.deepseek.com";
// DeepSeek V3.2 API pricing (per 1M tokens)
// https://api-docs.deepseek.com/quick_start/pricing
const DEEPSEEK_V3_2_COST = {
input: 0.28,
output: 0.42,
cacheRead: 0.028,
cacheWrite: 0,
};
export const DEEPSEEK_MODEL_CATALOG: ModelDefinitionConfig[] = [
{
id: "deepseek-chat",
name: "DeepSeek Chat",
reasoning: false,
input: ["text"],
contextWindow: 131072,
maxTokens: 8192,
cost: DEEPSEEK_V3_2_COST,
compat: { supportsUsageInStreaming: true },
},
{
id: "deepseek-reasoner",
name: "DeepSeek Reasoner",
reasoning: true,
input: ["text"],
contextWindow: 131072,
maxTokens: 65536,
cost: DEEPSEEK_V3_2_COST,
compat: { supportsUsageInStreaming: true },
},
];
/**
 * Finalizes a DeepSeek catalog entry, pinning it to the OpenAI-compatible
 * completions API that api.deepseek.com exposes.
 */
export function buildDeepSeekModelDefinition(
  model: (typeof DEEPSEEK_MODEL_CATALOG)[number],
): ModelDefinitionConfig {
  const definition = { ...model, api: "openai-completions" };
  return definition;
}

View File

@@ -1,13 +1,9 @@
import {
buildDeepSeekModelDefinition,
DEEPSEEK_BASE_URL,
DEEPSEEK_MODEL_CATALOG,
} from "openclaw/plugin-sdk/provider-models";
import {
applyAgentDefaultModelPrimary,
applyProviderConfigWithModelCatalog,
type OpenClawConfig,
} from "openclaw/plugin-sdk/provider-onboard";
import { buildDeepSeekModelDefinition, DEEPSEEK_BASE_URL, DEEPSEEK_MODEL_CATALOG } from "./api.js";
export const DEEPSEEK_DEFAULT_MODEL_REF = "deepseek/deepseek-chat";

View File

@@ -1,9 +1,5 @@
import {
buildDeepSeekModelDefinition,
DEEPSEEK_BASE_URL,
DEEPSEEK_MODEL_CATALOG,
type ModelProviderConfig,
} from "openclaw/plugin-sdk/provider-models";
import type { ModelProviderConfig } from "openclaw/plugin-sdk/provider-models";
import { buildDeepSeekModelDefinition, DEEPSEEK_BASE_URL, DEEPSEEK_MODEL_CATALOG } from "./api.js";
export function buildDeepSeekProvider(): ModelProviderConfig {
return {

107
extensions/google/api.ts Normal file
View File

@@ -0,0 +1,107 @@
import {
applyAgentDefaultModelPrimary,
type OpenClawConfig,
} from "openclaw/plugin-sdk/provider-onboard";
import {
createGoogleThinkingPayloadWrapper,
sanitizeGoogleThinkingPayload,
} from "openclaw/plugin-sdk/provider-stream";
export { createGoogleThinkingPayloadWrapper, sanitizeGoogleThinkingPayload };
/**
 * Maps shorthand / legacy Gemini model ids to their canonical preview ids.
 * Unrecognized ids pass through unchanged.
 */
export function normalizeGoogleModelId(id: string): string {
  switch (id) {
    case "gemini-3-pro":
      return "gemini-3-pro-preview";
    case "gemini-3-flash":
      return "gemini-3-flash-preview";
    case "gemini-3.1-pro":
      return "gemini-3.1-pro-preview";
    case "gemini-3.1-flash-lite":
      return "gemini-3.1-flash-lite-preview";
    // NOTE(review): 3.1-flash deliberately resolves to the 3.0 flash preview
    // id below — confirm this alias is still intended.
    case "gemini-3.1-flash":
    case "gemini-3.1-flash-preview":
      return "gemini-3-flash-preview";
    default:
      return id;
  }
}
const DEFAULT_GOOGLE_API_HOST = "generativelanguage.googleapis.com";
export const DEFAULT_GOOGLE_API_BASE_URL = "https://generativelanguage.googleapis.com/v1beta";

/** Strips any run of trailing "/" characters. */
function trimTrailingSlashes(value: string): string {
  return value.replace(/\/+$/, "");
}

/**
 * Canonicalizes a Gemini API base URL.
 *
 * Blank input yields the default. Query and fragment are dropped, trailing
 * slashes trimmed, and a default-host URL with no path gains "/v1beta".
 * Input that new URL() cannot parse is returned as-is, except that a bare
 * default-host string still maps to the default base URL.
 */
export function normalizeGoogleApiBaseUrl(baseUrl?: string): string {
  const candidate = trimTrailingSlashes(baseUrl?.trim() || DEFAULT_GOOGLE_API_BASE_URL);
  try {
    const parsed = new URL(candidate);
    parsed.hash = "";
    parsed.search = "";
    const isDefaultHost = parsed.hostname.toLowerCase() === DEFAULT_GOOGLE_API_HOST;
    const hasNoPath = trimTrailingSlashes(parsed.pathname || "") === "";
    if (isDefaultHost && hasNoPath) {
      parsed.pathname = "/v1beta";
    }
    return trimTrailingSlashes(parsed.toString());
  } catch {
    return /^https:\/\/generativelanguage\.googleapis\.com\/?$/i.test(candidate)
      ? DEFAULT_GOOGLE_API_BASE_URL
      : candidate;
  }
}
/**
 * Derives request headers from a Gemini credential string.
 *
 * A credential that looks like JSON ("{...}") may carry an OAuth payload
 * ({ token, projectId }); when a non-empty token is present, Bearer auth is
 * used. Anything else — including malformed JSON or a blank token — is
 * treated as a plain API key sent via x-goog-api-key.
 */
export function parseGeminiAuth(apiKey: string): { headers: Record<string, string> } {
  if (apiKey.startsWith("{")) {
    try {
      const parsed = JSON.parse(apiKey) as { token?: string; projectId?: string };
      const token = typeof parsed.token === "string" ? parsed.token : "";
      if (token) {
        return {
          headers: {
            Authorization: `Bearer ${token}`,
            "Content-Type": "application/json",
          },
        };
      }
    } catch {
      // Malformed JSON: fall through to API-key auth.
    }
  }
  return {
    headers: {
      "x-goog-api-key": apiKey,
      "Content-Type": "application/json",
    },
  };
}
export const GOOGLE_GEMINI_DEFAULT_MODEL = "google/gemini-3.1-pro-preview";

/**
 * Ensures the agent-default primary model is the Google Gemini default.
 *
 * Handles both config shapes (a plain string or a { primary } object) and
 * reports whether the config was rewritten; an already-matching config is
 * returned unchanged.
 */
export function applyGoogleGeminiModelDefault(cfg: OpenClawConfig): {
  next: OpenClawConfig;
  changed: boolean;
} {
  const current = cfg.agents?.defaults?.model as unknown;
  let currentPrimary: string | undefined;
  if (typeof current === "string") {
    currentPrimary = current.trim() || undefined;
  } else if (
    current &&
    typeof current === "object" &&
    typeof (current as { primary?: unknown }).primary === "string"
  ) {
    currentPrimary = ((current as { primary: string }).primary || "").trim() || undefined;
  }
  if (currentPrimary === GOOGLE_GEMINI_DEFAULT_MODEL) {
    return { next: cfg, changed: false };
  }
  return {
    next: applyAgentDefaultModelPrimary(cfg, GOOGLE_GEMINI_DEFAULT_MODEL),
    changed: true,
  };
}

View File

@@ -1,16 +1,16 @@
import type { ImageGenerationProvider } from "openclaw/plugin-sdk/image-generation";
import { resolveApiKeyForProvider } from "openclaw/plugin-sdk/image-generation-core";
import {
DEFAULT_GOOGLE_API_BASE_URL,
normalizeGoogleApiBaseUrl,
normalizeGoogleModelId,
parseGeminiAuth,
} from "openclaw/plugin-sdk/provider-google";
import {
assertOkOrThrowHttpError,
normalizeBaseUrl,
postJsonRequest,
} from "openclaw/plugin-sdk/provider-http";
import {
DEFAULT_GOOGLE_API_BASE_URL,
normalizeGoogleApiBaseUrl,
normalizeGoogleModelId,
parseGeminiAuth,
} from "./api.js";
const DEFAULT_GOOGLE_IMAGE_MODEL = "gemini-3.1-flash-image-preview";
const DEFAULT_OUTPUT_MIME = "image/png";

View File

@@ -7,12 +7,12 @@ import {
type ProviderFetchUsageSnapshotContext,
} from "openclaw/plugin-sdk/plugin-entry";
import { createProviderApiKeyAuthMethod } from "openclaw/plugin-sdk/provider-auth-api-key";
import type { ProviderPlugin } from "openclaw/plugin-sdk/provider-models";
import {
GOOGLE_GEMINI_DEFAULT_MODEL,
applyGoogleGeminiModelDefault,
createGoogleThinkingPayloadWrapper,
} from "openclaw/plugin-sdk/provider-google";
import type { ProviderPlugin } from "openclaw/plugin-sdk/provider-models";
} from "./api.js";
import { buildGoogleGeminiCliBackend } from "./cli-backend.js";
import { isModernGoogleModel, resolveGoogle31ForwardCompatModel } from "./provider-models.js";
import { createGeminiWebSearchProvider } from "./src/gemini-web-search-provider.js";

View File

@@ -3,4 +3,4 @@ export {
normalizeGoogleApiBaseUrl,
normalizeGoogleModelId,
parseGeminiAuth,
} from "openclaw/plugin-sdk/provider-google";
} from "./api.js";

View File

@@ -1,5 +1,4 @@
import { Type } from "@sinclair/typebox";
import { DEFAULT_GOOGLE_API_BASE_URL } from "openclaw/plugin-sdk/provider-google";
import {
buildSearchCacheKey,
buildUnsupportedSearchFilterResponse,
@@ -26,6 +25,7 @@ import {
wrapWebContent,
writeCachedSearchPayload,
} from "openclaw/plugin-sdk/provider-web-search";
import { DEFAULT_GOOGLE_API_BASE_URL } from "../api.js";
const DEFAULT_GEMINI_MODEL = "gemini-2.5-flash";
const GEMINI_API_BASE = DEFAULT_GOOGLE_API_BASE_URL;

View File

@@ -1,2 +1,10 @@
export {
buildHuggingfaceModelDefinition,
discoverHuggingfaceModels,
HUGGINGFACE_BASE_URL,
HUGGINGFACE_MODEL_CATALOG,
HUGGINGFACE_POLICY_SUFFIXES,
isHuggingfacePolicyLocked,
} from "./models.js";
export { buildHuggingfaceProvider } from "./provider-catalog.js";
export { applyHuggingfaceConfig, HUGGINGFACE_DEFAULT_MODEL_REF } from "./onboard.js";

View File

@@ -0,0 +1,199 @@
import type { ModelDefinitionConfig } from "openclaw/plugin-sdk/provider-models";
export const HUGGINGFACE_BASE_URL = "https://router.huggingface.co/v1";
export const HUGGINGFACE_POLICY_SUFFIXES = ["cheapest", "fastest"] as const;
const HUGGINGFACE_DEFAULT_COST = {
input: 0,
output: 0,
cacheRead: 0,
cacheWrite: 0,
};
const HUGGINGFACE_DEFAULT_CONTEXT_WINDOW = 131072;
const HUGGINGFACE_DEFAULT_MAX_TOKENS = 8192;
type HFModelEntry = {
id: string;
owned_by?: string;
name?: string;
title?: string;
display_name?: string;
architecture?: {
input_modalities?: string[];
};
providers?: Array<{
context_length?: number;
}>;
};
type OpenAIListModelsResponse = {
data?: HFModelEntry[];
};
export const HUGGINGFACE_MODEL_CATALOG: ModelDefinitionConfig[] = [
{
id: "deepseek-ai/DeepSeek-R1",
name: "DeepSeek R1",
reasoning: true,
input: ["text"],
contextWindow: 131072,
maxTokens: 8192,
cost: { input: 3.0, output: 7.0, cacheRead: 3.0, cacheWrite: 3.0 },
},
{
id: "deepseek-ai/DeepSeek-V3.1",
name: "DeepSeek V3.1",
reasoning: false,
input: ["text"],
contextWindow: 131072,
maxTokens: 8192,
cost: { input: 0.6, output: 1.25, cacheRead: 0.6, cacheWrite: 0.6 },
},
{
id: "meta-llama/Llama-3.3-70B-Instruct-Turbo",
name: "Llama 3.3 70B Instruct Turbo",
reasoning: false,
input: ["text"],
contextWindow: 131072,
maxTokens: 8192,
cost: { input: 0.88, output: 0.88, cacheRead: 0.88, cacheWrite: 0.88 },
},
{
id: "openai/gpt-oss-120b",
name: "GPT-OSS 120B",
reasoning: false,
input: ["text"],
contextWindow: 131072,
maxTokens: 8192,
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
},
];
/**
 * Returns true when a model ref targets a Hugging Face routing-policy alias
 * ("cheapest"/"fastest") — either bare or as a ":<policy>" suffix — rather
 * than a concrete model id.
 */
export function isHuggingfacePolicyLocked(modelRef: string): boolean {
  const normalized = String(modelRef).trim();
  for (const policy of HUGGINGFACE_POLICY_SUFFIXES) {
    if (normalized === policy || normalized.endsWith(`:${policy}`)) {
      return true;
    }
  }
  return false;
}
/**
 * Converts a static catalog entry into the runtime ModelDefinitionConfig
 * shape. Field-for-field copy; no defaults are applied here because catalog
 * entries are fully specified.
 */
export function buildHuggingfaceModelDefinition(
  model: (typeof HUGGINGFACE_MODEL_CATALOG)[number],
): ModelDefinitionConfig {
  const { id, name, reasoning, input, cost, contextWindow, maxTokens } = model;
  return { id, name, reasoning, input, cost, contextWindow, maxTokens };
}
/**
 * Heuristically flags reasoning-capable models from the model id alone.
 * Matches well-known markers ("r1", "reason", "thinking", "grok", "qwq").
 * Note "r1" is a substring match, so ids merely containing "r1" also match —
 * accepted as a best-effort signal. The previous "reasoner" check was removed:
 * any id containing "reasoner" already contains "reason", so it was redundant.
 */
function isReasoningModelHeuristic(modelId: string): boolean {
  const lower = modelId.toLowerCase();
  return (
    lower.includes("r1") ||
    lower.includes("reason") ||
    lower.includes("thinking") ||
    lower.includes("grok") ||
    lower.includes("qwq")
  );
}

/**
 * Derives display metadata for a model id that is not in the static catalog:
 * the name is the last path segment with dashes turned into spaces and each
 * word's first character upper-cased; the reasoning flag comes from the
 * heuristic above (applied to the full id, including the owner prefix).
 */
function inferredMetaFromModelId(id: string): { name: string; reasoning: boolean } {
  const base = id.split("/").pop() ?? id;
  const reasoning = isReasoningModelHeuristic(id);
  const name = base.replace(/-/g, " ").replace(/\b(\w)/g, (c) => c.toUpperCase());
  return { name, reasoning };
}
/**
 * Picks the best human-readable name for an API model entry. Preference
 * order: `name` → `title` → `display_name` (first non-blank after trimming),
 * then "<owner>/<base-id>" when `owned_by` is present, then the caller's
 * inferred name.
 */
function displayNameFromApiEntry(entry: HFModelEntry, inferredName: string): string {
  for (const candidate of [entry.name, entry.title, entry.display_name]) {
    if (typeof candidate === "string") {
      const trimmed = candidate.trim();
      if (trimmed) {
        return trimmed;
      }
    }
  }
  if (typeof entry.owned_by === "string" && entry.owned_by.trim()) {
    const base = entry.id.split("/").pop() ?? entry.id;
    return `${entry.owned_by.trim()}/${base}`;
  }
  return inferredName;
}
/**
 * Discovers available models from the Hugging Face router `/models` endpoint.
 *
 * Falls back to the static HUGGINGFACE_MODEL_CATALOG when: running under a
 * test environment, no API key was supplied, the request fails/times out
 * (10s), the response is not OK, or the body contains no model entries.
 * Entries whose id matches the static catalog reuse its metadata; unknown
 * entries get inferred name/reasoning metadata, modalities from
 * `architecture.input_modalities`, and a context window from the first
 * provider that reports a positive one.
 *
 * @param apiKey Hugging Face router API key used as a Bearer token.
 * @returns Discovered model definitions, deduplicated by id.
 */
export async function discoverHuggingfaceModels(apiKey: string): Promise<ModelDefinitionConfig[]> {
  // Keep unit tests hermetic: never hit the network under vitest/test env.
  if (process.env.VITEST === "true" || process.env.NODE_ENV === "test") {
    return HUGGINGFACE_MODEL_CATALOG.map(buildHuggingfaceModelDefinition);
  }
  const trimmedKey = apiKey?.trim();
  if (!trimmedKey) {
    return HUGGINGFACE_MODEL_CATALOG.map(buildHuggingfaceModelDefinition);
  }
  try {
    const response = await fetch(`${HUGGINGFACE_BASE_URL}/models`, {
      signal: AbortSignal.timeout(10_000),
      headers: {
        Authorization: `Bearer ${trimmedKey}`,
        "Content-Type": "application/json",
      },
    });
    if (!response.ok) {
      return HUGGINGFACE_MODEL_CATALOG.map(buildHuggingfaceModelDefinition);
    }
    const body = (await response.json()) as OpenAIListModelsResponse;
    const data = body?.data;
    if (!Array.isArray(data) || data.length === 0) {
      return HUGGINGFACE_MODEL_CATALOG.map(buildHuggingfaceModelDefinition);
    }
    // Index the static catalog so curated metadata wins over inference.
    const catalogById = new Map(
      HUGGINGFACE_MODEL_CATALOG.map((model) => [model.id, model] as const),
    );
    const seen = new Set<string>();
    const models: ModelDefinitionConfig[] = [];
    for (const entry of data) {
      const id = typeof entry?.id === "string" ? entry.id.trim() : "";
      // Skip blank and duplicate ids.
      if (!id || seen.has(id)) {
        continue;
      }
      seen.add(id);
      const catalogEntry = catalogById.get(id);
      if (catalogEntry) {
        models.push(buildHuggingfaceModelDefinition(catalogEntry));
        continue;
      }
      // Unknown model: infer metadata from the id and the API entry.
      const inferred = inferredMetaFromModelId(id);
      const name = displayNameFromApiEntry(entry, inferred.name);
      const modalities = entry.architecture?.input_modalities;
      const input: Array<"text" | "image"> =
        Array.isArray(modalities) && modalities.includes("image") ? ["text", "image"] : ["text"];
      const providers = Array.isArray(entry.providers) ? entry.providers : [];
      const providerWithContext = providers.find(
        (provider) => typeof provider?.context_length === "number" && provider.context_length > 0,
      );
      models.push({
        id,
        name,
        reasoning: inferred.reasoning,
        input,
        cost: HUGGINGFACE_DEFAULT_COST,
        contextWindow: providerWithContext?.context_length ?? HUGGINGFACE_DEFAULT_CONTEXT_WINDOW,
        maxTokens: HUGGINGFACE_DEFAULT_MAX_TOKENS,
      });
    }
    return models.length > 0
      ? models
      : HUGGINGFACE_MODEL_CATALOG.map(buildHuggingfaceModelDefinition);
  } catch {
    // Best-effort discovery: any failure degrades to the static catalog.
    return HUGGINGFACE_MODEL_CATALOG.map(buildHuggingfaceModelDefinition);
  }
}

View File

@@ -1,12 +1,12 @@
import {
buildHuggingfaceModelDefinition,
HUGGINGFACE_BASE_URL,
HUGGINGFACE_MODEL_CATALOG,
} from "openclaw/plugin-sdk/provider-models";
import {
createModelCatalogPresetAppliers,
type OpenClawConfig,
} from "openclaw/plugin-sdk/provider-onboard";
import {
buildHuggingfaceModelDefinition,
HUGGINGFACE_BASE_URL,
HUGGINGFACE_MODEL_CATALOG,
} from "./models.js";
export const HUGGINGFACE_DEFAULT_MODEL_REF = "huggingface/deepseek-ai/DeepSeek-R1";

View File

@@ -1,10 +1,17 @@
import type { ModelProviderConfig } from "openclaw/plugin-sdk/provider-models";
import {
buildHuggingfaceModelDefinition,
discoverHuggingfaceModels,
type ModelProviderConfig,
HUGGINGFACE_BASE_URL,
HUGGINGFACE_MODEL_CATALOG,
} from "openclaw/plugin-sdk/provider-models";
} from "./models.js";
export {
buildHuggingfaceModelDefinition,
discoverHuggingfaceModels,
HUGGINGFACE_BASE_URL,
HUGGINGFACE_MODEL_CATALOG,
} from "./models.js";
export async function buildHuggingfaceProvider(
discoveryApiKey?: string,

View File

@@ -1 +1,14 @@
export { buildKilocodeProvider, buildKilocodeProviderWithDiscovery } from "./provider-catalog.js";
export {
buildKilocodeModelDefinition,
KILOCODE_BASE_URL,
KILOCODE_DEFAULT_CONTEXT_WINDOW,
KILOCODE_DEFAULT_COST,
KILOCODE_DEFAULT_MAX_TOKENS,
KILOCODE_DEFAULT_MODEL_ID,
KILOCODE_DEFAULT_MODEL_NAME,
KILOCODE_DEFAULT_MODEL_REF,
KILOCODE_MODELS_URL,
KILOCODE_MODEL_CATALOG,
discoverKilocodeModels,
} from "./provider-models.js";

View File

@@ -1,9 +1,9 @@
import { KILOCODE_BASE_URL, KILOCODE_DEFAULT_MODEL_REF } from "openclaw/plugin-sdk/provider-models";
import {
createModelCatalogPresetAppliers,
type OpenClawConfig,
} from "openclaw/plugin-sdk/provider-onboard";
import { buildKilocodeProvider } from "./provider-catalog.js";
import { KILOCODE_BASE_URL, KILOCODE_DEFAULT_MODEL_REF } from "./provider-models.js";
export { KILOCODE_BASE_URL, KILOCODE_DEFAULT_MODEL_REF };

View File

@@ -1,25 +1,25 @@
import { type ModelProviderConfig } from "openclaw/plugin-sdk/provider-models";
import {
discoverKilocodeModels,
type ModelProviderConfig,
KILOCODE_BASE_URL,
KILOCODE_DEFAULT_CONTEXT_WINDOW,
KILOCODE_DEFAULT_COST,
KILOCODE_DEFAULT_MAX_TOKENS,
KILOCODE_MODEL_CATALOG,
} from "openclaw/plugin-sdk/provider-models";
KILOCODE_BASE_URL as LOCAL_KILOCODE_BASE_URL,
KILOCODE_DEFAULT_CONTEXT_WINDOW as LOCAL_KILOCODE_DEFAULT_CONTEXT_WINDOW,
KILOCODE_DEFAULT_COST as LOCAL_KILOCODE_DEFAULT_COST,
KILOCODE_DEFAULT_MAX_TOKENS as LOCAL_KILOCODE_DEFAULT_MAX_TOKENS,
KILOCODE_MODEL_CATALOG as LOCAL_KILOCODE_MODEL_CATALOG,
} from "./provider-models.js";
export function buildKilocodeProvider(): ModelProviderConfig {
return {
baseUrl: KILOCODE_BASE_URL,
baseUrl: LOCAL_KILOCODE_BASE_URL,
api: "openai-completions",
models: KILOCODE_MODEL_CATALOG.map((model) => ({
models: LOCAL_KILOCODE_MODEL_CATALOG.map((model) => ({
id: model.id,
name: model.name,
reasoning: model.reasoning,
input: model.input,
cost: KILOCODE_DEFAULT_COST,
contextWindow: model.contextWindow ?? KILOCODE_DEFAULT_CONTEXT_WINDOW,
maxTokens: model.maxTokens ?? KILOCODE_DEFAULT_MAX_TOKENS,
cost: LOCAL_KILOCODE_DEFAULT_COST,
contextWindow: model.contextWindow ?? LOCAL_KILOCODE_DEFAULT_CONTEXT_WINDOW,
maxTokens: model.maxTokens ?? LOCAL_KILOCODE_DEFAULT_MAX_TOKENS,
})),
};
}
@@ -27,7 +27,7 @@ export function buildKilocodeProvider(): ModelProviderConfig {
export async function buildKilocodeProviderWithDiscovery(): Promise<ModelProviderConfig> {
const models = await discoverKilocodeModels();
return {
baseUrl: KILOCODE_BASE_URL,
baseUrl: LOCAL_KILOCODE_BASE_URL,
api: "openai-completions",
models,
};

View File

@@ -0,0 +1,186 @@
import type { KilocodeModelCatalogEntry } from "openclaw/plugin-sdk/provider-models";
import type { ModelDefinitionConfig } from "openclaw/plugin-sdk/provider-models";
import { createSubsystemLogger } from "openclaw/plugin-sdk/runtime-env";
const log = createSubsystemLogger("kilocode-models");
export const KILOCODE_BASE_URL = "https://api.kilo.ai/api/gateway/";
export const KILOCODE_DEFAULT_MODEL_ID = "kilo/auto";
export const KILOCODE_DEFAULT_MODEL_REF = `kilocode/${KILOCODE_DEFAULT_MODEL_ID}`;
export const KILOCODE_DEFAULT_MODEL_NAME = "Kilo Auto";
export const KILOCODE_MODEL_CATALOG: KilocodeModelCatalogEntry[] = [
{
id: KILOCODE_DEFAULT_MODEL_ID,
name: KILOCODE_DEFAULT_MODEL_NAME,
input: ["text", "image"],
reasoning: true,
},
];
export const KILOCODE_DEFAULT_CONTEXT_WINDOW = 1000000;
export const KILOCODE_DEFAULT_MAX_TOKENS = 128000;
export const KILOCODE_DEFAULT_COST = {
input: 0,
output: 0,
cacheRead: 0,
cacheWrite: 0,
};
export const KILOCODE_MODELS_URL = `${KILOCODE_BASE_URL}models`;
const DISCOVERY_TIMEOUT_MS = 5000;
interface GatewayModelPricing {
prompt: string;
completion: string;
image?: string;
request?: string;
input_cache_read?: string;
input_cache_write?: string;
web_search?: string;
internal_reasoning?: string;
}
interface GatewayModelEntry {
id: string;
name: string;
context_length: number;
architecture?: {
input_modalities?: string[];
output_modalities?: string[];
};
top_provider?: {
max_completion_tokens?: number | null;
};
pricing: GatewayModelPricing;
supported_parameters?: string[];
}
interface GatewayModelsResponse {
data: GatewayModelEntry[];
}
/**
 * Converts a gateway per-token price string into price per million tokens.
 * Missing, empty, non-numeric, non-finite, or negative inputs yield 0.
 */
function toPricePerMillion(perToken: string | undefined): number {
  if (perToken == null || perToken === "") {
    return 0;
  }
  const parsed = Number(perToken);
  return Number.isFinite(parsed) && parsed >= 0 ? parsed * 1_000_000 : 0;
}
/**
 * Maps the gateway's declared input modalities onto our supported shape:
 * ["text", "image"] when any modality is "image" (case-insensitive),
 * otherwise ["text"].
 */
function parseModality(entry: GatewayModelEntry): Array<"text" | "image"> {
  const modalities = entry.architecture?.input_modalities;
  if (Array.isArray(modalities)) {
    for (const modality of modalities) {
      if (typeof modality === "string" && modality.toLowerCase() === "image") {
        return ["text", "image"];
      }
    }
  }
  return ["text"];
}
/**
 * A model counts as reasoning-capable when the gateway advertises either the
 * "reasoning" or "include_reasoning" parameter in supported_parameters.
 */
function parseReasoning(entry: GatewayModelEntry): boolean {
  const params = entry.supported_parameters;
  if (!Array.isArray(params)) {
    return false;
  }
  return ["reasoning", "include_reasoning"].some((flag) => params.includes(flag));
}
/**
 * Translates one gateway catalog entry into a ModelDefinitionConfig,
 * converting per-token price strings into per-million pricing and filling
 * missing context/output limits with the Kilocode defaults.
 */
function toModelDefinition(entry: GatewayModelEntry): ModelDefinitionConfig {
  const cost = {
    input: toPricePerMillion(entry.pricing.prompt),
    output: toPricePerMillion(entry.pricing.completion),
    cacheRead: toPricePerMillion(entry.pricing.input_cache_read),
    cacheWrite: toPricePerMillion(entry.pricing.input_cache_write),
  };
  return {
    id: entry.id,
    name: entry.name || entry.id,
    reasoning: parseReasoning(entry),
    input: parseModality(entry),
    cost,
    contextWindow: entry.context_length || KILOCODE_DEFAULT_CONTEXT_WINDOW,
    maxTokens: entry.top_provider?.max_completion_tokens ?? KILOCODE_DEFAULT_MAX_TOKENS,
  };
}
/** Materializes the hard-coded fallback catalog with default cost and limits applied. */
function buildStaticCatalog(): ModelDefinitionConfig[] {
  return KILOCODE_MODEL_CATALOG.map(({ id, name, reasoning, input, contextWindow, maxTokens }) => ({
    id,
    name,
    reasoning,
    input,
    cost: KILOCODE_DEFAULT_COST,
    contextWindow: contextWindow ?? KILOCODE_DEFAULT_CONTEXT_WINDOW,
    maxTokens: maxTokens ?? KILOCODE_DEFAULT_MAX_TOKENS,
  }));
}
/**
 * Discovers models from the Kilocode gateway `/models` endpoint.
 *
 * Falls back to the static catalog when: running under a test environment,
 * the request fails or times out, the response is not OK, or the body holds
 * no entries. Discovered entries are deduplicated by id and malformed
 * entries are skipped with a warning. Static catalog models that were not
 * discovered are prepended so the default "kilo/auto" model stays available.
 *
 * @returns Model definitions, never empty (static catalog is the floor).
 */
export async function discoverKilocodeModels(): Promise<ModelDefinitionConfig[]> {
  // Keep unit tests hermetic: never hit the network under vitest/test env.
  if (process.env.NODE_ENV === "test" || process.env.VITEST) {
    return buildStaticCatalog();
  }
  try {
    const response = await fetch(KILOCODE_MODELS_URL, {
      headers: { Accept: "application/json" },
      signal: AbortSignal.timeout(DISCOVERY_TIMEOUT_MS),
    });
    if (!response.ok) {
      log.warn(`Failed to discover models: HTTP ${response.status}, using static catalog`);
      return buildStaticCatalog();
    }
    const data = (await response.json()) as GatewayModelsResponse;
    if (!Array.isArray(data.data) || data.data.length === 0) {
      log.warn("No models found from gateway API, using static catalog");
      return buildStaticCatalog();
    }
    const models: ModelDefinitionConfig[] = [];
    const discoveredIds = new Set<string>();
    for (const entry of data.data) {
      if (!entry || typeof entry !== "object") {
        continue;
      }
      const id = typeof entry.id === "string" ? entry.id.trim() : "";
      // Skip blank and duplicate ids.
      if (!id || discoveredIds.has(id)) {
        continue;
      }
      try {
        models.push(toModelDefinition(entry));
        discoveredIds.add(id);
      } catch (e) {
        // One bad entry must not sink the whole discovery pass.
        log.warn(`Skipping malformed model entry "${id}": ${String(e)}`);
      }
    }
    // Prepend static models the gateway did not report (e.g. "kilo/auto").
    const staticModels = buildStaticCatalog();
    for (const staticModel of staticModels) {
      if (!discoveredIds.has(staticModel.id)) {
        models.unshift(staticModel);
      }
    }
    return models.length > 0 ? models : buildStaticCatalog();
  } catch (error) {
    // Network/timeout/parse failure: degrade to the static catalog.
    log.warn(`Discovery failed: ${String(error)}, using static catalog`);
    return buildStaticCatalog();
  }
}
/** Builds the fallback definition for the default "kilo/auto" routing model. */
export function buildKilocodeModelDefinition(): ModelDefinitionConfig {
  const definition: ModelDefinitionConfig = {
    id: KILOCODE_DEFAULT_MODEL_ID,
    name: KILOCODE_DEFAULT_MODEL_NAME,
    reasoning: true,
    input: ["text", "image"],
    cost: KILOCODE_DEFAULT_COST,
    contextWindow: KILOCODE_DEFAULT_CONTEXT_WINDOW,
    maxTokens: KILOCODE_DEFAULT_MAX_TOKENS,
  };
  return definition;
}

View File

@@ -7,6 +7,6 @@ export {
KILOCODE_DEFAULT_MODEL_NAME,
KILOCODE_DEFAULT_MODEL_REF,
KILOCODE_MODEL_CATALOG,
} from "openclaw/plugin-sdk/provider-models";
} from "./provider-models.js";
export type { KilocodeModelCatalogEntry } from "openclaw/plugin-sdk/provider-models";

View File

@@ -143,6 +143,18 @@ function collectRuntimeApiOverlapExports(params: {
statement.moduleSpecifier && ts.isStringLiteral(statement.moduleSpecifier)
? statement.moduleSpecifier.text
: undefined;
if (
moduleSpecifier === "../../extensions/line/runtime-api.js" &&
statement.exportClause &&
ts.isNamedExports(statement.exportClause)
) {
for (const element of statement.exportClause.elements) {
if (!element.isTypeOnly) {
overlapExports.add(element.name.text);
}
}
continue;
}
const normalized = moduleSpecifier ? normalizeModuleSpecifier(moduleSpecifier) : null;
if (!normalized || !runtimeApiLocalModules.has(normalized)) {
continue;

View File

@@ -11,3 +11,11 @@ export {
MINIMAX_HOSTED_MODEL_REF,
MINIMAX_LM_STUDIO_COST,
} from "./model-definitions.js";
export {
isMiniMaxModernModelId,
MINIMAX_DEFAULT_MODEL_ID,
MINIMAX_DEFAULT_MODEL_REF,
MINIMAX_TEXT_MODEL_CATALOG,
MINIMAX_TEXT_MODEL_ORDER,
MINIMAX_TEXT_MODEL_REFS,
} from "./provider-models.js";

View File

@@ -11,11 +11,8 @@ import {
listProfilesForProvider,
} from "openclaw/plugin-sdk/provider-auth";
import { buildOauthProviderAuthResult } from "openclaw/plugin-sdk/provider-auth";
import {
isMiniMaxModernModelId,
MINIMAX_DEFAULT_MODEL_ID,
} from "openclaw/plugin-sdk/provider-models";
import { fetchMinimaxUsage } from "openclaw/plugin-sdk/provider-usage";
import { isMiniMaxModernModelId, MINIMAX_DEFAULT_MODEL_ID } from "./api.js";
import {
buildMinimaxImageGenerationProvider,
buildMinimaxPortalImageGenerationProvider,

View File

@@ -1,8 +1,5 @@
import {
MINIMAX_DEFAULT_MODEL_ID,
MINIMAX_TEXT_MODEL_CATALOG,
type ModelDefinitionConfig,
} from "openclaw/plugin-sdk/provider-models";
import type { ModelDefinitionConfig } from "openclaw/plugin-sdk/provider-models";
import { MINIMAX_DEFAULT_MODEL_ID, MINIMAX_TEXT_MODEL_CATALOG } from "./provider-models.js";
export const DEFAULT_MINIMAX_BASE_URL = "https://api.minimax.io/v1";
export const MINIMAX_API_BASE_URL = "https://api.minimax.io/anthropic";

View File

@@ -6,7 +6,7 @@ import {
MINIMAX_DEFAULT_MODEL_ID,
MINIMAX_TEXT_MODEL_CATALOG,
MINIMAX_TEXT_MODEL_ORDER,
} from "openclaw/plugin-sdk/provider-models";
} from "./provider-models.js";
const MINIMAX_PORTAL_BASE_URL = "https://api.minimax.io/anthropic";
const MINIMAX_DEFAULT_CONTEXT_WINDOW = 204800;

View File

@@ -0,0 +1,21 @@
import { matchesExactOrPrefix } from "openclaw/plugin-sdk/provider-models";
// Default MiniMax text model and its fully-qualified "minimax/<id>" ref.
export const MINIMAX_DEFAULT_MODEL_ID = "MiniMax-M2.7";
export const MINIMAX_DEFAULT_MODEL_REF = `minimax/${MINIMAX_DEFAULT_MODEL_ID}`;
// Display order of the text models offered by this provider.
export const MINIMAX_TEXT_MODEL_ORDER = ["MiniMax-M2.7", "MiniMax-M2.7-highspeed"] as const;
// Display names and reasoning flags keyed by model id.
export const MINIMAX_TEXT_MODEL_CATALOG = {
  "MiniMax-M2.7": { name: "MiniMax M2.7", reasoning: true },
  "MiniMax-M2.7-highspeed": { name: "MiniMax M2.7 Highspeed", reasoning: true },
} as const;
// Fully-qualified refs in the same order as MINIMAX_TEXT_MODEL_ORDER.
export const MINIMAX_TEXT_MODEL_REFS = MINIMAX_TEXT_MODEL_ORDER.map(
  (modelId) => `minimax/${modelId}`,
);
// Prefixes that identify the modern M2.7 model family.
const MINIMAX_MODERN_MODEL_MATCHERS = ["minimax-m2.7"] as const;
/** True when the model id matches (exactly or by prefix) a modern MiniMax M2.7-family id. */
export function isMiniMaxModernModelId(modelId: string): boolean {
  return matchesExactOrPrefix(modelId, MINIMAX_MODERN_MODEL_MATCHERS);
}

View File

@@ -1,5 +1,14 @@
export {
buildModelStudioDefaultModelDefinition,
buildModelStudioModelDefinition,
MODELSTUDIO_BASE_URL,
MODELSTUDIO_CN_BASE_URL,
MODELSTUDIO_DEFAULT_COST,
MODELSTUDIO_DEFAULT_MODEL_ID,
buildModelStudioProvider,
} from "./provider-catalog.js";
MODELSTUDIO_DEFAULT_MODEL_REF,
MODELSTUDIO_GLOBAL_BASE_URL,
MODELSTUDIO_STANDARD_CN_BASE_URL,
MODELSTUDIO_STANDARD_GLOBAL_BASE_URL,
MODELSTUDIO_MODEL_CATALOG,
} from "./models.js";
export { buildModelStudioProvider } from "./provider-catalog.js";

View File

@@ -1,43 +1,11 @@
import type { ModelDefinitionConfig } from "openclaw/plugin-sdk/provider-models";
import {
MODELSTUDIO_BASE_URL,
MODELSTUDIO_DEFAULT_COST as MODELSTUDIO_PROVIDER_DEFAULT_COST,
MODELSTUDIO_DEFAULT_MODEL_ID as MODELSTUDIO_PROVIDER_DEFAULT_MODEL_ID,
MODELSTUDIO_MODEL_CATALOG,
} from "./provider-catalog.js";
export const MODELSTUDIO_GLOBAL_BASE_URL = MODELSTUDIO_BASE_URL;
export const MODELSTUDIO_CN_BASE_URL = "https://coding.dashscope.aliyuncs.com/v1";
export const MODELSTUDIO_DEFAULT_COST = MODELSTUDIO_PROVIDER_DEFAULT_COST;
export const MODELSTUDIO_DEFAULT_MODEL_ID = MODELSTUDIO_PROVIDER_DEFAULT_MODEL_ID;
export const MODELSTUDIO_DEFAULT_MODEL_REF = `modelstudio/${MODELSTUDIO_DEFAULT_MODEL_ID}`;
export const MODELSTUDIO_STANDARD_CN_BASE_URL = "https://dashscope.aliyuncs.com/compatible-mode/v1";
export const MODELSTUDIO_STANDARD_GLOBAL_BASE_URL =
"https://dashscope-intl.aliyuncs.com/compatible-mode/v1";
export function buildModelStudioModelDefinition(params: {
id: string;
name?: string;
reasoning?: boolean;
input?: string[];
cost?: ModelDefinitionConfig["cost"];
contextWindow?: number;
maxTokens?: number;
}): ModelDefinitionConfig {
const catalog = MODELSTUDIO_MODEL_CATALOG.find((model) => model.id === params.id);
return {
id: params.id,
name: params.name ?? catalog?.name ?? params.id,
reasoning: params.reasoning ?? catalog?.reasoning ?? false,
input:
(params.input as ("text" | "image")[]) ?? (catalog?.input ? [...catalog.input] : ["text"]),
cost: params.cost ?? catalog?.cost ?? MODELSTUDIO_DEFAULT_COST,
contextWindow: params.contextWindow ?? catalog?.contextWindow ?? 262_144,
maxTokens: params.maxTokens ?? catalog?.maxTokens ?? 65_536,
};
}
export function buildModelStudioDefaultModelDefinition(): ModelDefinitionConfig {
return buildModelStudioModelDefinition({ id: MODELSTUDIO_DEFAULT_MODEL_ID });
}
export {
buildModelStudioDefaultModelDefinition,
buildModelStudioModelDefinition,
MODELSTUDIO_CN_BASE_URL,
MODELSTUDIO_DEFAULT_COST,
MODELSTUDIO_DEFAULT_MODEL_ID,
MODELSTUDIO_DEFAULT_MODEL_REF,
MODELSTUDIO_GLOBAL_BASE_URL,
MODELSTUDIO_STANDARD_CN_BASE_URL,
MODELSTUDIO_STANDARD_GLOBAL_BASE_URL,
} from "./models.js";

View File

@@ -0,0 +1,118 @@
import type { ModelDefinitionConfig } from "openclaw/plugin-sdk/provider-models";
export const MODELSTUDIO_BASE_URL = "https://coding-intl.dashscope.aliyuncs.com/v1";
export const MODELSTUDIO_GLOBAL_BASE_URL = MODELSTUDIO_BASE_URL;
export const MODELSTUDIO_CN_BASE_URL = "https://coding.dashscope.aliyuncs.com/v1";
export const MODELSTUDIO_STANDARD_CN_BASE_URL = "https://dashscope.aliyuncs.com/compatible-mode/v1";
export const MODELSTUDIO_STANDARD_GLOBAL_BASE_URL =
"https://dashscope-intl.aliyuncs.com/compatible-mode/v1";
export const MODELSTUDIO_DEFAULT_MODEL_ID = "qwen3.5-plus";
export const MODELSTUDIO_DEFAULT_COST = {
input: 0,
output: 0,
cacheRead: 0,
cacheWrite: 0,
};
export const MODELSTUDIO_DEFAULT_MODEL_REF = `modelstudio/${MODELSTUDIO_DEFAULT_MODEL_ID}`;
export const MODELSTUDIO_MODEL_CATALOG: ReadonlyArray<ModelDefinitionConfig> = [
{
id: "qwen3.5-plus",
name: "qwen3.5-plus",
reasoning: false,
input: ["text", "image"],
cost: MODELSTUDIO_DEFAULT_COST,
contextWindow: 1_000_000,
maxTokens: 65_536,
},
{
id: "qwen3-max-2026-01-23",
name: "qwen3-max-2026-01-23",
reasoning: false,
input: ["text"],
cost: MODELSTUDIO_DEFAULT_COST,
contextWindow: 262_144,
maxTokens: 65_536,
},
{
id: "qwen3-coder-next",
name: "qwen3-coder-next",
reasoning: false,
input: ["text"],
cost: MODELSTUDIO_DEFAULT_COST,
contextWindow: 262_144,
maxTokens: 65_536,
},
{
id: "qwen3-coder-plus",
name: "qwen3-coder-plus",
reasoning: false,
input: ["text"],
cost: MODELSTUDIO_DEFAULT_COST,
contextWindow: 1_000_000,
maxTokens: 65_536,
},
{
id: "MiniMax-M2.5",
name: "MiniMax-M2.5",
reasoning: true,
input: ["text"],
cost: MODELSTUDIO_DEFAULT_COST,
contextWindow: 1_000_000,
maxTokens: 65_536,
},
{
id: "glm-5",
name: "glm-5",
reasoning: false,
input: ["text"],
cost: MODELSTUDIO_DEFAULT_COST,
contextWindow: 202_752,
maxTokens: 16_384,
},
{
id: "glm-4.7",
name: "glm-4.7",
reasoning: false,
input: ["text"],
cost: MODELSTUDIO_DEFAULT_COST,
contextWindow: 202_752,
maxTokens: 16_384,
},
{
id: "kimi-k2.5",
name: "kimi-k2.5",
reasoning: false,
input: ["text", "image"],
cost: MODELSTUDIO_DEFAULT_COST,
contextWindow: 262_144,
maxTokens: 32_768,
},
];
/**
 * Builds a ModelDefinitionConfig for a ModelStudio model id. Explicit params
 * win; otherwise values come from the matching MODELSTUDIO_MODEL_CATALOG
 * entry; otherwise provider-wide defaults (262_144 context, 65_536 max
 * tokens, zero cost, text-only input) apply.
 */
export function buildModelStudioModelDefinition(params: {
  id: string;
  name?: string;
  reasoning?: boolean;
  input?: string[];
  cost?: ModelDefinitionConfig["cost"];
  contextWindow?: number;
  maxTokens?: number;
}): ModelDefinitionConfig {
  const fallback = MODELSTUDIO_MODEL_CATALOG.find((entry) => entry.id === params.id);
  // Copy the catalog's input array so callers cannot mutate the catalog.
  const input =
    (params.input as ("text" | "image")[]) ?? (fallback?.input ? [...fallback.input] : ["text"]);
  return {
    id: params.id,
    name: params.name ?? fallback?.name ?? params.id,
    reasoning: params.reasoning ?? fallback?.reasoning ?? false,
    input,
    cost: params.cost ?? fallback?.cost ?? MODELSTUDIO_DEFAULT_COST,
    contextWindow: params.contextWindow ?? fallback?.contextWindow ?? 262_144,
    maxTokens: params.maxTokens ?? fallback?.maxTokens ?? 65_536,
  };
}
/** Convenience wrapper: the catalog definition for MODELSTUDIO_DEFAULT_MODEL_ID. */
export function buildModelStudioDefaultModelDefinition(): ModelDefinitionConfig {
  return buildModelStudioModelDefinition({ id: MODELSTUDIO_DEFAULT_MODEL_ID });
}

View File

@@ -1,91 +1,5 @@
import type {
ModelDefinitionConfig,
ModelProviderConfig,
} from "openclaw/plugin-sdk/provider-models";
export const MODELSTUDIO_BASE_URL = "https://coding-intl.dashscope.aliyuncs.com/v1";
export const MODELSTUDIO_DEFAULT_MODEL_ID = "qwen3.5-plus";
export const MODELSTUDIO_DEFAULT_COST = {
input: 0,
output: 0,
cacheRead: 0,
cacheWrite: 0,
};
export const MODELSTUDIO_MODEL_CATALOG: ReadonlyArray<ModelDefinitionConfig> = [
{
id: "qwen3.5-plus",
name: "qwen3.5-plus",
reasoning: false,
input: ["text", "image"],
cost: MODELSTUDIO_DEFAULT_COST,
contextWindow: 1_000_000,
maxTokens: 65_536,
},
{
id: "qwen3-max-2026-01-23",
name: "qwen3-max-2026-01-23",
reasoning: false,
input: ["text"],
cost: MODELSTUDIO_DEFAULT_COST,
contextWindow: 262_144,
maxTokens: 65_536,
},
{
id: "qwen3-coder-next",
name: "qwen3-coder-next",
reasoning: false,
input: ["text"],
cost: MODELSTUDIO_DEFAULT_COST,
contextWindow: 262_144,
maxTokens: 65_536,
},
{
id: "qwen3-coder-plus",
name: "qwen3-coder-plus",
reasoning: false,
input: ["text"],
cost: MODELSTUDIO_DEFAULT_COST,
contextWindow: 1_000_000,
maxTokens: 65_536,
},
{
id: "MiniMax-M2.5",
name: "MiniMax-M2.5",
reasoning: true,
input: ["text"],
cost: MODELSTUDIO_DEFAULT_COST,
contextWindow: 1_000_000,
maxTokens: 65_536,
},
{
id: "glm-5",
name: "glm-5",
reasoning: false,
input: ["text"],
cost: MODELSTUDIO_DEFAULT_COST,
contextWindow: 202_752,
maxTokens: 16_384,
},
{
id: "glm-4.7",
name: "glm-4.7",
reasoning: false,
input: ["text"],
cost: MODELSTUDIO_DEFAULT_COST,
contextWindow: 202_752,
maxTokens: 16_384,
},
{
id: "kimi-k2.5",
name: "kimi-k2.5",
reasoning: false,
input: ["text", "image"],
cost: MODELSTUDIO_DEFAULT_COST,
contextWindow: 262_144,
maxTokens: 32_768,
},
];
import type { ModelProviderConfig } from "openclaw/plugin-sdk/provider-models";
import { MODELSTUDIO_BASE_URL, MODELSTUDIO_MODEL_CATALOG } from "./models.js";
export function buildModelStudioProvider(): ModelProviderConfig {
return {

View File

@@ -1 +1,13 @@
export {
applyOpenAIConfig,
applyOpenAIProviderConfig,
OPENAI_CODEX_DEFAULT_MODEL,
OPENAI_DEFAULT_AUDIO_TRANSCRIPTION_MODEL,
OPENAI_DEFAULT_EMBEDDING_MODEL,
OPENAI_DEFAULT_IMAGE_MODEL,
OPENAI_DEFAULT_MODEL,
OPENAI_DEFAULT_TTS_MODEL,
OPENAI_DEFAULT_TTS_VOICE,
} from "./default-models.js";
export { buildOpenAICodexProvider } from "./openai-codex-catalog.js";
export { buildOpenAIProvider } from "./openai-provider.js";

View File

@@ -0,0 +1,40 @@
import { ensureModelAllowlistEntry } from "openclaw/plugin-sdk/provider-onboard";
import {
applyAgentDefaultModelPrimary,
type OpenClawConfig,
} from "openclaw/plugin-sdk/provider-onboard";
export const OPENAI_DEFAULT_MODEL = "openai/gpt-5.4";
export const OPENAI_CODEX_DEFAULT_MODEL = "openai-codex/gpt-5.4";
export const OPENAI_DEFAULT_IMAGE_MODEL = "gpt-image-1";
export const OPENAI_DEFAULT_TTS_MODEL = "gpt-4o-mini-tts";
export const OPENAI_DEFAULT_TTS_VOICE = "alloy";
export const OPENAI_DEFAULT_AUDIO_TRANSCRIPTION_MODEL = "gpt-4o-mini-transcribe";
export const OPENAI_DEFAULT_EMBEDDING_MODEL = "text-embedding-3-small";
/**
 * Ensures the OpenAI default model is in the allowlist and carries a "GPT"
 * alias, preserving any alias the user already configured. Returns a new
 * config object; the input config is not mutated.
 */
export function applyOpenAIProviderConfig(cfg: OpenClawConfig): OpenClawConfig {
  const withAllowlist = ensureModelAllowlistEntry({ cfg, modelRef: OPENAI_DEFAULT_MODEL });
  const existing = withAllowlist.agents?.defaults?.models;
  const current = existing?.[OPENAI_DEFAULT_MODEL];
  const models = {
    ...existing,
    [OPENAI_DEFAULT_MODEL]: { ...current, alias: current?.alias ?? "GPT" },
  };
  return {
    ...withAllowlist,
    agents: {
      ...withAllowlist.agents,
      defaults: { ...withAllowlist.agents?.defaults, models },
    },
  };
}
/** Applies the provider config (allowlist + alias) and makes OPENAI_DEFAULT_MODEL the primary agent model. */
export function applyOpenAIConfig(cfg: OpenClawConfig): OpenClawConfig {
  return applyAgentDefaultModelPrimary(applyOpenAIProviderConfig(cfg), OPENAI_DEFAULT_MODEL);
}

View File

@@ -1,6 +1,6 @@
import type { ImageGenerationProvider } from "openclaw/plugin-sdk/image-generation";
import { resolveApiKeyForProvider } from "openclaw/plugin-sdk/provider-auth";
import { OPENAI_DEFAULT_IMAGE_MODEL as DEFAULT_OPENAI_IMAGE_MODEL } from "openclaw/plugin-sdk/provider-models";
import { OPENAI_DEFAULT_IMAGE_MODEL as DEFAULT_OPENAI_IMAGE_MODEL } from "./default-models.js";
const DEFAULT_OPENAI_IMAGE_BASE_URL = "https://api.openai.com/v1";
const DEFAULT_OUTPUT_MIME = "image/png";

View File

@@ -5,7 +5,7 @@ import {
type AudioTranscriptionRequest,
type MediaUnderstandingProvider,
} from "openclaw/plugin-sdk/media-understanding";
import { OPENAI_DEFAULT_AUDIO_TRANSCRIPTION_MODEL } from "openclaw/plugin-sdk/provider-models";
import { OPENAI_DEFAULT_AUDIO_TRANSCRIPTION_MODEL } from "./default-models.js";
export const DEFAULT_OPENAI_AUDIO_BASE_URL = "https://api.openai.com/v1";

View File

@@ -15,11 +15,11 @@ import {
DEFAULT_CONTEXT_TOKENS,
normalizeModelCompat,
normalizeProviderId,
OPENAI_CODEX_DEFAULT_MODEL,
type ProviderPlugin,
} from "openclaw/plugin-sdk/provider-models";
import { createOpenAIAttributionHeadersWrapper } from "openclaw/plugin-sdk/provider-stream";
import { fetchCodexUsage } from "openclaw/plugin-sdk/provider-usage";
import { OPENAI_CODEX_DEFAULT_MODEL } from "./default-models.js";
import { resolveCodexAuthIdentity } from "./openai-codex-auth-identity.js";
import { buildOpenAICodexProvider } from "./openai-codex-catalog.js";
import {

View File

@@ -4,17 +4,16 @@ import {
} from "openclaw/plugin-sdk/plugin-entry";
import { createProviderApiKeyAuthMethod } from "openclaw/plugin-sdk/provider-auth";
import {
applyOpenAIConfig,
DEFAULT_CONTEXT_TOKENS,
normalizeModelCompat,
normalizeProviderId,
OPENAI_DEFAULT_MODEL,
type ProviderPlugin,
} from "openclaw/plugin-sdk/provider-models";
import {
createOpenAIAttributionHeadersWrapper,
createOpenAIDefaultTransportWrapper,
} from "openclaw/plugin-sdk/provider-stream";
import { applyOpenAIConfig, OPENAI_DEFAULT_MODEL } from "./default-models.js";
import {
cloneFirstTemplateModel,
findCatalogTemplate,

View File

@@ -0,0 +1,52 @@
import { OPENCODE_GO_DEFAULT_MODEL_REF } from "./onboard.js";
export {
applyOpencodeGoConfig,
applyOpencodeGoProviderConfig,
OPENCODE_GO_DEFAULT_MODEL_REF,
} from "./onboard.js";
/**
 * Extracts the primary model ref from an agent `model` setting, which may be
 * either a plain string or an object with a string `primary` field. Blank or
 * unrecognized values resolve to undefined.
 */
function resolveCurrentPrimaryModel(model: unknown): string | undefined {
  let raw: string | undefined;
  if (typeof model === "string") {
    raw = model;
  } else if (model !== null && typeof model === "object") {
    const primary = (model as { primary?: unknown }).primary;
    if (typeof primary === "string") {
      raw = primary;
    }
  }
  const trimmed = raw?.trim();
  return trimmed ? trimmed : undefined;
}
/**
 * Makes OPENCODE_GO_DEFAULT_MODEL_REF the primary agent model unless it
 * already is. Preserves any other keys on an object-shaped `model` setting.
 * Returns the (possibly new) config plus a `changed` flag; the input config
 * is never mutated.
 */
export function applyOpencodeGoModelDefault(
  cfg: import("openclaw/plugin-sdk/provider-onboard").OpenClawConfig,
): {
  next: import("openclaw/plugin-sdk/provider-onboard").OpenClawConfig;
  changed: boolean;
} {
  if (resolveCurrentPrimaryModel(cfg.agents?.defaults?.model) === OPENCODE_GO_DEFAULT_MODEL_REF) {
    return { next: cfg, changed: false };
  }
  const existingModel = cfg.agents?.defaults?.model;
  const model =
    existingModel && typeof existingModel === "object"
      ? { ...existingModel, primary: OPENCODE_GO_DEFAULT_MODEL_REF }
      : { primary: OPENCODE_GO_DEFAULT_MODEL_REF };
  return {
    next: {
      ...cfg,
      agents: {
        ...cfg.agents,
        defaults: { ...cfg.agents?.defaults, model },
      },
    },
    changed: true,
  };
}

View File

@@ -1,7 +1,6 @@
import { definePluginEntry } from "openclaw/plugin-sdk/plugin-entry";
import { createProviderApiKeyAuthMethod } from "openclaw/plugin-sdk/provider-auth";
import { OPENCODE_GO_DEFAULT_MODEL_REF } from "openclaw/plugin-sdk/provider-models";
import { applyOpencodeGoConfig } from "./onboard.js";
import { applyOpencodeGoConfig, OPENCODE_GO_DEFAULT_MODEL_REF } from "./api.js";
const PROVIDER_ID = "opencode-go";

View File

@@ -1,11 +1,10 @@
import { OPENCODE_GO_DEFAULT_MODEL_REF } from "openclaw/plugin-sdk/provider-models";
import {
applyAgentDefaultModelPrimary,
withAgentModelAliases,
type OpenClawConfig,
} from "openclaw/plugin-sdk/provider-onboard";
export { OPENCODE_GO_DEFAULT_MODEL_REF };
export const OPENCODE_GO_DEFAULT_MODEL_REF = "opencode-go/kimi-k2.5";
const OPENCODE_GO_ALIAS_DEFAULTS: Record<string, string> = {
"opencode-go/kimi-k2.5": "Kimi",

View File

@@ -0,0 +1,62 @@
import { OPENCODE_ZEN_DEFAULT_MODEL_REF } from "./onboard.js";
export {
applyOpencodeZenConfig,
applyOpencodeZenProviderConfig,
OPENCODE_ZEN_DEFAULT_MODEL_REF,
} from "./onboard.js";
const LEGACY_OPENCODE_ZEN_DEFAULT_MODELS = new Set([
"opencode/claude-opus-4-5",
"opencode-zen/claude-opus-4-5",
]);
export const OPENCODE_ZEN_DEFAULT_MODEL = OPENCODE_ZEN_DEFAULT_MODEL_REF;
/**
 * Resolves the primary model ref from an agent `model` setting — either a
 * plain string or an object with a string `primary` field. Empty/whitespace
 * and non-string shapes resolve to undefined.
 */
function resolveCurrentPrimaryModel(model: unknown): string | undefined {
  if (typeof model === "string") {
    const trimmed = model.trim();
    return trimmed === "" ? undefined : trimmed;
  }
  if (model === null || typeof model !== "object") {
    return undefined;
  }
  const primary = (model as { primary?: unknown }).primary;
  if (typeof primary !== "string") {
    return undefined;
  }
  const trimmed = primary.trim();
  return trimmed === "" ? undefined : trimmed;
}
/**
 * Makes OPENCODE_ZEN_DEFAULT_MODEL the primary agent model. A current ref in
 * LEGACY_OPENCODE_ZEN_DEFAULT_MODELS is treated as equivalent to the default
 * and left untouched (changed: false). Otherwise returns a new config with
 * the primary set; the input config is never mutated.
 */
export function applyOpencodeZenModelDefault(
  cfg: import("openclaw/plugin-sdk/provider-onboard").OpenClawConfig,
): {
  next: import("openclaw/plugin-sdk/provider-onboard").OpenClawConfig;
  changed: boolean;
} {
  const current = resolveCurrentPrimaryModel(cfg.agents?.defaults?.model);
  const alreadyDefault =
    current !== undefined &&
    (current === OPENCODE_ZEN_DEFAULT_MODEL || LEGACY_OPENCODE_ZEN_DEFAULT_MODELS.has(current));
  if (alreadyDefault) {
    return { next: cfg, changed: false };
  }
  const existingModel = cfg.agents?.defaults?.model;
  const model =
    existingModel && typeof existingModel === "object"
      ? { ...existingModel, primary: OPENCODE_ZEN_DEFAULT_MODEL }
      : { primary: OPENCODE_ZEN_DEFAULT_MODEL };
  return {
    next: {
      ...cfg,
      agents: {
        ...cfg.agents,
        defaults: { ...cfg.agents?.defaults, model },
      },
    },
    changed: true,
  };
}

View File

@@ -1,10 +1,7 @@
import { isMiniMaxModernModelId } from "openclaw/plugin-sdk/minimax";
import { definePluginEntry } from "openclaw/plugin-sdk/plugin-entry";
import { createProviderApiKeyAuthMethod } from "openclaw/plugin-sdk/provider-auth";
import {
isMiniMaxModernModelId,
OPENCODE_ZEN_DEFAULT_MODEL,
} from "openclaw/plugin-sdk/provider-models";
import { applyOpencodeZenConfig } from "./onboard.js";
import { applyOpencodeZenConfig, OPENCODE_ZEN_DEFAULT_MODEL } from "./api.js";
const PROVIDER_ID = "opencode";

View File

@@ -1,11 +1,10 @@
import { OPENCODE_ZEN_DEFAULT_MODEL_REF } from "openclaw/plugin-sdk/provider-models";
import {
applyAgentDefaultModelPrimary,
withAgentModelAliases,
type OpenClawConfig,
} from "openclaw/plugin-sdk/provider-onboard";
export { OPENCODE_ZEN_DEFAULT_MODEL_REF };
export const OPENCODE_ZEN_DEFAULT_MODEL_REF = "opencode/claude-opus-4-6";
export function applyOpencodeZenProviderConfig(cfg: OpenClawConfig): OpenClawConfig {
return {

View File

@@ -5,7 +5,7 @@ import {
type ProviderRuntimeModel,
} from "openclaw/plugin-sdk/plugin-entry";
import { createProviderApiKeyAuthMethod } from "openclaw/plugin-sdk/provider-auth-api-key";
import { applyXaiModelCompat, DEFAULT_CONTEXT_TOKENS } from "openclaw/plugin-sdk/provider-models";
import { DEFAULT_CONTEXT_TOKENS } from "openclaw/plugin-sdk/provider-models";
import {
getOpenRouterModelCapabilities,
loadOpenRouterModelCapabilities,
@@ -13,6 +13,7 @@ import {
createOpenRouterWrapper,
isProxyReasoningUnsupported,
} from "openclaw/plugin-sdk/provider-stream";
import { applyXaiModelCompat } from "openclaw/plugin-sdk/xai";
import { openrouterMediaUnderstandingProvider } from "./media-understanding-provider.js";
import { applyOpenrouterConfig, OPENROUTER_DEFAULT_MODEL_REF } from "./onboard.js";
import { buildOpenrouterProvider } from "./provider-catalog.js";

6
extensions/sglang/api.ts Normal file
View File

@@ -0,0 +1,6 @@
export {
SGLANG_DEFAULT_API_KEY_ENV_VAR,
SGLANG_DEFAULT_BASE_URL,
SGLANG_MODEL_PLACEHOLDER,
SGLANG_PROVIDER_LABEL,
} from "./defaults.js";

View File

@@ -0,0 +1,4 @@
// Default endpoint for a locally-hosted SGLang OpenAI-compatible server.
export const SGLANG_DEFAULT_BASE_URL = "http://127.0.0.1:30000/v1";
// Human-readable provider label shown in onboarding/UI.
export const SGLANG_PROVIDER_LABEL = "SGLang";
// Environment variable consulted for the API key.
export const SGLANG_DEFAULT_API_KEY_ENV_VAR = "SGLANG_API_KEY";
// Example model id used as an input placeholder (not guaranteed to be served).
export const SGLANG_MODEL_PLACEHOLDER = "Qwen/Qwen3-8B";

View File

@@ -1,14 +1,14 @@
import {
SGLANG_DEFAULT_API_KEY_ENV_VAR,
SGLANG_DEFAULT_BASE_URL,
SGLANG_MODEL_PLACEHOLDER,
SGLANG_PROVIDER_LABEL,
} from "openclaw/plugin-sdk/agent-runtime";
import {
definePluginEntry,
type OpenClawPluginApi,
type ProviderAuthMethodNonInteractiveContext,
} from "openclaw/plugin-sdk/plugin-entry";
import {
SGLANG_DEFAULT_API_KEY_ENV_VAR,
SGLANG_DEFAULT_BASE_URL,
SGLANG_MODEL_PLACEHOLDER,
SGLANG_PROVIDER_LABEL,
} from "./api.js";
const PROVIDER_ID = "sglang";

View File

@@ -1 +1,7 @@
export {
buildSyntheticModelDefinition,
SYNTHETIC_BASE_URL,
SYNTHETIC_DEFAULT_MODEL_REF,
SYNTHETIC_MODEL_CATALOG,
} from "./models.js";
export { buildSyntheticProvider } from "./provider-catalog.js";

View File

@@ -0,0 +1,196 @@
import type { ModelDefinitionConfig } from "openclaw/plugin-sdk/provider-models";
// Anthropic-compatible endpoint for the Synthetic API.
export const SYNTHETIC_BASE_URL = "https://api.synthetic.new/anthropic";
// Default model id and its fully-qualified "synthetic/<id>" ref.
export const SYNTHETIC_DEFAULT_MODEL_ID = "hf:MiniMaxAI/MiniMax-M2.5";
export const SYNTHETIC_DEFAULT_MODEL_REF = `synthetic/${SYNTHETIC_DEFAULT_MODEL_ID}`;
// Pricing is not tracked for Synthetic models; all costs are reported as zero.
export const SYNTHETIC_DEFAULT_COST = {
  input: 0,
  output: 0,
  cacheRead: 0,
  cacheWrite: 0,
};
// Static catalog of Synthetic-hosted models (Hugging Face-style "hf:" ids).
// NOTE(review): some entries flagged reasoning: false look like reasoning
// models by name (e.g. "DeepSeek R1 0528") — confirm against Synthetic's docs.
export const SYNTHETIC_MODEL_CATALOG = [
  {
    id: SYNTHETIC_DEFAULT_MODEL_ID,
    name: "MiniMax M2.5",
    reasoning: false,
    input: ["text"],
    contextWindow: 192000,
    maxTokens: 65536,
  },
  {
    id: "hf:moonshotai/Kimi-K2-Thinking",
    name: "Kimi K2 Thinking",
    reasoning: true,
    input: ["text"],
    contextWindow: 256000,
    maxTokens: 8192,
  },
  {
    id: "hf:zai-org/GLM-4.7",
    name: "GLM-4.7",
    reasoning: false,
    input: ["text"],
    contextWindow: 198000,
    maxTokens: 128000,
  },
  {
    id: "hf:deepseek-ai/DeepSeek-R1-0528",
    name: "DeepSeek R1 0528",
    reasoning: false,
    input: ["text"],
    contextWindow: 128000,
    maxTokens: 8192,
  },
  {
    id: "hf:deepseek-ai/DeepSeek-V3-0324",
    name: "DeepSeek V3 0324",
    reasoning: false,
    input: ["text"],
    contextWindow: 128000,
    maxTokens: 8192,
  },
  {
    id: "hf:deepseek-ai/DeepSeek-V3.1",
    name: "DeepSeek V3.1",
    reasoning: false,
    input: ["text"],
    contextWindow: 128000,
    maxTokens: 8192,
  },
  {
    id: "hf:deepseek-ai/DeepSeek-V3.1-Terminus",
    name: "DeepSeek V3.1 Terminus",
    reasoning: false,
    input: ["text"],
    contextWindow: 128000,
    maxTokens: 8192,
  },
  {
    id: "hf:deepseek-ai/DeepSeek-V3.2",
    name: "DeepSeek V3.2",
    reasoning: false,
    input: ["text"],
    contextWindow: 159000,
    maxTokens: 8192,
  },
  {
    id: "hf:meta-llama/Llama-3.3-70B-Instruct",
    name: "Llama 3.3 70B Instruct",
    reasoning: false,
    input: ["text"],
    contextWindow: 128000,
    maxTokens: 8192,
  },
  {
    id: "hf:meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8",
    name: "Llama 4 Maverick 17B 128E Instruct FP8",
    reasoning: false,
    input: ["text"],
    contextWindow: 524000,
    maxTokens: 8192,
  },
  {
    id: "hf:moonshotai/Kimi-K2-Instruct-0905",
    name: "Kimi K2 Instruct 0905",
    reasoning: false,
    input: ["text"],
    contextWindow: 256000,
    maxTokens: 8192,
  },
  {
    id: "hf:moonshotai/Kimi-K2.5",
    name: "Kimi K2.5",
    reasoning: true,
    input: ["text", "image"],
    contextWindow: 256000,
    maxTokens: 8192,
  },
  {
    id: "hf:openai/gpt-oss-120b",
    name: "GPT OSS 120B",
    reasoning: false,
    input: ["text"],
    contextWindow: 128000,
    maxTokens: 8192,
  },
  {
    id: "hf:Qwen/Qwen3-235B-A22B-Instruct-2507",
    name: "Qwen3 235B A22B Instruct 2507",
    reasoning: false,
    input: ["text"],
    contextWindow: 256000,
    maxTokens: 8192,
  },
  {
    id: "hf:Qwen/Qwen3-Coder-480B-A35B-Instruct",
    name: "Qwen3 Coder 480B A35B Instruct",
    reasoning: false,
    input: ["text"],
    contextWindow: 256000,
    maxTokens: 8192,
  },
  {
    id: "hf:Qwen/Qwen3-VL-235B-A22B-Instruct",
    name: "Qwen3 VL 235B A22B Instruct",
    reasoning: false,
    input: ["text", "image"],
    contextWindow: 250000,
    maxTokens: 8192,
  },
  {
    id: "hf:zai-org/GLM-4.5",
    name: "GLM-4.5",
    reasoning: false,
    input: ["text"],
    contextWindow: 128000,
    maxTokens: 128000,
  },
  {
    id: "hf:zai-org/GLM-4.6",
    name: "GLM-4.6",
    reasoning: false,
    input: ["text"],
    contextWindow: 198000,
    maxTokens: 128000,
  },
  {
    id: "hf:zai-org/GLM-5",
    name: "GLM-5",
    reasoning: true,
    input: ["text", "image"],
    contextWindow: 256000,
    maxTokens: 128000,
  },
  {
    id: "hf:deepseek-ai/DeepSeek-V3",
    name: "DeepSeek V3",
    reasoning: false,
    input: ["text"],
    contextWindow: 128000,
    maxTokens: 8192,
  },
  {
    id: "hf:Qwen/Qwen3-235B-A22B-Thinking-2507",
    name: "Qwen3 235B A22B Thinking 2507",
    reasoning: true,
    input: ["text"],
    contextWindow: 256000,
    maxTokens: 8192,
  },
] as const;
// Entry type derived from the readonly catalog literal.
export type SyntheticCatalogEntry = (typeof SYNTHETIC_MODEL_CATALOG)[number];
/**
 * Convert a static Synthetic catalog entry into a runtime model definition.
 * All Synthetic models use the shared zero-cost pricing record.
 */
export function buildSyntheticModelDefinition(entry: SyntheticCatalogEntry): ModelDefinitionConfig {
  const { id, name, reasoning, contextWindow, maxTokens } = entry;
  return {
    id,
    name,
    reasoning,
    // Copy the readonly tuple into a fresh mutable array.
    input: Array.from(entry.input),
    cost: SYNTHETIC_DEFAULT_COST,
    contextWindow,
    maxTokens,
  };
}

View File

@@ -1,13 +1,13 @@
import {
createModelCatalogPresetAppliers,
type OpenClawConfig,
} from "openclaw/plugin-sdk/provider-onboard";
import {
buildSyntheticModelDefinition,
SYNTHETIC_BASE_URL,
SYNTHETIC_DEFAULT_MODEL_REF,
SYNTHETIC_MODEL_CATALOG,
} from "openclaw/plugin-sdk/provider-models";
import {
createModelCatalogPresetAppliers,
type OpenClawConfig,
} from "openclaw/plugin-sdk/provider-onboard";
} from "./api.js";
export { SYNTHETIC_DEFAULT_MODEL_REF };

View File

@@ -1,9 +1,9 @@
import type { ModelProviderConfig } from "openclaw/plugin-sdk/provider-models";
import {
buildSyntheticModelDefinition,
type ModelProviderConfig,
SYNTHETIC_BASE_URL,
SYNTHETIC_MODEL_CATALOG,
} from "openclaw/plugin-sdk/provider-models";
} from "./api.js";
export function buildSyntheticProvider(): ModelProviderConfig {
return {

View File

@@ -1,2 +1,7 @@
export {
buildTogetherModelDefinition,
TOGETHER_BASE_URL,
TOGETHER_MODEL_CATALOG,
} from "./models.js";
export { buildTogetherProvider } from "./provider-catalog.js";
export { applyTogetherConfig, TOGETHER_DEFAULT_MODEL_REF } from "./onboard.js";

View File

@@ -0,0 +1,133 @@
import type { ModelDefinitionConfig } from "openclaw/plugin-sdk/provider-models";
// Together's OpenAI-compatible API endpoint.
export const TOGETHER_BASE_URL = "https://api.together.xyz/v1";
// Static catalog of Together-hosted models. Costs are USD per million tokens.
// NOTE(review): the huge context windows on the Llama 4 entries (10M / 20M)
// look unusual — confirm against Together's model documentation.
export const TOGETHER_MODEL_CATALOG: ModelDefinitionConfig[] = [
  {
    id: "zai-org/GLM-4.7",
    name: "GLM 4.7 Fp8",
    reasoning: false,
    input: ["text"],
    contextWindow: 202752,
    maxTokens: 8192,
    cost: {
      input: 0.45,
      output: 2.0,
      cacheRead: 0.45,
      cacheWrite: 2.0,
    },
  },
  {
    id: "moonshotai/Kimi-K2.5",
    name: "Kimi K2.5",
    reasoning: true,
    input: ["text", "image"],
    cost: {
      input: 0.5,
      output: 2.8,
      cacheRead: 0.5,
      cacheWrite: 2.8,
    },
    contextWindow: 262144,
    maxTokens: 32768,
  },
  {
    id: "meta-llama/Llama-3.3-70B-Instruct-Turbo",
    name: "Llama 3.3 70B Instruct Turbo",
    reasoning: false,
    input: ["text"],
    contextWindow: 131072,
    maxTokens: 8192,
    cost: {
      input: 0.88,
      output: 0.88,
      cacheRead: 0.88,
      cacheWrite: 0.88,
    },
  },
  {
    id: "meta-llama/Llama-4-Scout-17B-16E-Instruct",
    name: "Llama 4 Scout 17B 16E Instruct",
    reasoning: false,
    input: ["text", "image"],
    contextWindow: 10000000,
    maxTokens: 32768,
    cost: {
      input: 0.18,
      output: 0.59,
      cacheRead: 0.18,
      cacheWrite: 0.18,
    },
  },
  {
    id: "meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8",
    name: "Llama 4 Maverick 17B 128E Instruct FP8",
    reasoning: false,
    input: ["text", "image"],
    contextWindow: 20000000,
    maxTokens: 32768,
    cost: {
      input: 0.27,
      output: 0.85,
      cacheRead: 0.27,
      cacheWrite: 0.27,
    },
  },
  {
    id: "deepseek-ai/DeepSeek-V3.1",
    name: "DeepSeek V3.1",
    reasoning: false,
    input: ["text"],
    contextWindow: 131072,
    maxTokens: 8192,
    cost: {
      input: 0.6,
      output: 1.25,
      cacheRead: 0.6,
      cacheWrite: 0.6,
    },
  },
  {
    id: "deepseek-ai/DeepSeek-R1",
    name: "DeepSeek R1",
    reasoning: true,
    input: ["text"],
    contextWindow: 131072,
    maxTokens: 8192,
    cost: {
      input: 3.0,
      output: 7.0,
      cacheRead: 3.0,
      cacheWrite: 3.0,
    },
  },
  {
    id: "moonshotai/Kimi-K2-Instruct-0905",
    name: "Kimi K2-Instruct 0905",
    reasoning: false,
    input: ["text"],
    contextWindow: 262144,
    maxTokens: 8192,
    cost: {
      input: 1.0,
      output: 3.0,
      cacheRead: 1.0,
      cacheWrite: 3.0,
    },
  },
];
/**
 * Map a Together catalog entry onto the generic model-definition shape.
 * Every Together model is served through the OpenAI completions API.
 */
export function buildTogetherModelDefinition(
  model: (typeof TOGETHER_MODEL_CATALOG)[number],
): ModelDefinitionConfig {
  const { id, name, reasoning, input, cost, contextWindow, maxTokens } = model;
  return {
    id,
    name,
    api: "openai-completions",
    reasoning,
    input,
    cost,
    contextWindow,
    maxTokens,
  };
}

View File

@@ -1,12 +1,8 @@
import {
buildTogetherModelDefinition,
TOGETHER_BASE_URL,
TOGETHER_MODEL_CATALOG,
} from "openclaw/plugin-sdk/provider-models";
import {
createModelCatalogPresetAppliers,
type OpenClawConfig,
} from "openclaw/plugin-sdk/provider-onboard";
import { buildTogetherModelDefinition, TOGETHER_BASE_URL, TOGETHER_MODEL_CATALOG } from "./api.js";
export const TOGETHER_DEFAULT_MODEL_REF = "together/moonshotai/Kimi-K2.5";

View File

@@ -1,9 +1,5 @@
import {
buildTogetherModelDefinition,
type ModelProviderConfig,
TOGETHER_BASE_URL,
TOGETHER_MODEL_CATALOG,
} from "openclaw/plugin-sdk/provider-models";
import type { ModelProviderConfig } from "openclaw/plugin-sdk/provider-models";
import { buildTogetherModelDefinition, TOGETHER_BASE_URL, TOGETHER_MODEL_CATALOG } from "./api.js";
export function buildTogetherProvider(): ModelProviderConfig {
return {

View File

@@ -1 +1,8 @@
export {
buildVeniceModelDefinition,
discoverVeniceModels,
VENICE_BASE_URL,
VENICE_DEFAULT_MODEL_REF,
VENICE_MODEL_CATALOG,
} from "./models.js";
export { buildVeniceProvider } from "./provider-catalog.js";

View File

@@ -1,5 +1,5 @@
import { defineSingleProviderPluginEntry } from "openclaw/plugin-sdk/provider-entry";
import { applyXaiModelCompat } from "openclaw/plugin-sdk/provider-models";
import { applyXaiModelCompat } from "openclaw/plugin-sdk/xai";
import { applyVeniceConfig, VENICE_DEFAULT_MODEL_REF } from "./onboard.js";
import { buildVeniceProvider } from "./provider-catalog.js";

647
extensions/venice/models.ts Normal file
View File

@@ -0,0 +1,647 @@
import { retryAsync } from "openclaw/plugin-sdk/infra-runtime";
import type { ModelDefinitionConfig } from "openclaw/plugin-sdk/provider-models";
import { createSubsystemLogger } from "openclaw/plugin-sdk/runtime-env";
// Subsystem logger for model-discovery diagnostics.
const log = createSubsystemLogger("venice-models");
// Venice REST API root.
export const VENICE_BASE_URL = "https://api.venice.ai/api/v1";
// Default model id and its fully-qualified "venice/<id>" ref.
export const VENICE_DEFAULT_MODEL_ID = "kimi-k2-5";
export const VENICE_DEFAULT_MODEL_REF = `venice/${VENICE_DEFAULT_MODEL_ID}`;
// Pricing is not tracked for Venice models; all costs are reported as zero.
export const VENICE_DEFAULT_COST = {
  input: 0,
  output: 0,
  cacheRead: 0,
  cacheWrite: 0,
};
// Fallbacks applied when the discovery API omits token limits.
const VENICE_DEFAULT_CONTEXT_WINDOW = 128_000;
const VENICE_DEFAULT_MAX_TOKENS = 4096;
// Absolute ceiling on API-reported completion-token limits.
const VENICE_DISCOVERY_HARD_MAX_TOKENS = 131_072;
// Per-request timeout for the /models discovery call.
const VENICE_DISCOVERY_TIMEOUT_MS = 10_000;
// HTTP statuses treated as transient and worth retrying.
const VENICE_DISCOVERY_RETRYABLE_HTTP_STATUS = new Set([408, 425, 429, 500, 502, 503, 504]);
// Node/undici network error codes treated as transient and worth retrying.
const VENICE_DISCOVERY_RETRYABLE_NETWORK_CODES = new Set([
  "ECONNABORTED",
  "ECONNREFUSED",
  "ECONNRESET",
  "EAI_AGAIN",
  "ENETDOWN",
  "ENETUNREACH",
  "ENOTFOUND",
  "ETIMEDOUT",
  "UND_ERR_BODY_TIMEOUT",
  "UND_ERR_CONNECT_TIMEOUT",
  "UND_ERR_CONNECT_ERROR",
  "UND_ERR_HEADERS_TIMEOUT",
  "UND_ERR_SOCKET",
]);
// Static fallback catalog, used when live discovery fails or under test.
// privacy: "private" entries appear to be Venice-hosted; "anonymized" entries
// carry "(via Venice)" names, i.e. presumably proxied upstream — confirm
// against Venice's privacy documentation.
export const VENICE_MODEL_CATALOG = [
  {
    id: "llama-3.3-70b",
    name: "Llama 3.3 70B",
    reasoning: false,
    input: ["text"],
    contextWindow: 128000,
    maxTokens: 4096,
    privacy: "private",
  },
  {
    id: "llama-3.2-3b",
    name: "Llama 3.2 3B",
    reasoning: false,
    input: ["text"],
    contextWindow: 128000,
    maxTokens: 4096,
    privacy: "private",
  },
  {
    id: "hermes-3-llama-3.1-405b",
    name: "Hermes 3 Llama 3.1 405B",
    reasoning: false,
    input: ["text"],
    contextWindow: 128000,
    maxTokens: 16384,
    supportsTools: false,
    privacy: "private",
  },
  {
    id: "qwen3-235b-a22b-thinking-2507",
    name: "Qwen3 235B Thinking",
    reasoning: true,
    input: ["text"],
    contextWindow: 128000,
    maxTokens: 16384,
    privacy: "private",
  },
  {
    id: "qwen3-235b-a22b-instruct-2507",
    name: "Qwen3 235B Instruct",
    reasoning: false,
    input: ["text"],
    contextWindow: 128000,
    maxTokens: 16384,
    privacy: "private",
  },
  {
    id: "qwen3-coder-480b-a35b-instruct",
    name: "Qwen3 Coder 480B",
    reasoning: false,
    input: ["text"],
    contextWindow: 256000,
    maxTokens: 65536,
    privacy: "private",
  },
  {
    id: "qwen3-coder-480b-a35b-instruct-turbo",
    name: "Qwen3 Coder 480B Turbo",
    reasoning: false,
    input: ["text"],
    contextWindow: 256000,
    maxTokens: 65536,
    privacy: "private",
  },
  {
    id: "qwen3-5-35b-a3b",
    name: "Qwen3.5 35B A3B",
    reasoning: true,
    input: ["text", "image"],
    contextWindow: 256000,
    maxTokens: 65536,
    privacy: "private",
  },
  {
    id: "qwen3-next-80b",
    name: "Qwen3 Next 80B",
    reasoning: false,
    input: ["text"],
    contextWindow: 256000,
    maxTokens: 16384,
    privacy: "private",
  },
  {
    id: "qwen3-vl-235b-a22b",
    name: "Qwen3 VL 235B (Vision)",
    reasoning: false,
    input: ["text", "image"],
    contextWindow: 256000,
    maxTokens: 16384,
    privacy: "private",
  },
  {
    id: "qwen3-4b",
    name: "Venice Small (Qwen3 4B)",
    reasoning: true,
    input: ["text"],
    contextWindow: 32000,
    maxTokens: 4096,
    privacy: "private",
  },
  {
    id: "deepseek-v3.2",
    name: "DeepSeek V3.2",
    reasoning: true,
    input: ["text"],
    contextWindow: 160000,
    maxTokens: 32768,
    supportsTools: false,
    privacy: "private",
  },
  {
    id: "venice-uncensored",
    name: "Venice Uncensored (Dolphin-Mistral)",
    reasoning: false,
    input: ["text"],
    contextWindow: 32000,
    maxTokens: 4096,
    supportsTools: false,
    privacy: "private",
  },
  {
    id: "mistral-31-24b",
    name: "Venice Medium (Mistral)",
    reasoning: false,
    input: ["text", "image"],
    contextWindow: 128000,
    maxTokens: 4096,
    privacy: "private",
  },
  {
    id: "google-gemma-3-27b-it",
    name: "Google Gemma 3 27B Instruct",
    reasoning: false,
    input: ["text", "image"],
    contextWindow: 198000,
    maxTokens: 16384,
    privacy: "private",
  },
  {
    id: "openai-gpt-oss-120b",
    name: "OpenAI GPT OSS 120B",
    reasoning: false,
    input: ["text"],
    contextWindow: 128000,
    maxTokens: 16384,
    privacy: "private",
  },
  {
    id: "nvidia-nemotron-3-nano-30b-a3b",
    name: "NVIDIA Nemotron 3 Nano 30B",
    reasoning: false,
    input: ["text"],
    contextWindow: 128000,
    maxTokens: 16384,
    privacy: "private",
  },
  {
    id: "olafangensan-glm-4.7-flash-heretic",
    name: "GLM 4.7 Flash Heretic",
    reasoning: true,
    input: ["text"],
    contextWindow: 128000,
    maxTokens: 24000,
    privacy: "private",
  },
  {
    id: "zai-org-glm-4.6",
    name: "GLM 4.6",
    reasoning: false,
    input: ["text"],
    contextWindow: 198000,
    maxTokens: 16384,
    privacy: "private",
  },
  {
    id: "zai-org-glm-4.7",
    name: "GLM 4.7",
    reasoning: true,
    input: ["text"],
    contextWindow: 198000,
    maxTokens: 16384,
    privacy: "private",
  },
  {
    id: "zai-org-glm-4.7-flash",
    name: "GLM 4.7 Flash",
    reasoning: true,
    input: ["text"],
    contextWindow: 128000,
    maxTokens: 16384,
    privacy: "private",
  },
  {
    id: "zai-org-glm-5",
    name: "GLM 5",
    reasoning: true,
    input: ["text"],
    contextWindow: 198000,
    maxTokens: 32000,
    privacy: "private",
  },
  {
    id: "kimi-k2-5",
    name: "Kimi K2.5",
    reasoning: true,
    input: ["text", "image"],
    contextWindow: 256000,
    maxTokens: 65536,
    privacy: "private",
  },
  {
    id: "kimi-k2-thinking",
    name: "Kimi K2 Thinking",
    reasoning: true,
    input: ["text"],
    contextWindow: 256000,
    maxTokens: 65536,
    privacy: "private",
  },
  {
    id: "minimax-m21",
    name: "MiniMax M2.1",
    reasoning: true,
    input: ["text"],
    contextWindow: 198000,
    maxTokens: 32768,
    privacy: "private",
  },
  {
    id: "minimax-m25",
    name: "MiniMax M2.5",
    reasoning: true,
    input: ["text"],
    contextWindow: 198000,
    maxTokens: 32768,
    privacy: "private",
  },
  // Entries below carry "(via Venice)" names and privacy: "anonymized".
  {
    id: "claude-opus-4-5",
    name: "Claude Opus 4.5 (via Venice)",
    reasoning: true,
    input: ["text", "image"],
    contextWindow: 198000,
    maxTokens: 32768,
    privacy: "anonymized",
  },
  {
    id: "claude-opus-4-6",
    name: "Claude Opus 4.6 (via Venice)",
    reasoning: true,
    input: ["text", "image"],
    contextWindow: 1000000,
    maxTokens: 128000,
    privacy: "anonymized",
  },
  {
    id: "claude-sonnet-4-5",
    name: "Claude Sonnet 4.5 (via Venice)",
    reasoning: true,
    input: ["text", "image"],
    contextWindow: 198000,
    maxTokens: 64000,
    privacy: "anonymized",
  },
  {
    id: "claude-sonnet-4-6",
    name: "Claude Sonnet 4.6 (via Venice)",
    reasoning: true,
    input: ["text", "image"],
    contextWindow: 1000000,
    maxTokens: 64000,
    privacy: "anonymized",
  },
  {
    id: "openai-gpt-52",
    name: "GPT-5.2 (via Venice)",
    reasoning: true,
    input: ["text"],
    contextWindow: 256000,
    maxTokens: 65536,
    privacy: "anonymized",
  },
  {
    id: "openai-gpt-52-codex",
    name: "GPT-5.2 Codex (via Venice)",
    reasoning: true,
    input: ["text", "image"],
    contextWindow: 256000,
    maxTokens: 65536,
    privacy: "anonymized",
  },
  {
    id: "openai-gpt-53-codex",
    name: "GPT-5.3 Codex (via Venice)",
    reasoning: true,
    input: ["text", "image"],
    contextWindow: 400000,
    maxTokens: 128000,
    privacy: "anonymized",
  },
  {
    id: "openai-gpt-54",
    name: "GPT-5.4 (via Venice)",
    reasoning: true,
    input: ["text", "image"],
    contextWindow: 1000000,
    maxTokens: 131072,
    privacy: "anonymized",
  },
  {
    id: "openai-gpt-4o-2024-11-20",
    name: "GPT-4o (via Venice)",
    reasoning: false,
    input: ["text", "image"],
    contextWindow: 128000,
    maxTokens: 16384,
    privacy: "anonymized",
  },
  {
    id: "openai-gpt-4o-mini-2024-07-18",
    name: "GPT-4o Mini (via Venice)",
    reasoning: false,
    input: ["text", "image"],
    contextWindow: 128000,
    maxTokens: 16384,
    privacy: "anonymized",
  },
  {
    id: "gemini-3-pro-preview",
    name: "Gemini 3 Pro (via Venice)",
    reasoning: true,
    input: ["text", "image"],
    contextWindow: 198000,
    maxTokens: 32768,
    privacy: "anonymized",
  },
  {
    id: "gemini-3-1-pro-preview",
    name: "Gemini 3.1 Pro (via Venice)",
    reasoning: true,
    input: ["text", "image"],
    contextWindow: 1000000,
    maxTokens: 32768,
    privacy: "anonymized",
  },
  {
    id: "gemini-3-flash-preview",
    name: "Gemini 3 Flash (via Venice)",
    reasoning: true,
    input: ["text", "image"],
    contextWindow: 256000,
    maxTokens: 65536,
    privacy: "anonymized",
  },
  {
    id: "grok-41-fast",
    name: "Grok 4.1 Fast (via Venice)",
    reasoning: true,
    input: ["text", "image"],
    contextWindow: 1000000,
    maxTokens: 30000,
    privacy: "anonymized",
  },
  {
    id: "grok-code-fast-1",
    name: "Grok Code Fast 1 (via Venice)",
    reasoning: true,
    input: ["text"],
    contextWindow: 256000,
    maxTokens: 10000,
    privacy: "anonymized",
  },
] as const;
// Entry type derived from the readonly catalog literal.
export type VeniceCatalogEntry = (typeof VENICE_MODEL_CATALOG)[number];
/**
 * Convert a static Venice catalog entry into a model definition.
 * Venice streaming responses omit usage data, so compat always disables it;
 * entries explicitly marked supportsTools: false carry that flag through.
 */
export function buildVeniceModelDefinition(entry: VeniceCatalogEntry): ModelDefinitionConfig {
  const toolsDisabled = "supportsTools" in entry && !entry.supportsTools;
  return {
    id: entry.id,
    name: entry.name,
    reasoning: entry.reasoning,
    // Copy the readonly tuple into a fresh mutable array.
    input: Array.from(entry.input),
    cost: VENICE_DEFAULT_COST,
    contextWindow: entry.contextWindow,
    maxTokens: entry.maxTokens,
    compat: toolsDisabled
      ? { supportsUsageInStreaming: false, supportsTools: false }
      : { supportsUsageInStreaming: false },
  };
}
// Subset of the Venice /models response payload that discovery consumes.
interface VeniceModelSpec {
  name: string;
  privacy: "private" | "anonymized";
  // Context window reported by the API, in tokens.
  availableContextTokens?: number;
  // Completion-token limit reported by the API.
  maxCompletionTokens?: number;
  capabilities?: {
    supportsReasoning?: boolean;
    supportsVision?: boolean;
    supportsFunctionCalling?: boolean;
  };
}
// One element of the /models "data" array.
interface VeniceModel {
  id: string;
  model_spec?: VeniceModelSpec;
}
// Top-level /models response shape.
interface VeniceModelsResponse {
  data: VeniceModel[];
}
// Wraps a retryable HTTP status so the retry predicate can distinguish it
// from other fetch failures.
class VeniceDiscoveryHttpError extends Error {
  readonly status: number;
  constructor(status: number) {
    super(`HTTP ${status}`);
    this.name = "VeniceDiscoveryHttpError";
    this.status = status;
  }
}
/** Materialize the static catalog as model definitions (discovery fallback path). */
function staticVeniceModelDefinitions(): ModelDefinitionConfig[] {
  return VENICE_MODEL_CATALOG.map((entry) => buildVeniceModelDefinition(entry));
}
/**
 * Walk an error's `cause` chain and any AggregateError-style `errors` arrays,
 * returning true if any node carries a retryable network code in its `code`
 * or `errno` field. A visited set guards against reference cycles.
 */
function hasRetryableNetworkCode(err: unknown): boolean {
  const pending: unknown[] = [err];
  const visited = new Set<unknown>();
  while (pending.length > 0) {
    const node = pending.pop();
    if (!node || typeof node !== "object" || visited.has(node)) {
      continue;
    }
    visited.add(node);
    const { code, errno, cause, errors } = node as {
      cause?: unknown;
      errors?: unknown;
      code?: unknown;
      errno?: unknown;
    };
    // Prefer `code`; fall back to `errno` when only it is a string.
    const errorCode =
      typeof code === "string" ? code : typeof errno === "string" ? errno : undefined;
    if (errorCode !== undefined && VENICE_DISCOVERY_RETRYABLE_NETWORK_CODES.has(errorCode)) {
      return true;
    }
    if (cause) {
      pending.push(cause);
    }
    if (Array.isArray(errors)) {
      pending.push(...errors);
    }
  }
  return false;
}
/**
 * Retry on: our retryable-HTTP wrapper, aborted/timed-out requests, undici's
 * generic "fetch failed" TypeError, or any nested retryable network code.
 */
function isRetryableVeniceDiscoveryError(err: unknown): boolean {
  if (err instanceof VeniceDiscoveryHttpError) {
    return true;
  }
  if (err instanceof Error) {
    if (err.name === "AbortError") {
      return true;
    }
    if (err instanceof TypeError && err.message.toLowerCase() === "fetch failed") {
      return true;
    }
  }
  return hasRetryableNetworkCode(err);
}
/** Floor a finite positive number; anything else (non-number, NaN, <=0, Infinity) yields undefined. */
function normalizePositiveInt(value: unknown): number | undefined {
  if (typeof value === "number" && Number.isFinite(value) && value > 0) {
    return Math.floor(value);
  }
  return undefined;
}
/**
 * Clamp the API-reported max completion tokens against the API-reported
 * context window and a hard cap (the catalog's known max when available,
 * otherwise VENICE_DISCOVERY_HARD_MAX_TOKENS). Returns undefined when the
 * API reports no usable value.
 * NOTE(review): when the API omits a context window, the window clamp falls
 * back to knownMaxTokens rather than a context-window default — confirm
 * this asymmetry is intentional.
 */
function resolveApiMaxCompletionTokens(params: {
  apiModel: VeniceModel;
  knownMaxTokens?: number;
}): number | undefined {
  const reported = normalizePositiveInt(params.apiModel.model_spec?.maxCompletionTokens);
  if (!reported) {
    return undefined;
  }
  const contextWindow = normalizePositiveInt(params.apiModel.model_spec?.availableContextTokens);
  const catalogMax =
    typeof params.knownMaxTokens === "number" && Number.isFinite(params.knownMaxTokens)
      ? Math.floor(params.knownMaxTokens)
      : undefined;
  const hardCap = catalogMax ?? VENICE_DISCOVERY_HARD_MAX_TOKENS;
  const windowCap = contextWindow ?? catalogMax ?? VENICE_DEFAULT_CONTEXT_WINDOW;
  return Math.min(reported, windowCap, hardCap);
}
/** Pass through the API's supportsFunctionCalling flag when it is a boolean; otherwise undefined. */
function resolveApiSupportsTools(apiModel: VeniceModel): boolean | undefined {
  const flag = apiModel.model_spec?.capabilities?.supportsFunctionCalling;
  if (typeof flag === "boolean") {
    return flag;
  }
  return undefined;
}
/**
 * Fetch the live model list from the Venice API and merge it with the static
 * catalog; on any failure (HTTP error, empty payload, network problems) fall
 * back to the static catalog. Never touches the network under test
 * (NODE_ENV=test / VITEST).
 */
export async function discoverVeniceModels(): Promise<ModelDefinitionConfig[]> {
  if (process.env.NODE_ENV === "test" || process.env.VITEST) {
    return staticVeniceModelDefinitions();
  }
  try {
    // Retry transient failures: selected HTTP statuses and network codes.
    const response = await retryAsync(
      async () => {
        const currentResponse = await fetch(`${VENICE_BASE_URL}/models`, {
          signal: AbortSignal.timeout(VENICE_DISCOVERY_TIMEOUT_MS),
          headers: {
            Accept: "application/json",
          },
        });
        if (
          !currentResponse.ok &&
          VENICE_DISCOVERY_RETRYABLE_HTTP_STATUS.has(currentResponse.status)
        ) {
          // Wrap so the retry predicate can recognize retryable statuses.
          throw new VeniceDiscoveryHttpError(currentResponse.status);
        }
        return currentResponse;
      },
      {
        attempts: 3,
        minDelayMs: 300,
        maxDelayMs: 2000,
        jitter: 0.2,
        label: "venice-model-discovery",
        shouldRetry: isRetryableVeniceDiscoveryError,
      },
    );
    if (!response.ok) {
      // Non-retryable HTTP error (e.g. auth/404): use the static catalog.
      log.warn(`Failed to discover models: HTTP ${response.status}, using static catalog`);
      return staticVeniceModelDefinitions();
    }
    const data = (await response.json()) as VeniceModelsResponse;
    if (!Array.isArray(data.data) || data.data.length === 0) {
      log.warn("No models found from API, using static catalog");
      return staticVeniceModelDefinitions();
    }
    // Index the static catalog so API entries can be matched by id.
    const catalogById = new Map<string, VeniceCatalogEntry>(
      VENICE_MODEL_CATALOG.map((m) => [m.id, m]),
    );
    const models: ModelDefinitionConfig[] = [];
    for (const apiModel of data.data) {
      const catalogEntry = catalogById.get(apiModel.id);
      const apiMaxTokens = resolveApiMaxCompletionTokens({
        apiModel,
        knownMaxTokens: catalogEntry?.maxTokens,
      });
      const apiSupportsTools = resolveApiSupportsTools(apiModel);
      if (catalogEntry) {
        // Known model: start from the static entry, then overlay API limits.
        const definition = buildVeniceModelDefinition(catalogEntry);
        if (apiMaxTokens !== undefined) {
          definition.maxTokens = apiMaxTokens;
        }
        if (apiSupportsTools === false) {
          // Only an explicit API "false" revokes tool support.
          definition.compat = {
            ...definition.compat,
            supportsTools: false,
          };
        }
        models.push(definition);
      } else {
        // Unknown model: synthesize a definition from the API spec, inferring
        // reasoning support from capability flags or id naming conventions.
        const apiSpec = apiModel.model_spec;
        const isReasoning =
          apiSpec?.capabilities?.supportsReasoning ||
          apiModel.id.toLowerCase().includes("thinking") ||
          apiModel.id.toLowerCase().includes("reason") ||
          apiModel.id.toLowerCase().includes("r1");
        const hasVision = apiSpec?.capabilities?.supportsVision === true;
        models.push({
          id: apiModel.id,
          name: apiSpec?.name || apiModel.id,
          reasoning: isReasoning,
          input: hasVision ? ["text", "image"] : ["text"],
          cost: VENICE_DEFAULT_COST,
          contextWindow:
            normalizePositiveInt(apiSpec?.availableContextTokens) ?? VENICE_DEFAULT_CONTEXT_WINDOW,
          maxTokens: apiMaxTokens ?? VENICE_DEFAULT_MAX_TOKENS,
          compat: {
            supportsUsageInStreaming: false,
            ...(apiSupportsTools === false ? { supportsTools: false } : {}),
          },
        });
      }
    }
    return models.length > 0 ? models : staticVeniceModelDefinitions();
  } catch (error) {
    if (error instanceof VeniceDiscoveryHttpError) {
      // Retries exhausted on a retryable status.
      log.warn(`Failed to discover models: HTTP ${error.status}, using static catalog`);
      return staticVeniceModelDefinitions();
    }
    log.warn(`Discovery failed: ${String(error)}, using static catalog`);
    return staticVeniceModelDefinitions();
  }
}

View File

@@ -1,13 +1,13 @@
import {
createModelCatalogPresetAppliers,
type OpenClawConfig,
} from "openclaw/plugin-sdk/provider-onboard";
import {
buildVeniceModelDefinition,
VENICE_BASE_URL,
VENICE_DEFAULT_MODEL_REF,
VENICE_MODEL_CATALOG,
} from "openclaw/plugin-sdk/provider-models";
import {
createModelCatalogPresetAppliers,
type OpenClawConfig,
} from "openclaw/plugin-sdk/provider-onboard";
} from "./api.js";
export { VENICE_DEFAULT_MODEL_REF };

View File

@@ -1,8 +1,5 @@
import {
discoverVeniceModels,
type ModelProviderConfig,
VENICE_BASE_URL,
} from "openclaw/plugin-sdk/provider-models";
import type { ModelProviderConfig } from "openclaw/plugin-sdk/provider-models";
import { discoverVeniceModels, VENICE_BASE_URL } from "./api.js";
export async function buildVeniceProvider(): Promise<ModelProviderConfig> {
const models = await discoverVeniceModels();

View File

@@ -1,2 +1,7 @@
export {
discoverVercelAiGatewayModels,
getStaticVercelAiGatewayModelCatalog,
VERCEL_AI_GATEWAY_BASE_URL,
} from "./models.js";
export { buildVercelAiGatewayProvider } from "./provider-catalog.js";
export { applyVercelAiGatewayConfig, VERCEL_AI_GATEWAY_DEFAULT_MODEL_REF } from "./onboard.js";

View File

@@ -0,0 +1,197 @@
import type { ModelDefinitionConfig } from "openclaw/plugin-sdk/provider-models";
import { createSubsystemLogger } from "openclaw/plugin-sdk/runtime-env";
// Provider identity and endpoint.
export const VERCEL_AI_GATEWAY_PROVIDER_ID = "vercel-ai-gateway";
export const VERCEL_AI_GATEWAY_BASE_URL = "https://ai-gateway.vercel.sh";
// Default model id and its fully-qualified "<provider>/<model>" ref.
export const VERCEL_AI_GATEWAY_DEFAULT_MODEL_ID = "anthropic/claude-opus-4.6";
export const VERCEL_AI_GATEWAY_DEFAULT_MODEL_REF = `${VERCEL_AI_GATEWAY_PROVIDER_ID}/${VERCEL_AI_GATEWAY_DEFAULT_MODEL_ID}`;
// Fallback limits when neither the API nor the static catalog provides them.
export const VERCEL_AI_GATEWAY_DEFAULT_CONTEXT_WINDOW = 200_000;
export const VERCEL_AI_GATEWAY_DEFAULT_MAX_TOKENS = 128_000;
// Zero-cost default used when pricing is unknown.
export const VERCEL_AI_GATEWAY_DEFAULT_COST = {
  input: 0,
  output: 0,
  cacheRead: 0,
  cacheWrite: 0,
} as const;
const log = createSubsystemLogger("agents/vercel-ai-gateway");
// Pricing fields as returned by the gateway (per-token, numbers or numeric strings).
type VercelPricingShape = {
  input?: number | string;
  output?: number | string;
  input_cache_read?: number | string;
  input_cache_write?: number | string;
};
// One element of the gateway's /v1/models "data" array.
type VercelGatewayModelShape = {
  id?: string;
  name?: string;
  context_window?: number;
  max_tokens?: number;
  tags?: string[];
  pricing?: VercelPricingShape;
};
type VercelGatewayModelsResponse = {
  data?: VercelGatewayModelShape[];
};
// Static catalog entry: a model definition whose cost may be partial
// (missing fields are filled with the zero-cost default).
type StaticVercelGatewayModel = Omit<ModelDefinitionConfig, "cost"> & {
  cost?: Partial<ModelDefinitionConfig["cost"]>;
};
// Static fallback catalog used when live discovery fails or under test.
// Costs are USD per million tokens.
const STATIC_VERCEL_AI_GATEWAY_MODEL_CATALOG: readonly StaticVercelGatewayModel[] = [
  {
    id: "anthropic/claude-opus-4.6",
    name: "Claude Opus 4.6",
    reasoning: true,
    input: ["text", "image"],
    contextWindow: 1_000_000,
    maxTokens: 128_000,
    cost: {
      input: 5,
      output: 25,
      cacheRead: 0.5,
      cacheWrite: 6.25,
    },
  },
  {
    id: "openai/gpt-5.4",
    name: "GPT 5.4",
    reasoning: true,
    input: ["text", "image"],
    contextWindow: 200_000,
    maxTokens: 128_000,
    cost: {
      input: 2.5,
      output: 15,
      cacheRead: 0.25,
    },
  },
  {
    id: "openai/gpt-5.4-pro",
    name: "GPT 5.4 Pro",
    reasoning: true,
    input: ["text", "image"],
    contextWindow: 200_000,
    maxTokens: 128_000,
    cost: {
      input: 30,
      output: 180,
      cacheRead: 0,
    },
  },
] as const;
/**
 * Convert a per-token price (number or numeric string) to per-million-token
 * units. Missing, non-numeric, or negative values become 0.
 */
function toPerMillionCost(value: number | string | undefined): number {
  let numeric: number;
  if (typeof value === "number") {
    numeric = value;
  } else if (typeof value === "string") {
    numeric = Number.parseFloat(value);
  } else {
    numeric = Number.NaN;
  }
  return Number.isFinite(numeric) && numeric >= 0 ? numeric * 1_000_000 : 0;
}
/** Build a complete cost record from gateway pricing, defaulting each missing/invalid field to 0. */
function normalizeCost(pricing?: VercelPricingShape): ModelDefinitionConfig["cost"] {
  const { input, output, input_cache_read, input_cache_write } = pricing ?? {};
  return {
    input: toPerMillionCost(input),
    output: toPerMillionCost(output),
    cacheRead: toPerMillionCost(input_cache_read),
    cacheWrite: toPerMillionCost(input_cache_write),
  };
}
/** Materialize a static catalog entry, overlaying its partial cost on the zero-cost defaults. */
function buildStaticModelDefinition(model: StaticVercelGatewayModel): ModelDefinitionConfig {
  const cost = { ...VERCEL_AI_GATEWAY_DEFAULT_COST, ...model.cost };
  return {
    id: model.id,
    name: model.name,
    reasoning: model.reasoning,
    input: model.input,
    contextWindow: model.contextWindow,
    maxTokens: model.maxTokens,
    cost,
  };
}
/** Look up a static catalog entry by id, returning it as a materialized definition. */
function getStaticFallbackModel(id: string): ModelDefinitionConfig | undefined {
  for (const model of STATIC_VERCEL_AI_GATEWAY_MODEL_CATALOG) {
    if (model.id === id) {
      return buildStaticModelDefinition(model);
    }
  }
  return undefined;
}
/** The full static catalog as ready-to-use model definitions. */
export function getStaticVercelAiGatewayModelCatalog(): ModelDefinitionConfig[] {
  return STATIC_VERCEL_AI_GATEWAY_MODEL_CATALOG.map((model) => buildStaticModelDefinition(model));
}
/**
 * Convert one gateway /v1/models entry into a ModelDefinitionConfig, using
 * the static catalog entry with the same id (when present) to fill gaps.
 * Returns null for entries without a usable id.
 */
function buildDiscoveredModelDefinition(
  model: VercelGatewayModelShape,
): ModelDefinitionConfig | null {
  const id = typeof model.id === "string" ? model.id.trim() : "";
  if (!id) {
    return null;
  }
  const fallback = getStaticFallbackModel(id);
  const contextWindow =
    typeof model.context_window === "number" && Number.isFinite(model.context_window)
      ? model.context_window
      : (fallback?.contextWindow ?? VERCEL_AI_GATEWAY_DEFAULT_CONTEXT_WINDOW);
  const maxTokens =
    typeof model.max_tokens === "number" && Number.isFinite(model.max_tokens)
      ? model.max_tokens
      : (fallback?.maxTokens ?? VERCEL_AI_GATEWAY_DEFAULT_MAX_TOKENS);
  const normalizedCost = normalizeCost(model.pricing);
  return {
    id,
    // Name preference: trimmed API name, then static fallback name, then id.
    name: (typeof model.name === "string" ? model.name.trim() : "") || fallback?.name || id,
    reasoning:
      Array.isArray(model.tags) && model.tags.includes("reasoning")
        ? true
        : (fallback?.reasoning ?? false),
    // NOTE(review): when tags are present but lack "vision", input is forced
    // to ["text"] even if the fallback lists image input — confirm intended.
    input: Array.isArray(model.tags)
      ? model.tags.includes("vision")
        ? ["text", "image"]
        : ["text"]
      : (fallback?.input ?? ["text"]),
    contextWindow,
    maxTokens,
    // All-zero discovered pricing is treated as "unknown" and replaced by
    // the fallback's cost (or the zero default).
    cost:
      normalizedCost.input > 0 ||
      normalizedCost.output > 0 ||
      normalizedCost.cacheRead > 0 ||
      normalizedCost.cacheWrite > 0
        ? normalizedCost
        : (fallback?.cost ?? VERCEL_AI_GATEWAY_DEFAULT_COST),
  };
}
/**
 * Fetches the live model list from the Vercel AI Gateway (GET /v1/models) and
 * converts it to ModelDefinitionConfig entries. Falls back to the bundled
 * static catalog in test environments, on HTTP/network failure, or when the
 * response yields no usable models.
 */
export async function discoverVercelAiGatewayModels(): Promise<ModelDefinitionConfig[]> {
  // Never hit the network from tests; the static catalog is deterministic.
  if (process.env.VITEST || process.env.NODE_ENV === "test") {
    return getStaticVercelAiGatewayModelCatalog();
  }
  try {
    // Bound discovery latency so a slow gateway cannot stall the caller.
    const response = await fetch(`${VERCEL_AI_GATEWAY_BASE_URL}/v1/models`, {
      signal: AbortSignal.timeout(5000),
    });
    if (!response.ok) {
      log.warn(`Failed to discover Vercel AI Gateway models: HTTP ${response.status}`);
      return getStaticVercelAiGatewayModelCatalog();
    }
    const data = (await response.json()) as VercelGatewayModelsResponse;
    // Drop entries without a usable id (builder returns null for those).
    const discovered = (data.data ?? [])
      .map(buildDiscoveredModelDefinition)
      .filter((entry): entry is ModelDefinitionConfig => entry !== null);
    return discovered.length > 0 ? discovered : getStaticVercelAiGatewayModelCatalog();
  } catch (error) {
    // Network errors and timeouts degrade to the static catalog.
    log.warn(`Failed to discover Vercel AI Gateway models: ${String(error)}`);
    return getStaticVercelAiGatewayModelCatalog();
  }
}

View File

@@ -1,8 +1,5 @@
import {
discoverVercelAiGatewayModels,
VERCEL_AI_GATEWAY_BASE_URL,
type ModelProviderConfig,
} from "openclaw/plugin-sdk/provider-models";
import type { ModelProviderConfig } from "openclaw/plugin-sdk/provider-models";
import { discoverVercelAiGatewayModels, VERCEL_AI_GATEWAY_BASE_URL } from "./api.js";
export async function buildVercelAiGatewayProvider(): Promise<ModelProviderConfig> {
return {

6
extensions/vllm/api.ts Normal file
View File

@@ -0,0 +1,6 @@
export {
VLLM_DEFAULT_API_KEY_ENV_VAR,
VLLM_DEFAULT_BASE_URL,
VLLM_MODEL_PLACEHOLDER,
VLLM_PROVIDER_LABEL,
} from "./defaults.js";

View File

@@ -0,0 +1,4 @@
// Default base URL for a locally hosted vLLM OpenAI-compatible server.
export const VLLM_DEFAULT_BASE_URL = "http://127.0.0.1:8000/v1";
// Human-readable provider label.
export const VLLM_PROVIDER_LABEL = "vLLM";
// Environment variable consulted for the vLLM API key.
export const VLLM_DEFAULT_API_KEY_ENV_VAR = "VLLM_API_KEY";
// Example model id used as a placeholder (vLLM serves arbitrary models).
export const VLLM_MODEL_PLACEHOLDER = "meta-llama/Meta-Llama-3-8B-Instruct";

View File

@@ -1,14 +1,14 @@
import {
VLLM_DEFAULT_API_KEY_ENV_VAR,
VLLM_DEFAULT_BASE_URL,
VLLM_MODEL_PLACEHOLDER,
VLLM_PROVIDER_LABEL,
} from "openclaw/plugin-sdk/agent-runtime";
import {
definePluginEntry,
type OpenClawPluginApi,
type ProviderAuthMethodNonInteractiveContext,
} from "openclaw/plugin-sdk/plugin-entry";
import {
VLLM_DEFAULT_API_KEY_ENV_VAR,
VLLM_DEFAULT_BASE_URL,
VLLM_MODEL_PLACEHOLDER,
VLLM_PROVIDER_LABEL,
} from "./api.js";
const PROVIDER_ID = "vllm";

View File

@@ -1 +1,8 @@
export { buildDoubaoCodingProvider, buildDoubaoProvider } from "./provider-catalog.js";
export {
buildDoubaoModelDefinition,
DOUBAO_BASE_URL,
DOUBAO_CODING_BASE_URL,
DOUBAO_CODING_MODEL_CATALOG,
DOUBAO_MODEL_CATALOG,
} from "./models.js";

View File

@@ -0,0 +1,149 @@
import type { ModelDefinitionConfig } from "openclaw/plugin-sdk/provider-models";
// Minimal catalog-entry shape shared by Volcano Engine (Doubao/BytePlus) models.
type VolcModelCatalogEntry = {
  id: string;
  name: string;
  reasoning: boolean;
  input: ReadonlyArray<ModelDefinitionConfig["input"][number]>;
  contextWindow: number;
  maxTokens: number;
};
// Entries reused in the catalogs below.
const VOLC_MODEL_KIMI_K2_5 = {
  id: "kimi-k2-5-260127",
  name: "Kimi K2.5",
  reasoning: false,
  input: ["text", "image"] as const,
  contextWindow: 256000,
  maxTokens: 4096,
} as const;
const VOLC_MODEL_GLM_4_7 = {
  id: "glm-4-7-251222",
  name: "GLM 4.7",
  reasoning: false,
  input: ["text", "image"] as const,
  contextWindow: 200000,
  maxTokens: 4096,
} as const;
// Coding-plan models shared by the coding endpoints.
const VOLC_SHARED_CODING_MODEL_CATALOG = [
  {
    id: "ark-code-latest",
    name: "Ark Coding Plan",
    reasoning: false,
    input: ["text"] as const,
    contextWindow: 256000,
    maxTokens: 4096,
  },
  {
    id: "doubao-seed-code",
    name: "Doubao Seed Code",
    reasoning: false,
    input: ["text"] as const,
    contextWindow: 256000,
    maxTokens: 4096,
  },
  {
    id: "glm-4.7",
    name: "GLM 4.7 Coding",
    reasoning: false,
    input: ["text"] as const,
    contextWindow: 200000,
    maxTokens: 4096,
  },
  {
    id: "kimi-k2-thinking",
    name: "Kimi K2 Thinking",
    reasoning: false,
    input: ["text"] as const,
    contextWindow: 256000,
    maxTokens: 4096,
  },
  {
    id: "kimi-k2.5",
    name: "Kimi K2.5 Coding",
    reasoning: false,
    input: ["text"] as const,
    contextWindow: 256000,
    maxTokens: 4096,
  },
] as const;
// Volcano Engine Ark endpoints (cn-beijing region); the coding endpoint has
// its own path prefix.
export const DOUBAO_BASE_URL = "https://ark.cn-beijing.volces.com/api/v3";
export const DOUBAO_CODING_BASE_URL = "https://ark.cn-beijing.volces.com/api/coding/v3";
export const DOUBAO_DEFAULT_MODEL_ID = "doubao-seed-1-8-251228";
export const DOUBAO_CODING_DEFAULT_MODEL_ID = "ark-code-latest";
export const DOUBAO_DEFAULT_MODEL_REF = `volcengine/${DOUBAO_DEFAULT_MODEL_ID}`;
// Per-million-token cost applied to every Doubao model.
// NOTE(review): these look like nominal placeholders rather than real Ark
// pricing — confirm before using for billing.
export const DOUBAO_DEFAULT_COST = {
  input: 0.0001,
  output: 0.0002,
  cacheRead: 0,
  cacheWrite: 0,
};
// General-purpose Doubao catalog.
export const DOUBAO_MODEL_CATALOG = [
  {
    id: "doubao-seed-code-preview-251028",
    name: "doubao-seed-code-preview-251028",
    reasoning: false,
    input: ["text", "image"] as const,
    contextWindow: 256000,
    maxTokens: 4096,
  },
  {
    id: "doubao-seed-1-8-251228",
    name: "Doubao Seed 1.8",
    reasoning: false,
    input: ["text", "image"] as const,
    contextWindow: 256000,
    maxTokens: 4096,
  },
  VOLC_MODEL_KIMI_K2_5,
  VOLC_MODEL_GLM_4_7,
  {
    id: "deepseek-v3-2-251201",
    name: "DeepSeek V3.2",
    reasoning: false,
    input: ["text", "image"] as const,
    contextWindow: 128000,
    maxTokens: 4096,
  },
] as const;
// Coding-endpoint catalog: shared coding models plus a Doubao-specific preview.
export const DOUBAO_CODING_MODEL_CATALOG = [
  ...VOLC_SHARED_CODING_MODEL_CATALOG,
  {
    id: "doubao-seed-code-preview-251028",
    name: "Doubao Seed Code Preview",
    reasoning: false,
    input: ["text"] as const,
    contextWindow: 256000,
    maxTokens: 4096,
  },
] as const;
export type DoubaoCatalogEntry = (typeof DOUBAO_MODEL_CATALOG)[number];
export type DoubaoCodingCatalogEntry = (typeof DOUBAO_CODING_MODEL_CATALOG)[number];
/**
 * Converts a Volcano Engine catalog entry plus a cost table into a
 * ModelDefinitionConfig. The readonly input list is copied into a fresh
 * mutable array so callers cannot mutate the shared catalog entry.
 */
function buildVolcModelDefinition(
  entry: VolcModelCatalogEntry,
  cost: ModelDefinitionConfig["cost"],
): ModelDefinitionConfig {
  const { id, name, reasoning, contextWindow, maxTokens } = entry;
  return {
    id,
    name,
    reasoning,
    input: Array.from(entry.input),
    cost,
    contextWindow,
    maxTokens,
  };
}
/** Builds a Doubao model definition using the shared default Doubao cost table. */
export function buildDoubaoModelDefinition(
  entry: DoubaoCatalogEntry | DoubaoCodingCatalogEntry,
): ModelDefinitionConfig {
  const definition = buildVolcModelDefinition(entry, DOUBAO_DEFAULT_COST);
  return definition;
}

View File

@@ -1,11 +1,11 @@
import type { ModelProviderConfig } from "openclaw/plugin-sdk/provider-models";
import {
buildDoubaoModelDefinition,
DOUBAO_BASE_URL,
DOUBAO_CODING_BASE_URL,
DOUBAO_CODING_MODEL_CATALOG,
DOUBAO_MODEL_CATALOG,
type ModelProviderConfig,
} from "openclaw/plugin-sdk/provider-models";
} from "./api.js";
export function buildDoubaoProvider(): ModelProviderConfig {
return {

59
extensions/xai/api.ts Normal file
View File

@@ -0,0 +1,59 @@
export { buildXaiProvider } from "./provider-catalog.js";
export {
buildXaiCatalogModels,
buildXaiModelDefinition,
resolveXaiCatalogEntry,
XAI_BASE_URL,
XAI_DEFAULT_CONTEXT_WINDOW,
XAI_DEFAULT_MODEL_ID,
XAI_DEFAULT_MODEL_REF,
XAI_DEFAULT_MAX_TOKENS,
} from "./model-definitions.js";
export { isModernXaiModel, resolveXaiForwardCompatModel } from "./provider-models.js";
// Compat overrides every xAI model must carry.
export const XAI_TOOL_SCHEMA_PROFILE = "xai";
export const HTML_ENTITY_TOOL_CALL_ARGUMENTS_ENCODING = "html-entities";
/**
 * Ensures a model's compat settings carry the xAI-specific overrides (tool
 * schema profile, native web-search tool, HTML-entity tool-call argument
 * encoding). Returns the input object unchanged — preserving referential
 * identity — when every override is already present with the expected value;
 * otherwise returns a shallow copy with the overrides merged into compat.
 */
export function applyXaiModelCompat<T extends { compat?: unknown }>(model: T): T {
  const patch = {
    toolSchemaProfile: XAI_TOOL_SCHEMA_PROFILE,
    nativeWebSearchTool: true,
    toolCallArgumentsEncoding: HTML_ENTITY_TOOL_CALL_ARGUMENTS_ENCODING,
  } satisfies Record<string, unknown>;
  const rawCompat = model.compat;
  const compat =
    rawCompat && typeof rawCompat === "object" ? (rawCompat as Record<string, unknown>) : undefined;
  // Fast path: every override already applied with the expected value.
  if (compat && Object.entries(patch).every(([key, value]) => compat[key] === value)) {
    return model;
  }
  const merged = {
    ...compat,
    ...patch,
  };
  return {
    ...model,
    compat: merged as T extends { compat?: infer TCompat } ? TCompat : never,
  } as T;
}
/**
 * Maps deprecated/alias xAI model ids onto their canonical ids; ids without
 * an alias pass through unchanged.
 */
export function normalizeXaiModelId(id: string): string {
  const aliases: Record<string, string> = {
    "grok-4-fast-reasoning": "grok-4-fast",
    "grok-4-1-fast-reasoning": "grok-4-1-fast",
    "grok-4.20-experimental-beta-0304-reasoning": "grok-4.20-beta-latest-reasoning",
    "grok-4.20-experimental-beta-0304-non-reasoning": "grok-4.20-beta-latest-non-reasoning",
    "grok-4.20-reasoning": "grok-4.20-beta-latest-reasoning",
    "grok-4.20-non-reasoning": "grok-4.20-beta-latest-non-reasoning",
  };
  return aliases[id] ?? id;
}

View File

@@ -1,8 +1,7 @@
import { defineSingleProviderPluginEntry } from "openclaw/plugin-sdk/provider-entry";
import { applyXaiModelCompat } from "openclaw/plugin-sdk/provider-models";
import { createToolStreamWrapper } from "openclaw/plugin-sdk/provider-stream";
import { applyXaiModelCompat, buildXaiProvider } from "./api.js";
import { applyXaiConfig, XAI_DEFAULT_MODEL_REF } from "./onboard.js";
import { buildXaiProvider } from "./provider-catalog.js";
import { isModernXaiModel, resolveXaiForwardCompatModel } from "./provider-models.js";
import {
createXaiToolCallArgumentDecodingWrapper,

View File

@@ -2,7 +2,8 @@ import type {
ProviderResolveDynamicModelContext,
ProviderRuntimeModel,
} from "openclaw/plugin-sdk/core";
import { applyXaiModelCompat, normalizeModelCompat } from "openclaw/plugin-sdk/provider-models";
import { normalizeModelCompat } from "openclaw/plugin-sdk/provider-models";
import { applyXaiModelCompat } from "./api.js";
import { resolveXaiCatalogEntry, XAI_BASE_URL } from "./model-definitions.js";
const XAI_MODERN_MODEL_PREFIXES = ["grok-3", "grok-4", "grok-code-fast"] as const;

View File

@@ -1,5 +1,5 @@
import { normalizeXaiModelId } from "openclaw/plugin-sdk/provider-models";
import { postTrustedWebToolsJson, wrapWebContent } from "openclaw/plugin-sdk/provider-web-search";
import { normalizeXaiModelId } from "../api.js";
export const XAI_WEB_SEARCH_ENDPOINT = "https://api.x.ai/v1/responses";
export const XAI_DEFAULT_WEB_SEARCH_MODEL = "grok-4-1-fast";

View File

@@ -1,44 +1,8 @@
import type { ModelDefinitionConfig } from "../config/types.js";
export const CLOUDFLARE_AI_GATEWAY_PROVIDER_ID = "cloudflare-ai-gateway";
export const CLOUDFLARE_AI_GATEWAY_DEFAULT_MODEL_ID = "claude-sonnet-4-5";
export const CLOUDFLARE_AI_GATEWAY_DEFAULT_MODEL_REF = `${CLOUDFLARE_AI_GATEWAY_PROVIDER_ID}/${CLOUDFLARE_AI_GATEWAY_DEFAULT_MODEL_ID}`;
export const CLOUDFLARE_AI_GATEWAY_DEFAULT_CONTEXT_WINDOW = 200_000;
export const CLOUDFLARE_AI_GATEWAY_DEFAULT_MAX_TOKENS = 64_000;
// Per-million-token cost defaults applied to every gateway model definition.
export const CLOUDFLARE_AI_GATEWAY_DEFAULT_COST = {
  input: 3,
  output: 15,
  cacheRead: 0.3,
  cacheWrite: 3.75,
};
/**
 * Builds a Cloudflare AI Gateway model definition. Every field is optional;
 * anything omitted (or a blank/whitespace id) falls back to the Claude
 * Sonnet 4.5 defaults above.
 */
export function buildCloudflareAiGatewayModelDefinition(params?: {
  id?: string;
  name?: string;
  reasoning?: boolean;
  input?: Array<"text" | "image">;
}): ModelDefinitionConfig {
  const trimmedId = params?.id?.trim();
  return {
    id: trimmedId || CLOUDFLARE_AI_GATEWAY_DEFAULT_MODEL_ID,
    name: params?.name ?? "Claude Sonnet 4.5",
    reasoning: params?.reasoning ?? true,
    input: params?.input ?? ["text", "image"],
    cost: CLOUDFLARE_AI_GATEWAY_DEFAULT_COST,
    contextWindow: CLOUDFLARE_AI_GATEWAY_DEFAULT_CONTEXT_WINDOW,
    maxTokens: CLOUDFLARE_AI_GATEWAY_DEFAULT_MAX_TOKENS,
  };
}
/**
 * Resolves the Anthropic-compatible base URL for a Cloudflare AI Gateway.
 * Returns "" when either the account id or the gateway id is blank after
 * trimming.
 */
export function resolveCloudflareAiGatewayBaseUrl(params: {
  accountId: string;
  gatewayId: string;
}): string {
  const accountId = params.accountId.trim();
  const gatewayId = params.gatewayId.trim();
  const hasBoth = accountId.length > 0 && gatewayId.length > 0;
  return hasBoth
    ? `https://gateway.ai.cloudflare.com/v1/${accountId}/${gatewayId}/anthropic`
    : "";
}
// Deprecated compat shim. Prefer openclaw/plugin-sdk/cloudflare-ai-gateway.
export {
buildCloudflareAiGatewayModelDefinition,
CLOUDFLARE_AI_GATEWAY_DEFAULT_MODEL_ID,
CLOUDFLARE_AI_GATEWAY_DEFAULT_MODEL_REF,
CLOUDFLARE_AI_GATEWAY_PROVIDER_ID,
resolveCloudflareAiGatewayBaseUrl,
} from "../plugin-sdk/cloudflare-ai-gateway.js";

View File

@@ -1,231 +1,9 @@
import type { ModelDefinitionConfig } from "../config/types.models.js";
import { createSubsystemLogger } from "../logging/subsystem.js";
import { isReasoningModelHeuristic } from "../plugin-sdk/provider-reasoning.js";
const log = createSubsystemLogger("huggingface-models");
/** Hugging Face Inference Providers (router) — OpenAI-compatible chat completions. */
export const HUGGINGFACE_BASE_URL = "https://router.huggingface.co/v1";
/** Router policy suffixes: router picks backend by cost or speed; no specific provider selection. */
export const HUGGINGFACE_POLICY_SUFFIXES = ["cheapest", "fastest"] as const;
/**
 * True when the model ref uses :cheapest or :fastest (or is exactly one of
 * those suffixes). When true, provider choice is locked (router decides);
 * do not show an interactive "prefer specific backend" option.
 */
export function isHuggingfacePolicyLocked(modelRef: string): boolean {
  const ref = String(modelRef).trim();
  for (const suffix of HUGGINGFACE_POLICY_SUFFIXES) {
    if (ref === suffix || ref.endsWith(`:${suffix}`)) {
      return true;
    }
  }
  return false;
}
/** Default cost when not in static catalog (HF pricing varies by provider). */
const HUGGINGFACE_DEFAULT_COST = {
input: 0,
output: 0,
cacheRead: 0,
cacheWrite: 0,
};
/** Defaults for models discovered from GET /v1/models. */
const HUGGINGFACE_DEFAULT_CONTEXT_WINDOW = 131072;
const HUGGINGFACE_DEFAULT_MAX_TOKENS = 8192;
/**
* Shape of a single model entry from GET https://router.huggingface.co/v1/models.
* Aligned with the Inference Providers API response (object, data[].id, owned_by, architecture, providers).
*/
interface HFModelEntry {
id: string;
object?: string;
created?: number;
/** Organisation that owns the model (e.g. "Qwen", "deepseek-ai"). Used for display when name/title absent. */
owned_by?: string;
/** Display name from API when present (not all responses include this). */
name?: string;
title?: string;
display_name?: string;
/** Input/output modalities; we use input_modalities for ModelDefinitionConfig.input. */
architecture?: {
input_modalities?: string[];
output_modalities?: string[];
[key: string]: unknown;
};
/** Backend providers; we use the first provider with context_length when available. */
providers?: Array<{
provider?: string;
context_length?: number;
status?: string;
pricing?: { input?: number; output?: number; [key: string]: unknown };
[key: string]: unknown;
}>;
[key: string]: unknown;
}
/** Response shape from GET https://router.huggingface.co/v1/models (OpenAI-style list). */
interface OpenAIListModelsResponse {
object?: string;
data?: HFModelEntry[];
}
/**
 * Static fallback catalog (costs per million tokens) used when live discovery
 * fails, returns nothing, lacks a token, or runs under tests.
 * NOTE(review): HF pricing varies by backing provider — confirm these numbers.
 */
export const HUGGINGFACE_MODEL_CATALOG: ModelDefinitionConfig[] = [
  {
    id: "deepseek-ai/DeepSeek-R1",
    name: "DeepSeek R1",
    reasoning: true,
    input: ["text"],
    contextWindow: 131072,
    maxTokens: 8192,
    cost: { input: 3.0, output: 7.0, cacheRead: 3.0, cacheWrite: 3.0 },
  },
  {
    id: "deepseek-ai/DeepSeek-V3.1",
    name: "DeepSeek V3.1",
    reasoning: false,
    input: ["text"],
    contextWindow: 131072,
    maxTokens: 8192,
    cost: { input: 0.6, output: 1.25, cacheRead: 0.6, cacheWrite: 0.6 },
  },
  {
    id: "meta-llama/Llama-3.3-70B-Instruct-Turbo",
    name: "Llama 3.3 70B Instruct Turbo",
    reasoning: false,
    input: ["text"],
    contextWindow: 131072,
    maxTokens: 8192,
    cost: { input: 0.88, output: 0.88, cacheRead: 0.88, cacheWrite: 0.88 },
  },
  {
    id: "openai/gpt-oss-120b",
    name: "GPT-OSS 120B",
    reasoning: false,
    input: ["text"],
    contextWindow: 131072,
    maxTokens: 8192,
    cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
  },
];
/**
 * Copies a static catalog entry into a fresh ModelDefinitionConfig object so
 * callers cannot mutate the shared catalog entry's top-level fields.
 */
export function buildHuggingfaceModelDefinition(
  model: (typeof HUGGINGFACE_MODEL_CATALOG)[number],
): ModelDefinitionConfig {
  const { id, name, reasoning, input, cost, contextWindow, maxTokens } = model;
  return { id, name, reasoning, input, cost, contextWindow, maxTokens };
}
/**
 * Infers a display name and reasoning flag from a Hub-style model id
 * (e.g. "deepseek-ai/DeepSeek-R1"). The name is the title-cased last path
 * segment with hyphens turned into spaces.
 */
function inferredMetaFromModelId(id: string): { name: string; reasoning: boolean } {
  const base = id.includes("/") ? id.slice(id.lastIndexOf("/") + 1) : id;
  const name = base.replace(/-/g, " ").replace(/\b(\w)/g, (c) => c.toUpperCase());
  return { name, reasoning: isReasoningModelHeuristic(id) };
}
/**
 * Picks a display name: trimmed API-supplied name/title/display_name first,
 * then "owned_by/<last id segment>", and finally the inferred name.
 */
function displayNameFromApiEntry(entry: HFModelEntry, inferredName: string): string {
  for (const candidate of [entry.name, entry.title, entry.display_name]) {
    if (typeof candidate === "string" && candidate.trim()) {
      return candidate.trim();
    }
  }
  if (typeof entry.owned_by === "string" && entry.owned_by.trim()) {
    const base = entry.id.split("/").pop() ?? entry.id;
    return `${entry.owned_by.trim()}/${base}`;
  }
  return inferredName;
}
/**
 * Discover chat-completion models from Hugging Face Inference Providers (GET /v1/models).
 * Requires a valid HF token. Falls back to static catalog on failure or in test env.
 * Known catalog entries keep their curated pricing; unknown discovered models
 * get default (zero) cost and the first backend-advertised context length.
 */
export async function discoverHuggingfaceModels(apiKey: string): Promise<ModelDefinitionConfig[]> {
  // Never hit the network under tests; the static catalog is deterministic.
  if (process.env.VITEST === "true" || process.env.NODE_ENV === "test") {
    return HUGGINGFACE_MODEL_CATALOG.map(buildHuggingfaceModelDefinition);
  }
  // Discovery requires a token; without one, serve the static catalog.
  const trimmedKey = apiKey?.trim();
  if (!trimmedKey) {
    return HUGGINGFACE_MODEL_CATALOG.map(buildHuggingfaceModelDefinition);
  }
  try {
    // GET https://router.huggingface.co/v1/models — response: { object, data: [{ id, owned_by, architecture: { input_modalities }, providers: [{ provider, context_length?, pricing? }] }] }. POST /v1/chat/completions requires Authorization.
    const response = await fetch(`${HUGGINGFACE_BASE_URL}/models`, {
      signal: AbortSignal.timeout(10_000),
      headers: {
        Authorization: `Bearer ${trimmedKey}`,
        "Content-Type": "application/json",
      },
    });
    if (!response.ok) {
      log.warn(`GET /v1/models failed: HTTP ${response.status}, using static catalog`);
      return HUGGINGFACE_MODEL_CATALOG.map(buildHuggingfaceModelDefinition);
    }
    const body = (await response.json()) as OpenAIListModelsResponse;
    const data = body?.data;
    if (!Array.isArray(data) || data.length === 0) {
      log.warn("No models in response, using static catalog");
      return HUGGINGFACE_MODEL_CATALOG.map(buildHuggingfaceModelDefinition);
    }
    // Curated entries override discovered metadata; `seen` dedupes by id.
    const catalogById = new Map(HUGGINGFACE_MODEL_CATALOG.map((m) => [m.id, m] as const));
    const seen = new Set<string>();
    const models: ModelDefinitionConfig[] = [];
    for (const entry of data) {
      const id = typeof entry?.id === "string" ? entry.id.trim() : "";
      if (!id || seen.has(id)) {
        continue;
      }
      seen.add(id);
      const catalogEntry = catalogById.get(id);
      if (catalogEntry) {
        models.push(buildHuggingfaceModelDefinition(catalogEntry));
      } else {
        // Unknown model: infer name/reasoning from the id, capability from
        // declared input modalities, and context from the first backend that
        // advertises a positive context_length.
        const inferred = inferredMetaFromModelId(id);
        const name = displayNameFromApiEntry(entry, inferred.name);
        const modalities = entry.architecture?.input_modalities;
        const input: Array<"text" | "image"> =
          Array.isArray(modalities) && modalities.includes("image") ? ["text", "image"] : ["text"];
        const providers = Array.isArray(entry.providers) ? entry.providers : [];
        const providerWithContext = providers.find(
          (p) => typeof p?.context_length === "number" && p.context_length > 0,
        );
        const contextLength =
          providerWithContext?.context_length ?? HUGGINGFACE_DEFAULT_CONTEXT_WINDOW;
        models.push({
          id,
          name,
          reasoning: inferred.reasoning,
          input,
          cost: HUGGINGFACE_DEFAULT_COST,
          contextWindow: contextLength,
          maxTokens: HUGGINGFACE_DEFAULT_MAX_TOKENS,
        });
      }
    }
    return models.length > 0
      ? models
      : HUGGINGFACE_MODEL_CATALOG.map(buildHuggingfaceModelDefinition);
  } catch (error) {
    // Network failures and timeouts degrade to the static catalog.
    log.warn(`Discovery failed: ${String(error)}, using static catalog`);
    return HUGGINGFACE_MODEL_CATALOG.map(buildHuggingfaceModelDefinition);
  }
}
// Deprecated compat shim. Prefer openclaw/plugin-sdk/huggingface.
export {
buildHuggingfaceModelDefinition,
discoverHuggingfaceModels,
HUGGINGFACE_BASE_URL,
HUGGINGFACE_MODEL_CATALOG,
HUGGINGFACE_POLICY_SUFFIXES,
isHuggingfacePolicyLocked,
} from "../plugin-sdk/huggingface.js";

View File

@@ -24,14 +24,6 @@ vi.mock("../config/config.js", async (importOriginal) => {
};
});
vi.mock("../../extensions/telegram/api.js", async (importOriginal) => {
const actual = await importOriginal<typeof import("../../extensions/telegram/api.js")>();
return {
...actual,
deleteTelegramUpdateOffset: offsetMocks.deleteTelegramUpdateOffset,
};
});
vi.mock("../../extensions/telegram/update-offset-runtime-api.js", async (importOriginal) => {
const actual =
await importOriginal<typeof import("../../extensions/telegram/update-offset-runtime-api.js")>();

View File

@@ -1,4 +1,4 @@
export {
applyGoogleGeminiModelDefault,
GOOGLE_GEMINI_DEFAULT_MODEL,
} from "../plugins/provider-model-defaults.js";
} from "../plugin-sdk/google.js";

View File

@@ -2,4 +2,4 @@ export {
applyOpenAIConfig,
applyOpenAIProviderConfig,
OPENAI_DEFAULT_MODEL,
} from "../plugins/provider-model-defaults.js";
} from "../plugin-sdk/openai.js";

View File

@@ -1,4 +1,4 @@
export {
applyOpencodeGoModelDefault,
OPENCODE_GO_DEFAULT_MODEL_REF,
} from "../plugins/provider-model-defaults.js";
} from "../plugin-sdk/opencode-go.js";

View File

@@ -1,4 +1,4 @@
export {
applyOpencodeZenModelDefault,
OPENCODE_ZEN_DEFAULT_MODEL,
} from "../plugins/provider-model-defaults.js";
} from "../plugin-sdk/opencode.js";

View File

@@ -0,0 +1,90 @@
import { describe, expect, it } from "vitest";
import type { OpenClawConfig } from "./config.js";
import {
listLegacyWebSearchConfigPaths,
migrateLegacyWebSearchConfig,
} from "./legacy-web-search.js";
describe("legacy web search config", () => {
  // Legacy tools.web.search provider blocks must move to per-plugin entries,
  // leaving only the generic `provider` selector behind.
  it("migrates legacy provider config through bundled web search ownership metadata", () => {
    const res = migrateLegacyWebSearchConfig<OpenClawConfig>({
      tools: {
        web: {
          search: {
            provider: "grok",
            apiKey: "brave-key",
            grok: {
              apiKey: "xai-key",
              model: "grok-4-search",
            },
            kimi: {
              apiKey: "kimi-key",
              model: "kimi-k2.5",
            },
          },
        },
      },
    });
    // Only the provider selector survives in the legacy location.
    expect(res.config.tools?.web?.search).toEqual({
      provider: "grok",
    });
    // Global apiKey is attributed to the brave plugin.
    expect(res.config.plugins?.entries?.brave).toEqual({
      enabled: true,
      config: {
        webSearch: {
          apiKey: "brave-key",
        },
      },
    });
    // grok → xai, kimi → moonshot per the bundled ownership metadata.
    expect(res.config.plugins?.entries?.xai).toEqual({
      enabled: true,
      config: {
        webSearch: {
          apiKey: "xai-key",
          model: "grok-4-search",
        },
      },
    });
    expect(res.config.plugins?.entries?.moonshot).toEqual({
      enabled: true,
      config: {
        webSearch: {
          apiKey: "kimi-key",
          model: "kimi-k2.5",
        },
      },
    });
    // Each migration is reported as a human-readable change entry.
    expect(res.changes).toEqual([
      "Moved tools.web.search.apiKey → plugins.entries.brave.config.webSearch.apiKey.",
      "Moved tools.web.search.grok → plugins.entries.xai.config.webSearch.",
      "Moved tools.web.search.kimi → plugins.entries.moonshot.config.webSearch.",
    ]);
  });
  it("lists legacy paths for metadata-owned provider config", () => {
    expect(
      listLegacyWebSearchConfigPaths({
        tools: {
          web: {
            search: {
              apiKey: "brave-key",
              grok: {
                apiKey: "xai-key",
                model: "grok-4-search",
              },
              kimi: {
                model: "kimi-k2.5",
              },
            },
          },
        },
      }),
    ).toEqual([
      "tools.web.search.apiKey",
      "tools.web.search.grok.apiKey",
      "tools.web.search.grok.model",
      "tools.web.search.kimi.model",
    ]);
  });
});

View File

@@ -1,3 +1,4 @@
import { BUNDLED_WEB_SEARCH_PROVIDER_PLUGIN_IDS } from "../plugins/bundled-capability-metadata.js";
import type { OpenClawConfig } from "./config.js";
import { mergeMissing } from "./legacy.shared.js";
@@ -11,16 +12,10 @@ const GENERIC_WEB_SEARCH_KEYS = new Set([
"cacheTtlMinutes",
]);
const LEGACY_PROVIDER_MAP = {
brave: "brave",
firecrawl: "firecrawl",
gemini: "google",
grok: "xai",
kimi: "moonshot",
perplexity: "perplexity",
} as const;
type LegacyProviderId = keyof typeof LEGACY_PROVIDER_MAP;
const LEGACY_WEB_SEARCH_PROVIDER_PLUGIN_IDS = BUNDLED_WEB_SEARCH_PROVIDER_PLUGIN_IDS;
const LEGACY_WEB_SEARCH_PROVIDER_IDS = Object.keys(LEGACY_WEB_SEARCH_PROVIDER_PLUGIN_IDS);
const LEGACY_WEB_SEARCH_PROVIDER_ID_SET = new Set(LEGACY_WEB_SEARCH_PROVIDER_IDS);
const LEGACY_GLOBAL_WEB_SEARCH_PROVIDER_ID = "brave";
function isRecord(value: unknown): value is JsonRecord {
return typeof value === "object" && value !== null && !Array.isArray(value);
@@ -49,10 +44,7 @@ function resolveLegacySearchConfig(raw: unknown): JsonRecord | undefined {
return isRecord(web?.search) ? web.search : undefined;
}
function copyLegacyProviderConfig(
search: JsonRecord,
providerKey: LegacyProviderId,
): JsonRecord | undefined {
function copyLegacyProviderConfig(search: JsonRecord, providerKey: string): JsonRecord | undefined {
const current = search[providerKey];
return isRecord(current) ? cloneRecord(current) : undefined;
}
@@ -69,9 +61,41 @@ function hasMappedLegacyWebSearchConfig(raw: unknown): boolean {
if (hasOwnKey(search, "apiKey")) {
return true;
}
return (Object.keys(LEGACY_PROVIDER_MAP) as LegacyProviderId[]).some((providerId) =>
isRecord(search[providerId]),
return LEGACY_WEB_SEARCH_PROVIDER_IDS.some((providerId) => isRecord(search[providerId]));
}
function resolveLegacyGlobalWebSearchMigration(search: JsonRecord): {
pluginId: string;
payload: JsonRecord;
legacyPath: string;
targetPath: string;
} | null {
const legacyProviderConfig = copyLegacyProviderConfig(
search,
LEGACY_GLOBAL_WEB_SEARCH_PROVIDER_ID,
);
const payload = legacyProviderConfig ?? {};
const hasLegacyApiKey = hasOwnKey(search, "apiKey");
if (hasLegacyApiKey) {
payload.apiKey = search.apiKey;
}
if (Object.keys(payload).length === 0) {
return null;
}
const pluginId =
LEGACY_WEB_SEARCH_PROVIDER_PLUGIN_IDS[LEGACY_GLOBAL_WEB_SEARCH_PROVIDER_ID] ??
LEGACY_GLOBAL_WEB_SEARCH_PROVIDER_ID;
return {
pluginId,
payload,
legacyPath: hasLegacyApiKey
? "tools.web.search.apiKey"
: `tools.web.search.${LEGACY_GLOBAL_WEB_SEARCH_PROVIDER_ID}`,
targetPath:
hasLegacyApiKey && !legacyProviderConfig
? `plugins.entries.${pluginId}.config.webSearch.apiKey`
: `plugins.entries.${pluginId}.config.webSearch`,
};
}
function migratePluginWebSearchConfig(params: {
@@ -123,7 +147,7 @@ export function listLegacyWebSearchConfigPaths(raw: unknown): string[] {
if ("apiKey" in search) {
paths.push("tools.web.search.apiKey");
}
for (const providerId of Object.keys(LEGACY_PROVIDER_MAP) as LegacyProviderId[]) {
for (const providerId of LEGACY_WEB_SEARCH_PROVIDER_IDS) {
const scoped = search[providerId];
if (isRecord(scoped)) {
for (const key of Object.keys(scoped)) {
@@ -179,12 +203,8 @@ function normalizeLegacyWebSearchConfigRecord<T extends JsonRecord>(
if (key === "apiKey") {
continue;
}
if (
(Object.keys(LEGACY_PROVIDER_MAP) as LegacyProviderId[]).includes(key as LegacyProviderId)
) {
if (isRecord(value)) {
continue;
}
if (LEGACY_WEB_SEARCH_PROVIDER_ID_SET.has(key) && isRecord(value)) {
continue;
}
if (GENERIC_WEB_SEARCH_KEYS.has(key) || !isRecord(value)) {
nextSearch[key] = value;
@@ -192,37 +212,35 @@ function normalizeLegacyWebSearchConfigRecord<T extends JsonRecord>(
}
web.search = nextSearch;
const legacyBraveConfig = copyLegacyProviderConfig(search, "brave");
const braveConfig = legacyBraveConfig ?? {};
if (hasOwnKey(search, "apiKey")) {
braveConfig.apiKey = search.apiKey;
}
if (Object.keys(braveConfig).length > 0) {
const globalSearchMigration = resolveLegacyGlobalWebSearchMigration(search);
if (globalSearchMigration) {
migratePluginWebSearchConfig({
root: nextRoot,
legacyPath: hasOwnKey(search, "apiKey")
? "tools.web.search.apiKey"
: "tools.web.search.brave",
targetPath:
hasOwnKey(search, "apiKey") && !legacyBraveConfig
? "plugins.entries.brave.config.webSearch.apiKey"
: "plugins.entries.brave.config.webSearch",
pluginId: LEGACY_PROVIDER_MAP.brave,
payload: braveConfig,
legacyPath: globalSearchMigration.legacyPath,
targetPath: globalSearchMigration.targetPath,
pluginId: globalSearchMigration.pluginId,
payload: globalSearchMigration.payload,
changes,
});
}
for (const providerId of ["firecrawl", "gemini", "grok", "kimi", "perplexity"] as const) {
for (const providerId of LEGACY_WEB_SEARCH_PROVIDER_IDS) {
if (providerId === LEGACY_GLOBAL_WEB_SEARCH_PROVIDER_ID) {
continue;
}
const scoped = copyLegacyProviderConfig(search, providerId);
if (!scoped || Object.keys(scoped).length === 0) {
continue;
}
const pluginId = LEGACY_WEB_SEARCH_PROVIDER_PLUGIN_IDS[providerId];
if (!pluginId) {
continue;
}
migratePluginWebSearchConfig({
root: nextRoot,
legacyPath: `tools.web.search.${providerId}`,
targetPath: `plugins.entries.${LEGACY_PROVIDER_MAP[providerId]}.config.webSearch`,
pluginId: LEGACY_PROVIDER_MAP[providerId],
targetPath: `plugins.entries.${pluginId}.config.webSearch`,
pluginId,
payload: scoped,
changes,
});

View File

@@ -1,10 +1,12 @@
import {
ZAI_CN_BASE_URL,
ZAI_CODING_CN_BASE_URL,
ZAI_CODING_GLOBAL_BASE_URL,
ZAI_GLOBAL_BASE_URL,
} from "../plugin-sdk/zai.js";
import { fetchWithTimeout } from "../utils/fetch-timeout.js";
export type ZaiEndpointId = "global" | "cn" | "coding-global" | "coding-cn";
const ZAI_CODING_GLOBAL_BASE_URL = "https://api.z.ai/api/coding/paas/v4";
const ZAI_CODING_CN_BASE_URL = "https://open.bigmodel.cn/api/coding/paas/v4";
const ZAI_GLOBAL_BASE_URL = "https://api.z.ai/api/paas/v4";
const ZAI_CN_BASE_URL = "https://open.bigmodel.cn/api/paas/v4";
export type ZaiDetectedEndpoint = {
endpoint: ZaiEndpointId;