feat(config): expose full pi-ai model compat fields in config schema (openclaw#11063) thanks @ikari-pl

Verified:
- pnpm build
- pnpm check
- pnpm test (full run; transient lobster timeout rerun passed)

Co-authored-by: ikari-pl <811702+ikari-pl@users.noreply.github.com>
Co-authored-by: Tak Hoffman <781889+Takhoffman@users.noreply.github.com>
This commit is contained in:
Cezar “ikari” Pokorski
2026-02-14 01:47:18 +01:00
committed by GitHub
parent 28431b84cc
commit d134c854a5
4 changed files with 49 additions and 0 deletions

View File

@@ -145,6 +145,7 @@ Docs: https://docs.openclaw.ai
- Onboarding/Providers: update MiniMax API default/recommended models from M2.1 to M2.5, add M2.5/M2.5-Lightning model entries, and include `minimax-m2.5` in modern model filtering. (#14865) Thanks @adao-max.
- Ollama: use configured `models.providers.ollama.baseUrl` for model discovery and normalize `/v1` endpoints to the native Ollama API root. (#14131) Thanks @shtse8.
- Voice Call: pass Twilio stream auth token via `<Parameter>` instead of query string. (#14029) Thanks @mcwigglesmcgee.
- Config/Models: allow full `models.providers.*.models[*].compat` keys used by `openai-completions` (`thinkingFormat`, `supportsStrictMode`, and streaming/tool-result compatibility flags) so valid provider overrides no longer fail strict config validation. (#11063) Thanks @ikari-pl.
- Feishu: pass `Buffer` directly to the Feishu SDK upload APIs instead of `Readable.from(...)` to avoid form-data upload failures. (#10345) Thanks @youngerstyle.
- Feishu: trigger mention-gated group handling only when the bot itself is mentioned (not just any mention). (#11088) Thanks @openperf.
- Feishu: probe status uses the resolved account context for multi-account credential checks. (#11233) Thanks @onevcat.

View File

@@ -0,0 +1,34 @@
import { describe, expect, it } from "vitest";
import { validateConfigObject } from "./validation.js";

describe("model compat config schema", () => {
  it("accepts full openai-completions compat fields", () => {
    // The complete set of per-model compat overrides understood by the
    // openai-completions provider; the schema must accept every one of them.
    const compat = {
      supportsUsageInStreaming: true,
      supportsStrictMode: false,
      thinkingFormat: "qwen",
      requiresToolResultName: true,
      requiresAssistantAfterToolResult: false,
      requiresThinkingAsText: false,
      requiresMistralToolIds: false,
    };

    // Minimal provider config wrapping a single model that carries the
    // full compat block above.
    const config = {
      models: {
        providers: {
          local: {
            baseUrl: "http://127.0.0.1:1234/v1",
            api: "openai-completions",
            models: [{ id: "qwen3-32b", name: "Qwen3 32B", compat }],
          },
        },
      },
    };

    const result = validateConfigObject(config);
    expect(result.ok).toBe(true);
  });
});

View File

@@ -11,7 +11,14 @@ export type ModelCompatConfig = {
supportsStore?: boolean;
supportsDeveloperRole?: boolean;
supportsReasoningEffort?: boolean;
supportsUsageInStreaming?: boolean;
supportsStrictMode?: boolean;
maxTokensField?: "max_completion_tokens" | "max_tokens";
thinkingFormat?: "openai" | "zai" | "qwen";
requiresToolResultName?: boolean;
requiresAssistantAfterToolResult?: boolean;
requiresThinkingAsText?: boolean;
requiresMistralToolIds?: boolean;
};
export type ModelProviderAuthMode = "api-key" | "aws-sdk" | "oauth" | "token";

View File

@@ -17,9 +17,16 @@ export const ModelCompatSchema = z
supportsStore: z.boolean().optional(),
supportsDeveloperRole: z.boolean().optional(),
supportsReasoningEffort: z.boolean().optional(),
supportsUsageInStreaming: z.boolean().optional(),
supportsStrictMode: z.boolean().optional(),
maxTokensField: z
.union([z.literal("max_completion_tokens"), z.literal("max_tokens")])
.optional(),
thinkingFormat: z.union([z.literal("openai"), z.literal("zai"), z.literal("qwen")]).optional(),
requiresToolResultName: z.boolean().optional(),
requiresAssistantAfterToolResult: z.boolean().optional(),
requiresThinkingAsText: z.boolean().optional(),
requiresMistralToolIds: z.boolean().optional(),
})
.strict()
.optional();