fix(mistral): repair max-token defaults and doctor migration (#53054)

* fix(mistral): repair max-token defaults and doctor migration

* fix(mistral): add missing small-model repair cap
This commit is contained in:
Vincent Koc
2026-03-23 10:57:56 -07:00
committed by GitHub
parent ffb287e1de
commit dd586d59ed
9 changed files with 224 additions and 10 deletions

View File

@@ -26,6 +26,7 @@ Docs: https://docs.openclaw.ai
- Agents/Anthropic: preserve latest assistant thinking and redacted-thinking block ordering during transcript image sanitization so follow-up turns do not trip Anthropic's unmodified-thinking validation. (#52961) Thanks @vincentkoc.
- Voice-call/Plivo: stabilize Plivo v2 replay keys so webhook retries and replay protection stop colliding on valid follow-up deliveries.
- Release/install: keep previously released bundled plugins and Control UI assets in published openclaw npm installs, and fail release checks when those shipped artifacts are missing. Thanks @vincentkoc.
- Mistral/models: lower bundled Mistral max-token defaults to safe output budgets and teach `openclaw doctor --fix` to repair old persisted Mistral provider configs that still carry context-sized output limits, avoiding deterministic Mistral 422 rejects on fresh and existing setups. Fixes #52599. Thanks @vincentkoc.
## 2026.3.22

View File

@@ -39,13 +39,13 @@ describe("mistral model definitions", () => {
reasoning: true,
input: ["text"],
contextWindow: 128000,
maxTokens: 128000,
maxTokens: 40000,
}),
expect.objectContaining({
id: "pixtral-large-latest",
input: ["text", "image"],
contextWindow: 128000,
maxTokens: 128000,
maxTokens: 32768,
}),
]),
);

View File

@@ -4,7 +4,7 @@ export const MISTRAL_BASE_URL = "https://api.mistral.ai/v1";
export const MISTRAL_DEFAULT_MODEL_ID = "mistral-large-latest";
export const MISTRAL_DEFAULT_MODEL_REF = `mistral/${MISTRAL_DEFAULT_MODEL_ID}`;
export const MISTRAL_DEFAULT_CONTEXT_WINDOW = 262144;
export const MISTRAL_DEFAULT_MAX_TOKENS = 262144;
export const MISTRAL_DEFAULT_MAX_TOKENS = 16384;
export const MISTRAL_DEFAULT_COST = {
input: 0.5,
output: 1.5,
@@ -29,7 +29,7 @@ const MISTRAL_MODEL_CATALOG = [
input: ["text"],
cost: { input: 0.4, output: 2, cacheRead: 0, cacheWrite: 0 },
contextWindow: 262144,
maxTokens: 262144,
maxTokens: 32768,
},
{
id: "magistral-small",
@@ -38,7 +38,7 @@ const MISTRAL_MODEL_CATALOG = [
input: ["text"],
cost: { input: 0.5, output: 1.5, cacheRead: 0, cacheWrite: 0 },
contextWindow: 128000,
maxTokens: 128000,
maxTokens: 40000,
},
{
id: "mistral-large-latest",
@@ -56,7 +56,7 @@ const MISTRAL_MODEL_CATALOG = [
input: ["text", "image"],
cost: { input: 0.4, output: 2, cacheRead: 0, cacheWrite: 0 },
contextWindow: 262144,
maxTokens: 262144,
maxTokens: 8192,
},
{
id: "mistral-small-latest",
@@ -74,7 +74,7 @@ const MISTRAL_MODEL_CATALOG = [
input: ["text", "image"],
cost: { input: 2, output: 6, cacheRead: 0, cacheWrite: 0 },
contextWindow: 128000,
maxTokens: 128000,
maxTokens: 32768,
},
] as const satisfies readonly ModelDefinitionConfig[];

View File

@@ -669,4 +669,52 @@ describe("normalizeCompatibilityConfigValues", () => {
"Merged tools.media.models[0].deepgram → tools.media.models[0].providerOptions.deepgram (filled missing canonical fields from legacy).",
]);
});
// Regression coverage for the doctor migration that repairs persisted
// Mistral model entries still carrying context-window-sized maxTokens.
it("normalizes persisted mistral model maxTokens that matched the old context-sized defaults", () => {
  const result = normalizeCompatibilityConfigValues({
    models: {
      providers: {
        mistral: {
          baseUrl: "https://api.mistral.ai/v1",
          api: "openai-completions",
          models: [
            {
              id: "mistral-large-latest",
              name: "Mistral Large",
              reasoning: false,
              input: ["text", "image"],
              cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
              contextWindow: 262144,
              maxTokens: 262144,
            },
            {
              id: "magistral-small",
              name: "Magistral Small",
              reasoning: true,
              input: ["text"],
              cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
              contextWindow: 128000,
              maxTokens: 128000,
            },
          ],
        },
      },
    },
  });
  const repairedModels = result.config.models?.providers?.mistral?.models;
  expect(repairedModels).toEqual([
    expect.objectContaining({ id: "mistral-large-latest", maxTokens: 16384 }),
    expect.objectContaining({ id: "magistral-small", maxTokens: 40000 }),
  ]);
  expect(result.changes).toEqual([
    "Normalized models.providers.mistral.models[0].maxTokens (262144 → 16384) to avoid Mistral context-window rejects.",
    "Normalized models.providers.mistral.models[1].maxTokens (128000 → 40000) to avoid Mistral context-window rejects.",
  ]);
});
});

View File

@@ -1,5 +1,7 @@
import { normalizeProviderId } from "../agents/model-selection.js";
import { shouldMoveSingleAccountChannelKey } from "../channels/plugins/setup-helpers.js";
import type { OpenClawConfig } from "../config/config.js";
import { resolveNormalizedProviderModelMaxTokens } from "../config/defaults.js";
import {
formatSlackStreamingBooleanMigrationMessage,
formatSlackStreamModeMigrationMessage,
@@ -809,11 +811,91 @@ export function normalizeCompatibilityConfigValues(cfg: OpenClawConfig): {
};
};
// Repairs persisted Mistral provider model entries whose maxTokens still
// carry the old context-window-sized defaults, lowering them to safe output
// budgets via resolveNormalizedProviderModelMaxTokens and recording one
// human-readable change message per repaired model. Mutates `next` (the
// working config copy) immutably and appends to `changes`.
const normalizeLegacyMistralModelMaxTokens = () => {
  const rawProviders = next.models?.providers;
  if (!isRecord(rawProviders)) {
    return;
  }
  let providersChanged = false;
  const nextProviders = { ...rawProviders };
  for (const [providerId, rawProvider] of Object.entries(rawProviders)) {
    // Only Mistral providers are eligible; other providers pass through.
    if (normalizeProviderId(providerId) !== "mistral" || !isRecord(rawProvider)) {
      continue;
    }
    const rawModels = rawProvider.models;
    if (!Array.isArray(rawModels)) {
      continue;
    }
    let modelsChanged = false;
    const nextModels = rawModels.map((model, index) => {
      if (!isRecord(model)) {
        return model;
      }
      const modelId = typeof model.id === "string" ? model.id.trim() : "";
      const contextWindow =
        typeof model.contextWindow === "number" && Number.isFinite(model.contextWindow)
          ? model.contextWindow
          : null;
      const maxTokens =
        typeof model.maxTokens === "number" && Number.isFinite(model.maxTokens)
          ? model.maxTokens
          : null;
      // Entries missing any field needed to decide on a repair are left as-is.
      if (!modelId || contextWindow === null || maxTokens === null) {
        return model;
      }
      const normalizedMaxTokens = resolveNormalizedProviderModelMaxTokens({
        providerId,
        modelId,
        contextWindow,
        rawMaxTokens: maxTokens,
      });
      if (normalizedMaxTokens === maxTokens) {
        return model;
      }
      modelsChanged = true;
      // Fix: include the " → " separator so the message reads
      // "(<old> → <new>)", matching the doctor-migration test expectations
      // and the wording of the other normalization messages.
      changes.push(
        `Normalized models.providers.${providerId}.models[${index}].maxTokens (${maxTokens} → ${normalizedMaxTokens}) to avoid Mistral context-window rejects.`,
      );
      return {
        ...model,
        maxTokens: normalizedMaxTokens,
      };
    });
    if (!modelsChanged) {
      continue;
    }
    nextProviders[providerId] = {
      ...rawProvider,
      models: nextModels,
    };
    providersChanged = true;
  }
  if (!providersChanged) {
    return;
  }
  // Rebuild the config top-down so untouched branches keep their identity.
  next = {
    ...next,
    models: {
      ...next.models,
      providers: nextProviders as NonNullable<OpenClawConfig["models"]>["providers"],
    },
  };
};
normalizeBrowserSsrFPolicyAlias();
normalizeLegacyNanoBananaSkill();
normalizeLegacyTalkConfig();
normalizeLegacyCrossContextMessageConfig();
normalizeLegacyMediaProviderOptions();
normalizeLegacyMistralModelMaxTokens();
const legacyAckReaction = cfg.messages?.ackReaction?.trim();
const hasWhatsAppConfig = cfg.channels?.whatsapp !== undefined;

View File

@@ -7,6 +7,7 @@ import {
applyMinimaxApiConfig,
applyMinimaxApiProviderConfig,
} from "../../extensions/minimax/onboard.js";
import { buildMistralModelDefinition as buildBundledMistralModelDefinition } from "../../extensions/mistral/model-definitions.js";
import {
applyMistralConfig,
applyMistralProviderConfig,
@@ -50,6 +51,7 @@ import {
} from "../plugins/provider-auth-storage.js";
import {
MISTRAL_DEFAULT_MODEL_REF,
buildMistralModelDefinition as buildCoreMistralModelDefinition,
ZAI_CODING_CN_BASE_URL,
ZAI_GLOBAL_BASE_URL,
} from "../plugins/provider-model-definitions.js";
@@ -659,7 +661,18 @@ describe("applyMistralProviderConfig", () => {
(model) => model.id === "mistral-large-latest",
);
expect(mistralDefault?.contextWindow).toBe(262144);
expect(mistralDefault?.maxTokens).toBe(262144);
expect(mistralDefault?.maxTokens).toBe(16384);
});
// Guards against the bundled extension catalog and the core onboarding
// catalog drifting apart on the default Mistral model's key fields.
it("keeps the core and bundled mistral defaults aligned", () => {
  const bundled = buildBundledMistralModelDefinition();
  const expectedShape = {
    id: bundled.id,
    contextWindow: bundled.contextWindow,
    maxTokens: bundled.maxTokens,
  };
  expect(buildCoreMistralModelDefinition()).toMatchObject(expectedShape);
});
});

View File

@@ -41,6 +41,14 @@ const DEFAULT_MODEL_COST: ModelDefinitionConfig["cost"] = {
};
const DEFAULT_MODEL_INPUT: ModelDefinitionConfig["input"] = ["text"];
const DEFAULT_MODEL_MAX_TOKENS = 8192;
// Safe per-model output-token ceilings applied when a Mistral model's
// maxTokens would otherwise equal its full context window. Models not
// listed here fall back to DEFAULT_MODEL_MAX_TOKENS (see
// resolveNormalizedProviderModelMaxTokens).
const MISTRAL_SAFE_MAX_TOKENS_BY_MODEL = {
  "devstral-medium-latest": 32_768,
  "magistral-small": 40_000,
  "mistral-large-latest": 16_384,
  "mistral-medium-2508": 8_192,
  "mistral-small-latest": 16_384,
  "pixtral-large-latest": 32_768,
} as const;
type ModelDefinitionLike = Partial<ModelDefinitionConfig> &
Pick<ModelDefinitionConfig, "id" | "name">;
@@ -71,6 +79,24 @@ function resolveModelCost(
};
}
/**
 * Resolves the effective maxTokens for a provider model entry.
 *
 * Every value is first clamped to the context window. For Mistral models
 * whose clamped value still fills the entire context window (a legacy
 * context-sized default), the value is further lowered to a per-model safe
 * output budget, falling back to DEFAULT_MODEL_MAX_TOKENS for unknown ids.
 * Non-Mistral providers and already-smaller values only get the plain clamp.
 */
export function resolveNormalizedProviderModelMaxTokens(params: {
  providerId: string;
  modelId: string;
  contextWindow: number;
  rawMaxTokens: number;
}): number {
  const { providerId, modelId, contextWindow, rawMaxTokens } = params;
  const clampedToWindow = Math.min(rawMaxTokens, contextWindow);
  const isMistral = normalizeProviderId(providerId) === "mistral";
  if (!isMistral || clampedToWindow < contextWindow) {
    return clampedToWindow;
  }
  const safeCap =
    MISTRAL_SAFE_MAX_TOKENS_BY_MODEL[
      modelId as keyof typeof MISTRAL_SAFE_MAX_TOKENS_BY_MODEL
    ] ?? DEFAULT_MODEL_MAX_TOKENS;
  return Math.min(safeCap, contextWindow);
}
function resolveAnthropicDefaultAuthMode(cfg: OpenClawConfig): AnthropicAuthDefaultsMode | null {
const profiles = cfg.auth?.profiles ?? {};
const anthropicProfiles = Object.entries(profiles).filter(
@@ -263,7 +289,12 @@ export function applyModelDefaults(cfg: OpenClawConfig): OpenClawConfig {
const defaultMaxTokens = Math.min(DEFAULT_MODEL_MAX_TOKENS, contextWindow);
const rawMaxTokens = isPositiveNumber(raw.maxTokens) ? raw.maxTokens : defaultMaxTokens;
const maxTokens = Math.min(rawMaxTokens, contextWindow);
const maxTokens = resolveNormalizedProviderModelMaxTokens({
providerId,
modelId: raw.id,
contextWindow,
rawMaxTokens,
});
if (raw.maxTokens !== maxTokens) {
modelMutated = true;
}

View File

@@ -29,6 +29,35 @@ describe("applyModelDefaults", () => {
} satisfies OpenClawConfig;
}
// Builds a minimal config carrying one persisted Mistral model entry;
// overrides let individual tests vary the stale id/window/token fields.
function buildMistralProviderConfig(overrides?: {
  modelId?: string;
  contextWindow?: number;
  maxTokens?: number;
}) {
  const {
    modelId = "mistral-large-latest",
    contextWindow = 262_144,
    maxTokens = 262_144,
  } = overrides ?? {};
  return {
    models: {
      providers: {
        mistral: {
          baseUrl: "https://api.mistral.ai/v1",
          apiKey: "sk-mistral", // pragma: allowlist secret
          api: "openai-completions",
          models: [
            {
              id: modelId,
              name: "Mistral",
              reasoning: false,
              input: ["text", "image"],
              cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
              contextWindow,
              maxTokens,
            },
          ],
        },
      },
    },
  } satisfies OpenClawConfig;
}
it("adds default aliases when models are present", () => {
const cfg = {
agents: {
@@ -109,6 +138,16 @@ describe("applyModelDefaults", () => {
expect(model?.maxTokens).toBe(32768);
});
// applyModelDefaults should shrink a persisted context-window-sized
// Mistral maxTokens down to the safe output budget.
it("normalizes stale mistral maxTokens that matched the full context window", () => {
  const normalized = applyModelDefaults(buildMistralProviderConfig());
  const repaired = normalized.models?.providers?.mistral?.models?.[0];
  expect(repaired?.contextWindow).toBe(262144);
  expect(repaired?.maxTokens).toBe(16384);
});
it("defaults anthropic provider and model api to anthropic-messages", () => {
const cfg = {
models: {

View File

@@ -27,7 +27,7 @@ const MISTRAL_BASE_URL = "https://api.mistral.ai/v1";
const MISTRAL_DEFAULT_MODEL_ID = "mistral-large-latest";
const MISTRAL_DEFAULT_MODEL_REF = `mistral/${MISTRAL_DEFAULT_MODEL_ID}`;
const MISTRAL_DEFAULT_CONTEXT_WINDOW = 262144;
const MISTRAL_DEFAULT_MAX_TOKENS = 262144;
const MISTRAL_DEFAULT_MAX_TOKENS = 16384;
const MISTRAL_DEFAULT_COST = { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 };
const MODELSTUDIO_CN_BASE_URL = "https://coding.dashscope.aliyuncs.com/v1";