fix: prefer exact provider config match

This commit is contained in:
Josh Lehman
2026-03-05 11:20:56 -08:00
parent c109fda016
commit 20fa77289c
3 changed files with 70 additions and 1 deletions

View File

@@ -483,6 +483,7 @@ Docs: https://docs.openclaw.ai
### Fixes
- Models/provider config precedence: prefer exact `models.providers.<name>` matches before normalized provider aliases in embedded model resolution, preventing alias/canonical key collisions from applying the wrong provider `api`, `baseUrl`, or headers. (#35934) Thanks @RealKai42.
- Logging/Subsystem console timestamps: route subsystem console timestamp rendering through `formatConsoleTimestamp(...)` so `pretty` and timestamp-prefix output use local timezone formatting consistently instead of inline UTC `toISOString()` paths. (#25970) Thanks @openperf.
- Feishu/Multi-account + reply reliability: add `channels.feishu.defaultAccount` outbound routing support with schema validation, prevent inbound preview text from leaking into prompt system events, keep quoted-message extraction text-first (post/interactive/file placeholders instead of raw JSON), route Feishu video sends as `msg_type: "file"`, and avoid websocket event blocking by using non-blocking event handling in monitor dispatch. Landed from contributor PRs #31209, #29610, #30432, #30331, and #29501. Thanks @stakeswky, @hclsys, @bmendonca3, @patrick-yingxi-pan, and @zwffff.
- Feishu/Target routing + replies + dedupe: normalize provider-prefixed targets (`feishu:`/`lark:`), prefer configured `channels.feishu.defaultAccount` for tool execution, honor Feishu outbound `renderMode` in adapter text/caption sends, fall back to normal send when reply targets are withdrawn/deleted, and add synchronous in-memory dedupe guard for concurrent duplicate inbound events. Landed from contributor PRs #30428, #30438, #29958, #30444, and #29463. Thanks @bmendonca3 and @Yaxuan42.

View File

@@ -330,6 +330,66 @@ describe("resolveModel", () => {
});
});
// Regression test for provider-config precedence: when both the exact key
// ("qwen") and a normalized/canonical alias key ("qwen-portal") exist under
// models.providers, the exact match must win, so its api, baseUrl, and
// headers are the ones applied to the resolved model.
it("prefers exact provider config over normalized alias match when both keys exist", () => {
  // Discovered template model supplies defaults that the configured
  // provider entries below are expected to override.
  mockDiscoveredModel({
    provider: "qwen",
    modelId: "qwen3-coder-plus",
    templateModel: {
      id: "qwen3-coder-plus",
      name: "Qwen3 Coder Plus",
      provider: "qwen",
      api: "openai-completions",
      baseUrl: "https://default-provider.example.com/v1",
      reasoning: false,
      input: ["text"],
      cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
      contextWindow: 8192,
      maxTokens: 2048,
    },
  });
  // "qwen-portal" plays the canonical/normalized key; "qwen" is the key
  // that matches the requested provider name verbatim.
  const config = {
    models: {
      providers: {
        "qwen-portal": {
          baseUrl: "https://canonical-provider.example.com/v1",
          api: "openai-completions",
          headers: { "X-Provider": "canonical" },
          models: [{ ...makeModel("qwen3-coder-plus"), reasoning: false }],
        },
        qwen: {
          baseUrl: "https://alias-provider.example.com/v1",
          api: "anthropic-messages",
          headers: { "X-Provider": "alias" },
          models: [
            {
              ...makeModel("qwen3-coder-plus"),
              api: "anthropic-messages",
              reasoning: true,
              contextWindow: 262144,
              maxTokens: 32768,
            },
          ],
        },
      },
    },
  } as OpenClawConfig;
  const resolution = resolveModel("qwen", "qwen3-coder-plus", "/tmp/agent", config);
  expect(resolution.error).toBeUndefined();
  // Every override must come from the exact "qwen" entry, not "qwen-portal".
  // NOTE(review): the expected baseUrl lacks the configured "/v1" suffix —
  // presumably the resolver trims the trailing path segment; confirm against
  // the resolution code before relying on this.
  expect(resolution.model).toMatchObject({
    provider: "qwen",
    id: "qwen3-coder-plus",
    api: "anthropic-messages",
    baseUrl: "https://alias-provider.example.com",
    reasoning: true,
    contextWindow: 262144,
    maxTokens: 32768,
    headers: { "X-Provider": "alias" },
  });
});
it("builds an openai-codex fallback for gpt-5.3-codex", () => {
mockOpenAICodexTemplateModel();

View File

@@ -28,7 +28,15 @@ function resolveConfiguredProviderConfig(
cfg: OpenClawConfig | undefined,
provider: string,
): InlineProviderConfig | undefined {
return findNormalizedProviderValue(cfg?.models?.providers, provider);
const configuredProviders = cfg?.models?.providers;
if (!configuredProviders) {
return undefined;
}
const exactProviderConfig = configuredProviders[provider];
if (exactProviderConfig) {
return exactProviderConfig;
}
return findNormalizedProviderValue(configuredProviders, provider);
}
function applyConfiguredProviderOverrides(params: {