mirror of
https://github.com/moltbot/moltbot.git
synced 2026-03-07 14:34:21 +00:00
fix(ui): inherit default model fallbacks in agents overview (#25729)
Land PR #25729 from @Suko. Use shared fallback-resolution helper and add regression coverage for default, override, and explicit-empty cases. Co-authored-by: suko <miha.sukic@gmail.com>
This commit is contained in:
@@ -33,6 +33,7 @@ Docs: https://docs.openclaw.ai
|
||||
- Models/Bedrock auth: normalize additional Bedrock provider aliases (`bedrock`, `aws-bedrock`, `aws_bedrock`, `amazon bedrock`) to canonical `amazon-bedrock`, ensuring auth-mode resolution consistently selects AWS SDK fallback. (#25756) Thanks @fwhite13.
|
||||
- Providers/SiliconFlow: normalize `thinking="off"` to `thinking: null` for `Pro/*` model payloads to avoid provider-side 400 loops and misleading compaction retries. (#25435) Thanks @Zjianru.
|
||||
- Gateway/Models: honor explicit `agents.defaults.models` allowlist refs even when bundled model catalog data is stale, synthesize missing allowlist entries in `models.list`, and allow `sessions.patch`/`/model` selection for those refs without false `model not allowed` errors. (#20291) Thanks @kensipe, @nikolasdehor, and @vincentkoc.
|
||||
- Control UI/Agents: inherit `agents.defaults.model.fallbacks` in the Overview fallback input when no per-agent model entry exists, while preserving explicit per-agent fallback overrides (including empty lists). (#25729, #25710) Thanks @Suko.
|
||||
- Automation/Subagent/Cron reliability: honor `ANNOUNCE_SKIP` in `sessions_spawn` completion/direct announce flows (no user-visible token leaks), add transient direct-announce retries for channel unavailability (for example WhatsApp listener reconnect windows), and include `cron` in the `coding` tool profile so `/tools/invoke` can execute cron actions when explicitly allowed by gateway policy. (#25800, #25656, #25842, #25813, #25822, #25821) Thanks @astra-fer, @aaajiao, @dwight11232-coder, @kevinWangSheng, @widingmarcus-cyber, and @stakeswky.
|
||||
- Discord/Proxy + reactions + model picker: thread channel proxy fetch into inbound media/sticker downloads, use proxy-aware gateway metadata fetch for WSL/corporate proxy setups, wire `messages.statusReactions.{emojis,timing}` into Discord reaction lifecycle control, and compact model-picker `custom_id` keys to stay under Discord's 100-char limit while keeping backward-compatible parsing. (#25232, #25507, #25564, #25695) Thanks @openperf, @chilu18, @Yipsh, @lbo728, and @s1korrrr.
|
||||
- Discord/Block streaming: restore block-streamed reply delivery by suppressing only reasoning payloads (instead of all `block` payloads), fixing missing Discord replies in `channels.discord.streaming=block` mode. (#25839, #25836, #25792) Thanks @pewallin.
|
||||
|
||||
42 lines — ui/src/ui/views/agents-utils.test.ts (new file)
@@ -0,0 +1,42 @@
|
||||
import { describe, expect, it } from "vitest";
|
||||
import { resolveEffectiveModelFallbacks } from "./agents-utils.ts";
|
||||
|
||||
describe("resolveEffectiveModelFallbacks", () => {
|
||||
it("inherits defaults when no entry fallbacks are configured", () => {
|
||||
const entryModel = undefined;
|
||||
const defaultModel = {
|
||||
primary: "openai/gpt-5-nano",
|
||||
fallbacks: ["google/gemini-2.0-flash"],
|
||||
};
|
||||
|
||||
expect(resolveEffectiveModelFallbacks(entryModel, defaultModel)).toEqual([
|
||||
"google/gemini-2.0-flash",
|
||||
]);
|
||||
});
|
||||
|
||||
it("prefers entry fallbacks over defaults", () => {
|
||||
const entryModel = {
|
||||
primary: "openai/gpt-5-mini",
|
||||
fallbacks: ["openai/gpt-5-nano"],
|
||||
};
|
||||
const defaultModel = {
|
||||
primary: "openai/gpt-5",
|
||||
fallbacks: ["google/gemini-2.0-flash"],
|
||||
};
|
||||
|
||||
expect(resolveEffectiveModelFallbacks(entryModel, defaultModel)).toEqual(["openai/gpt-5-nano"]);
|
||||
});
|
||||
|
||||
it("keeps explicit empty entry fallback lists", () => {
|
||||
const entryModel = {
|
||||
primary: "openai/gpt-5-mini",
|
||||
fallbacks: [],
|
||||
};
|
||||
const defaultModel = {
|
||||
primary: "openai/gpt-5",
|
||||
fallbacks: ["google/gemini-2.0-flash"],
|
||||
};
|
||||
|
||||
expect(resolveEffectiveModelFallbacks(entryModel, defaultModel)).toEqual([]);
|
||||
});
|
||||
});
|
||||
@@ -244,6 +244,13 @@ export function resolveModelFallbacks(model?: unknown): string[] | null {
|
||||
return null;
|
||||
}
|
||||
|
||||
export function resolveEffectiveModelFallbacks(
|
||||
entryModel?: unknown,
|
||||
defaultModel?: unknown,
|
||||
): string[] | null {
|
||||
return resolveModelFallbacks(entryModel) ?? resolveModelFallbacks(defaultModel);
|
||||
}
|
||||
|
||||
export function parseFallbackList(value: string): string[] {
|
||||
return value
|
||||
.split(",")
|
||||
|
||||
@@ -24,7 +24,7 @@ import {
|
||||
parseFallbackList,
|
||||
resolveAgentConfig,
|
||||
resolveAgentEmoji,
|
||||
resolveModelFallbacks,
|
||||
resolveEffectiveModelFallbacks,
|
||||
resolveModelLabel,
|
||||
resolveModelPrimary,
|
||||
} from "./agents-utils.ts";
|
||||
@@ -390,7 +390,10 @@ function renderAgentOverview(params: {
|
||||
resolveModelPrimary(config.defaults?.model) ||
|
||||
(defaultModel !== "-" ? normalizeModelValue(defaultModel) : null);
|
||||
const effectivePrimary = modelPrimary ?? defaultPrimary ?? null;
|
||||
const modelFallbacks = resolveModelFallbacks(config.entry?.model);
|
||||
const modelFallbacks = resolveEffectiveModelFallbacks(
|
||||
config.entry?.model,
|
||||
config.defaults?.model,
|
||||
);
|
||||
const fallbackText = modelFallbacks ? modelFallbacks.join(", ") : "";
|
||||
const identityName =
|
||||
agentIdentity?.name?.trim() ||
|
||||
|
||||
@@ -37,6 +37,7 @@ export default defineConfig({
|
||||
"src/**/*.test.ts",
|
||||
"extensions/**/*.test.ts",
|
||||
"test/**/*.test.ts",
|
||||
"ui/src/ui/views/agents-utils.test.ts",
|
||||
"ui/src/ui/views/usage-render-details.test.ts",
|
||||
"ui/src/ui/controllers/agents.test.ts",
|
||||
],
|
||||
|
||||
Reference in New Issue
Block a user