docs(test): refresh stale model refs

Peter Steinberger
2026-04-04 08:05:33 +01:00
parent f38a3ae996
commit 14cfcdba1a
366 changed files with 883 additions and 890 deletions

View File

@@ -141,9 +141,9 @@ function pickModel(models: Array<Model<Api>>, raw?: string): Model<Api> | null {
}
const preferred = [
"claude-opus-4-5",
"claude-opus-4-6",
"claude-sonnet-4-6",
"claude-sonnet-4-6",
"claude-sonnet-4-5",
"claude-sonnet-4-0",
"claude-haiku-3-5",
];
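
For reference, the preference walk `pickModel` performs looks roughly like the sketch below (a minimal sketch with the model type reduced to the one field the walk reads; not the shipped implementation). It also shows why a repeated id in `preferred` is dead weight: the earlier occurrence already claims any match, so the refreshed list should carry "claude-sonnet-4-6" exactly once.

type ModelLike = { id: string };

function pickModelSketch(models: ModelLike[], raw?: string): ModelLike | null {
  // An explicit request wins when it names a known model.
  if (raw) {
    const exact = models.find((m) => m.id === raw);
    if (exact) return exact;
  }
  const preferred = [
    "claude-opus-4-6",
    "claude-sonnet-4-6",
    "claude-sonnet-4-5",
    "claude-sonnet-4-0",
    "claude-haiku-3-5",
  ];
  for (const id of preferred) {
    const match = models.find((m) => m.id === id);
    if (match) return match; // first hit wins; a duplicate entry can never fire
  }
  return models[0] ?? null; // assumed fallback; the real function may differ
}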

View File

@@ -86,12 +86,12 @@ describe("getSoonestCooldownExpiry", () => {
"openai:p2": {
cooldownUntil: now + 30_000,
cooldownReason: "rate_limit",
cooldownModel: "gpt-5.2",
cooldownModel: "gpt-5.4",
},
});
expect(
getSoonestCooldownExpiry(store, ["openai:p1", "openai:p2"], { now, forModel: "gpt-5.2" }),
getSoonestCooldownExpiry(store, ["openai:p1", "openai:p2"], { now, forModel: "gpt-5.4" }),
).toBe(now + 30_000);
});
@@ -107,12 +107,12 @@ describe("getSoonestCooldownExpiry", () => {
"openai:p2": {
cooldownUntil: now + 30_000,
cooldownReason: "rate_limit",
cooldownModel: "gpt-5.2",
cooldownModel: "gpt-5.4",
},
});
expect(
getSoonestCooldownExpiry(store, ["openai:p1", "openai:p2"], { now, forModel: "gpt-5.2" }),
getSoonestCooldownExpiry(store, ["openai:p1", "openai:p2"], { now, forModel: "gpt-5.4" }),
).toBe(now + 20_000);
});
});
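
The assertions pin the contract down well: a cooldown counts only if it is still in the future and was recorded for the model being asked about, and the soonest qualifying expiry wins. A minimal sketch under those assumptions, with the entry shape taken from the fixtures:

type CooldownEntry = {
  cooldownUntil?: number;
  cooldownReason?: string;
  cooldownModel?: string;
};

function getSoonestCooldownExpirySketch(
  store: Record<string, CooldownEntry>,
  profileKeys: string[],
  opts: { now: number; forModel?: string },
): number | undefined {
  let soonest: number | undefined;
  for (const key of profileKeys) {
    const entry = store[key];
    // Skip missing entries, already-expired cooldowns, and cooldowns
    // recorded for a different model.
    if (!entry?.cooldownUntil || entry.cooldownUntil <= opts.now) continue;
    if (entry.cooldownModel && opts.forModel && entry.cooldownModel !== opts.forModel) continue;
    if (soonest === undefined || entry.cooldownUntil < soonest) soonest = entry.cooldownUntil;
  }
  return soonest;
}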

View File

@@ -73,7 +73,7 @@ const { runBtwSideQuestion } = await import("./btw.js");
type RunBtwSideQuestionParams = Parameters<typeof runBtwSideQuestion>[0];
const DEFAULT_AGENT_DIR = "/tmp/agent";
const DEFAULT_MODEL = "claude-sonnet-4-5";
const DEFAULT_MODEL = "claude-sonnet-4-6";
const DEFAULT_PROVIDER = "anthropic";
const DEFAULT_REASONING_LEVEL = "off";
const DEFAULT_SESSION_KEY = "agent:main:main";
@@ -202,7 +202,7 @@ describe("runBtwSideQuestion", () => {
getLeafEntryMock.mockReturnValue(null);
resolveModelWithRegistryMock.mockReturnValue({
provider: "anthropic",
id: "claude-sonnet-4-5",
id: "claude-sonnet-4-6",
api: "anthropic-messages",
});
getApiKeyForModelMock.mockResolvedValue({ apiKey: "secret", mode: "api-key", source: "test" });
@@ -222,7 +222,7 @@ describe("runBtwSideQuestion", () => {
role: "assistant",
content: [],
provider: "anthropic",
model: "claude-sonnet-4-5",
model: "claude-sonnet-4-6",
},
},
{
@@ -233,7 +233,7 @@ describe("runBtwSideQuestion", () => {
role: "assistant",
content: [],
provider: "anthropic",
model: "claude-sonnet-4-5",
model: "claude-sonnet-4-6",
},
},
{
@@ -244,7 +244,7 @@ describe("runBtwSideQuestion", () => {
content: [{ type: "text", text: "Side answer." }],
provider: "anthropic",
api: "anthropic-messages",
model: "claude-sonnet-4-5",
model: "claude-sonnet-4-6",
stopReason: "stop",
usage: {
input: 1,

View File

@@ -31,7 +31,7 @@ describe("runCliAgent reliability", () => {
workspaceDir: "/tmp",
prompt: "hi",
provider: "codex-cli",
model: "gpt-5.2-codex",
model: "gpt-5.4",
timeoutMs: 1_000,
runId: "run-2",
cliSessionId: "thread-123",
@@ -62,7 +62,7 @@ describe("runCliAgent reliability", () => {
workspaceDir: "/tmp",
prompt: "hi",
provider: "codex-cli",
model: "gpt-5.2-codex",
model: "gpt-5.4",
timeoutMs: 1_000,
runId: "run-2b",
cliSessionId: "thread-123",
@@ -102,7 +102,7 @@ describe("runCliAgent reliability", () => {
workspaceDir: "/tmp",
prompt: "hi",
provider: "codex-cli",
model: "gpt-5.2-codex",
model: "gpt-5.4",
timeoutMs: 1_000,
runId: "run-3",
cliSessionId: "thread-123",
@@ -145,7 +145,7 @@ describe("runCliAgent reliability", () => {
workspaceDir: "/tmp",
prompt: "hi",
provider: "codex-cli",
model: "gpt-5.2-codex",
model: "gpt-5.4",
timeoutMs: 1_000,
runId: "run-retry-failure",
cliSessionId: "thread-123",

View File

@@ -119,7 +119,7 @@ describe("runCliAgent spawn path", () => {
workspaceDir: "/tmp",
prompt: "hi",
provider: "codex-cli",
model: "gpt-5.2-codex",
model: "gpt-5.4",
timeoutMs: 1_000,
runId: "run-1",
cliSessionId: "thread-123",
@@ -233,7 +233,7 @@ describe("runCliAgent spawn path", () => {
} satisfies OpenClawConfig,
prompt: "hi",
provider: "codex-cli",
model: "gpt-5.2-codex",
model: "gpt-5.4",
timeoutMs: 1_000,
runId: "run-warning",
cliSessionId: "thread-123",
@@ -278,7 +278,7 @@ describe("runCliAgent spawn path", () => {
workspaceDir: tempDir,
prompt: `[media attached: ${sourceImage} (image/png)]\n\n<media:image>`,
provider: "codex-cli",
model: "gpt-5.2-codex",
model: "gpt-5.4",
timeoutMs: 1_000,
runId: "run-prompt-image",
});
@@ -369,7 +369,7 @@ describe("runCliAgent spawn path", () => {
prompt: `[media attached: ${sourceImage} (image/png)]\n\n<media:image>`,
images: [{ type: "image", data: SMALL_PNG_BASE64, mimeType: "image/png" }],
provider: "codex-cli",
model: "gpt-5.2-codex",
model: "gpt-5.4",
timeoutMs: 1_000,
runId: "run-explicit-image-precedence",
});
@@ -419,7 +419,7 @@ describe("runCliAgent spawn path", () => {
config: cfg,
prompt: "hi",
provider: "codex-cli",
model: "gpt-5.2-codex",
model: "gpt-5.4",
timeoutMs: 1_000,
runId: "run-4",
});

View File

@@ -211,7 +211,7 @@ export async function runCliAgentWithBackendConfig(params: {
} satisfies OpenClawConfig,
prompt: "hi",
provider: "codex-cli",
model: "gpt-5.2-codex",
model: "gpt-5.4",
timeoutMs: 1_000,
runId: params.runId,
cliSessionId: "thread-123",

View File

@@ -47,7 +47,7 @@ describe("compaction retry integration", () => {
content: [{ type: "text", text: "Test response" }],
api: "openai-responses",
provider: "openai",
model: "gpt-5.2",
model: "gpt-5.4",
usage: {
input: 0,
output: 0,

View File

@@ -30,7 +30,7 @@ function makeAssistantToolCall(
{ type: "text", text },
{ type: "toolCall", id: toolCallId, name: "test_tool", arguments: {} },
],
model: "gpt-5.2",
model: "gpt-5.4",
stopReason: "stop",
timestamp,
});
@@ -225,7 +225,7 @@ describe("pruneHistoryForContextShare", () => {
{ type: "toolCall", id: "call_a", name: "tool_a", arguments: {} },
{ type: "toolCall", id: "call_b", name: "tool_b", arguments: {} },
],
model: "gpt-5.2",
model: "gpt-5.4",
stopReason: "stop",
timestamp: 1,
}),

View File

@@ -30,7 +30,7 @@ async function loadFreshCompactionModuleForTest() {
function makeAssistantToolCall(timestamp: number): AssistantMessage {
return makeAgentAssistantMessage({
content: [{ type: "toolCall", id: "call_1", name: "browser", arguments: { action: "tabs" } }],
model: "gpt-5.2",
model: "gpt-5.4",
stopReason: "toolUse",
timestamp,
});

View File

@@ -473,13 +473,13 @@ describe("failover-error", () => {
it("coerces failover-worthy errors into FailoverError with metadata", () => {
const err = coerceToFailoverError("credit balance too low", {
provider: "anthropic",
model: "claude-opus-4-5",
model: "claude-opus-4-6",
});
expect(err?.name).toBe("FailoverError");
expect(err?.reason).toBe("billing");
expect(err?.status).toBe(402);
expect(err?.provider).toBe("anthropic");
expect(err?.model).toBe("claude-opus-4-5");
expect(err?.model).toBe("claude-opus-4-6");
});
it("maps overloaded to a 503 fallback status", () => {

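These tests describe a coercion from error text to a typed failover error. A hedged sketch of that shape (the rule table is illustrative; only the billing/402 and overloaded/503 mappings are attested by the tests):

class FailoverErrorSketch extends Error {
  override name = "FailoverError";
  constructor(
    message: string,
    readonly reason: string,
    readonly status: number,
    readonly provider?: string,
    readonly model?: string,
  ) {
    super(message);
  }
}

function coerceToFailoverErrorSketch(
  message: string,
  meta: { provider?: string; model?: string },
): FailoverErrorSketch | undefined {
  const rules: Array<[RegExp, string, number]> = [
    [/credit balance|billing/i, "billing", 402],
    [/overloaded/i, "overloaded", 503], // fallback status per the test name
  ];
  for (const [pattern, reason, status] of rules) {
    if (pattern.test(message)) {
      return new FailoverErrorSketch(message, reason, status, meta.provider, meta.model);
    }
  }
  return undefined; // not failover-worthy; callers rethrow the original error
}
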
View File

@@ -19,18 +19,18 @@ describe("model-selection-display", () => {
expect(
resolveModelDisplayRef({
runtimeProvider: "openai",
runtimeModel: "gpt-5.2",
runtimeModel: "gpt-5.4",
}),
).toBe("openai/gpt-5.2");
).toBe("openai/gpt-5.4");
});
it("falls back to override values when runtime values are absent", () => {
expect(
resolveModelDisplayRef({
overrideProvider: "openrouter",
overrideModel: "anthropic/claude-sonnet-4-5",
overrideModel: "anthropic/claude-sonnet-4-6",
}),
).toBe("anthropic/claude-sonnet-4-5");
).toBe("anthropic/claude-sonnet-4-6");
});
});
@@ -39,9 +39,9 @@ describe("model-selection-display", () => {
expect(
resolveModelDisplayName({
runtimeProvider: "openrouter",
runtimeModel: "anthropic/claude-sonnet-4-5",
runtimeModel: "anthropic/claude-sonnet-4-6",
}),
).toBe("claude-sonnet-4-5");
).toBe("claude-sonnet-4-6");
});
it("returns a stable empty-state label", () => {

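Two small rules fall out of these expectations: the display ref prefers runtime values over overrides and keeps a multi-segment model id intact instead of prepending the gateway provider, and the display name is simply the last id segment. A minimal sketch, with the parameter set assumed from the fixtures:

type DisplayParams = {
  runtimeProvider?: string;
  runtimeModel?: string;
  overrideProvider?: string;
  overrideModel?: string;
};

function resolveModelDisplayRefSketch(p: DisplayParams): string {
  const provider = p.runtimeProvider ?? p.overrideProvider ?? "";
  const model = p.runtimeModel ?? p.overrideModel ?? "";
  // "anthropic/claude-sonnet-4-6" already names its upstream provider,
  // so the gateway provider ("openrouter") is not prepended.
  return model.includes("/") ? model : `${provider}/${model}`;
}

function resolveModelDisplayNameSketch(p: DisplayParams): string {
  const model = p.runtimeModel ?? p.overrideModel ?? "";
  return model.split("/").at(-1) ?? "";
}
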
View File

@@ -21,7 +21,7 @@ import {
const EXPLICIT_ALLOWLIST_CONFIG = {
agents: {
defaults: {
model: { primary: "openai/gpt-5.2" },
model: { primary: "openai/gpt-5.4" },
models: {
"anthropic/claude-sonnet-4-6": { alias: "sonnet" },
},
@@ -30,8 +30,8 @@ const EXPLICIT_ALLOWLIST_CONFIG = {
} as OpenClawConfig;
const BUNDLED_ALLOWLIST_CATALOG = [
{ provider: "anthropic", id: "claude-sonnet-4-5", name: "Claude Sonnet 4.5" },
{ provider: "openai", id: "gpt-5.2", name: "gpt-5.2" },
{ provider: "anthropic", id: "claude-sonnet-4-6", name: "Claude Sonnet 4.5" },
{ provider: "openai", id: "gpt-5.4", name: "gpt-5.4" },
];
const ANTHROPIC_OPUS_CATALOG = [
@@ -219,9 +219,9 @@ describe("model-selection", () => {
},
{
name: "passes through openrouter upstream provider ids",
variants: ["openrouter/anthropic/claude-sonnet-4-5"],
variants: ["openrouter/anthropic/claude-sonnet-4-6"],
defaultProvider: "openai",
expected: { provider: "openrouter", model: "anthropic/claude-sonnet-4-5" },
expected: { provider: "openrouter", model: "anthropic/claude-sonnet-4-6" },
},
{
name: "normalizes Vercel Claude shorthand to anthropic-prefixed model ids",
@@ -243,9 +243,9 @@ describe("model-selection", () => {
},
{
name: "passes through non-Claude Vercel model ids unchanged",
variants: ["vercel-ai-gateway/openai/gpt-5.2"],
variants: ["vercel-ai-gateway/openai/gpt-5.4"],
defaultProvider: "openai",
expected: { provider: "vercel-ai-gateway", model: "openai/gpt-5.2" },
expected: { provider: "vercel-ai-gateway", model: "openai/gpt-5.4" },
},
{
name: "keeps already-suffixed codex variants unchanged",
@@ -537,7 +537,7 @@ describe("model-selection", () => {
catalog: BUNDLED_ALLOWLIST_CATALOG,
raw: "anthropic/claude-sonnet-4-6",
defaultProvider: "openai",
defaultModel: "gpt-5.2",
defaultModel: "gpt-5.4",
});
expect(result).toEqual({

View File

@@ -498,7 +498,7 @@ describeLive("live models (profile keys)", () => {
if (
model.provider === "openai" &&
model.api === "openai-responses" &&
model.id === "gpt-5.2"
model.id === "gpt-5.4"
) {
logProgress(`${progressLabel}: tool-only regression`);
const noopTool = {

View File

@@ -5,8 +5,8 @@ import { describe, expect, it } from "vitest";
function buildModel(): Model<"openai-responses"> {
return {
id: "gpt-5.2",
name: "gpt-5.2",
id: "gpt-5.4",
name: "gpt-5.4",
api: "openai-responses",
provider: "openai",
baseUrl: "https://api.openai.com/v1",
@@ -66,7 +66,7 @@ function buildAssistantMessage(params: {
role: "assistant",
api: "openai-responses",
provider: "openai",
model: "gpt-5.2",
model: "gpt-5.4",
usage: ZERO_USAGE,
stopReason: params.stopReason,
timestamp: Date.now(),

View File

@@ -355,7 +355,7 @@ describe("OpenAIWebSocketManager", () => {
const event: ResponseCreateEvent = {
type: "response.create",
model: "gpt-5.2",
model: "gpt-5.4",
input: [{ type: "message", role: "user", content: "Hello" }],
};
manager.send(event);
@@ -368,7 +368,7 @@ describe("OpenAIWebSocketManager", () => {
const manager = buildManager();
const event: ClientEvent = {
type: "response.create",
model: "gpt-5.2",
model: "gpt-5.4",
};
expect(() => manager.send(event)).toThrow(/cannot send/);
});
@@ -378,7 +378,7 @@ describe("OpenAIWebSocketManager", () => {
const event: ResponseCreateEvent = {
type: "response.create",
model: "gpt-5.2",
model: "gpt-5.4",
previous_response_id: "resp_abc123",
input: [{ type: "function_call_output", call_id: "call_1", output: "result" }],
};
@@ -686,13 +686,13 @@ describe("OpenAIWebSocketManager", () => {
it("sends a response.create event with generate: false", async () => {
const { manager, sock } = await createConnectedManager();
manager.warmUp({ model: "gpt-5.2", instructions: "You are helpful." });
manager.warmUp({ model: "gpt-5.4", instructions: "You are helpful." });
expect(sock.sentMessages).toHaveLength(1);
const sent = JSON.parse(sock.sentMessages[0] ?? "{}") as Record<string, unknown>;
expect(sent["type"]).toBe("response.create");
expect(sent["generate"]).toBe(false);
expect(sent["model"]).toBe("gpt-5.2");
expect(sent["model"]).toBe("gpt-5.4");
expect(sent["input"]).toEqual([]);
expect(sent["instructions"]).toBe("You are helpful.");
});
@@ -701,7 +701,7 @@ describe("OpenAIWebSocketManager", () => {
const { manager, sock } = await createConnectedManager();
manager.warmUp({
model: "gpt-5.2",
model: "gpt-5.4",
tools: [{ type: "function", name: "exec", description: "Run a command" }],
});
@@ -795,7 +795,7 @@ describe("OpenAIWebSocketManager", () => {
manager.onMessage((e) => received.push(e));
// Send initial turn
manager.send({ type: "response.create", model: "gpt-5.2", input: "Hello" });
manager.send({ type: "response.create", model: "gpt-5.4", input: "Hello" });
// Simulate streaming events from server
sock.simulateMessage({ type: "response.created", response: makeResponse("resp_1") });
@@ -817,7 +817,7 @@ describe("OpenAIWebSocketManager", () => {
// Send continuation turn using the tracked previous_response_id
manager.send({
type: "response.create",
model: "gpt-5.2",
model: "gpt-5.4",
previous_response_id: manager.previousResponseId!,
input: [{ type: "function_call_output", call_id: "call_99", output: "tool result" }],
});
@@ -844,7 +844,7 @@ function makeResponse(
object: "response",
created_at: Date.now(),
status,
model: "gpt-5.2",
model: "gpt-5.4",
output: [],
usage: { input_tokens: 10, output_tokens: 5, total_tokens: 15 },
};

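Pieced together from only the calls these tests make, the turn sequence the suite drives looks roughly like this (manager construction elided; the structural type lists just the members exercised, so this is a sketch rather than the real API surface):

type WsManagerLike = {
  warmUp(opts: { model: string; instructions?: string }): void;
  send(event: { type: string; model: string; [key: string]: unknown }): void;
  previousResponseId?: string;
};

function driveTwoTurnsSketch(manager: WsManagerLike): void {
  // Warm-up primes the session without generating a response
  // (generate: false on the wire).
  manager.warmUp({ model: "gpt-5.4", instructions: "You are helpful." });

  // First turn.
  manager.send({ type: "response.create", model: "gpt-5.4", input: "Hello" });

  // Continuation turn: thread the tracked previous_response_id so the
  // tool result attaches to the prior response.
  manager.send({
    type: "response.create",
    model: "gpt-5.4",
    previous_response_id: manager.previousResponseId,
    input: [{ type: "function_call_output", call_id: "call_99", output: "tool result" }],
  });
}
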
View File

@@ -321,7 +321,7 @@ type InternalEvents = {
* }
* });
*
* manager.send({ type: "response.create", model: "gpt-5.2", input: [...] });
* manager.send({ type: "response.create", model: "gpt-5.4", input: [...] });
* ```
*/
export class OpenAIWebSocketManager extends EventEmitter<InternalEvents> {

View File

@@ -38,8 +38,8 @@ let openAIWsConnectionModule: OpenAIWsConnectionModule;
const model = {
api: "openai-responses" as const,
provider: "openai",
id: "gpt-5.2",
name: "gpt-5.2",
id: "gpt-5.4",
name: "gpt-5.4",
contextWindow: 128_000,
maxTokens: 4_096,
reasoning: true,

View File

@@ -281,7 +281,7 @@ function assistantMsg(
stopReason: toolCalls.length > 0 ? "toolUse" : "stop",
api: "openai-responses",
provider: "openai",
model: "gpt-5.2",
model: "gpt-5.4",
usage: {},
timestamp: 0,
};
@@ -305,7 +305,7 @@ function makeFakeAssistantMessage(text: string) {
stopReason: "stop" as const,
api: "openai-responses",
provider: "openai",
model: "gpt-5.2",
model: "gpt-5.4",
usage: {
input: 10,
output: 5,
@@ -348,7 +348,7 @@ function makeResponseObject(
object: "response",
created_at: Date.now(),
status: "completed",
model: "gpt-5.2",
model: "gpt-5.4",
output,
usage: { input_tokens: 100, output_tokens: 50, total_tokens: 150 },
};
@@ -583,7 +583,7 @@ describe("convertMessagesToInputItems", () => {
stopReason: "stop",
api: "openai-responses",
provider: "openai",
model: "gpt-5.2",
model: "gpt-5.4",
usage: {},
timestamp: 0,
};
@@ -694,7 +694,7 @@ describe("convertMessagesToInputItems", () => {
stopReason: "stop",
api: "openai-responses",
provider: "openai",
model: "gpt-5.2",
model: "gpt-5.4",
usage: {},
timestamp: 0,
};
@@ -723,7 +723,7 @@ describe("convertMessagesToInputItems", () => {
stopReason: "stop",
api: "openai-responses",
provider: "openai",
model: "gpt-5.2",
model: "gpt-5.4",
usage: {},
timestamp: 0,
};
@@ -751,7 +751,7 @@ describe("convertMessagesToInputItems", () => {
stopReason: "stop",
api: "openai-responses",
provider: "openai",
model: "gpt-5.2",
model: "gpt-5.4",
usage: {},
timestamp: 0,
};
@@ -779,7 +779,7 @@ describe("convertMessagesToInputItems", () => {
stopReason: "stop",
api: "openai-responses",
provider: "openai",
model: "gpt-5.2",
model: "gpt-5.4",
usage: {},
timestamp: 0,
};
@@ -806,7 +806,7 @@ describe("convertMessagesToInputItems", () => {
// ─────────────────────────────────────────────────────────────────────────────
describe("buildAssistantMessageFromResponse", () => {
const modelInfo = { api: "openai-responses", provider: "openai", id: "gpt-5.2" };
const modelInfo = { api: "openai-responses", provider: "openai", id: "gpt-5.4" };
it("extracts text content from a message output item", () => {
const response = makeResponseObject("resp_1", "Hello from assistant");
@@ -865,7 +865,7 @@ describe("buildAssistantMessageFromResponse", () => {
const msg = buildAssistantMessageFromResponse(response, modelInfo);
expect(msg.api).toBe("openai-responses");
expect(msg.provider).toBe("openai");
expect(msg.model).toBe("gpt-5.2");
expect(msg.model).toBe("gpt-5.4");
});
it("handles empty output gracefully", () => {
@@ -891,7 +891,7 @@ describe("buildAssistantMessageFromResponse", () => {
object: "response",
created_at: Date.now(),
status: "completed",
model: "gpt-5.2",
model: "gpt-5.4",
output: [
{
type: "reasoning",
@@ -923,7 +923,7 @@ describe("buildAssistantMessageFromResponse", () => {
object: "response",
created_at: Date.now(),
status: "completed",
model: "gpt-5.2",
model: "gpt-5.4",
output: [
{
type: "reasoning.summary",
@@ -950,7 +950,7 @@ describe("buildAssistantMessageFromResponse", () => {
object: "response",
created_at: Date.now(),
status: "completed",
model: "gpt-5.2",
model: "gpt-5.4",
output: [
{
type: "reasoning.summary",
@@ -986,7 +986,7 @@ describe("buildAssistantMessageFromResponse", () => {
object: "response",
created_at: Date.now(),
status: "completed",
model: "gpt-5.2",
model: "gpt-5.4",
output: [
{
type: "reasoning",
@@ -1007,7 +1007,7 @@ describe("buildAssistantMessageFromResponse", () => {
object: "response",
created_at: Date.now(),
status: "completed",
model: "gpt-5.2",
model: "gpt-5.4",
output: [
{
type: "reasoning",
@@ -1083,7 +1083,7 @@ describe("planTurnInput", () => {
object: "response",
created_at: Date.now(),
status: "completed",
model: "gpt-5.2",
model: "gpt-5.4",
output: [
{
type: "reasoning",
@@ -1108,7 +1108,7 @@ describe("planTurnInput", () => {
buildAssistantMessageFromResponse(turn1Response, {
api: "openai-responses",
provider: "openai",
id: "gpt-5.2",
id: "gpt-5.4",
}),
] as Parameters<typeof convertMessagesToInputItems>[0],
tools: [],
@@ -1163,7 +1163,7 @@ describe("createOpenAIWebSocketStreamFn", () => {
const modelStub = {
api: "openai-responses",
provider: "openai",
id: "gpt-5.2",
id: "gpt-5.4",
contextWindow: 128000,
maxTokens: 4096,
reasoning: false,
@@ -1266,7 +1266,7 @@ describe("createOpenAIWebSocketStreamFn", () => {
expect(manager.sentEvents).toHaveLength(1);
const sent = manager.sentEvents[0] as { type: string; model: string; input: unknown[] };
expect(sent.type).toBe("response.create");
expect(sent.model).toBe("gpt-5.2");
expect(sent.model).toBe("gpt-5.4");
expect(Array.isArray(sent.input)).toBe(true);
});
@@ -1752,7 +1752,7 @@ describe("createOpenAIWebSocketStreamFn", () => {
object: "response",
created_at: Date.now(),
status: "completed",
model: "gpt-5.2",
model: "gpt-5.4",
output: [
{
type: "reasoning",
@@ -2371,7 +2371,7 @@ describe("releaseWsSession / hasWsSession", () => {
{
api: "openai-responses",
provider: "openai",
id: "gpt-5.2",
id: "gpt-5.4",
contextWindow: 128000,
maxTokens: 4096,
reasoning: false,
@@ -2407,7 +2407,7 @@ describe("releaseWsSession / hasWsSession", () => {
{
api: "openai-responses",
provider: "openai",
id: "gpt-5.2",
id: "gpt-5.4",
contextWindow: 128000,
maxTokens: 4096,
reasoning: false,

View File

@@ -24,7 +24,7 @@ function makeToolCallResultPairInput(): Array<AssistantMessage | ToolResultMessa
arguments: { path: "package.json" },
},
],
model: "gpt-5.2",
model: "gpt-5.4",
stopReason: "toolUse",
timestamp: nextTimestamp(),
}),
@@ -43,7 +43,7 @@ function makeEmptyAssistantErrorMessage(): AssistantMessage {
return makeAgentAssistantMessage({
stopReason: "error",
content: [],
model: "gpt-5.2",
model: "gpt-5.4",
timestamp: nextTimestamp(),
}) satisfies AssistantMessage;
}
@@ -54,7 +54,7 @@ function makeOpenAiResponsesAssistantMessage(
): AssistantMessage {
return makeAgentAssistantMessage({
content,
model: "gpt-5.2",
model: "gpt-5.4",
stopReason,
timestamp: nextTimestamp(),
});
@@ -202,7 +202,7 @@ describe("sanitizeSessionMessagesImages", () => {
],
api: "openai-responses",
provider: "openai",
model: "gpt-5.2",
model: "gpt-5.4",
usage: {
input: 0,
output: 0,
@@ -230,7 +230,7 @@ describe("sanitizeSessionMessagesImages", () => {
content: [{ type: "text", text: "" }],
api: "openai-responses",
provider: "openai",
model: "gpt-5.2",
model: "gpt-5.4",
usage: {
input: 0,
output: 0,

View File

@@ -18,13 +18,13 @@ const describeGeminiLive = GEMINI_LIVE && GEMINI_KEY ? describe : describe.skip;
describeLive("pi embedded extra params (live)", () => {
it("applies config maxTokens to openai streamFn", async () => {
const model = getModel("openai", "gpt-5.2") as unknown as Model<"openai-completions">;
const model = getModel("openai", "gpt-5.4") as unknown as Model<"openai-completions">;
const cfg: OpenClawConfig = {
agents: {
defaults: {
models: {
"openai/gpt-5.2": {
"openai/gpt-5.4": {
// OpenAI Responses enforces a minimum max_output_tokens of 16.
params: {
maxTokens: 16,
@@ -117,7 +117,7 @@ describeAnthropicLive("pi embedded extra params (anthropic live)", () => {
method: "POST",
headers,
body: JSON.stringify({
model: "claude-sonnet-4-5",
model: "claude-sonnet-4-6",
max_tokens: 32,
service_tier: serviceTier,
messages: [{ role: "user", content: "Reply with OK." }],

View File

@@ -718,7 +718,7 @@ describeCacheLive("pi embedded runner prompt caching (live)", () => {
provider: "openai",
api: "openai-responses",
envVar: "OPENCLAW_LIVE_OPENAI_CACHE_MODEL",
preferredModelIds: ["gpt-5.4-mini", "gpt-5.4", "gpt-5.2"],
preferredModelIds: ["gpt-5.4-mini", "gpt-5.4", "gpt-5.4"],
});
logLiveCache(`openai model=${fixture.model.provider}/${fixture.model.id}`);
}, 120_000);
@@ -927,7 +927,7 @@ describeCacheLive("pi embedded runner prompt caching (live)", () => {
provider: "anthropic",
api: "anthropic-messages",
envVar: "OPENCLAW_LIVE_ANTHROPIC_CACHE_MODEL",
preferredModelIds: ["claude-sonnet-4-6", "claude-sonnet-4-5", "claude-haiku-3-5"],
preferredModelIds: ["claude-sonnet-4-6", "claude-sonnet-4-6", "claude-haiku-3-5"],
});
logLiveCache(`anthropic model=${fixture.model.provider}/${fixture.model.id}`);
}, 120_000);

View File

@@ -13,7 +13,7 @@ describe("sanitizeSessionHistory openai tool id preservation", () => {
makeModelSnapshotEntry({
provider: "openai",
modelApi: "openai-responses",
modelId: "gpt-5.2-codex",
modelId: "gpt-5.4",
}),
]);
@@ -58,7 +58,7 @@ describe("sanitizeSessionHistory openai tool id preservation", () => {
messages: makeMessages(withReasoning),
modelApi: "openai-responses",
provider: "openai",
modelId: "gpt-5.2-codex",
modelId: "gpt-5.4",
sessionManager: makeSessionManager(),
sessionId: "test-session",
});

View File

@@ -138,7 +138,7 @@ export function makeSnapshotChangedOpenAIReasoningScenario() {
return {
sessionManager: makeInMemorySessionManager(sessionEntries),
messages: makeReasoningAssistantMessages({ thinkingSignature: "object" }),
modelId: "gpt-5.2-codex",
modelId: "gpt-5.4",
};
}

View File

@@ -159,7 +159,7 @@ describe("sanitizeSessionHistory", () => {
],
api: "openai-responses",
provider: "openai",
model: "gpt-5.2",
model: "gpt-5.4",
usage: makeUsage(0, 0, 0),
stopReason: "stop",
timestamp: nextTimestamp(),
@@ -185,7 +185,7 @@ describe("sanitizeSessionHistory", () => {
content: [{ type: "text", text: params.text }],
api: "openai-responses",
provider: "openai",
model: "gpt-5.2",
model: "gpt-5.4",
stopReason: "stop",
timestamp: params.timestamp ?? nextTimestamp(),
usage: params.usage,
@@ -209,7 +209,7 @@ describe("sanitizeSessionHistory", () => {
content,
api: "openai-responses",
provider: "openai",
model: "gpt-5.2",
model: "gpt-5.4",
usage: params.usage ?? makeUsage(0, 0, 0),
stopReason: params.stopReason ?? "stop",
timestamp: params.timestamp ?? nextTimestamp(),
@@ -363,7 +363,7 @@ describe("sanitizeSessionHistory", () => {
messages: mockMessages,
modelApi: "openai-completions",
provider: "openai",
modelId: "gpt-5.2",
modelId: "gpt-5.4",
sessionManager: mockSessionManager,
sessionId: TEST_SESSION_ID,
});
@@ -769,7 +769,7 @@ describe("sanitizeSessionHistory", () => {
makeModelSnapshotEntry({
provider: "openai",
modelApi: "openai-responses",
modelId: "gpt-5.2-codex",
modelId: "gpt-5.4",
}),
];
const sessionManager = makeInMemorySessionManager(sessionEntries);
@@ -778,7 +778,7 @@ describe("sanitizeSessionHistory", () => {
const result = await sanitizeWithOpenAIResponses({
sanitizeSessionHistory,
messages,
modelId: "gpt-5.2-codex",
modelId: "gpt-5.4",
sessionManager,
});
@@ -798,7 +798,7 @@ describe("sanitizeSessionHistory", () => {
makeModelSnapshotEntry({
provider: "openai",
modelApi: "openai-responses",
modelId: "gpt-5.2",
modelId: "gpt-5.4",
}),
];
const sessionManager = makeInMemorySessionManager(sessionEntries);
@@ -957,7 +957,7 @@ describe("sanitizeSessionHistory", () => {
const messages = makeThinkingAndTextAssistantMessages();
const result = await sanitizeGithubCopilotHistory({ messages, modelId: "gpt-5.2" });
const result = await sanitizeGithubCopilotHistory({ messages, modelId: "gpt-5.4" });
const types = getAssistantContentTypes(result);
expect(types).toContain("thinking");
});

View File

@@ -233,7 +233,7 @@ describe("cacheRetention default behavior", () => {
{ cacheRetention: "long" },
"amazon-bedrock",
"openai-completions",
"us.anthropic.claude-sonnet-4-5",
"us.anthropic.claude-sonnet-4-6",
),
).toBe("long");
});

View File

@@ -150,7 +150,7 @@ function buildDynamicModel(
if (existing) {
return undefined;
}
const template = findTemplate(params, "github-copilot", ["gpt-5.2-codex"]);
const template = findTemplate(params, "github-copilot", ["gpt-5.4"]);
if (lower === "gpt-5.4" && template) {
return cloneTemplate(
template,
@@ -182,17 +182,17 @@ function buildDynamicModel(
case "openai-codex": {
const template =
lower === "gpt-5.4"
? findTemplate(params, "openai-codex", ["gpt-5.4", "gpt-5.2-codex"])
? findTemplate(params, "openai-codex", ["gpt-5.4", "gpt-5.4"])
: lower === "gpt-5.4-mini"
? findTemplate(params, "openai-codex", [
"gpt-5.4",
"gpt-5.1-codex-mini",
"gpt-5.3-codex",
"gpt-5.2-codex",
"gpt-5.4",
])
: lower === "gpt-5.3-codex-spark"
? findTemplate(params, "openai-codex", ["gpt-5.4", "gpt-5.2-codex"])
: findTemplate(params, "openai-codex", ["gpt-5.2-codex"]);
? findTemplate(params, "openai-codex", ["gpt-5.4", "gpt-5.4"])
: findTemplate(params, "openai-codex", ["gpt-5.4"]);
const fallback = {
provider: "openai-codex",
api: "openai-codex-responses",
@@ -256,13 +256,13 @@ function buildDynamicModel(
case "openai": {
const templateIds =
lower === "gpt-5.4"
? ["gpt-5.2"]
? ["gpt-5.4"]
: lower === "gpt-5.4-pro"
? ["gpt-5.2-pro", "gpt-5.2"]
? ["gpt-5.4-pro", "gpt-5.4"]
: lower === "gpt-5.4-mini"
? ["gpt-5-mini"]
? ["gpt-5.4-mini"]
: lower === "gpt-5.4-nano"
? ["gpt-5-nano", "gpt-5-mini"]
? ["gpt-5.4-nano", "gpt-5.4-mini"]
: undefined;
if (!templateIds) {
return undefined;
@@ -330,7 +330,7 @@ function buildDynamicModel(
const template = findTemplate(
params,
"anthropic",
lower === "claude-opus-4-6" ? ["claude-opus-4-5"] : ["claude-sonnet-4-5"],
lower === "claude-opus-4-6" ? ["claude-opus-4-6"] : ["claude-sonnet-4-6"],
);
return cloneTemplate(
template,

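The template chains above resolve in declared order, so a list like ["gpt-5.4", "gpt-5.4"] could never consult its second entry; that is why the duplicates introduced by the rename are dropped in this hunk. A sketch of the lookup-and-clone shape (assumed; the real helpers take `params` and richer model objects):

type TemplateModel = { provider: string; id: string; name: string };

// First id that resolves against the known templates wins.
function findTemplateSketch(
  known: TemplateModel[],
  provider: string,
  templateIds: string[],
): TemplateModel | undefined {
  for (const id of templateIds) {
    const hit = known.find((m) => m.provider === provider && m.id === id);
    if (hit) return hit;
  }
  return undefined;
}

// Forward-compat: reuse a known model's definition under the requested id.
function cloneTemplateSketch(template: TemplateModel, id: string, name: string): TemplateModel {
  return { ...template, id, name };
}
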
View File

@@ -14,7 +14,7 @@ export const makeModel = (id: string): ModelDefinitionConfig => ({
});
export const OPENAI_CODEX_TEMPLATE_MODEL = {
id: "gpt-5.2-codex",
id: "gpt-5.4",
name: "GPT-5.2 Codex",
provider: "openai-codex",
api: "openai-codex-responses",
@@ -40,12 +40,7 @@ function mockTemplateModel(
}
export function mockOpenAICodexTemplateModel(discoverModelsMock: DiscoverModelsMock): void {
mockTemplateModel(
discoverModelsMock,
"openai-codex",
"gpt-5.2-codex",
OPENAI_CODEX_TEMPLATE_MODEL,
);
mockTemplateModel(discoverModelsMock, "openai-codex", "gpt-5.4", OPENAI_CODEX_TEMPLATE_MODEL);
}
export function buildOpenAICodexForwardCompatExpectation(

View File

@@ -913,9 +913,9 @@ describe("resolveModel", () => {
it("applies provider overrides to openai gpt-5.4 forward-compat models", () => {
mockDiscoveredModel(discoverModels, {
provider: "openai",
modelId: "gpt-5.2",
modelId: "gpt-5.4",
templateModel: buildForwardCompatTemplate({
id: "gpt-5.2",
id: "gpt-5.4",
name: "GPT-5.2",
provider: "openai",
api: "openai-responses",
@@ -1003,12 +1003,12 @@ describe("resolveModel", () => {
});
});
it("builds an openai fallback for gpt-5.4 mini from the gpt-5-mini template", () => {
it("builds an openai fallback for gpt-5.4 mini from the gpt-5.4-mini template", () => {
mockDiscoveredModel(discoverModels, {
provider: "openai",
modelId: "gpt-5-mini",
modelId: "gpt-5.4-mini",
templateModel: buildForwardCompatTemplate({
id: "gpt-5-mini",
id: "gpt-5.4-mini",
name: "GPT-5 mini",
provider: "openai",
api: "openai-responses",
@@ -1035,12 +1035,12 @@ describe("resolveModel", () => {
});
});
it("builds an openai fallback for gpt-5.4 nano from the gpt-5-nano template", () => {
it("builds an openai fallback for gpt-5.4 nano from the gpt-5.4-nano template", () => {
mockDiscoveredModel(discoverModels, {
provider: "openai",
modelId: "gpt-5-nano",
modelId: "gpt-5.4-nano",
templateModel: buildForwardCompatTemplate({
id: "gpt-5-nano",
id: "gpt-5.4-nano",
name: "GPT-5 nano",
provider: "openai",
api: "openai-responses",

View File

@@ -12,7 +12,7 @@ describe("sanitizeSessionHistory toolResult details stripping", () => {
const messages: AgentMessage[] = [
makeAgentAssistantMessage({
content: [{ type: "toolCall", id: "call_1", name: "web_fetch", arguments: { url: "x" } }],
model: "gpt-5.2",
model: "gpt-5.4",
stopReason: "toolUse",
timestamp: 1,
}),
@@ -38,7 +38,7 @@ describe("sanitizeSessionHistory toolResult details stripping", () => {
messages,
modelApi: "anthropic-messages",
provider: "anthropic",
modelId: "claude-opus-4-5",
modelId: "claude-opus-4-6",
sessionManager: sm,
sessionId: "test",
});

View File

@@ -49,7 +49,7 @@ function makeAssistantMessage(
role: "assistant",
api: "openai-responses",
provider: "openai",
model: "gpt-5.2",
model: "gpt-5.4",
usage: { input: 0, output: 0 } as AssistantMessage["usage"],
stopReason: "end_turn" as AssistantMessage["stopReason"],
timestamp: Date.now(),

View File

@@ -169,7 +169,7 @@ describeCacheLive("MCP-style prompt caching (live)", () => {
provider: "openai",
api: "openai-responses",
envVar: "OPENCLAW_LIVE_OPENAI_CACHE_MODEL",
preferredModelIds: ["gpt-5.4-mini", "gpt-5.4", "gpt-5.2"],
preferredModelIds: ["gpt-5.4-mini", "gpt-5.4", "gpt-5.4"],
});
logLiveCache(`openai mcp-style model=${fixture.model.provider}/${fixture.model.id}`);

View File

@@ -75,7 +75,7 @@ describe("Agent-specific tool filtering", () => {
workspaceDir,
agentDir: "/tmp/agent",
modelProvider: "openai",
modelId: "gpt-5.2",
modelId: "gpt-5.4",
});
const applyPatchTool = tools.find((t) => t.name === "apply_patch");
@@ -204,7 +204,7 @@ describe("Agent-specific tool filtering", () => {
workspaceDir: "/tmp/test",
agentDir: "/tmp/agent",
modelProvider: "openai",
modelId: "gpt-5.2",
modelId: "gpt-5.4",
});
const toolNames = tools.map((t) => t.name);
@@ -229,7 +229,7 @@ describe("Agent-specific tool filtering", () => {
workspaceDir: "/tmp/test",
agentDir: "/tmp/agent",
modelProvider: "openai",
modelId: "gpt-5.2",
modelId: "gpt-5.4",
});
const toolNames = tools.map((t) => t.name);

View File

@@ -61,7 +61,7 @@ describe("createOpenClawCodingTools", () => {
const openAiTools = createOpenClawCodingTools({
modelProvider: "openai",
modelId: "gpt-5.2",
modelId: "gpt-5.4",
});
expect(openAiTools.some((tool) => tool.name === "apply_patch")).toBe(true);
@@ -81,35 +81,35 @@ describe("createOpenClawCodingTools", () => {
const disabledOpenAiTools = createOpenClawCodingTools({
config: disabledConfig,
modelProvider: "openai",
modelId: "gpt-5.2",
modelId: "gpt-5.4",
});
expect(disabledOpenAiTools.some((tool) => tool.name === "apply_patch")).toBe(false);
const anthropicTools = createOpenClawCodingTools({
config: disabledConfig,
modelProvider: "anthropic",
modelId: "claude-opus-4-5",
modelId: "claude-opus-4-6",
});
expect(anthropicTools.some((tool) => tool.name === "apply_patch")).toBe(false);
const allowModelsConfig: OpenClawConfig = {
tools: {
exec: {
applyPatch: { allowModels: ["gpt-5.2"] },
applyPatch: { allowModels: ["gpt-5.4"] },
},
},
};
const allowed = createOpenClawCodingTools({
config: allowModelsConfig,
modelProvider: "openai",
modelId: "gpt-5.2",
modelId: "gpt-5.4",
});
expect(allowed.some((tool) => tool.name === "apply_patch")).toBe(true);
const denied = createOpenClawCodingTools({
config: allowModelsConfig,
modelProvider: "openai",
modelId: "gpt-5-mini",
modelId: "gpt-5.4-mini",
});
expect(denied.some((tool) => tool.name === "apply_patch")).toBe(false);

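The gating these cases exercise reduces to a small predicate; a sketch with the config shape inferred from the fixtures and the default behavior marked as an assumption:

type ApplyPatchConfig = { enabled?: boolean; allowModels?: string[] };

function applyPatchAvailableSketch(
  cfg: ApplyPatchConfig | undefined,
  modelProvider: string,
  modelId: string,
): boolean {
  if (cfg?.enabled === false) return false; // explicit kill switch beats everything
  if (cfg?.allowModels) return cfg.allowModels.includes(modelId); // allowlist is exact-match
  // Assumed default: the patch-format tool ships only for OpenAI models.
  return modelProvider === "openai";
}
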
View File

@@ -34,7 +34,7 @@ function resolveApplyPatchTool(
workspaceDir: params.workspaceDir,
config: params.config,
modelProvider: "openai",
modelId: "gpt-5.2",
modelId: "gpt-5.4",
});
const applyPatchTool = tools.find((t) => t.name === "apply_patch") as ToolWithExecute | undefined;
if (!applyPatchTool) {

View File

@@ -17,7 +17,7 @@ describeLive("provider response headers (live)", () => {
provider: "openai",
api: "openai-responses",
envVar: "OPENCLAW_LIVE_OPENAI_CACHE_MODEL",
preferredModelIds: ["gpt-5.4-mini", "gpt-5.4", "gpt-5.2"],
preferredModelIds: ["gpt-5.4-mini", "gpt-5.4", "gpt-5.4"],
});
}, 120_000);
@@ -61,7 +61,7 @@ describeLive("provider response headers (live)", () => {
provider: "anthropic",
api: "anthropic-messages",
envVar: "OPENCLAW_LIVE_ANTHROPIC_CACHE_MODEL",
preferredModelIds: ["claude-sonnet-4-6", "claude-sonnet-4-5", "claude-haiku-3-5"],
preferredModelIds: ["claude-sonnet-4-6", "claude-sonnet-4-6", "claude-haiku-3-5"],
});
}, 120_000);

View File

@@ -6,7 +6,7 @@ describe("resolveSimpleCompletionSelectionForAgent", () => {
it("preserves multi-segment model ids (openrouter provider models)", () => {
const cfg = {
agents: {
defaults: { model: "openrouter/anthropic/claude-sonnet-4-5" },
defaults: { model: "openrouter/anthropic/claude-sonnet-4-6" },
},
} as OpenClawConfig;
@@ -14,7 +14,7 @@ describe("resolveSimpleCompletionSelectionForAgent", () => {
expect(selection).toEqual(
expect.objectContaining({
provider: "openrouter",
modelId: "anthropic/claude-sonnet-4-5",
modelId: "anthropic/claude-sonnet-4-6",
}),
);
});
@@ -59,7 +59,7 @@ describe("resolveSimpleCompletionSelectionForAgent", () => {
defaults: {
model: "fast@work",
models: {
"openrouter/anthropic/claude-sonnet-4-5": { alias: "fast" },
"openrouter/anthropic/claude-sonnet-4-6": { alias: "fast" },
},
},
},
@@ -69,7 +69,7 @@ describe("resolveSimpleCompletionSelectionForAgent", () => {
expect(selection).toEqual(
expect.objectContaining({
provider: "openrouter",
modelId: "anthropic/claude-sonnet-4-5",
modelId: "anthropic/claude-sonnet-4-6",
profileId: "work",
}),
);
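
Three parsing rules are pinned here: a trailing "@profile" selects an auth profile, a configured alias expands to its full ref, and only the first "/" splits the provider from a possibly multi-segment model id. A minimal sketch under those assumptions:

function resolveSelectionSketch(
  ref: string,
  aliases: Record<string, string>, // alias -> "provider/model"
): { provider: string; modelId: string; profileId?: string } {
  const at = ref.indexOf("@");
  const profileId = at >= 0 ? ref.slice(at + 1) : undefined;
  let model = at >= 0 ? ref.slice(0, at) : ref;
  model = aliases[model] ?? model; // alias expansion, e.g. "fast" above
  const slash = model.indexOf("/"); // first slash only
  return {
    provider: model.slice(0, slash),
    modelId: model.slice(slash + 1),
    profileId,
  };
}

// resolveSelectionSketch("fast@work", { fast: "openrouter/anthropic/claude-sonnet-4-6" })
// -> { provider: "openrouter", modelId: "anthropic/claude-sonnet-4-6", profileId: "work" }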

View File

@@ -136,7 +136,7 @@ describe("prepareSimpleCompletionModel", () => {
hoisted.resolveModelMock.mockReturnValueOnce({
model: {
provider: "amazon-bedrock",
id: "anthropic.claude-sonnet-4-5",
id: "anthropic.claude-sonnet-4-6",
},
authStorage: {
setRuntimeApiKey: hoisted.setRuntimeApiKeyMock,
@@ -151,7 +151,7 @@ describe("prepareSimpleCompletionModel", () => {
const result = await prepareSimpleCompletionModel({
cfg: undefined,
provider: "amazon-bedrock",
modelId: "anthropic.claude-sonnet-4-5",
modelId: "anthropic.claude-sonnet-4-6",
allowMissingApiKeyModes: ["aws-sdk"],
});
@@ -159,7 +159,7 @@ describe("prepareSimpleCompletionModel", () => {
expect.objectContaining({
model: expect.objectContaining({
provider: "amazon-bedrock",
id: "anthropic.claude-sonnet-4-5",
id: "anthropic.claude-sonnet-4-6",
}),
auth: {
source: "aws-sdk default chain",

View File

@@ -493,14 +493,14 @@ describe("buildAgentSystemPrompt", () => {
const prompt = buildAgentSystemPrompt({
workspaceDir: "/tmp/openclaw",
modelAliasLines: [
"- Opus: anthropic/claude-opus-4-5",
"- Sonnet: anthropic/claude-sonnet-4-5",
"- Opus: anthropic/claude-opus-4-6",
"- Sonnet: anthropic/claude-sonnet-4-6",
],
});
expect(prompt).toContain("## Model Aliases");
expect(prompt).toContain("Prefer aliases when specifying model overrides");
expect(prompt).toContain("- Opus: anthropic/claude-opus-4-5");
expect(prompt).toContain("- Opus: anthropic/claude-opus-4-6");
});
it("adds ClaudeBot self-update guidance when gateway tool is available", () => {
@@ -682,7 +682,7 @@ describe("buildAgentSystemPrompt", () => {
arch: "arm64",
node: "v20",
model: "anthropic/claude",
defaultModel: "anthropic/claude-opus-4-5",
defaultModel: "anthropic/claude-opus-4-6",
},
"telegram",
["inlineButtons"],
@@ -695,7 +695,7 @@ describe("buildAgentSystemPrompt", () => {
expect(line).toContain("os=macOS (arm64)");
expect(line).toContain("node=v20");
expect(line).toContain("model=anthropic/claude");
expect(line).toContain("default_model=anthropic/claude-opus-4-5");
expect(line).toContain("default_model=anthropic/claude-opus-4-6");
expect(line).toContain("channel=telegram");
expect(line).toContain("capabilities=inlineButtons");
expect(line).toContain("thinking=low");

View File

@@ -487,7 +487,7 @@ describe("image tool implicit imageModel config", () => {
it("stays disabled without auth when no pairing is possible", async () => {
await withTempAgentDir(async (agentDir) => {
const cfg: OpenClawConfig = {
agents: { defaults: { model: { primary: "openai/gpt-5.2" } } },
agents: { defaults: { model: { primary: "openai/gpt-5.4" } } },
};
expect(resolveImageModelConfigForTool({ cfg, agentDir })).toBeNull();
expect(createImageTool({ config: cfg, agentDir })).toBeNull();
@@ -1287,12 +1287,12 @@ describe("image tool response validation", () => {
it("returns trimmed text from image-model responses", () => {
const text = __testing.coerceImageAssistantText({
provider: "anthropic",
model: "claude-opus-4-5",
model: "claude-opus-4-6",
message: {
...createAssistantMessage({
api: "anthropic-messages",
provider: "anthropic",
model: "claude-opus-4-5",
model: "claude-opus-4-6",
}),
content: [{ type: "text", text: " hello " }],
} as never,

View File

@@ -185,7 +185,7 @@ describe("resolveTranscriptPolicy", () => {
it("enables sanitizeToolCallIds for Anthropic provider", () => {
const policy = resolveTranscriptPolicy({
provider: "anthropic",
modelId: "claude-opus-4-5",
modelId: "claude-opus-4-6",
modelApi: "anthropic-messages",
});
expect(policy.sanitizeToolCallIds).toBe(true);
@@ -230,7 +230,7 @@ describe("resolveTranscriptPolicy", () => {
it("enables strict tool call id sanitization for openai-completions APIs", () => {
const policy = resolveTranscriptPolicy({
provider: "openai",
modelId: "gpt-5.2",
modelId: "gpt-5.4",
modelApi: "openai-completions",
});
expect(policy.sanitizeToolCallIds).toBe(true);
@@ -322,7 +322,7 @@ describe("resolveTranscriptPolicy", () => {
{
title: "Anthropic provider",
provider: "anthropic",
modelId: "claude-opus-4-5",
modelId: "claude-opus-4-6",
modelApi: "anthropic-messages" as const,
preserveSignatures: true,
},

View File

@@ -354,12 +354,10 @@ describe("commands registry args", () => {
args: [{ name: "model", description: "model", type: "string", captureRemaining: true }],
};
expect(serializeCommandArgs(command, { raw: "gpt-5.2-codex" })).toBe("gpt-5.2-codex");
expect(serializeCommandArgs(command, { values: { model: "gpt-5.2-codex" } })).toBe(
"gpt-5.2-codex",
);
expect(buildCommandTextFromArgs(command, { values: { model: "gpt-5.2-codex" } })).toBe(
"/model gpt-5.2-codex",
expect(serializeCommandArgs(command, { raw: "gpt-5.4" })).toBe("gpt-5.4");
expect(serializeCommandArgs(command, { values: { model: "gpt-5.4" } })).toBe("gpt-5.4");
expect(buildCommandTextFromArgs(command, { values: { model: "gpt-5.4" } })).toBe(
"/model gpt-5.4",
);
});
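
The serialization contract is small: raw text wins over structured values, and a captureRemaining arg serializes to its value verbatim. A sketch with the command shape trimmed to the fields used here (the `name` field is inferred from the built "/model gpt-5.4" text, so treat it as an assumption):

type CommandArg = { name: string; captureRemaining?: boolean };
type CommandLike = { name: string; args: CommandArg[] };

function serializeCommandArgsSketch(
  command: CommandLike,
  input: { raw?: string; values?: Record<string, string> },
): string {
  if (input.raw !== undefined) return input.raw; // raw always wins
  return command.args
    .map((arg) => input.values?.[arg.name] ?? "")
    .filter(Boolean)
    .join(" ");
}

function buildCommandTextFromArgsSketch(
  command: CommandLike,
  input: { values?: Record<string, string> },
): string {
  return `/${command.name} ${serializeCommandArgsSketch(command, input)}`.trimEnd();
}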

View File

@@ -10,14 +10,14 @@ export function createSuccessfulImageMediaDecision() {
type: "provider",
outcome: "success",
provider: "openai",
model: "gpt-5.2",
model: "gpt-5.4",
},
],
chosen: {
type: "provider",
outcome: "success",
provider: "openai",
model: "gpt-5.2",
model: "gpt-5.4",
},
},
],

View File

@@ -24,9 +24,9 @@ describe("extractModelDirective", () => {
});
it("extracts /model with provider/model format", () => {
const result = extractModelDirective("/model anthropic/claude-opus-4-5");
const result = extractModelDirective("/model anthropic/claude-opus-4-6");
expect(result.hasDirective).toBe(true);
expect(result.rawModel).toBe("anthropic/claude-opus-4-5");
expect(result.rawModel).toBe("anthropic/claude-opus-4-6");
});
it("extracts /model with profile override", () => {

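A sketch of the parse this suite pins down; only the two fields read here are modeled, and bare "/model" with no argument is assumed to still count as a directive:

function extractModelDirectiveSketch(text: string): { hasDirective: boolean; rawModel?: string } {
  const match = /^\/model(?:\s+(\S+))?\s*$/.exec(text.trim());
  if (!match) return { hasDirective: false };
  return { hasDirective: true, rawModel: match[1] };
}
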
View File

@@ -53,7 +53,7 @@ vi.mock("./command-auth.js", () => ({
vi.mock("./reply/directive-handling.defaults.js", () => ({
resolveDefaultModel: vi.fn(() => ({
defaultProvider: "anthropic",
defaultModel: "claude-opus-4-5",
defaultModel: "claude-opus-4-6",
aliasIndex: new Map(),
})),
}));
@@ -106,7 +106,7 @@ function createReplyConfig(streamMode?: "block"): OpenClawConfig {
return {
agents: {
defaults: {
model: { primary: "anthropic/claude-opus-4-5" },
model: { primary: "anthropic/claude-opus-4-6" },
workspace: "/tmp/workspace",
},
},
@@ -152,7 +152,7 @@ function createContinueDirectivesResult() {
blockReplyChunking: undefined,
resolvedBlockStreamingBreak: "message_end",
provider: "anthropic",
model: "claude-opus-4-5",
model: "claude-opus-4-6",
modelState: {
resolveDefaultThinkingLevel: async () => undefined,
},

View File

@@ -53,7 +53,7 @@ async function runThinkDirectiveAndGetText(home: string): Promise<string | undef
{ Body: "/think", From: "+1222", To: "+1222", CommandAuthorized: true },
{},
makeWhatsAppDirectiveConfig(home, {
model: "anthropic/claude-opus-4-5",
model: "anthropic/claude-opus-4-6",
thinkingDefault: "high",
}),
);
@@ -82,7 +82,7 @@ async function runInlineReasoningMessage(params: {
},
makeWhatsAppDirectiveConfig(
params.home,
{ model: "anthropic/claude-opus-4-5" },
{ model: "anthropic/claude-opus-4-6" },
{
session: { store: params.storePath },
},
@@ -93,7 +93,7 @@ async function runInlineReasoningMessage(params: {
function makeRunConfig(home: string, storePath: string) {
return makeWhatsAppDirectiveConfig(
home,
{ model: "anthropic/claude-opus-4-5" },
{ model: "anthropic/claude-opus-4-6" },
{ session: { store: storePath } },
);
}
@@ -199,7 +199,7 @@ describe("directive behavior", () => {
const enabledRes = await getReplyFromConfig(
{ Body: "/verbose on", From: "+1222", To: "+1222", CommandAuthorized: true },
{},
makeWhatsAppDirectiveConfig(home, { model: "anthropic/claude-opus-4-5" }),
makeWhatsAppDirectiveConfig(home, { model: "anthropic/claude-opus-4-6" }),
);
expect(replyText(enabledRes)).toMatch(/^⚙️ Verbose logging enabled\./);
@@ -208,7 +208,7 @@ describe("directive behavior", () => {
{},
makeWhatsAppDirectiveConfig(
home,
{ model: "anthropic/claude-opus-4-5" },
{ model: "anthropic/claude-opus-4-6" },
{
session: { store: storePath },
},
@@ -254,8 +254,8 @@ describe("directive behavior", () => {
expect(text).toContain("Options: off, minimal, low, medium, high, adaptive.");
for (const model of [
"openai-codex/gpt-5.2-codex",
"openai/gpt-5.2",
"openai-codex/gpt-5.4",
"openai/gpt-5.4",
"openai/gpt-5.4-mini",
"openai/gpt-5.4-nano",
]) {
@@ -282,9 +282,9 @@ describe("directive behavior", () => {
makeWhatsAppDirectiveConfig(
home,
{
model: "anthropic/claude-opus-4-5",
model: "anthropic/claude-opus-4-6",
models: {
"anthropic/claude-opus-4-5": { alias: " help " },
"anthropic/claude-opus-4-6": { alias: " help " },
},
},
{ session: { store: sessionStorePath(home) } },
@@ -316,10 +316,10 @@ describe("directive behavior", () => {
makeWhatsAppDirectiveConfig(
home,
{
model: "anthropic/claude-opus-4-5",
model: "anthropic/claude-opus-4-6",
workspace,
models: {
"anthropic/claude-opus-4-5": { alias: "demo_skill" },
"anthropic/claude-opus-4-6": { alias: "demo_skill" },
},
},
{ session: { store: sessionStorePath(home) } },
@@ -343,7 +343,7 @@ describe("directive behavior", () => {
{},
makeWhatsAppDirectiveConfig(
home,
{ model: "anthropic/claude-opus-4-5" },
{ model: "anthropic/claude-opus-4-6" },
{
session: { store: sessionStorePath(home) },
},
@@ -366,7 +366,7 @@ describe("directive behavior", () => {
{},
makeWhatsAppDirectiveConfig(
home,
{ model: "anthropic/claude-opus-4-5" },
{ model: "anthropic/claude-opus-4-6" },
{
messages: {
queue: {

View File

@@ -24,9 +24,9 @@ let getReplyFromConfig: typeof import("./reply.js").getReplyFromConfig;
function makeDefaultModelConfig(home: string) {
return makeWhatsAppDirectiveConfig(home, {
model: { primary: "anthropic/claude-opus-4-5" },
model: { primary: "anthropic/claude-opus-4-6" },
models: {
"anthropic/claude-opus-4-5": {},
"anthropic/claude-opus-4-6": {},
"openai/gpt-4.1-mini": {},
},
});
@@ -43,7 +43,7 @@ async function runReplyToCurrentCase(home: string, text: string) {
MessageSid: "msg-123",
},
{},
makeWhatsAppDirectiveConfig(home, { model: "anthropic/claude-opus-4-5" }),
makeWhatsAppDirectiveConfig(home, { model: "anthropic/claude-opus-4-6" }),
);
return Array.isArray(res) ? res[0] : res;
@@ -56,7 +56,7 @@ async function expectThinkStatusForReasoningModel(params: {
}): Promise<void> {
loadModelCatalogMock.mockResolvedValueOnce([
{
id: "claude-opus-4-5",
id: "claude-opus-4-6",
name: "Opus 4.5",
provider: "anthropic",
reasoning: params.reasoning,
@@ -66,7 +66,7 @@ async function expectThinkStatusForReasoningModel(params: {
const res = await getReplyFromConfig(
{ Body: "/think", From: "+1222", To: "+1222", CommandAuthorized: true },
{},
makeWhatsAppDirectiveConfig(params.home, { model: "anthropic/claude-opus-4-5" }),
makeWhatsAppDirectiveConfig(params.home, { model: "anthropic/claude-opus-4-6" }),
);
const text = replyText(res);
@@ -77,7 +77,7 @@ async function expectThinkStatusForReasoningModel(params: {
function mockReasoningCapableCatalog() {
loadModelCatalogMock.mockResolvedValueOnce([
{
id: "claude-opus-4-5",
id: "claude-opus-4-6",
name: "Opus 4.5",
provider: "anthropic",
reasoning: true,
@@ -103,7 +103,7 @@ async function runReasoningDefaultCase(params: {
},
{},
makeWhatsAppDirectiveConfig(params.home, {
model: { primary: "anthropic/claude-opus-4-5" },
model: { primary: "anthropic/claude-opus-4-6" },
...(params.thinkingDefault ? { thinkingDefault: params.thinkingDefault } : {}),
}),
);
@@ -170,7 +170,7 @@ describe("directive behavior", () => {
loadModelCatalogMock.mockResolvedValueOnce([]);
const unavailableCatalogText = await runModelDirectiveText(home, "/model");
expect(unavailableCatalogText).toContain("Current: anthropic/claude-opus-4-5");
expect(unavailableCatalogText).toContain("Current: anthropic/claude-opus-4-6");
expect(unavailableCatalogText).toContain("Switch: /model <provider/model>");
expect(unavailableCatalogText).toContain(
"Browse: /models (providers) or /models <provider> (models)",
@@ -180,20 +180,20 @@ describe("directive behavior", () => {
const allowlistedStatusText = await runModelDirectiveText(home, "/model status", {
includeSessionStore: false,
});
expect(allowlistedStatusText).toContain("anthropic/claude-opus-4-5");
expect(allowlistedStatusText).toContain("anthropic/claude-opus-4-6");
expect(allowlistedStatusText).toContain("openai/gpt-4.1-mini");
expect(allowlistedStatusText).not.toContain("claude-sonnet-4-1");
expect(allowlistedStatusText).toContain("auth:");
loadModelCatalogMock.mockResolvedValue([
{ id: "claude-opus-4-5", name: "Opus 4.5", provider: "anthropic" },
{ id: "claude-opus-4-6", name: "Opus 4.5", provider: "anthropic" },
{ id: "gpt-4.1-mini", name: "GPT-4.1 Mini", provider: "openai" },
{ id: "grok-4", name: "Grok 4", provider: "xai" },
]);
const noAllowlistText = await runModelDirectiveText(home, "/model list", {
defaults: {
model: {
primary: "anthropic/claude-opus-4-5",
primary: "anthropic/claude-opus-4-6",
fallbacks: ["openai/gpt-4.1-mini"],
},
imageModel: { primary: "minimax/MiniMax-M2.7" },
@@ -209,7 +209,7 @@ describe("directive behavior", () => {
loadModelCatalogMock.mockResolvedValueOnce([
{
provider: "anthropic",
id: "claude-opus-4-5",
id: "claude-opus-4-6",
name: "Claude Opus 4.5",
},
{ provider: "openai", id: "gpt-4.1-mini", name: "GPT-4.1 mini" },
@@ -217,7 +217,7 @@ describe("directive behavior", () => {
const configOnlyProviderText = await runModelDirectiveText(home, "/models minimax", {
defaults: {
models: {
"anthropic/claude-opus-4-5": {},
"anthropic/claude-opus-4-6": {},
"openai/gpt-4.1-mini": {},
"minimax/MiniMax-M2.7": { alias: "minimax" },
},
@@ -244,7 +244,7 @@ describe("directive behavior", () => {
const missingAuthText = await runModelDirectiveText(home, "/model list", {
defaults: {
models: {
"anthropic/claude-opus-4-5": {},
"anthropic/claude-opus-4-6": {},
},
},
});
@@ -263,9 +263,9 @@ describe("directive behavior", () => {
makeWhatsAppDirectiveConfig(
home,
{
model: { primary: "anthropic/claude-opus-4-5" },
model: { primary: "anthropic/claude-opus-4-6" },
models: {
"anthropic/claude-opus-4-5": {},
"anthropic/claude-opus-4-6": {},
"openai/gpt-4.1-mini": {},
},
},
@@ -299,7 +299,7 @@ describe("directive behavior", () => {
expect(runEmbeddedPiAgentMock).toHaveBeenCalledOnce();
const call = runEmbeddedPiAgentMock.mock.calls[0]?.[0];
expect(call?.provider).toBe("anthropic");
expect(call?.model).toBe("claude-opus-4-5");
expect(call?.model).toBe("claude-opus-4-6");
runEmbeddedPiAgentMock.mockClear();
mockEmbeddedTextResult("done");
@@ -310,7 +310,7 @@ describe("directive behavior", () => {
To: "+2000",
},
{},
makeWhatsAppDirectiveConfig(home, { model: { primary: "anthropic/claude-opus-4-5" } }),
makeWhatsAppDirectiveConfig(home, { model: { primary: "anthropic/claude-opus-4-6" } }),
);
expect(replyTexts(inlineThinkRes)).toContain("done");
@@ -332,7 +332,7 @@ describe("directive behavior", () => {
{},
makeWhatsAppDirectiveConfig(
home,
{ model: { primary: "anthropic/claude-opus-4-5" } },
{ model: { primary: "anthropic/claude-opus-4-6" } },
{
tools: {
elevated: {
@@ -436,7 +436,7 @@ describe("directive behavior", () => {
MessageSid: "msg-123",
},
{},
makeWhatsAppDirectiveConfig(home, { model: { primary: "anthropic/claude-opus-4-5" } }),
makeWhatsAppDirectiveConfig(home, { model: { primary: "anthropic/claude-opus-4-6" } }),
);
const payload = Array.isArray(res) ? res[0] : res;

View File

@@ -18,7 +18,7 @@ export const DEFAULT_TEST_MODEL_CATALOG: Array<{
name: string;
provider: string;
}> = [
{ id: "claude-opus-4-5", name: "Opus 4.5", provider: "anthropic" },
{ id: "claude-opus-4-6", name: "Opus 4.5", provider: "anthropic" },
{ id: "claude-sonnet-4-1", name: "Sonnet 4.1", provider: "anthropic" },
{ id: "gpt-4.1-mini", name: "GPT-4.1 Mini", provider: "openai" },
];
@@ -102,7 +102,7 @@ export function makeElevatedDirectiveConfig(home: string) {
return makeWhatsAppDirectiveConfig(
home,
{
model: "anthropic/claude-opus-4-5",
model: "anthropic/claude-opus-4-6",
elevatedDefault: "on",
},
{
@@ -189,7 +189,7 @@ export function makeRestrictedElevatedDisabledConfig(home: string) {
return {
agents: {
defaults: {
model: "anthropic/claude-opus-4-5",
model: "anthropic/claude-opus-4-6",
workspace: path.join(home, "openclaw"),
},
list: [

View File

@@ -20,9 +20,9 @@ export async function runModelDirectiveText(
makeWhatsAppDirectiveConfig(
home,
{
model: { primary: "anthropic/claude-opus-4-5" },
model: { primary: "anthropic/claude-opus-4-6" },
models: {
"anthropic/claude-opus-4-5": {},
"anthropic/claude-opus-4-6": {},
"openai/gpt-4.1-mini": {},
},
...options.defaults,

View File

@@ -35,7 +35,7 @@ function makeModelSwitchConfig(home: string) {
model: { primary: "openai/gpt-4.1-mini" },
models: {
"openai/gpt-4.1-mini": {},
"anthropic/claude-opus-4-5": { alias: "Opus" },
"anthropic/claude-opus-4-6": { alias: "Opus" },
},
});
}
@@ -44,10 +44,10 @@ function makeMoonshotConfig(home: string, storePath: string) {
return {
agents: {
defaults: {
model: { primary: "anthropic/claude-opus-4-5" },
model: { primary: "anthropic/claude-opus-4-6" },
workspace: path.join(home, "openclaw"),
models: {
"anthropic/claude-opus-4-5": {},
"anthropic/claude-opus-4-6": {},
"moonshot/kimi-k2-0905-preview": {},
},
},
@@ -209,10 +209,10 @@ describe("directive behavior", () => {
{
agents: {
defaults: {
model: { primary: "anthropic/claude-opus-4-5" },
model: { primary: "anthropic/claude-opus-4-6" },
workspace: path.join(home, "openclaw"),
models: {
"anthropic/claude-opus-4-5": {},
"anthropic/claude-opus-4-6": {},
"moonshot/kimi-k2-0905-preview": { alias: "Kimi" },
"lmstudio/kimi-k2-0905-preview": {},
},
@@ -295,7 +295,7 @@ describe("directive behavior", () => {
);
let events = drainSystemEvents(MAIN_SESSION_KEY);
expect(events).toContain("Model switched to Opus (anthropic/claude-opus-4-5).");
expect(events).toContain("Model switched to Opus (anthropic/claude-opus-4-6).");
drainSystemEvents(MAIN_SESSION_KEY);

View File

@@ -34,7 +34,7 @@ async function runCommand(
makeWhatsAppDirectiveConfig(
home,
{
model: "anthropic/claude-opus-4-5",
model: "anthropic/claude-opus-4-6",
...options.defaults,
},
options.extra ?? {},
@@ -59,7 +59,7 @@ function makeWorkElevatedAllowlistConfig(home: string) {
const base = makeWhatsAppDirectiveConfig(
home,
{
model: "anthropic/claude-opus-4-5",
model: "anthropic/claude-opus-4-6",
},
{
tools: {
@@ -96,7 +96,7 @@ function makeAllowlistedElevatedConfig(
return makeWhatsAppDirectiveConfig(
home,
{
model: "anthropic/claude-opus-4-5",
model: "anthropic/claude-opus-4-6",
...defaults,
},
{
@@ -135,7 +135,7 @@ describe("directive behavior", () => {
const fastText = await runCommand(home, "/fast", {
defaults: {
models: {
"anthropic/claude-opus-4-5": {
"anthropic/claude-opus-4-6": {
params: { fastMode: true },
},
},
@@ -205,7 +205,7 @@ describe("directive behavior", () => {
const statusText = await runCommand(home, "/fast status", {
defaults: {
models: {
"anthropic/claude-opus-4-5": {
"anthropic/claude-opus-4-6": {
params: { fastMode: true },
},
},

View File

@@ -21,7 +21,7 @@ function makeAgentExecConfig(home: string) {
return {
agents: {
defaults: {
model: "anthropic/claude-opus-4-5",
model: "anthropic/claude-opus-4-6",
workspace: `${home}/openclaw`,
},
list: [

View File

@@ -33,7 +33,7 @@ function makeCfg(home: string) {
return {
agents: {
defaults: {
model: "anthropic/claude-opus-4-5",
model: "anthropic/claude-opus-4-6",
workspace: path.join(home, "openclaw"),
},
},

View File

@@ -83,7 +83,7 @@ export function makeReplyConfig(home: string) {
return {
agents: {
defaults: {
model: "anthropic/claude-opus-4-5",
model: "anthropic/claude-opus-4-6",
workspace: path.join(home, "openclaw"),
},
},
@@ -152,7 +152,7 @@ export function resetReplyRuntimeMocks(mocks: ReplyRuntimeMocks) {
mocks.runEmbeddedPiAgent.mockClear();
mocks.loadModelCatalog.mockClear();
mocks.loadModelCatalog.mockResolvedValue([
{ id: "claude-opus-4-5", name: "Opus 4.5", provider: "anthropic" },
{ id: "claude-opus-4-6", name: "Opus 4.5", provider: "anthropic" },
]);
}

View File

@@ -103,7 +103,7 @@ async function writeStoredModelOverride(cfg: ReturnType<typeof makeCfg>): Promis
sessionId: "main",
updatedAt: Date.now(),
providerOverride: "openai",
modelOverride: "gpt-5.2",
modelOverride: "gpt-5.4",
},
}),
"utf-8",
@@ -312,7 +312,7 @@ describe("trigger handling", () => {
{
label: "stored-override",
setup: () => undefined,
expected: { provider: "openai", model: "gpt-5.2" },
expected: { provider: "openai", model: "gpt-5.4" },
},
] as const;
@@ -424,7 +424,7 @@ describe("trigger handling", () => {
workspaceDir: join(home, "workspace"),
config: cfg,
provider: "anthropic",
model: "claude-opus-4-5",
model: "claude-opus-4-6",
timeoutMs: 10,
blockReplyBreak: "text_end",
},

View File

@@ -84,19 +84,19 @@ const modelCatalogMocks = getSharedMocks("openclaw.trigger-handling.model-catalo
loadModelCatalog: vi.fn().mockResolvedValue([
{
provider: "anthropic",
id: "claude-opus-4-5",
id: "claude-opus-4-6",
name: "Claude Opus 4.5",
contextWindow: 200000,
},
{
provider: "openrouter",
id: "anthropic/claude-opus-4-5",
id: "anthropic/claude-opus-4-6",
name: "Claude Opus 4.5 (OpenRouter)",
contextWindow: 200000,
},
{ provider: "openai", id: "gpt-4.1-mini", name: "GPT-4.1 mini" },
{ provider: "openai", id: "gpt-5.2", name: "GPT-5.2" },
{ provider: "openai-codex", id: "gpt-5.2", name: "GPT-5.2 (Codex)" },
{ provider: "openai", id: "gpt-5.4", name: "GPT-5.2" },
{ provider: "openai-codex", id: "gpt-5.4", name: "GPT-5.2 (Codex)" },
{ provider: "minimax", id: "MiniMax-M2.7", name: "MiniMax M2.7" },
]),
resetModelCatalogCacheForTest: vi.fn(),
@@ -274,7 +274,7 @@ export function makeCfg(home: string): OpenClawConfig {
return {
agents: {
defaults: {
model: { primary: "anthropic/claude-opus-4-5" },
model: { primary: "anthropic/claude-opus-4-6" },
workspace: join(home, "openclaw"),
// Test harness: avoid 1s coalescer idle sleeps that dominate trigger suites.
blockStreamingCoalesce: { idleMs: 1 },

View File

@@ -153,7 +153,7 @@ describe("abort detection", () => {
workspaceDir: path.join(params.root, "workspace"),
config: params.cfg,
provider: "anthropic",
model: "claude-opus-4-5",
model: "claude-opus-4-6",
timeoutMs: 1000,
blockReplyBreak: "text_end",
},

View File

@@ -377,13 +377,13 @@ describe("runAgentTurnWithFallback", () => {
state.runWithModelFallbackMock.mockRejectedValueOnce(
Object.assign(
new Error(
"All models failed (2): anthropic/claude: 429 (rate_limit) | openai/gpt-5.2: 402 (billing)",
"All models failed (2): anthropic/claude: 429 (rate_limit) | openai/gpt-5.4: 402 (billing)",
),
{
name: "FallbackSummaryError",
attempts: [
{ provider: "anthropic", model: "claude", error: "429", reason: "rate_limit" },
{ provider: "openai", model: "gpt-5.2", error: "402", reason: "billing" },
{ provider: "openai", model: "gpt-5.4", error: "402", reason: "billing" },
],
soonestCooldownExpiry: Date.now() + 60_000,
},

View File

@@ -246,9 +246,9 @@ describe("runReplyAgent authProfileId fallback scoping", () => {
it("drops authProfileId when provider changes during fallback", async () => {
runWithModelFallbackMock.mockImplementationOnce(
async ({ run }: RunWithModelFallbackParams) => ({
result: await run("openai-codex", "gpt-5.2"),
result: await run("openai-codex", "gpt-5.4"),
provider: "openai-codex",
model: "gpt-5.2",
model: "gpt-5.4",
}),
);
@@ -318,7 +318,7 @@ describe("runReplyAgent authProfileId fallback scoping", () => {
sessionStore: { [sessionKey]: sessionEntry },
sessionKey,
storePath: undefined,
defaultModel: "anthropic/claude-opus-4-5",
defaultModel: "anthropic/claude-opus-4-6",
agentCfgContextTokens: 100_000,
resolvedVerboseLevel: "off",
isNewSession: false,
@@ -339,7 +339,7 @@ describe("runReplyAgent authProfileId fallback scoping", () => {
expect(call.authProfileId).toBeUndefined();
expect(call.authProfileIdSource).toBeUndefined();
expect(sessionEntry.providerOverride).toBe("openai-codex");
expect(sessionEntry.modelOverride).toBe("gpt-5.2");
expect(sessionEntry.modelOverride).toBe("gpt-5.4");
expect(sessionEntry.authProfileOverride).toBeUndefined();
expect(sessionEntry.authProfileOverrideSource).toBeUndefined();
});
@@ -421,7 +421,7 @@ describe("runReplyAgent authProfileId fallback scoping", () => {
sessionStore: { [sessionKey]: sessionEntry },
sessionKey,
storePath: undefined,
defaultModel: "anthropic/claude-opus-4-5",
defaultModel: "anthropic/claude-opus-4-6",
agentCfgContextTokens: 100_000,
resolvedVerboseLevel: "off",
isNewSession: false,
@@ -580,7 +580,7 @@ describe("runReplyAgent auto-compaction token update", () => {
sessionStore: { [sessionKey]: sessionEntry },
sessionKey,
storePath,
defaultModel: "anthropic/claude-opus-4-5",
defaultModel: "anthropic/claude-opus-4-6",
agentCfgContextTokens: 200_000,
resolvedVerboseLevel: "off",
isNewSession: false,
@@ -647,7 +647,7 @@ describe("runReplyAgent auto-compaction token update", () => {
sessionStore: { [sessionKey]: sessionEntry },
sessionKey,
storePath,
defaultModel: "anthropic/claude-opus-4-5",
defaultModel: "anthropic/claude-opus-4-6",
agentCfgContextTokens: 200_000,
resolvedVerboseLevel: "off",
isNewSession: false,
@@ -686,9 +686,9 @@ describe("runReplyAgent auto-compaction token update", () => {
// Expected first-attempt failure.
}
return {
result: await run("openai", "gpt-5.2"),
result: await run("openai", "gpt-5.4"),
provider: "openai",
model: "gpt-5.2",
model: "gpt-5.4",
attempts: [{ provider: "anthropic", model: "claude", error: "attempt failed" }],
};
});
@@ -736,7 +736,7 @@ describe("runReplyAgent auto-compaction token update", () => {
sessionStore: { [sessionKey]: sessionEntry },
sessionKey,
storePath,
defaultModel: "anthropic/claude-opus-4-5",
defaultModel: "anthropic/claude-opus-4-6",
agentCfgContextTokens: 200_000,
resolvedVerboseLevel: "off",
isNewSession: false,
@@ -771,9 +771,9 @@ describe("runReplyAgent auto-compaction token update", () => {
// Expected first-attempt failure.
}
return {
result: await run("openai", "gpt-5.2"),
result: await run("openai", "gpt-5.4"),
provider: "openai",
model: "gpt-5.2",
model: "gpt-5.4",
attempts: [{ provider: "anthropic", model: "claude", error: "attempt failed" }],
};
});
@@ -821,7 +821,7 @@ describe("runReplyAgent auto-compaction token update", () => {
sessionStore: { [sessionKey]: sessionEntry },
sessionKey,
storePath,
defaultModel: "anthropic/claude-opus-4-5",
defaultModel: "anthropic/claude-opus-4-6",
agentCfgContextTokens: 200_000,
resolvedVerboseLevel: "off",
isNewSession: false,
@@ -878,7 +878,7 @@ describe("runReplyAgent auto-compaction token update", () => {
sessionStore: { [sessionKey]: sessionEntry },
sessionKey,
storePath,
defaultModel: "anthropic/claude-opus-4-5",
defaultModel: "anthropic/claude-opus-4-6",
agentCfgContextTokens: 200_000,
resolvedVerboseLevel: "off",
isNewSession: false,
@@ -959,7 +959,7 @@ describe("runReplyAgent auto-compaction token update", () => {
sessionStore: { [sessionKey]: sessionEntry },
sessionKey,
storePath,
defaultModel: "anthropic/claude-opus-4-5",
defaultModel: "anthropic/claude-opus-4-6",
agentCfgContextTokens: 200_000,
resolvedVerboseLevel: "off",
isNewSession: false,
@@ -1045,7 +1045,7 @@ describe("runReplyAgent block streaming", () => {
opts: { onBlockReply },
typing,
sessionCtx,
defaultModel: "anthropic/claude-opus-4-5",
defaultModel: "anthropic/claude-opus-4-6",
resolvedVerboseLevel: "off",
isNewSession: false,
blockStreamingEnabled: true,
@@ -1147,7 +1147,7 @@ describe("runReplyAgent block streaming", () => {
opts: { onBlockReply, blockReplyTimeoutMs: 1 },
typing,
sessionCtx,
defaultModel: "anthropic/claude-opus-4-5",
defaultModel: "anthropic/claude-opus-4-6",
resolvedVerboseLevel: "off",
isNewSession: false,
blockStreamingEnabled: true,
@@ -1318,7 +1318,7 @@ describe("runReplyAgent messaging tool suppression", () => {
sessionCtx,
sessionKey,
storePath: opts.storePath,
defaultModel: "anthropic/claude-opus-4-5",
defaultModel: "anthropic/claude-opus-4-6",
resolvedVerboseLevel: "off",
isNewSession: false,
blockStreamingEnabled: false,
@@ -1403,7 +1403,7 @@ describe("runReplyAgent messaging tool suppression", () => {
meta: {
agentMeta: {
usage: { input: 10, output: 5 },
model: "claude-opus-4-5",
model: "claude-opus-4-6",
provider: "anthropic",
},
},
@@ -1417,7 +1417,7 @@ describe("runReplyAgent messaging tool suppression", () => {
expect(store[sessionKey]?.outputTokens).toBe(5);
expect(store[sessionKey]?.totalTokens).toBeUndefined();
expect(store[sessionKey]?.totalTokensFresh).toBe(false);
expect(store[sessionKey]?.model).toBe("claude-opus-4-5");
expect(store[sessionKey]?.model).toBe("claude-opus-4-6");
});
it("persists totalTokens from promptTokens when snapshot is available", async () => {
@@ -1437,7 +1437,7 @@ describe("runReplyAgent messaging tool suppression", () => {
agentMeta: {
usage: { input: 10, output: 5 },
promptTokens: 42_000,
model: "claude-opus-4-5",
model: "claude-opus-4-6",
provider: "anthropic",
},
},
@@ -1449,7 +1449,7 @@ describe("runReplyAgent messaging tool suppression", () => {
const store = loadSessionStore(storePath, { skipCache: true });
expect(store[sessionKey]?.totalTokens).toBe(42_000);
expect(store[sessionKey]?.totalTokensFresh).toBe(true);
expect(store[sessionKey]?.model).toBe("claude-opus-4-5");
expect(store[sessionKey]?.model).toBe("claude-opus-4-6");
});
it("persists totalTokens from promptTokens when provider omits usage", async () => {
@@ -1473,7 +1473,7 @@ describe("runReplyAgent messaging tool suppression", () => {
meta: {
agentMeta: {
promptTokens: 41_000,
model: "claude-opus-4-5",
model: "claude-opus-4-6",
provider: "anthropic",
},
},
@@ -1540,7 +1540,7 @@ describe("runReplyAgent reminder commitment guard", () => {
typing,
sessionCtx,
...(params?.omitSessionKey ? {} : { sessionKey: params?.sessionKey ?? "main" }),
defaultModel: "anthropic/claude-opus-4-5",
defaultModel: "anthropic/claude-opus-4-6",
resolvedVerboseLevel: "off",
isNewSession: false,
blockStreamingEnabled: false,
@@ -1762,7 +1762,7 @@ describe("runReplyAgent fallback reasoning tags", () => {
sessionCtx,
sessionEntry: params?.sessionEntry,
sessionKey,
defaultModel: "anthropic/claude-opus-4-5",
defaultModel: "anthropic/claude-opus-4-6",
agentCfgContextTokens: params?.agentCfgContextTokens,
resolvedVerboseLevel: "off",
isNewSession: false,
@@ -1891,7 +1891,7 @@ describe("runReplyAgent response usage footer", () => {
sessionCtx,
sessionEntry,
sessionKey: params.sessionKey,
defaultModel: "anthropic/claude-opus-4-5",
defaultModel: "anthropic/claude-opus-4-6",
resolvedVerboseLevel: "off",
isNewSession: false,
blockStreamingEnabled: false,
@@ -1998,7 +1998,7 @@ describe("runReplyAgent transient HTTP retry", () => {
isStreaming: false,
typing,
sessionCtx,
defaultModel: "anthropic/claude-opus-4-5",
defaultModel: "anthropic/claude-opus-4-6",
resolvedVerboseLevel: "off",
isNewSession: false,
blockStreamingEnabled: false,

View File

@@ -188,7 +188,7 @@ function createMinimalRun(params?: {
sessionKey,
storePath: params?.storePath,
sessionCtx,
defaultModel: "anthropic/claude-opus-4-5",
defaultModel: "anthropic/claude-opus-4-6",
resolvedVerboseLevel: params?.resolvedVerboseLevel ?? "off",
isNewSession: false,
blockStreamingEnabled: params?.blockStreamingEnabled ?? false,
@@ -294,7 +294,7 @@ async function runReplyAgentWithBase(params: {
sessionStore: { [params.sessionKey]: params.sessionEntry } as Record<string, SessionEntry>,
sessionKey: params.sessionKey,
storePath: params.storePath,
defaultModel: "anthropic/claude-opus-4-5",
defaultModel: "anthropic/claude-opus-4-6",
agentCfgContextTokens: 100_000,
resolvedVerboseLevel: "off",
isNewSession: false,

View File

@@ -32,7 +32,7 @@ async function buildStatusReplyForTest(params: { sessionKey?: string; verbose?:
sessionScope: commandParams.sessionScope,
storePath: commandParams.storePath,
provider: "anthropic",
model: "claude-opus-4-5",
model: "claude-opus-4-6",
contextTokens: 0,
resolvedThinkLevel: commandParams.resolvedThinkLevel,
resolvedFastMode: false,

View File

@@ -221,8 +221,8 @@ vi.mock("../../channels/plugins/pairing.js", async () => {
vi.mock("../../agents/model-catalog.js", () => ({
loadModelCatalog: vi.fn(async () => [
{ provider: "anthropic", id: "claude-opus-4-5", name: "Claude Opus" },
{ provider: "anthropic", id: "claude-sonnet-4-5", name: "Claude Sonnet" },
{ provider: "anthropic", id: "claude-opus-4-6", name: "Claude Opus" },
{ provider: "anthropic", id: "claude-sonnet-4-6", name: "Claude Sonnet" },
{ provider: "openai", id: "gpt-4.1", name: "GPT-4.1" },
{ provider: "openai", id: "gpt-4.1-mini", name: "GPT-4.1 Mini" },
{ provider: "google", id: "gemini-2.0-flash", name: "Gemini Flash" },
@@ -2443,7 +2443,7 @@ describe("handleCommands /allowlist", () => {
describe("/models command", () => {
const cfg = {
commands: { text: true },
agents: { defaults: { model: { primary: "anthropic/claude-opus-4-5" } } },
agents: { defaults: { model: { primary: "anthropic/claude-opus-4-6" } } },
} as unknown as OpenClawConfig;
it.each(["discord", "whatsapp"])("lists providers on %s (text)", async (surface) => {
@@ -2487,7 +2487,7 @@ describe("/models command", () => {
includes: [
"Models (anthropic",
"page 1/",
"anthropic/claude-opus-4-5",
"anthropic/claude-opus-4-6",
"Switch: /model <provider/model>",
"All: /models anthropic all",
],
@@ -2496,7 +2496,7 @@ describe("/models command", () => {
{
name: "ignores page argument when all flag is present",
command: "/models anthropic 3 all",
includes: ["Models (anthropic", "page 1/1", "anthropic/claude-opus-4-5"],
includes: ["Models (anthropic", "page 1/1", "anthropic/claude-opus-4-6"],
excludes: ["Page out of range"],
},
{
@@ -2538,7 +2538,7 @@ describe("/models command", () => {
defaults: {
model: {
primary: "localai/ultra-chat",
fallbacks: ["anthropic/claude-opus-4-5"],
fallbacks: ["anthropic/claude-opus-4-6"],
},
imageModel: "visionpro/studio-v1",
},
@@ -2565,7 +2565,7 @@ describe("/models command", () => {
const scopedCfg = {
commands: { text: true },
agents: {
defaults: { model: { primary: "anthropic/claude-opus-4-5" } },
defaults: { model: { primary: "anthropic/claude-opus-4-6" } },
list: [{ id: "support", model: "localai/ultra-chat" }],
},
} as unknown as OpenClawConfig;

View File

@@ -126,7 +126,7 @@ function resolveModelSelectionForCommand(params: {
cfg: { commands: { text: true } } as unknown as OpenClawConfig,
agentDir: TEST_AGENT_DIR,
defaultProvider: "anthropic",
defaultModel: "claude-opus-4-5",
defaultModel: "claude-opus-4-6",
aliasIndex: baseAliasIndex(),
allowedModelKeys: params.allowedModelKeys,
allowedModelCatalog: params.allowedModelCatalog,
@@ -162,14 +162,14 @@ async function persistModelDirectiveForTest(params: {
elevatedEnabled: false,
elevatedAllowed: false,
defaultProvider: "anthropic",
defaultModel: "claude-opus-4-5",
defaultModel: "claude-opus-4-6",
aliasIndex: params.aliasIndex ?? baseAliasIndex(),
allowedModelKeys: new Set(params.allowedModelKeys),
provider: params.provider ?? "anthropic",
model: params.model ?? "claude-opus-4-5",
model: params.model ?? "claude-opus-4-6",
initialModelLabel:
params.initialModelLabel ??
`${params.provider ?? "anthropic"}/${params.model ?? "claude-opus-4-5"}`,
`${params.provider ?? "anthropic"}/${params.model ?? "claude-opus-4-6"}`,
formatModelSwitchEvent: (label) => label,
agentCfg: cfg.agents?.defaults,
});
@@ -185,9 +185,9 @@ async function resolveModelInfoReply(
agentDir: TEST_AGENT_DIR,
activeAgentId: "main",
provider: "anthropic",
model: "claude-opus-4-5",
model: "claude-opus-4-6",
defaultProvider: "anthropic",
defaultModel: "claude-opus-4-5",
defaultModel: "claude-opus-4-6",
aliasIndex: baseAliasIndex(),
allowedModelCatalog: [],
resetModelOverride: false,
@@ -229,16 +229,16 @@ describe("/model chat UX", () => {
cfg,
agentDir: "/tmp/agent",
defaultProvider: "anthropic",
defaultModel: "claude-opus-4-5",
defaultModel: "claude-opus-4-6",
aliasIndex: baseAliasIndex(),
allowedModelKeys: new Set(["anthropic/claude-opus-4-5"]),
allowedModelCatalog: [{ provider: "anthropic", id: "claude-opus-4-5" }],
allowedModelKeys: new Set(["anthropic/claude-opus-4-6"]),
allowedModelCatalog: [{ provider: "anthropic", id: "claude-opus-4-6" }],
provider: "anthropic",
});
expect(resolved.modelSelection).toEqual({
provider: "anthropic",
model: "claude-opus-4-5",
model: "claude-opus-4-6",
isDefault: true,
});
expect(resolved.errorText).toBeUndefined();
@@ -247,7 +247,7 @@ describe("/model chat UX", () => {
it("rejects numeric /model selections with a guided error", () => {
const resolved = resolveModelSelectionForCommand({
command: "/model 99",
allowedModelKeys: new Set(["anthropic/claude-opus-4-5", "openai/gpt-4o"]),
allowedModelKeys: new Set(["anthropic/claude-opus-4-6", "openai/gpt-4o"]),
allowedModelCatalog: [],
});
@@ -258,30 +258,30 @@ describe("/model chat UX", () => {
it("treats explicit default /model selection as resettable default", () => {
const resolved = resolveModelSelectionForCommand({
command: "/model anthropic/claude-opus-4-5",
allowedModelKeys: new Set(["anthropic/claude-opus-4-5", "openai/gpt-4o"]),
command: "/model anthropic/claude-opus-4-6",
allowedModelKeys: new Set(["anthropic/claude-opus-4-6", "openai/gpt-4o"]),
allowedModelCatalog: [],
});
expect(resolved.errorText).toBeUndefined();
expect(resolved.modelSelection).toEqual({
provider: "anthropic",
model: "claude-opus-4-5",
model: "claude-opus-4-6",
isDefault: true,
});
});
it("keeps openrouter provider/model split for exact selections", () => {
const resolved = resolveModelSelectionForCommand({
command: "/model openrouter/anthropic/claude-opus-4-5",
allowedModelKeys: new Set(["openrouter/anthropic/claude-opus-4-5"]),
command: "/model openrouter/anthropic/claude-opus-4-6",
allowedModelKeys: new Set(["openrouter/anthropic/claude-opus-4-6"]),
allowedModelCatalog: [],
});
expect(resolved.errorText).toBeUndefined();
expect(resolved.modelSelection).toEqual({
provider: "openrouter",
model: "anthropic/claude-opus-4-5",
model: "anthropic/claude-opus-4-6",
isDefault: false,
});
});
@@ -327,7 +327,7 @@ describe("/model chat UX", () => {
cfg: { commands: { text: true } } as unknown as OpenClawConfig,
agentDir: TEST_AGENT_DIR,
defaultProvider: "anthropic",
defaultModel: "claude-opus-4-5",
defaultModel: "claude-opus-4-6",
aliasIndex: createGptAliasIndex(),
allowedModelKeys: new Set(["openai/gpt-4o"]),
allowedModelCatalog: [],
@@ -461,9 +461,9 @@ describe("/model chat UX", () => {
});
describe("handleDirectiveOnly model persist behavior (fixes #1435)", () => {
const allowedModelKeys = new Set(["anthropic/claude-opus-4-5", "openai/gpt-4o"]);
const allowedModelKeys = new Set(["anthropic/claude-opus-4-6", "openai/gpt-4o"]);
const allowedModelCatalog = [
{ provider: "anthropic", id: "claude-opus-4-5", name: "Claude Opus 4.5" },
{ provider: "anthropic", id: "claude-opus-4-6", name: "Claude Opus 4.5" },
{ provider: "openai", id: "gpt-4o", name: "GPT-4o" },
];
const sessionKey = "agent:main:dm:1";
@@ -486,14 +486,14 @@ describe("handleDirectiveOnly model persist behavior (fixes #1435)", () => {
elevatedEnabled: false,
elevatedAllowed: false,
defaultProvider: "anthropic",
defaultModel: "claude-opus-4-5",
defaultModel: "claude-opus-4-6",
aliasIndex: baseAliasIndex(),
allowedModelKeys,
allowedModelCatalog,
resetModelOverride: false,
provider: "anthropic",
model: "claude-opus-4-5",
initialModelLabel: "anthropic/claude-opus-4-5",
model: "claude-opus-4-6",
initialModelLabel: "anthropic/claude-opus-4-6",
formatModelSwitchEvent: (label) => `Switched to ${label}`,
...rest,
sessionEntry: entry,
@@ -667,7 +667,7 @@ describe("handleDirectiveOnly model persist behavior (fixes #1435)", () => {
describe("persistInlineDirectives internal exec scope gate", () => {
it("skips exec persistence for internal operator.write callers", async () => {
const allowedModelKeys = new Set(["anthropic/claude-opus-4-5", "openai/gpt-4o"]);
const allowedModelKeys = new Set(["anthropic/claude-opus-4-6", "openai/gpt-4o"]);
const directives = parseInlineDirectives(
"/exec host=node security=allowlist ask=always node=worker-1",
);
@@ -687,12 +687,12 @@ describe("persistInlineDirectives internal exec scope gate", () => {
elevatedEnabled: true,
elevatedAllowed: true,
defaultProvider: "anthropic",
defaultModel: "claude-opus-4-5",
defaultModel: "claude-opus-4-6",
aliasIndex: baseAliasIndex(),
allowedModelKeys,
provider: "anthropic",
model: "claude-opus-4-5",
initialModelLabel: "anthropic/claude-opus-4-5",
model: "claude-opus-4-6",
initialModelLabel: "anthropic/claude-opus-4-6",
formatModelSwitchEvent: (label) => `Switched to ${label}`,
agentCfg: undefined,
surface: "webchat",
@@ -706,7 +706,7 @@ describe("persistInlineDirectives internal exec scope gate", () => {
});
it("skips verbose persistence for internal operator.write callers", async () => {
const allowedModelKeys = new Set(["anthropic/claude-opus-4-5", "openai/gpt-4o"]);
const allowedModelKeys = new Set(["anthropic/claude-opus-4-6", "openai/gpt-4o"]);
const directives = parseInlineDirectives("/verbose full");
const sessionEntry = {
sessionId: "s1",
@@ -724,12 +724,12 @@ describe("persistInlineDirectives internal exec scope gate", () => {
elevatedEnabled: true,
elevatedAllowed: true,
defaultProvider: "anthropic",
defaultModel: "claude-opus-4-5",
defaultModel: "claude-opus-4-6",
aliasIndex: baseAliasIndex(),
allowedModelKeys,
provider: "anthropic",
model: "claude-opus-4-5",
initialModelLabel: "anthropic/claude-opus-4-5",
model: "claude-opus-4-6",
initialModelLabel: "anthropic/claude-opus-4-6",
formatModelSwitchEvent: (label) => `Switched to ${label}`,
agentCfg: undefined,
surface: "webchat",
@@ -740,7 +740,7 @@ describe("persistInlineDirectives internal exec scope gate", () => {
});
it("treats internal provider context as authoritative over external surface metadata", async () => {
const allowedModelKeys = new Set(["anthropic/claude-opus-4-5", "openai/gpt-4o"]);
const allowedModelKeys = new Set(["anthropic/claude-opus-4-6", "openai/gpt-4o"]);
const directives = parseInlineDirectives("/verbose full");
const sessionEntry = {
sessionId: "s1",
@@ -758,12 +758,12 @@ describe("persistInlineDirectives internal exec scope gate", () => {
elevatedEnabled: true,
elevatedAllowed: true,
defaultProvider: "anthropic",
defaultModel: "claude-opus-4-5",
defaultModel: "claude-opus-4-6",
aliasIndex: baseAliasIndex(),
allowedModelKeys,
provider: "anthropic",
model: "claude-opus-4-5",
initialModelLabel: "anthropic/claude-opus-4-5",
model: "claude-opus-4-6",
initialModelLabel: "anthropic/claude-opus-4-6",
formatModelSwitchEvent: (label) => `Switched to ${label}`,
agentCfg: undefined,
messageProvider: "webchat",

View File

@@ -371,7 +371,7 @@ describe("createFollowupRunner compaction", () => {
sessionStore,
sessionKey: "main",
storePath,
defaultModel: "anthropic/claude-opus-4-5",
defaultModel: "anthropic/claude-opus-4-6",
});
const queued = createQueuedRun({
@@ -422,7 +422,7 @@ describe("createFollowupRunner compaction", () => {
sessionStore,
sessionKey: "main",
storePath,
defaultModel: "anthropic/claude-opus-4-5",
defaultModel: "anthropic/claude-opus-4-6",
});
const queued = createQueuedRun({
@@ -476,7 +476,7 @@ describe("createFollowupRunner compaction", () => {
sessionStore,
sessionKey: "main",
storePath,
defaultModel: "anthropic/claude-opus-4-5",
defaultModel: "anthropic/claude-opus-4-6",
});
const queuedNext = createQueuedRun({
@@ -527,7 +527,7 @@ describe("createFollowupRunner compaction", () => {
sessionStore,
sessionKey: "main",
storePath,
defaultModel: "anthropic/claude-opus-4-5",
defaultModel: "anthropic/claude-opus-4-6",
});
const queued = createQueuedRun({
@@ -630,7 +630,7 @@ describe("createFollowupRunner compaction", () => {
sessionStore,
sessionKey: "main",
storePath,
defaultModel: "anthropic/claude-opus-4-5",
defaultModel: "anthropic/claude-opus-4-6",
agentCfgContextTokens: 100_000,
});
@@ -700,7 +700,7 @@ describe("createFollowupRunner bootstrap warning dedupe", () => {
sessionEntry,
sessionStore,
sessionKey: "main",
defaultModel: "anthropic/claude-opus-4-5",
defaultModel: "anthropic/claude-opus-4-6",
});
await runner(baseQueuedRun());
@@ -732,7 +732,7 @@ describe("createFollowupRunner messaging tool dedupe", () => {
opts: { onBlockReply },
typing: createMockTypingController(),
typingMode: "instant",
defaultModel: "anthropic/claude-opus-4-5",
defaultModel: "anthropic/claude-opus-4-6",
sessionEntry: overrides.sessionEntry,
sessionStore: overrides.sessionStore,
sessionKey: overrides.sessionKey,
@@ -882,7 +882,7 @@ describe("createFollowupRunner messaging tool dedupe", () => {
agentMeta: {
usage: { input: 1_000, output: 50 },
lastCallUsage: { input: 400, output: 20 },
model: "claude-opus-4-5",
model: "claude-opus-4-6",
provider: "anthropic",
},
},
@@ -900,7 +900,7 @@ describe("createFollowupRunner messaging tool dedupe", () => {
const store = loadSessionStore(storePath, { skipCache: true });
// totalTokens should reflect the last call usage snapshot, not the accumulated input.
expect(store[sessionKey]?.totalTokens).toBe(400);
expect(store[sessionKey]?.model).toBe("claude-opus-4-5");
expect(store[sessionKey]?.model).toBe("claude-opus-4-6");
// Accumulated usage is still stored for usage/cost tracking.
expect(store[sessionKey]?.inputTokens).toBe(1_000);
expect(store[sessionKey]?.outputTokens).toBe(50);
@@ -928,7 +928,7 @@ describe("createFollowupRunner messaging tool dedupe", () => {
agentMeta: {
usage: { input: 10, output: 5 },
lastCallUsage: { input: 6, output: 3 },
model: "claude-opus-4-5",
model: "claude-opus-4-6",
},
},
});
@@ -937,7 +937,7 @@ describe("createFollowupRunner messaging tool dedupe", () => {
opts: { onBlockReply: createAsyncReplySpy() },
typing: createMockTypingController(),
typingMode: "instant",
defaultModel: "anthropic/claude-opus-4-5",
defaultModel: "anthropic/claude-opus-4-6",
sessionEntry,
sessionStore,
sessionKey,
@@ -1037,7 +1037,7 @@ describe("createFollowupRunner typing cleanup", () => {
opts: { onBlockReply: createAsyncReplySpy() },
typing,
typingMode: "instant",
defaultModel: "anthropic/claude-opus-4-5",
defaultModel: "anthropic/claude-opus-4-6",
});
await runner(baseQueuedRun());
@@ -1067,7 +1067,7 @@ describe("createFollowupRunner typing cleanup", () => {
opts: { onBlockReply: vi.fn(async () => {}) },
typing,
typingMode: "instant",
defaultModel: "anthropic/claude-opus-4-5",
defaultModel: "anthropic/claude-opus-4-6",
});
await runner(baseQueuedRun());
@@ -1087,7 +1087,7 @@ describe("createFollowupRunner typing cleanup", () => {
opts: { onBlockReply },
typing,
typingMode: "instant",
defaultModel: "anthropic/claude-opus-4-5",
defaultModel: "anthropic/claude-opus-4-6",
});
await runner(baseQueuedRun());
@@ -1110,7 +1110,7 @@ describe("createFollowupRunner agentDir forwarding", () => {
opts: { onBlockReply },
typing: createMockTypingController(),
typingMode: "instant",
defaultModel: "anthropic/claude-opus-4-5",
defaultModel: "anthropic/claude-opus-4-6",
});
const agentDir = path.join("/tmp", "agent-dir");
const queued = createQueuedRun();

View File

@@ -9,7 +9,7 @@ import { createModelSelectionState, resolveContextTokens } from "./model-selecti
vi.mock("../../agents/model-catalog.js", () => ({
loadModelCatalog: vi.fn(async () => [
{ provider: "anthropic", id: "claude-opus-4-5", name: "Claude Opus 4.5" },
{ provider: "anthropic", id: "claude-opus-4-6", name: "Claude Opus 4.5" },
{ provider: "inferencer", id: "deepseek-v3-4bit-mlx", name: "DeepSeek V3" },
{ provider: "kimi", id: "kimi-code", name: "Kimi Code" },
{ provider: "openai", id: "gpt-4o-mini", name: "GPT-4o mini" },
@@ -203,7 +203,7 @@ describe("createModelSelectionState parent inheritance", () => {
defaultProvider,
defaultModel,
provider: "anthropic",
model: "claude-opus-4-5",
model: "claude-opus-4-6",
hasModelDirective: false,
hasResolvedHeartbeatModelOverride,
});
@@ -280,7 +280,7 @@ describe("createModelSelectionState parent inheritance", () => {
});
const sessionEntry = makeEntry({
providerOverride: "anthropic",
modelOverride: "claude-opus-4-5",
modelOverride: "claude-opus-4-6",
});
const state = await resolveStateWithParent({
cfg,
@@ -291,7 +291,7 @@ describe("createModelSelectionState parent inheritance", () => {
});
expect(state.provider).toBe("anthropic");
expect(state.model).toBe("claude-opus-4-5");
expect(state.model).toBe("claude-opus-4-6");
});
it("ignores parent override when disallowed", async () => {
@@ -308,7 +308,7 @@ describe("createModelSelectionState parent inheritance", () => {
const sessionKey = "agent:main:slack:channel:c1:thread:123";
const parentEntry = makeEntry({
providerOverride: "anthropic",
modelOverride: "claude-opus-4-5",
modelOverride: "claude-opus-4-6",
});
const state = await resolveStateWithParent({
cfg,
@@ -332,7 +332,7 @@ describe("createModelSelectionState parent inheritance", () => {
const state = await resolveHeartbeatStoredOverrideState(true);
expect(state.provider).toBe("anthropic");
expect(state.model).toBe("claude-opus-4-5");
expect(state.model).toBe("claude-opus-4-6");
});
});
@@ -386,12 +386,12 @@ describe("createModelSelectionState respects session model override", () => {
modelProvider: "kimi",
contextTokens: 262_000,
providerOverride: "anthropic",
modelOverride: "claude-opus-4-5",
modelOverride: "claude-opus-4-6",
}),
);
expect(state.provider).toBe("anthropic");
expect(state.model).toBe("claude-opus-4-5");
expect(state.model).toBe("claude-opus-4-6");
});
it("uses default provider when providerOverride is not set but modelOverride is", async () => {
@@ -495,9 +495,9 @@ describe("createModelSelectionState respects session model override", () => {
const cfg = {
agents: {
defaults: {
model: { primary: "anthropic/claude-opus-4-5" },
model: { primary: "anthropic/claude-opus-4-6" },
models: {
"anthropic/claude-opus-4-5": {},
"anthropic/claude-opus-4-6": {},
"ollama-beelink2/qwen2.5-coder:7b": {},
},
},
@@ -516,9 +516,9 @@ describe("createModelSelectionState respects session model override", () => {
sessionStore,
sessionKey,
defaultProvider: "anthropic",
defaultModel: "claude-opus-4-5",
defaultModel: "claude-opus-4-6",
provider: "anthropic",
model: "claude-opus-4-5",
model: "claude-opus-4-6",
hasModelDirective: false,
});

View File

@@ -18,7 +18,7 @@ function makeRun(): FollowupRun["run"] {
workspaceDir: "/tmp/workspace",
config: {} as FollowupRun["run"]["config"],
provider: "anthropic",
model: "claude-opus-4-5",
model: "claude-opus-4-6",
authProfileId: "profile-a",
authProfileIdSource: "user",
timeoutMs: 30_000,

View File

@@ -461,14 +461,14 @@ describe("resolveResponsePrefixTemplate", () => {
{
name: "model",
template: "[{model}]",
values: { model: "gpt-5.2" },
expected: "[gpt-5.2]",
values: { model: "gpt-5.4" },
expected: "[gpt-5.4]",
},
{
name: "modelFull",
template: "[{modelFull}]",
values: { modelFull: "openai-codex/gpt-5.2" },
expected: "[openai-codex/gpt-5.2]",
values: { modelFull: "openai-codex/gpt-5.4" },
expected: "[openai-codex/gpt-5.4]",
},
{
name: "provider",
@@ -503,8 +503,8 @@ describe("resolveResponsePrefixTemplate", () => {
{
name: "case-insensitive variables",
template: "[{MODEL} | {ThinkingLevel}]",
values: { model: "gpt-5.2", thinkingLevel: "low" },
expected: "[gpt-5.2 | low]",
values: { model: "gpt-5.4", thinkingLevel: "low" },
expected: "[gpt-5.4 | low]",
},
{
name: "all variables",
@@ -512,10 +512,10 @@ describe("resolveResponsePrefixTemplate", () => {
values: {
identityName: "OpenClaw",
provider: "anthropic",
model: "claude-opus-4-5",
model: "claude-opus-4-6",
thinkingLevel: "high",
},
expected: "[OpenClaw] anthropic/claude-opus-4-5 (think:high)",
expected: "[OpenClaw] anthropic/claude-opus-4-6 (think:high)",
},
] as const;
expectResolvedTemplateCases(cases);
@@ -534,14 +534,14 @@ describe("resolveResponsePrefixTemplate", () => {
{
name: "unrecognized variable",
template: "[{unknownVar}]",
values: { model: "gpt-5.2" },
values: { model: "gpt-5.4" },
expected: "[{unknownVar}]",
},
{
name: "mixed resolved/unresolved",
template: "[{model} | {provider}]",
values: { model: "gpt-5.2" },
expected: "[gpt-5.2 | {provider}]",
values: { model: "gpt-5.4" },
expected: "[gpt-5.4 | {provider}]",
},
] as const;
expectResolvedTemplateCases(cases);
@@ -957,9 +957,9 @@ describe("createStreamingDirectiveAccumulator", () => {
describe("extractShortModelName", () => {
it("normalizes provider/date/latest suffixes while preserving other IDs", () => {
const cases = [
["openai-codex/gpt-5.2-codex", "gpt-5.2-codex"],
["claude-opus-4-5-20251101", "claude-opus-4-5"],
["gpt-5.2-latest", "gpt-5.2"],
["openai-codex/gpt-5.4", "gpt-5.4"],
["claude-opus-4-6-20251101", "claude-opus-4-6"],
["gpt-5.4-latest", "gpt-5.4"],
// Date suffix must be exactly 8 digits at the end.
["model-123456789", "model-123456789"],
] as const;

View File

@@ -6,9 +6,9 @@
*/
export type ResponsePrefixContext = {
/** Short model name (e.g., "gpt-5.2", "claude-opus-4-6") */
/** Short model name (e.g., "gpt-5.4", "claude-opus-4-6") */
model?: string;
/** Full model ID including provider (e.g., "openai-codex/gpt-5.2") */
/** Full model ID including provider (e.g., "openai-codex/gpt-5.4") */
modelFull?: string;
/** Provider name (e.g., "openai-codex", "anthropic") */
provider?: string;
@@ -30,10 +30,10 @@ const TEMPLATE_VAR_PATTERN = /\{([a-zA-Z][a-zA-Z0-9.]*)\}/g;
*
* @example
* resolveResponsePrefixTemplate("[{model} | think:{thinkingLevel}]", {
* model: "gpt-5.2",
* model: "gpt-5.4",
* thinkingLevel: "high"
* })
* // Returns: "[gpt-5.2 | think:high]"
* // Returns: "[gpt-5.4 | think:high]"
*/
export function resolveResponsePrefixTemplate(
template: string | undefined,
@@ -70,14 +70,14 @@ export function resolveResponsePrefixTemplate(
* Extract short model name from a full model string.
*
* Strips:
* - Provider prefix (e.g., "openai/" from "openai/gpt-5.2")
* - Provider prefix (e.g., "openai/" from "openai/gpt-5.4")
* - Date suffixes (e.g., "-20260205" from "claude-opus-4-6-20260205")
* - Common version suffixes (e.g., "-latest")
*
* @example
* extractShortModelName("openai-codex/gpt-5.2") // "gpt-5.2"
* extractShortModelName("openai-codex/gpt-5.4") // "gpt-5.4"
* extractShortModelName("claude-opus-4-6-20260205") // "claude-opus-4-6"
* extractShortModelName("gpt-5.2-latest") // "gpt-5.2"
* extractShortModelName("gpt-5.4-latest") // "gpt-5.4"
*/
export function extractShortModelName(fullModel: string): string {
// Strip provider prefix
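
A minimal sketch of the whole normalization under the rules documented above (a hypothetical re-implementation; the real function body continues past this hunk):

function shortModelNameSketch(fullModel: string): string {
  // Strip provider prefix: "openai-codex/gpt-5.4" -> "gpt-5.4".
  const base = fullModel.slice(fullModel.lastIndexOf("/") + 1);
  return base
    .replace(/-\d{8}$/, "") // date suffix: exactly 8 trailing digits
    .replace(/-latest$/, ""); // common version suffix
}

shortModelNameSketch("claude-opus-4-6-20260205"); // "claude-opus-4-6"
shortModelNameSketch("model-123456789"); // unchanged: 9 digits is not a date suffix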

View File

@@ -31,7 +31,7 @@ export function createSandboxMediaStageConfig(home: string): OpenClawConfig {
return {
agents: {
defaults: {
model: "anthropic/claude-opus-4-5",
model: "anthropic/claude-opus-4-6",
workspace: join(home, "openclaw"),
sandbox: {
mode: "non-main",

View File

@@ -622,7 +622,7 @@ describe("buildStatusMessage", () => {
it("shows verbose/elevated labels only when enabled", () => {
const text = buildStatusMessage({
agent: { model: "anthropic/claude-opus-4-5" },
agent: { model: "anthropic/claude-opus-4-6" },
sessionEntry: { sessionId: "v1", updatedAt: 0 },
sessionKey: "agent:main:main",
sessionScope: "per-sender",
@@ -638,7 +638,7 @@ describe("buildStatusMessage", () => {
it("includes media understanding decisions when present", () => {
const text = buildStatusMessage({
agent: { model: "anthropic/claude-opus-4-5" },
agent: { model: "anthropic/claude-opus-4-6" },
sessionEntry: { sessionId: "media", updatedAt: 0 },
sessionKey: "agent:main:main",
queue: { mode: "none" },
@@ -666,12 +666,12 @@ describe("buildStatusMessage", () => {
});
const normalized = normalizeTestText(text);
expect(normalized).toContain("Media: image ok (openai/gpt-5.2) · audio skipped (maxBytes)");
expect(normalized).toContain("Media: image ok (openai/gpt-5.4) · audio skipped (maxBytes)");
});
it("omits media line when all decisions are none", () => {
const text = buildStatusMessage({
agent: { model: "anthropic/claude-opus-4-5" },
agent: { model: "anthropic/claude-opus-4-6" },
sessionEntry: { sessionId: "media-none", updatedAt: 0 },
sessionKey: "agent:main:main",
queue: { mode: "none" },
@@ -687,7 +687,7 @@ describe("buildStatusMessage", () => {
it("does not show elevated label when session explicitly disables it", () => {
const text = buildStatusMessage({
agent: { model: "anthropic/claude-opus-4-5", elevatedDefault: "on" },
agent: { model: "anthropic/claude-opus-4-6", elevatedDefault: "on" },
sessionEntry: { sessionId: "v1", updatedAt: 0, elevatedLevel: "off" },
sessionKey: "agent:main:main",
sessionScope: "per-sender",
@@ -704,7 +704,7 @@ describe("buildStatusMessage", () => {
it("shows selected model and active runtime model when they differ", () => {
const text = buildStatusMessage({
agent: {
model: "anthropic/claude-opus-4-5",
model: "anthropic/claude-opus-4-6",
contextTokens: 32_000,
},
sessionEntry: {
@@ -789,14 +789,14 @@ describe("buildStatusMessage", () => {
it("keeps provider prefix from configured model", () => {
const text = buildStatusMessage({
agent: {
model: "google-antigravity/claude-sonnet-4-5",
model: "google-antigravity/claude-sonnet-4-6",
},
sessionScope: "per-sender",
queue: { mode: "collect", depth: 0 },
modelAuth: "api-key",
});
expect(normalizeTestText(text)).toContain("Model: google-antigravity/claude-sonnet-4-5");
expect(normalizeTestText(text)).toContain("Model: google-antigravity/claude-sonnet-4-6");
});
it("handles missing agent config gracefully", () => {
@@ -853,7 +853,7 @@ describe("buildStatusMessage", () => {
it("inserts usage summary beneath context line", () => {
const text = buildStatusMessage({
agent: { model: "anthropic/claude-opus-4-5", contextTokens: 32_000 },
agent: { model: "anthropic/claude-opus-4-6", contextTokens: 32_000 },
sessionEntry: { sessionId: "u1", updatedAt: 0, totalTokens: 1000 },
sessionKey: "agent:main:main",
sessionScope: "per-sender",
@@ -876,7 +876,7 @@ describe("buildStatusMessage", () => {
anthropic: {
models: [
{
id: "claude-opus-4-5",
id: "claude-opus-4-6",
cost: {
input: 1,
output: 1,
@@ -889,7 +889,7 @@ describe("buildStatusMessage", () => {
},
},
} as unknown as OpenClawConfig,
agent: { model: "anthropic/claude-opus-4-5" },
agent: { model: "anthropic/claude-opus-4-6" },
sessionEntry: { sessionId: "c1", updatedAt: 0, inputTokens: 10 },
sessionKey: "agent:main:main",
sessionScope: "per-sender",
@@ -929,7 +929,7 @@ describe("buildStatusMessage", () => {
type: "message",
message: {
role: "assistant",
model: params.model ?? "claude-opus-4-5",
model: params.model ?? "claude-opus-4-6",
usage: params.usage,
},
}),
@@ -960,7 +960,7 @@ describe("buildStatusMessage", () => {
function buildTranscriptStatusText(params: { sessionId: string; sessionKey: string }) {
return buildStatusMessage({
agent: {
model: "anthropic/claude-opus-4-5",
model: "anthropic/claude-opus-4-6",
contextTokens: 32_000,
},
sessionEntry: {
@@ -1038,7 +1038,7 @@ describe("buildStatusMessage", () => {
const text = buildStatusMessage({
agent: {
model: "anthropic/claude-opus-4-5",
model: "anthropic/claude-opus-4-6",
contextTokens: 32_000,
},
agentId: "worker2",

View File

@@ -83,23 +83,23 @@ describe("listThinkingLevels", () => {
it("includes xhigh for provider-advertised models", () => {
providerRuntimeMocks.resolveProviderXHighThinking.mockImplementation(({ provider, context }) =>
(provider === "openai" && ["gpt-5.2", "gpt-5.4", "gpt-5.4-pro"].includes(context.modelId)) ||
(provider === "openai" && ["gpt-5.4", "gpt-5.4", "gpt-5.4-pro"].includes(context.modelId)) ||
(provider === "openai-codex" &&
["gpt-5.2-codex", "gpt-5.4", "gpt-5.3-codex-spark"].includes(context.modelId)) ||
(provider === "github-copilot" && ["gpt-5.2", "gpt-5.2-codex"].includes(context.modelId))
["gpt-5.4", "gpt-5.4", "gpt-5.3-codex-spark"].includes(context.modelId)) ||
(provider === "github-copilot" && ["gpt-5.4", "gpt-5.4"].includes(context.modelId))
? true
: undefined,
);
expect(listThinkingLevels("openai-codex", "gpt-5.2-codex")).toContain("xhigh");
expect(listThinkingLevels("openai-codex", "gpt-5.4")).toContain("xhigh");
expect(listThinkingLevels("openai-codex", "gpt-5.4")).toContain("xhigh");
expect(listThinkingLevels("openai-codex", "gpt-5.3-codex-spark")).toContain("xhigh");
expect(listThinkingLevels("openai", "gpt-5.2")).toContain("xhigh");
expect(listThinkingLevels("openai", "gpt-5.4")).toContain("xhigh");
expect(listThinkingLevels("openai", "gpt-5.4")).toContain("xhigh");
expect(listThinkingLevels("openai", "gpt-5.4-pro")).toContain("xhigh");
expect(listThinkingLevels("openai-codex", "gpt-5.4")).toContain("xhigh");
expect(listThinkingLevels("github-copilot", "gpt-5.2")).toContain("xhigh");
expect(listThinkingLevels("github-copilot", "gpt-5.2-codex")).toContain("xhigh");
expect(listThinkingLevels("github-copilot", "gpt-5.4")).toContain("xhigh");
expect(listThinkingLevels("github-copilot", "gpt-5.4")).toContain("xhigh");
});
it("excludes xhigh for non-codex models", () => {

View File

@@ -76,8 +76,8 @@ function mockConfig(
const cfg = {
agents: {
defaults: {
model: { primary: "anthropic/claude-opus-4-5" },
models: { "anthropic/claude-opus-4-5": {} },
model: { primary: "anthropic/claude-opus-4-6" },
models: { "anthropic/claude-opus-4-6": {} },
workspace: path.join(home, "openclaw"),
...agentOverrides,
},

View File

@@ -148,8 +148,8 @@ function mockConfig(
const cfg = {
agents: {
defaults: {
model: { primary: "anthropic/claude-opus-4-5" },
models: { "anthropic/claude-opus-4-5": {} },
model: { primary: "anthropic/claude-opus-4-6" },
models: { "anthropic/claude-opus-4-6": {} },
workspace: path.join(home, "openclaw"),
...agentOverrides,
},
@@ -373,8 +373,8 @@ describe("agentCommand", () => {
const loadedConfig = {
agents: {
defaults: {
model: { primary: "anthropic/claude-opus-4-5" },
models: { "anthropic/claude-opus-4-5": {} },
model: { primary: "anthropic/claude-opus-4-6" },
models: { "anthropic/claude-opus-4-6": {} },
workspace: path.join(home, "openclaw"),
},
},
@@ -709,7 +709,7 @@ describe("agentCommand", () => {
mockConfig(home, store, {
model: { primary: "openai/gpt-4.1-mini" },
models: {
"anthropic/claude-opus-4-5": {},
"anthropic/claude-opus-4-6": {},
"openai/gpt-4.1-mini": {},
},
});
@@ -728,26 +728,26 @@ describe("agentCommand", () => {
sessionId: "session-subagent",
updatedAt: Date.now(),
providerOverride: "anthropic",
modelOverride: "claude-opus-4-5",
modelOverride: "claude-opus-4-6",
},
});
mockConfig(home, store, {
model: {
primary: "openai/gpt-4.1-mini",
fallbacks: ["openai/gpt-5.2"],
fallbacks: ["openai/gpt-5.4"],
},
models: {
"anthropic/claude-opus-4-5": {},
"anthropic/claude-opus-4-6": {},
"openai/gpt-4.1-mini": {},
"openai/gpt-5.2": {},
"openai/gpt-5.4": {},
},
});
vi.mocked(loadModelCatalog).mockResolvedValueOnce([
{ id: "claude-opus-4-5", name: "Opus", provider: "anthropic" },
{ id: "claude-opus-4-6", name: "Opus", provider: "anthropic" },
{ id: "gpt-4.1-mini", name: "GPT-4.1 Mini", provider: "openai" },
{ id: "gpt-5.2", name: "GPT-5.2", provider: "openai" },
{ id: "gpt-5.4", name: "GPT-5.2", provider: "openai" },
]);
vi.mocked(runEmbeddedPiAgent)
.mockRejectedValueOnce(Object.assign(new Error("rate limited"), { status: 429 }))
@@ -755,7 +755,7 @@ describe("agentCommand", () => {
payloads: [{ text: "ok" }],
meta: {
durationMs: 5,
agentMeta: { sessionId: "session-subagent", provider: "openai", model: "gpt-5.2" },
agentMeta: { sessionId: "session-subagent", provider: "openai", model: "gpt-5.4" },
},
});
@@ -771,8 +771,8 @@ describe("agentCommand", () => {
.mocked(runEmbeddedPiAgent)
.mock.calls.map((call) => ({ provider: call[0]?.provider, model: call[0]?.model }));
expect(attempts).toEqual([
{ provider: "anthropic", model: "claude-opus-4-5" },
{ provider: "openai", model: "gpt-5.2" },
{ provider: "anthropic", model: "claude-opus-4-6" },
{ provider: "openai", model: "gpt-5.4" },
]);
});
});
@@ -790,12 +790,12 @@ describe("agentCommand", () => {
});
mockConfig(home, store, {
model: { primary: "anthropic/claude-opus-4-5" },
model: { primary: "anthropic/claude-opus-4-6" },
models: {},
});
vi.mocked(loadModelCatalog).mockResolvedValueOnce([
{ id: "claude-opus-4-5", name: "Opus", provider: "anthropic" },
{ id: "claude-opus-4-6", name: "Opus", provider: "anthropic" },
]);
await runAgentWithSessionKey("agent:main:subagent:allow-any");
@@ -821,11 +821,11 @@ describe("agentCommand", () => {
sessionId: "session-clear-overrides",
updatedAt: Date.now(),
providerOverride: "anthropic",
modelOverride: "claude-opus-4-5",
modelOverride: "claude-opus-4-6",
authProfileOverride: "profile-legacy",
authProfileOverrideSource: "user",
authProfileOverrideCompactionCount: 2,
fallbackNoticeSelectedModel: "anthropic/claude-opus-4-5",
fallbackNoticeSelectedModel: "anthropic/claude-opus-4-6",
fallbackNoticeActiveModel: "openai/gpt-4.1-mini",
fallbackNoticeReason: "fallback",
},
@@ -839,7 +839,7 @@ describe("agentCommand", () => {
});
vi.mocked(loadModelCatalog).mockResolvedValueOnce([
{ id: "claude-opus-4-5", name: "Opus", provider: "anthropic" },
{ id: "claude-opus-4-6", name: "Opus", provider: "anthropic" },
{ id: "gpt-4.1-mini", name: "GPT-4.1 Mini", provider: "openai" },
]);
@@ -877,7 +877,7 @@ describe("agentCommand", () => {
const store = path.join(home, "sessions.json");
mockConfig(home, store, {
models: {
"anthropic/claude-opus-4-5": {},
"anthropic/claude-opus-4-6": {},
"openai/gpt-4.1-mini": {},
},
});
@@ -908,7 +908,7 @@ describe("agentCommand", () => {
const store = path.join(home, "sessions.json");
mockConfig(home, store, {
models: {
"anthropic/claude-opus-4-5": {},
"anthropic/claude-opus-4-6": {},
"openai/gpt-4.1-mini": {},
},
});
@@ -974,7 +974,7 @@ describe("agentCommand", () => {
});
mockConfig(home, store, {
models: {
"anthropic/claude-opus-4-5": {},
"anthropic/claude-opus-4-6": {},
"openai/gpt-4.1-mini": {},
},
});
@@ -1088,7 +1088,7 @@ describe("agentCommand", () => {
it("defaults thinking to low for reasoning-capable models", async () => {
await expectDefaultThinkLevel({
catalogEntry: {
id: "claude-opus-4-5",
id: "claude-opus-4-6",
name: "Opus 4.5",
provider: "anthropic",
reasoning: true,
@@ -1118,13 +1118,13 @@ describe("agentCommand", () => {
agentOverrides: {
thinkingDefault: "low",
models: {
"anthropic/claude-opus-4-5": {
"anthropic/claude-opus-4-6": {
params: { thinking: "high" },
},
},
},
catalogEntry: {
id: "claude-opus-4-5",
id: "claude-opus-4-6",
name: "Opus 4.5",
provider: "anthropic",
reasoning: true,

View File

@@ -215,7 +215,7 @@ describe("applyAuthChoiceLoadedPluginProvider", () => {
config: {
agents: {
defaults: {
model: { primary: "anthropic/claude-sonnet-4-5" },
model: { primary: "anthropic/claude-sonnet-4-6" },
},
},
},

View File

@@ -63,7 +63,7 @@ describe("applyAuthChoice (moonshot)", () => {
config: {
agents: {
defaults: {
model: { primary: "anthropic/claude-opus-4-5" },
model: { primary: "anthropic/claude-opus-4-6" },
},
},
},
@@ -74,7 +74,7 @@ describe("applyAuthChoice (moonshot)", () => {
expect.objectContaining({ message: "Enter Moonshot API key (.cn)" }),
);
expect(resolveAgentModelPrimaryValue(result.config.agents?.defaults?.model)).toBe(
"anthropic/claude-opus-4-5",
"anthropic/claude-opus-4-6",
);
expect(result.config.models?.providers?.moonshot?.baseUrl).toBe("https://api.moonshot.cn/v1");
expect(result.config.models?.providers?.moonshot?.models?.[0]?.input).toContain("image");

View File

@@ -92,7 +92,7 @@ describe("promptDefaultModel", () => {
loadModelCatalog.mockResolvedValue([
{
provider: "anthropic",
id: "claude-sonnet-4-5",
id: "claude-sonnet-4-6",
name: "Claude Sonnet 4.5",
},
]);
@@ -223,17 +223,17 @@ describe("promptModelAllowlist", () => {
loadModelCatalog.mockResolvedValue([
{
provider: "anthropic",
id: "claude-opus-4-5",
id: "claude-opus-4-6",
name: "Claude Opus 4.5",
},
{
provider: "anthropic",
id: "claude-sonnet-4-5",
id: "claude-sonnet-4-6",
name: "Claude Sonnet 4.5",
},
{
provider: "openai",
id: "gpt-5.2",
id: "gpt-5.4",
name: "GPT-5.2",
},
]);
@@ -245,12 +245,12 @@ describe("promptModelAllowlist", () => {
await promptModelAllowlist({
config,
prompter,
allowedKeys: ["anthropic/claude-opus-4-5"],
allowedKeys: ["anthropic/claude-opus-4-6"],
});
const options = multiselect.mock.calls[0]?.[0]?.options ?? [];
expect(options.map((opt: { value: string }) => opt.value)).toEqual([
"anthropic/claude-opus-4-5",
"anthropic/claude-opus-4-6",
]);
});
@@ -258,7 +258,7 @@ describe("promptModelAllowlist", () => {
loadModelCatalog.mockResolvedValue([
{
provider: "anthropic",
id: "claude-sonnet-4-5",
id: "claude-sonnet-4-6",
name: "Claude Sonnet 4.5",
},
{
@@ -329,16 +329,16 @@ describe("applyModelAllowlist", () => {
agents: {
defaults: {
models: {
"openai/gpt-5.2": { alias: "gpt" },
"anthropic/claude-opus-4-5": { alias: "opus" },
"openai/gpt-5.4": { alias: "gpt" },
"anthropic/claude-opus-4-6": { alias: "opus" },
},
},
},
} as OpenClawConfig;
const next = applyModelAllowlist(config, ["openai/gpt-5.2"]);
const next = applyModelAllowlist(config, ["openai/gpt-5.4"]);
expect(next.agents?.defaults?.models).toEqual({
"openai/gpt-5.2": { alias: "gpt" },
"openai/gpt-5.4": { alias: "gpt" },
});
});
@@ -347,7 +347,7 @@ describe("applyModelAllowlist", () => {
agents: {
defaults: {
models: {
"openai/gpt-5.2": { alias: "gpt" },
"openai/gpt-5.4": { alias: "gpt" },
},
},
},
@@ -363,18 +363,18 @@ describe("applyModelFallbacksFromSelection", () => {
const config = {
agents: {
defaults: {
model: { primary: "anthropic/claude-opus-4-5" },
model: { primary: "anthropic/claude-opus-4-6" },
},
},
} as OpenClawConfig;
const next = applyModelFallbacksFromSelection(config, [
"anthropic/claude-opus-4-5",
"anthropic/claude-sonnet-4-5",
"anthropic/claude-opus-4-6",
"anthropic/claude-sonnet-4-6",
]);
expect(next.agents?.defaults?.model).toEqual({
primary: "anthropic/claude-opus-4-5",
fallbacks: ["anthropic/claude-sonnet-4-5"],
primary: "anthropic/claude-opus-4-6",
fallbacks: ["anthropic/claude-sonnet-4-6"],
});
});
@@ -382,15 +382,15 @@ describe("applyModelFallbacksFromSelection", () => {
const config = {
agents: {
defaults: {
model: { primary: "anthropic/claude-opus-4-5", fallbacks: ["openai/gpt-5.2"] },
model: { primary: "anthropic/claude-opus-4-6", fallbacks: ["openai/gpt-5.4"] },
},
},
} as OpenClawConfig;
const next = applyModelFallbacksFromSelection(config, ["openai/gpt-5.2"]);
const next = applyModelFallbacksFromSelection(config, ["openai/gpt-5.4"]);
expect(next.agents?.defaults?.model).toEqual({
primary: "anthropic/claude-opus-4-5",
fallbacks: ["openai/gpt-5.2"],
primary: "anthropic/claude-opus-4-6",
fallbacks: ["openai/gpt-5.4"],
});
});
});

View File

@@ -348,7 +348,7 @@ describe("models list/status", () => {
code: "MODEL_DISCOVERY_UNAVAILABLE",
});
modelRegistryState.available = [
makeGoogleAntigravityTemplate("claude-opus-4-5-thinking", "Claude Opus 4.5 Thinking"),
makeGoogleAntigravityTemplate("claude-opus-4-6-thinking", "Claude Opus 4.5 Thinking"),
];
await expect(loadModelRegistry({})).rejects.toThrow("model discovery unavailable");

View File

@@ -74,8 +74,8 @@ const mocks = vi.hoisted(() => {
loadConfig: vi.fn().mockReturnValue({
agents: {
defaults: {
model: { primary: "anthropic/claude-opus-4-5", fallbacks: [] },
models: { "anthropic/claude-opus-4-5": { alias: "Opus" } },
model: { primary: "anthropic/claude-opus-4-6", fallbacks: [] },
models: { "anthropic/claude-opus-4-6": { alias: "Opus" } },
},
},
models: { providers: {} },
@@ -211,7 +211,7 @@ describe("modelsStatusCommand auth overview", () => {
const payload = JSON.parse(String((runtime.log as Mock).mock.calls[0]?.[0]));
expect(mocks.resolveOpenClawAgentDir).toHaveBeenCalled();
expect(payload.defaultModel).toBe("anthropic/claude-opus-4-5");
expect(payload.defaultModel).toBe("anthropic/claude-opus-4-6");
expect(payload.configPath).toBe("/tmp/openclaw-dev/openclaw.json");
expect(payload.auth.storePath).toBe("/tmp/openclaw-agent/auth-profiles.json");
expect(payload.auth.shellEnvFallback.enabled).toBe(true);

View File

@@ -60,8 +60,8 @@ function isAzureUrl(baseUrl: string): boolean {
* The api-version will be handled by the Azure OpenAI client or as a query param.
*
* Example:
* https://my-resource.services.ai.azure.com + gpt-5-nano
* => https://my-resource.services.ai.azure.com/openai/deployments/gpt-5-nano
* https://my-resource.services.ai.azure.com + gpt-5.4-nano
* => https://my-resource.services.ai.azure.com/openai/deployments/gpt-5.4-nano
*/
function transformAzureUrl(baseUrl: string, modelId: string): string {
const normalizedUrl = baseUrl.endsWith("/") ? baseUrl.slice(0, -1) : baseUrl;
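
Filling in the rest of the documented transform as a sketch (assuming nothing beyond the trailing-slash trim shown here plus the deployments path from the example above):

function transformAzureUrlSketch(baseUrl: string, modelId: string): string {
  // Trim a trailing slash, then append the Azure OpenAI deployments path.
  const normalized = baseUrl.endsWith("/") ? baseUrl.slice(0, -1) : baseUrl;
  return `${normalized}/openai/deployments/${modelId}`;
}

transformAzureUrlSketch("https://my-resource.services.ai.azure.com/", "gpt-5.4-nano");
// => "https://my-resource.services.ai.azure.com/openai/deployments/gpt-5.4-nano"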

View File

@@ -57,7 +57,7 @@ const SHARED_DEFAULT_MODEL_CASES: SharedDefaultModelCase[] = [
apply: applyGoogleGeminiModelDefault,
defaultModel: GOOGLE_GEMINI_DEFAULT_MODEL,
overrideConfig: {
agents: { defaults: { model: { primary: "anthropic/claude-opus-4-5" } } },
agents: { defaults: { model: { primary: "anthropic/claude-opus-4-6" } } },
} as OpenClawConfig,
alreadyDefaultConfig: {
agents: { defaults: { model: { primary: GOOGLE_GEMINI_DEFAULT_MODEL } } },
@@ -67,7 +67,7 @@ const SHARED_DEFAULT_MODEL_CASES: SharedDefaultModelCase[] = [
apply: applyOpencodeZenModelDefault,
defaultModel: OPENCODE_ZEN_DEFAULT_MODEL,
overrideConfig: {
agents: { defaults: { model: "anthropic/claude-opus-4-5" } },
agents: { defaults: { model: "anthropic/claude-opus-4-6" } },
} as OpenClawConfig,
alreadyDefaultConfig: {
agents: { defaults: { model: OPENCODE_ZEN_DEFAULT_MODEL } },
@@ -196,7 +196,7 @@ describe("applyOpenAIConfig", () => {
describe("applyOpencodeZenModelDefault", () => {
it("no-ops when already legacy opencode-zen default", () => {
const cfg = {
agents: { defaults: { model: "opencode-zen/claude-opus-4-5" } },
agents: { defaults: { model: "opencode-zen/claude-opus-4-6" } },
} as OpenClawConfig;
const applied = applyOpencodeZenModelDefault(cfg);
expectConfigUnchanged(applied, cfg);
@@ -207,7 +207,7 @@ describe("applyOpencodeZenModelDefault", () => {
agents: {
defaults: {
model: {
primary: "anthropic/claude-opus-4-5",
primary: "anthropic/claude-opus-4-6",
fallbacks: ["google/gemini-3-pro"],
},
},

View File

@@ -84,12 +84,12 @@ describe("statusSummaryRuntime.resolveSessionModelRef", () => {
},
} as never,
{
model: "gpt-5.2",
model: "gpt-5.4",
},
),
).toEqual({
provider: "openai",
model: "gpt-5.2",
model: "gpt-5.4",
});
});
});

View File

@@ -199,7 +199,7 @@ describe("legacy config detection", () => {
});
it("rejects legacy agent.model string", async () => {
const res = validateConfigObject({
agent: { model: "anthropic/claude-opus-4-5" },
agent: { model: "anthropic/claude-opus-4-6" },
});
expect(res.ok).toBe(false);
if (!res.ok) {
@@ -243,12 +243,12 @@ describe("legacy config detection", () => {
it("does not rewrite removed legacy model config migrations", async () => {
const res = migrateLegacyConfig({
agent: {
model: "anthropic/claude-opus-4-5",
model: "anthropic/claude-opus-4-6",
modelFallbacks: ["openai/gpt-4.1-mini"],
imageModel: "openai/gpt-4.1-mini",
imageModelFallbacks: ["anthropic/claude-opus-4-5"],
allowedModels: ["anthropic/claude-opus-4-5", "openai/gpt-4.1-mini"],
modelAliases: { Opus: "anthropic/claude-opus-4-5" },
imageModelFallbacks: ["anthropic/claude-opus-4-6"],
allowedModels: ["anthropic/claude-opus-4-6", "openai/gpt-4.1-mini"],
modelAliases: { Opus: "anthropic/claude-opus-4-6" },
},
});
expect(res.changes).toEqual([]);

View File

@@ -111,7 +111,7 @@ describe("legacy config detection", () => {
it("does not rewrite removed agent config migrations", async () => {
const res = migrateLegacyConfig({
agent: {
model: "openai/gpt-5.2",
model: "openai/gpt-5.4",
tools: { allow: ["sessions.list"], deny: ["danger"] },
elevated: { enabled: true, allowFrom: { discord: ["user:1"] } },
bash: { timeoutSec: 12 },

View File

@@ -537,7 +537,7 @@ describe("config plugin validation", () => {
channels: {
modelByChannel: {
openai: {
whatsapp: "openai/gpt-5.2",
whatsapp: "openai/gpt-5.4",
},
},
},

View File

@@ -74,14 +74,14 @@ describe("config pruning defaults", () => {
},
agents: {
defaults: {
model: { primary: "anthropic/claude-opus-4-5" },
model: { primary: "anthropic/claude-opus-4-6" },
},
},
});
expectAnthropicPruningDefaults(cfg);
expect(
cfg.agents?.defaults?.models?.["anthropic/claude-opus-4-5"]?.params?.cacheRetention,
cfg.agents?.defaults?.models?.["anthropic/claude-opus-4-6"]?.params?.cacheRetention,
).toBe("short");
});

View File

@@ -14,7 +14,7 @@ describe("applyModelDefaults", () => {
api: "openai-completions",
models: [
{
id: "gpt-5.2",
id: "gpt-5.4",
name: "GPT-5.2",
reasoning: false,
input: ["text"],
@@ -80,7 +80,7 @@ describe("applyModelDefaults", () => {
agents: {
defaults: {
models: {
"anthropic/claude-opus-4-5": { alias: "Opus" },
"anthropic/claude-opus-4-6": { alias: "Opus" },
},
},
},
@@ -88,7 +88,7 @@ describe("applyModelDefaults", () => {
const next = applyModelDefaults(cfg);
expect(next.agents?.defaults?.models?.["anthropic/claude-opus-4-5"]?.alias).toBe("Opus");
expect(next.agents?.defaults?.models?.["anthropic/claude-opus-4-6"]?.alias).toBe("Opus");
});
it("respects explicit empty alias disables", () => {

View File

@@ -333,7 +333,7 @@ describe("applyPluginAutoEnable", () => {
channels: {
modelByChannel: {
openai: {
whatsapp: "openai/gpt-5.2",
whatsapp: "openai/gpt-5.4",
},
},
},

View File

@@ -21043,7 +21043,7 @@ export const GENERATED_BASE_CONFIG_SCHEMA = {
},
"tools.exec.applyPatch.allowModels": {
label: "apply_patch Model Allowlist",
help: 'Optional allowlist of model ids (e.g. "gpt-5.2" or "openai/gpt-5.2").',
help: 'Optional allowlist of model ids (e.g. "gpt-5.4" or "openai/gpt-5.4").',
tags: ["access", "tools"],
},
"tools.loopDetection.enabled": {

View File

@@ -528,7 +528,7 @@ export const FIELD_HELP: Record<string, string> = {
"tools.exec.applyPatch.workspaceOnly":
"Restrict apply_patch paths to the workspace directory (default: true). Set false to allow writing outside the workspace (dangerous).",
"tools.exec.applyPatch.allowModels":
'Optional allowlist of model ids (e.g. "gpt-5.2" or "openai/gpt-5.2").',
'Optional allowlist of model ids (e.g. "gpt-5.4" or "openai/gpt-5.4").',
"tools.loopDetection.enabled":
"Enable repetitive tool-call loop detection and backoff safety checks (default: false).",
"tools.loopDetection.historySize": "Tool history window size for loop detection (default: 30).",

View File

@@ -699,7 +699,7 @@ describe("sessions", () => {
update: async () => {
firstStarted.resolve();
await releaseFirst.promise;
return { modelOverride: "anthropic/claude-opus-4-5" };
return { modelOverride: "anthropic/claude-opus-4-6" };
},
});
const p2 = updateSessionStoreEntry({
@@ -716,7 +716,7 @@ describe("sessions", () => {
await Promise.all([p1, p2]);
const store = loadSessionStore(storePath);
expect(store[mainSessionKey]?.modelOverride).toBe("anthropic/claude-opus-4-5");
expect(store[mainSessionKey]?.modelOverride).toBe("anthropic/claude-opus-4-6");
expect(store[mainSessionKey]?.thinkingLevel).toBe("high");
await expect(fs.stat(`${storePath}.lock`)).rejects.toThrow();
});

View File

@@ -278,10 +278,10 @@ describe("session store lock (Promise chain mutex)", () => {
model: "claude-opus-4-6",
},
{
model: "gpt-5.2",
model: "gpt-5.4",
},
);
expect(merged.model).toBe("gpt-5.2");
expect(merged.model).toBe("gpt-5.4");
expect(merged.modelProvider).toBeUndefined();
});

View File

@@ -346,7 +346,7 @@ export type AgentCompactionConfig = {
* Set to [] to disable post-compaction context injection entirely.
*/
postCompactionSections?: string[];
/** Optional model override for compaction summarization (e.g. "openrouter/anthropic/claude-sonnet-4-5").
/** Optional model override for compaction summarization (e.g. "openrouter/anthropic/claude-sonnet-4-6").
* When set, compaction uses this model instead of the agent's primary model.
* Falls back to the primary model when unset. */
model?: string;
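
A hypothetical config sketch for this override (the agents.defaults.compaction placement is an assumption; only the model key is documented in this type):

const cfgSketch = {
  agents: {
    defaults: {
      model: { primary: "anthropic/claude-opus-4-6" },
      // Summarize with a cheaper model; compaction falls back to primary when unset.
      compaction: { model: "openrouter/anthropic/claude-sonnet-4-6" },
    },
  },
};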

View File

@@ -277,7 +277,7 @@ export type ExecToolConfig = {
workspaceOnly?: boolean;
/**
* Optional allowlist of model ids that can use apply_patch.
* Accepts either raw ids (e.g. "gpt-5.2") or full ids (e.g. "openai/gpt-5.2").
* Accepts either raw ids (e.g. "gpt-5.4") or full ids (e.g. "openai/gpt-5.4").
*/
allowModels?: string[];
};
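
The dual-form acceptance implies a membership check along these lines (hypothetical helper, not the actual gate):

function isApplyPatchAllowedSketch(
  allowModels: string[] | undefined,
  provider: string,
  modelId: string,
): boolean {
  if (!allowModels) return true; // no allowlist configured (assumption)
  return allowModels.includes(modelId) || allowModels.includes(`${provider}/${modelId}`);
}

isApplyPatchAllowedSketch(["gpt-5.4"], "openai", "gpt-5.4"); // true via raw id
isApplyPatchAllowedSketch(["openai/gpt-5.4"], "openai", "gpt-5.4"); // true via full id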

View File

@@ -39,7 +39,7 @@ import { resolveCronModelSelection } from "./isolated-agent/model-selection.js";
const DEFAULT_MESSAGE = "do it";
const DEFAULT_PROVIDER = "anthropic";
const DEFAULT_MODEL = "claude-opus-4-5";
const DEFAULT_MODEL = "claude-opus-4-6";
type AgentTurnPayload = {
kind: "agentTurn";
@@ -76,7 +76,7 @@ function parseModelRef(raw: string): { provider: string; model: string } | { err
}
const provider = providerRaw === "bedrock" ? "amazon-bedrock" : providerRaw;
const model = provider === "anthropic" && modelRaw === "opus-4.5" ? "claude-opus-4-5" : modelRaw;
const model = provider === "anthropic" && modelRaw === "opus-4.5" ? "claude-opus-4-6" : modelRaw;
return { provider, model };
}
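
From the visible lines: a "bedrock" prefix is rewritten to "amazon-bedrock", and the legacy "opus-4.5" alias now resolves to claude-opus-4-6. For example:

parseModelRef("bedrock/claude-sonnet-4-6");
// → { provider: "amazon-bedrock", model: "claude-sonnet-4-6" }

parseModelRef("anthropic/opus-4.5");
// → { provider: "anthropic", model: "claude-opus-4-6" }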
@@ -225,7 +225,7 @@ describe("cron model formatting and precedence edge cases", () => {
model: "anthropic/opus-4.5",
},
},
{ provider: "anthropic", model: "claude-opus-4-5" },
{ provider: "anthropic", model: "claude-opus-4-6" },
);
});
@@ -235,10 +235,10 @@ describe("cron model formatting and precedence edge cases", () => {
payload: {
kind: "agentTurn",
message: DEFAULT_MESSAGE,
model: "bedrock/claude-sonnet-4-5",
model: "bedrock/claude-sonnet-4-6",
},
},
{ provider: "amazon-bedrock", model: "claude-sonnet-4-5" },
{ provider: "amazon-bedrock", model: "claude-sonnet-4-6" },
);
});
});
@@ -275,14 +275,14 @@ describe("cron model formatting and precedence edge cases", () => {
payload: {
kind: "agentTurn",
message: DEFAULT_MESSAGE,
model: "anthropic/claude-sonnet-4-5",
model: "anthropic/claude-sonnet-4-6",
},
sessionEntry: {
providerOverride: "openai",
modelOverride: "gpt-4.1-mini",
},
},
{ provider: "anthropic", model: "claude-sonnet-4-5" },
{ provider: "anthropic", model: "claude-sonnet-4-6" },
);
});
@@ -319,14 +319,14 @@ describe("cron model formatting and precedence edge cases", () => {
payload: {
kind: "agentTurn",
message: DEFAULT_MESSAGE,
model: "anthropic/claude-opus-4-5",
model: "anthropic/claude-opus-4-6",
},
sessionEntry: {
providerOverride: "openai",
modelOverride: "gpt-4.1-mini",
},
},
{ provider: "anthropic", model: "claude-opus-4-5" },
{ provider: "anthropic", model: "claude-opus-4-6" },
);
});
@@ -431,10 +431,10 @@ describe("cron model formatting and precedence edge cases", () => {
payload: {
kind: "agentTurn",
message: DEFAULT_MESSAGE,
model: "anthropic/claude-sonnet-4-5",
model: "anthropic/claude-sonnet-4-6",
},
},
{ provider: "anthropic", model: "claude-sonnet-4-5" },
{ provider: "anthropic", model: "claude-sonnet-4-6" },
);
});
});
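
Taken together, these cases fix the precedence: an explicit payload.model beats stored session overrides, which in turn (by assumption here) beat the config default. A hedged sketch, not the actual resolveCronModelSelection:

function pickEffectiveModel(opts: {
  payloadModel?: string; // e.g. "anthropic/claude-sonnet-4-6" from the cron payload
  sessionOverride?: string; // e.g. "openai/gpt-4.1-mini" stored on the session
  configDefault: string; // agents.defaults.model
}): string {
  return opts.payloadModel ?? opts.sessionOverride ?? opts.configDefault;
}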

View File

@@ -41,7 +41,7 @@ describe("runCronIsolatedAgentTurn model overrides", () => {
provider: "openai",
},
{
id: "claude-opus-4-5",
id: "claude-opus-4-6",
name: "Claude Opus 4.5",
provider: "anthropic",
},
@@ -76,13 +76,13 @@ describe("runCronIsolatedAgentTurn model overrides", () => {
await runTurnWithStoredModelOverride(home, {
kind: "agentTurn",
message: DEFAULT_MESSAGE,
model: "anthropic/claude-opus-4-5",
model: "anthropic/claude-opus-4-6",
})
).res;
expect(res.status).toBe("ok");
const explicitOverride = expectEmbeddedProviderModel({
provider: "anthropic",
model: "claude-opus-4-5",
model: "claude-opus-4-6",
});
explicitOverride.assert();
});
@@ -105,7 +105,7 @@ describe("runCronIsolatedAgentTurn model overrides", () => {
sessionId: "existing-gmail-session",
updatedAt: Date.now(),
providerOverride: "anthropic",
modelOverride: "claude-opus-4-5",
modelOverride: "claude-opus-4-6",
},
})
).res;
@@ -122,7 +122,7 @@ describe("runCronIsolatedAgentTurn model overrides", () => {
await withTempHome(async (home) => {
vi.mocked(loadModelCatalog).mockResolvedValueOnce([
{
id: "claude-opus-4-5",
id: "claude-opus-4-6",
name: "Opus 4.5",
provider: "anthropic",
},
@@ -132,9 +132,9 @@ describe("runCronIsolatedAgentTurn model overrides", () => {
cfgOverrides: {
agents: {
defaults: {
model: { primary: "anthropic/claude-opus-4-5" },
model: { primary: "anthropic/claude-opus-4-6" },
models: {
"anthropic/claude-opus-4-5": { alias: "Opus" },
"anthropic/claude-opus-4-6": { alias: "Opus" },
},
},
},
@@ -151,7 +151,7 @@ describe("runCronIsolatedAgentTurn model overrides", () => {
expect(res.status).toBe("ok");
const ignoredGmailModel = expectEmbeddedProviderModel({
provider: "anthropic",
model: "claude-opus-4-5",
model: "claude-opus-4-6",
});
ignoredGmailModel.assert();
});

View File

@@ -45,7 +45,7 @@ function makeCfg(
const base: OpenClawConfig = {
agents: {
defaults: {
model: "anthropic/claude-sonnet-4-5",
model: "anthropic/claude-sonnet-4-6",
workspace: path.join(home, "openclaw"),
},
},
@@ -131,7 +131,7 @@ describe("runCronIsolatedAgentTurn: subagent model resolution (#11461)", () => {
cfgOverrides: {
agents: {
defaults: {
model: "anthropic/claude-sonnet-4-5",
model: "anthropic/claude-sonnet-4-6",
subagents: { model: "ollama/llama3.2:3b" },
},
},
@@ -143,14 +143,14 @@ describe("runCronIsolatedAgentTurn: subagent model resolution (#11461)", () => {
name: "falls back to main model when subagents.model is unset",
cfgOverrides: undefined,
expectedProvider: "anthropic",
expectedModel: "claude-sonnet-4-5",
expectedModel: "claude-sonnet-4-6",
},
{
name: "supports subagents.model with {primary} object format",
cfgOverrides: {
agents: {
defaults: {
model: "anthropic/claude-sonnet-4-5",
model: "anthropic/claude-sonnet-4-6",
subagents: { model: { primary: "google/gemini-2.5-flash" } },
},
},
@@ -184,7 +184,7 @@ describe("runCronIsolatedAgentTurn: subagent model resolution (#11461)", () => {
cfgOverrides: {
agents: {
defaults: {
model: "anthropic/claude-sonnet-4-5",
model: "anthropic/claude-sonnet-4-6",
workspace: path.join(home, "openclaw"),
subagents: { model: "ollama/llama3.2:3b" },
},
@@ -205,7 +205,7 @@ describe("runCronIsolatedAgentTurn: subagent model resolution (#11461)", () => {
cfgOverrides: {
agents: {
defaults: {
model: "anthropic/claude-sonnet-4-5",
model: "anthropic/claude-sonnet-4-6",
workspace: path.join(home, "openclaw"),
subagents: { model: "ollama/llama3.2:3b" },
},
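
The three shapes above imply a small resolution order; a hedged sketch (the real helper is not shown in this diff):

type ModelRef = string | { primary: string };

function resolveSubagentModel(defaults: {
  model: ModelRef;
  subagents?: { model?: ModelRef };
}): string {
  // subagents.model (string or { primary }) wins; otherwise fall back to the main model.
  const pick = defaults.subagents?.model ?? defaults.model;
  return typeof pick === "string" ? pick : pick.primary;
}

// resolveSubagentModel({ model: "anthropic/claude-sonnet-4-6",
//   subagents: { model: "ollama/llama3.2:3b" } }) → "ollama/llama3.2:3b"
// resolveSubagentModel({ model: "anthropic/claude-sonnet-4-6" })
//   → "anthropic/claude-sonnet-4-6"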

View File

@@ -40,7 +40,7 @@ export function makeCfg(
const base: OpenClawConfig = {
agents: {
defaults: {
model: "anthropic/claude-opus-4-5",
model: "anthropic/claude-opus-4-6",
workspace: path.join(home, "openclaw"),
},
},

View File

@@ -176,17 +176,17 @@ describe("runCronIsolatedAgentTurn — skill filter", () => {
});
expectDefaultModelCall({
primary: "anthropic/claude-sonnet-4-5",
primary: "anthropic/claude-sonnet-4-6",
fallbacks: defaultFallbacks,
});
}
it("preserves defaults when agent overrides primary as string", async () => {
await expectPrimaryOverridePreservesDefaults("anthropic/claude-sonnet-4-5");
await expectPrimaryOverridePreservesDefaults("anthropic/claude-sonnet-4-6");
});
it("preserves defaults when agent overrides primary in object form", async () => {
await expectPrimaryOverridePreservesDefaults({ primary: "anthropic/claude-sonnet-4-5" });
await expectPrimaryOverridePreservesDefaults({ primary: "anthropic/claude-sonnet-4-6" });
});
it("applies payload.model override when model is allowed", async () => {

View File

@@ -87,7 +87,7 @@ describe("resolveCronSession", () => {
entry: {
sessionId: "old-session-id",
updatedAt: 1000,
model: "claude-opus-4-5",
model: "claude-opus-4-6",
},
});
@@ -182,7 +182,7 @@ describe("resolveCronSession", () => {
to: "channel:C0XXXXXXXXX",
threadId: "1737500000.123456",
},
modelOverride: "gpt-5.2",
modelOverride: "gpt-5.4",
},
fresh: true,
forceNew: true,
@@ -198,7 +198,7 @@ describe("resolveCronSession", () => {
expect(result.sessionEntry.lastThreadId).toBeUndefined();
expect(result.sessionEntry.deliveryContext).toBeUndefined();
// Per-session overrides must be preserved
expect(result.sessionEntry.modelOverride).toBe("gpt-5.2");
expect(result.sessionEntry.modelOverride).toBe("gpt-5.4");
});
it("clears delivery routing metadata when session is stale", () => {

View File

@@ -609,13 +609,13 @@ describe("normalizeCronJobPatch", () => {
it("infers agentTurn kind for model-only payload patches", () => {
const normalized = normalizeCronJobPatch({
payload: {
model: "anthropic/claude-sonnet-4-5",
model: "anthropic/claude-sonnet-4-6",
},
}) as unknown as Record<string, unknown>;
const payload = normalized.payload as Record<string, unknown>;
expect(payload.kind).toBe("agentTurn");
expect(payload.model).toBe("anthropic/claude-sonnet-4-5");
expect(payload.model).toBe("anthropic/claude-sonnet-4-6");
});
it("infers agentTurn kind for lightContext-only payload patches", () => {

View File

@@ -232,7 +232,7 @@ describe("cron run log", () => {
jobId: "job-1",
action: "finished",
status: "ok",
model: "gpt-5.2",
model: "gpt-5.4",
provider: "openai",
usage: {
input_tokens: 10,
@@ -258,7 +258,7 @@ describe("cron run log", () => {
);
const entries = await readCronRunLogEntries(logPath, { limit: 10, jobId: "job-1" });
expect(entries[0]?.model).toBe("gpt-5.2");
expect(entries[0]?.model).toBe("gpt-5.4");
expect(entries[0]?.provider).toBe("openai");
expect(entries[0]?.usage).toEqual({
input_tokens: 10,

Some files were not shown because too many files have changed in this diff.