Stop preserving stale whole-agent Codex runtime pins

This removes stale whole-agent Codex runtime pins from the remaining doctor/Crestodian paths and teaches doctor cron repair to normalize old openai-codex payload model refs to canonical openai refs. Runtime policy stays provider/model-scoped and cron execution stays strict.
This commit is contained in:
pashpashpash
2026-05-09 15:01:18 -07:00
committed by GitHub
parent 34d275cf87
commit 52771b65e2
9 changed files with 142 additions and 12 deletions

View File

@@ -235,6 +235,7 @@ Docs: https://docs.openclaw.ai
- macOS/config: reject stale or destructive app fallback config writes before direct replacement and keep rejected payloads as private audit artifacts, so `gateway.mode`, metadata, and auth are not silently clobbered. Fixes #64973 and #74890. Thanks @BunsDev.
- Gateway/macOS: include Apple Silicon Homebrew bin and sbin directories in generated LaunchAgent service PATHs and service-audit expectations so `openclaw gateway restart` keeps Homebrew Node installs reachable. Fixes #79232. Thanks @BunsDev and @TurboTheTurtle.
- Doctor/OpenAI: stop pinning migrated `openai-codex/*` routes to the Codex runtime so mixed-provider agents keep automatic PI routing for MiniMax, Anthropic, and other non-OpenAI model switches.
- Doctor/OpenAI: remove stale whole-agent Codex runtime pins while repairing legacy OpenAI-Codex routes, so upgraded agents do not force an unregistered Codex harness before provider/model routing can choose the right runtime.
- Gateway/macOS: `openclaw gateway stop` now uses `launchctl bootout` by default instead of unconditionally calling `launchctl disable`, so KeepAlive auto-recovery still works after unexpected crashes; use the new `--disable` flag to opt into the persistent-disable behavior when a manual stop should survive reboots. Fixes #77934. Thanks @bmoran1022.
- Gateway/macOS: `repairLaunchAgentBootstrap` no longer kickstarts an already-running LaunchAgent, preventing unnecessary service restarts and session disconnects when repair runs against a healthy gateway. Fixes #77428. Thanks @ramitrkar-hash.
- Gateway/macOS: `openclaw gateway stop --disable` now persists the LaunchAgent disable bit even after a previous bootout left the service not loaded, keeping the explicit stay-down path reliable. (#78412) Thanks @wdeveloper16.
@@ -334,7 +335,7 @@ Docs: https://docs.openclaw.ai
- Codex harness: honor `models.providers.openai-codex.models[].contextTokens` for native `openai/*` Codex runtime runs and `/status` context reporting, so subscription-backed Codex agents use the configured OAuth context cap without inflating past the runtime model window. Fixes #77858. Thanks @lilesjtu.
- Sessions cleanup: add `openclaw sessions cleanup --fix-dm-scope` so operators who return `session.dmScope` to `main` can dry-run and retire stale direct-DM session rows while preserving transcripts as deleted archives. Fixes #47561 and #45554. Thanks @BunsDev.
- Doctor/Codex: repair legacy `openai-codex/*` routes to canonical `openai/*`, keep OpenAI agent turns on Codex by default, ignore stale whole-agent/session runtime pins, preserve explicit provider/model runtime policy, and migrate legacy runtime model refs to model-scoped runtime entries. Thanks @vincentkoc.
- Doctor/Codex: repair legacy `openai-codex/*` routes and cron payload model refs to canonical `openai/*`, keep OpenAI agent turns on Codex by default, ignore stale whole-agent/session runtime pins, preserve explicit provider/model runtime policy, and migrate legacy runtime model refs to model-scoped runtime entries. Thanks @vincentkoc.
- Video generation: wait up to 20 minutes for slow fal/MiniMax queue-backed jobs, stop forwarding unsupported Google Veo generated-audio options, and normalize MiniMax `720P` requests to its supported `768P` resolution with the usual override warning/details instead of failing fallback.
- Channels/durable delivery: preserve channel-specific final reply semantics when using durable sends, including Telegram selected quotes and silent error replies plus WhatsApp message-sending cancellations.
- Channels/message lifecycle: build legacy channel delivery results from message receipts and add receipts to BlueBubbles, Feishu, Google Chat, iMessage, IRC, LINE, Nextcloud Talk, QQ Bot, Signal, Synology Chat, Tlon, Twitch, WhatsApp, Zalo, and Zalo Personal send results and owner-path reply delivery plus Discord, Matrix, Mattermost, Slack, and Teams send results while preserving existing message id compatibility.

View File

@@ -5,10 +5,59 @@ import {
type UnknownRecord = Record<string, unknown>;
/**
 * Convert a legacy `openai-codex/<model>` ref into its canonical
 * `openai/<model>` form.
 *
 * Returns `undefined` for anything that is not a legacy Codex-provider ref:
 * non-string values, refs without a provider segment before the first `/`,
 * refs for any other provider, or refs whose model segment is empty after
 * trimming.
 */
function toCanonicalOpenAIModelRef(value: unknown): string | undefined {
  const raw = readString(value);
  if (typeof raw !== "string") {
    return undefined;
  }
  const ref = raw.trim();
  const separator = ref.indexOf("/");
  // Provider segment must be non-empty and present (`/model` and bare names fail).
  if (separator <= 0) {
    return undefined;
  }
  const provider = ref.slice(0, separator).trim().toLowerCase();
  const model = ref.slice(separator + 1).trim();
  if (provider !== "openai-codex" || !model) {
    return undefined;
  }
  return `openai/${model}`;
}
/** Lowercase and normalize a channel name, mapping absent values to "". */
function normalizeChannel(value: string): string {
  const lowered = normalizeOptionalLowercaseString(value);
  return lowered ?? "";
}
/**
 * Report whether a cron payload still references a legacy `openai-codex/*`
 * model ref, either as the primary `model` or among the `fallbacks` array.
 * Used by doctor cron repair to decide whether to track the
 * `legacyPayloadCodexModel` issue before migration runs.
 */
export function hasLegacyOpenAICodexCronModelRef(payload: UnknownRecord): boolean {
  if (toCanonicalOpenAIModelRef(payload.model) !== undefined) {
    return true;
  }
  const { fallbacks } = payload;
  if (!Array.isArray(fallbacks)) {
    return false;
  }
  return fallbacks.some((entry) => toCanonicalOpenAIModelRef(entry) !== undefined);
}
/**
 * Rewrite any legacy `openai-codex/*` refs in a cron payload (primary `model`
 * and each `fallbacks` entry) to canonical `openai/*` refs, mutating the
 * payload in place. Returns true when anything was changed; entries that are
 * not legacy Codex refs are left untouched.
 */
function migrateLegacyOpenAICodexModelRefs(payload: UnknownRecord): boolean {
  let mutated = false;
  const canonicalModel = toCanonicalOpenAIModelRef(payload.model);
  if (canonicalModel !== undefined && payload.model !== canonicalModel) {
    payload.model = canonicalModel;
    mutated = true;
  }
  const { fallbacks } = payload;
  if (Array.isArray(fallbacks)) {
    let rewroteAny = false;
    const rewritten = fallbacks.map((entry) => {
      const canonical = toCanonicalOpenAIModelRef(entry);
      if (canonical === undefined || canonical === entry) {
        return entry;
      }
      rewroteAny = true;
      return canonical;
    });
    // Only replace the array (and report mutation) when a ref actually changed.
    if (rewroteAny) {
      payload.fallbacks = rewritten;
      mutated = true;
    }
  }
  return mutated;
}
export function migrateLegacyCronPayload(payload: UnknownRecord): boolean {
let mutated = false;
@@ -34,5 +83,9 @@ export function migrateLegacyCronPayload(payload: UnknownRecord): boolean {
mutated = true;
}
if (migrateLegacyOpenAICodexModelRefs(payload)) {
mutated = true;
}
return mutated;
}

View File

@@ -106,6 +106,30 @@ describe("normalizeStoredCronJobs", () => {
});
});
// Verifies doctor cron repair rewrites legacy `openai-codex/*` payload model
// refs to canonical `openai/*` — the primary `model` (including surrounding
// whitespace in the stored value) and each matching `fallbacks` entry —
// while leaving non-Codex fallbacks untouched, and that the repair reports
// one `legacyPayloadCodexModel` issue and marks the job mutated.
it("rewrites legacy OpenAI Codex model refs in cron payloads", () => {
const { job, result } = normalizeOneJob(
makeLegacyJob({
id: "legacy-codex-cron-model",
schedule: { kind: "every", everyMs: 60_000 },
payload: {
kind: "agentTurn",
message: "ping",
model: " openai-codex/gpt-5.5 ",
fallbacks: ["anthropic/claude-opus-4.6", "openai-codex/gpt-5.4-mini"],
},
}),
);
expect(result.mutated).toBe(true);
expect(result.issues.legacyPayloadCodexModel).toBe(1);
expect(job.payload).toMatchObject({
kind: "agentTurn",
message: "ping",
model: "openai/gpt-5.5",
fallbacks: ["anthropic/claude-opus-4.6", "openai/gpt-5.4-mini"],
});
});
it("does not report legacyPayloadKind for already-normalized payload kinds", () => {
const jobs = [
{

View File

@@ -10,7 +10,10 @@ import {
normalizeOptionalStringifiedId,
} from "../shared/string-coerce.js";
import { normalizeLegacyDeliveryInput } from "./doctor-cron-legacy-delivery.js";
import { migrateLegacyCronPayload } from "./doctor-cron-payload-migration.js";
import {
hasLegacyOpenAICodexCronModelRef,
migrateLegacyCronPayload,
} from "./doctor-cron-payload-migration.js";
type CronStoreIssueKey =
| "jobId"
@@ -19,6 +22,7 @@ type CronStoreIssueKey =
| "legacyScheduleString"
| "legacyScheduleCron"
| "legacyPayloadKind"
| "legacyPayloadCodexModel"
| "legacyPayloadProvider"
| "legacyTopLevelPayloadFields"
| "legacyTopLevelDeliveryFields"
@@ -380,8 +384,12 @@ export function normalizeStoredCronJobs(
if (payloadRecord) {
const hadLegacyPayloadProvider = Boolean(normalizeOptionalString(payloadRecord.provider));
const hadLegacyPayloadCodexModel = hasLegacyOpenAICodexCronModelRef(payloadRecord);
if (migrateLegacyCronPayload(payloadRecord)) {
mutated = true;
if (hadLegacyPayloadCodexModel) {
trackIssue("legacyPayloadCodexModel");
}
if (hadLegacyPayloadProvider) {
trackIssue("legacyPayloadProvider");
}

View File

@@ -54,6 +54,11 @@ function formatLegacyIssuePreview(issues: Partial<Record<string, number>>): stri
if (issues.legacyPayloadKind) {
lines.push(`- ${pluralize(issues.legacyPayloadKind, "job")} needs payload kind normalization`);
}
if (issues.legacyPayloadCodexModel) {
lines.push(
`- ${pluralize(issues.legacyPayloadCodexModel, "job")} still uses legacy \`openai-codex/*\` cron model refs`,
);
}
if (issues.legacyPayloadProvider) {
lines.push(
`- ${pluralize(issues.legacyPayloadProvider, "job")} still uses payload \`provider\` as a delivery alias`,

View File

@@ -124,6 +124,7 @@ describe("collectCodexRouteWarnings", () => {
cfg: {
agents: {
defaults: {
agentRuntime: { id: "codex" },
model: {
primary: "openai-codex/gpt-5.5",
fallbacks: ["openai-codex/gpt-5.4", "anthropic/claude-sonnet-4-6"],
@@ -191,7 +192,11 @@ describe("collectCodexRouteWarnings", () => {
});
expect(result.warnings).toStrictEqual([]);
expect(result.changes).toEqual([expect.stringContaining("Repaired Codex model routes")]);
expect(result.changes).toEqual([
expect.stringContaining("Repaired Codex model routes"),
"Removed agents.defaults.agentRuntime; runtime is now provider/model scoped.",
"Removed agents.list.worker.agentRuntime; runtime is now provider/model scoped.",
]);
expect(result.cfg.agents?.defaults?.model).toEqual({
primary: "openai/gpt-5.5",
fallbacks: ["openai/gpt-5.4", "anthropic/claude-sonnet-4-6"],
@@ -210,8 +215,8 @@ describe("collectCodexRouteWarnings", () => {
expect(result.cfg.agents?.list?.[0]).toMatchObject({
id: "worker",
model: "openai/gpt-5.4",
agentRuntime: { id: "codex" },
});
expect(result.cfg.agents?.list?.[0]?.agentRuntime).toBeUndefined();
expect(result.cfg.channels?.modelByChannel?.telegram?.default).toBe("openai/gpt-5.4");
expect(result.cfg.hooks?.mappings?.[0]?.model).toBe("openai/gpt-5.4-mini");
expect(result.cfg.hooks?.gmail?.model).toBe("openai/gpt-5.4");

View File

@@ -18,6 +18,11 @@ type SessionRouteRepairResult = {
changed: boolean;
sessionKeys: string[];
};
type ConfigRouteRepairResult = {
cfg: OpenClawConfig;
changes: CodexRouteHit[];
runtimePinChanges: string[];
};
type CodexSessionRouteRepairSummary = {
scannedStores: number;
repairedStores: number;
@@ -442,10 +447,38 @@ function rewriteAgentModelRefs(params: {
}
}
function rewriteConfigModelRefs(params: { cfg: OpenClawConfig; env?: NodeJS.ProcessEnv }): {
/**
 * Delete stale whole-agent runtime pins (`embeddedHarness`, `agentRuntime`)
 * from one agent config container, appending a human-readable change note
 * per removed key. Only record-shaped values are cleared — non-record values
 * (per asMutableRecord) are left in place. No-op when the container is absent.
 */
function clearLegacyAgentRuntimePolicy(
  container: MutableRecord | undefined,
  pathLabel: string,
  changes: string[],
): void {
  if (!container) {
    return;
  }
  const legacyKeys = ["embeddedHarness", "agentRuntime"] as const;
  for (const key of legacyKeys) {
    if (!asMutableRecord(container[key])) {
      continue;
    }
    delete container[key];
    changes.push(`Removed ${pathLabel}.${key}; runtime is now provider/model scoped.`);
  }
}
/**
 * Sweep the whole config for stale whole-agent runtime pins: clears them from
 * `agents.defaults` and from every entry of `agents.list`, labelling each list
 * entry by its trimmed id (or its index when the id is missing/blank).
 * Returns the collected change notes; empty when nothing was removed.
 */
function clearConfigLegacyAgentRuntimePolicies(cfg: OpenClawConfig): string[] {
  const changes: string[] = [];
  clearLegacyAgentRuntimePolicy(asMutableRecord(cfg.agents?.defaults), "agents.defaults", changes);
  const agents = cfg.agents?.list ?? [];
  agents.forEach((agent, index) => {
    const trimmedId = typeof agent.id === "string" ? agent.id.trim() : "";
    const label = trimmedId || String(index);
    clearLegacyAgentRuntimePolicy(agent as MutableRecord, `agents.list.${label}`, changes);
  });
  return changes;
}
function rewriteConfigModelRefs(params: {
cfg: OpenClawConfig;
changes: CodexRouteHit[];
} {
env?: NodeJS.ProcessEnv;
}): ConfigRouteRepairResult {
const nextConfig = structuredClone(params.cfg);
const hits: CodexRouteHit[] = [];
const defaultsRuntime = nextConfig.agents?.defaults?.agentRuntime;
@@ -518,9 +551,12 @@ function rewriteConfigModelRefs(params: { cfg: OpenClawConfig; env?: NodeJS.Proc
key: "model",
path: "channels.discord.voice.model",
});
const runtimePinChanges =
hits.length > 0 ? clearConfigLegacyAgentRuntimePolicies(nextConfig) : [];
return {
cfg: hits.length > 0 ? nextConfig : params.cfg,
cfg: hits.length > 0 || runtimePinChanges.length > 0 ? nextConfig : params.cfg,
changes: hits,
runtimePinChanges,
};
}
@@ -545,7 +581,7 @@ export function collectCodexRouteWarnings(params: {
hit.runtime ? `; current runtime is "${hit.runtime}"` : ""
}.`,
),
"- Run `openclaw doctor --fix`: it rewrites configured model refs and stale sessions to `openai/*` without changing explicit runtime policy.",
"- Run `openclaw doctor --fix`: it rewrites configured model refs and stale sessions to `openai/*`, clears old whole-agent runtime pins, and keeps provider/model runtime policy.",
].join("\n"),
];
}
@@ -578,6 +614,7 @@ export function maybeRepairCodexRoutes(params: {
`Repaired Codex model routes:\n${repaired.changes
.map((hit) => `- ${formatCodexRouteChange(hit)}`)
.join("\n")}`,
...repaired.runtimePinChanges,
],
};
}

View File

@@ -71,7 +71,6 @@ function buildCodexAppServerPlannerConfig(workspaceDir: string): OpenClawConfig
agents: {
defaults: {
workspace: workspaceDir,
agentRuntime: { id: "codex" },
model: { primary: `openai/${CRESTODIAN_CODEX_MODEL}` },
},
},

View File

@@ -159,7 +159,6 @@ describe("Crestodian assistant", () => {
agents: {
defaults: {
workspace: "/tmp/workspace",
agentRuntime: { id: "codex" },
model: { primary: "openai/gpt-5.5" },
},
},
@@ -220,7 +219,6 @@ describe("Crestodian assistant", () => {
expect(firstEmbeddedCall.config).toMatchObject({
agents: {
defaults: {
agentRuntime: { id: "codex" },
model: { primary: "openai/gpt-5.5" },
},
},