Reply: surface OAuth reauth failures (#63217)

Merged via squash.

Prepared head SHA: 68b7ffd59e
Co-authored-by: mbelinky <132747814+mbelinky@users.noreply.github.com>
Co-authored-by: mbelinky <132747814+mbelinky@users.noreply.github.com>
Reviewed-by: @mbelinky
This commit is contained in:
Mariano
2026-04-08 18:03:03 +02:00
committed by GitHub
parent 45195e3645
commit b77db8c0b6
17 changed files with 663 additions and 62 deletions

View File

@@ -17,6 +17,7 @@ Docs: https://docs.openclaw.ai
- QQBot/media-tags: support HTML entity-encoded angle brackets (`&lt;`/`&gt;`) in media-tag regexes so entity-escaped `<qqimg>` tags from upstream are correctly parsed and normalized. (#60493) Thanks @ylc0919.
- npm packaging: mirror bundled Slack, Telegram, Discord, and Feishu channel runtime deps at the root and harden published-install verification so fresh installs fail fast on manifest drift instead of missing-module crashes. (#63065) Thanks @scoootscooob.
- npm packaging: derive required root runtime mirrors from bundled plugin manifests and built root chunks, then install packed release tarballs without the repo `node_modules` so release checks catch missing plugin deps before publish.
- Reply/doctor: resolve reply-run SecretRefs before preflight helpers touch config, surface gateway OAuth reauth failures to users, and make `openclaw doctor` call out exact reauth commands.
## 2026.4.8

View File

@@ -323,6 +323,11 @@ Anthropic setup-token path.
Refresh prompts only appear when running interactively (TTY); `--non-interactive`
skips refresh attempts.
When an OAuth refresh fails permanently (for example `refresh_token_reused`,
`invalid_grant`, or a provider telling you to sign in again), doctor reports
that re-auth is required and prints the exact `openclaw models auth login --provider ...`
command to run.
Doctor also reports auth profiles that are temporarily unusable due to:
- short cooldowns (rate limits/timeouts/auth failures)

View File

@@ -0,0 +1,68 @@
import { formatCliCommand } from "../../cli/command-format.js";
import { sanitizeForLog } from "../../terminal/ansi.js";
import { normalizeProviderId } from "../model-selection.js";
/**
 * Permanent OAuth refresh-failure reasons recognized by
 * `classifyOAuthRefreshFailureReason`. Messages that match none of these
 * classify as `null` and are treated as transient by callers.
 */
export type OAuthRefreshFailureReason =
  | "refresh_token_reused"
  | "invalid_grant"
  | "sign_in_again"
  | "invalid_refresh_token"
  | "revoked";
// Matches "OAuth token refresh failed for <provider>:" (case-insensitive) and
// captures everything up to the first colon as the provider id.
const OAUTH_REFRESH_FAILURE_PROVIDER_RE = /OAuth token refresh failed for ([^:]+):/i;
// Provider ids safe to interpolate into a suggested shell command.
const SAFE_PROVIDER_ID_RE = /^[a-z0-9][a-z0-9._-]*$/;

/**
 * Pull the provider id out of an OAuth refresh-failure message.
 *
 * @returns The trimmed provider id, or `null` when the message does not match
 *   the pattern or the captured id is blank after trimming.
 */
export function extractOAuthRefreshFailureProvider(message: string): string | null {
  const match = OAUTH_REFRESH_FAILURE_PROVIDER_RE.exec(message);
  const provider = match?.[1]?.trim();
  if (!provider) {
    return null;
  }
  return provider;
}
/**
 * Normalize a provider id parsed from an error message into a value that is
 * safe to embed in a suggested CLI command.
 *
 * Strips log-unsafe characters and backticks, normalizes via
 * `normalizeProviderId`, and rejects anything that is not a plain
 * `[a-z0-9._-]` identifier.
 *
 * @returns The normalized provider id, or `null` when the input is missing or
 *   fails the safety check.
 */
export function sanitizeOAuthRefreshFailureProvider(
  provider: string | null | undefined,
): string | null {
  let cleaned = "";
  if (provider) {
    cleaned = sanitizeForLog(provider).replaceAll("`", "").trim();
  }
  const normalized = normalizeProviderId(cleaned);
  if (normalized && SAFE_PROVIDER_ID_RE.test(normalized)) {
    return normalized;
  }
  return null;
}
/**
 * Map a refresh-failure message onto a known permanent-failure reason.
 *
 * Matching is case-insensitive and ordered from most to least specific.
 *
 * @returns The matched reason, or `null` when the message matches none of the
 *   known permanent-failure markers.
 */
export function classifyOAuthRefreshFailureReason(
  message: string,
): OAuthRefreshFailureReason | null {
  const lower = message.toLowerCase();
  if (lower.includes("refresh_token_reused")) {
    return "refresh_token_reused";
  }
  if (lower.includes("invalid_grant")) {
    return "invalid_grant";
  }
  // Providers phrase this either as "signing in again" (cause) or
  // "sign in again" (instruction); neither is a substring of the other.
  if (lower.includes("signing in again") || lower.includes("sign in again")) {
    return "sign_in_again";
  }
  if (lower.includes("invalid refresh token")) {
    return "invalid_refresh_token";
  }
  // "revoked" alone already covers "expired or revoked"; the extra
  // `includes("expired or revoked")` term in the original was redundant.
  if (lower.includes("revoked")) {
    return "revoked";
  }
  return null;
}
/**
 * Inspect an error message for an OAuth token refresh failure.
 *
 * @returns `null` when the message is unrelated to OAuth refresh; otherwise an
 *   object carrying the sanitized provider id (`null` when unparseable or
 *   unsafe) and the classified permanent-failure reason (`null` when the
 *   failure looks transient).
 */
export function classifyOAuthRefreshFailure(message: string): {
  provider: string | null;
  reason: OAuthRefreshFailureReason | null;
} | null {
  const isRefreshFailure = /oauth token refresh failed/i.test(message);
  if (!isRefreshFailure) {
    return null;
  }
  const rawProvider = extractOAuthRefreshFailureProvider(message);
  const provider = sanitizeOAuthRefreshFailureProvider(rawProvider);
  const reason = classifyOAuthRefreshFailureReason(message);
  return { provider, reason };
}
/**
 * Build the `models auth login` command to suggest after a refresh failure.
 *
 * Falls back to the generic (provider-less) login command when the provider id
 * is missing or fails sanitization, so untrusted message content can never be
 * interpolated into the suggested command.
 */
export function buildOAuthRefreshFailureLoginCommand(provider: string | null | undefined): string {
  const safeProvider = sanitizeOAuthRefreshFailureProvider(provider);
  if (safeProvider) {
    return formatCliCommand(`openclaw models auth login --provider ${safeProvider}`);
  }
  return formatCliCommand("openclaw models auth login");
}

View File

@@ -0,0 +1,225 @@
import { beforeEach, describe, expect, it, vi } from "vitest";
import type { TemplateContext } from "../templating.js";
import type { FollowupRun, QueueSettings } from "./queue.js";
import { createMockTypingController } from "./test-helpers.js";
// Config fixtures: `freshCfg` stands in for a runtime-resolved config, while
// `staleCfg` still carries a SecretRef-shaped skills entry that the execution
// path is expected to resolve before helpers read it.
const freshCfg = { runtimeFresh: true };
const staleCfg = {
  runtimeFresh: false,
  skills: {
    entries: {
      whisper: {
        apiKey: { source: "env", provider: "default", id: "OPENAI_API_KEY" },
      },
    },
  },
};
// Thrown from the preflight mock so tests stop right after the code under
// inspection has run, without executing the rest of the agent turn.
const sentinelError = new Error("stop-after-preflight");
const resolveQueuedReplyExecutionConfigMock = vi.fn();
const resolveReplyToModeMock = vi.fn();
const createReplyToModeFilterForChannelMock = vi.fn();
const createReplyMediaPathNormalizerMock = vi.fn();
const runPreflightCompactionIfNeededMock = vi.fn();
const runMemoryFlushIfNeededMock = vi.fn();
const enqueueFollowupRunMock = vi.fn();
// Module mocks forward through vi.fn() indirection so individual tests can
// reconfigure behavior per call.
vi.mock("./agent-runner-utils.js", () => ({
  resolveQueuedReplyExecutionConfig: (...args: unknown[]) =>
    resolveQueuedReplyExecutionConfigMock(...args),
}));
vi.mock("./reply-threading.js", () => ({
  resolveReplyToMode: (...args: unknown[]) => resolveReplyToModeMock(...args),
  createReplyToModeFilterForChannel: (...args: unknown[]) =>
    createReplyToModeFilterForChannelMock(...args),
}));
vi.mock("./reply-media-paths.js", () => ({
  createReplyMediaPathNormalizer: (...args: unknown[]) =>
    createReplyMediaPathNormalizerMock(...args),
}));
vi.mock("./agent-runner-memory.js", () => ({
  runPreflightCompactionIfNeeded: (...args: unknown[]) =>
    runPreflightCompactionIfNeededMock(...args),
  runMemoryFlushIfNeeded: (...args: unknown[]) => runMemoryFlushIfNeededMock(...args),
}));
// Partial mock: keep the real queue module but intercept enqueueFollowupRun.
vi.mock("./queue.js", async () => {
  const actual = await vi.importActual<typeof import("./queue.js")>("./queue.js");
  return {
    ...actual,
    enqueueFollowupRun: (...args: unknown[]) => enqueueFollowupRunMock(...args),
  };
});
// Import after the mocks are registered so the module under test binds to them.
const { runReplyAgent } = await import("./agent-runner.js");
describe("runReplyAgent runtime config", () => {
  beforeEach(() => {
    resolveQueuedReplyExecutionConfigMock.mockReset();
    resolveReplyToModeMock.mockReset();
    createReplyToModeFilterForChannelMock.mockReset();
    createReplyMediaPathNormalizerMock.mockReset();
    runPreflightCompactionIfNeededMock.mockReset();
    runMemoryFlushIfNeededMock.mockReset();
    enqueueFollowupRunMock.mockReset();
    // Default happy path: secret resolution swaps staleCfg for freshCfg, and
    // preflight compaction aborts the run with the sentinel error.
    resolveQueuedReplyExecutionConfigMock.mockResolvedValue(freshCfg);
    resolveReplyToModeMock.mockReturnValue("default");
    createReplyToModeFilterForChannelMock.mockReturnValue((payload: unknown) => payload);
    createReplyMediaPathNormalizerMock.mockReturnValue((payload: unknown) => payload);
    runPreflightCompactionIfNeededMock.mockRejectedValue(sentinelError);
    runMemoryFlushIfNeededMock.mockResolvedValue(undefined);
  });
  // Direct (non-queued) path: the run config must be resolved before
  // reply-mode, media-path, and preflight helpers read it.
  it("resolves direct reply runs before early helpers read config", async () => {
    const followupRun = {
      prompt: "hello",
      summaryLine: "hello",
      enqueuedAt: Date.now(),
      run: {
        sessionId: "session-1",
        sessionKey: "agent:main:telegram:default:direct:test",
        messageProvider: "telegram",
        sessionFile: "/tmp/session.jsonl",
        workspaceDir: "/tmp",
        config: staleCfg,
        skillsSnapshot: {},
        provider: "openai",
        model: "gpt-5.4",
        thinkLevel: "low",
        verboseLevel: "off",
        elevatedLevel: "off",
        bashElevated: {
          enabled: false,
          allowed: false,
          defaultLevel: "off",
        },
        timeoutMs: 1_000,
        blockReplyBreak: "message_end",
      },
    } as unknown as FollowupRun;
    const resolvedQueue = { mode: "interrupt" } as QueueSettings;
    const typing = createMockTypingController();
    const sessionCtx = {
      Provider: "telegram",
      OriginatingChannel: "telegram",
      OriginatingTo: "12345",
      AccountId: "default",
      ChatType: "dm",
      MessageSid: "msg-1",
    } as unknown as TemplateContext;
    // The sentinel rejection from preflight stops execution right after the
    // helpers under test have been invoked.
    await expect(
      runReplyAgent({
        commandBody: "hello",
        followupRun,
        queueKey: "main",
        resolvedQueue,
        shouldSteer: false,
        shouldFollowup: false,
        isActive: false,
        isStreaming: false,
        typing,
        sessionCtx,
        defaultModel: "openai/gpt-5.4",
        resolvedVerboseLevel: "off",
        isNewSession: false,
        blockStreamingEnabled: false,
        resolvedBlockStreamingBreak: "message_end",
        shouldInjectGroupIntro: false,
        typingMode: "instant",
      }),
    ).rejects.toBe(sentinelError);
    // The run's config was swapped in place, and every helper that ran before
    // preflight saw the resolved (fresh) config, not the stale one.
    expect(followupRun.run.config).toBe(freshCfg);
    expect(resolveQueuedReplyExecutionConfigMock).toHaveBeenCalledWith(staleCfg);
    expect(resolveReplyToModeMock).toHaveBeenCalledWith(freshCfg, "telegram", "default", "dm");
    expect(createReplyMediaPathNormalizerMock).toHaveBeenCalledWith({
      cfg: freshCfg,
      sessionKey: undefined,
      workspaceDir: "/tmp",
    });
    expect(runPreflightCompactionIfNeededMock).toHaveBeenCalledWith(
      expect.objectContaining({
        cfg: freshCfg,
        followupRun,
      }),
    );
  });
  // When the run is re-queued as a followup (active session), secret
  // resolution must NOT happen up front — it is deferred to the queue runner.
  it("does not resolve secrets before the enqueue-followup queue path", async () => {
    const followupRun = {
      prompt: "hello",
      summaryLine: "hello",
      enqueuedAt: Date.now(),
      run: {
        sessionId: "session-1",
        sessionKey: "agent:main:telegram:default:direct:test",
        messageProvider: "telegram",
        sessionFile: "/tmp/session.jsonl",
        workspaceDir: "/tmp",
        config: staleCfg,
        skillsSnapshot: {},
        provider: "openai",
        model: "gpt-5.4",
        thinkLevel: "low",
        verboseLevel: "off",
        elevatedLevel: "off",
        bashElevated: {
          enabled: false,
          allowed: false,
          defaultLevel: "off",
        },
        timeoutMs: 1_000,
        blockReplyBreak: "message_end",
      },
    } as unknown as FollowupRun;
    const resolvedQueue = { mode: "interrupt" } as QueueSettings;
    const typing = createMockTypingController();
    const sessionCtx = {
      Provider: "telegram",
      OriginatingChannel: "telegram",
      OriginatingTo: "12345",
      AccountId: "default",
      ChatType: "dm",
      MessageSid: "msg-1",
    } as unknown as TemplateContext;
    await expect(
      runReplyAgent({
        commandBody: "hello",
        followupRun,
        queueKey: "main",
        resolvedQueue,
        shouldSteer: false,
        shouldFollowup: true,
        isActive: true,
        isStreaming: false,
        typing,
        sessionCtx,
        defaultModel: "openai/gpt-5.4",
        resolvedVerboseLevel: "off",
        isNewSession: false,
        blockStreamingEnabled: false,
        resolvedBlockStreamingBreak: "message_end",
        shouldInjectGroupIntro: false,
        typingMode: "instant",
      }),
    ).resolves.toBeUndefined();
    expect(resolveQueuedReplyExecutionConfigMock).not.toHaveBeenCalled();
    expect(enqueueFollowupRunMock).toHaveBeenCalledWith(
      "main",
      followupRun,
      resolvedQueue,
      "message-id",
      expect.any(Function),
      false,
    );
  });
});

View File

@@ -27,9 +27,15 @@ vi.mock("../../agents/model-fallback.js", () => ({
Array.isArray((err as { attempts?: unknown[] }).attempts),
}));
vi.mock("../../agents/model-selection.js", () => ({
isCliProvider: () => false,
}));
vi.mock("../../agents/model-selection.js", async () => {
const actual = await vi.importActual<typeof import("../../agents/model-selection.js")>(
"../../agents/model-selection.js",
);
return {
...actual,
isCliProvider: () => false,
};
});
vi.mock("../../agents/bootstrap-budget.js", () => ({
resolveBootstrapWarningSignaturesSeen: () => [],
@@ -960,6 +966,86 @@ describe("runAgentTurnWithFallback", () => {
}
});
it("surfaces gateway reauth guidance for known OAuth refresh failures", async () => {
state.runEmbeddedPiAgentMock.mockRejectedValueOnce(
new Error(
"OAuth token refresh failed for openai-codex: refresh_token_reused. Please try again or re-authenticate.",
),
);
const runAgentTurnWithFallback = await getRunAgentTurnWithFallback();
const result = await runAgentTurnWithFallback({
commandBody: "hello",
followupRun: createFollowupRun(),
sessionCtx: {
Provider: "whatsapp",
MessageSid: "msg",
} as unknown as TemplateContext,
opts: {},
typingSignals: createMockTypingSignaler(),
blockReplyPipeline: null,
blockStreamingEnabled: false,
resolvedBlockStreamingBreak: "message_end",
applyReplyToMode: (payload) => payload,
shouldEmitToolResult: () => true,
shouldEmitToolOutput: () => false,
pendingToolTasks: new Set(),
resetSessionAfterCompactionFailure: async () => false,
resetSessionAfterRoleOrderingConflict: async () => false,
isHeartbeat: false,
sessionKey: "main",
getActiveSessionEntry: () => undefined,
resolvedVerboseLevel: "off",
});
expect(result.kind).toBe("final");
if (result.kind === "final") {
expect(result.payload.text).toBe(
"⚠️ Model login expired on the gateway for openai-codex. Re-auth with `openclaw models auth login --provider openai-codex`, then try again.",
);
}
});
it("falls back to a generic reauth command when the provider in the OAuth error is unsafe", async () => {
state.runEmbeddedPiAgentMock.mockRejectedValueOnce(
new Error(
"OAuth token refresh failed for openai-codex`\nrm -rf /: invalid_grant. Please try again or re-authenticate.",
),
);
const runAgentTurnWithFallback = await getRunAgentTurnWithFallback();
const result = await runAgentTurnWithFallback({
commandBody: "hello",
followupRun: createFollowupRun(),
sessionCtx: {
Provider: "whatsapp",
MessageSid: "msg",
} as unknown as TemplateContext,
opts: {},
typingSignals: createMockTypingSignaler(),
blockReplyPipeline: null,
blockStreamingEnabled: false,
resolvedBlockStreamingBreak: "message_end",
applyReplyToMode: (payload) => payload,
shouldEmitToolResult: () => true,
shouldEmitToolOutput: () => false,
pendingToolTasks: new Set(),
resetSessionAfterCompactionFailure: async () => false,
resetSessionAfterRoleOrderingConflict: async () => false,
isHeartbeat: false,
sessionKey: "main",
getActiveSessionEntry: () => undefined,
resolvedVerboseLevel: "off",
});
expect(result.kind).toBe("final");
if (result.kind === "final") {
expect(result.payload.text).toBe(
"⚠️ Model login expired on the gateway. Re-auth with `openclaw models auth login`, then try again.",
);
}
});
it("returns a session reset hint for Bedrock tool mismatch errors on external chat channels", async () => {
state.runEmbeddedPiAgentMock.mockRejectedValueOnce(
new Error(

View File

@@ -4,6 +4,10 @@ import {
hasOutboundReplyContent,
resolveSendableOutboundReplyParts,
} from "openclaw/plugin-sdk/reply-payload";
import {
buildOAuthRefreshFailureLoginCommand,
classifyOAuthRefreshFailure,
} from "../../agents/auth-profiles/oauth-refresh-failure.js";
import { resolveBootstrapWarningSignaturesSeen } from "../../agents/bootstrap-budget.js";
import { runCliAgent } from "../../agents/cli-runner.js";
import { getCliSessionBinding } from "../../agents/cli-session.js";
@@ -308,6 +312,14 @@ function buildExternalRunFailureText(message: string): string {
if (isToolResultTurnMismatchError(message)) {
return "⚠️ Session history got out of sync. Please try again, or use /new to start a fresh session.";
}
const oauthRefreshFailure = classifyOAuthRefreshFailure(message);
if (oauthRefreshFailure) {
const loginCommand = buildOAuthRefreshFailureLoginCommand(oauthRefreshFailure.provider);
if (oauthRefreshFailure.reason) {
return `⚠️ Model login expired on the gateway${oauthRefreshFailure.provider ? ` for ${oauthRefreshFailure.provider}` : ""}. Re-auth with \`${loginCommand}\`, then try again.`;
}
return `⚠️ Model login failed on the gateway${oauthRefreshFailure.provider ? ` for ${oauthRefreshFailure.provider}` : ""}. Please try again. If this keeps happening, re-auth with \`${loginCommand}\`.`;
}
return "⚠️ Something went wrong while processing your request. Please try again, or use /new to start a fresh session.";
}

View File

@@ -38,8 +38,8 @@ afterEach(() => {
});
describe("buildEmbeddedRunBaseParams runtime config", () => {
it("prefers the active runtime snapshot when queued reply config still contains SecretRefs", () => {
const sourceConfig: OpenClawConfig = {
it("keeps an already-resolved run config instead of reverting to a stale runtime snapshot", () => {
const staleSnapshot: OpenClawConfig = {
models: {
providers: {
openai: {
@@ -54,7 +54,7 @@ describe("buildEmbeddedRunBaseParams runtime config", () => {
},
},
};
const runtimeConfig: OpenClawConfig = {
const resolvedRunConfig: OpenClawConfig = {
models: {
providers: {
openai: {
@@ -65,10 +65,10 @@ describe("buildEmbeddedRunBaseParams runtime config", () => {
},
},
};
setRuntimeConfigSnapshot(runtimeConfig, sourceConfig);
setRuntimeConfigSnapshot(staleSnapshot, staleSnapshot);
const resolved = buildEmbeddedRunBaseParams({
run: makeRun(sourceConfig),
run: makeRun(resolvedRunConfig),
provider: "openai",
model: "gpt-4.1-mini",
runId: "run-1",
@@ -78,6 +78,6 @@ describe("buildEmbeddedRunBaseParams runtime config", () => {
}),
});
expect(resolved.config).toBe(runtimeConfig);
expect(resolved.config).toBe(resolvedRunConfig);
});
});

View File

@@ -2,6 +2,8 @@ import { resolveRunModelFallbacksOverride } from "../../agents/agent-scope.js";
import { getChannelPlugin } from "../../channels/plugins/index.js";
import type { ChannelId, ChannelThreadingToolContext } from "../../channels/plugins/types.js";
import { normalizeAnyChannelId, normalizeChannelId } from "../../channels/registry.js";
import { resolveCommandSecretRefsViaGateway } from "../../cli/command-secret-gateway.js";
import { getAgentRuntimeCommandSecretTargetIds } from "../../cli/command-secret-targets.js";
import { getRuntimeConfigSnapshot, type OpenClawConfig } from "../../config/config.js";
import {
normalizeOptionalLowercaseString,
@@ -25,6 +27,18 @@ export function resolveQueuedReplyRuntimeConfig(config: OpenClawConfig): OpenCla
);
}
/**
 * Prepare a queued reply's config for execution: refresh it against the live
 * runtime snapshot, then resolve any SecretRefs the agent runtime needs via
 * the gateway.
 *
 * @returns The gateway-resolved config, or the runtime-refreshed config when
 *   the gateway returns no resolved config.
 */
export async function resolveQueuedReplyExecutionConfig(
  config: OpenClawConfig,
): Promise<OpenClawConfig> {
  const runtimeConfig = resolveQueuedReplyRuntimeConfig(config);
  const gatewayResult = await resolveCommandSecretRefsViaGateway({
    config: runtimeConfig,
    commandName: "reply",
    targetIds: getAgentRuntimeCommandSecretTargetIds(),
  });
  return gatewayResult.resolvedConfig ?? runtimeConfig;
}
/**
* Build provider-specific threading context for tool auto-injection.
*/
@@ -111,14 +125,14 @@ export const resolveEnforceFinalTag = (
(run.skipProviderRuntimeHints ? false : undefined) ??
(run.enforceFinalTag ||
isReasoningTagProvider(provider, {
config: resolveQueuedReplyRuntimeConfig(run.config),
config: run.config,
workspaceDir: run.workspaceDir,
modelId: model,
})),
);
export function resolveModelFallbackOptions(run: FollowupRun["run"]) {
const config = resolveQueuedReplyRuntimeConfig(run.config);
const config = run.config;
return {
cfg: config,
provider: run.provider,
@@ -140,7 +154,7 @@ export function buildEmbeddedRunBaseParams(params: {
authProfile: ReturnType<typeof resolveProviderScopedAuthProfile>;
allowTransientCooldownProbe?: boolean;
}) {
const config = resolveQueuedReplyRuntimeConfig(params.run.config);
const config = params.run.config;
return {
sessionFile: params.run.sessionFile,
workspaceDir: params.run.workspaceDir,
@@ -171,7 +185,7 @@ export function buildEmbeddedContextFromTemplate(params: {
sessionCtx: TemplateContext;
hasRepliedRef: { value: boolean } | undefined;
}) {
const config = resolveQueuedReplyRuntimeConfig(params.run.config);
const config = params.run.config;
return {
sessionId: params.run.sessionId,
sessionKey: params.run.sessionKey,

View File

@@ -48,6 +48,7 @@ import {
hasUnbackedReminderCommitment,
} from "./agent-runner-reminder-guard.js";
import { appendUsageLine, formatResponseUsageLine } from "./agent-runner-usage-line.js";
import { resolveQueuedReplyExecutionConfig } from "./agent-runner-utils.js";
import { createAudioAsVoiceBuffer, createBlockReplyPipeline } from "./block-reply-pipeline.js";
import { resolveEffectiveBlockStreamingConfig } from "./block-streaming.js";
import { createFollowupRunner } from "./followup-runner.js";
@@ -162,42 +163,6 @@ export async function runReplyAgent(params: {
const pendingToolTasks = new Set<Promise<void>>();
const blockReplyTimeoutMs = opts?.blockReplyTimeoutMs ?? BLOCK_REPLY_SEND_TIMEOUT_MS;
const replyToChannel = resolveOriginMessageProvider({
originatingChannel: sessionCtx.OriginatingChannel,
provider: sessionCtx.Surface ?? sessionCtx.Provider,
}) as OriginatingChannelType | undefined;
const replyToMode = resolveReplyToMode(
followupRun.run.config,
replyToChannel,
sessionCtx.AccountId,
sessionCtx.ChatType,
);
const applyReplyToMode = createReplyToModeFilterForChannel(replyToMode, replyToChannel);
const cfg = followupRun.run.config;
const normalizeReplyMediaPaths = createReplyMediaPathNormalizer({
cfg,
sessionKey,
workspaceDir: followupRun.run.workspaceDir,
});
const blockReplyCoalescing =
blockStreamingEnabled && opts?.onBlockReply
? resolveEffectiveBlockStreamingConfig({
cfg,
provider: sessionCtx.Provider,
accountId: sessionCtx.AccountId,
chunking: blockReplyChunking,
}).coalescing
: undefined;
const blockReplyPipeline =
blockStreamingEnabled && opts?.onBlockReply
? createBlockReplyPipeline({
onBlockReply: opts.onBlockReply,
timeoutMs: blockReplyTimeoutMs,
coalescing: blockReplyCoalescing,
buffer: createAudioAsVoiceBuffer({ isAudioPayload }),
})
: null;
const touchActiveSessionEntry = async () => {
if (!activeSessionEntry || !activeSessionStore || !sessionKey) {
return;
@@ -269,6 +234,44 @@ export async function runReplyAgent(params: {
return undefined;
}
followupRun.run.config = await resolveQueuedReplyExecutionConfig(followupRun.run.config);
const replyToChannel = resolveOriginMessageProvider({
originatingChannel: sessionCtx.OriginatingChannel,
provider: sessionCtx.Surface ?? sessionCtx.Provider,
}) as OriginatingChannelType | undefined;
const replyToMode = resolveReplyToMode(
followupRun.run.config,
replyToChannel,
sessionCtx.AccountId,
sessionCtx.ChatType,
);
const applyReplyToMode = createReplyToModeFilterForChannel(replyToMode, replyToChannel);
const cfg = followupRun.run.config;
const normalizeReplyMediaPaths = createReplyMediaPathNormalizer({
cfg,
sessionKey,
workspaceDir: followupRun.run.workspaceDir,
});
const blockReplyCoalescing =
blockStreamingEnabled && opts?.onBlockReply
? resolveEffectiveBlockStreamingConfig({
cfg,
provider: sessionCtx.Provider,
accountId: sessionCtx.AccountId,
chunking: blockReplyChunking,
}).coalescing
: undefined;
const blockReplyPipeline =
blockStreamingEnabled && opts?.onBlockReply
? createBlockReplyPipeline({
onBlockReply: opts.onBlockReply,
timeoutMs: blockReplyTimeoutMs,
coalescing: blockReplyCoalescing,
buffer: createAudioAsVoiceBuffer({ isAudioPayload }),
})
: null;
const replySessionKey = sessionKey ?? followupRun.run.sessionKey;
let replyOperation: ReplyOperation;
try {

View File

@@ -11,6 +11,7 @@ const compactEmbeddedPiSessionMock = vi.fn();
const routeReplyMock = vi.fn();
const isRoutableChannelMock = vi.fn();
const runPreflightCompactionIfNeededMock = vi.fn();
const resolveCommandSecretRefsViaGatewayMock = vi.fn();
let createFollowupRunner: typeof import("./followup-runner.js").createFollowupRunner;
let clearRuntimeConfigSnapshot: typeof import("../../config/config.js").clearRuntimeConfigSnapshot;
let loadSessionStore: typeof import("../../config/sessions/store.js").loadSessionStore;
@@ -275,6 +276,13 @@ async function loadFreshFollowupRunnerModuleForTest() {
isRoutableChannel: (...args: unknown[]) => isRoutableChannelMock(...args),
routeReply: (...args: unknown[]) => routeReplyMock(...args),
}));
vi.doMock("../../cli/command-secret-gateway.js", () => ({
resolveCommandSecretRefsViaGateway: (...args: unknown[]) =>
resolveCommandSecretRefsViaGatewayMock(...args),
}));
vi.doMock("../../cli/command-secret-targets.js", () => ({
getAgentRuntimeCommandSecretTargetIds: () => new Set(["skills.entries."]),
}));
({ createFollowupRunner } = await import("./followup-runner.js"));
({ clearRuntimeConfigSnapshot, setRuntimeConfigSnapshot } =
await import("../../config/config.js"));
@@ -301,9 +309,16 @@ beforeEach(async () => {
runEmbeddedPiAgentMock.mockReset();
compactEmbeddedPiSessionMock.mockReset();
runPreflightCompactionIfNeededMock.mockReset();
resolveCommandSecretRefsViaGatewayMock.mockReset();
runPreflightCompactionIfNeededMock.mockImplementation(
async (params: { sessionEntry?: SessionEntry }) => params.sessionEntry,
);
resolveCommandSecretRefsViaGatewayMock.mockImplementation(async ({ config }) => ({
resolvedConfig: config,
diagnostics: [],
targetStatesByPath: {},
hadUnresolvedTargets: false,
}));
routeReplyMock.mockReset();
routeReplyMock.mockResolvedValue({ ok: true });
isRoutableChannelMock.mockReset();
@@ -432,6 +447,69 @@ describe("createFollowupRunner runtime config", () => {
| undefined;
expect(call?.config).toBe(runtimeConfig);
});
it("resolves queued embedded followups before preflight helpers read config", async () => {
const sourceConfig: OpenClawConfig = {
skills: {
entries: {
whisper: {
apiKey: {
source: "env",
provider: "default",
id: "OPENAI_API_KEY",
},
},
},
},
};
const runtimeConfig: OpenClawConfig = {
skills: {
entries: {
whisper: {
apiKey: "resolved-runtime-key",
},
},
},
};
resolveCommandSecretRefsViaGatewayMock.mockResolvedValueOnce({
resolvedConfig: runtimeConfig,
diagnostics: [],
targetStatesByPath: { "skills.entries.whisper.apiKey": "resolved_local" },
hadUnresolvedTargets: false,
});
runEmbeddedPiAgentMock.mockResolvedValueOnce({
payloads: [],
meta: {},
});
const runner = createFollowupRunner({
typing: createMockTypingController(),
typingMode: "instant",
defaultModel: "openai/gpt-5.4",
});
const queued = createQueuedRun({
run: {
config: sourceConfig,
provider: "openai",
model: "gpt-5.4",
},
});
await runner(queued);
expect(queued.run.config).toBe(runtimeConfig);
expect(runPreflightCompactionIfNeededMock).toHaveBeenCalledWith(
expect.objectContaining({
cfg: runtimeConfig,
}),
);
const call = runEmbeddedPiAgentMock.mock.calls.at(-1)?.[0] as
| {
config?: unknown;
}
| undefined;
expect(call?.config).toBe(runtimeConfig);
});
});
describe("createFollowupRunner compaction", () => {

View File

@@ -21,7 +21,11 @@ import { stripHeartbeatToken } from "../heartbeat.js";
import { isSilentReplyText, SILENT_REPLY_TOKEN } from "../tokens.js";
import type { GetReplyOptions, ReplyPayload } from "../types.js";
import { runPreflightCompactionIfNeeded } from "./agent-runner-memory.js";
import { resolveQueuedReplyRuntimeConfig, resolveRunAuthProfile } from "./agent-runner-utils.js";
import {
resolveQueuedReplyExecutionConfig,
resolveQueuedReplyRuntimeConfig,
resolveRunAuthProfile,
} from "./agent-runner-utils.js";
import { resolveFollowupDeliveryPayloads } from "./followup-delivery.js";
import { resolveOriginMessageProvider } from "./origin-routing.js";
import { refreshQueuedFollowupSession, type FollowupRun } from "./queue.js";
@@ -127,6 +131,7 @@ export function createFollowupRunner(params: {
};
return async (queued: FollowupRun) => {
queued.run.config = await resolveQueuedReplyExecutionConfig(queued.run.config);
const replySessionKey = queued.run.sessionKey ?? sessionKey;
const runtimeConfig = resolveQueuedReplyRuntimeConfig(queued.run.config);
const effectiveQueued =

View File

@@ -7,6 +7,7 @@ const PROFILE_FLAG_RE = /(?:^|\s)--profile(?:\s|=|$)/;
const DEV_FLAG_RE = /(?:^|\s)--dev(?:\s|$)/;
const UPDATE_COMMAND_RE =
/^(?:pnpm|npm|bunx|npx)\s+openclaw\b.*(?:^|\s)update(?:\s|$)|^openclaw\b.*(?:^|\s)update(?:\s|$)/;
const CONTAINER_HINT_RE = /^[a-zA-Z0-9][a-zA-Z0-9_.-]{0,127}$/;
export function formatCliCommand(
command: string,
@@ -14,7 +15,8 @@ export function formatCliCommand(
): string {
const cliName = resolveCliName();
const normalizedCommand = replaceCliName(command, cliName);
const container = env.OPENCLAW_CONTAINER_HINT?.trim();
const rawContainer = env.OPENCLAW_CONTAINER_HINT?.trim();
const container = rawContainer && CONTAINER_HINT_RE.test(rawContainer) ? rawContainer : undefined;
const profile = normalizeProfileName(env.OPENCLAW_PROFILE);
if (!container && !profile) {
return normalizedCommand;

View File

@@ -214,6 +214,14 @@ describe("formatCliCommand", () => {
).toBe("openclaw --container demo gateway status --deep");
});
it("ignores unsafe container hints", () => {
expect(
formatCliCommand("openclaw gateway status --deep", {
OPENCLAW_CONTAINER_HINT: "demo; rm -rf /",
}),
).toBe("openclaw gateway status --deep");
});
it("preserves both --container and --profile hints", () => {
expect(
formatCliCommand("openclaw doctor", {

View File

@@ -1,5 +1,5 @@
import { describe, expect, it } from "vitest";
import { resolveUnusableProfileHint } from "./doctor-auth.js";
import { formatOAuthRefreshFailureDoctorLine, resolveUnusableProfileHint } from "./doctor-auth.js";
describe("resolveUnusableProfileHint", () => {
it("returns billing guidance for disabled billing profiles", () => {
@@ -25,4 +25,43 @@ describe("resolveUnusableProfileHint", () => {
"Wait for cooldown or switch provider.",
);
});
it("formats permanent OAuth refresh failures as reauth-required", () => {
expect(
formatOAuthRefreshFailureDoctorLine({
profileId: "openai-codex:default",
provider: "openai-codex",
message:
"OAuth token refresh failed for openai-codex: refresh_token_reused. Please try again or re-authenticate.",
}),
).toBe(
"- openai-codex:default: re-auth required [refresh_token_reused] — Run `openclaw models auth login --provider openai-codex`.",
);
});
it("formats non-permanent OAuth refresh failures as retry-then-reauth guidance", () => {
expect(
formatOAuthRefreshFailureDoctorLine({
profileId: "openai-codex:default",
provider: "openai-codex",
message:
"OAuth token refresh failed for openai-codex: temporary upstream issue. Please try again or re-authenticate.",
}),
).toBe(
"- openai-codex:default: OAuth refresh failed — Try again; if this persists, run `openclaw models auth login --provider openai-codex`.",
);
});
it("drops the provider-specific command when the parsed provider is unsafe", () => {
expect(
formatOAuthRefreshFailureDoctorLine({
profileId: "openai-codex:default",
provider: "openai-codex",
message:
"OAuth token refresh failed for openai-codex`\nrm -rf /: invalid_grant. Please try again or re-authenticate.",
}),
).toBe(
"- openai-codex:default: re-auth required [invalid_grant] — Run `openclaw models auth login --provider openai-codex`.",
);
});
});

View File

@@ -11,6 +11,11 @@ import {
resolveProfileUnusableUntilForDisplay,
} from "../agents/auth-profiles.js";
import { formatAuthDoctorHint } from "../agents/auth-profiles/doctor.js";
import {
buildOAuthRefreshFailureLoginCommand,
classifyOAuthRefreshFailure,
type OAuthRefreshFailureReason,
} from "../agents/auth-profiles/oauth-refresh-failure.js";
import type { OpenClawConfig } from "../config/config.js";
import { formatErrorMessage } from "../infra/errors.js";
import { resolvePluginProviders } from "../plugins/providers.runtime.js";
@@ -167,6 +172,40 @@ export function resolveUnusableProfileHint(params: {
return "Wait for cooldown or switch provider.";
}
/**
 * Human-readable label for a classified OAuth refresh-failure reason; `null`
 * (no classified reason) renders as the generic "refresh failed".
 */
function formatOAuthRefreshFailureReason(reason: OAuthRefreshFailureReason | null): string {
  if (reason === null) {
    return "refresh failed";
  }
  const labels = {
    refresh_token_reused: "refresh_token_reused",
    invalid_grant: "invalid_grant",
    sign_in_again: "sign in again",
    invalid_refresh_token: "invalid refresh token",
    revoked: "revoked",
  } as const;
  return labels[reason] ?? "refresh failed";
}
/**
 * Render a doctor-report line for an auth-profile error message.
 *
 * @returns `null` when the message is not an OAuth refresh failure; otherwise
 *   a bullet line with either "re-auth required [reason]" (permanent failure)
 *   or retry-then-reauth guidance, including the login command to run.
 */
export function formatOAuthRefreshFailureDoctorLine(params: {
  profileId: string;
  provider: string;
  message: string;
}): string | null {
  const classified = classifyOAuthRefreshFailure(params.message);
  if (classified === null) {
    return null;
  }
  // Fall back to the profile's own provider when the message did not yield a
  // safe provider id.
  const command = buildOAuthRefreshFailureLoginCommand(classified.provider ?? params.provider);
  if (!classified.reason) {
    return `- ${params.profileId}: OAuth refresh failed — Try again; if this persists, run \`${command}\`.`;
  }
  return `- ${params.profileId}: re-auth required [${formatOAuthRefreshFailureReason(classified.reason)}] — Run \`${command}\`.`;
}
export async function resolveAuthIssueHint(
issue: AuthIssue,
cfg: OpenClawConfig,
@@ -275,7 +314,14 @@ export async function noteAuthProfileHealth(params: {
profileId: profile.profileId,
});
} catch (err) {
errors.push(`- ${profile.profileId}: ${formatErrorMessage(err)}`);
const message = formatErrorMessage(err);
errors.push(
formatOAuthRefreshFailureDoctorLine({
profileId: profile.profileId,
provider: profile.provider,
message,
}) ?? `- ${profile.profileId}: ${message}`,
);
}
}
if (errors.length > 0) {

View File

@@ -0,0 +1,14 @@
import { describe, expect, it } from "vitest";
import { summarizeLogTail } from "./gateway.js";
describe("summarizeLogTail", () => {
it("marks permanent OAuth refresh failures as reauth-required", () => {
const lines = summarizeLogTail([
"[openai-codex] Token refresh failed: 401 {",
'"error":{"code":"invalid_grant","message":"Session invalidated due to signing in again"}',
"}",
]);
expect(lines).toEqual(["[openai-codex] token refresh 401 invalid_grant · re-auth required"]);
});
});

View File

@@ -1,8 +1,6 @@
import fs from "node:fs/promises";
import {
normalizeLowercaseStringOrEmpty,
normalizeOptionalString,
} from "../../shared/string-coerce.js";
import { classifyOAuthRefreshFailureReason } from "../../agents/auth-profiles/oauth-refresh-failure.js";
import { normalizeOptionalString } from "../../shared/string-coerce.js";
export async function readFileTailLines(filePath: string, maxLines: number): Promise<string[]> {
const raw = await fs.readFile(filePath, "utf8").catch(() => "");
@@ -120,11 +118,8 @@ export function summarizeLogTail(rawLines: string[], opts?: { maxLines?: number
})();
const code = normalizeOptionalString(parsed?.error?.code) ?? null;
const msg = normalizeOptionalString(parsed?.error?.message) ?? null;
const msgShort = msg
? normalizeLowercaseStringOrEmpty(msg).includes("signing in again")
? "re-auth required"
: shorten(msg, 52)
: null;
const refreshReason = classifyOAuthRefreshFailureReason(msg ?? "");
const msgShort = msg ? (refreshReason ? "re-auth required" : shorten(msg, 52)) : null;
const base = `[${tag}] token refresh ${status}${code ? ` ${code}` : ""}${msgShort ? ` · ${msgShort}` : ""}`;
addGroup(`token:${tag}:${status}:${code ?? ""}:${msgShort ?? ""}`, base);
continue;