diff --git a/extensions/codex/src/app-server/run-attempt.test.ts b/extensions/codex/src/app-server/run-attempt.test.ts index 298c18c6ba9..ae70d8b425a 100644 --- a/extensions/codex/src/app-server/run-attempt.test.ts +++ b/extensions/codex/src/app-server/run-attempt.test.ts @@ -44,6 +44,73 @@ function createParams(sessionFile: string, workspaceDir: string): EmbeddedRunAtt } as EmbeddedRunAttemptParams; } +function createAppServerHarness( + requestImpl: (method: string, params: unknown) => Promise<unknown>, +) { + const requests: Array<{ method: string; params: unknown }> = []; + let notify: (notification: CodexServerNotification) => Promise<void> = async () => undefined; + const request = vi.fn(async (method: string, params?: unknown) => { + requests.push({ method, params }); + return requestImpl(method, params); + }); + + __testing.setCodexAppServerClientFactoryForTests( + async () => + ({ + request, + addNotificationHandler: (handler: typeof notify) => { + notify = handler; + return () => undefined; + }, + addRequestHandler: () => () => undefined, + }) as never, + ); + + return { + request, + requests, + async waitForMethod(method: string) { + await vi.waitFor(() => expect(requests.some((entry) => entry.method === method)).toBe(true)); + }, + async completeTurn(params: { threadId: string; turnId: string }) { + await notify({ + method: "turn/completed", + params: { + threadId: params.threadId, + turnId: params.turnId, + turn: { id: params.turnId, status: "completed" }, + }, + }); + }, + }; +} + +function expectResumeRequest( + requests: Array<{ method: string; params: unknown }>, + params: Record<string, unknown>, +) { + expect(requests).toEqual( + expect.arrayContaining([ + { + method: "thread/resume", + params, + }, + ]), + ); +} + +function createResumeHarness() { + return createAppServerHarness(async (method) => { + if (method === "thread/resume") { + return { thread: { id: "thread-existing" }, modelProvider: "openai" }; + } + if (method === "turn/start") { + return { turn: { id: "turn-1", 
status: "inProgress" } }; + } + return {}; + }); +} + describe("runCodexAppServerAttempt", () => { beforeEach(async () => { tempDir = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-codex-run-")); @@ -56,9 +123,7 @@ describe("runCodexAppServerAttempt", () => { }); it("forwards queued user input and aborts the active app-server turn", async () => { - const requests: Array<{ method: string; params: unknown }> = []; - const request = vi.fn(async (method: string, params?: unknown) => { - requests.push({ method, params }); + const { requests, waitForMethod } = createAppServerHarness(async (method, _params) => { if (method === "thread/start") { return { thread: { id: "thread-1" }, model: "gpt-5.4-codex", modelProvider: "openai" }; } @@ -67,21 +132,11 @@ describe("runCodexAppServerAttempt", () => { } return {}; }); - __testing.setCodexAppServerClientFactoryForTests( - async () => - ({ - request, - addNotificationHandler: () => () => undefined, - addRequestHandler: () => () => undefined, - }) as never, - ); const run = runCodexAppServerAttempt( createParams(path.join(tempDir, "session.jsonl"), path.join(tempDir, "workspace")), ); - await vi.waitFor(() => - expect(requests.some((entry) => entry.method === "turn/start")).toBe(true), - ); + await waitForMethod("turn/start"); expect(queueAgentHarnessMessage("session-1", "more context")).toBe(true); await vi.waitFor(() => @@ -126,9 +181,7 @@ describe("runCodexAppServerAttempt", () => { }; process.on("unhandledRejection", onUnhandledRejection); try { - const requests: Array<{ method: string; params: unknown }> = []; - const request = vi.fn(async (method: string, params?: unknown) => { - requests.push({ method, params }); + const { waitForMethod } = createAppServerHarness(async (method, _params) => { if (method === "thread/start") { return { thread: { id: "thread-1" }, model: "gpt-5.4-codex", modelProvider: "openai" }; } @@ -140,14 +193,6 @@ describe("runCodexAppServerAttempt", () => { } return {}; }); - 
__testing.setCodexAppServerClientFactoryForTests( - async () => - ({ - request, - addNotificationHandler: () => () => undefined, - addRequestHandler: () => () => undefined, - }) as never, - ); const abortController = new AbortController(); const params = createParams( path.join(tempDir, "session.jsonl"), @@ -156,9 +201,7 @@ params.abortSignal = abortController.signal; const run = runCodexAppServerAttempt(params); - await vi.waitFor(() => - expect(requests.some((entry) => entry.method === "turn/start")).toBe(true), - ); + await waitForMethod("turn/start"); abortController.abort("shutdown"); await expect(run).resolves.toMatchObject({ aborted: true }); @@ -170,10 +213,7 @@ }); it("forwards image attachments to the app-server turn input", async () => { - const requests: Array<{ method: string; params: unknown }> = []; - let notify: (notification: CodexServerNotification) => Promise<void> = async () => undefined; - const request = vi.fn(async (method: string, params?: unknown) => { - requests.push({ method, params }); + const { requests, waitForMethod, completeTurn } = createAppServerHarness(async (method) => { if (method === "thread/start") { return { thread: { id: "thread-1" }, model: "gpt-5.4-codex", modelProvider: "openai" }; } @@ -182,17 +222,6 @@ } return {}; }); - __testing.setCodexAppServerClientFactoryForTests( - async () => - ({ - request, - addNotificationHandler: (handler: typeof notify) => { - notify = handler; - return () => undefined; - }, - addRequestHandler: () => () => undefined, - }) as never, - ); const params = createParams( path.join(tempDir, "session.jsonl"), path.join(tempDir, "workspace"), @@ -210,17 +239,8 @@ ]; const run = runCodexAppServerAttempt(params); - await vi.waitFor(() => - expect(requests.some((entry) => entry.method === "turn/start")).toBe(true), - ); - await 
notify({ - method: "turn/completed", - params: { - threadId: "thread-1", - turnId: "turn-1", - turn: { id: "turn-1", status: "completed" }, - }, - }); + await waitForMethod("turn/start"); + await completeTurn({ threadId: "thread-1", turnId: "turn-1" }); await run; expect(requests).toEqual( @@ -338,60 +358,22 @@ modelProvider: "openai", dynamicToolsFingerprint: "[]", }); - const requests: Array<{ method: string; params: unknown }> = []; - let notify: (notification: CodexServerNotification) => Promise<void> = async () => undefined; - const request = vi.fn(async (method: string, params?: unknown) => { - requests.push({ method, params }); - if (method === "thread/resume") { - return { thread: { id: "thread-existing" }, modelProvider: "openai" }; - } - if (method === "turn/start") { - return { turn: { id: "turn-1", status: "inProgress" } }; - } - return {}; - }); - __testing.setCodexAppServerClientFactoryForTests( - async () => - ({ - request, - addNotificationHandler: (handler: typeof notify) => { - notify = handler; - return () => undefined; - }, - addRequestHandler: () => () => undefined, - }) as never, - ); + const { requests, waitForMethod, completeTurn } = createResumeHarness(); const run = runCodexAppServerAttempt(createParams(sessionFile, workspaceDir)); - await vi.waitFor(() => - expect(requests.some((entry) => entry.method === "turn/start")).toBe(true), - ); - await notify({ - method: "turn/completed", - params: { - threadId: "thread-existing", - turnId: "turn-1", - turn: { id: "turn-1", status: "completed" }, - }, - }); + await waitForMethod("turn/start"); + await completeTurn({ threadId: "thread-existing", turnId: "turn-1" }); await run; - expect(requests).toEqual( - expect.arrayContaining([ - { - method: "thread/resume", - params: { - threadId: "thread-existing", - model: "gpt-5.4-codex", - modelProvider: "openai", - approvalPolicy: "never", - approvalsReviewer: "user", - sandbox: "workspace-write", - 
persistExtendedHistory: true, - }, - }, - ]), - ); + expectResumeRequest(requests, { + threadId: "thread-existing", + model: "gpt-5.4-codex", + modelProvider: "openai", + approvalPolicy: "never", + approvalsReviewer: "user", + sandbox: "workspace-write", + persistExtendedHistory: true, + }); }); it("passes configured app-server policy, sandbox, service tier, and model on resume", async () => { @@ -403,29 +385,7 @@ model: "gpt-5.2", modelProvider: "openai", }); - const requests: Array<{ method: string; params: unknown }> = []; - let notify: (notification: CodexServerNotification) => Promise<void> = async () => undefined; - const request = vi.fn(async (method: string, params?: unknown) => { - requests.push({ method, params }); - if (method === "thread/resume") { - return { thread: { id: "thread-existing" }, modelProvider: "openai" }; - } - if (method === "turn/start") { - return { turn: { id: "turn-1", status: "inProgress" } }; - } - return {}; - }); - __testing.setCodexAppServerClientFactoryForTests( - async () => - ({ - request, - addNotificationHandler: (handler: typeof notify) => { - notify = handler; - return () => undefined; - }, - addRequestHandler: () => () => undefined, - }) as never, - ); + const { requests, waitForMethod, completeTurn } = createResumeHarness(); const run = runCodexAppServerAttempt(createParams(sessionFile, workspaceDir), { pluginConfig: { @@ -437,34 +397,22 @@ }, }, }); - await vi.waitFor(() => - expect(requests.some((entry) => entry.method === "turn/start")).toBe(true), - ); - await notify({ - method: "turn/completed", - params: { - threadId: "thread-existing", - turnId: "turn-1", - turn: { id: "turn-1", status: "completed" }, - }, - }); + await waitForMethod("turn/start"); + await completeTurn({ threadId: "thread-existing", turnId: "turn-1" }); await run; + expectResumeRequest(requests, { + threadId: "thread-existing", + model: "gpt-5.4-codex", + 
modelProvider: "openai", + approvalPolicy: "on-request", + approvalsReviewer: "guardian_subagent", + sandbox: "danger-full-access", + serviceTier: "priority", + persistExtendedHistory: true, + }); expect(requests).toEqual( expect.arrayContaining([ - { - method: "thread/resume", - params: { - threadId: "thread-existing", - model: "gpt-5.4-codex", - modelProvider: "openai", - approvalPolicy: "on-request", - approvalsReviewer: "guardian_subagent", - sandbox: "danger-full-access", - serviceTier: "priority", - persistExtendedHistory: true, - }, - }, { method: "turn/start", params: expect.objectContaining({