From 8d2dd8cf2e2a442bc4f3cd2afbe7d2598b876c44 Mon Sep 17 00:00:00 2001 From: Peter Steinberger Date: Mon, 11 May 2026 14:19:07 +0100 Subject: [PATCH] test: tighten openai extension assertions --- .../openai/memory-embedding-adapter.test.ts | 32 ++++++----- extensions/openai/speech-provider.test.ts | 53 ++++++++----------- extensions/openai/tts.test.ts | 30 +++++------ 3 files changed, 53 insertions(+), 62 deletions(-) diff --git a/extensions/openai/memory-embedding-adapter.test.ts b/extensions/openai/memory-embedding-adapter.test.ts index a4fb3f3c0b9..73b908b553b 100644 --- a/extensions/openai/memory-embedding-adapter.test.ts +++ b/extensions/openai/memory-embedding-adapter.test.ts @@ -60,19 +60,23 @@ describe("OpenAI memory embedding adapter", () => { debug: () => {}, }); - expect(mocks.runOpenAiEmbeddingBatches).toHaveBeenCalledWith( - expect.objectContaining({ - requests: [ - expect.objectContaining({ - body: { - model: "text-embedding-3-small", - input: "doc one", - dimensions: 512, - input_type: "document", - }, - }), - ], - }), - ); + const batchCalls = mocks.runOpenAiEmbeddingBatches.mock.calls as unknown as Array< + [ + { + requests: Array<{ + body: Record<string, unknown>; + }>; + }, + ] + >; + const [batchOptions] = batchCalls[0] ?? 
[]; + expect(batchOptions?.requests).toHaveLength(1); + const request = batchOptions?.requests[0]; + expect(request?.body).toEqual({ + model: "text-embedding-3-small", + input: "doc one", + dimensions: 512, + input_type: "document", + }); }); }); diff --git a/extensions/openai/speech-provider.test.ts b/extensions/openai/speech-provider.test.ts index 7d9ee46eea6..a478677c528 100644 --- a/extensions/openai/speech-provider.test.ts +++ b/extensions/openai/speech-provider.test.ts @@ -137,27 +137,24 @@ describe("buildOpenAISpeechProvider", () => { it("preserves talk responseFormat overrides", () => { const provider = buildOpenAISpeechProvider(); - expect( - provider.resolveTalkConfig?.({ - cfg: {} as never, - timeoutMs: 30_000, - baseTtsConfig: { - providers: { - openai: { - apiKey: "sk-base", - responseFormat: "mp3", - }, + const resolvedConfig = provider.resolveTalkConfig?.({ + cfg: {} as never, + timeoutMs: 30_000, + baseTtsConfig: { + providers: { + openai: { + apiKey: "sk-base", + responseFormat: "mp3", }, }, - talkProviderConfig: { - apiKey: "sk-talk", - responseFormat: " WAV ", - }, - }), - ).toMatchObject({ - apiKey: "sk-talk", - responseFormat: "wav", + }, + talkProviderConfig: { + apiKey: "sk-talk", + responseFormat: " WAV ", + }, }); + expect(resolvedConfig?.apiKey).toBe("sk-talk"); + expect(resolvedConfig?.responseFormat).toBe("wav"); }); it("maps Talk speak params onto OpenAI speech overrides", () => { @@ -240,12 +237,10 @@ describe("buildOpenAISpeechProvider", () => { const provider = buildOpenAISpeechProvider(); const fetchMock = vi.fn(async (_url: string, init?: RequestInit) => { const body = parseRequestBody(init); - expect(body).toMatchObject({ - model: "tts-1", - voice: "nova", - speed: 1.25, - response_format: "pcm", - }); + expect(body.model).toBe("tts-1"); + expect(body.voice).toBe("nova"); + expect(body.speed).toBe(1.25); + expect(body.response_format).toBe("pcm"); return new Response(new Uint8Array([1, 2, 3]), { status: 200 }); }); 
globalThis.fetch = fetchMock as unknown as typeof fetch; @@ -298,12 +293,10 @@ describe("buildOpenAISpeechProvider", () => { const provider = buildOpenAISpeechProvider(); const fetchMock = vi.fn(async (_url: string, init?: RequestInit) => { const body = parseRequestBody(init); - expect(body).toMatchObject({ - model: "custom-tts", - voice: "custom-voice", - lang: "en-US", - response_format: "mp3", - }); + expect(body.model).toBe("custom-tts"); + expect(body.voice).toBe("custom-voice"); + expect(body.lang).toBe("en-US"); + expect(body.response_format).toBe("mp3"); return new Response(new Uint8Array([1, 2, 3]), { status: 200 }); }); globalThis.fetch = fetchMock as unknown as typeof fetch; diff --git a/extensions/openai/tts.test.ts b/extensions/openai/tts.test.ts index 360b26b4cde..35cfa145f06 100644 --- a/extensions/openai/tts.test.ts +++ b/extensions/openai/tts.test.ts @@ -148,16 +148,12 @@ describe("openai tts", () => { timeoutMs: 5_000, }); - expect(fetchMock).toHaveBeenCalledWith( - "https://api.openai.com/v1/audio/speech", - expect.objectContaining({ - headers: expect.objectContaining({ - originator: "openclaw", - version: "2026.3.22", - "User-Agent": "openclaw/2026.3.22", - }), - }), - ); + const [url, init] = fetchMock.mock.calls[0] ?? 
[]; + const headers = init?.headers as Record<string, string> | undefined; + expect(url).toBe("https://api.openai.com/v1/audio/speech"); + expect(headers?.originator).toBe("openclaw"); + expect(headers?.version).toBe("2026.3.22"); + expect(headers?.["User-Agent"]).toBe("openclaw/2026.3.22"); }); it("sends instructions to custom OpenAI-compatible endpoints", async () => { @@ -215,14 +211,12 @@ describe("openai tts", () => { throw new Error("expected JSON request body"); } const body = JSON.parse(init.body) as Record<string, unknown>; - expect(body).toMatchObject({ - model: "tts-1", - input: "hello", - voice: "custom-voice", - response_format: "mp3", - lang: "e", - speed: 1.2, - }); + expect(body.model).toBe("tts-1"); + expect(body.input).toBe("hello"); + expect(body.voice).toBe("custom-voice"); + expect(body.response_format).toBe("mp3"); + expect(body.lang).toBe("e"); + expect(body.speed).toBe(1.2); expect(Object.hasOwn(body, "__proto__")).toBe(false); expect(Object.hasOwn(body, "constructor")).toBe(false); expect(Object.hasOwn(body, "prototype")).toBe(false);