fix(agents): forward websocket maxTokens=0 correctly

Landed from #39148 by @scoootscooob.

Co-authored-by: scoootscooob <zhentongfan@gmail.com>
Peter Steinberger
2026-03-07 20:51:26 +00:00
parent 330579ef96
commit f2a92e7c84
3 changed files with 33 additions and 1 deletion

@@ -276,6 +276,7 @@ Docs: https://docs.openclaw.ai
- Browser/dispatcher error clarity: preserve dispatcher-side failure context in browser fetch errors while still appending operator guidance and explicit no-retry model hints, preventing misleading `"Can't reach service"` wrapping and avoiding LLM retry loops. (#39090) Thanks @NewdlDewdl.
- Telegram/polling offset safety: confirm persisted offsets before polling startup while validating stored `lastUpdateId` values as non-negative safe integers (with overflow guards) so malformed offset state cannot cause update skipping/dropping. (#39111) Thanks @MumuTW.
- Telegram/status SecretRef read-only resolution: resolve env-backed bot-token SecretRefs in config-only/status inspection while respecting provider source/defaults and env allowlists, so status no longer crashes or reports false-ready tokens for disallowed providers. (#39130) Thanks @neocody.
- Agents/OpenAI WS max-token zero forwarding: treat `maxTokens: 0` as an explicit value in websocket `response.create` payloads (instead of dropping it as falsy), with regression coverage for zero-token forwarding. (#39148) Thanks @scoootscooob.
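
The bug class behind this entry is the classic falsy-zero trap: in JavaScript, `0` is falsy, so a plain truthiness guard treats an explicit `maxTokens: 0` the same as an omitted option. Below is a minimal standalone TypeScript sketch of the trap and the fix; the `buildExtraParams` helper is hypothetical, while `extraParams` and `max_output_tokens` mirror the real change shown further down.

```ts
type StreamOpts = { maxTokens?: number };

// Hypothetical helper, not the actual openclaw code.
function buildExtraParams(streamOpts?: StreamOpts): Record<string, unknown> {
  const extraParams: Record<string, unknown> = {};
  // Buggy guard: `if (streamOpts?.maxTokens)` is false for 0, so an
  // explicit zero is silently dropped from the payload.
  // Fixed guard: only skip the field when it was genuinely not provided.
  if (streamOpts?.maxTokens !== undefined) {
    extraParams.max_output_tokens = streamOpts.maxTokens;
  }
  return extraParams;
}

console.log(buildExtraParams({ maxTokens: 0 })); // { max_output_tokens: 0 }
console.log(buildExtraParams({}));               // {}
```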
## 2026.3.2

@@ -636,6 +636,7 @@ describe("createOpenAIWebSocketStreamFn", () => {
releaseWsSession("sess-tools");
releaseWsSession("sess-store-default");
releaseWsSession("sess-store-compat");
releaseWsSession("sess-max-tokens-zero");
});
it("connects to the WebSocket on first call", async () => {
@@ -1008,6 +1009,36 @@ describe("createOpenAIWebSocketStreamFn", () => {
    expect(sent.max_output_tokens).toBe(256);
  });

  it("forwards maxTokens: 0 to response.create as max_output_tokens", async () => {
    const streamFn = createOpenAIWebSocketStreamFn("sk-test", "sess-max-tokens-zero");
    const opts = { maxTokens: 0 };
    const stream = streamFn(
      modelStub as Parameters<typeof streamFn>[0],
      contextStub as Parameters<typeof streamFn>[1],
      opts as Parameters<typeof streamFn>[2],
    );
    await new Promise<void>((resolve, reject) => {
      queueMicrotask(async () => {
        try {
          await new Promise((r) => setImmediate(r));
          MockManager.lastInstance!.simulateEvent({
            type: "response.completed",
            response: makeResponseObject("resp-max-zero", "Done"),
          });
          for await (const _ of await resolveStream(stream)) {
            /* consume */
          }
          resolve();
        } catch (e) {
          reject(e);
        }
      });
    });
    const sent = MockManager.lastInstance!.sentEvents[0] as Record<string, unknown>;
    expect(sent.type).toBe("response.create");
    expect(sent.max_output_tokens).toBe(0);
  });

  it("forwards reasoningEffort/reasoningSummary to response.create reasoning block", async () => {
    const streamFn = createOpenAIWebSocketStreamFn("sk-test", "sess-reason");
    const opts = { reasoningEffort: "high", reasoningSummary: "auto" };

@@ -569,7 +569,7 @@ export function createOpenAIWebSocketStreamFn(
  if (streamOpts?.temperature !== undefined) {
    extraParams.temperature = streamOpts.temperature;
  }
- if (streamOpts?.maxTokens) {
+ if (streamOpts?.maxTokens !== undefined) {
    extraParams.max_output_tokens = streamOpts.maxTokens;
  }
  if (streamOpts?.topP !== undefined) {
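
Worth noting as a design point: `temperature` and `topP` in the same block already use the `!== undefined` guard, so the fix brings `maxTokens` in line with its siblings. An option is omitted from `response.create` only when the caller genuinely did not set it, and explicit zeros survive the trip over the websocket.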