mirror of
https://github.com/moltbot/moltbot.git
synced 2026-05-06 23:55:12 +00:00
fix(openai): clamp chat-latest verbosity
This commit is contained in:
@@ -179,7 +179,9 @@ Choose your preferred auth method and follow the setup steps.
|
||||
`chat-latest` is a moving alias. OpenAI documents it as the latest Instant
|
||||
model used in ChatGPT and recommends `gpt-5.5` for production API usage, so
|
||||
keep `openai/gpt-5.5` as the stable default unless you explicitly want that
|
||||
alias behavior.
|
||||
alias behavior. The alias currently accepts only `medium` text verbosity, so
|
||||
OpenClaw normalizes incompatible OpenAI text-verbosity overrides for this
|
||||
model.
|
||||
|
||||
<Warning>
|
||||
OpenClaw does **not** expose `openai/gpt-5.3-codex-spark`. Live OpenAI API requests reject that model, and the current Codex catalog does not expose it either.
|
||||
|
||||
@@ -17,6 +17,7 @@ type LiveModelCase = {
|
||||
contextWindow: number;
|
||||
maxTokens: number;
|
||||
reasoning: boolean;
|
||||
textVerbosity: "low" | "medium";
|
||||
};
|
||||
|
||||
function findOpenAIModel(modelId: string): Model<Api> | null {
|
||||
@@ -34,6 +35,7 @@ function resolveLiveModelCase(modelId: string): LiveModelCase {
|
||||
contextWindow: 400_000,
|
||||
maxTokens: 128_000,
|
||||
reasoning: false,
|
||||
textVerbosity: "medium",
|
||||
};
|
||||
case "gpt-5.5":
|
||||
return {
|
||||
@@ -44,6 +46,7 @@ function resolveLiveModelCase(modelId: string): LiveModelCase {
|
||||
contextWindow: 1_000_000,
|
||||
maxTokens: 128_000,
|
||||
reasoning: true,
|
||||
textVerbosity: "low",
|
||||
};
|
||||
case "gpt-5.5-pro":
|
||||
return {
|
||||
@@ -54,6 +57,7 @@ function resolveLiveModelCase(modelId: string): LiveModelCase {
|
||||
contextWindow: 1_000_000,
|
||||
maxTokens: 128_000,
|
||||
reasoning: true,
|
||||
textVerbosity: "low",
|
||||
};
|
||||
case "gpt-5.4":
|
||||
return {
|
||||
@@ -64,6 +68,7 @@ function resolveLiveModelCase(modelId: string): LiveModelCase {
|
||||
contextWindow: 400_000,
|
||||
maxTokens: 128_000,
|
||||
reasoning: true,
|
||||
textVerbosity: "low",
|
||||
};
|
||||
case "gpt-5.4-pro":
|
||||
return {
|
||||
@@ -74,6 +79,7 @@ function resolveLiveModelCase(modelId: string): LiveModelCase {
|
||||
contextWindow: 400_000,
|
||||
maxTokens: 128_000,
|
||||
reasoning: true,
|
||||
textVerbosity: "low",
|
||||
};
|
||||
case "gpt-5.4-mini":
|
||||
return {
|
||||
@@ -84,6 +90,7 @@ function resolveLiveModelCase(modelId: string): LiveModelCase {
|
||||
contextWindow: 400_000,
|
||||
maxTokens: 128_000,
|
||||
reasoning: true,
|
||||
textVerbosity: "low",
|
||||
};
|
||||
case "gpt-5.4-nano":
|
||||
return {
|
||||
@@ -94,6 +101,7 @@ function resolveLiveModelCase(modelId: string): LiveModelCase {
|
||||
contextWindow: 400_000,
|
||||
maxTokens: 128_000,
|
||||
reasoning: true,
|
||||
textVerbosity: "low",
|
||||
};
|
||||
default:
|
||||
throw new Error(`Unsupported live OpenAI model: ${modelId}`);
|
||||
@@ -177,7 +185,7 @@ describeLive("buildOpenAIProvider live", () => {
|
||||
input: "Return exactly OK.",
|
||||
max_output_tokens: 64,
|
||||
...(liveCase.reasoning ? { reasoning: { effort: "none" as const } } : {}),
|
||||
text: { verbosity: "low" },
|
||||
text: { verbosity: liveCase.textVerbosity },
|
||||
});
|
||||
|
||||
expect(response.output_text.trim()).toMatch(/^OK[.!]?$/);
|
||||
|
||||
@@ -587,6 +587,40 @@ describe("buildOpenAIProvider", () => {
|
||||
expect(result.payload.tools).toEqual([{ type: "web_search" }]);
|
||||
});
|
||||
|
||||
// End-to-end regression test for the chat-latest compat wrapper: the alias
// only accepts "medium" text verbosity, so both the prepared extra params
// ("low") and a raw payload override ("high") must come out clamped.
it("clamps chat-latest text verbosity to the only live-supported value", () => {
  const provider = buildOpenAIProvider();
  const wrap = provider.wrapStreamFn;
  // Fail fast with a readable assertion if the provider stops exposing a wrapper.
  expect(wrap).toBeTypeOf("function");
  if (!wrap) {
    throw new Error("expected OpenAI wrapper");
  }
  // NOTE(review): the `as never` cast bypasses the context type entirely —
  // presumably only these three fields are read by prepareExtraParams;
  // confirm against its signature.
  const extraParams = provider.prepareExtraParams?.({
    provider: "openai",
    modelId: "chat-latest",
    extraParams: {
      textVerbosity: "low",
    },
  } as never);
  const result = runWrappedPayloadCase({
    wrap,
    provider: "openai",
    modelId: "chat-latest",
    extraParams: extraParams ?? undefined,
    // Minimal model stub; the wrapper gates clamping on `api === "openai-responses"`
    // and the chat-latest model id.
    model: {
      api: "openai-responses",
      provider: "openai",
      id: "chat-latest",
      baseUrl: "https://api.openai.com/v1",
      contextWindow: 400_000,
    } as Model<"openai-responses">,
    payload: {
      text: { verbosity: "high" },
    },
  });

  // The incompatible "high" override must have been normalized to "medium".
  expect(result.payload.text).toEqual({ verbosity: "medium" });
});
|
||||
|
||||
it("uses native OpenAI web search instead of the managed web_search function", () => {
|
||||
const provider = buildOpenAIProvider();
|
||||
const wrap = provider.wrapStreamFn;
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
import type { StreamFn } from "@mariozechner/pi-agent-core";
|
||||
import {
|
||||
type ProviderResolveDynamicModelContext,
|
||||
type ProviderRuntimeModel,
|
||||
@@ -77,6 +78,42 @@ const OPENAI_MODERN_MODEL_IDS = [
|
||||
OPENAI_GPT_54_NANO_MODEL_ID,
|
||||
"gpt-5.2",
|
||||
] as const;
|
||||
|
||||
function clampChatLatestTextVerbosity(payload: unknown): void {
|
||||
if (!payload || typeof payload !== "object") {
|
||||
return;
|
||||
}
|
||||
const payloadObj = payload as Record<string, unknown>;
|
||||
const text = payloadObj.text;
|
||||
if (!text || typeof text !== "object") {
|
||||
return;
|
||||
}
|
||||
const textObj = text as Record<string, unknown>;
|
||||
if (textObj.verbosity !== undefined && textObj.verbosity !== "medium") {
|
||||
payloadObj.text = { ...textObj, verbosity: "medium" };
|
||||
}
|
||||
}
|
||||
|
||||
function createOpenAIChatLatestCompatWrapper(baseStreamFn: StreamFn, modelId: string): StreamFn {
|
||||
const underlying = baseStreamFn;
|
||||
return (model, context, options) => {
|
||||
if (
|
||||
model.api !== "openai-responses" ||
|
||||
normalizeLowercaseStringOrEmpty(modelId) !== OPENAI_CHAT_LATEST_MODEL_ID
|
||||
) {
|
||||
return underlying(model, context, options);
|
||||
}
|
||||
const originalOnPayload = options?.onPayload;
|
||||
return underlying(model, context, {
|
||||
...options,
|
||||
onPayload: (payload) => {
|
||||
clampChatLatestTextVerbosity(payload);
|
||||
return originalOnPayload?.(payload, model);
|
||||
},
|
||||
});
|
||||
};
|
||||
}
|
||||
|
||||
function shouldUseOpenAIResponsesTransport(params: {
|
||||
provider: string;
|
||||
api?: string | null;
|
||||
@@ -210,6 +247,7 @@ function resolveOpenAIGptForwardCompatModel(ctx: ProviderResolveDynamicModelCont
|
||||
}
|
||||
|
||||
export function buildOpenAIProvider(): ProviderPlugin {
|
||||
const responsesHooks = buildOpenAIResponsesProviderHooks({ transport: "sse" });
|
||||
return {
|
||||
id: PROVIDER_ID,
|
||||
label: "OpenAI",
|
||||
@@ -247,7 +285,12 @@ export function buildOpenAIProvider(): ProviderPlugin {
|
||||
shouldUseOpenAIResponsesTransport({ provider, api, baseUrl })
|
||||
? { api: "openai-responses", baseUrl }
|
||||
: undefined,
|
||||
...buildOpenAIResponsesProviderHooks({ transport: "sse" }),
|
||||
...responsesHooks,
|
||||
wrapStreamFn: (ctx) =>
|
||||
createOpenAIChatLatestCompatWrapper(
|
||||
responsesHooks.wrapStreamFn?.(ctx) ?? ctx.streamFn,
|
||||
ctx.modelId,
|
||||
),
|
||||
matchesContextOverflowError: ({ errorMessage }) =>
|
||||
/content_filter.*(?:prompt|input).*(?:too long|exceed)/i.test(errorMessage),
|
||||
resolveReasoningOutputMode: () => "native",
|
||||
|
||||
Reference in New Issue
Block a user