refactor(openai): centralize provider defaults

This commit is contained in:
Vincent Koc
2026-03-21 08:39:50 -07:00
parent 931fc9989d
commit 2e8c8a7ae6
10 changed files with 22 additions and 10 deletions

View File

@@ -59,6 +59,7 @@ Docs: https://docs.openclaw.ai
- Docs/plugins: add the community wecom plugin listing to the docs catalog. (#29905) Thanks @sliverp.
- Models/GitHub Copilot: allow forward-compat dynamic model ids without code updates, while preserving configured provider and per-model overrides for those synthetic models. (#51325) Thanks @fuller-stack-dev.
- Agents/compaction: notify users when followup auto-compaction starts and finishes, keeping those notices out of TTS and preserving reply threading for the real assistant reply. (#38805) Thanks @zidongdesign.
- Models/OpenAI: switch the default OpenAI setup model to `openai/gpt-5.4`, keep Codex on `openai-codex/gpt-5.4`, and centralize OpenAI chat, image, TTS, transcription, and embedding defaults in one shared module so future default-model updates stay low-churn. Thanks @vincentkoc.
### Fixes

View File

@@ -149,7 +149,7 @@ What you set:
<Accordion title="OpenAI API key">
Uses `OPENAI_API_KEY` if present or prompts for a key, then stores the credential in auth profiles.
Sets `agents.defaults.model` to `openai/gpt-5.1-codex` when model is unset, `openai/*`, or `openai-codex/*`.
Sets `agents.defaults.model` to `openai/gpt-5.4` when model is unset, `openai/*`, or `openai-codex/*`.
</Accordion>
<Accordion title="xAI (Grok) API key">

View File

@@ -5,15 +5,15 @@ import {
type AudioTranscriptionRequest,
type MediaUnderstandingProvider,
} from "openclaw/plugin-sdk/media-understanding";
import { OPENAI_DEFAULT_AUDIO_TRANSCRIPTION_MODEL } from "../../src/providers/openai-defaults.js";
export const DEFAULT_OPENAI_AUDIO_BASE_URL = "https://api.openai.com/v1";
const DEFAULT_OPENAI_AUDIO_MODEL = "gpt-4o-mini-transcribe";
/**
 * Transcribes audio via the OpenAI transcription API by delegating to the
 * generic OpenAI-compatible helper, supplying the official API base URL and
 * the centralized default transcription model.
 *
 * NOTE(review): this is a rendered diff hunk — the two `defaultModel:` lines
 * below are the removed (old constant) and added (new shared constant) sides
 * of the change, not duplicate object keys in the real file.
 */
export async function transcribeOpenAiAudio(params: AudioTranscriptionRequest) {
return await transcribeOpenAiCompatibleAudio({
// Caller-provided fields are spread first; the explicit default* keys below
// always accompany them — presumably the helper treats them as fallbacks
// when the request does not specify a base URL/model. TODO confirm in helper.
...params,
defaultBaseUrl: DEFAULT_OPENAI_AUDIO_BASE_URL,
defaultModel: DEFAULT_OPENAI_AUDIO_MODEL,
defaultModel: OPENAI_DEFAULT_AUDIO_TRANSCRIPTION_MODEL,
});
}

View File

@@ -19,6 +19,7 @@ import {
} from "openclaw/plugin-sdk/provider-models";
import { createOpenAIAttributionHeadersWrapper } from "openclaw/plugin-sdk/provider-stream";
import { fetchCodexUsage } from "openclaw/plugin-sdk/provider-usage";
import { OPENAI_CODEX_DEFAULT_MODEL } from "../../src/providers/openai-defaults.js";
import { buildOpenAICodexProvider } from "./openai-codex-catalog.js";
import {
cloneFirstTemplateModel,
@@ -38,7 +39,6 @@ const OPENAI_CODEX_GPT_53_SPARK_MODEL_ID = "gpt-5.3-codex-spark";
const OPENAI_CODEX_GPT_53_SPARK_CONTEXT_TOKENS = 128_000;
const OPENAI_CODEX_GPT_53_SPARK_MAX_TOKENS = 128_000;
const OPENAI_CODEX_TEMPLATE_MODEL_IDS = ["gpt-5.2-codex"] as const;
const OPENAI_CODEX_DEFAULT_MODEL = `${PROVIDER_ID}/${OPENAI_CODEX_GPT_54_MODEL_ID}`;
const OPENAI_CODEX_XHIGH_MODEL_IDS = [
OPENAI_CODEX_GPT_54_MODEL_ID,
OPENAI_CODEX_GPT_53_MODEL_ID,

View File

@@ -112,7 +112,7 @@ describe("applyDefaultModelChoice", () => {
});
it("uses applyDefaultConfig path when setDefaultModel is true", async () => {
const defaultModel = "openai/gpt-5.1-codex";
const defaultModel = "openai/gpt-5.4";
const applied = await applyDefaultModelChoice({
config: {},
setDefaultModel: true,

View File

@@ -1,8 +1,8 @@
import { resolveApiKeyForProvider } from "../../agents/model-auth.js";
import type { ImageGenerationProviderPlugin } from "../../plugins/types.js";
import { OPENAI_DEFAULT_IMAGE_MODEL as DEFAULT_OPENAI_IMAGE_MODEL } from "../../providers/openai-defaults.js";
const DEFAULT_OPENAI_IMAGE_BASE_URL = "https://api.openai.com/v1";
const DEFAULT_OPENAI_IMAGE_MODEL = "gpt-image-1";
const DEFAULT_OUTPUT_MIME = "image/png";
const DEFAULT_SIZE = "1024x1024";
const OPENAI_SUPPORTED_SIZES = ["1024x1024", "1024x1536", "1536x1024"] as const;

View File

@@ -1,4 +1,5 @@
import type { SsrFPolicy } from "../infra/net/ssrf.js";
import { OPENAI_DEFAULT_EMBEDDING_MODEL } from "../providers/openai-defaults.js";
import { normalizeEmbeddingModelWithPrefixes } from "./embeddings-model-normalize.js";
import {
createRemoteEmbeddingProvider,
@@ -13,7 +14,6 @@ export type OpenAiEmbeddingClient = {
model: string;
};
export const DEFAULT_OPENAI_EMBEDDING_MODEL = "text-embedding-3-small";
const DEFAULT_OPENAI_BASE_URL = "https://api.openai.com/v1";
const OPENAI_MAX_INPUT_TOKENS: Record<string, number> = {
"text-embedding-3-small": 8192,

View File

@@ -1,9 +1,10 @@
import type { OpenClawConfig } from "../config/config.js";
import { OPENAI_DEFAULT_MODEL } from "../providers/openai-defaults.js";
import { ensureModelAllowlistEntry } from "./provider-model-allowlist.js";
import { applyAgentDefaultPrimaryModel } from "./provider-model-primary.js";
export const GOOGLE_GEMINI_DEFAULT_MODEL = "google/gemini-3.1-pro-preview";
export const OPENAI_DEFAULT_MODEL = "openai/gpt-5.1-codex";
export { OPENAI_DEFAULT_MODEL } from "../providers/openai-defaults.js";
export const OPENCODE_GO_DEFAULT_MODEL_REF = "opencode-go/kimi-k2.5";
export const OPENCODE_ZEN_DEFAULT_MODEL = "opencode/claude-opus-4-6";

View File

@@ -0,0 +1,8 @@
// Centralized default model/voice identifiers for the OpenAI provider family.
// Per the commit intent, this is the single place to bump defaults when
// OpenAI ships new models, keeping future updates low-churn.
//
// NOTE(review): the first two values are provider-qualified refs
// ("provider/model"), while the remaining ones are bare model ids passed
// straight to the OpenAI API — presumably consumers expect exactly these
// shapes; verify against call sites before changing the format.
export const OPENAI_DEFAULT_MODEL = "openai/gpt-5.4";
export const OPENAI_CODEX_DEFAULT_MODEL = "openai-codex/gpt-5.4";
export const OPENAI_DEFAULT_IMAGE_MODEL = "gpt-image-1";
export const OPENAI_DEFAULT_TTS_MODEL = "gpt-4o-mini-tts";
export const OPENAI_DEFAULT_TTS_VOICE = "alloy";
export const OPENAI_DEFAULT_AUDIO_TRANSCRIPTION_MODEL = "gpt-4o-mini-transcribe";
export const OPENAI_DEFAULT_EMBEDDING_MODEL = "text-embedding-3-small";

View File

@@ -32,6 +32,10 @@ import {
normalizeSpeechProviderId,
} from "./provider-registry.js";
import type { SpeechVoiceOption } from "./provider-types.js";
import {
OPENAI_DEFAULT_TTS_MODEL as DEFAULT_OPENAI_MODEL,
OPENAI_DEFAULT_TTS_VOICE as DEFAULT_OPENAI_VOICE,
} from "../providers/openai-defaults.js";
import {
DEFAULT_OPENAI_BASE_URL,
isValidOpenAIModel,
@@ -54,8 +58,6 @@ const DEFAULT_MAX_TEXT_LENGTH = 4096;
const DEFAULT_ELEVENLABS_BASE_URL = "https://api.elevenlabs.io";
const DEFAULT_ELEVENLABS_VOICE_ID = "pMsXgVXv3BLzUgSXRplE";
const DEFAULT_ELEVENLABS_MODEL_ID = "eleven_multilingual_v2";
const DEFAULT_OPENAI_MODEL = "gpt-4o-mini-tts";
const DEFAULT_OPENAI_VOICE = "alloy";
const DEFAULT_EDGE_VOICE = "en-US-MichelleNeural";
const DEFAULT_EDGE_LANG = "en-US";
const DEFAULT_EDGE_OUTPUT_FORMAT = "audio-24khz-48kbitrate-mono-mp3";